@agile-vibe-coding/avc 0.2.3 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +475 -3
- package/cli/agents/agent-selector.md +23 -0
- package/cli/agents/code-implementer.md +117 -0
- package/cli/agents/code-validator.md +80 -0
- package/cli/agents/context-reviewer-epic.md +101 -0
- package/cli/agents/context-reviewer-story.md +92 -0
- package/cli/agents/context-writer-epic.md +145 -0
- package/cli/agents/context-writer-story.md +111 -0
- package/cli/agents/doc-writer-epic.md +42 -0
- package/cli/agents/doc-writer-story.md +43 -0
- package/cli/agents/duplicate-detector.md +110 -0
- package/cli/agents/epic-story-decomposer.md +318 -39
- package/cli/agents/mission-scope-generator.md +68 -4
- package/cli/agents/mission-scope-validator.md +40 -6
- package/cli/agents/project-context-extractor.md +21 -6
- package/cli/agents/scaffolding-generator.md +99 -0
- package/cli/agents/seed-validator.md +71 -0
- package/cli/agents/story-scope-reviewer.md +147 -0
- package/cli/agents/story-splitter.md +83 -0
- package/cli/agents/validator-documentation.json +31 -0
- package/cli/agents/validator-documentation.md +3 -1
- package/cli/api-reference-tool.js +368 -0
- package/cli/checks/catalog.json +76 -0
- package/cli/checks/code/quality.json +26 -0
- package/cli/checks/code/testing.json +14 -0
- package/cli/checks/code/traceability.json +26 -0
- package/cli/checks/cross-refs/epic.json +171 -0
- package/cli/checks/cross-refs/story.json +149 -0
- package/cli/checks/epic/api.json +114 -0
- package/cli/checks/epic/backend.json +126 -0
- package/cli/checks/epic/cloud.json +126 -0
- package/cli/checks/epic/data.json +102 -0
- package/cli/checks/epic/database.json +114 -0
- package/cli/checks/epic/developer.json +182 -0
- package/cli/checks/epic/devops.json +174 -0
- package/cli/checks/epic/frontend.json +162 -0
- package/cli/checks/epic/mobile.json +102 -0
- package/cli/checks/epic/qa.json +90 -0
- package/cli/checks/epic/security.json +184 -0
- package/cli/checks/epic/solution-architect.json +192 -0
- package/cli/checks/epic/test-architect.json +90 -0
- package/cli/checks/epic/ui.json +102 -0
- package/cli/checks/epic/ux.json +90 -0
- package/cli/checks/fixes/epic-fix-template.md +10 -0
- package/cli/checks/fixes/story-fix-template.md +10 -0
- package/cli/checks/story/api.json +186 -0
- package/cli/checks/story/backend.json +102 -0
- package/cli/checks/story/cloud.json +102 -0
- package/cli/checks/story/data.json +210 -0
- package/cli/checks/story/database.json +102 -0
- package/cli/checks/story/developer.json +168 -0
- package/cli/checks/story/devops.json +102 -0
- package/cli/checks/story/frontend.json +174 -0
- package/cli/checks/story/mobile.json +102 -0
- package/cli/checks/story/qa.json +210 -0
- package/cli/checks/story/security.json +198 -0
- package/cli/checks/story/solution-architect.json +230 -0
- package/cli/checks/story/test-architect.json +210 -0
- package/cli/checks/story/ui.json +102 -0
- package/cli/checks/story/ux.json +102 -0
- package/cli/coding-order.js +401 -0
- package/cli/dependency-checker.js +72 -0
- package/cli/epic-story-validator.js +284 -799
- package/cli/index.js +0 -0
- package/cli/init-model-config.js +17 -10
- package/cli/init.js +514 -92
- package/cli/kanban-server-manager.js +1 -2
- package/cli/llm-claude.js +98 -31
- package/cli/llm-gemini.js +29 -5
- package/cli/llm-local.js +493 -0
- package/cli/llm-openai.js +262 -41
- package/cli/llm-provider.js +147 -8
- package/cli/llm-token-limits.js +113 -4
- package/cli/llm-verifier.js +209 -1
- package/cli/llm-xiaomi.js +143 -0
- package/cli/message-constants.js +3 -12
- package/cli/messaging-api.js +6 -12
- package/cli/micro-check-fixer.js +335 -0
- package/cli/micro-check-runner.js +449 -0
- package/cli/micro-check-scorer.js +148 -0
- package/cli/micro-check-validator.js +538 -0
- package/cli/model-pricing.js +23 -0
- package/cli/model-selector.js +3 -2
- package/cli/prompt-logger.js +57 -0
- package/cli/repl-ink.js +106 -346
- package/cli/repl-old.js +1 -2
- package/cli/seed-processor.js +194 -24
- package/cli/sprint-planning-processor.js +2638 -289
- package/cli/template-processor.js +50 -3
- package/cli/token-tracker.js +50 -23
- package/cli/tools/generate-story-validators.js +1 -1
- package/cli/validation-router.js +70 -8
- package/cli/worktree-runner.js +654 -0
- package/kanban/client/dist/assets/index-D_KC5EQT.css +1 -0
- package/kanban/client/dist/assets/index-DjY5zqW7.js +351 -0
- package/kanban/client/dist/index.html +2 -2
- package/kanban/client/src/App.jsx +43 -14
- package/kanban/client/src/components/ceremony/AskArchPopup.jsx +7 -3
- package/kanban/client/src/components/ceremony/AskModelPopup.jsx +23 -10
- package/kanban/client/src/components/ceremony/CeremonyWorkflowModal.jsx +320 -133
- package/kanban/client/src/components/ceremony/ProviderSwitcherButton.jsx +290 -0
- package/kanban/client/src/components/ceremony/SponsorCallModal.jsx +80 -13
- package/kanban/client/src/components/ceremony/SprintPlanningModal.jsx +156 -22
- package/kanban/client/src/components/ceremony/steps/ArchitectureStep.jsx +11 -11
- package/kanban/client/src/components/ceremony/steps/CompleteStep.jsx +3 -21
- package/kanban/client/src/components/ceremony/steps/ReviewAnswersStep.jsx +214 -10
- package/kanban/client/src/components/ceremony/steps/RunningStep.jsx +23 -2
- package/kanban/client/src/components/kanban/CardDetailModal.jsx +97 -10
- package/kanban/client/src/components/kanban/GroupingSelector.jsx +7 -1
- package/kanban/client/src/components/kanban/KanbanCard.jsx +23 -14
- package/kanban/client/src/components/kanban/RefineWorkItemPopup.jsx +9 -14
- package/kanban/client/src/components/kanban/RunButton.jsx +162 -0
- package/kanban/client/src/components/kanban/SeedButton.jsx +176 -0
- package/kanban/client/src/components/settings/AgentsTab.jsx +103 -75
- package/kanban/client/src/components/settings/ApiKeysTab.jsx +31 -2
- package/kanban/client/src/components/settings/CeremonyModelsTab.jsx +9 -2
- package/kanban/client/src/components/settings/CheckEditorPopup.jsx +507 -0
- package/kanban/client/src/components/settings/CostThresholdsTab.jsx +3 -2
- package/kanban/client/src/components/settings/ModelPricingTab.jsx +72 -7
- package/kanban/client/src/components/settings/OpenAIAuthSection.jsx +412 -0
- package/kanban/client/src/components/settings/SettingsModal.jsx +4 -4
- package/kanban/client/src/components/stats/CostModal.jsx +34 -3
- package/kanban/client/src/hooks/useGrouping.js +59 -0
- package/kanban/client/src/lib/api.js +118 -4
- package/kanban/client/src/lib/status-grouping.js +10 -0
- package/kanban/client/src/store/kanbanStore.js +8 -0
- package/kanban/server/index.js +23 -2
- package/kanban/server/routes/ceremony.js +153 -4
- package/kanban/server/routes/costs.js +9 -3
- package/kanban/server/routes/openai-oauth.js +366 -0
- package/kanban/server/routes/settings.js +447 -14
- package/kanban/server/routes/websocket.js +7 -2
- package/kanban/server/routes/work-items.js +141 -1
- package/kanban/server/services/CeremonyService.js +275 -24
- package/kanban/server/services/TaskRunnerService.js +261 -0
- package/kanban/server/workers/run-task-worker.js +121 -0
- package/kanban/server/workers/seed-worker.js +94 -0
- package/kanban/server/workers/sponsor-call-worker.js +14 -6
- package/kanban/server/workers/sprint-planning-worker.js +94 -12
- package/package.json +2 -3
- package/cli/agents/solver-epic-api.json +0 -15
- package/cli/agents/solver-epic-api.md +0 -39
- package/cli/agents/solver-epic-backend.json +0 -15
- package/cli/agents/solver-epic-backend.md +0 -39
- package/cli/agents/solver-epic-cloud.json +0 -15
- package/cli/agents/solver-epic-cloud.md +0 -39
- package/cli/agents/solver-epic-data.json +0 -15
- package/cli/agents/solver-epic-data.md +0 -39
- package/cli/agents/solver-epic-database.json +0 -15
- package/cli/agents/solver-epic-database.md +0 -39
- package/cli/agents/solver-epic-developer.json +0 -15
- package/cli/agents/solver-epic-developer.md +0 -39
- package/cli/agents/solver-epic-devops.json +0 -15
- package/cli/agents/solver-epic-devops.md +0 -39
- package/cli/agents/solver-epic-frontend.json +0 -15
- package/cli/agents/solver-epic-frontend.md +0 -39
- package/cli/agents/solver-epic-mobile.json +0 -15
- package/cli/agents/solver-epic-mobile.md +0 -39
- package/cli/agents/solver-epic-qa.json +0 -15
- package/cli/agents/solver-epic-qa.md +0 -39
- package/cli/agents/solver-epic-security.json +0 -15
- package/cli/agents/solver-epic-security.md +0 -39
- package/cli/agents/solver-epic-solution-architect.json +0 -15
- package/cli/agents/solver-epic-solution-architect.md +0 -39
- package/cli/agents/solver-epic-test-architect.json +0 -15
- package/cli/agents/solver-epic-test-architect.md +0 -39
- package/cli/agents/solver-epic-ui.json +0 -15
- package/cli/agents/solver-epic-ui.md +0 -39
- package/cli/agents/solver-epic-ux.json +0 -15
- package/cli/agents/solver-epic-ux.md +0 -39
- package/cli/agents/solver-story-api.json +0 -15
- package/cli/agents/solver-story-api.md +0 -39
- package/cli/agents/solver-story-backend.json +0 -15
- package/cli/agents/solver-story-backend.md +0 -39
- package/cli/agents/solver-story-cloud.json +0 -15
- package/cli/agents/solver-story-cloud.md +0 -39
- package/cli/agents/solver-story-data.json +0 -15
- package/cli/agents/solver-story-data.md +0 -39
- package/cli/agents/solver-story-database.json +0 -15
- package/cli/agents/solver-story-database.md +0 -39
- package/cli/agents/solver-story-developer.json +0 -15
- package/cli/agents/solver-story-developer.md +0 -39
- package/cli/agents/solver-story-devops.json +0 -15
- package/cli/agents/solver-story-devops.md +0 -39
- package/cli/agents/solver-story-frontend.json +0 -15
- package/cli/agents/solver-story-frontend.md +0 -39
- package/cli/agents/solver-story-mobile.json +0 -15
- package/cli/agents/solver-story-mobile.md +0 -39
- package/cli/agents/solver-story-qa.json +0 -15
- package/cli/agents/solver-story-qa.md +0 -39
- package/cli/agents/solver-story-security.json +0 -15
- package/cli/agents/solver-story-security.md +0 -39
- package/cli/agents/solver-story-solution-architect.json +0 -15
- package/cli/agents/solver-story-solution-architect.md +0 -39
- package/cli/agents/solver-story-test-architect.json +0 -15
- package/cli/agents/solver-story-test-architect.md +0 -39
- package/cli/agents/solver-story-ui.json +0 -15
- package/cli/agents/solver-story-ui.md +0 -39
- package/cli/agents/solver-story-ux.json +0 -15
- package/cli/agents/solver-story-ux.md +0 -39
- package/cli/agents/validator-epic-api.json +0 -93
- package/cli/agents/validator-epic-api.md +0 -137
- package/cli/agents/validator-epic-backend.json +0 -93
- package/cli/agents/validator-epic-backend.md +0 -130
- package/cli/agents/validator-epic-cloud.json +0 -93
- package/cli/agents/validator-epic-cloud.md +0 -137
- package/cli/agents/validator-epic-data.json +0 -93
- package/cli/agents/validator-epic-data.md +0 -130
- package/cli/agents/validator-epic-database.json +0 -93
- package/cli/agents/validator-epic-database.md +0 -137
- package/cli/agents/validator-epic-developer.json +0 -74
- package/cli/agents/validator-epic-developer.md +0 -153
- package/cli/agents/validator-epic-devops.json +0 -74
- package/cli/agents/validator-epic-devops.md +0 -153
- package/cli/agents/validator-epic-frontend.json +0 -74
- package/cli/agents/validator-epic-frontend.md +0 -153
- package/cli/agents/validator-epic-mobile.json +0 -93
- package/cli/agents/validator-epic-mobile.md +0 -130
- package/cli/agents/validator-epic-qa.json +0 -93
- package/cli/agents/validator-epic-qa.md +0 -130
- package/cli/agents/validator-epic-security.json +0 -74
- package/cli/agents/validator-epic-security.md +0 -154
- package/cli/agents/validator-epic-solution-architect.json +0 -74
- package/cli/agents/validator-epic-solution-architect.md +0 -156
- package/cli/agents/validator-epic-test-architect.json +0 -93
- package/cli/agents/validator-epic-test-architect.md +0 -130
- package/cli/agents/validator-epic-ui.json +0 -93
- package/cli/agents/validator-epic-ui.md +0 -130
- package/cli/agents/validator-epic-ux.json +0 -93
- package/cli/agents/validator-epic-ux.md +0 -130
- package/cli/agents/validator-story-api.json +0 -104
- package/cli/agents/validator-story-api.md +0 -152
- package/cli/agents/validator-story-backend.json +0 -104
- package/cli/agents/validator-story-backend.md +0 -152
- package/cli/agents/validator-story-cloud.json +0 -104
- package/cli/agents/validator-story-cloud.md +0 -152
- package/cli/agents/validator-story-data.json +0 -104
- package/cli/agents/validator-story-data.md +0 -152
- package/cli/agents/validator-story-database.json +0 -104
- package/cli/agents/validator-story-database.md +0 -152
- package/cli/agents/validator-story-developer.json +0 -104
- package/cli/agents/validator-story-developer.md +0 -152
- package/cli/agents/validator-story-devops.json +0 -104
- package/cli/agents/validator-story-devops.md +0 -152
- package/cli/agents/validator-story-frontend.json +0 -104
- package/cli/agents/validator-story-frontend.md +0 -152
- package/cli/agents/validator-story-mobile.json +0 -104
- package/cli/agents/validator-story-mobile.md +0 -152
- package/cli/agents/validator-story-qa.json +0 -104
- package/cli/agents/validator-story-qa.md +0 -152
- package/cli/agents/validator-story-security.json +0 -104
- package/cli/agents/validator-story-security.md +0 -152
- package/cli/agents/validator-story-solution-architect.json +0 -104
- package/cli/agents/validator-story-solution-architect.md +0 -152
- package/cli/agents/validator-story-test-architect.json +0 -104
- package/cli/agents/validator-story-test-architect.md +0 -152
- package/cli/agents/validator-story-ui.json +0 -104
- package/cli/agents/validator-story-ui.md +0 -152
- package/cli/agents/validator-story-ux.json +0 -104
- package/cli/agents/validator-story-ux.md +0 -152
- package/kanban/client/dist/assets/index-CiD8PS2e.js +0 -306
- package/kanban/client/dist/assets/index-nLh0m82Q.css +0 -1
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import fs from 'fs';
|
|
2
2
|
import path from 'path';
|
|
3
3
|
import { LLMProvider } from './llm-provider.js';
|
|
4
|
+
import { PromptLogger } from './prompt-logger.js';
|
|
4
5
|
import { TokenTracker } from './token-tracker.js';
|
|
5
6
|
import { EpicStoryValidator } from './epic-story-validator.js';
|
|
6
7
|
import { VerificationTracker } from './verification-tracker.js';
|
|
@@ -9,6 +10,8 @@ import { getCeremonyHeader } from './message-constants.js';
|
|
|
9
10
|
import { sendError, sendWarning, sendSuccess, sendInfo, sendOutput, sendIndented, sendSectionHeader, sendCeremonyHeader, sendProgress, sendSubstep } from './messaging-api.js';
|
|
10
11
|
import { outputBuffer } from './output-buffer.js';
|
|
11
12
|
import { loadAgent } from './agent-loader.js';
|
|
13
|
+
import { CONTEXT_GENERATION_TOOLS, dispatchToolCall } from './api-reference-tool.js';
|
|
14
|
+
import { computeCodingOrder } from './coding-order.js';
|
|
12
15
|
|
|
13
16
|
const __filename = fileURLToPath(import.meta.url);
|
|
14
17
|
const __dirname = path.dirname(__filename);
|
|
@@ -46,6 +49,14 @@ class SprintPlanningProcessor {
|
|
|
46
49
|
// Stage provider cache
|
|
47
50
|
this._stageProviders = {};
|
|
48
51
|
|
|
52
|
+
// LLM-generated context.md cache (keyed by epic.name / "epicName::storyName")
|
|
53
|
+
// Populated by generateContextFiles() before validation; consumed by validation loop and writeHierarchyFiles()
|
|
54
|
+
this._epicContextCache = new Map();
|
|
55
|
+
this._storyContextCache = new Map();
|
|
56
|
+
|
|
57
|
+
// Initialize prompt logger (writes per-call prompt/response JSON files)
|
|
58
|
+
this._promptLogger = new PromptLogger(process.cwd(), 'sprint-planning');
|
|
59
|
+
|
|
49
60
|
// Initialize token tracker
|
|
50
61
|
this.tokenTracker = new TokenTracker(this.avcPath);
|
|
51
62
|
this.tokenTracker.init();
|
|
@@ -66,6 +77,18 @@ class SprintPlanningProcessor {
|
|
|
66
77
|
// and waits for it to resolve with { selectedEpicIds, selectedStoryIds }.
|
|
67
78
|
// When null (default), the processor runs straight through without pausing.
|
|
68
79
|
this._selectionCallback = options?.selectionCallback ?? null;
|
|
80
|
+
|
|
81
|
+
// Optional callback fired immediately after Stage 6 writes work.json files to disk.
|
|
82
|
+
// Use this to trigger Kanban board refresh before Stage 7/8 (doc gen + enrichment) complete.
|
|
83
|
+
this._hierarchyWrittenCallback = options?.hierarchyWrittenCallback ?? null;
|
|
84
|
+
|
|
85
|
+
// Optional callback fired each time a single epic or story work.json is written.
|
|
86
|
+
// Allows the Kanban board to display items one-by-one as they are created.
|
|
87
|
+
this._itemWrittenCallback = options?.itemWrittenCallback ?? null;
|
|
88
|
+
|
|
89
|
+
// Optional callback when a validator call fails with quota/rate-limit error.
|
|
90
|
+
// Async: resolves with { newProvider?, newModel? } or null (retry same model).
|
|
91
|
+
this._quotaExceededCallback = options?.quotaExceededCallback ?? null;
|
|
69
92
|
}
|
|
70
93
|
|
|
71
94
|
/**
|
|
@@ -137,6 +160,102 @@ class SprintPlanningProcessor {
|
|
|
137
160
|
this.debug(`${label} TOTALS: ${epics.length} epics, ${totalStories} stories`);
|
|
138
161
|
}
|
|
139
162
|
|
|
163
|
+
/**
|
|
164
|
+
* Truncate document content for local LLMs with limited context windows.
|
|
165
|
+
* For non-local providers, returns the content unchanged.
|
|
166
|
+
*
|
|
167
|
+
* Keeps sections 1-3 in full (Overview, Target Users, Core Features),
|
|
168
|
+
* then only section headers + first paragraph for sections 4-9.
|
|
169
|
+
*
|
|
170
|
+
* @param {string} docContent - The full document content
|
|
171
|
+
* @param {number} maxChars - Maximum character length (default 6000)
|
|
172
|
+
* @returns {string} Truncated or original content
|
|
173
|
+
*/
|
|
174
|
+
_truncateDocForLocalLLM(docContent, maxChars = 6000) {
|
|
175
|
+
if (this._providerName !== 'local') {
|
|
176
|
+
return docContent;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
if (!docContent || docContent.length <= maxChars) {
|
|
180
|
+
this.debug('Local LLM truncation: content already within limit', {
|
|
181
|
+
contentLength: docContent?.length || 0,
|
|
182
|
+
maxChars
|
|
183
|
+
});
|
|
184
|
+
return docContent;
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
this.debug('Local LLM truncation: truncating document for context window', {
|
|
188
|
+
originalLength: docContent.length,
|
|
189
|
+
maxChars
|
|
190
|
+
});
|
|
191
|
+
|
|
192
|
+
const lines = docContent.split('\n');
|
|
193
|
+
const sections = [];
|
|
194
|
+
let currentSection = { header: null, lines: [] };
|
|
195
|
+
|
|
196
|
+
// Split into sections by ## headers
|
|
197
|
+
for (const line of lines) {
|
|
198
|
+
if (line.startsWith('## ')) {
|
|
199
|
+
if (currentSection.header !== null || currentSection.lines.length > 0) {
|
|
200
|
+
sections.push(currentSection);
|
|
201
|
+
}
|
|
202
|
+
currentSection = { header: line, lines: [] };
|
|
203
|
+
} else {
|
|
204
|
+
currentSection.lines.push(line);
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
// Push last section
|
|
208
|
+
if (currentSection.header !== null || currentSection.lines.length > 0) {
|
|
209
|
+
sections.push(currentSection);
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
// Keep first 3 sections (index 0 = preamble, 1-3 = first three ## sections) in full
|
|
213
|
+
// Summarize sections 4+
|
|
214
|
+
const resultParts = [];
|
|
215
|
+
let sectionIndex = 0;
|
|
216
|
+
|
|
217
|
+
for (const section of sections) {
|
|
218
|
+
if (section.header) {
|
|
219
|
+
sectionIndex++;
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
if (sectionIndex <= 3) {
|
|
223
|
+
// Keep in full
|
|
224
|
+
if (section.header) resultParts.push(section.header);
|
|
225
|
+
resultParts.push(...section.lines);
|
|
226
|
+
} else {
|
|
227
|
+
// Keep header + first paragraph only
|
|
228
|
+
if (section.header) resultParts.push(section.header);
|
|
229
|
+
let foundContent = false;
|
|
230
|
+
for (const sLine of section.lines) {
|
|
231
|
+
if (sLine.trim() === '') {
|
|
232
|
+
if (foundContent) break; // End of first paragraph
|
|
233
|
+
resultParts.push(sLine);
|
|
234
|
+
} else {
|
|
235
|
+
foundContent = true;
|
|
236
|
+
resultParts.push(sLine);
|
|
237
|
+
}
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
let result = resultParts.join('\n');
|
|
243
|
+
|
|
244
|
+
// Final hard truncation if still over limit
|
|
245
|
+
if (result.length > maxChars) {
|
|
246
|
+
result = result.substring(0, maxChars);
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
result += '\n\n[... remaining sections summarized for context window limits ...]';
|
|
250
|
+
|
|
251
|
+
this.debug('Local LLM truncation complete', {
|
|
252
|
+
originalLength: docContent.length,
|
|
253
|
+
truncatedLength: result.length
|
|
254
|
+
});
|
|
255
|
+
|
|
256
|
+
return result;
|
|
257
|
+
}
|
|
258
|
+
|
|
140
259
|
/**
|
|
141
260
|
* API call logging with timing
|
|
142
261
|
*/
|
|
@@ -222,7 +341,16 @@ class SprintPlanningProcessor {
|
|
|
222
341
|
* @returns {Promise<LLMProvider>} LLM provider instance
|
|
223
342
|
*/
|
|
224
343
|
async getProviderForStageInstance(stageName) {
|
|
225
|
-
|
|
344
|
+
let { provider, model } = this.getProviderForStage(stageName);
|
|
345
|
+
|
|
346
|
+
// Resolve to an available provider if current one has no credentials
|
|
347
|
+
const resolved = await LLMProvider.resolveAvailableProvider(provider, model);
|
|
348
|
+
if (resolved.fellBack) {
|
|
349
|
+
this.debug(`Provider fallback for ${stageName}: ${provider}→${resolved.provider} (${resolved.model})`);
|
|
350
|
+
console.warn(`[WARN] ${provider} has no API key — falling back to ${resolved.provider} for stage "${stageName}"`);
|
|
351
|
+
provider = resolved.provider;
|
|
352
|
+
model = resolved.model;
|
|
353
|
+
}
|
|
226
354
|
|
|
227
355
|
// Check if we already have a provider for this stage
|
|
228
356
|
const cacheKey = `${stageName}:${provider}:${model}`;
|
|
@@ -236,6 +364,7 @@ class SprintPlanningProcessor {
|
|
|
236
364
|
this.debug(`Creating new provider for ${stageName}: ${provider} (${model})`);
|
|
237
365
|
const providerInstance = await LLMProvider.create(provider, model);
|
|
238
366
|
this._registerTokenCallback(providerInstance, `${this.ceremonyName}-${stageName}`);
|
|
367
|
+
providerInstance.setPromptLogger(this._promptLogger, stageName);
|
|
239
368
|
this._stageProviders[cacheKey] = providerInstance;
|
|
240
369
|
|
|
241
370
|
return providerInstance;
|
|
@@ -263,6 +392,26 @@ class SprintPlanningProcessor {
|
|
|
263
392
|
}
|
|
264
393
|
}
|
|
265
394
|
|
|
395
|
+
/**
|
|
396
|
+
* Run async task functions with bounded concurrency.
|
|
397
|
+
* @param {Array<() => Promise>} tasks - Array of functions that return promises
|
|
398
|
+
* @param {number} concurrency - Max parallel tasks
|
|
399
|
+
* @returns {Promise<Array>} Results in original order
|
|
400
|
+
*/
|
|
401
|
+
async _runWithConcurrency(tasks, concurrency) {
|
|
402
|
+
const results = [];
|
|
403
|
+
const executing = new Set();
|
|
404
|
+
for (let i = 0; i < tasks.length; i++) {
|
|
405
|
+
const p = tasks[i]().then(result => { executing.delete(p); return result; });
|
|
406
|
+
executing.add(p);
|
|
407
|
+
results.push(p);
|
|
408
|
+
if (executing.size >= concurrency) {
|
|
409
|
+
await Promise.race(executing);
|
|
410
|
+
}
|
|
411
|
+
}
|
|
412
|
+
return Promise.all(results);
|
|
413
|
+
}
|
|
414
|
+
|
|
266
415
|
/**
|
|
267
416
|
* Aggregate token usage across all provider instances:
|
|
268
417
|
* - this.llmProvider (Stage 5 validation fallback)
|
|
@@ -311,6 +460,7 @@ class SprintPlanningProcessor {
|
|
|
311
460
|
try {
|
|
312
461
|
this.llmProvider = await LLMProvider.create(this._providerName, this._modelName);
|
|
313
462
|
this._registerTokenCallback(this.llmProvider);
|
|
463
|
+
this.llmProvider.setPromptLogger(this._promptLogger, 'main');
|
|
314
464
|
return this.llmProvider;
|
|
315
465
|
} catch (error) {
|
|
316
466
|
this.debug(`Could not initialize ${this._providerName} provider`);
|
|
@@ -319,7 +469,13 @@ class SprintPlanningProcessor {
|
|
|
319
469
|
}
|
|
320
470
|
}
|
|
321
471
|
|
|
322
|
-
async retryWithBackoff(fn, operation, maxRetries = 3) {
|
|
472
|
+
async retryWithBackoff(fn, operation, maxRetries = 3, options = {}) {
|
|
473
|
+
const {
|
|
474
|
+
baseDelay = this._providerName === 'local' ? 500 : 1000,
|
|
475
|
+
multiplier = this._providerName === 'local' ? 1.5 : 2,
|
|
476
|
+
maxDelay = this._providerName === 'local' ? 5000 : 30000,
|
|
477
|
+
} = options;
|
|
478
|
+
|
|
323
479
|
for (let attempt = 1; attempt <= maxRetries; attempt++) {
|
|
324
480
|
try {
|
|
325
481
|
return await fn();
|
|
@@ -327,13 +483,19 @@ class SprintPlanningProcessor {
|
|
|
327
483
|
const isLastAttempt = attempt === maxRetries;
|
|
328
484
|
const isRetriable = error.message?.includes('rate limit') ||
|
|
329
485
|
error.message?.includes('timeout') ||
|
|
330
|
-
error.message?.includes('503')
|
|
486
|
+
error.message?.includes('503') ||
|
|
487
|
+
error.message?.toLowerCase().includes('connection error') ||
|
|
488
|
+
error.message?.toLowerCase().includes('econnreset') ||
|
|
489
|
+
error.message?.toLowerCase().includes('network error') ||
|
|
490
|
+
error.message?.toLowerCase().includes('terminated') ||
|
|
491
|
+
error.message?.toLowerCase().includes('econnrefused') ||
|
|
492
|
+
error.message?.toLowerCase().includes('socket hang up');
|
|
331
493
|
|
|
332
494
|
if (isLastAttempt || !isRetriable) {
|
|
333
495
|
throw error;
|
|
334
496
|
}
|
|
335
497
|
|
|
336
|
-
const delay = Math.pow(
|
|
498
|
+
const delay = Math.min(Math.pow(multiplier, attempt) * baseDelay, maxDelay);
|
|
337
499
|
this.debug(`Retry ${attempt}/${maxRetries} in ${delay/1000}s: ${operation}`);
|
|
338
500
|
this.debug(`Error: ${error.message}`);
|
|
339
501
|
await new Promise(resolve => setTimeout(resolve, delay));
|
|
@@ -486,96 +648,14 @@ class SprintPlanningProcessor {
|
|
|
486
648
|
const docContent = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
487
649
|
this.debug(`Doc content loaded (${docContent.length} chars)`);
|
|
488
650
|
|
|
489
|
-
|
|
490
|
-
this.
|
|
491
|
-
this.debug('doc.md full content:\n' + docContent);
|
|
492
|
-
|
|
493
|
-
// Try to extract scope from known section headers
|
|
494
|
-
const scopeFromSection = this.tryExtractScopeFromSections(docContent);
|
|
495
|
-
|
|
496
|
-
if (scopeFromSection) {
|
|
497
|
-
this.debug(`✓ Scope extracted from section (${scopeFromSection.length} chars)`);
|
|
498
|
-
this.debugSection('SCOPE TEXT SENT TO LLM (extracted from doc section)');
|
|
499
|
-
this.debug('Full scope text:\n' + scopeFromSection);
|
|
500
|
-
return scopeFromSection;
|
|
501
|
-
}
|
|
502
|
-
|
|
503
|
-
// Fallback: Use entire doc.md
|
|
504
|
-
this.debug('⚠️ No standard scope section found');
|
|
505
|
-
this.debug('Using entire doc.md content as scope source');
|
|
506
|
-
|
|
507
|
-
sendWarning('No standard scope section found in doc.md');
|
|
508
|
-
sendIndented('Using entire documentation for feature extraction.', 1);
|
|
509
|
-
sendIndented('For better results and lower token usage, consider adding one of:', 1);
|
|
510
|
-
sendIndented('- "## Initial Scope"', 1);
|
|
511
|
-
sendIndented('- "## Scope"', 1);
|
|
512
|
-
sendIndented('- "## Features"', 1);
|
|
651
|
+
this.debugSection('SCOPE TEXT SENT TO LLM (full doc.md)');
|
|
652
|
+
this.debug(`Full doc content (${docContent.length} chars):\n` + docContent);
|
|
513
653
|
|
|
514
|
-
this.debugSection('SCOPE TEXT SENT TO LLM (full doc.md - no scope section found)');
|
|
515
|
-
this.debug(`Using full doc content (${docContent.length} chars) as scope`);
|
|
516
654
|
return docContent;
|
|
517
655
|
}
|
|
518
656
|
|
|
519
|
-
/**
|
|
520
|
-
* Try to extract scope from known section headers
|
|
521
|
-
* Returns null if no section found
|
|
522
|
-
*/
|
|
523
|
-
tryExtractScopeFromSections(docContent) {
|
|
524
|
-
// Section headers to try (in priority order)
|
|
525
|
-
const sectionHeaders = [
|
|
526
|
-
'Initial Scope', // Official AVC convention
|
|
527
|
-
'Scope', // Common variation
|
|
528
|
-
'Project Scope', // Formal variation
|
|
529
|
-
'Features', // Common alternative
|
|
530
|
-
'Core Features', // Detailed variation
|
|
531
|
-
'Requirements', // Specification style
|
|
532
|
-
'Functional Requirements', // Formal specification
|
|
533
|
-
'User Stories', // Agile style
|
|
534
|
-
'Feature List', // Simple list style
|
|
535
|
-
'Objectives', // Goal-oriented style
|
|
536
|
-
'Goals', // Simple goal style
|
|
537
|
-
'Deliverables', // Project management style
|
|
538
|
-
'Product Features', // Product-focused
|
|
539
|
-
'System Requirements' // Technical specification
|
|
540
|
-
];
|
|
541
|
-
|
|
542
|
-
this.debug(`Attempting to extract scope from known sections...`);
|
|
543
|
-
this.debug(`Trying ${sectionHeaders.length} section name variations`);
|
|
544
|
-
|
|
545
|
-
// Try each section header
|
|
546
|
-
for (const header of sectionHeaders) {
|
|
547
|
-
// Build regex (case-insensitive). Allow optional numeric prefix so
|
|
548
|
-
// "## 3. Initial Scope" matches when searching for "Initial Scope".
|
|
549
|
-
const regex = new RegExp(
|
|
550
|
-
`##\\s+(?:\\d+\\.\\s+)?${this.escapeRegex(header)}\\s+([\\s\\S]+?)(?=\\n#{1,2}[^#]|$)`,
|
|
551
|
-
'i'
|
|
552
|
-
);
|
|
553
|
-
|
|
554
|
-
const match = docContent.match(regex);
|
|
555
|
-
|
|
556
|
-
if (match && match[1].trim().length > 0) {
|
|
557
|
-
const scope = match[1].trim();
|
|
558
|
-
this.debug(`✓ Found scope in section: "## ${header}"`);
|
|
559
|
-
this.debug(`Extracted ${scope.length} chars`);
|
|
560
|
-
return scope;
|
|
561
|
-
}
|
|
562
|
-
|
|
563
|
-
this.debug(`✗ Section "## ${header}" not found or empty`);
|
|
564
|
-
}
|
|
565
|
-
|
|
566
|
-
this.debug('✗ No known scope section found');
|
|
567
|
-
return null;
|
|
568
|
-
}
|
|
569
|
-
|
|
570
|
-
/**
|
|
571
|
-
* Escape special regex characters in section names
|
|
572
|
-
*/
|
|
573
|
-
escapeRegex(str) {
|
|
574
|
-
return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
|
575
|
-
}
|
|
576
|
-
|
|
577
657
|
// STAGE 4: Decompose into Epics + Stories
|
|
578
|
-
async decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback = null) {
|
|
658
|
+
async decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback = null, previousIssues = null) {
|
|
579
659
|
this.debugStage(4, 'Decompose into Epics + Stories');
|
|
580
660
|
|
|
581
661
|
this.debug('Stage 1/3: Decomposing scope into Epics and Stories');
|
|
@@ -616,7 +696,7 @@ ${existingStoryNames.map(name => `- ${name}`).join('\n')}
|
|
|
616
696
|
`;
|
|
617
697
|
}
|
|
618
698
|
|
|
619
|
-
prompt += `\nDecompose this project into NEW Epics (
|
|
699
|
+
prompt += `\nDecompose this project into NEW Epics (domain-based groupings) and Stories (user-facing capabilities per Epic) — create as many as needed to fully cover the scope.
|
|
620
700
|
|
|
621
701
|
IMPORTANT: Only generate NEW Epics and Stories. Skip any that match the existing ones.
|
|
622
702
|
|
|
@@ -635,6 +715,11 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
635
715
|
await progressCallback?.(null, `Calling LLM to decompose scope${existingNote}…`, {});
|
|
636
716
|
await progressCallback?.(null, null, { detail: `Sending to ${providerName} (${modelName})…` });
|
|
637
717
|
|
|
718
|
+
// Inject quality issues from previous attempt for retry
|
|
719
|
+
if (previousIssues?.length > 0) {
|
|
720
|
+
prompt += `\n\n**CRITICAL: Your previous decomposition had these quality issues. FIX ALL of them:**\n\n${previousIssues.map((issue, i) => `${i + 1}. ${issue}`).join('\n')}\n\n**Specifically:**\n- Every epic MUST have at least 1 story (most should have 2-5)\n- All epic IDs must match format: context-XXXX (e.g., context-0001)\n- All story IDs must match format: context-XXXX-XXXX (e.g., context-0001-0001)\n- Every story must have 3-8 acceptance criteria\n`;
|
|
721
|
+
}
|
|
722
|
+
|
|
638
723
|
// Log full decomposition prompt for duplicate detection analysis
|
|
639
724
|
this.debug('\n' + '='.repeat(80));
|
|
640
725
|
this.debug('FULL DECOMPOSITION PROMPT:');
|
|
@@ -643,7 +728,7 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
643
728
|
this.debug('='.repeat(80) + '\n');
|
|
644
729
|
|
|
645
730
|
// LLM call with full request/response logging
|
|
646
|
-
|
|
731
|
+
let hierarchy = await this.debugApiCall(
|
|
647
732
|
'Epic/Story Decomposition',
|
|
648
733
|
async () => {
|
|
649
734
|
this.debug('Request payload', {
|
|
@@ -698,6 +783,9 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
698
783
|
}
|
|
699
784
|
);
|
|
700
785
|
|
|
786
|
+
// NOTE: Deduplication moved to Stage 4.1 (deduplicateEpicsLLM) in process() flow.
|
|
787
|
+
// The inline algorithmic dedup here was insufficient for semantic duplicates.
|
|
788
|
+
|
|
701
789
|
if (!hierarchy.epics || !Array.isArray(hierarchy.epics)) {
|
|
702
790
|
this.debug('✗ Invalid decomposition response: missing epics array');
|
|
703
791
|
throw new Error('Invalid decomposition response: missing epics array');
|
|
@@ -722,44 +810,1769 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
722
810
|
return hierarchy;
|
|
723
811
|
}
|
|
724
812
|
|
|
725
|
-
/**
|
|
726
|
-
*
|
|
727
|
-
*
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
813
|
+
/**
|
|
814
|
+
* Check decomposition quality — returns array of issues to fix on retry.
|
|
815
|
+
* Empty array = quality is acceptable.
|
|
816
|
+
*/
|
|
817
|
+
_checkDecompositionQuality(hierarchy) {
|
|
818
|
+
const issues = [];
|
|
819
|
+
|
|
820
|
+
if (!hierarchy?.epics?.length) {
|
|
821
|
+
issues.push('No epics generated — the decomposition returned an empty hierarchy.');
|
|
822
|
+
return issues;
|
|
823
|
+
}
|
|
824
|
+
|
|
825
|
+
// Check for epics with 0 stories
|
|
826
|
+
for (const epic of hierarchy.epics) {
|
|
827
|
+
if (!epic.stories || epic.stories.length === 0) {
|
|
828
|
+
issues.push(`Epic "${epic.name}" (${epic.id}) has 0 stories — every epic MUST have at least 1 story. Decompose this epic's features into user-facing stories.`);
|
|
829
|
+
}
|
|
830
|
+
}
|
|
831
|
+
|
|
832
|
+
// Check for invalid story ID formats (must be context-XXXX-XXXX)
|
|
833
|
+
for (const epic of hierarchy.epics) {
|
|
834
|
+
for (const story of epic.stories || []) {
|
|
835
|
+
if (!story.id || !/^context-\d{4}-\d{4}[a-z]?$/.test(story.id)) {
|
|
836
|
+
issues.push(`Story "${story.name}" has invalid ID "${story.id}" — must match format context-XXXX-XXXX (e.g., context-0001-0001).`);
|
|
837
|
+
}
|
|
838
|
+
}
|
|
839
|
+
}
|
|
840
|
+
|
|
841
|
+
// Check for invalid epic ID formats
|
|
842
|
+
for (const epic of hierarchy.epics) {
|
|
843
|
+
if (!epic.id || !/^context-\d{4}$/.test(epic.id)) {
|
|
844
|
+
issues.push(`Epic "${epic.name}" has invalid ID "${epic.id}" — must match format context-XXXX (e.g., context-0001).`);
|
|
845
|
+
}
|
|
846
|
+
}
|
|
847
|
+
|
|
848
|
+
// Check total story count (a calculator should have at least 5 stories)
|
|
849
|
+
const totalStories = hierarchy.epics.reduce((sum, e) => sum + (e.stories?.length || 0), 0);
|
|
850
|
+
if (totalStories < hierarchy.epics.length) {
|
|
851
|
+
issues.push(`Only ${totalStories} total stories for ${hierarchy.epics.length} epics — generate more stories to cover the full scope.`);
|
|
852
|
+
}
|
|
853
|
+
|
|
854
|
+
// Check stories have acceptance criteria
|
|
855
|
+
for (const epic of hierarchy.epics) {
|
|
856
|
+
for (const story of epic.stories || []) {
|
|
857
|
+
if (!story.acceptance || story.acceptance.length === 0) {
|
|
858
|
+
issues.push(`Story "${story.name}" has no acceptance criteria — each story must have 3-8 testable ACs.`);
|
|
859
|
+
}
|
|
860
|
+
}
|
|
861
|
+
}
|
|
862
|
+
|
|
863
|
+
return issues;
|
|
864
|
+
}
|
|
865
|
+
|
|
866
|
+
/**
|
|
867
|
+
* Merge source epic data into target epic (stories, features, dependencies).
|
|
868
|
+
* Shared by both LLM-based and algorithmic dedup paths.
|
|
869
|
+
*/
|
|
870
|
+
_mergeEpicData(target, source) {
|
|
871
|
+
this.debug(`Merging duplicate epic "${source.name}" into "${target.name}"`);
|
|
872
|
+
|
|
873
|
+
// Merge stories, avoiding duplicates by name
|
|
874
|
+
const existingStoryNames = new Set((target.stories || []).map(s => s.name?.toLowerCase()));
|
|
875
|
+
for (const story of (source.stories || [])) {
|
|
876
|
+
if (!existingStoryNames.has(story.name?.toLowerCase())) {
|
|
877
|
+
target.stories = target.stories || [];
|
|
878
|
+
target.stories.push(story);
|
|
879
|
+
existingStoryNames.add(story.name?.toLowerCase());
|
|
880
|
+
}
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
// Merge features (deduplicate)
|
|
884
|
+
if (source.features?.length) {
|
|
885
|
+
const existingFeatures = new Set((target.features || []).map(f => f.toLowerCase()));
|
|
886
|
+
target.features = target.features || [];
|
|
887
|
+
for (const feature of source.features) {
|
|
888
|
+
if (!existingFeatures.has(feature.toLowerCase())) {
|
|
889
|
+
target.features.push(feature);
|
|
890
|
+
existingFeatures.add(feature.toLowerCase());
|
|
891
|
+
}
|
|
892
|
+
}
|
|
893
|
+
}
|
|
894
|
+
|
|
895
|
+
// Merge dependencies (deduplicate)
|
|
896
|
+
if (source.dependencies?.length) {
|
|
897
|
+
const existingDeps = new Set((target.dependencies || []).map(d => d.toLowerCase()));
|
|
898
|
+
target.dependencies = target.dependencies || [];
|
|
899
|
+
for (const dep of source.dependencies) {
|
|
900
|
+
if (!existingDeps.has(dep.toLowerCase())) {
|
|
901
|
+
target.dependencies.push(dep);
|
|
902
|
+
existingDeps.add(dep.toLowerCase());
|
|
903
|
+
}
|
|
904
|
+
}
|
|
905
|
+
}
|
|
906
|
+
}
|
|
907
|
+
|
|
908
|
+
/**
|
|
909
|
+
* Algorithmic fallback: deduplicate epics by Jaccard similarity.
|
|
910
|
+
* Used when LLM-based dedup fails or as a post-LLM cap enforcer.
|
|
911
|
+
*/
|
|
912
|
+
_deduplicateEpicsAlgorithmic(hierarchy) {
|
|
913
|
+
if (!hierarchy.epics || hierarchy.epics.length <= 1) return hierarchy;
|
|
914
|
+
|
|
915
|
+
// Stop words that inflate Jaccard without carrying domain meaning
|
|
916
|
+
const STOP_WORDS = new Set(['and', 'the', 'for', 'with', 'from', 'into', 'via', 'based', 'system', 'engine', 'management', 'handling', 'service', 'services', 'module', 'platform']);
|
|
917
|
+
|
|
918
|
+
const tokenize = (text) => {
|
|
919
|
+
return new Set(
|
|
920
|
+
text.toLowerCase().replace(/[^a-z0-9\s]/g, '').split(/\s+/)
|
|
921
|
+
.filter(w => w.length > 2 && !STOP_WORDS.has(w))
|
|
922
|
+
);
|
|
923
|
+
};
|
|
924
|
+
|
|
925
|
+
const jaccardSimilarity = (setA, setB) => {
|
|
926
|
+
const intersection = [...setA].filter(w => setB.has(w)).length;
|
|
927
|
+
const union = new Set([...setA, ...setB]).size;
|
|
928
|
+
return union === 0 ? 0 : intersection / union;
|
|
929
|
+
};
|
|
930
|
+
|
|
931
|
+
// Composite similarity: name (40%) + domain (30%) + features (30%)
|
|
932
|
+
const epicSimilarity = (a, b) => {
|
|
933
|
+
const nameSim = jaccardSimilarity(tokenize(a.name || ''), tokenize(b.name || ''));
|
|
934
|
+
|
|
935
|
+
// Domain: exact match = 1.0, Jaccard on tokens otherwise
|
|
936
|
+
const domA = (a.domain || '').toLowerCase();
|
|
937
|
+
const domB = (b.domain || '').toLowerCase();
|
|
938
|
+
const domainSim = domA === domB && domA ? 1.0 : jaccardSimilarity(tokenize(domA), tokenize(domB));
|
|
939
|
+
|
|
940
|
+
// Features: extract the feature-name prefix (before parentheses), tokenize all, Jaccard
|
|
941
|
+
const extractFeatureWords = (features) => {
|
|
942
|
+
const words = new Set();
|
|
943
|
+
for (const f of (features || [])) {
|
|
944
|
+
const prefix = f.split('(')[0].trim();
|
|
945
|
+
for (const w of prefix.toLowerCase().replace(/[^a-z0-9\s]/g, ' ').split(/\s+/)) {
|
|
946
|
+
if (w.length > 2 && !STOP_WORDS.has(w)) words.add(w);
|
|
947
|
+
}
|
|
948
|
+
}
|
|
949
|
+
return words;
|
|
950
|
+
};
|
|
951
|
+
const featSim = jaccardSimilarity(extractFeatureWords(a.features), extractFeatureWords(b.features));
|
|
952
|
+
|
|
953
|
+
const combined = nameSim * 0.4 + domainSim * 0.3 + featSim * 0.3;
|
|
954
|
+
return { combined, nameSim, domainSim, featSim };
|
|
955
|
+
};
|
|
956
|
+
|
|
957
|
+
const originalCount = hierarchy.epics.length;
|
|
958
|
+
|
|
959
|
+
// Pass 1: Merge epics that are duplicates or semantically overlapping.
|
|
960
|
+
let merged = true;
|
|
961
|
+
while (merged) {
|
|
962
|
+
merged = false;
|
|
963
|
+
for (let i = 0; i < hierarchy.epics.length && !merged; i++) {
|
|
964
|
+
for (let j = i + 1; j < hierarchy.epics.length && !merged; j++) {
|
|
965
|
+
const sim = epicSimilarity(hierarchy.epics[i], hierarchy.epics[j]);
|
|
966
|
+
const shouldMerge = sim.combined > 0.5
|
|
967
|
+
|| sim.nameSim >= 0.5
|
|
968
|
+
|| (sim.nameSim >= 0.35 && sim.featSim >= 0.4);
|
|
969
|
+
if (shouldMerge) {
|
|
970
|
+
this.debug(`Epic similarity ${sim.combined.toFixed(2)} between "${hierarchy.epics[i].name}" and "${hierarchy.epics[j].name}" (name=${sim.nameSim.toFixed(2)}, domain=${sim.domainSim.toFixed(2)}, features=${sim.featSim.toFixed(2)})`);
|
|
971
|
+
this._mergeEpicData(hierarchy.epics[i], hierarchy.epics[j]);
|
|
972
|
+
hierarchy.epics.splice(j, 1);
|
|
973
|
+
merged = true;
|
|
974
|
+
}
|
|
975
|
+
}
|
|
976
|
+
}
|
|
977
|
+
}
|
|
978
|
+
|
|
979
|
+
if (hierarchy.epics.length < originalCount) {
|
|
980
|
+
this.debug(`Epic deduplication: ${originalCount} → ${hierarchy.epics.length} epics`);
|
|
981
|
+
} else {
|
|
982
|
+
this.debug('Epic deduplication: no duplicates found');
|
|
983
|
+
}
|
|
984
|
+
|
|
985
|
+
return hierarchy;
|
|
986
|
+
}
|
|
987
|
+
|
|
988
|
+
/**
|
|
989
|
+
* LLM-based semantic duplicate detection for epics and stories.
|
|
990
|
+
* Falls back to _deduplicateEpicsAlgorithmic on any error.
|
|
991
|
+
*/
|
|
992
|
+
async deduplicateEpicsLLM(hierarchy, preRunSnapshot, progressCallback) {
|
|
993
|
+
this.debugStage(4.1, 'LLM-Based Duplicate Detection');
|
|
994
|
+
|
|
995
|
+
if (!hierarchy.epics || hierarchy.epics.length <= 1) {
|
|
996
|
+
this.debug('Skipping dedup: ≤1 epic');
|
|
997
|
+
return hierarchy;
|
|
998
|
+
}
|
|
999
|
+
|
|
1000
|
+
try {
|
|
1001
|
+
const provider = await this.getProviderForStageInstance('decomposition');
|
|
1002
|
+
const { provider: providerName, model: modelName } = this.getProviderForStage('decomposition');
|
|
1003
|
+
|
|
1004
|
+
const agentInstructions = loadAgent('duplicate-detector.md');
|
|
1005
|
+
this.debug('Duplicate detector agent loaded', { bytes: agentInstructions.length });
|
|
1006
|
+
|
|
1007
|
+
// Build compact summaries of new epics
|
|
1008
|
+
const newEpicSummaries = hierarchy.epics.map((e, i) => ({
|
|
1009
|
+
index: i,
|
|
1010
|
+
name: e.name,
|
|
1011
|
+
domain: e.domain || '',
|
|
1012
|
+
description: (e.description || '').substring(0, 300),
|
|
1013
|
+
features: (e.features || []).slice(0, 10),
|
|
1014
|
+
storyNames: (e.stories || []).map(s => s.name)
|
|
1015
|
+
}));
|
|
1016
|
+
|
|
1017
|
+
// Build existing epics context from preRunSnapshot
|
|
1018
|
+
const existingEpicSummaries = (preRunSnapshot || []).map(e => ({
|
|
1019
|
+
name: e.name,
|
|
1020
|
+
domain: e.domain || '',
|
|
1021
|
+
description: (e.description || '').substring(0, 300),
|
|
1022
|
+
storyNames: (e.stories || []).map(s => s.name)
|
|
1023
|
+
}));
|
|
1024
|
+
|
|
1025
|
+
const prompt = `Analyze the following newly generated epics for duplicates and overlaps.
|
|
1026
|
+
|
|
1027
|
+
**New Epics (just generated):**
|
|
1028
|
+
${JSON.stringify(newEpicSummaries, null, 2)}
|
|
1029
|
+
|
|
1030
|
+
${existingEpicSummaries.length > 0 ? `**Existing Epics (already on disk from prior runs):**
|
|
1031
|
+
${JSON.stringify(existingEpicSummaries, null, 2)}` : '**No existing epics on disk.**'}
|
|
1032
|
+
|
|
1033
|
+
Detect:
|
|
1034
|
+
1. New epics that should be merged together (epicMergeGroups)
|
|
1035
|
+
2. Stories within the same epic that should be merged (storyMergeGroups)
|
|
1036
|
+
3. New epics that duplicate existing on-disk epics (existingOverlaps)
|
|
1037
|
+
|
|
1038
|
+
Return JSON following the exact structure in your instructions.`;
|
|
1039
|
+
|
|
1040
|
+
this.debug('Duplicate detection prompt built', {
|
|
1041
|
+
newEpics: newEpicSummaries.length,
|
|
1042
|
+
existingEpics: existingEpicSummaries.length,
|
|
1043
|
+
promptLength: prompt.length
|
|
1044
|
+
});
|
|
1045
|
+
|
|
1046
|
+
await progressCallback?.(null, `Detecting duplicates via ${providerName} (${modelName})…`, {});
|
|
1047
|
+
|
|
1048
|
+
const result = await this.debugApiCall(
|
|
1049
|
+
'Duplicate Detection',
|
|
1050
|
+
async () => {
|
|
1051
|
+
return await this._withProgressHeartbeat(
|
|
1052
|
+
() => this.retryWithBackoff(
|
|
1053
|
+
() => provider.generateJSON(prompt, agentInstructions),
|
|
1054
|
+
'duplicate-detection'
|
|
1055
|
+
),
|
|
1056
|
+
(elapsed) => `Analyzing duplicates… ${elapsed}s`,
|
|
1057
|
+
progressCallback
|
|
1058
|
+
);
|
|
1059
|
+
}
|
|
1060
|
+
);
|
|
1061
|
+
|
|
1062
|
+
this.debug('LLM duplicate detection result', result);
|
|
1063
|
+
|
|
1064
|
+
// Apply the LLM results
|
|
1065
|
+
hierarchy = this._applyDuplicateResults(hierarchy, result);
|
|
1066
|
+
|
|
1067
|
+
const totalStories = hierarchy.epics.reduce((sum, e) => sum + (e.stories?.length || 0), 0);
|
|
1068
|
+
await progressCallback?.(null, `After dedup: ${hierarchy.epics.length} epics, ${totalStories} stories`, {});
|
|
1069
|
+
|
|
1070
|
+
return hierarchy;
|
|
1071
|
+
} catch (error) {
|
|
1072
|
+
this.debug(`LLM duplicate detection failed, falling back to algorithmic: ${error.message}`, {
|
|
1073
|
+
stack: error.stack
|
|
1074
|
+
});
|
|
1075
|
+
sendWarning(`LLM dedup failed (${error.message}), using algorithmic fallback`);
|
|
1076
|
+
return this._deduplicateEpicsAlgorithmic(hierarchy);
|
|
1077
|
+
}
|
|
1078
|
+
}
|
|
1079
|
+
|
|
1080
|
+
/**
|
|
1081
|
+
* Apply LLM duplicate detection results to the hierarchy.
|
|
1082
|
+
* Processes: existingOverlaps → epicMergeGroups → storyMergeGroups (in that order).
|
|
1083
|
+
* All indices in the LLM response refer to the ORIGINAL array positions.
|
|
1084
|
+
* We maintain an oldOriginal→currentPosition mapping throughout.
|
|
1085
|
+
*/
|
|
1086
|
+
_applyDuplicateResults(hierarchy, result) {
|
|
1087
|
+
if (!result || typeof result !== 'object') {
|
|
1088
|
+
this.debug('Invalid duplicate detection result, skipping');
|
|
1089
|
+
return hierarchy;
|
|
1090
|
+
}
|
|
1091
|
+
|
|
1092
|
+
const existingOverlaps = result.existingOverlaps || [];
|
|
1093
|
+
const epicMergeGroups = result.epicMergeGroups || [];
|
|
1094
|
+
const storyMergeGroups = result.storyMergeGroups || [];
|
|
1095
|
+
|
|
1096
|
+
const originalCount = hierarchy.epics.length;
|
|
1097
|
+
|
|
1098
|
+
// Master mapping: original index → current index (or -1 if removed)
|
|
1099
|
+
// Starts as identity mapping, updated after each removal/splice.
|
|
1100
|
+
const originalToCurrent = new Map();
|
|
1101
|
+
for (let i = 0; i < originalCount; i++) {
|
|
1102
|
+
originalToCurrent.set(i, i);
|
|
1103
|
+
}
|
|
1104
|
+
|
|
1105
|
+
// Helper: after splicing index `splicedIdx` from the array,
|
|
1106
|
+
// decrement all current-index values that are > splicedIdx.
|
|
1107
|
+
const adjustAfterSplice = (splicedCurrentIdx) => {
|
|
1108
|
+
for (const [origIdx, curIdx] of originalToCurrent) {
|
|
1109
|
+
if (curIdx === splicedCurrentIdx) {
|
|
1110
|
+
originalToCurrent.set(origIdx, -1);
|
|
1111
|
+
} else if (curIdx > splicedCurrentIdx) {
|
|
1112
|
+
originalToCurrent.set(origIdx, curIdx - 1);
|
|
1113
|
+
}
|
|
1114
|
+
}
|
|
1115
|
+
};
|
|
1116
|
+
|
|
1117
|
+
// 1. Remove new epics that duplicate existing on-disk epics (process in reverse current-index order)
|
|
1118
|
+
if (existingOverlaps.length > 0) {
|
|
1119
|
+
// Collect current indices to remove (mapped from original indices)
|
|
1120
|
+
const toRemove = existingOverlaps
|
|
1121
|
+
.filter(o => o.recommendation === 'skip' && typeof o.newEpicIndex === 'number')
|
|
1122
|
+
.map(o => ({ origIdx: o.newEpicIndex, curIdx: originalToCurrent.get(o.newEpicIndex), overlap: o }))
|
|
1123
|
+
.filter(r => r.curIdx != null && r.curIdx >= 0 && r.curIdx < hierarchy.epics.length)
|
|
1124
|
+
.sort((a, b) => b.curIdx - a.curIdx); // reverse for safe splicing
|
|
1125
|
+
|
|
1126
|
+
for (const { origIdx, curIdx, overlap } of toRemove) {
|
|
1127
|
+
this.debug(`Removing epic "${hierarchy.epics[curIdx].name}" — duplicates existing "${overlap.existingEpicName}"`);
|
|
1128
|
+
hierarchy.epics.splice(curIdx, 1);
|
|
1129
|
+
adjustAfterSplice(curIdx);
|
|
1130
|
+
}
|
|
1131
|
+
}
|
|
1132
|
+
|
|
1133
|
+
// 2. Merge epic groups
|
|
1134
|
+
if (epicMergeGroups.length > 0) {
|
|
1135
|
+
for (const group of epicMergeGroups) {
|
|
1136
|
+
const targetCur = originalToCurrent.get(group.targetIndex);
|
|
1137
|
+
if (targetCur == null || targetCur === -1) {
|
|
1138
|
+
this.debug(`Skipping epic merge group: target index ${group.targetIndex} was removed or invalid`);
|
|
1139
|
+
continue;
|
|
1140
|
+
}
|
|
1141
|
+
|
|
1142
|
+
const sourceCurIndices = (group.sourceIndices || [])
|
|
1143
|
+
.map(idx => originalToCurrent.get(idx))
|
|
1144
|
+
.filter(idx => idx != null && idx !== -1 && idx >= 0 && idx < hierarchy.epics.length && idx !== targetCur);
|
|
1145
|
+
|
|
1146
|
+
if (sourceCurIndices.length === 0) {
|
|
1147
|
+
this.debug(`Skipping epic merge group: no valid source indices`);
|
|
1148
|
+
continue;
|
|
1149
|
+
}
|
|
1150
|
+
|
|
1151
|
+
if (targetCur < 0 || targetCur >= hierarchy.epics.length) {
|
|
1152
|
+
this.debug(`Skipping epic merge group: remapped target ${targetCur} out of bounds`);
|
|
1153
|
+
continue;
|
|
1154
|
+
}
|
|
1155
|
+
|
|
1156
|
+
// Merge sources into target
|
|
1157
|
+
for (const srcIdx of sourceCurIndices) {
|
|
1158
|
+
this._mergeEpicData(hierarchy.epics[targetCur], hierarchy.epics[srcIdx]);
|
|
1159
|
+
}
|
|
1160
|
+
|
|
1161
|
+
if (group.mergedName) {
|
|
1162
|
+
hierarchy.epics[targetCur].name = group.mergedName;
|
|
1163
|
+
}
|
|
1164
|
+
|
|
1165
|
+
this.debug(`Merged epics: ${sourceCurIndices.map(i => hierarchy.epics[i]?.name).join(', ')} → "${hierarchy.epics[targetCur].name}" (reason: ${group.reason})`);
|
|
1166
|
+
|
|
1167
|
+
// Remove sources in reverse order, adjusting mapping after each
|
|
1168
|
+
for (const srcIdx of sourceCurIndices.sort((a, b) => b - a)) {
|
|
1169
|
+
hierarchy.epics.splice(srcIdx, 1);
|
|
1170
|
+
adjustAfterSplice(srcIdx);
|
|
1171
|
+
}
|
|
1172
|
+
}
|
|
1173
|
+
}
|
|
1174
|
+
|
|
1175
|
+
// 3. Merge story groups within epics
|
|
1176
|
+
// epicIndex from LLM refers to ORIGINAL positions — remap via originalToCurrent.
|
|
1177
|
+
if (storyMergeGroups.length > 0) {
|
|
1178
|
+
for (const group of storyMergeGroups) {
|
|
1179
|
+
const epicIdx = originalToCurrent.get(group.epicIndex);
|
|
1180
|
+
if (epicIdx == null || epicIdx === -1 || epicIdx < 0 || epicIdx >= hierarchy.epics.length) {
|
|
1181
|
+
this.debug(`Skipping story merge group: epic index ${group.epicIndex} (remapped: ${epicIdx}) out of bounds or removed`);
|
|
1182
|
+
continue;
|
|
1183
|
+
}
|
|
1184
|
+
|
|
1185
|
+
const epic = hierarchy.epics[epicIdx];
|
|
1186
|
+
const stories = epic.stories || [];
|
|
1187
|
+
const targetIdx = group.targetStoryIndex;
|
|
1188
|
+
|
|
1189
|
+
if (targetIdx < 0 || targetIdx >= stories.length) {
|
|
1190
|
+
this.debug(`Skipping story merge group: target story index ${targetIdx} out of bounds in epic "${epic.name}"`);
|
|
1191
|
+
continue;
|
|
1192
|
+
}
|
|
1193
|
+
|
|
1194
|
+
let sourceIndices = (group.sourceStoryIndices || [])
|
|
1195
|
+
.filter(idx => idx >= 0 && idx < stories.length && idx !== targetIdx);
|
|
1196
|
+
|
|
1197
|
+
if (sourceIndices.length === 0) continue;
|
|
1198
|
+
|
|
1199
|
+
// Guard: reject merge when both stories have ≥3 acceptance criteria (both are well-scoped)
|
|
1200
|
+
const targetAcCount = (stories[targetIdx].acceptance || []).length;
|
|
1201
|
+
const sourceAcCounts = sourceIndices.map(idx => (stories[idx]?.acceptance || []).length);
|
|
1202
|
+
if (targetAcCount >= 3 && sourceAcCounts.some(c => c >= 3)) {
|
|
1203
|
+
this.debug(`Rejecting story merge in "${epic.name}": target "${stories[targetIdx].name}" has ${targetAcCount} ACs and source(s) have ${sourceAcCounts.join(',')} ACs — both are well-scoped, merging would create an oversized story`);
|
|
1204
|
+
continue;
|
|
1205
|
+
}
|
|
1206
|
+
|
|
1207
|
+
// Cap story merges: merge at most 1 source to prevent over-merging
|
|
1208
|
+
if (sourceIndices.length > 1) {
|
|
1209
|
+
this.debug(`Story merge group in "${epic.name}" has ${sourceIndices.length} sources — capping to 1 to prevent over-merging`);
|
|
1210
|
+
sourceIndices = [sourceIndices[0]];
|
|
1211
|
+
}
|
|
1212
|
+
|
|
1213
|
+
// Merge story acceptance criteria and dependencies
|
|
1214
|
+
const target = stories[targetIdx];
|
|
1215
|
+
for (const srcIdx of sourceIndices) {
|
|
1216
|
+
const source = stories[srcIdx];
|
|
1217
|
+
|
|
1218
|
+
// Merge acceptance criteria
|
|
1219
|
+
if (source.acceptance?.length) {
|
|
1220
|
+
const existing = new Set((target.acceptance || []).map(a => a.toLowerCase()));
|
|
1221
|
+
target.acceptance = target.acceptance || [];
|
|
1222
|
+
for (const ac of source.acceptance) {
|
|
1223
|
+
if (!existing.has(ac.toLowerCase())) {
|
|
1224
|
+
target.acceptance.push(ac);
|
|
1225
|
+
existing.add(ac.toLowerCase());
|
|
1226
|
+
}
|
|
1227
|
+
}
|
|
1228
|
+
}
|
|
1229
|
+
|
|
1230
|
+
// Merge dependencies
|
|
1231
|
+
if (source.dependencies?.length) {
|
|
1232
|
+
const existing = new Set((target.dependencies || []).map(d => d.toLowerCase()));
|
|
1233
|
+
target.dependencies = target.dependencies || [];
|
|
1234
|
+
for (const dep of source.dependencies) {
|
|
1235
|
+
if (!existing.has(dep.toLowerCase())) {
|
|
1236
|
+
target.dependencies.push(dep);
|
|
1237
|
+
existing.add(dep.toLowerCase());
|
|
1238
|
+
}
|
|
1239
|
+
}
|
|
1240
|
+
}
|
|
1241
|
+
}
|
|
1242
|
+
|
|
1243
|
+
this.debug(`Merged stories in epic "${epic.name}": indices ${sourceIndices.join(',')} → ${targetIdx} (reason: ${group.reason})`);
|
|
1244
|
+
|
|
1245
|
+
// Remap dependency references: any story depending on a merged source now depends on the target
|
|
1246
|
+
const targetId = target.id;
|
|
1247
|
+
for (const srcIdx of sourceIndices) {
|
|
1248
|
+
const sourceId = stories[srcIdx]?.id;
|
|
1249
|
+
if (sourceId && targetId) {
|
|
1250
|
+
for (const e of hierarchy.epics) {
|
|
1251
|
+
for (const s of e.stories || []) {
|
|
1252
|
+
if (Array.isArray(s.dependencies)) {
|
|
1253
|
+
const depIdx = s.dependencies.indexOf(sourceId);
|
|
1254
|
+
if (depIdx !== -1 && s.id !== targetId) {
|
|
1255
|
+
s.dependencies[depIdx] = targetId;
|
|
1256
|
+
this.debug(`Remapped dependency: story "${s.name}" now depends on "${targetId}" (was "${sourceId}")`);
|
|
1257
|
+
}
|
|
1258
|
+
}
|
|
1259
|
+
}
|
|
1260
|
+
}
|
|
1261
|
+
}
|
|
1262
|
+
}
|
|
1263
|
+
|
|
1264
|
+
// Remove source stories in reverse order
|
|
1265
|
+
for (const srcIdx of sourceIndices.sort((a, b) => b - a)) {
|
|
1266
|
+
epic.stories.splice(srcIdx, 1);
|
|
1267
|
+
}
|
|
1268
|
+
}
|
|
1269
|
+
}
|
|
1270
|
+
|
|
1271
|
+
return hierarchy;
|
|
1272
|
+
}
|
|
1273
|
+
|
|
1274
|
+
// STAGE 4.2: Review and split wide stories
|
|
1275
|
+
async reviewAndSplitStories(hierarchy, progressCallback = null) {
|
|
1276
|
+
this.debugStage(4.2, 'Review and Split Wide Stories');
|
|
1277
|
+
|
|
1278
|
+
const provider = await this.getProviderForStageInstance('decomposition');
|
|
1279
|
+
const { provider: providerName, model: modelName } = this.getProviderForStage('decomposition');
|
|
1280
|
+
|
|
1281
|
+
const agentPath = path.join(this.agentsPath, 'story-scope-reviewer.md');
|
|
1282
|
+
const reviewerAgent = fs.readFileSync(agentPath, 'utf8');
|
|
1283
|
+
|
|
1284
|
+
this.debug('Story scope reviewer loaded', { agentBytes: reviewerAgent.length, provider: providerName, model: modelName });
|
|
1285
|
+
await progressCallback?.(null, `Reviewing story scopes for splits (${providerName} / ${modelName})…`, {});
|
|
1286
|
+
|
|
1287
|
+
let totalSplits = 0;
|
|
1288
|
+
const contextSizeFailures = { count: 0 };
|
|
1289
|
+
|
|
1290
|
+
// Limit concurrency for story scope review: local models can't handle unbounded parallelism
|
|
1291
|
+
const reviewConcurrency = providerName === 'local' ? 2 : hierarchy.epics.length;
|
|
1292
|
+
|
|
1293
|
+
// Process epics with bounded concurrency — one LLM call per epic
|
|
1294
|
+
const epicTasks = hierarchy.epics.map((epic) => async () => {
|
|
1295
|
+
// Early bail-out if too many context-size failures
|
|
1296
|
+
if (contextSizeFailures.count >= 3) {
|
|
1297
|
+
this.debug(`Skipping story review for "${epic.name}" — too many context-size failures`);
|
|
1298
|
+
return { epic, stories: epic.stories, splits: [] };
|
|
1299
|
+
}
|
|
1300
|
+
|
|
1301
|
+
const prompt = `## Epic
|
|
1302
|
+
name: ${epic.name}
|
|
1303
|
+
domain: ${epic.domain || 'unknown'}
|
|
1304
|
+
description: ${epic.description || ''}
|
|
1305
|
+
features: ${JSON.stringify(epic.features || [])}
|
|
1306
|
+
|
|
1307
|
+
## Stories
|
|
1308
|
+
${JSON.stringify(epic.stories || [], null, 2)}
|
|
1309
|
+
|
|
1310
|
+
Review the stories above and return the complete final story list for this epic, splitting any stories that are too broad according to your instructions.`;
|
|
1311
|
+
|
|
1312
|
+
this.debug(`Reviewing stories for epic: ${epic.name} (${(epic.stories || []).length} stories)`);
|
|
1313
|
+
|
|
1314
|
+
try {
|
|
1315
|
+
const result = await this._withProgressHeartbeat(
|
|
1316
|
+
() => this.retryWithBackoff(
|
|
1317
|
+
() => provider.generateJSON(prompt, reviewerAgent),
|
|
1318
|
+
`Story scope review for ${epic.name}`
|
|
1319
|
+
),
|
|
1320
|
+
(elapsed) => {
|
|
1321
|
+
if (elapsed < 20) return `Reviewing stories in ${epic.name}…`;
|
|
1322
|
+
if (elapsed < 45) return `Checking scope boundaries for ${epic.name}…`;
|
|
1323
|
+
return `Finalizing splits for ${epic.name}…`;
|
|
1324
|
+
},
|
|
1325
|
+
progressCallback,
|
|
1326
|
+
20000
|
|
1327
|
+
);
|
|
1328
|
+
|
|
1329
|
+
const splits = result.splits || [];
|
|
1330
|
+
const stories = result.stories || epic.stories;
|
|
1331
|
+
|
|
1332
|
+
if (splits.length > 0) {
|
|
1333
|
+
this.debug(`Splits applied in epic "${epic.name}"`, splits.map(s => ({
|
|
1334
|
+
original: s.original,
|
|
1335
|
+
into: s.into,
|
|
1336
|
+
rationale: s.rationale
|
|
1337
|
+
})));
|
|
1338
|
+
for (const split of splits) {
|
|
1339
|
+
await progressCallback?.(null, ` Split: ${split.original} → ${split.into.join(', ')} — ${split.rationale}`, {});
|
|
1340
|
+
}
|
|
1341
|
+
} else {
|
|
1342
|
+
this.debug(`No splits needed for epic "${epic.name}"`);
|
|
1343
|
+
}
|
|
1344
|
+
|
|
1345
|
+
return { epic, stories, splits };
|
|
1346
|
+
} catch (err) {
|
|
1347
|
+
if (err.message?.toLowerCase().includes('context size') || err.message?.includes('exceeded')) {
|
|
1348
|
+
contextSizeFailures.count++;
|
|
1349
|
+
this.debug(`Context-size failure #${contextSizeFailures.count} for epic "${epic.name}"`, { error: err.message });
|
|
1350
|
+
}
|
|
1351
|
+
this.debug(`Story scope review failed for epic "${epic.name}" — keeping original stories`, { error: err.message });
|
|
1352
|
+
return { epic, stories: epic.stories, splits: [] };
|
|
1353
|
+
}
|
|
1354
|
+
});
|
|
1355
|
+
|
|
1356
|
+
// Run with bounded concurrency
|
|
1357
|
+
const results = await this._runWithConcurrency(epicTasks, reviewConcurrency);
|
|
1358
|
+
|
|
1359
|
+
// Apply results back to hierarchy
|
|
1360
|
+
for (const { epic, stories, splits } of results) {
|
|
1361
|
+
epic.stories = stories;
|
|
1362
|
+
totalSplits += splits.length;
|
|
1363
|
+
}
|
|
1364
|
+
|
|
1365
|
+
const totalStories = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
|
|
1366
|
+
this.debug(`Story scope review complete — ${totalSplits} split(s), ${totalStories} total stories`);
|
|
1367
|
+
await progressCallback?.(null, `Story review complete: ${totalSplits} split(s), ${totalStories} total stories`, {});
|
|
1368
|
+
|
|
1369
|
+
return hierarchy;
|
|
1370
|
+
}
|
|
1371
|
+
|
|
1372
|
+
/**
|
|
1373
|
+
* Filter the decomposed hierarchy to only the epics/stories chosen by the user.
|
|
1374
|
+
* @param {Object} hierarchy - Full decomposed hierarchy
|
|
1375
|
+
* @param {string[]} selectedEpicIds - Epic IDs to keep
|
|
1376
|
+
* @param {string[]} selectedStoryIds - Story IDs to keep
|
|
1377
|
+
* @returns {Object} Filtered hierarchy
|
|
1378
|
+
*/
|
|
1379
|
+
_filterHierarchyBySelection(hierarchy, selectedEpicIds, selectedStoryIds) {
|
|
1380
|
+
const epicIdSet = new Set(selectedEpicIds);
|
|
1381
|
+
const storyIdSet = new Set(selectedStoryIds);
|
|
1382
|
+
const filteredEpics = hierarchy.epics
|
|
1383
|
+
.filter(e => epicIdSet.has(e.id))
|
|
1384
|
+
.map(e => ({
|
|
1385
|
+
...e,
|
|
1386
|
+
stories: (e.stories || []).filter(s => storyIdSet.has(s.id))
|
|
1387
|
+
}));
|
|
1388
|
+
return { ...hierarchy, epics: filteredEpics };
|
|
1389
|
+
}
|
|
1390
|
+
|
|
1391
|
+
/**
|
|
1392
|
+
* Phase 1 of contextual selection: extract structured project characteristics from scope text.
|
|
1393
|
+
* Called once per sprint-planning run when useContextualSelection is enabled.
|
|
1394
|
+
* @param {string} scope - Project scope text (first 3000 chars used)
|
|
1395
|
+
* @param {Function} progressCallback - Optional progress callback
|
|
1396
|
+
* @returns {Promise<Object>} ProjectContext JSON (empty object on failure)
|
|
1397
|
+
*/
|
|
1398
|
+
async extractProjectContext(scope, progressCallback) {
|
|
1399
|
+
this.debug('Extracting project context for contextual agent selection');
|
|
1400
|
+
try {
|
|
1401
|
+
const provider = await this.getProviderForStageInstance('validation');
|
|
1402
|
+
const agent = loadAgent('project-context-extractor.md');
|
|
1403
|
+
// Use full doc.md for tech stack extraction — the "Initial Scope" section alone
|
|
1404
|
+
// omits tech info from Overview (§1), UI/UX Design (§5), and Technical Architecture (§6).
|
|
1405
|
+
let extractionText = scope || '';
|
|
1406
|
+
if (fs.existsSync(this.projectDocPath)) {
|
|
1407
|
+
extractionText = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
1408
|
+
this.debug(`Using full doc.md for context extraction (${extractionText.length} chars)`);
|
|
1409
|
+
}
|
|
1410
|
+
const prompt = `PROJECT SCOPE:\n\n${extractionText.substring(0, 20000)}\n\nScan the entire document above from start to finish for ALL technology mentions before filling each field. Do not stop at the first occurrence. Extract the structured project context as JSON.`;
|
|
1411
|
+
const result = await provider.generateJSON(prompt, agent);
|
|
1412
|
+
return result || {};
|
|
1413
|
+
} catch (err) {
|
|
1414
|
+
this.debug('Project context extraction failed, continuing without context', { error: err.message });
|
|
1415
|
+
return {};
|
|
1416
|
+
}
|
|
1417
|
+
}
|
|
1418
|
+
|
|
1419
|
+
/**
|
|
1420
|
+
* Generate canonical root context.md content from extracted project context + doc.md.
|
|
1421
|
+
* Written once per run to {projectPath}/context.md — no LLM call needed.
|
|
1422
|
+
* @param {Object} projectContext - JSON from project-context-extractor
|
|
1423
|
+
* @param {string} docMdContent - Full root doc.md content
|
|
1424
|
+
* @returns {string}
|
|
1425
|
+
*/
|
|
1426
|
+
generateRootContextMd(projectContext, docMdContent, hierarchy = null) {
|
|
1427
|
+
const ctx = projectContext || {};
|
|
1428
|
+
const stack = (ctx.techStack || []).map(t => `- ${t}`).join('\n') || '- (not detected)';
|
|
1429
|
+
|
|
1430
|
+
// Purpose comes from the LLM-extracted field — no regex needed.
|
|
1431
|
+
const purposeText = (ctx.purpose || '').trim();
|
|
1432
|
+
|
|
1433
|
+
// Detect auth mechanism from doc.md to enforce consistency across all contexts
|
|
1434
|
+
const docLower = (docMdContent || '').toLowerCase();
|
|
1435
|
+
let authMechanism = 'session-based (httpOnly cookies)';
|
|
1436
|
+
// Check for "no auth" signals first
|
|
1437
|
+
const noAuthPatterns = /no\s+auth|no\s+login|no\s+account|no\s+user\s+account|publicly\s+accessible|no\s+backend|no\s+server|single.file|static\s+site|client.side\s+only|no\s+authentication/i;
|
|
1438
|
+
if (noAuthPatterns.test(docMdContent)) {
|
|
1439
|
+
authMechanism = 'none (public, no authentication required)';
|
|
1440
|
+
} else if (/jwt\s+token|bearer\s+token|authorization\s+header/i.test(docMdContent) && !/session.based|httponl/i.test(docMdContent)) {
|
|
1441
|
+
authMechanism = 'JWT bearer tokens';
|
|
1442
|
+
}
|
|
1443
|
+
|
|
1444
|
+
const lines = [
|
|
1445
|
+
'# Project Context',
|
|
1446
|
+
'',
|
|
1447
|
+
'## Identity',
|
|
1448
|
+
`- type: ${ctx.projectType || 'web-application'}`,
|
|
1449
|
+
`- deployment: ${ctx.deploymentType || 'local'}`,
|
|
1450
|
+
`- team: ${ctx.teamContext || 'small'}`,
|
|
1451
|
+
'',
|
|
1452
|
+
'## Purpose',
|
|
1453
|
+
purposeText || '(see root doc.md)',
|
|
1454
|
+
'',
|
|
1455
|
+
'## Tech Stack',
|
|
1456
|
+
stack,
|
|
1457
|
+
'',
|
|
1458
|
+
'## Authentication',
|
|
1459
|
+
`- mechanism: ${authMechanism}`,
|
|
1460
|
+
...(authMechanism.startsWith('none')
|
|
1461
|
+
? ['- NOTE: This project has no authentication. Do NOT add auth mechanisms (sessions, JWT, login) to any epic or story.']
|
|
1462
|
+
: ['- IMPORTANT: All epics and stories MUST use this auth mechanism consistently. Do NOT mix session cookies and JWT tokens.']),
|
|
1463
|
+
'',
|
|
1464
|
+
'## Project Characteristics',
|
|
1465
|
+
`- hasCloud: ${ctx.hasCloud ?? false}`,
|
|
1466
|
+
`- hasCI_CD: ${ctx.hasCI_CD ?? false}`,
|
|
1467
|
+
`- hasMobileApp: ${ctx.hasMobileApp ?? false}`,
|
|
1468
|
+
`- hasFrontend: ${ctx.hasFrontend ?? true}`,
|
|
1469
|
+
`- hasPublicAPI: ${ctx.hasPublicAPI ?? false}`,
|
|
1470
|
+
];
|
|
1471
|
+
|
|
1472
|
+
// Add epic map if hierarchy is available
|
|
1473
|
+
if (hierarchy?.epics?.length > 0) {
|
|
1474
|
+
lines.push('', '## Epic Map');
|
|
1475
|
+
for (const epic of hierarchy.epics) {
|
|
1476
|
+
const storyCount = (epic.stories || []).length;
|
|
1477
|
+
lines.push(`- **${epic.id}**: ${epic.name} (${storyCount} stories) — ${epic.domain || 'general'}`);
|
|
1478
|
+
}
|
|
1479
|
+
lines.push('', 'Use the epic IDs above when cross-referencing dependencies between epics and stories.');
|
|
1480
|
+
}
|
|
1481
|
+
|
|
1482
|
+
return lines.join('\n');
|
|
1483
|
+
}
|
|
1484
|
+
|
|
1485
|
+
/**
|
|
1486
|
+
* Generate a scaffolding epic by scanning all domain epic/story contexts for tech requirements.
|
|
1487
|
+
* Called AFTER generateContextFiles() so all context.md files are in cache.
|
|
1488
|
+
* Inserts the epic as the first item in hierarchy.epics with all others depending on it.
|
|
1489
|
+
*/
|
|
1490
|
+
async _generateScaffoldingEpic(hierarchy, progressCallback) {
|
|
1491
|
+
if (!hierarchy.epics || hierarchy.epics.length === 0) return;
|
|
1492
|
+
|
|
1493
|
+
// Skip if a scaffolding epic already exists (e.g., from a prior run)
|
|
1494
|
+
if (hierarchy.epics.some(e => (e.domain || '').toLowerCase() === 'scaffolding')) {
|
|
1495
|
+
this.debug('Scaffolding epic already exists — skipping generation');
|
|
1496
|
+
return;
|
|
1497
|
+
}
|
|
1498
|
+
|
|
1499
|
+
await progressCallback?.(null, 'Generating project scaffolding epic from tech requirements…', {});
|
|
1500
|
+
|
|
1501
|
+
// 1. Extract tech requirements from all cached context.md files
|
|
1502
|
+
const techMentions = new Set();
|
|
1503
|
+
const allContexts = [];
|
|
1504
|
+
|
|
1505
|
+
for (const [key, contextMd] of this._epicContextCache) {
|
|
1506
|
+
allContexts.push(contextMd);
|
|
1507
|
+
this._extractTechFromContext(contextMd, techMentions);
|
|
1508
|
+
}
|
|
1509
|
+
for (const [key, contextMd] of this._storyContextCache) {
|
|
1510
|
+
allContexts.push(contextMd);
|
|
1511
|
+
this._extractTechFromContext(contextMd, techMentions);
|
|
1512
|
+
}
|
|
1513
|
+
|
|
1514
|
+
// Also extract from root context
|
|
1515
|
+
if (this.rootContextMd) {
|
|
1516
|
+
this._extractTechFromContext(this.rootContextMd, techMentions);
|
|
1517
|
+
}
|
|
1518
|
+
|
|
1519
|
+
const techRequirements = [...techMentions].sort();
|
|
1520
|
+
this.debug('Tech requirements extracted for scaffolding', { count: techRequirements.length, items: techRequirements });
|
|
1521
|
+
|
|
1522
|
+
if (techRequirements.length === 0) {
|
|
1523
|
+
this.debug('No tech requirements found — skipping scaffolding generation');
|
|
1524
|
+
return;
|
|
1525
|
+
}
|
|
1526
|
+
|
|
1527
|
+
// 2. Call LLM to generate the scaffolding epic
|
|
1528
|
+
const provider = await this.getProviderForStageInstance('context-generation');
|
|
1529
|
+
const agentInstructions = loadAgent('scaffolding-generator.md');
|
|
1530
|
+
|
|
1531
|
+
const prompt = `## Project Context\n\n${this.rootContextMd || '(no root context)'}\n\n## Extracted Tech Requirements\n\nThe following technologies, packages, tools, and infrastructure were found across all ${hierarchy.epics.length} domain epics and their stories:\n\n${techRequirements.map(t => `- ${t}`).join('\n')}\n\n## Epic Count\n\n${hierarchy.epics.length} domain epics exist.`;
|
|
1532
|
+
|
|
1533
|
+
let scaffoldingData;
|
|
1534
|
+
try {
|
|
1535
|
+
scaffoldingData = await provider.generateJSON(prompt, agentInstructions);
|
|
1536
|
+
} catch (err) {
|
|
1537
|
+
this.debug('Scaffolding LLM call failed', { error: err.message });
|
|
1538
|
+
return;
|
|
1539
|
+
}
|
|
1540
|
+
|
|
1541
|
+
if (!scaffoldingData?.epic) {
|
|
1542
|
+
this.debug('Scaffolding LLM returned no epic data');
|
|
1543
|
+
return;
|
|
1544
|
+
}
|
|
1545
|
+
|
|
1546
|
+
// 3. Build the scaffolding epic object
|
|
1547
|
+
const scaffoldStories = (scaffoldingData.epic.stories || []).map((s, i) => {
|
|
1548
|
+
const storyId = `context-0000-${String(i + 1).padStart(4, '0')}`;
|
|
1549
|
+
const prevId = i > 0 ? `context-0000-${String(i).padStart(4, '0')}` : null;
|
|
1550
|
+
// Chain dependencies: each story depends on the previous one
|
|
1551
|
+
const deps = s.dependencies && s.dependencies.length > 0
|
|
1552
|
+
? s.dependencies
|
|
1553
|
+
: (prevId ? [prevId] : []);
|
|
1554
|
+
return {
|
|
1555
|
+
id: storyId,
|
|
1556
|
+
name: s.name,
|
|
1557
|
+
userType: s.userType || 'developers',
|
|
1558
|
+
description: s.description || '',
|
|
1559
|
+
acceptance: s.acceptance || [],
|
|
1560
|
+
dependencies: deps,
|
|
1561
|
+
};
|
|
1562
|
+
});
|
|
1563
|
+
|
|
1564
|
+
const scaffoldEpic = {
|
|
1565
|
+
id: 'context-0000',
|
|
1566
|
+
name: scaffoldingData.epic.name || 'Project Scaffolding and Environment Setup',
|
|
1567
|
+
domain: 'scaffolding',
|
|
1568
|
+
description: scaffoldingData.epic.description || '',
|
|
1569
|
+
features: scaffoldingData.epic.features || [],
|
|
1570
|
+
dependencies: [],
|
|
1571
|
+
stories: scaffoldStories,
|
|
1572
|
+
};
|
|
1573
|
+
|
|
1574
|
+
// 4. Insert as first epic
|
|
1575
|
+
hierarchy.epics.unshift(scaffoldEpic);
|
|
1576
|
+
|
|
1577
|
+
// 5. Inject dependency: all domain epics depend on scaffolding
|
|
1578
|
+
const scaffoldId = scaffoldEpic.id;
|
|
1579
|
+
let injected = 0;
|
|
1580
|
+
for (const epic of hierarchy.epics) {
|
|
1581
|
+
if (epic.id === scaffoldId) continue;
|
|
1582
|
+
if (!epic.dependencies) epic.dependencies = [];
|
|
1583
|
+
if (!epic.dependencies.includes(scaffoldId)) {
|
|
1584
|
+
epic.dependencies.push(scaffoldId);
|
|
1585
|
+
injected++;
|
|
1586
|
+
}
|
|
1587
|
+
}
|
|
1588
|
+
|
|
1589
|
+
// 6. Generate context.md for the scaffolding epic (so validators see it)
|
|
1590
|
+
try {
|
|
1591
|
+
const scaffoldContext = await this.generateEpicContextMdLLM(scaffoldEpic, provider);
|
|
1592
|
+
this._epicContextCache.set(scaffoldEpic.name, scaffoldContext);
|
|
1593
|
+
// Also generate story contexts (4th param is the provider)
|
|
1594
|
+
for (const story of scaffoldEpic.stories) {
|
|
1595
|
+
const storyContext = await this.generateStoryContextMdLLM(story, scaffoldEpic, scaffoldContext, provider);
|
|
1596
|
+
this._storyContextCache.set(`${scaffoldEpic.name}::${story.name}`, storyContext);
|
|
1597
|
+
}
|
|
1598
|
+
} catch (err) {
|
|
1599
|
+
this.debug('Scaffolding context generation failed — using fallback', { error: err.message });
|
|
1600
|
+
}
|
|
1601
|
+
|
|
1602
|
+
this.debug(`Scaffolding epic generated: "${scaffoldEpic.name}" with ${scaffoldEpic.stories.length} stories, ${injected} epics now depend on it`);
|
|
1603
|
+
this.debug('Scaffolding stories', scaffoldEpic.stories.map(s => s.name));
|
|
1604
|
+
}
|
|
1605
|
+
|
|
1606
|
+
/**
|
|
1607
|
+
* Extract technology mentions from a context.md string.
|
|
1608
|
+
* Looks for package names, frameworks, tools, and infrastructure.
|
|
1609
|
+
*/
|
|
1610
|
+
_extractTechFromContext(contextText, techSet) {
|
|
1611
|
+
if (!contextText) return;
|
|
1612
|
+
const lower = contextText.toLowerCase();
|
|
1613
|
+
|
|
1614
|
+
// Common tech patterns to detect
|
|
1615
|
+
const patterns = [
|
|
1616
|
+
// JS/Node ecosystem
|
|
1617
|
+
/\b(node\.?js|npm|yarn|pnpm)\b/gi,
|
|
1618
|
+
/\b(express\.?js|express|fastify|koa|hapi)\b/gi,
|
|
1619
|
+
/\b(react|vue\.?js|angular|svelte|next\.?js|nuxt)\b/gi,
|
|
1620
|
+
/\b(vite|webpack|rollup|esbuild|parcel)\b/gi,
|
|
1621
|
+
/\b(vitest|jest|mocha|chai|cypress|playwright)\b/gi,
|
|
1622
|
+
/\b(typescript|tsconfig)\b/gi,
|
|
1623
|
+
/\b(tailwind\s*css|bootstrap|material.ui)\b/gi,
|
|
1624
|
+
/\b(prisma|sequelize|typeorm|knex|drizzle)\b/gi,
|
|
1625
|
+
/\b(zustand|redux|mobx|react.query|tanstack)\b/gi,
|
|
1626
|
+
// Databases
|
|
1627
|
+
/\b(sqlite|postgresql|postgres|mysql|mongodb|redis)\b/gi,
|
|
1628
|
+
// Infrastructure
|
|
1629
|
+
/\b(docker|docker.compose|nginx|apache)\b/gi,
|
|
1630
|
+
/\b(kubernetes|k8s|terraform|aws|gcp|azure)\b/gi,
|
|
1631
|
+
// Python
|
|
1632
|
+
/\b(python|pip|poetry|flask|django|fastapi|pytest)\b/gi,
|
|
1633
|
+
// General
|
|
1634
|
+
/\b(git|eslint|prettier|husky)\b/gi,
|
|
1635
|
+
/\b(\.env|environment.variables|dotenv)\b/gi,
|
|
1636
|
+
// File types that indicate tech
|
|
1637
|
+
/\b(html5?|css3?|vanilla\s*javascript)\b/gi,
|
|
1638
|
+
];
|
|
1639
|
+
|
|
1640
|
+
for (const pattern of patterns) {
|
|
1641
|
+
const matches = contextText.match(pattern);
|
|
1642
|
+
if (matches) {
|
|
1643
|
+
for (const m of matches) {
|
|
1644
|
+
techSet.add(m.trim().toLowerCase());
|
|
1645
|
+
}
|
|
1646
|
+
}
|
|
1647
|
+
}
|
|
1648
|
+
}
|
|
1649
|
+
|
|
1650
|
+
/**
|
|
1651
|
+
* Update the status field in a work.json file on disk.
|
|
1652
|
+
*/
|
|
1653
|
+
_setWorkJsonStatus(workJsonPath, status) {
|
|
1654
|
+
try {
|
|
1655
|
+
if (!fs.existsSync(workJsonPath)) return;
|
|
1656
|
+
const workJson = JSON.parse(fs.readFileSync(workJsonPath, 'utf8'));
|
|
1657
|
+
workJson.status = status;
|
|
1658
|
+
fs.writeFileSync(workJsonPath, JSON.stringify(workJson, null, 2), 'utf8');
|
|
1659
|
+
} catch {}
|
|
1660
|
+
}
|
|
1661
|
+
|
|
1662
|
+
/**
|
|
1663
|
+
* Generate canonical context.md string for an epic from its JSON fields.
|
|
1664
|
+
* No LLM call needed — derived directly from decomposition output.
|
|
1665
|
+
* @param {Object} epic
|
|
1666
|
+
* @returns {string}
|
|
1667
|
+
*/
|
|
1668
|
+
generateEpicContextMd(epic) {
|
|
1669
|
+
const features = (epic.features || []).map(f => `- ${f}`).join('\n') || '- (none)';
|
|
1670
|
+
const deps = epic.dependencies || [];
|
|
1671
|
+
const optional = deps.filter(d => /optional/i.test(d));
|
|
1672
|
+
const required = deps.filter(d => !/optional/i.test(d));
|
|
1673
|
+
const reqLines = required.length ? required.map(d => `- ${d}`).join('\n') : '- (none)';
|
|
1674
|
+
const storyCount = (epic.stories || []).length;
|
|
1675
|
+
const lines = [
|
|
1676
|
+
`# Epic: ${epic.name}`,
|
|
1677
|
+
'',
|
|
1678
|
+
'## Identity',
|
|
1679
|
+
`- id: ${epic.id || '(pending)'}`,
|
|
1680
|
+
`- domain: ${epic.domain}`,
|
|
1681
|
+
`- stories: ${storyCount}`,
|
|
1682
|
+
'',
|
|
1683
|
+
'## Summary',
|
|
1684
|
+
epic.description || '(no description)',
|
|
1685
|
+
'',
|
|
1686
|
+
'## Features',
|
|
1687
|
+
features,
|
|
1688
|
+
'',
|
|
1689
|
+
'## Dependencies',
|
|
1690
|
+
'',
|
|
1691
|
+
'### Required',
|
|
1692
|
+
reqLines,
|
|
1693
|
+
];
|
|
1694
|
+
if (optional.length) {
|
|
1695
|
+
lines.push('', '### Optional');
|
|
1696
|
+
optional.forEach(d => lines.push(`- ${d}`));
|
|
1697
|
+
}
|
|
1698
|
+
return lines.join('\n');
|
|
1699
|
+
}
|
|
1700
|
+
|
|
1701
|
+
/**
|
|
1702
|
+
* Generate canonical context.md string for a story from its JSON fields.
|
|
1703
|
+
* No LLM call needed — derived directly from decomposition output.
|
|
1704
|
+
* @param {Object} story
|
|
1705
|
+
* @param {Object} epic - Parent epic for identity context
|
|
1706
|
+
* @returns {string}
|
|
1707
|
+
*/
|
|
1708
|
+
generateStoryContextMd(story, epic) {
|
|
1709
|
+
const ac = (story.acceptance || []).map((a, i) => `${i + 1}. ${a}`).join('\n') || '1. (none)';
|
|
1710
|
+
const deps = (story.dependencies || []).map(d => `- ${d}`).join('\n') || '- (none)';
|
|
1711
|
+
return [
|
|
1712
|
+
`# Story: ${story.name}`,
|
|
1713
|
+
'',
|
|
1714
|
+
'## Identity',
|
|
1715
|
+
`- id: ${story.id || '(pending)'}`,
|
|
1716
|
+
`- epic: ${epic.id || '(pending)'} (${epic.name})`,
|
|
1717
|
+
`- userType: ${story.userType || 'team member'}`,
|
|
1718
|
+
'',
|
|
1719
|
+
'## Summary',
|
|
1720
|
+
story.description || '(no description)',
|
|
1721
|
+
'',
|
|
1722
|
+
'## Acceptance Criteria',
|
|
1723
|
+
ac,
|
|
1724
|
+
'',
|
|
1725
|
+
'## Dependencies',
|
|
1726
|
+
deps,
|
|
1727
|
+
].join('\n');
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
/**
|
|
1731
|
+
* Build deterministic scaffold for epic context.md — Identity, Features, Dependencies, Stories Overview.
|
|
1732
|
+
* LLM only needs to write Purpose, Scope, Data Model, NFRs, Success Criteria.
|
|
1733
|
+
* @param {Object} epicData - Canonical epic JSON
|
|
1734
|
+
* @returns {string} Pre-built markdown sections
|
|
1735
|
+
*/
|
|
1736
|
+
_buildEpicScaffold(epicData) {
|
|
1737
|
+
const lines = [];
|
|
1738
|
+
lines.push(`# Epic: ${epicData.name}`);
|
|
1739
|
+
lines.push('');
|
|
1740
|
+
lines.push('## Identity');
|
|
1741
|
+
lines.push(`- id: ${epicData.id}`);
|
|
1742
|
+
lines.push(`- domain: ${epicData.domain || 'general'}`);
|
|
1743
|
+
lines.push(`- stories: ${(epicData.stories || []).length}`);
|
|
1744
|
+
lines.push('');
|
|
1745
|
+
lines.push('## Features');
|
|
1746
|
+
for (const feature of epicData.features || []) {
|
|
1747
|
+
lines.push(`- ${feature}`);
|
|
1748
|
+
}
|
|
1749
|
+
lines.push('');
|
|
1750
|
+
lines.push('## Dependencies');
|
|
1751
|
+
lines.push('');
|
|
1752
|
+
lines.push('### Required');
|
|
1753
|
+
const deps = epicData.dependencies || [];
|
|
1754
|
+
if (deps.length === 0) {
|
|
1755
|
+
lines.push('- (none)');
|
|
1756
|
+
} else {
|
|
1757
|
+
for (const dep of deps) {
|
|
1758
|
+
lines.push(`- ${dep}`);
|
|
1759
|
+
}
|
|
1760
|
+
}
|
|
1761
|
+
lines.push('');
|
|
1762
|
+
lines.push('### Optional');
|
|
1763
|
+
lines.push('- (none)');
|
|
1764
|
+
lines.push('');
|
|
1765
|
+
lines.push('## Stories Overview');
|
|
1766
|
+
for (const story of epicData.stories || []) {
|
|
1767
|
+
lines.push(`- ${story.id || 'TBD'}: ${story.name}`);
|
|
1768
|
+
}
|
|
1769
|
+
return lines.join('\n');
|
|
1770
|
+
}
|
|
1771
|
+
|
|
1772
|
+
/**
|
|
1773
|
+
* Build deterministic scaffold for story context.md — Identity, Acceptance Criteria, Dependencies.
|
|
1774
|
+
* LLM only needs to write User Story, Summary, Scope, Technical Notes.
|
|
1775
|
+
* @param {Object} storyData - Canonical story JSON
|
|
1776
|
+
* @returns {string} Pre-built markdown sections
|
|
1777
|
+
*/
|
|
1778
|
+
_buildStoryScaffold(storyData) {
|
|
1779
|
+
const lines = [];
|
|
1780
|
+
lines.push(`# Story: ${storyData.name}`);
|
|
1781
|
+
lines.push('');
|
|
1782
|
+
lines.push('## Identity');
|
|
1783
|
+
lines.push(`- id: ${storyData.id || 'TBD'}`);
|
|
1784
|
+
lines.push(`- epic: ${storyData.epicId || 'TBD'} (${storyData.epicName || 'TBD'})`);
|
|
1785
|
+
lines.push(`- userType: ${storyData.userType || 'team member'}`);
|
|
1786
|
+
lines.push('');
|
|
1787
|
+
lines.push('## Acceptance Criteria');
|
|
1788
|
+
const acceptance = storyData.acceptance || [];
|
|
1789
|
+
for (let i = 0; i < acceptance.length; i++) {
|
|
1790
|
+
lines.push(`${i + 1}. ${acceptance[i]}`);
|
|
1791
|
+
}
|
|
1792
|
+
lines.push('');
|
|
1793
|
+
lines.push('## Dependencies');
|
|
1794
|
+
const deps = storyData.dependencies || [];
|
|
1795
|
+
if (deps.length === 0) {
|
|
1796
|
+
lines.push('- (none)');
|
|
1797
|
+
} else {
|
|
1798
|
+
for (const dep of deps) {
|
|
1799
|
+
lines.push(`- ${dep}`);
|
|
1800
|
+
}
|
|
1801
|
+
}
|
|
1802
|
+
return lines.join('\n');
|
|
1803
|
+
}
|
|
1804
|
+
|
|
1805
|
+
/**
|
|
1806
|
+
* Detect domain-specific concerns from project context and work item features.
|
|
1807
|
+
* Returns checklist items that the LLM MUST address in NFRs or Technical Notes.
|
|
1808
|
+
* @param {string} rootContext - Root context.md text
|
|
1809
|
+
* @param {Object} workItem - Epic or story-like object with features/description
|
|
1810
|
+
* @returns {string[]} Domain hints
|
|
1811
|
+
*/
|
|
1812
|
+
_detectDomainHints(rootContext, workItem) {
|
|
1813
|
+
const hints = [];
|
|
1814
|
+
const allText = [
|
|
1815
|
+
rootContext,
|
|
1816
|
+
workItem.description || '',
|
|
1817
|
+
...(workItem.features || []),
|
|
1818
|
+
].join(' ').toLowerCase();
|
|
1819
|
+
|
|
1820
|
+
// Scheduling / time-related → timezone handling
|
|
1821
|
+
if (/schedul|cron|recurring|time.?zone|appointment|booking|remind/i.test(allText)) {
|
|
1822
|
+
hints.push('Timezone handling: specify how dates/times are stored (UTC recommended) and how user-local display is handled');
|
|
1823
|
+
}
|
|
1824
|
+
|
|
1825
|
+
// Auth / passwords → hashing strategy
|
|
1826
|
+
if (/auth|password|login|sign.?up|credential|session|jwt|token/i.test(allText)) {
|
|
1827
|
+
hints.push('Authentication security: specify password hashing algorithm (bcrypt/argon2), token expiry, and session invalidation strategy');
|
|
1828
|
+
}
|
|
1829
|
+
|
|
1830
|
+
// Messaging / notifications → delivery guarantees
|
|
1831
|
+
if (/messag|chat|notification|push|sms|whatsapp|email|webhook/i.test(allText)) {
|
|
1832
|
+
hints.push('Message delivery: specify retry/queue strategy for failed deliveries, idempotency handling, and rate limiting');
|
|
1833
|
+
}
|
|
1834
|
+
|
|
1835
|
+
// File uploads / media → size limits and storage
|
|
1836
|
+
if (/upload|media|image|file|attachment|storage|s3|blob/i.test(allText)) {
|
|
1837
|
+
hints.push('File handling: specify max file size, allowed MIME types, storage backend, and virus/malware scanning if applicable');
|
|
1838
|
+
}
|
|
1839
|
+
|
|
1840
|
+
// Payment / billing → PCI compliance
|
|
1841
|
+
if (/payment|billing|invoice|subscript|stripe|charge|refund/i.test(allText)) {
|
|
1842
|
+
hints.push('Payment security: specify PCI compliance approach (tokenization recommended), refund handling, and receipt/audit trail');
|
|
1843
|
+
}
|
|
1844
|
+
|
|
1845
|
+
// Real-time / websocket → connection management
|
|
1846
|
+
if (/real.?time|websocket|socket\.io|live|stream|sse|event.?source/i.test(allText)) {
|
|
1847
|
+
hints.push('Real-time connections: specify reconnection strategy, heartbeat interval, and graceful degradation when WebSocket is unavailable');
|
|
1848
|
+
}
|
|
1849
|
+
|
|
1850
|
+
return hints;
|
|
1851
|
+
}
|
|
1852
|
+
|
|
1853
|
+
/**
 * Deterministic quality check for generated context.md — verifies structural completeness
 * and source fidelity without relying on LLM self-assessment.
 * Scoring starts at 100, subtracts fixed penalties per detected defect, and is clamped at 0.
 * Some checks (e.g. the Data Model suggestion) add issues without a score penalty.
 * @param {string} contextText - Generated context.md content
 * @param {Object} sourceJson - The canonical source JSON (epic or story)
 * @param {'epic'|'story'} type
 * @param {Set<string>} [validIds] - Optional set of all valid epic/story IDs in the hierarchy for dependency cross-validation
 * @returns {{ score: number, issues: string[] }}
 */
_computeContextScore(contextText, sourceJson, type, validIds = null) {
  let score = 100;
  const issues = [];
  const lower = contextText.toLowerCase();

  // 1. Required sections present (-10 each). Section lists differ by work-item type.
  const requiredSections = type === 'epic'
    ? ['## Identity', '## Purpose', '## Scope', '### In Scope', '### Out of Scope',
       '## Features', '## Non-Functional Requirements', '## Dependencies', '## Success Criteria', '## Stories Overview']
    : ['## Identity', '## User Story', '## Summary', '## Scope', '### In Scope',
       '### Out of Scope', '## Acceptance Criteria', '## Technical Notes', '## Dependencies'];

  for (const section of requiredSections) {
    if (!contextText.includes(section)) {
      score -= 10;
      issues.push(`Missing required section: ${section}`);
    }
  }

  // 2. Feature / acceptance criteria coverage against source JSON
  if (type === 'epic') {
    const features = sourceJson.features || [];
    let missing = 0;
    for (const feature of features) {
      // Extract the key term before parenthetical details
      const featureKey = feature.split('(')[0].trim().toLowerCase().replace(/[-_]/g, ' ');
      // Keys of 3 chars or fewer are too noisy to match and never count as missing (-5 each otherwise).
      if (!lower.includes(featureKey) && featureKey.length > 3) {
        missing++;
        issues.push(`Feature possibly missing from output: "${feature.slice(0, 60)}"`);
      }
    }
    score -= missing * 5;
  } else {
    const acceptance = sourceJson.acceptance || [];
    if (acceptance.length > 0) {
      // Strip markdown formatting (backticks, bold, italic) for fuzzy matching
      const stripMd = (s) => s.toLowerCase().replace(/[`*_~]/g, '').replace(/\s+/g, ' ');
      const lowerStripped = stripMd(contextText);
      let matched = 0;
      for (const ac of acceptance) {
        const acStripped = stripMd(ac);
        // Extract key terms (3+ char words) and check if majority appear in context
        const keyTerms = acStripped.split(/\s+/).filter(w => w.length >= 3 && !/^(the|and|for|with|from|that|this|are|was|has|have|will|can|not|but|its|also|into)$/.test(w));
        if (keyTerms.length === 0) { matched++; continue; }
        const found = keyTerms.filter(term => lowerStripped.includes(term)).length;
        // A criterion counts as covered when at least half of its key terms appear.
        if (found / keyTerms.length >= 0.5) matched++;
      }
      const coverage = matched / acceptance.length;
      // Only penalize below 70% coverage; -5 per uncovered criterion.
      if (coverage < 0.7) {
        const missCount = acceptance.length - matched;
        score -= missCount * 5;
        issues.push(`Only ${matched}/${acceptance.length} acceptance criteria found in output`);
      }
    }
  }

  // 3. Non-ASCII anomaly detection (catches Chinese/CJK character leaks from local models)
  const cjkPattern = /[\u3000-\u9FFF\uF900-\uFAFF\uFE30-\uFE4F]/g;
  const cjkMatches = contextText.match(cjkPattern);
  if (cjkMatches && cjkMatches.length > 0) {
    score -= 15;
    issues.push(`Non-ASCII anomaly: ${cjkMatches.length} CJK character(s) detected — likely model generation artifact`);
  }

  // 4. Section minimum substance (headers with < 2 content lines, -5 each).
  // Splitting on /^## /m leaves the leading "# Epic/Story:" title as the first chunk,
  // which starts with '#' and is skipped below.
  const sectionBlocks = contextText.split(/^## /m).filter(s => s.trim());
  for (const block of sectionBlocks) {
    const lines = block.split('\n').filter(l => l.trim());
    const sectionName = lines[0]?.trim().split('\n')[0] || 'unknown';
    // Skip subsections (### In Scope etc.) — they're part of parent
    if (sectionName.startsWith('#')) continue;
    if (lines.length < 2) {
      score -= 5;
      issues.push(`Section too thin: "## ${sectionName}" has only ${lines.length} content line(s)`);
    }
  }

  // 5. Dependencies match source JSON (-5 per dependency absent from the text).
  // Deps may be plain ID strings or objects with id/name.
  const sourceDeps = sourceJson.dependencies || [];
  for (const dep of sourceDeps) {
    const depId = typeof dep === 'string' ? dep : dep.id || dep.name || '';
    if (depId && !contextText.includes(depId)) {
      score -= 5;
      issues.push(`Dependency from source JSON not found in output: "${depId}"`);
    }
  }

  // 6. Dependency cross-validation — verify referenced IDs exist in hierarchy (-10 each).
  // Only IDs matching the canonical "context-NNNN[-NNNN]" shape are checked.
  if (validIds && sourceDeps.length > 0) {
    for (const dep of sourceDeps) {
      const depId = typeof dep === 'string' ? dep : dep.id || dep.name || '';
      if (depId && /^context-\d{4}(-\d{4})?$/.test(depId) && !validIds.has(depId)) {
        score -= 10;
        issues.push(`Broken dependency chain: "${depId}" does not exist in the hierarchy`);
      }
    }
  }

  // 7. Data model presence for data-heavy epics (soft suggestion, no score penalty)
  if (type === 'epic') {
    const featureText = (sourceJson.features || []).join(' ').toLowerCase();
    const hasDataKeywords = /stor|persist|record|track|log|database|schema|model|ingestion/.test(featureText);
    if (hasDataKeywords && !contextText.includes('## Data Model')) {
      issues.push('Consider adding "## Data Model Sketch" section — features mention data storage');
    }
  }

  // 8. Auth mechanism consistency — check against root context (-10 per mismatch).
  // The root context's "## Authentication" mechanism string (written by
  // generateRootContextMd) determines which references are considered inconsistent.
  if (this.rootContextMd) {
    const rootLower = this.rootContextMd.toLowerCase();
    const isNoAuth = rootLower.includes('none (public') || rootLower.includes('no authentication required');
    const isSessionAuth = !isNoAuth && (rootLower.includes('session-based') || rootLower.includes('httponly'));
    const isJwtAuth = !isNoAuth && rootLower.includes('jwt bearer');
    if (isNoAuth) {
      // No-auth project — penalize any auth mechanism references
      if (/\bjwt\b|bearer\s+token|session\s+cookie|httponly|authorization\s*:\s*bearer|login|sign.?in/i.test(contextText)) {
        score -= 10;
        issues.push('Auth inconsistency: context references auth mechanisms but project has no authentication');
      }
    } else if (isSessionAuth) {
      // Session auth project — penalize JWT references
      if (/\bjwt\b|bearer\s+token|authorization\s*:\s*bearer/i.test(contextText)) {
        score -= 10;
        issues.push('Auth inconsistency: context references JWT/bearer tokens but project uses session-based auth (httpOnly cookies)');
      }
    } else if (isJwtAuth) {
      // JWT project — penalize session cookie references
      if (/session\s+cookie|httponly\s+cookie|sessionid\s+cookie/i.test(contextText)) {
        score -= 10;
        issues.push('Auth inconsistency: context references session cookies but project uses JWT bearer tokens');
      }
    }
  }

  // 9. Rate limit numeric consistency — detect contradictory values within same context (-5 once)
  const rateLimitMatches = contextText.match(/(\d+)\s*(requests?|req|messages?|calls?)\s*\/?\s*(second|sec|s|minute|min|m|hour|hr|h)\b/gi) || [];
  if (rateLimitMatches.length >= 2) {
    // Normalize to per-minute for comparison
    const normalize = (match) => {
      const m = match.match(/(\d+)\s*(?:requests?|req|messages?|calls?)\s*\/?\s*(second|sec|s|minute|min|m|hour|hr|h)/i);
      if (!m) return null;
      const val = parseInt(m[1]);
      const unit = m[2].toLowerCase();
      // Unit prefix check: 's*' → per second, 'h*' → per hour; 'minute'/'min'/'m' fall through.
      if (unit.startsWith('s')) return val * 60; // per sec → per min
      if (unit.startsWith('h')) return val / 60; // per hour → per min
      return val; // per min
    };
    const normalized = rateLimitMatches.map(normalize).filter(v => v !== null);
    if (normalized.length >= 2) {
      const min = Math.min(...normalized);
      const max = Math.max(...normalized);
      if (max / min > 10) { // >10x discrepancy
        score -= 5;
        issues.push(`Rate limit inconsistency: found values ranging from ${min}/min to ${max}/min (${max/min}x difference)`);
      }
    }
  }

  // Clamp: penalties can exceed 100, but the reported score never goes negative.
  return { score: Math.max(0, score), issues };
}
|
|
2022
|
+
|
|
2023
|
+
/**
|
|
2024
|
+
* Call generateJSON with tool support if the provider has it, otherwise plain generateJSON.
|
|
2025
|
+
* Only provides tools on the first write call (iter 0) — refinement iterations don't need them.
|
|
2026
|
+
* @param {Object} provider - LLM provider instance
|
|
2027
|
+
* @param {string} prompt - User prompt
|
|
2028
|
+
* @param {string} instructions - System/agent instructions
|
|
2029
|
+
* @param {boolean} [useTools=false] - Whether to attempt tool-augmented generation
|
|
2030
|
+
* @returns {Promise<Object>} Parsed JSON result
|
|
2031
|
+
*/
|
|
2032
|
+
async _generateJSONMaybeWithTools(provider, prompt, instructions, useTools = false) {
|
|
2033
|
+
if (useTools && typeof provider.generateJSONWithTools === 'function') {
|
|
2034
|
+
try {
|
|
2035
|
+
return await provider.generateJSONWithTools(
|
|
2036
|
+
prompt, instructions,
|
|
2037
|
+
CONTEXT_GENERATION_TOOLS, dispatchToolCall
|
|
2038
|
+
);
|
|
2039
|
+
} catch (err) {
|
|
2040
|
+
this.debug(`Tool-augmented generation failed, falling back to plain generateJSON: ${err.message}`);
|
|
2041
|
+
}
|
|
2042
|
+
}
|
|
2043
|
+
return provider.generateJSON(prompt, instructions);
|
|
2044
|
+
}
|
|
2045
|
+
|
|
2046
|
+
  /**
   * Generate a complete canonical context.md for an epic using an LLM agent.
   * Uses deterministic pre-check (not LLM self-score) to decide whether to invoke the reviewer.
   * Falls back to the structured formatter if the LLM call fails.
   *
   * Flow: build a canonical epic JSON + deterministic scaffold, then run a
   * write → review → refine loop (max 3 writer calls). The best-scoring draft
   * seen at any iteration is kept; if no draft is ever produced, the
   * structured (non-LLM) formatter is used as a fallback.
   *
   * @param {Object} epic - Epic node; reads id, name, domain, description, features, dependencies, stories
   * @param {LLMProvider} provider - Provider used for both writer and reviewer calls
   * @returns {Promise<string>} context.md text
   */
  async generateEpicContextMdLLM(epic, provider) {
    const writerInstructions = loadAgent('context-writer-epic.md');
    const reviewerInstructions = loadAgent('context-reviewer-epic.md');
    // Project-level context is prepended to both prompts when available.
    const rootSection = this.rootContextMd ? `## Project Context\n\n${this.rootContextMd}\n\n` : '';

    // Canonical source for both writer and reviewer — a trimmed projection of
    // the epic so both agents audit against the exact same JSON.
    const epicForContext = {
      id: epic.id,
      name: epic.name,
      domain: epic.domain,
      description: epic.description,
      features: epic.features || [],
      dependencies: epic.dependencies || [],
      stories: (epic.stories || []).map(s => ({ id: s.id || 'TBD', name: s.name })),
    };
    const epicJson = JSON.stringify(epicForContext, null, 2);

    // Template-based scaffolding — pre-generate deterministic sections so the LLM
    // only needs to write Purpose, Scope, Data Model, NFRs, and Success Criteria.
    const scaffold = this._buildEpicScaffold(epicForContext);
    const scaffoldHint = `\n\n## Pre-built Scaffold (use verbatim for these sections, write the rest)\n\n\`\`\`\n${scaffold}\n\`\`\``;

    // Domain-aware prompt injection — detect project features and add mandatory checklist items
    const domainHints = this._detectDomainHints(this.rootContextMd || '', epicForContext);
    const domainSection = domainHints.length > 0
      ? `\n\n## Domain-Specific Requirements (MUST address in NFRs or Technical Notes)\n${domainHints.map(h => `- ${h}`).join('\n')}`
      : '';

    const baseWriterPrompt = `${rootSection}## Epic JSON\n\n\`\`\`json\n${epicJson}\n\`\`\`${scaffoldHint}${domainSection}`;
    const baseReviewerPrompt = `${rootSection}## Original Epic JSON\n\n\`\`\`json\n${epicJson}\n\`\`\``;

    let bestContext = null;
    let bestScore = 0;
    let writerPrompt = `${baseWriterPrompt}\n\nWrite the complete context.md for this epic. Use the pre-built scaffold verbatim for Identity, Features, Dependencies, and Stories Overview sections. Focus your effort on Purpose, Scope, Data Model, NFRs, and Success Criteria.`;

    // Write → Review → Refine loop (max 2 review rounds = max 3 LLM calls total)
    for (let iter = 0; iter < 3; iter++) {
      // Step 1: Write (or refine) — use tool-augmented generation on first iteration
      const writeResult = await this._generateJSONMaybeWithTools(provider, writerPrompt, writerInstructions, iter === 0);
      const contextText = (typeof writeResult?.context === 'string' && writeResult.context.trim()) ? writeResult.context : null;
      // A missing self-score defaults to 100 so the deterministic pre-check
      // below (not the writer's own confidence) decides whether to review.
      const writerScore = typeof writeResult?.completenessScore === 'number' ? writeResult.completenessScore : 100;
      const writerGaps = Array.isArray(writeResult?.gaps) ? writeResult.gaps : [];

      if (!contextText) {
        this.debug(`[context-writer-epic] iter=${iter + 1} — no context returned, stopping (epic: ${epic.name})`);
        break;
      }

      // Deterministic pre-check — verifies structure and source fidelity without LLM
      const preCheck = this._computeContextScore(contextText, epicForContext, 'epic', this._hierarchyValidIds);
      const canSkipReview = preCheck.score >= 92;

      if (canSkipReview) {
        this.debug(`[context-epic] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} — skipping review (deterministic pass) (epic: ${epic.name})`);
        if (preCheck.score > bestScore) {
          bestContext = contextText;
          bestScore = preCheck.score;
        }
        break;
      }

      // Pre-check found issues — run independent LLM review
      this.debug(`[context-epic] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} preCheckIssues=${preCheck.issues.length} — triggering review (epic: ${epic.name})`);

      // Step 2: Independent review — verifies accuracy against source JSON
      const reviewPrompt = `${baseReviewerPrompt}\n\n## Generated context.md\n\n${contextText}\n\nAudit this context.md against the source JSON.`;
      const reviewResult = await provider.generateJSON(reviewPrompt, reviewerInstructions);
      const reviewScore = typeof reviewResult?.score === 'number' ? reviewResult.score : preCheck.score;
      const reviewIssues = Array.isArray(reviewResult?.issues) ? reviewResult.issues : [];
      const accurate = reviewResult?.accurate === true;

      // Combine deterministic issues with LLM reviewer issues (dedup by prefix —
      // two issues sharing the same first 40 chars are assumed to be the same finding)
      const combinedIssues = [...preCheck.issues];
      for (const ri of reviewIssues) {
        if (!combinedIssues.some(ci => ci.slice(0, 40) === ri.slice(0, 40))) {
          combinedIssues.push(ri);
        }
      }

      // Keep the best version seen so far (use reviewer score, not self-score)
      if (reviewScore > bestScore) {
        bestContext = contextText;
        bestScore = reviewScore;
      }

      this.debug(`[context-epic] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} reviewScore=${reviewScore} accurate=${accurate} issues=${combinedIssues.length} (epic: ${epic.name})`);

      // Stop if reviewer confirms accuracy and score is high enough
      if (accurate && reviewScore >= 85) break;
      if (iter === 2) break; // max iterations reached

      // Step 3: Build refinement prompt combining deterministic + reviewer + writer issues
      // (writer gaps are added only when no combined issue already covers them)
      const allFeedback = [
        ...combinedIssues,
        ...writerGaps.filter(g => !combinedIssues.some(i => i.includes(g.slice(0, 30)))),
      ];
      const feedbackText = allFeedback.map((f, i) => `${i + 1}. ${f}`).join('\n');
      writerPrompt = `${baseWriterPrompt}\n\n## Draft Context (Review Score: ${reviewScore}/100)\n\n${contextText}\n\n## Issues to Fix\n\n${feedbackText}\n\nRevise the context.md to address all issues above. Return improved JSON.`;
    }

    // Fall back to the deterministic structured formatter when no LLM draft survived.
    return bestContext || this.generateEpicContextMd(epic);
  }
|
|
2156
|
+
|
|
2157
|
+
  /**
   * Generate a complete canonical context.md for a story using an LLM agent.
   * Uses deterministic pre-check (not LLM self-score) to decide whether to invoke the reviewer.
   * Falls back to the structured formatter if the LLM call fails.
   *
   * Same write → review → refine loop as the epic variant, with the parent
   * epic's context.md prepended so the story stays consistent with its epic.
   *
   * @param {Object} story - Story node; reads id, name, userType, description, acceptance, dependencies
   * @param {Object} epic - Parent epic (id and name are embedded in the canonical JSON)
   * @param {string} epicContextMd - Parent epic's generated context.md
   * @param {LLMProvider} provider - Provider used for both writer and reviewer calls
   * @returns {Promise<string>} context.md text
   */
  async generateStoryContextMdLLM(story, epic, epicContextMd, provider) {
    const writerInstructions = loadAgent('context-writer-story.md');
    const reviewerInstructions = loadAgent('context-reviewer-story.md');
    const rootSection = this.rootContextMd ? `## Project Context\n\n${this.rootContextMd}\n\n` : '';
    const epicSection = epicContextMd ? `## Parent Epic Context\n\n${epicContextMd}\n\n` : '';

    // Canonical source for both writer and reviewer — same trimmed-projection
    // approach as the epic path, with 'TBD' placeholders for missing IDs.
    const storyForContext = {
      id: story.id || 'TBD',
      name: story.name,
      userType: story.userType || 'team member',
      description: story.description,
      acceptance: story.acceptance || [],
      dependencies: story.dependencies || [],
      epicId: epic.id || 'TBD',
      epicName: epic.name,
    };
    const storyJson = JSON.stringify(storyForContext, null, 2);

    // Template-based scaffolding for stories
    const scaffold = this._buildStoryScaffold(storyForContext);
    const scaffoldHint = `\n\n## Pre-built Scaffold (use verbatim for these sections, write the rest)\n\n\`\`\`\n${scaffold}\n\`\`\``;

    // Domain-aware hints for stories — acceptance criteria stand in for features here
    const domainHints = this._detectDomainHints(this.rootContextMd || '', { features: story.acceptance || [], description: story.description });
    const domainSection = domainHints.length > 0
      ? `\n\n## Domain-Specific Requirements (MUST address in Technical Notes)\n${domainHints.map(h => `- ${h}`).join('\n')}`
      : '';

    const baseWriterPrompt = `${rootSection}${epicSection}## Story JSON\n\n\`\`\`json\n${storyJson}\n\`\`\`${scaffoldHint}${domainSection}`;
    const baseReviewerPrompt = `${rootSection}## Original Story JSON\n\n\`\`\`json\n${storyJson}\n\`\`\``;

    let bestContext = null;
    let bestScore = 0;
    let writerPrompt = `${baseWriterPrompt}\n\nWrite the complete context.md for this story. Use the pre-built scaffold verbatim for Identity, Acceptance Criteria, and Dependencies sections. Focus on User Story, Summary, Scope, and Technical Notes.`;

    // Write → Review → Refine loop (max 2 review rounds = max 3 LLM calls total)
    for (let iter = 0; iter < 3; iter++) {
      // Step 1: Write (or refine) — use tool-augmented generation on first iteration
      const writeResult = await this._generateJSONMaybeWithTools(provider, writerPrompt, writerInstructions, iter === 0);
      const contextText = (typeof writeResult?.context === 'string' && writeResult.context.trim()) ? writeResult.context : null;
      // Missing self-score defaults to 100 — the deterministic pre-check decides.
      const writerScore = typeof writeResult?.completenessScore === 'number' ? writeResult.completenessScore : 100;
      const writerGaps = Array.isArray(writeResult?.gaps) ? writeResult.gaps : [];

      if (!contextText) {
        this.debug(`[context-writer-story] iter=${iter + 1} — no context returned, stopping (story: ${story.name})`);
        break;
      }

      // Deterministic pre-check — verifies structure and source fidelity without LLM
      const preCheck = this._computeContextScore(contextText, storyForContext, 'story', this._hierarchyValidIds);
      const canSkipReview = preCheck.score >= 92;

      if (canSkipReview) {
        this.debug(`[context-story] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} — skipping review (deterministic pass) (story: ${story.name})`);
        if (preCheck.score > bestScore) {
          bestContext = contextText;
          bestScore = preCheck.score;
        }
        break;
      }

      // Pre-check found issues — run independent LLM review
      this.debug(`[context-story] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} preCheckIssues=${preCheck.issues.length} — triggering review (story: ${story.name})`);

      // Step 2: Independent review — verifies accuracy against source JSON
      const reviewPrompt = `${baseReviewerPrompt}\n\n## Generated context.md\n\n${contextText}\n\nAudit this context.md against the source JSON.`;
      const reviewResult = await provider.generateJSON(reviewPrompt, reviewerInstructions);
      const reviewScore = typeof reviewResult?.score === 'number' ? reviewResult.score : preCheck.score;
      const reviewIssues = Array.isArray(reviewResult?.issues) ? reviewResult.issues : [];
      const accurate = reviewResult?.accurate === true;

      // Combine deterministic issues with LLM reviewer issues (dedup by prefix —
      // same first-40-chars heuristic as the epic path)
      const combinedIssues = [...preCheck.issues];
      for (const ri of reviewIssues) {
        if (!combinedIssues.some(ci => ci.slice(0, 40) === ri.slice(0, 40))) {
          combinedIssues.push(ri);
        }
      }

      // Keep the best version seen so far (reviewer score, not self-score)
      if (reviewScore > bestScore) {
        bestContext = contextText;
        bestScore = reviewScore;
      }

      this.debug(`[context-story] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} reviewScore=${reviewScore} accurate=${accurate} issues=${combinedIssues.length} (story: ${story.name})`);

      // Stories are held to a stricter bar than epics: the deterministic
      // pre-check must ALSO pass 85 before accepting the reviewer's verdict.
      if (accurate && reviewScore >= 85 && preCheck.score >= 85) break;
      if (iter === 2) break; // max iterations reached

      // Step 3: Refinement prompt combining deterministic + reviewer + writer issues
      const allFeedback = [
        ...combinedIssues,
        ...writerGaps.filter(g => !combinedIssues.some(i => i.includes(g.slice(0, 30)))),
      ];
      const feedbackText = allFeedback.map((f, i) => `${i + 1}. ${f}`).join('\n');
      writerPrompt = `${baseWriterPrompt}\n\n## Draft Context (Review Score: ${reviewScore}/100)\n\n${contextText}\n\n## Issues to Fix\n\n${feedbackText}\n\nRevise the context.md to address all issues above. Return improved JSON.`;
    }

    // Fall back to the deterministic structured formatter when no LLM draft survived.
    return bestContext || this.generateStoryContextMd(story, epic);
  }
|
|
2268
|
+
|
|
2269
|
+
/**
|
|
2270
|
+
* Pre-generate LLM context.md for all epics and stories before validation.
|
|
2271
|
+
* Results are cached in _epicContextCache (keyed by epic.name) and
|
|
2272
|
+
* _storyContextCache (keyed by "epicName::storyName").
|
|
2273
|
+
* Uses 'context-generation' stage config if defined; falls back to 'doc-generation'.
|
|
2274
|
+
* @param {Object} hierarchy
|
|
2275
|
+
* @param {Function} progressCallback
|
|
2276
|
+
*/
|
|
2277
|
+
async generateContextFiles(hierarchy, progressCallback = null) {
|
|
2278
|
+
this.debugStage(4.8, 'Pre-generate LLM Context Files');
|
|
2279
|
+
this._epicContextCache = new Map();
|
|
2280
|
+
this._storyContextCache = new Map();
|
|
2281
|
+
|
|
2282
|
+
// Build set of all valid IDs for dependency cross-validation
|
|
2283
|
+
this._hierarchyValidIds = new Set();
|
|
2284
|
+
for (const epic of hierarchy.epics) {
|
|
2285
|
+
if (epic.id) this._hierarchyValidIds.add(epic.id);
|
|
2286
|
+
for (const story of epic.stories || []) {
|
|
2287
|
+
if (story.id) this._hierarchyValidIds.add(story.id);
|
|
2288
|
+
}
|
|
2289
|
+
}
|
|
2290
|
+
this.debug(`Dependency cross-validation: ${this._hierarchyValidIds.size} valid IDs indexed`);
|
|
2291
|
+
|
|
2292
|
+
// Use context-generation stage if configured; fall back to doc-generation then ceremony default
|
|
2293
|
+
const stageName = this.stagesConfig?.['context-generation'] ? 'context-generation' : 'doc-generation';
|
|
2294
|
+
const provider = await this.getProviderForStageInstance(stageName);
|
|
2295
|
+
const { model: modelName } = this.getProviderForStage(stageName);
|
|
2296
|
+
this.debug(`Context generation using model: ${modelName}`);
|
|
2297
|
+
|
|
2298
|
+
const epicCount = hierarchy.epics.length;
|
|
2299
|
+
const storyCount = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
|
|
2300
|
+
await progressCallback?.(null, `Generating context for ${epicCount} epics + ${storyCount} stories…`, {});
|
|
2301
|
+
|
|
2302
|
+
// Concurrency control — limit concurrent context-generation calls to avoid saturating the server.
|
|
2303
|
+
const defaultCtxConcurrency = this._providerName === 'local' ? 2 : hierarchy.epics.length;
|
|
2304
|
+
const ctxConcurrency = this.stagesConfig?.['context-generation']?.concurrency ?? defaultCtxConcurrency;
|
|
2305
|
+
this.debug(`Context generation concurrency: ${ctxConcurrency} (provider: ${this._providerName})`);
|
|
2306
|
+
|
|
2307
|
+
// Helper: run async tasks with concurrency limit
|
|
2308
|
+
const runWithConcurrency = async (items, fn, limit) => {
|
|
2309
|
+
if (limit >= items.length) {
|
|
2310
|
+
return Promise.all(items.map(fn));
|
|
2311
|
+
}
|
|
2312
|
+
const queue = [...items];
|
|
2313
|
+
const running = new Set();
|
|
2314
|
+
let idx = 0;
|
|
2315
|
+
while (idx < queue.length || running.size > 0) {
|
|
2316
|
+
while (idx < queue.length && running.size < limit) {
|
|
2317
|
+
const item = queue[idx++];
|
|
2318
|
+
const p = fn(item).then(() => running.delete(p));
|
|
2319
|
+
running.add(p);
|
|
2320
|
+
}
|
|
2321
|
+
if (running.size > 0) await Promise.race(running);
|
|
2322
|
+
}
|
|
2323
|
+
};
|
|
2324
|
+
|
|
2325
|
+
// Phase 1: Generate epic contexts with concurrency control
|
|
2326
|
+
const epicCtxFailures = { count: 0 };
|
|
2327
|
+
await runWithConcurrency(hierarchy.epics, async (epic) => {
|
|
2328
|
+
if (epicCtxFailures.count >= 3) {
|
|
2329
|
+
this.debug(`Skipping LLM context for epic "${epic.name}" — too many context-size failures, using structured fallback`);
|
|
2330
|
+
this._epicContextCache.set(epic.name, this.generateEpicContextMd(epic));
|
|
2331
|
+
} else {
|
|
2332
|
+
try {
|
|
2333
|
+
this.debug(`Generating context for epic: ${epic.name}`);
|
|
2334
|
+
const epicContextMd = await this.generateEpicContextMdLLM(epic, provider);
|
|
2335
|
+
this._epicContextCache.set(epic.name, epicContextMd);
|
|
2336
|
+
this.debug(`Epic context generated: ${epic.name} (${epicContextMd.length} bytes)`);
|
|
2337
|
+
} catch (err) {
|
|
2338
|
+
if (err.message?.toLowerCase().includes('context size') || err.message?.includes('exceeded')) {
|
|
2339
|
+
epicCtxFailures.count++;
|
|
2340
|
+
this.debug(`Context-size failure #${epicCtxFailures.count} for epic context "${epic.name}"`, { error: err.message });
|
|
2341
|
+
}
|
|
2342
|
+
this.debug(`Epic context generation failed — using structured fallback (${epic.name})`, { error: err.message });
|
|
2343
|
+
this._epicContextCache.set(epic.name, this.generateEpicContextMd(epic));
|
|
2344
|
+
}
|
|
2345
|
+
}
|
|
2346
|
+
// Write context.md immediately using provisional ID — visible on disk during the ceremony.
|
|
2347
|
+
// Stage 6 (writeHierarchyFiles) will rename the folder if IDs change after renumbering.
|
|
2348
|
+
try {
|
|
2349
|
+
const provisionalEpicDir = path.join(this.projectPath, epic.id);
|
|
2350
|
+
if (!fs.existsSync(provisionalEpicDir)) fs.mkdirSync(provisionalEpicDir, { recursive: true });
|
|
2351
|
+
fs.writeFileSync(path.join(provisionalEpicDir, 'context.md'), this._epicContextCache.get(epic.name), 'utf8');
|
|
2352
|
+
this.debug(`Epic context.md written early (provisional): ${epic.id}/context.md`);
|
|
2353
|
+
} catch (err) {
|
|
2354
|
+
this.debug(`Early epic context.md write failed — will retry in Stage 6`, { error: err.message });
|
|
2355
|
+
}
|
|
2356
|
+
}, ctxConcurrency);
|
|
2357
|
+
|
|
2358
|
+
// Phase 2: Generate story contexts with concurrency control.
|
|
2359
|
+
// Stories within each epic run sequentially to maximise OpenAI prefix-cache hits:
|
|
2360
|
+
// all stories of the same epic share an identical system-message prefix
|
|
2361
|
+
// (agentInstructions + epic context.md), so calls 2-N hit the cache at a 90% discount.
|
|
2362
|
+
const storyCtxFailures = { count: 0 };
|
|
2363
|
+
await runWithConcurrency(hierarchy.epics, async (epic) => {
|
|
2364
|
+
const epicContextMd = this._epicContextCache.get(epic.name) || this.generateEpicContextMd(epic);
|
|
2365
|
+
for (const story of (epic.stories || [])) {
|
|
2366
|
+
const cacheKey = `${epic.name}::${story.name}`;
|
|
2367
|
+
if (storyCtxFailures.count >= 3) {
|
|
2368
|
+
this.debug(`Skipping LLM context for story "${story.name}" — too many context-size failures, using structured fallback`);
|
|
2369
|
+
this._storyContextCache.set(cacheKey, this.generateStoryContextMd(story, epic));
|
|
2370
|
+
} else {
|
|
2371
|
+
try {
|
|
2372
|
+
this.debug(`Generating context for story: ${story.name}`);
|
|
2373
|
+
const storyContextMd = await this.generateStoryContextMdLLM(story, epic, epicContextMd, provider);
|
|
2374
|
+
this._storyContextCache.set(cacheKey, storyContextMd);
|
|
2375
|
+
this.debug(`Story context generated: ${story.name} (${storyContextMd.length} bytes)`);
|
|
2376
|
+
} catch (err) {
|
|
2377
|
+
if (err.message?.toLowerCase().includes('context size') || err.message?.includes('exceeded')) {
|
|
2378
|
+
storyCtxFailures.count++;
|
|
2379
|
+
this.debug(`Context-size failure #${storyCtxFailures.count} for story context "${story.name}"`, { error: err.message });
|
|
2380
|
+
}
|
|
2381
|
+
this.debug(`Story context generation failed — using structured fallback (${story.name})`, { error: err.message });
|
|
2382
|
+
this._storyContextCache.set(cacheKey, this.generateStoryContextMd(story, epic));
|
|
2383
|
+
}
|
|
2384
|
+
}
|
|
2385
|
+
// Write context.md immediately using provisional IDs.
|
|
2386
|
+
try {
|
|
2387
|
+
const provisionalStoryDir = path.join(this.projectPath, epic.id, story.id);
|
|
2388
|
+
if (!fs.existsSync(provisionalStoryDir)) fs.mkdirSync(provisionalStoryDir, { recursive: true });
|
|
2389
|
+
fs.writeFileSync(path.join(provisionalStoryDir, 'context.md'), this._storyContextCache.get(cacheKey), 'utf8');
|
|
2390
|
+
this.debug(`Story context.md written early (provisional): ${epic.id}/${story.id}/context.md`);
|
|
2391
|
+
} catch (err) {
|
|
2392
|
+
this.debug(`Early story context.md write failed — will retry in Stage 6`, { error: err.message });
|
|
2393
|
+
}
|
|
2394
|
+
}
|
|
2395
|
+
}, ctxConcurrency);
|
|
2396
|
+
|
|
2397
|
+
// Post-generation dependency validation — verify all context-XXXX references exist
|
|
2398
|
+
this._validateDependencyReferences(hierarchy);
|
|
2399
|
+
|
|
2400
|
+
this.debug(`Context generation complete: ${this._epicContextCache.size} epic contexts, ${this._storyContextCache.size} story contexts`);
|
|
2401
|
+
}
|
|
2402
|
+
|
|
2403
|
+
/**
|
|
2404
|
+
* Validate that all dependency references across the hierarchy point to existing IDs.
|
|
2405
|
+
* Removes broken references and logs warnings. Runs after context generation and after splits.
|
|
2406
|
+
* @param {Object} hierarchy
|
|
2407
|
+
*/
|
|
2408
|
+
_validateDependencyReferences(hierarchy) {
|
|
2409
|
+
const validIds = new Set();
|
|
2410
|
+
for (const epic of hierarchy.epics) {
|
|
2411
|
+
if (epic.id) validIds.add(epic.id);
|
|
2412
|
+
for (const story of epic.stories || []) {
|
|
2413
|
+
if (story.id) validIds.add(story.id);
|
|
2414
|
+
}
|
|
2415
|
+
}
|
|
2416
|
+
|
|
2417
|
+
let brokenCount = 0;
|
|
2418
|
+
let removedCount = 0;
|
|
2419
|
+
|
|
2420
|
+
for (const epic of hierarchy.epics) {
|
|
2421
|
+
if (Array.isArray(epic.dependencies)) {
|
|
2422
|
+
epic.dependencies = epic.dependencies.filter(dep => {
|
|
2423
|
+
if (/^context-\d{4}(-\d{4})?$/.test(dep) && !validIds.has(dep)) {
|
|
2424
|
+
brokenCount++;
|
|
2425
|
+
removedCount++;
|
|
2426
|
+
this.debug(`Broken dependency removed: epic "${epic.name}" referenced non-existent "${dep}"`);
|
|
2427
|
+
return false;
|
|
2428
|
+
}
|
|
2429
|
+
return true;
|
|
2430
|
+
});
|
|
2431
|
+
}
|
|
2432
|
+
for (const story of epic.stories || []) {
|
|
2433
|
+
if (Array.isArray(story.dependencies)) {
|
|
2434
|
+
story.dependencies = story.dependencies.filter(dep => {
|
|
2435
|
+
if (/^context-\d{4}(-\d{4})?$/.test(dep) && !validIds.has(dep)) {
|
|
2436
|
+
brokenCount++;
|
|
2437
|
+
removedCount++;
|
|
2438
|
+
this.debug(`Broken dependency removed: story "${story.name}" referenced non-existent "${dep}"`);
|
|
2439
|
+
return false;
|
|
2440
|
+
}
|
|
2441
|
+
return true;
|
|
2442
|
+
});
|
|
2443
|
+
}
|
|
2444
|
+
}
|
|
2445
|
+
}
|
|
2446
|
+
|
|
2447
|
+
if (brokenCount > 0) {
|
|
2448
|
+
this.debug(`Dependency validation: removed ${removedCount} broken reference(s) across hierarchy`);
|
|
2449
|
+
console.log(` ⚠ Removed ${removedCount} broken dependency reference(s) (hallucinated IDs)`);
|
|
2450
|
+
} else {
|
|
2451
|
+
this.debug('Dependency validation: all references valid');
|
|
2452
|
+
}
|
|
2453
|
+
}
|
|
2454
|
+
|
|
2455
|
+
  /**
   * Generate narrative doc.md files for all epics and stories from their canonical context.md.
   * Replaces the old doc-distribution stage. Uses doc-writer-epic.md and doc-writer-story.md agents.
   *
   * Context chain:
   *   Epic doc.md  : root doc.md + epic context.md → narrative
   *   Story doc.md : root doc.md + parent epic context.md + story context.md → narrative
   *
   * Strict: agents are instructed not to add scope beyond what is in context.md.
   * On any LLM failure (or when root doc.md is empty) a minimal name+description
   * stub is written instead, so a doc.md file always exists on disk.
   *
   * @param {Object} hierarchy - Hierarchy with final validated epics and stories (real IDs)
   * @param {string} rootDocContent - Content of root doc.md
   * @param {Function} progressCallback - Optional async progress callback
   */
  async generateDocFiles(hierarchy, rootDocContent, progressCallback = null) {
    this.debugStage(6.5, 'Generate Narrative doc.md Files from Canonical context.md');

    const epicAgentInstructions = loadAgent('doc-writer-epic.md');
    const storyAgentInstructions = loadAgent('doc-writer-story.md');
    // Uses 'doc-generation' stage config if defined; falls back to ceremony-level provider
    const provider = await this.getProviderForStageInstance('doc-generation');

    // Without a root doc.md there is nothing for the agents to ground in —
    // every doc.md becomes a stub.
    const doGenerate = rootDocContent && rootDocContent.length > 0;
    if (!doGenerate) {
      this.debug('No root doc.md content — skipping doc generation, writing minimal stubs');
    }

    // All epics run in parallel; each epic's stories also run in parallel within it.
    await Promise.all(hierarchy.epics.map(async (epic) => {
      const epicDir = path.join(this.projectPath, epic.id);
      const epicContextPath = path.join(epicDir, 'context.md');
      const epicDocPath = path.join(epicDir, 'doc.md');

      // Read the epic's canonical context.md (written earlier in writeHierarchyFiles)
      let epicContextMd = '';
      if (fs.existsSync(epicContextPath)) {
        epicContextMd = fs.readFileSync(epicContextPath, 'utf8');
      } else {
        // Missing on disk — regenerate the structured form in memory.
        epicContextMd = this.generateEpicContextMd(epic);
      }

      // Generate epic doc.md
      if (doGenerate) {
        await progressCallback?.(null, `Generating documentation → ${epic.name}`, {});
        this.debug(`Generating doc.md for epic ${epic.id}: ${epic.name}`);
        try {
          const prompt = `## Project Documentation\n\n${rootDocContent}\n\n---\n\n## Epic Canonical Specification\n\n${epicContextMd}\n\nWrite the epic's doc.md. Return JSON with a \`doc\` field.`;
          // Heartbeat keeps the progress UI alive during the long LLM call;
          // retryWithBackoff handles transient provider failures.
          const result = await this._withProgressHeartbeat(
            () => this.retryWithBackoff(
              () => provider.generateJSON(prompt, epicAgentInstructions),
              `doc generation for epic: ${epic.name}`
            ),
            (elapsed) => {
              if (elapsed < 15) return `Writing ${epic.name} documentation…`;
              if (elapsed < 40) return `Expanding ${epic.name} narrative…`;
              return `Still writing…`;
            },
            progressCallback,
            10000
          );
          // Empty or non-string doc → fall back to the stub content.
          const epicDoc = (typeof result.doc === 'string' && result.doc.trim())
            ? result.doc
            : `# ${epic.name}\n\n${epic.description || ''}\n`;
          fs.writeFileSync(epicDocPath, epicDoc, 'utf8');
          this.debug(`Epic doc.md written: ${epicDoc.length} bytes`);
        } catch (err) {
          this.debug(`Epic doc generation failed for ${epic.id} — writing stub`, { error: err.message });
          fs.writeFileSync(epicDocPath, `# ${epic.name}\n\n${epic.description || ''}\n`, 'utf8');
        }
      } else {
        fs.writeFileSync(epicDocPath, `# ${epic.name}\n\n${epic.description || ''}\n`, 'utf8');
      }

      // Generate story doc.md files in parallel within this epic
      await Promise.all((epic.stories || []).map(async (story) => {
        const storyDir = path.join(epicDir, story.id);
        const storyContextPath = path.join(storyDir, 'context.md');
        const storyDocPath = path.join(storyDir, 'doc.md');

        let storyContextMd = '';
        if (fs.existsSync(storyContextPath)) {
          storyContextMd = fs.readFileSync(storyContextPath, 'utf8');
        } else {
          storyContextMd = this.generateStoryContextMd(story, epic);
        }

        if (doGenerate) {
          await progressCallback?.(null, `  Generating documentation → ${story.name}`, {});
          this.debug(`Generating doc.md for story ${story.id}: ${story.name}`);
          try {
            const prompt = `## Project Documentation\n\n${rootDocContent}\n\n---\n\n## Parent Epic Canonical Specification\n\n${epicContextMd}\n\n---\n\n## Story Canonical Specification\n\n${storyContextMd}\n\nWrite the story's doc.md. Return JSON with a \`doc\` field.`;
            const result = await this._withProgressHeartbeat(
              () => this.retryWithBackoff(
                () => provider.generateJSON(prompt, storyAgentInstructions),
                `doc generation for story: ${story.name}`
              ),
              (elapsed) => {
                if (elapsed < 15) return `Writing ${story.name} documentation…`;
                if (elapsed < 40) return `Expanding ${story.name} narrative…`;
                return `Still writing…`;
              },
              progressCallback,
              10000
            );
            const storyDoc = (typeof result.doc === 'string' && result.doc.trim())
              ? result.doc
              : `# ${story.name}\n\n${story.description || ''}\n`;
            fs.writeFileSync(storyDocPath, storyDoc, 'utf8');
            this.debug(`Story doc.md written: ${storyDoc.length} bytes`);
          } catch (err) {
            this.debug(`Story doc generation failed for ${story.id} — writing stub`, { error: err.message });
            fs.writeFileSync(storyDocPath, `# ${story.name}\n\n${story.description || ''}\n`, 'utf8');
          }
        } else {
          fs.writeFileSync(storyDocPath, `# ${story.name}\n\n${story.description || ''}\n`, 'utf8');
        }
      }));
    }));
    const epicCount = hierarchy.epics.length;
    const storyCount = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
    this.debug(`Doc generation complete: ${epicCount} epics + ${storyCount} stories`);
  }
|
|
764
2577
|
|
|
765
2578
|
// STAGE 5: Multi-Agent Validation
|
|
@@ -792,6 +2605,41 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
792
2605
|
this.debug('useContextualSelection=true but no scope available — skipping context extraction');
|
|
793
2606
|
}
|
|
794
2607
|
|
|
2608
|
+
// Generate and write root context.md (canonical project representation)
|
|
2609
|
+
let rootContextMd = null;
|
|
2610
|
+
try {
|
|
2611
|
+
let docMdContent = '';
|
|
2612
|
+
if (fs.existsSync(this.projectDocPath)) {
|
|
2613
|
+
docMdContent = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
2614
|
+
}
|
|
2615
|
+
rootContextMd = this.generateRootContextMd(projectContext || {}, docMdContent, hierarchy);
|
|
2616
|
+
const rootContextPath = path.join(this.projectPath, 'context.md');
|
|
2617
|
+
fs.writeFileSync(rootContextPath, rootContextMd, 'utf8');
|
|
2618
|
+
this.debug(`Root context.md written (${rootContextMd.length} bytes)`);
|
|
2619
|
+
} catch (err) {
|
|
2620
|
+
this.debug('Failed to write root context.md — continuing without it', { error: err.message });
|
|
2621
|
+
}
|
|
2622
|
+
|
|
2623
|
+
// Store root context on this instance so LLM context writers can access it
|
|
2624
|
+
this.rootContextMd = rootContextMd;
|
|
2625
|
+
|
|
2626
|
+
// Pre-generate LLM context.md for all epics and stories before validation
|
|
2627
|
+
// This gives validators rich, complete context rather than sparse structured stubs
|
|
2628
|
+
await progressCallback?.(null, 'Generating canonical context for epics and stories…', {});
|
|
2629
|
+
const _tsCtx = Date.now();
|
|
2630
|
+
await this.generateContextFiles(hierarchy, progressCallback);
|
|
2631
|
+
this.debugTiming('generateContextFiles', _tsCtx);
|
|
2632
|
+
|
|
2633
|
+
// Generate scaffolding epic AFTER all domain contexts are written — it reads them
|
|
2634
|
+
// to know the exact tech requirements (packages, infra, test frameworks, etc.)
|
|
2635
|
+
try {
|
|
2636
|
+
const _tsScaffold = Date.now();
|
|
2637
|
+
await this._generateScaffoldingEpic(hierarchy, progressCallback);
|
|
2638
|
+
this.debugTiming('generateScaffoldingEpic', _tsScaffold);
|
|
2639
|
+
} catch (err) {
|
|
2640
|
+
this.debug(`Scaffolding epic generation failed (non-critical): ${err.message}`);
|
|
2641
|
+
}
|
|
2642
|
+
|
|
795
2643
|
const validator = new EpicStoryValidator(
|
|
796
2644
|
this.llmProvider,
|
|
797
2645
|
this.verificationTracker,
|
|
@@ -801,6 +2649,11 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
801
2649
|
projectContext
|
|
802
2650
|
);
|
|
803
2651
|
this._validator = validator;
|
|
2652
|
+
if (this._quotaExceededCallback) {
|
|
2653
|
+
this._validator.setQuotaExceededCallback(this._quotaExceededCallback);
|
|
2654
|
+
}
|
|
2655
|
+
if (rootContextMd) this._validator.setRootContextMd(rootContextMd);
|
|
2656
|
+
this._validator.setPromptLogger(this._promptLogger);
|
|
804
2657
|
this._validator.setTokenCallback((delta, stageHint) => {
|
|
805
2658
|
const key = stageHint
|
|
806
2659
|
? `${this.ceremonyName}-${stageHint}`
|
|
@@ -812,13 +2665,20 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
812
2665
|
}
|
|
813
2666
|
});
|
|
814
2667
|
|
|
815
|
-
// Validate
|
|
816
|
-
|
|
2668
|
+
// ── Validate epics with concurrency control ──────────────────────────────
|
|
2669
|
+
// Each epic (+ its stories) is validated independently. We run up to
|
|
2670
|
+
// EPIC_CONCURRENCY epics in parallel to reduce wall-clock time while
|
|
2671
|
+
// respecting API rate limits. Stories within each epic remain sequential
|
|
2672
|
+
// because split-story splice logic requires index stability.
|
|
2673
|
+
const defaultConcurrency = 2;
|
|
2674
|
+
const EPIC_CONCURRENCY = this.stagesConfig?.validation?.epicConcurrency ?? defaultConcurrency;
|
|
2675
|
+
|
|
2676
|
+
const validateSingleEpic = async (epic) => {
|
|
817
2677
|
this.debug(`\nValidating Epic: ${epic.id} "${epic.name}"`);
|
|
818
2678
|
await progressCallback?.(null, `Validating Epic: ${epic.name}`, {});
|
|
819
2679
|
|
|
820
|
-
//
|
|
821
|
-
const epicContext =
|
|
2680
|
+
// Use LLM-generated context if available; fall back to structured format
|
|
2681
|
+
const epicContext = this._epicContextCache.get(epic.name) || this.generateEpicContextMd(epic);
|
|
822
2682
|
|
|
823
2683
|
// Validate epic with multiple domain validators
|
|
824
2684
|
const _tsEpic = Date.now();
|
|
@@ -834,13 +2694,21 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
834
2694
|
this.displayValidationIssues(epicValidation);
|
|
835
2695
|
}
|
|
836
2696
|
|
|
837
|
-
// Validate each story under this epic
|
|
838
|
-
|
|
2697
|
+
// Validate each story under this epic.
|
|
2698
|
+
// Use index-based loop so split stories inserted via splice() are validated in-place.
|
|
2699
|
+
// Keep in sync with STORY_AC_CAP in epic-story-validator.js
|
|
2700
|
+
const STORY_AC_CAP = 20;
|
|
2701
|
+
const MAX_SPLIT_DEPTH = 1;
|
|
2702
|
+
let si = 0;
|
|
2703
|
+
while (si < (epic.stories || []).length) {
|
|
2704
|
+
const story = epic.stories[si];
|
|
2705
|
+
const splitDepth = story._splitDepth || 0;
|
|
2706
|
+
|
|
839
2707
|
this.debug(`\nValidating Story: ${story.id} "${story.name}"`);
|
|
840
2708
|
await progressCallback?.(null, ` Validating story: ${story.name}`, {});
|
|
841
2709
|
|
|
842
|
-
//
|
|
843
|
-
const storyContext =
|
|
2710
|
+
// Use LLM-generated context if available; fall back to structured format
|
|
2711
|
+
const storyContext = this._storyContextCache.get(`${epic.name}::${story.name}`) || this.generateStoryContextMd(story, epic);
|
|
844
2712
|
|
|
845
2713
|
// Validate story with multiple domain validators
|
|
846
2714
|
const _tsStory = Date.now();
|
|
@@ -855,9 +2723,122 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
855
2723
|
this.debug(`Story "${story.name}" needs improvement - showing issues`);
|
|
856
2724
|
this.displayValidationIssues(storyValidation);
|
|
857
2725
|
}
|
|
2726
|
+
|
|
2727
|
+
// ── Split detection ────────────────────────────────────────────────────────
|
|
2728
|
+
// Trigger split when:
|
|
2729
|
+
// (a) AC cap reached — story too large for solver to improve further, OR
|
|
2730
|
+
// (b) SA issued a SPLIT RECOMMENDATION — story has too many concerns to resolve
|
|
2731
|
+
// by adding ACs regardless of current AC count
|
|
2732
|
+
const acCount = (story.acceptance || []).length;
|
|
2733
|
+
|
|
2734
|
+
const splitRecommended = storyValidation._splitRecommended === true
|
|
2735
|
+
|| storyValidation.microCheckDetails?.splitRecommendation === true;
|
|
2736
|
+
const shouldSplit =
|
|
2737
|
+
storyValidation.overallStatus === 'needs-improvement' &&
|
|
2738
|
+
(acCount >= STORY_AC_CAP || splitRecommended) &&
|
|
2739
|
+
splitDepth < MAX_SPLIT_DEPTH;
|
|
2740
|
+
|
|
2741
|
+
if (shouldSplit) {
|
|
2742
|
+
const allIssues = [
|
|
2743
|
+
...(storyValidation.criticalIssues || []),
|
|
2744
|
+
...(storyValidation.majorIssues || []),
|
|
2745
|
+
];
|
|
2746
|
+
|
|
2747
|
+
const splitReason = splitRecommended
|
|
2748
|
+
? `SA split recommendation (${acCount} ACs)`
|
|
2749
|
+
: `too large (${acCount} ACs)`;
|
|
2750
|
+
await progressCallback?.(null, ` Splitting story: ${story.name}`, {});
|
|
2751
|
+
await validator._detail(`✂ [${story.id}] ${splitReason} — attempting split…`);
|
|
2752
|
+
console.log(` ✂ Splitting story "${story.name}" — ${splitReason}`);
|
|
2753
|
+
|
|
2754
|
+
const splitStories = await validator._splitStory(story, epic, allIssues);
|
|
2755
|
+
|
|
2756
|
+
if (splitStories) {
|
|
2757
|
+
// Tag split stories with depth guard and parent reference (runtime-only, prefixed _)
|
|
2758
|
+
for (const s of splitStories) {
|
|
2759
|
+
s._splitDepth = splitDepth + 1;
|
|
2760
|
+
s._splitFrom = story.id;
|
|
2761
|
+
}
|
|
2762
|
+
|
|
2763
|
+
// Replace the original story with the split stories in-place
|
|
2764
|
+
epic.stories.splice(si, 1, ...splitStories);
|
|
2765
|
+
|
|
2766
|
+
// Invalidate stale context cache entry for the original story name
|
|
2767
|
+
this._storyContextCache.delete(`${epic.name}::${story.name}`);
|
|
2768
|
+
|
|
2769
|
+
// Pre-populate structured context for split stories so they don't start with nothing.
|
|
2770
|
+
// This avoids an expensive LLM context-gen call for each split story — the structured
|
|
2771
|
+
// fallback is sufficient since validators will refine the story content anyway.
|
|
2772
|
+
for (const splitStory of splitStories) {
|
|
2773
|
+
const splitKey = `${epic.name}::${splitStory.name}`;
|
|
2774
|
+
if (!this._storyContextCache.has(splitKey)) {
|
|
2775
|
+
this._storyContextCache.set(splitKey, this.generateStoryContextMd(splitStory, epic));
|
|
2776
|
+
}
|
|
2777
|
+
}
|
|
2778
|
+
|
|
2779
|
+
// Post-split dependency reconciliation: any story that depended on the
|
|
2780
|
+
// original (now-removed) story ID should depend on the first split story instead.
|
|
2781
|
+
const originalId = story.id;
|
|
2782
|
+
const replacementId = splitStories[0]?.id;
|
|
2783
|
+
if (originalId && replacementId) {
|
|
2784
|
+
for (const e of hierarchy.epics) {
|
|
2785
|
+
for (const s of e.stories || []) {
|
|
2786
|
+
if (Array.isArray(s.dependencies)) {
|
|
2787
|
+
const depIdx = s.dependencies.indexOf(originalId);
|
|
2788
|
+
if (depIdx !== -1 && !splitStories.some(sp => sp.id === s.id)) {
|
|
2789
|
+
s.dependencies[depIdx] = replacementId;
|
|
2790
|
+
this.debug(`Post-split dep remap: story "${s.name}" now depends on "${replacementId}" (was "${originalId}")`);
|
|
2791
|
+
}
|
|
2792
|
+
}
|
|
2793
|
+
}
|
|
2794
|
+
}
|
|
2795
|
+
}
|
|
2796
|
+
|
|
2797
|
+
const names = splitStories.map(s => `"${s.name}"`).join(' + ');
|
|
2798
|
+
await validator._detail(`✂ [${story.id}] split into ${splitStories.length}: ${names}`);
|
|
2799
|
+
console.log(` ✂ Split into ${splitStories.length} stories: ${names}`);
|
|
2800
|
+
|
|
2801
|
+
// Do NOT increment si — the loop re-enters at position si
|
|
2802
|
+
// which now holds the first split story
|
|
2803
|
+
continue;
|
|
2804
|
+
} else {
|
|
2805
|
+
console.log(` ⚠ Split failed for "${story.name}" — keeping original story`);
|
|
2806
|
+
}
|
|
2807
|
+
}
|
|
2808
|
+
|
|
2809
|
+
si++;
|
|
2810
|
+
}
|
|
2811
|
+
};
|
|
2812
|
+
|
|
2813
|
+
// Run epics with concurrency limit
|
|
2814
|
+
if (EPIC_CONCURRENCY <= 1 || hierarchy.epics.length <= 1) {
|
|
2815
|
+
// Sequential fallback for local LLMs or single epic
|
|
2816
|
+
for (const epic of hierarchy.epics) {
|
|
2817
|
+
await validateSingleEpic(epic);
|
|
2818
|
+
}
|
|
2819
|
+
} else {
|
|
2820
|
+
// Concurrency-limited parallel execution
|
|
2821
|
+
const queue = [...hierarchy.epics];
|
|
2822
|
+
const running = new Set();
|
|
2823
|
+
let idx = 0;
|
|
2824
|
+
|
|
2825
|
+
while (idx < queue.length || running.size > 0) {
|
|
2826
|
+
// Launch tasks up to concurrency limit
|
|
2827
|
+
while (idx < queue.length && running.size < EPIC_CONCURRENCY) {
|
|
2828
|
+
const epic = queue[idx++];
|
|
2829
|
+
const promise = validateSingleEpic(epic).then(() => running.delete(promise));
|
|
2830
|
+
running.add(promise);
|
|
2831
|
+
}
|
|
2832
|
+
// Wait for at least one to complete before launching more
|
|
2833
|
+
if (running.size > 0) {
|
|
2834
|
+
await Promise.race(running);
|
|
2835
|
+
}
|
|
858
2836
|
}
|
|
859
2837
|
}
|
|
860
2838
|
|
|
2839
|
+
// Post-validation dependency cleanup — splits may have introduced broken references
|
|
2840
|
+
this._validateDependencyReferences(hierarchy);
|
|
2841
|
+
|
|
861
2842
|
return hierarchy;
|
|
862
2843
|
}
|
|
863
2844
|
|
|
@@ -874,11 +2855,12 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
874
2855
|
const prefix = statusPrefix[validation.overallStatus] || '';
|
|
875
2856
|
sendOutput(`${prefix} ${type}: ${name}\n`);
|
|
876
2857
|
sendIndented(`Overall Score: ${validation.averageScore}/100`, 1);
|
|
877
|
-
|
|
878
|
-
sendIndented(`
|
|
2858
|
+
const agentCount = validation.validatorCount ?? validation.validatorResults?.length ?? 0;
|
|
2859
|
+
sendIndented(`Validators: ${agentCount} agents`, 1);
|
|
2860
|
+
sendIndented(`Issues: ${validation.criticalIssues?.length || 0} critical, ${validation.majorIssues?.length || 0} major, ${validation.minorIssues?.length || 0} minor`, 1);
|
|
879
2861
|
|
|
880
2862
|
// Show strengths if excellent or acceptable
|
|
881
|
-
if (validation.overallStatus !== 'needs-improvement' && validation.strengths
|
|
2863
|
+
if (validation.overallStatus !== 'needs-improvement' && validation.strengths?.length > 0) {
|
|
882
2864
|
sendIndented(`Strengths: ${validation.strengths.slice(0, 2).join(', ')}`, 1);
|
|
883
2865
|
}
|
|
884
2866
|
|
|
@@ -890,7 +2872,7 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
890
2872
|
*/
|
|
891
2873
|
displayValidationIssues(validation) {
|
|
892
2874
|
// Show critical issues
|
|
893
|
-
if (validation.criticalIssues
|
|
2875
|
+
if (validation.criticalIssues?.length > 0) {
|
|
894
2876
|
this.debug('Critical Issues', validation.criticalIssues.slice(0, 3).map(issue => ({
|
|
895
2877
|
domain: issue.domain,
|
|
896
2878
|
description: issue.description,
|
|
@@ -898,8 +2880,16 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
898
2880
|
})));
|
|
899
2881
|
}
|
|
900
2882
|
|
|
901
|
-
// Show
|
|
902
|
-
if (validation.
|
|
2883
|
+
// Show major issues (micro-check format)
|
|
2884
|
+
if (validation.majorIssues?.length > 0) {
|
|
2885
|
+
this.debug('Major Issues', validation.majorIssues.slice(0, 3).map(issue => ({
|
|
2886
|
+
description: issue.description,
|
|
2887
|
+
suggestion: issue.suggestion
|
|
2888
|
+
})));
|
|
2889
|
+
}
|
|
2890
|
+
|
|
2891
|
+
// Show improvement priorities (monolithic format, if present)
|
|
2892
|
+
if (validation.improvementPriorities?.length > 0) {
|
|
903
2893
|
this.debug('Improvement Priorities', validation.improvementPriorities.slice(0, 3).map((priority, i) => ({
|
|
904
2894
|
rank: i + 1,
|
|
905
2895
|
priority: priority.priority,
|
|
@@ -1020,12 +3010,16 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
1020
3010
|
this.debugStage(6, 'Renumber IDs');
|
|
1021
3011
|
this.debug('Renumbering hierarchy to avoid ID collisions...');
|
|
1022
3012
|
|
|
3013
|
+
// Build old→new ID mapping for dependency remapping
|
|
3014
|
+
const idMap = new Map();
|
|
3015
|
+
|
|
1023
3016
|
let nextEpicNum = maxEpicNum.value + 1;
|
|
1024
3017
|
this.debug(`Next epic number: ${nextEpicNum} (after existing ${maxEpicNum.value})`);
|
|
1025
3018
|
|
|
1026
3019
|
for (const epic of hierarchy.epics) {
|
|
1027
3020
|
const oldEpicId = epic.id;
|
|
1028
3021
|
const newEpicId = `context-${String(nextEpicNum).padStart(4, '0')}`;
|
|
3022
|
+
idMap.set(oldEpicId, newEpicId);
|
|
1029
3023
|
epic.id = newEpicId;
|
|
1030
3024
|
|
|
1031
3025
|
this.debug(`ID mapping - Epic "${epic.name}": ${oldEpicId} -> ${newEpicId}`);
|
|
@@ -1035,6 +3029,7 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
1035
3029
|
for (const story of epic.stories || []) {
|
|
1036
3030
|
const oldStoryId = story.id;
|
|
1037
3031
|
const newStoryId = `${newEpicId}-${String(nextStoryNum).padStart(4, '0')}`;
|
|
3032
|
+
idMap.set(oldStoryId, newStoryId);
|
|
1038
3033
|
story.id = newStoryId;
|
|
1039
3034
|
|
|
1040
3035
|
this.debug(`ID mapping - Story "${story.name}": ${oldStoryId} -> ${newStoryId}`);
|
|
@@ -1044,6 +3039,31 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
1044
3039
|
nextEpicNum++;
|
|
1045
3040
|
}
|
|
1046
3041
|
|
|
3042
|
+
// Remap all dependency references using the old→new ID map
|
|
3043
|
+
let remappedCount = 0;
|
|
3044
|
+
for (const epic of hierarchy.epics) {
|
|
3045
|
+
if (Array.isArray(epic.dependencies)) {
|
|
3046
|
+
epic.dependencies = epic.dependencies.map(dep => {
|
|
3047
|
+
const mapped = idMap.get(dep);
|
|
3048
|
+
if (mapped) { remappedCount++; return mapped; }
|
|
3049
|
+
return dep;
|
|
3050
|
+
});
|
|
3051
|
+
}
|
|
3052
|
+
for (const story of epic.stories || []) {
|
|
3053
|
+
if (Array.isArray(story.dependencies)) {
|
|
3054
|
+
story.dependencies = story.dependencies.map(dep => {
|
|
3055
|
+
const mapped = idMap.get(dep);
|
|
3056
|
+
if (mapped) { remappedCount++; return mapped; }
|
|
3057
|
+
return dep;
|
|
3058
|
+
});
|
|
3059
|
+
}
|
|
3060
|
+
}
|
|
3061
|
+
}
|
|
3062
|
+
|
|
3063
|
+
if (remappedCount > 0) {
|
|
3064
|
+
this.debug(`Remapped ${remappedCount} dependency reference(s) to new IDs`);
|
|
3065
|
+
}
|
|
3066
|
+
|
|
1047
3067
|
this.debug('Renumbered hierarchy', {
|
|
1048
3068
|
epics: hierarchy.epics.map(e => ({ id: e.id, name: e.name, storyCount: e.stories?.length || 0 }))
|
|
1049
3069
|
});
|
|
@@ -1056,22 +3076,50 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
1056
3076
|
this.debugStage(7, 'Write Hierarchy Files + Distribute Documentation');
|
|
1057
3077
|
this.debug('Writing hierarchy files with documentation distribution');
|
|
1058
3078
|
|
|
1059
|
-
//
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
3079
|
+
// Phase -1: Build name→id map and normalize all dependencies to IDs
|
|
3080
|
+
const nameToId = {};
|
|
3081
|
+
for (const epic of hierarchy.epics) {
|
|
3082
|
+
nameToId[epic.name] = epic.id;
|
|
3083
|
+
nameToId[epic.name.toLowerCase()] = epic.id;
|
|
3084
|
+
for (const story of epic.stories || []) {
|
|
3085
|
+
nameToId[story.name] = story.id;
|
|
3086
|
+
nameToId[story.name.toLowerCase()] = story.id;
|
|
3087
|
+
}
|
|
3088
|
+
}
|
|
3089
|
+
const normalizeDeps = (deps) => (deps || []).map(d => nameToId[d] || nameToId[d?.toLowerCase?.()] || d);
|
|
3090
|
+
for (const epic of hierarchy.epics) {
|
|
3091
|
+
epic.dependencies = normalizeDeps(epic.dependencies);
|
|
3092
|
+
for (const story of epic.stories || []) {
|
|
3093
|
+
story.dependencies = normalizeDeps(story.dependencies);
|
|
3094
|
+
}
|
|
1066
3095
|
}
|
|
1067
3096
|
|
|
1068
|
-
|
|
3097
|
+
// Phase 0: Rename all provisional epic folders in REVERSE order to avoid collisions
|
|
3098
|
+
// (e.g., 0001→0002 must happen after 0002→0003, not before)
|
|
3099
|
+
const epicsToRename = hierarchy.epics
|
|
3100
|
+
.filter(e => e._provisionalId && e._provisionalId !== e.id)
|
|
3101
|
+
.reverse();
|
|
3102
|
+
for (const epic of epicsToRename) {
|
|
3103
|
+
const provisionalDir = path.join(this.projectPath, epic._provisionalId);
|
|
3104
|
+
const targetDir = path.join(this.projectPath, epic.id);
|
|
3105
|
+
if (fs.existsSync(provisionalDir) && !fs.existsSync(targetDir)) {
|
|
3106
|
+
fs.renameSync(provisionalDir, targetDir);
|
|
3107
|
+
this.debug(`Renamed provisional epic folder: ${epic._provisionalId} → ${epic.id}`);
|
|
3108
|
+
}
|
|
3109
|
+
}
|
|
1069
3110
|
|
|
1070
|
-
// Phase 1 (sync): Create all directories
|
|
3111
|
+
// Phase 1 (sync): Create all directories, write work.json and context.md files
|
|
1071
3112
|
for (const epic of hierarchy.epics) {
|
|
1072
3113
|
const epicDir = path.join(this.projectPath, epic.id);
|
|
1073
3114
|
if (!fs.existsSync(epicDir)) fs.mkdirSync(epicDir, { recursive: true });
|
|
1074
3115
|
|
|
3116
|
+
// Use LLM-generated context if cached; patch the id line since IDs may have changed after renumbering
|
|
3117
|
+
let epicContextMd = this._epicContextCache.get(epic.name) || this.generateEpicContextMd(epic);
|
|
3118
|
+
epicContextMd = epicContextMd.replace(/^(- id: ).+$/m, `$1${epic.id}`);
|
|
3119
|
+
const epicContextPath = path.join(epicDir, 'context.md');
|
|
3120
|
+
fs.writeFileSync(epicContextPath, epicContextMd, 'utf8');
|
|
3121
|
+
this.debug(`Writing ${epicContextPath} (${epicContextMd.length} bytes)`);
|
|
3122
|
+
|
|
1075
3123
|
const epicWorkJson = {
|
|
1076
3124
|
id: epic.id,
|
|
1077
3125
|
name: epic.name,
|
|
@@ -1092,11 +3140,34 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
1092
3140
|
const workJsonContent = JSON.stringify(epicWorkJson, null, 2);
|
|
1093
3141
|
fs.writeFileSync(workJsonPath, workJsonContent, 'utf8');
|
|
1094
3142
|
this.debug(`Writing ${workJsonPath} (${workJsonContent.length} bytes)`);
|
|
3143
|
+
await this._itemWrittenCallback?.({ itemId: epic.id, itemType: 'epic' });
|
|
3144
|
+
|
|
3145
|
+
// Rename provisional story folders in reverse order to avoid collisions
|
|
3146
|
+
// (e.g., 0001b→0002 must happen after 0002→0003, not before)
|
|
3147
|
+
const storiesToRename = (epic.stories || [])
|
|
3148
|
+
.filter(s => s._provisionalId && s._provisionalId !== s.id)
|
|
3149
|
+
.reverse();
|
|
3150
|
+
for (const story of storiesToRename) {
|
|
3151
|
+
const provisionalStoryDir = path.join(epicDir, story._provisionalId);
|
|
3152
|
+
const targetDir = path.join(epicDir, story.id);
|
|
3153
|
+
if (fs.existsSync(provisionalStoryDir) && !fs.existsSync(targetDir)) {
|
|
3154
|
+
fs.renameSync(provisionalStoryDir, targetDir);
|
|
3155
|
+
this.debug(`Renamed provisional story folder: ${story._provisionalId} → ${story.id}`);
|
|
3156
|
+
}
|
|
3157
|
+
}
|
|
1095
3158
|
|
|
1096
3159
|
for (const story of epic.stories || []) {
|
|
1097
3160
|
const storyDir = path.join(epicDir, story.id);
|
|
1098
3161
|
if (!fs.existsSync(storyDir)) fs.mkdirSync(storyDir, { recursive: true });
|
|
1099
3162
|
|
|
3163
|
+
// Use LLM-generated context if cached; patch id and epic-ref lines after renumbering
|
|
3164
|
+
let storyContextMd = this._storyContextCache.get(`${epic.name}::${story.name}`) || this.generateStoryContextMd(story, epic);
|
|
3165
|
+
storyContextMd = storyContextMd.replace(/^(- id: ).+$/m, `$1${story.id}`);
|
|
3166
|
+
storyContextMd = storyContextMd.replace(/^(- epic: ).+$/m, `$1${epic.id} (${epic.name})`);
|
|
3167
|
+
const storyContextPath = path.join(storyDir, 'context.md');
|
|
3168
|
+
fs.writeFileSync(storyContextPath, storyContextMd, 'utf8');
|
|
3169
|
+
this.debug(`Writing ${storyContextPath} (${storyContextMd.length} bytes)`);
|
|
3170
|
+
|
|
1100
3171
|
const storyWorkJson = {
|
|
1101
3172
|
id: story.id,
|
|
1102
3173
|
name: story.name,
|
|
@@ -1117,69 +3188,77 @@ Return your response as JSON following the exact structure specified in your ins
|
|
|
1117
3188
|
const storyWorkJsonContent = JSON.stringify(storyWorkJson, null, 2);
|
|
1118
3189
|
fs.writeFileSync(storyWorkJsonPath, storyWorkJsonContent, 'utf8');
|
|
1119
3190
|
this.debug(`Writing ${storyWorkJsonPath} (${storyWorkJsonContent.length} bytes)`);
|
|
3191
|
+
await this._itemWrittenCallback?.({ itemId: story.id, itemType: 'story' });
|
|
1120
3192
|
}
|
|
1121
3193
|
}
|
|
1122
3194
|
|
|
1123
|
-
//
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
|
|
3195
|
+
// Set dependency-ready items to 'ready' status instead of 'planned'.
|
|
3196
|
+
// Items whose dependencies are all within this run (and thus all "planned") are ready
|
|
3197
|
+
// if those dependencies have no external blockers. Phase 1 epics/stories with no
|
|
3198
|
+
// dependencies are always ready. Others stay 'planned' until their deps are completed.
|
|
3199
|
+
try {
|
|
3200
|
+
const { checkDependenciesReady } = await import('./dependency-checker.js');
|
|
1127
3201
|
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
3202
|
+
// Build lookup of all items in this hierarchy
|
|
3203
|
+
const lookup = {};
|
|
3204
|
+
for (const epic of hierarchy.epics) {
|
|
3205
|
+
lookup[epic.id] = {
|
|
3206
|
+
id: epic.id, name: epic.name, type: 'epic',
|
|
3207
|
+
status: 'planned', dependencies: epic.dependencies || [],
|
|
3208
|
+
};
|
|
3209
|
+
for (const story of epic.stories || []) {
|
|
3210
|
+
lookup[story.id] = {
|
|
3211
|
+
id: story.id, name: story.name, type: 'story',
|
|
3212
|
+
status: 'planned', dependencies: story.dependencies || [],
|
|
3213
|
+
};
|
|
1139
3214
|
}
|
|
3215
|
+
}
|
|
1140
3216
|
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
3217
|
+
// Items with no dependencies (or all deps are outside this hierarchy and assumed done) → ready
|
|
3218
|
+
let readyCount = 0;
|
|
3219
|
+
for (const epic of hierarchy.epics) {
|
|
3220
|
+
const epicDeps = (epic.dependencies || []).filter(d => lookup[d]);
|
|
3221
|
+
if (epicDeps.length === 0) {
|
|
3222
|
+
// Epic has no deps within this hierarchy → ready
|
|
3223
|
+
this._setWorkJsonStatus(path.join(this.projectPath, epic.id, 'work.json'), 'ready');
|
|
3224
|
+
lookup[epic.id].status = 'ready';
|
|
3225
|
+
readyCount++;
|
|
3226
|
+
|
|
3227
|
+
// Its stories with no deps (or deps only on this now-ready epic) → ready
|
|
3228
|
+
for (const story of epic.stories || []) {
|
|
3229
|
+
const storyResult = checkDependenciesReady(story.id, lookup);
|
|
3230
|
+
if (storyResult.ready) {
|
|
3231
|
+
this._setWorkJsonStatus(path.join(this.projectPath, epic.id, story.id, 'work.json'), 'ready');
|
|
3232
|
+
lookup[story.id].status = 'ready';
|
|
3233
|
+
readyCount++;
|
|
1146
3234
|
}
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
// Write all doc.md files for this epic
|
|
1156
|
-
const epicDocPath = path.join(epicDir, 'doc.md');
|
|
1157
|
-
fs.writeFileSync(epicDocPath, epicDocContent, 'utf8');
|
|
1158
|
-
this.debug(`Writing ${epicDocPath} (${epicDocContent.length} bytes)`);
|
|
3235
|
+
}
|
|
3236
|
+
}
|
|
3237
|
+
}
|
|
3238
|
+
this.debug(`Set ${readyCount} items to 'ready' status (dependency-free)`);
|
|
3239
|
+
} catch (err) {
|
|
3240
|
+
this.debug(`Failed to compute ready status (non-critical): ${err.message}`);
|
|
3241
|
+
}
|
|
1159
3242
|
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
fs.writeFileSync(storyDocPath, storyDocs[si], 'utf8');
|
|
1163
|
-
this.debug(`Writing ${storyDocPath} (${storyDocs[si].length} bytes)`);
|
|
1164
|
-
});
|
|
1165
|
-
})
|
|
1166
|
-
);
|
|
3243
|
+
// Phase 2 (doc.md): Handled by generateDocFiles() called after this method.
|
|
3244
|
+
// context.md files written in Phase 1 are the canonical source for doc generation.
|
|
1167
3245
|
|
|
1168
3246
|
const epicCount = hierarchy.epics.length;
|
|
1169
3247
|
const storyCount = hierarchy.epics.reduce((sum, epic) => sum + (epic.stories || []).length, 0);
|
|
1170
3248
|
|
|
1171
3249
|
// Log all files written this run for cross-run comparison
|
|
1172
|
-
this.debugSection('FILES WRITTEN THIS RUN');
|
|
3250
|
+
this.debugSection('FILES WRITTEN THIS RUN (Phase 1 — work.json + context.md)');
|
|
1173
3251
|
const filesWritten = [];
|
|
3252
|
+
filesWritten.push('context.md');
|
|
1174
3253
|
for (const epic of hierarchy.epics) {
|
|
1175
3254
|
filesWritten.push(`${epic.id}/work.json`);
|
|
1176
|
-
filesWritten.push(`${epic.id}/
|
|
3255
|
+
filesWritten.push(`${epic.id}/context.md`);
|
|
1177
3256
|
for (const story of epic.stories || []) {
|
|
1178
3257
|
filesWritten.push(`${epic.id}/${story.id}/work.json`);
|
|
1179
|
-
filesWritten.push(`${epic.id}/${story.id}/
|
|
3258
|
+
filesWritten.push(`${epic.id}/${story.id}/context.md`);
|
|
1180
3259
|
}
|
|
1181
3260
|
}
|
|
1182
|
-
this.debug('Files written this run', filesWritten);
|
|
3261
|
+
this.debug('Files written this run (Phase 1)', filesWritten);
|
|
1183
3262
|
this.debug(`Total files written: ${filesWritten.length} (${epicCount} epics x 2 + ${storyCount} stories x 2)`);
|
|
1184
3263
|
|
|
1185
3264
|
// Display clean summary of created epics and stories
|
|
@@ -1500,8 +3579,84 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1500
3579
|
return snapshot;
|
|
1501
3580
|
}
|
|
1502
3581
|
|
|
3582
|
+
/**
|
|
3583
|
+
* Rebuild the hierarchy object from work.json files on disk.
|
|
3584
|
+
* Used by resume mode to skip stages 1-6 and jump straight to doc/enrichment stages.
|
|
3585
|
+
* @returns {{ hierarchy: object, epicCount: number, storyCount: number }}
|
|
3586
|
+
*/
|
|
3587
|
+
_rebuildHierarchyFromDisk() {
|
|
3588
|
+
this.debugStage('R', 'Rebuild Hierarchy From Disk (Resume Mode)');
|
|
3589
|
+
|
|
3590
|
+
const epics = [];
|
|
3591
|
+
if (!fs.existsSync(this.projectPath)) {
|
|
3592
|
+
throw new Error('No project directory found — cannot resume');
|
|
3593
|
+
}
|
|
3594
|
+
|
|
3595
|
+
const dirs = fs.readdirSync(this.projectPath).filter(d =>
|
|
3596
|
+
d.startsWith('context-') && fs.existsSync(path.join(this.projectPath, d, 'work.json'))
|
|
3597
|
+
).sort();
|
|
3598
|
+
|
|
3599
|
+
let storyCount = 0;
|
|
3600
|
+
for (const dir of dirs) {
|
|
3601
|
+
const epicWorkPath = path.join(this.projectPath, dir, 'work.json');
|
|
3602
|
+
const epicWork = JSON.parse(fs.readFileSync(epicWorkPath, 'utf8'));
|
|
3603
|
+
|
|
3604
|
+
const epicEntry = {
|
|
3605
|
+
id: epicWork.id,
|
|
3606
|
+
name: epicWork.name,
|
|
3607
|
+
description: epicWork.description || '',
|
|
3608
|
+
domain: epicWork.domain || '',
|
|
3609
|
+
acceptanceCriteria: epicWork.acceptanceCriteria || [],
|
|
3610
|
+
dependencies: epicWork.dependencies || [],
|
|
3611
|
+
metadata: epicWork.metadata || {},
|
|
3612
|
+
stories: [],
|
|
3613
|
+
};
|
|
3614
|
+
|
|
3615
|
+
// Read cached context.md for this epic (used by doc generation)
|
|
3616
|
+
const epicCtxPath = path.join(this.projectPath, dir, 'context.md');
|
|
3617
|
+
if (fs.existsSync(epicCtxPath)) {
|
|
3618
|
+
this._epicContextCache.set(epicWork.name, fs.readFileSync(epicCtxPath, 'utf8'));
|
|
3619
|
+
}
|
|
3620
|
+
|
|
3621
|
+
// Scan story subdirectories
|
|
3622
|
+
const epicDir = path.join(this.projectPath, dir);
|
|
3623
|
+
const storyDirs = fs.readdirSync(epicDir).filter(sd => {
|
|
3624
|
+
const sdPath = path.join(epicDir, sd);
|
|
3625
|
+
return fs.statSync(sdPath).isDirectory() && fs.existsSync(path.join(sdPath, 'work.json'));
|
|
3626
|
+
}).sort();
|
|
3627
|
+
|
|
3628
|
+
for (const sd of storyDirs) {
|
|
3629
|
+
const storyWorkPath = path.join(epicDir, sd, 'work.json');
|
|
3630
|
+
const storyWork = JSON.parse(fs.readFileSync(storyWorkPath, 'utf8'));
|
|
3631
|
+
|
|
3632
|
+
epicEntry.stories.push({
|
|
3633
|
+
id: storyWork.id,
|
|
3634
|
+
name: storyWork.name,
|
|
3635
|
+
description: storyWork.description || '',
|
|
3636
|
+
acceptanceCriteria: storyWork.acceptanceCriteria || [],
|
|
3637
|
+
dependencies: storyWork.dependencies || [],
|
|
3638
|
+
metadata: storyWork.metadata || {},
|
|
3639
|
+
});
|
|
3640
|
+
|
|
3641
|
+
// Read cached story context.md
|
|
3642
|
+
const storyCtxPath = path.join(epicDir, sd, 'context.md');
|
|
3643
|
+
if (fs.existsSync(storyCtxPath)) {
|
|
3644
|
+
this._storyContextCache.set(`${epicWork.name}::${storyWork.name}`, fs.readFileSync(storyCtxPath, 'utf8'));
|
|
3645
|
+
}
|
|
3646
|
+
|
|
3647
|
+
storyCount++;
|
|
3648
|
+
}
|
|
3649
|
+
|
|
3650
|
+
epics.push(epicEntry);
|
|
3651
|
+
}
|
|
3652
|
+
|
|
3653
|
+
const hierarchy = { epics, validation: null };
|
|
3654
|
+
this.debug(`Rebuilt hierarchy from disk: ${epics.length} epics, ${storyCount} stories`);
|
|
3655
|
+
return { hierarchy, epicCount: epics.length, storyCount };
|
|
3656
|
+
}
|
|
3657
|
+
|
|
1503
3658
|
// Main execution method
|
|
1504
|
-
async execute(progressCallback = null) {
|
|
3659
|
+
async execute(progressCallback = null, { resumeFrom = null } = {}) {
|
|
1505
3660
|
// Cost threshold protection — wrap callback to check running cost before each progress call
|
|
1506
3661
|
if (this._costThreshold != null && progressCallback) {
|
|
1507
3662
|
const _origCallback = progressCallback;
|
|
@@ -1527,6 +3682,13 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1527
3682
|
// Start execution tracking
|
|
1528
3683
|
const executionId = history.startExecution('sprint-planning', 'decomposition');
|
|
1529
3684
|
|
|
3685
|
+
// Stage checkpoint helper — updates ceremony history with current stage.
|
|
3686
|
+
// Non-fatal: never throws.
|
|
3687
|
+
const checkpoint = (stage) => {
|
|
3688
|
+
try { history.updateExecution('sprint-planning', executionId, { stage, lastCheckpoint: localISO() }); }
|
|
3689
|
+
catch {}
|
|
3690
|
+
};
|
|
3691
|
+
|
|
1530
3692
|
try {
|
|
1531
3693
|
// Log ceremony execution metadata
|
|
1532
3694
|
const runId = Date.now();
|
|
@@ -1547,10 +3709,86 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1547
3709
|
});
|
|
1548
3710
|
|
|
1549
3711
|
const header = getCeremonyHeader('sprint-planning');
|
|
1550
|
-
sendCeremonyHeader(header.title
|
|
3712
|
+
sendCeremonyHeader(header.title);
|
|
1551
3713
|
|
|
1552
3714
|
const _t0run = Date.now();
|
|
1553
3715
|
|
|
3716
|
+
// ── Resume fast-path ─────────────────────────────────────────────
|
|
3717
|
+
// When resumeFrom is set, skip stages 1-6 and jump directly to the
|
|
3718
|
+
// doc-generation / enrichment stages using hierarchy rebuilt from disk.
|
|
3719
|
+
if (resumeFrom) {
|
|
3720
|
+
this.debug(`RESUME MODE — resuming from checkpoint: ${resumeFrom}`);
|
|
3721
|
+
await progressCallback?.(`Resuming from ${resumeFrom}…`);
|
|
3722
|
+
|
|
3723
|
+
const { hierarchy, epicCount, storyCount } = this._rebuildHierarchyFromDisk();
|
|
3724
|
+
|
|
3725
|
+
let rootDocContent = '';
|
|
3726
|
+
if (fs.existsSync(this.projectDocPath)) {
|
|
3727
|
+
rootDocContent = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
3728
|
+
}
|
|
3729
|
+
|
|
3730
|
+
if (resumeFrom === 'files-written') {
|
|
3731
|
+
// Run stages 7, 8, 9
|
|
3732
|
+
sendProgress('Generating documentation from canonical context...');
|
|
3733
|
+
await progressCallback?.(`Stage 7/8: Generating documentation (${epicCount} epics, ${storyCount} stories)…`);
|
|
3734
|
+
await this.generateDocFiles(hierarchy, rootDocContent, progressCallback);
|
|
3735
|
+
checkpoint('docs-generated');
|
|
3736
|
+
|
|
3737
|
+
sendProgress('Enriching story documentation with implementation detail...');
|
|
3738
|
+
await progressCallback?.(`Stage 8/8: Enriching story documentation (${storyCount} stories)…`);
|
|
3739
|
+
await this.enrichStoryDocs(hierarchy, progressCallback);
|
|
3740
|
+
checkpoint('enrichment-complete');
|
|
3741
|
+
} else if (resumeFrom === 'docs-generated') {
|
|
3742
|
+
// Run stages 8, 9
|
|
3743
|
+
sendProgress('Enriching story documentation with implementation detail...');
|
|
3744
|
+
await progressCallback?.(`Stage 8/8: Enriching story documentation (${storyCount} stories)…`);
|
|
3745
|
+
await this.enrichStoryDocs(hierarchy, progressCallback);
|
|
3746
|
+
checkpoint('enrichment-complete');
|
|
3747
|
+
}
|
|
3748
|
+
// 'enrichment-complete' → only summary (stage 9)
|
|
3749
|
+
|
|
3750
|
+
// Stage 9: Summary
|
|
3751
|
+
const { totalEpics, totalStories } = this.countTotalHierarchy();
|
|
3752
|
+
const returnResult = {
|
|
3753
|
+
epicsCreated: epicCount,
|
|
3754
|
+
storiesCreated: storyCount,
|
|
3755
|
+
totalEpics,
|
|
3756
|
+
totalStories,
|
|
3757
|
+
tokenUsage: { input: 0, output: 0, total: 0 },
|
|
3758
|
+
model: this._modelName,
|
|
3759
|
+
provider: this._providerName,
|
|
3760
|
+
validationIssues: [],
|
|
3761
|
+
resumed: true,
|
|
3762
|
+
resumedFrom: resumeFrom,
|
|
3763
|
+
};
|
|
3764
|
+
|
|
3765
|
+
try {
|
|
3766
|
+
const aggregated = this._aggregateAllTokenUsage();
|
|
3767
|
+
if (aggregated.totalCalls > 0 || aggregated.inputTokens > 0) {
|
|
3768
|
+
this.tokenTracker.finalizeRun(this.ceremonyName);
|
|
3769
|
+
returnResult.tokenUsage = {
|
|
3770
|
+
input: aggregated.inputTokens || 0,
|
|
3771
|
+
output: aggregated.outputTokens || 0,
|
|
3772
|
+
total: aggregated.totalTokens || 0,
|
|
3773
|
+
};
|
|
3774
|
+
}
|
|
3775
|
+
} catch {}
|
|
3776
|
+
|
|
3777
|
+
try {
|
|
3778
|
+
history.completeExecution('sprint-planning', executionId, 'success', {
|
|
3779
|
+
stage: 'completed',
|
|
3780
|
+
resumed: true,
|
|
3781
|
+
resumedFrom: resumeFrom,
|
|
3782
|
+
metrics: { epicsCreated: epicCount, storiesCreated: storyCount, totalEpics, totalStories }
|
|
3783
|
+
});
|
|
3784
|
+
} catch {}
|
|
3785
|
+
|
|
3786
|
+
sendOutput(`Resume complete — ${totalEpics} Epics, ${totalStories} Stories.`);
|
|
3787
|
+
this.debugTiming('TOTAL resume end-to-end', _t0run);
|
|
3788
|
+
return returnResult;
|
|
3789
|
+
}
|
|
3790
|
+
// ── End resume fast-path ─────────────────────────────────────────
|
|
3791
|
+
|
|
1554
3792
|
// Stage 1: Validate
|
|
1555
3793
|
sendProgress('Validating prerequisites...');
|
|
1556
3794
|
await progressCallback?.('Stage 1/6: Validating prerequisites…');
|
|
@@ -1588,7 +3826,27 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1588
3826
|
await progressCallback?.('Stage 4/6: Decomposing scope into Epics and Stories…');
|
|
1589
3827
|
_ts = Date.now();
|
|
1590
3828
|
let hierarchy = await this.decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback);
|
|
3829
|
+
|
|
3830
|
+
// Quality gate: retry decomposition if structural issues detected
|
|
3831
|
+
const maxDecompRetries = 3;
|
|
3832
|
+
for (let retry = 1; retry <= maxDecompRetries; retry++) {
|
|
3833
|
+
const issues = this._checkDecompositionQuality(hierarchy);
|
|
3834
|
+
if (issues.length === 0) break;
|
|
3835
|
+
|
|
3836
|
+
if (retry === maxDecompRetries) {
|
|
3837
|
+
this.debug(`Decomposition quality issues persist after ${maxDecompRetries} retries — proceeding with warnings`, { issues });
|
|
3838
|
+
break;
|
|
3839
|
+
}
|
|
3840
|
+
|
|
3841
|
+
this.debug(`Decomposition quality gate failed (attempt ${retry}/${maxDecompRetries})`, { issues });
|
|
3842
|
+
await progressCallback?.(`Decomposition has ${issues.length} issue(s) — retrying (${retry}/${maxDecompRetries})…`);
|
|
3843
|
+
|
|
3844
|
+
// Retry with violation feedback
|
|
3845
|
+
hierarchy = await this.decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback, issues);
|
|
3846
|
+
}
|
|
3847
|
+
|
|
1591
3848
|
this.debugTiming('Stage 4 — decomposeIntoEpicsStories', _ts);
|
|
3849
|
+
checkpoint('decomposition-complete');
|
|
1592
3850
|
|
|
1593
3851
|
// Log raw LLM output before any validation/modification
|
|
1594
3852
|
this.debugSection('POST-DECOMPOSE: Raw LLM Output (before validation)');
|
|
@@ -1599,6 +3857,40 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1599
3857
|
})));
|
|
1600
3858
|
this.debug('LLM validation field', hierarchy.validation || null);
|
|
1601
3859
|
|
|
3860
|
+
// NOTE: Scaffolding epic is generated AFTER context generation (in validateHierarchy)
|
|
3861
|
+
// so it can read tech requirements from all domain epic/story contexts.
|
|
3862
|
+
// Dependency injection happens inside _generateScaffoldingEpic().
|
|
3863
|
+
|
|
3864
|
+
// Stage 4.1: LLM-based duplicate detection
|
|
3865
|
+
sendProgress('Detecting duplicate epics and stories...');
|
|
3866
|
+
await progressCallback?.('Stage 4.1/8: Detecting duplicates…');
|
|
3867
|
+
_ts = Date.now();
|
|
3868
|
+
hierarchy = await this.deduplicateEpicsLLM(hierarchy, preRunSnapshot, progressCallback);
|
|
3869
|
+
this.debugTiming('Stage 4.1 — deduplicateEpicsLLM', _ts);
|
|
3870
|
+
|
|
3871
|
+
// Log post-dedup snapshot
|
|
3872
|
+
this.debugSection('POST-DEDUP: Hierarchy after duplicate detection');
|
|
3873
|
+
this.debugHierarchySnapshot('POST-DEDUP', hierarchy.epics.map(e => ({
|
|
3874
|
+
id: e.id || '(no-id)',
|
|
3875
|
+
name: e.name,
|
|
3876
|
+
stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
|
|
3877
|
+
})));
|
|
3878
|
+
|
|
3879
|
+
// Stage 4.2: Review and split wide stories
|
|
3880
|
+
sendProgress('Reviewing story scopes for splits...');
|
|
3881
|
+
await progressCallback?.('Stage 4.2/8: Reviewing story scopes for required splits…');
|
|
3882
|
+
_ts = Date.now();
|
|
3883
|
+
hierarchy = await this.reviewAndSplitStories(hierarchy, progressCallback);
|
|
3884
|
+
this.debugTiming('Stage 4.2 — reviewAndSplitStories', _ts);
|
|
3885
|
+
|
|
3886
|
+
// Log post-split snapshot
|
|
3887
|
+
this.debugSection('POST-SPLIT: Hierarchy after story scope review');
|
|
3888
|
+
this.debugHierarchySnapshot('POST-SPLIT', hierarchy.epics.map(e => ({
|
|
3889
|
+
id: e.id || '(no-id)',
|
|
3890
|
+
name: e.name,
|
|
3891
|
+
stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
|
|
3892
|
+
})));
|
|
3893
|
+
|
|
1602
3894
|
// Stage 4.5: User selection gate (Kanban UI only; null = run straight through)
|
|
1603
3895
|
if (this._selectionCallback) {
|
|
1604
3896
|
await progressCallback?.('Stage 4.5/6: Waiting for epic/story selection…');
|
|
@@ -1624,6 +3916,7 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1624
3916
|
await progressCallback?.(`Stage 5/6: Validating with domain experts (${epicCount5} epics, ${storyCount5} stories)…`);
|
|
1625
3917
|
_ts = Date.now();
|
|
1626
3918
|
hierarchy = await this.validateHierarchy(hierarchy, progressCallback, scope);
|
|
3919
|
+
checkpoint('validation-complete');
|
|
1627
3920
|
this.debugTiming(`Stage 5 — validateHierarchy (${epicCount5} epics, ${storyCount5} stories)`, _ts);
|
|
1628
3921
|
|
|
1629
3922
|
// Log hierarchy after validation (may have been modified)
|
|
@@ -1634,8 +3927,14 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1634
3927
|
stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
|
|
1635
3928
|
})));
|
|
1636
3929
|
|
|
1637
|
-
//
|
|
1638
|
-
|
|
3930
|
+
// Snapshot provisional IDs before renumbering so writeHierarchyFiles can rename
|
|
3931
|
+
// any provisional folders that were written early during context generation.
|
|
3932
|
+
for (const epic of hierarchy.epics) {
|
|
3933
|
+
epic._provisionalId = epic.id;
|
|
3934
|
+
for (const story of epic.stories || []) {
|
|
3935
|
+
story._provisionalId = story.id;
|
|
3936
|
+
}
|
|
3937
|
+
}
|
|
1639
3938
|
|
|
1640
3939
|
// Renumber IDs
|
|
1641
3940
|
hierarchy = this.renumberHierarchy(hierarchy, maxEpicNum, maxStoryNums);
|
|
@@ -1644,126 +3943,176 @@ Extract and synthesize content from the parent document that is specifically rel
|
|
|
1644
3943
|
process.stdout.write('\x1bc');
|
|
1645
3944
|
outputBuffer.clear();
|
|
1646
3945
|
|
|
1647
|
-
//
|
|
1648
|
-
|
|
1649
|
-
|
|
3946
|
+
// Compute coding order from dependency graph and stamp phase/order onto hierarchy
|
|
3947
|
+
// (before writeHierarchyFiles so work.json includes codingPhase and codingOrder)
|
|
3948
|
+
try {
|
|
3949
|
+
_ts = Date.now();
|
|
3950
|
+
const codingOrder = computeCodingOrder(hierarchy);
|
|
3951
|
+
|
|
3952
|
+
// Stamp phase and order onto each epic and story in the hierarchy
|
|
3953
|
+
const epicPhaseMap = new Map(); // epicId → phase number (1-based)
|
|
3954
|
+
for (const phase of codingOrder.phases) {
|
|
3955
|
+
for (const epicId of phase.epicIds) {
|
|
3956
|
+
epicPhaseMap.set(epicId, phase.phase + 1);
|
|
3957
|
+
}
|
|
3958
|
+
}
|
|
3959
|
+
let globalOrder = 0;
|
|
3960
|
+
for (const phase of codingOrder.json.phases) {
|
|
3961
|
+
for (const epicEntry of phase.epics) {
|
|
3962
|
+
const epic = hierarchy.epics.find(e => e.id === epicEntry.id);
|
|
3963
|
+
if (epic) {
|
|
3964
|
+
globalOrder++;
|
|
3965
|
+
epic.metadata = { ...(epic.metadata || {}), codingPhase: phase.phase, codingOrder: globalOrder };
|
|
3966
|
+
}
|
|
3967
|
+
for (const storyEntry of epicEntry.stories) {
|
|
3968
|
+
const epic2 = hierarchy.epics.find(e => e.id === epicEntry.id);
|
|
3969
|
+
const story = (epic2?.stories || []).find(s => s.id === storyEntry.id);
|
|
3970
|
+
if (story) {
|
|
3971
|
+
globalOrder++;
|
|
3972
|
+
story.metadata = { ...(story.metadata || {}), codingPhase: phase.phase, codingOrder: globalOrder };
|
|
3973
|
+
}
|
|
3974
|
+
}
|
|
3975
|
+
}
|
|
3976
|
+
}
|
|
3977
|
+
|
|
3978
|
+
// Write coding-order files
|
|
3979
|
+
const codingOrderMdPath = path.join(this.projectPath, 'coding-order.md');
|
|
3980
|
+
const codingOrderJsonPath = path.join(this.projectPath, 'coding-order.json');
|
|
3981
|
+
fs.writeFileSync(codingOrderMdPath, codingOrder.md, 'utf8');
|
|
3982
|
+
fs.writeFileSync(codingOrderJsonPath, JSON.stringify(codingOrder.json, null, 2), 'utf8');
|
|
3983
|
+
this.debug(`Coding order generated: ${codingOrder.phases.length} phases, critical path length ${codingOrder.criticalPath.length}`);
|
|
3984
|
+
this.debugTiming('Stage 6 — computeCodingOrder', _ts);
|
|
3985
|
+
} catch (err) {
|
|
3986
|
+
this.debug(`Coding order generation failed (non-critical): ${err.message}`);
|
|
3987
|
+
}
|
|
3988
|
+
|
|
3989
|
+
// Stage 6: Write hierarchy files (work.json + context.md — no LLM)
|
|
3990
|
+
sendProgress('Writing files and canonical context...');
|
|
3991
|
+
await progressCallback?.(`Stage 6/8: Writing files and canonical context (${epicCount5} epics, ${storyCount5} stories)…`);
|
|
1650
3992
|
_ts = Date.now();
|
|
1651
3993
|
const { epicCount, storyCount } = await this.writeHierarchyFiles(hierarchy, progressCallback);
|
|
3994
|
+
checkpoint('files-written');
|
|
1652
3995
|
this.debugTiming(`Stage 6 — writeHierarchyFiles (${epicCount5} epics, ${storyCount5} stories)`, _ts);
|
|
1653
3996
|
|
|
1654
|
-
//
|
|
3997
|
+
// Notify listeners (e.g. Kanban board) that work.json files are now on disk.
|
|
3998
|
+
// This fires BEFORE Stage 7/8 so the board can list new epics/stories immediately.
|
|
3999
|
+
await this._hierarchyWrittenCallback?.({ epicCount, storyCount });
|
|
4000
|
+
|
|
4001
|
+
// Stage 7: Generate narrative doc.md from canonical context.md (replaces doc-distribution)
|
|
4002
|
+
sendProgress('Generating documentation from canonical context...');
|
|
4003
|
+
await progressCallback?.(`Stage 7/8: Generating documentation (${epicCount5} epics, ${storyCount5} stories)…`);
|
|
4004
|
+
_ts = Date.now();
|
|
4005
|
+
let rootDocContent = '';
|
|
4006
|
+
if (fs.existsSync(this.projectDocPath)) {
|
|
4007
|
+
rootDocContent = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
4008
|
+
}
|
|
4009
|
+
await this.generateDocFiles(hierarchy, rootDocContent, progressCallback);
|
|
4010
|
+
checkpoint('docs-generated');
|
|
4011
|
+
this.debugTiming(`Stage 7 — generateDocFiles (${epicCount5} epics, ${storyCount5} stories)`, _ts);
|
|
4012
|
+
|
|
4013
|
+
// Stage 8: Enrich story docs with implementation detail
|
|
1655
4014
|
sendProgress('Enriching story documentation with implementation detail...');
|
|
1656
|
-
await progressCallback?.(`Stage
|
|
4015
|
+
await progressCallback?.(`Stage 8/8: Enriching story documentation (${storyCount5} stories)…`);
|
|
1657
4016
|
_ts = Date.now();
|
|
1658
4017
|
await this.enrichStoryDocs(hierarchy, progressCallback);
|
|
1659
|
-
|
|
4018
|
+
checkpoint('enrichment-complete');
|
|
4019
|
+
this.debugTiming(`Stage 8 — enrichStoryDocs (${storyCount5} stories)`, _ts);
|
|
1660
4020
|
|
|
1661
4021
|
// Stage 9: Summary & Cleanup
|
|
1662
4022
|
this.debugStage(9, 'Summary & Cleanup');
|
|
1663
4023
|
|
|
1664
4024
|
const { totalEpics, totalStories } = this.countTotalHierarchy();
|
|
1665
4025
|
|
|
1666
|
-
//
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
this.debugTiming('TOTAL run() end-to-end', _t0run);
|
|
1671
|
-
sendOutput(`Created ${epicCount} Epics, ${storyCount} Stories. Total: ${totalEpics} Epics, ${totalStories} Stories.`);
|
|
1672
|
-
|
|
1673
|
-
// Track token usage — aggregate across all provider instances
|
|
1674
|
-
const aggregated = this._aggregateAllTokenUsage();
|
|
4026
|
+
// ====================================================================
|
|
4027
|
+
// Stage 9: Summary & Cleanup (non-fatal — all real work is done)
|
|
4028
|
+
// Errors here must not crash the ceremony since files are already on disk.
|
|
4029
|
+
// ====================================================================
|
|
1675
4030
|
let tokenUsageSummary = null;
|
|
1676
|
-
if (aggregated.totalCalls > 0 || aggregated.inputTokens > 0) {
|
|
1677
|
-
tokenUsageSummary = aggregated;
|
|
1678
|
-
this.debug('Token usage (all providers)', tokenUsageSummary);
|
|
1679
|
-
|
|
1680
|
-
this.tokenTracker.finalizeRun(this.ceremonyName);
|
|
1681
|
-
this.debug('Token tracking finalized in .avc/token-history.json');
|
|
1682
|
-
}
|
|
1683
|
-
|
|
1684
|
-
sendOutput('Run /seed <story-id> to decompose a Story into Tasks.');
|
|
1685
|
-
|
|
1686
|
-
// Log ceremony execution end with full comparison summary
|
|
1687
|
-
const runDuration = Date.now() - runId;
|
|
1688
|
-
this.debug('\n' + '='.repeat(80));
|
|
1689
|
-
this.debug('SPRINT PLANNING CEREMONY - EXECUTION END');
|
|
1690
|
-
this.debug('='.repeat(80));
|
|
1691
|
-
this.debug('Run ID:', runId);
|
|
1692
|
-
this.debug('Started:', runTimestamp);
|
|
1693
|
-
this.debug('Ended:', localISO());
|
|
1694
|
-
this.debug('Duration:', `${Math.round(runDuration / 1000)} seconds`);
|
|
1695
|
-
|
|
1696
|
-
this.debugSection('RUN COMPARISON SUMMARY (compare this block across runs)');
|
|
1697
|
-
this.debug('PRE-RUN state', {
|
|
1698
|
-
epics: existingEpics.size,
|
|
1699
|
-
stories: existingStories.size,
|
|
1700
|
-
epicNames: Array.from(existingEpics.keys())
|
|
1701
|
-
});
|
|
1702
|
-
this.debug('THIS RUN added', {
|
|
1703
|
-
epics: epicCount,
|
|
1704
|
-
stories: storyCount,
|
|
1705
|
-
epicNames: hierarchy.epics.map(e => e.name),
|
|
1706
|
-
storyNames: hierarchy.epics.flatMap(e => (e.stories || []).map(s => s.name))
|
|
1707
|
-
});
|
|
1708
|
-
this.debug('POST-RUN state', {
|
|
1709
|
-
epics: totalEpics,
|
|
1710
|
-
stories: totalStories
|
|
1711
|
-
});
|
|
1712
|
-
this.debug('Duplicate detection results', {
|
|
1713
|
-
epicsSkippedAsDuplicates: duplicateAnalysis.skippedEpics.length,
|
|
1714
|
-
storiesSkippedAsDuplicates: duplicateAnalysis.skippedStories.length,
|
|
1715
|
-
skippedEpicNames: duplicateAnalysis.skippedEpics.map(s => s.name),
|
|
1716
|
-
skippedStoryNames: duplicateAnalysis.skippedStories.map(s => s.name)
|
|
1717
|
-
});
|
|
1718
|
-
if (tokenUsageSummary) {
|
|
1719
|
-
this.debug('Token usage this run', tokenUsageSummary);
|
|
1720
|
-
}
|
|
1721
|
-
this.debug('='.repeat(80) + '\n');
|
|
1722
|
-
|
|
1723
|
-
// Build return result for kanban integration
|
|
1724
4031
|
const returnResult = {
|
|
1725
4032
|
epicsCreated: epicCount,
|
|
1726
4033
|
storiesCreated: storyCount,
|
|
1727
4034
|
totalEpics,
|
|
1728
4035
|
totalStories,
|
|
1729
|
-
tokenUsage: {
|
|
1730
|
-
input: tokenUsageSummary?.inputTokens || 0,
|
|
1731
|
-
output: tokenUsageSummary?.outputTokens || 0,
|
|
1732
|
-
total: tokenUsageSummary?.totalTokens || 0,
|
|
1733
|
-
},
|
|
4036
|
+
tokenUsage: { input: 0, output: 0, total: 0 },
|
|
1734
4037
|
model: this._modelName,
|
|
1735
4038
|
provider: this._providerName,
|
|
1736
4039
|
validationIssues: [],
|
|
1737
4040
|
};
|
|
1738
4041
|
|
|
1739
|
-
|
|
1740
|
-
|
|
1741
|
-
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
|
|
4042
|
+
try {
|
|
4043
|
+
// Post-run snapshot
|
|
4044
|
+
const postRunSnapshot = this.readPostRunSnapshot();
|
|
4045
|
+
this.debugHierarchySnapshot('POST-RUN', postRunSnapshot);
|
|
4046
|
+
this.debugTiming('TOTAL run() end-to-end', _t0run);
|
|
4047
|
+
sendOutput(`Created ${epicCount} Epics, ${storyCount} Stories. Total: ${totalEpics} Epics, ${totalStories} Stories.`);
|
|
4048
|
+
|
|
4049
|
+
// Token usage
|
|
4050
|
+
const aggregated = this._aggregateAllTokenUsage();
|
|
4051
|
+
if (aggregated.totalCalls > 0 || aggregated.inputTokens > 0) {
|
|
4052
|
+
tokenUsageSummary = aggregated;
|
|
4053
|
+
this.debug('Token usage (all providers)', tokenUsageSummary);
|
|
4054
|
+
this.tokenTracker.finalizeRun(this.ceremonyName);
|
|
4055
|
+
returnResult.tokenUsage = {
|
|
4056
|
+
input: aggregated.inputTokens || 0,
|
|
4057
|
+
output: aggregated.outputTokens || 0,
|
|
4058
|
+
total: aggregated.totalTokens || 0,
|
|
4059
|
+
};
|
|
1747
4060
|
}
|
|
4061
|
+
|
|
4062
|
+
sendOutput('Run /seed <story-id> to decompose a Story into Tasks.');
|
|
4063
|
+
|
|
4064
|
+
// Comparison summary log
|
|
4065
|
+
const runDuration = Date.now() - runId;
|
|
4066
|
+
this.debug('\n' + '='.repeat(80));
|
|
4067
|
+
this.debug('SPRINT PLANNING CEREMONY - EXECUTION END');
|
|
4068
|
+
this.debug('='.repeat(80));
|
|
4069
|
+
this.debug('Run ID:', runId);
|
|
4070
|
+
this.debug('Duration:', `${Math.round(runDuration / 1000)} seconds`);
|
|
4071
|
+
|
|
4072
|
+
const duplicateAnalysis = this._lastDuplicateAnalysis || { skippedEpics: [], skippedStories: [] };
|
|
4073
|
+
this.debug('Duplicate detection', {
|
|
4074
|
+
epicsSkipped: duplicateAnalysis.skippedEpics.length,
|
|
4075
|
+
storiesSkipped: duplicateAnalysis.skippedStories.length,
|
|
4076
|
+
});
|
|
4077
|
+
if (tokenUsageSummary) this.debug('Token usage this run', tokenUsageSummary);
|
|
4078
|
+
this.debug('='.repeat(80) + '\n');
|
|
4079
|
+
} catch (summaryErr) {
|
|
4080
|
+
// Summary is non-fatal — log and continue
|
|
4081
|
+
this.debug('Summary logging failed (non-fatal, all files are on disk)', { error: summaryErr.message });
|
|
1748
4082
|
}
|
|
1749
4083
|
|
|
1750
|
-
history
|
|
1751
|
-
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1760
|
-
|
|
1761
|
-
|
|
1762
|
-
|
|
1763
|
-
totalEpics: totalEpics,
|
|
1764
|
-
totalStories: totalStories
|
|
4084
|
+
// Complete ceremony history (also non-fatal)
|
|
4085
|
+
try {
|
|
4086
|
+
const filesGenerated = [];
|
|
4087
|
+
filesGenerated.push(path.join(this.projectPath, 'context.md'));
|
|
4088
|
+
for (const epic of hierarchy.epics) {
|
|
4089
|
+
filesGenerated.push(path.join(this.projectPath, epic.id, 'work.json'));
|
|
4090
|
+
filesGenerated.push(path.join(this.projectPath, epic.id, 'context.md'));
|
|
4091
|
+
filesGenerated.push(path.join(this.projectPath, epic.id, 'doc.md'));
|
|
4092
|
+
for (const story of epic.stories || []) {
|
|
4093
|
+
filesGenerated.push(path.join(this.projectPath, epic.id, story.id, 'work.json'));
|
|
4094
|
+
filesGenerated.push(path.join(this.projectPath, epic.id, story.id, 'context.md'));
|
|
4095
|
+
filesGenerated.push(path.join(this.projectPath, epic.id, story.id, 'doc.md'));
|
|
4096
|
+
}
|
|
1765
4097
|
}
|
|
1766
|
-
|
|
4098
|
+
history.completeExecution('sprint-planning', executionId, 'success', {
|
|
4099
|
+
filesGenerated,
|
|
4100
|
+
tokenUsage: tokenUsageSummary ? {
|
|
4101
|
+
input: tokenUsageSummary.inputTokens,
|
|
4102
|
+
output: tokenUsageSummary.outputTokens,
|
|
4103
|
+
total: tokenUsageSummary.totalTokens
|
|
4104
|
+
} : null,
|
|
4105
|
+
model: this._modelName,
|
|
4106
|
+
provider: this._providerName,
|
|
4107
|
+
stage: 'completed',
|
|
4108
|
+
metrics: {
|
|
4109
|
+
epicsCreated: epicCount, storiesCreated: storyCount,
|
|
4110
|
+
totalEpics, totalStories
|
|
4111
|
+
}
|
|
4112
|
+
});
|
|
4113
|
+
} catch (historyErr) {
|
|
4114
|
+
this.debug('Ceremony history update failed (non-fatal)', { error: historyErr.message });
|
|
4115
|
+
}
|
|
1767
4116
|
|
|
1768
4117
|
return returnResult;
|
|
1769
4118
|
} catch (error) {
|