@agile-vibe-coding/avc 0.1.1 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli/agent-loader.js +21 -0
- package/cli/agents/agent-selector.md +152 -0
- package/cli/agents/architecture-recommender.md +418 -0
- package/cli/agents/code-implementer.md +117 -0
- package/cli/agents/code-validator.md +80 -0
- package/cli/agents/context-reviewer-epic.md +101 -0
- package/cli/agents/context-reviewer-story.md +92 -0
- package/cli/agents/context-writer-epic.md +145 -0
- package/cli/agents/context-writer-story.md +111 -0
- package/cli/agents/database-deep-dive.md +470 -0
- package/cli/agents/database-recommender.md +634 -0
- package/cli/agents/doc-distributor.md +176 -0
- package/cli/agents/doc-writer-epic.md +42 -0
- package/cli/agents/doc-writer-story.md +43 -0
- package/cli/agents/documentation-updater.md +203 -0
- package/cli/agents/duplicate-detector.md +110 -0
- package/cli/agents/epic-story-decomposer.md +559 -0
- package/cli/agents/feature-context-generator.md +91 -0
- package/cli/agents/gap-checker-epic.md +52 -0
- package/cli/agents/impact-checker-story.md +51 -0
- package/cli/agents/migration-guide-generator.md +305 -0
- package/cli/agents/mission-scope-generator.md +143 -0
- package/cli/agents/mission-scope-validator.md +146 -0
- package/cli/agents/project-context-extractor.md +122 -0
- package/cli/agents/project-documentation-creator.json +226 -0
- package/cli/agents/project-documentation-creator.md +595 -0
- package/cli/agents/question-prefiller.md +269 -0
- package/cli/agents/refiner-epic.md +39 -0
- package/cli/agents/refiner-story.md +42 -0
- package/cli/agents/scaffolding-generator.md +99 -0
- package/cli/agents/seed-validator.md +71 -0
- package/cli/agents/story-doc-enricher.md +133 -0
- package/cli/agents/story-scope-reviewer.md +147 -0
- package/cli/agents/story-splitter.md +83 -0
- package/cli/agents/suggestion-business-analyst.md +88 -0
- package/cli/agents/suggestion-deployment-architect.md +263 -0
- package/cli/agents/suggestion-product-manager.md +129 -0
- package/cli/agents/suggestion-security-specialist.md +156 -0
- package/cli/agents/suggestion-technical-architect.md +269 -0
- package/cli/agents/suggestion-ux-researcher.md +93 -0
- package/cli/agents/task-subtask-decomposer.md +188 -0
- package/cli/agents/validator-documentation.json +183 -0
- package/cli/agents/validator-documentation.md +455 -0
- package/cli/agents/validator-selector.md +211 -0
- package/cli/ansi-colors.js +21 -0
- package/cli/api-reference-tool.js +368 -0
- package/cli/build-docs.js +29 -8
- package/cli/ceremony-history.js +369 -0
- package/cli/checks/catalog.json +76 -0
- package/cli/checks/code/quality.json +26 -0
- package/cli/checks/code/testing.json +14 -0
- package/cli/checks/code/traceability.json +26 -0
- package/cli/checks/cross-refs/epic.json +171 -0
- package/cli/checks/cross-refs/story.json +149 -0
- package/cli/checks/epic/api.json +114 -0
- package/cli/checks/epic/backend.json +126 -0
- package/cli/checks/epic/cloud.json +126 -0
- package/cli/checks/epic/data.json +102 -0
- package/cli/checks/epic/database.json +114 -0
- package/cli/checks/epic/developer.json +182 -0
- package/cli/checks/epic/devops.json +174 -0
- package/cli/checks/epic/frontend.json +162 -0
- package/cli/checks/epic/mobile.json +102 -0
- package/cli/checks/epic/qa.json +90 -0
- package/cli/checks/epic/security.json +184 -0
- package/cli/checks/epic/solution-architect.json +192 -0
- package/cli/checks/epic/test-architect.json +90 -0
- package/cli/checks/epic/ui.json +102 -0
- package/cli/checks/epic/ux.json +90 -0
- package/cli/checks/fixes/epic-fix-template.md +10 -0
- package/cli/checks/fixes/story-fix-template.md +10 -0
- package/cli/checks/story/api.json +186 -0
- package/cli/checks/story/backend.json +102 -0
- package/cli/checks/story/cloud.json +102 -0
- package/cli/checks/story/data.json +210 -0
- package/cli/checks/story/database.json +102 -0
- package/cli/checks/story/developer.json +168 -0
- package/cli/checks/story/devops.json +102 -0
- package/cli/checks/story/frontend.json +174 -0
- package/cli/checks/story/mobile.json +102 -0
- package/cli/checks/story/qa.json +210 -0
- package/cli/checks/story/security.json +198 -0
- package/cli/checks/story/solution-architect.json +230 -0
- package/cli/checks/story/test-architect.json +210 -0
- package/cli/checks/story/ui.json +102 -0
- package/cli/checks/story/ux.json +102 -0
- package/cli/coding-order.js +401 -0
- package/cli/command-logger.js +49 -12
- package/cli/components/static-output.js +63 -0
- package/cli/console-output-manager.js +94 -0
- package/cli/dependency-checker.js +72 -0
- package/cli/docs-sync.js +306 -0
- package/cli/epic-story-validator.js +659 -0
- package/cli/evaluation-prompts.js +1008 -0
- package/cli/execution-context.js +195 -0
- package/cli/generate-summary-table.js +340 -0
- package/cli/init-model-config.js +704 -0
- package/cli/init.js +1737 -278
- package/cli/kanban-server-manager.js +227 -0
- package/cli/llm-claude.js +150 -1
- package/cli/llm-gemini.js +109 -0
- package/cli/llm-local.js +493 -0
- package/cli/llm-mock.js +233 -0
- package/cli/llm-openai.js +454 -0
- package/cli/llm-provider.js +379 -3
- package/cli/llm-token-limits.js +211 -0
- package/cli/llm-verifier.js +662 -0
- package/cli/llm-xiaomi.js +143 -0
- package/cli/message-constants.js +49 -0
- package/cli/message-manager.js +334 -0
- package/cli/message-types.js +96 -0
- package/cli/messaging-api.js +291 -0
- package/cli/micro-check-fixer.js +335 -0
- package/cli/micro-check-runner.js +449 -0
- package/cli/micro-check-scorer.js +148 -0
- package/cli/micro-check-validator.js +538 -0
- package/cli/model-pricing.js +192 -0
- package/cli/model-query-engine.js +468 -0
- package/cli/model-recommendation-analyzer.js +495 -0
- package/cli/model-selector.js +270 -0
- package/cli/output-buffer.js +107 -0
- package/cli/process-manager.js +73 -2
- package/cli/prompt-logger.js +57 -0
- package/cli/repl-ink.js +4625 -1094
- package/cli/repl-old.js +3 -4
- package/cli/seed-processor.js +962 -0
- package/cli/sprint-planning-processor.js +4162 -0
- package/cli/template-processor.js +2149 -105
- package/cli/templates/project.md +25 -8
- package/cli/templates/vitepress-config.mts.template +5 -4
- package/cli/token-tracker.js +547 -0
- package/cli/tools/generate-story-validators.js +317 -0
- package/cli/tools/generate-validators.js +669 -0
- package/cli/update-checker.js +19 -17
- package/cli/update-notifier.js +4 -4
- package/cli/validation-router.js +667 -0
- package/cli/verification-tracker.js +563 -0
- package/cli/worktree-runner.js +654 -0
- package/kanban/README.md +386 -0
- package/kanban/client/README.md +205 -0
- package/kanban/client/components.json +20 -0
- package/kanban/client/dist/assets/index-D_KC5EQT.css +1 -0
- package/kanban/client/dist/assets/index-DjY5zqW7.js +351 -0
- package/kanban/client/dist/index.html +16 -0
- package/kanban/client/dist/vite.svg +1 -0
- package/kanban/client/index.html +15 -0
- package/kanban/client/package-lock.json +9442 -0
- package/kanban/client/package.json +44 -0
- package/kanban/client/postcss.config.js +6 -0
- package/kanban/client/public/vite.svg +1 -0
- package/kanban/client/src/App.jsx +651 -0
- package/kanban/client/src/components/ProjectFileEditorPopup.jsx +117 -0
- package/kanban/client/src/components/ceremony/AskArchPopup.jsx +420 -0
- package/kanban/client/src/components/ceremony/AskModelPopup.jsx +629 -0
- package/kanban/client/src/components/ceremony/CeremonyWorkflowModal.jsx +1133 -0
- package/kanban/client/src/components/ceremony/EpicStorySelectionModal.jsx +254 -0
- package/kanban/client/src/components/ceremony/ProviderSwitcherButton.jsx +290 -0
- package/kanban/client/src/components/ceremony/SponsorCallModal.jsx +686 -0
- package/kanban/client/src/components/ceremony/SprintPlanningModal.jsx +838 -0
- package/kanban/client/src/components/ceremony/steps/ArchitectureStep.jsx +150 -0
- package/kanban/client/src/components/ceremony/steps/CompleteStep.jsx +136 -0
- package/kanban/client/src/components/ceremony/steps/DatabaseStep.jsx +202 -0
- package/kanban/client/src/components/ceremony/steps/DeploymentStep.jsx +123 -0
- package/kanban/client/src/components/ceremony/steps/MissionStep.jsx +106 -0
- package/kanban/client/src/components/ceremony/steps/ReviewAnswersStep.jsx +329 -0
- package/kanban/client/src/components/ceremony/steps/RunningStep.jsx +249 -0
- package/kanban/client/src/components/kanban/CardDetailModal.jsx +646 -0
- package/kanban/client/src/components/kanban/EpicSection.jsx +146 -0
- package/kanban/client/src/components/kanban/FilterToolbar.jsx +222 -0
- package/kanban/client/src/components/kanban/GroupingSelector.jsx +63 -0
- package/kanban/client/src/components/kanban/KanbanBoard.jsx +211 -0
- package/kanban/client/src/components/kanban/KanbanCard.jsx +147 -0
- package/kanban/client/src/components/kanban/KanbanColumn.jsx +90 -0
- package/kanban/client/src/components/kanban/RefineWorkItemPopup.jsx +784 -0
- package/kanban/client/src/components/kanban/RunButton.jsx +162 -0
- package/kanban/client/src/components/kanban/SeedButton.jsx +176 -0
- package/kanban/client/src/components/layout/LoadingScreen.jsx +82 -0
- package/kanban/client/src/components/process/ProcessMonitorBar.jsx +80 -0
- package/kanban/client/src/components/settings/AgentEditorPopup.jsx +171 -0
- package/kanban/client/src/components/settings/AgentsTab.jsx +381 -0
- package/kanban/client/src/components/settings/ApiKeysTab.jsx +142 -0
- package/kanban/client/src/components/settings/CeremonyModelsTab.jsx +105 -0
- package/kanban/client/src/components/settings/CheckEditorPopup.jsx +507 -0
- package/kanban/client/src/components/settings/CostThresholdsTab.jsx +95 -0
- package/kanban/client/src/components/settings/ModelPricingTab.jsx +269 -0
- package/kanban/client/src/components/settings/OpenAIAuthSection.jsx +412 -0
- package/kanban/client/src/components/settings/ServersTab.jsx +121 -0
- package/kanban/client/src/components/settings/SettingsModal.jsx +84 -0
- package/kanban/client/src/components/stats/CostModal.jsx +384 -0
- package/kanban/client/src/components/ui/badge.jsx +27 -0
- package/kanban/client/src/components/ui/dialog.jsx +121 -0
- package/kanban/client/src/components/ui/tabs.jsx +85 -0
- package/kanban/client/src/hooks/__tests__/useGrouping.test.js +232 -0
- package/kanban/client/src/hooks/useGrouping.js +177 -0
- package/kanban/client/src/hooks/useWebSocket.js +120 -0
- package/kanban/client/src/lib/__tests__/api.test.js +196 -0
- package/kanban/client/src/lib/__tests__/status-grouping.test.js +94 -0
- package/kanban/client/src/lib/api.js +515 -0
- package/kanban/client/src/lib/status-grouping.js +154 -0
- package/kanban/client/src/lib/utils.js +11 -0
- package/kanban/client/src/main.jsx +10 -0
- package/kanban/client/src/store/__tests__/kanbanStore.test.js +164 -0
- package/kanban/client/src/store/ceremonyStore.js +172 -0
- package/kanban/client/src/store/filterStore.js +201 -0
- package/kanban/client/src/store/kanbanStore.js +123 -0
- package/kanban/client/src/store/processStore.js +65 -0
- package/kanban/client/src/store/sprintPlanningStore.js +33 -0
- package/kanban/client/src/styles/globals.css +59 -0
- package/kanban/client/tailwind.config.js +77 -0
- package/kanban/client/vite.config.js +28 -0
- package/kanban/client/vitest.config.js +28 -0
- package/kanban/dev-start.sh +47 -0
- package/kanban/package.json +12 -0
- package/kanban/server/index.js +537 -0
- package/kanban/server/routes/ceremony.js +454 -0
- package/kanban/server/routes/costs.js +163 -0
- package/kanban/server/routes/openai-oauth.js +366 -0
- package/kanban/server/routes/processes.js +50 -0
- package/kanban/server/routes/settings.js +736 -0
- package/kanban/server/routes/websocket.js +281 -0
- package/kanban/server/routes/work-items.js +487 -0
- package/kanban/server/services/CeremonyService.js +1441 -0
- package/kanban/server/services/FileSystemScanner.js +95 -0
- package/kanban/server/services/FileWatcher.js +144 -0
- package/kanban/server/services/HierarchyBuilder.js +196 -0
- package/kanban/server/services/ProcessRegistry.js +122 -0
- package/kanban/server/services/TaskRunnerService.js +261 -0
- package/kanban/server/services/WorkItemReader.js +123 -0
- package/kanban/server/services/WorkItemRefineService.js +510 -0
- package/kanban/server/start.js +49 -0
- package/kanban/server/utils/kanban-logger.js +132 -0
- package/kanban/server/utils/markdown.js +91 -0
- package/kanban/server/utils/status-grouping.js +107 -0
- package/kanban/server/workers/run-task-worker.js +121 -0
- package/kanban/server/workers/seed-worker.js +94 -0
- package/kanban/server/workers/sponsor-call-worker.js +92 -0
- package/kanban/server/workers/sprint-planning-worker.js +212 -0
- package/package.json +19 -7
- package/cli/agents/documentation.md +0 -302
|
@@ -0,0 +1,4162 @@
|
|
|
1
|
+
import fs from 'fs';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import { LLMProvider } from './llm-provider.js';
|
|
4
|
+
import { PromptLogger } from './prompt-logger.js';
|
|
5
|
+
import { TokenTracker } from './token-tracker.js';
|
|
6
|
+
import { EpicStoryValidator } from './epic-story-validator.js';
|
|
7
|
+
import { VerificationTracker } from './verification-tracker.js';
|
|
8
|
+
import { fileURLToPath } from 'url';
|
|
9
|
+
import { getCeremonyHeader } from './message-constants.js';
|
|
10
|
+
import { sendError, sendWarning, sendSuccess, sendInfo, sendOutput, sendIndented, sendSectionHeader, sendCeremonyHeader, sendProgress, sendSubstep } from './messaging-api.js';
|
|
11
|
+
import { outputBuffer } from './output-buffer.js';
|
|
12
|
+
import { loadAgent } from './agent-loader.js';
|
|
13
|
+
import { CONTEXT_GENERATION_TOOLS, dispatchToolCall } from './api-reference-tool.js';
|
|
14
|
+
import { computeCodingOrder } from './coding-order.js';
|
|
15
|
+
|
|
16
|
+
// Recreate CommonJS-style __filename/__dirname, which are unavailable in ES modules.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
|
|
18
|
+
|
|
19
|
+
/**
 * Local-timezone ISO string (e.g. 2026-03-04T18:05:16.554+01:00).
 * @param {Date} [date] - Date to format; defaults to "now".
 * @returns {string} ISO-8601 timestamp with the local UTC offset suffix.
 */
function localISO(date = new Date()) {
  const pad2 = (value) => String(value).padStart(2, '0');
  const pad3 = (value) => String(value).padStart(3, '0');

  const datePart = `${date.getFullYear()}-${pad2(date.getMonth() + 1)}-${pad2(date.getDate())}`;
  const timePart = `${pad2(date.getHours())}:${pad2(date.getMinutes())}:${pad2(date.getSeconds())}.${pad3(date.getMilliseconds())}`;

  // getTimezoneOffset() is minutes *behind* UTC, so negate to get the ISO sign.
  const offsetMinutes = -date.getTimezoneOffset();
  const offsetSign = offsetMinutes >= 0 ? '+' : '-';
  const absOffset = Math.abs(offsetMinutes);
  const offsetPart = `${offsetSign}${pad2(Math.floor(absOffset / 60))}:${pad2(absOffset % 60)}`;

  return `${datePart}T${timePart}${offsetPart}`;
}
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* SprintPlanningProcessor - Creates/expands Epics and Stories with duplicate detection
|
|
32
|
+
*/
|
|
33
|
+
class SprintPlanningProcessor {
|
|
34
|
+
/**
 * Set up paths, LLM configuration, trackers, and optional lifecycle callbacks
 * for the sprint-planning ceremony. Reads .avc/avc.json via readCeremonyConfig()
 * and initializes the token tracker as a side effect.
 *
 * @param {Object} [options]
 * @param {number|null} [options.costThreshold] - Cumulative-cost ceiling; null disables the guard.
 * @param {Function|null} [options.costLimitReachedCallback] - Invoked when the cost ceiling is crossed.
 * @param {Function|null} [options.selectionCallback] - Async gate between Stage 4 and Stage 5 (see below).
 * @param {Function|null} [options.hierarchyWrittenCallback] - Fired after Stage 6 writes work.json files.
 * @param {Function|null} [options.itemWrittenCallback] - Fired per individual epic/story work.json write.
 * @param {Function|null} [options.quotaExceededCallback] - Async handler for validator quota/rate-limit errors.
 */
constructor(options = {}) {
  this.ceremonyName = 'sprint-planning';
  // All ceremony artifacts live under <cwd>/.avc
  this.avcPath = path.join(process.cwd(), '.avc');
  this.projectPath = path.join(this.avcPath, 'project');
  this.projectDocPath = path.join(this.projectPath, 'doc.md');
  this.avcConfigPath = path.join(this.avcPath, 'avc.json');
  this.agentsPath = path.join(__dirname, 'agents');

  // Read ceremony config
  const { provider, model, stagesConfig } = this.readCeremonyConfig();
  this._providerName = provider;
  this._modelName = model;
  this.llmProvider = null; // created lazily by initializeLLMProvider()
  this.stagesConfig = stagesConfig;

  // Stage provider cache
  this._stageProviders = {};

  // LLM-generated context.md cache (keyed by epic.name / "epicName::storyName")
  // Populated by generateContextFiles() before validation; consumed by validation loop and writeHierarchyFiles()
  this._epicContextCache = new Map();
  this._storyContextCache = new Map();

  // Initialize prompt logger (writes per-call prompt/response JSON files)
  this._promptLogger = new PromptLogger(process.cwd(), 'sprint-planning');

  // Initialize token tracker
  this.tokenTracker = new TokenTracker(this.avcPath);
  this.tokenTracker.init();

  // Initialize verification tracker
  this.verificationTracker = new VerificationTracker(this.avcPath);

  // Debug mode - always enabled for comprehensive logging
  this.debugMode = true;

  // Cost threshold protection
  this._costThreshold = options?.costThreshold ?? null;
  this._costLimitReachedCallback = options?.costLimitReachedCallback ?? null;
  this._runningCost = 0;

  // Optional user-selection gate between Stage 4 and Stage 5
  // When provided, the processor calls this async function with the decomposed hierarchy
  // and waits for it to resolve with { selectedEpicIds, selectedStoryIds }.
  // When null (default), the processor runs straight through without pausing.
  this._selectionCallback = options?.selectionCallback ?? null;

  // Optional callback fired immediately after Stage 6 writes work.json files to disk.
  // Use this to trigger Kanban board refresh before Stage 7/8 (doc gen + enrichment) complete.
  this._hierarchyWrittenCallback = options?.hierarchyWrittenCallback ?? null;

  // Optional callback fired each time a single epic or story work.json is written.
  // Allows the Kanban board to display items one-by-one as they are created.
  this._itemWrittenCallback = options?.itemWrittenCallback ?? null;

  // Optional callback when a validator call fails with quota/rate-limit error.
  // Async: resolves with { newProvider?, newModel? } or null (retry same model).
  this._quotaExceededCallback = options?.quotaExceededCallback ?? null;
}
|
|
93
|
+
|
|
94
|
+
/**
|
|
95
|
+
* Structured debug logger - writes ONLY to file via CommandLogger
|
|
96
|
+
*/
|
|
97
|
+
debug(message, data = null) {
|
|
98
|
+
if (!this.debugMode) return;
|
|
99
|
+
|
|
100
|
+
const timestamp = localISO();
|
|
101
|
+
const prefix = `[${timestamp}] [DEBUG]`;
|
|
102
|
+
|
|
103
|
+
if (data === null) {
|
|
104
|
+
console.log(`${prefix} ${message}`);
|
|
105
|
+
} else {
|
|
106
|
+
// Combine message and data in single log call with [DEBUG] prefix
|
|
107
|
+
console.log(`${prefix} ${message}\n${JSON.stringify(data, null, 2)}`);
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
/**
|
|
112
|
+
* Stage boundary marker
|
|
113
|
+
*/
|
|
114
|
+
debugStage(stageNumber, stageName) {
|
|
115
|
+
const separator = '='.repeat(50);
|
|
116
|
+
this.debug(`\n${separator}`);
|
|
117
|
+
this.debug(`STAGE ${stageNumber}: ${stageName.toUpperCase()}`);
|
|
118
|
+
this.debug(separator);
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Log elapsed time for a labelled operation
|
|
123
|
+
*/
|
|
124
|
+
debugTiming(label, startMs) {
|
|
125
|
+
const elapsed = Date.now() - startMs;
|
|
126
|
+
this.debug(`[TIMING] ${label}: ${elapsed}ms (${(elapsed / 1000).toFixed(1)}s)`);
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
/**
|
|
130
|
+
* Sub-section separator for grouping related log entries within a stage
|
|
131
|
+
*/
|
|
132
|
+
debugSection(title) {
|
|
133
|
+
const line = '-'.repeat(60);
|
|
134
|
+
this.debug(`\n${line}`);
|
|
135
|
+
this.debug(`-- ${title}`);
|
|
136
|
+
this.debug(line);
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Log a full hierarchy tree for snapshot comparison across runs
|
|
141
|
+
* @param {string} label - Label for this snapshot (e.g. "PRE-RUN" or "POST-RUN")
|
|
142
|
+
* @param {Array} epics - Array of {id, name, stories:[{id, name}]} objects
|
|
143
|
+
*/
|
|
144
|
+
debugHierarchySnapshot(label, epics) {
|
|
145
|
+
this.debugSection(`${label} HIERARCHY SNAPSHOT`);
|
|
146
|
+
if (!epics || epics.length === 0) {
|
|
147
|
+
this.debug(`${label}: (empty - no epics found)`);
|
|
148
|
+
return;
|
|
149
|
+
}
|
|
150
|
+
this.debug(`${label}: ${epics.length} epics`);
|
|
151
|
+
for (const epic of epics) {
|
|
152
|
+
const storyCount = epic.stories ? epic.stories.length : 0;
|
|
153
|
+
this.debug(` [${epic.id}] ${epic.name} (${storyCount} stories)`);
|
|
154
|
+
for (const story of epic.stories || []) {
|
|
155
|
+
this.debug(` [${story.id}] ${story.name}`);
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
// Flat totals for quick comparison
|
|
159
|
+
const totalStories = epics.reduce((sum, e) => sum + (e.stories?.length || 0), 0);
|
|
160
|
+
this.debug(`${label} TOTALS: ${epics.length} epics, ${totalStories} stories`);
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
/**
 * Truncate document content for local LLMs with limited context windows.
 * For non-local providers, returns the content unchanged.
 *
 * Keeps sections 1-3 in full (Overview, Target Users, Core Features),
 * then only section headers + first paragraph for sections 4-9.
 *
 * NOTE: the trailing "[... remaining sections summarized ...]" marker is
 * appended after the hard cut, so the final string can exceed maxChars by
 * the marker's length.
 *
 * @param {string} docContent - The full document content
 * @param {number} maxChars - Maximum character length (default 6000)
 * @returns {string} Truncated or original content
 */
_truncateDocForLocalLLM(docContent, maxChars = 6000) {
  // Only the 'local' provider needs truncation; everything else passes through.
  if (this._providerName !== 'local') {
    return docContent;
  }

  if (!docContent || docContent.length <= maxChars) {
    this.debug('Local LLM truncation: content already within limit', {
      contentLength: docContent?.length || 0,
      maxChars
    });
    return docContent;
  }

  this.debug('Local LLM truncation: truncating document for context window', {
    originalLength: docContent.length,
    maxChars
  });

  const lines = docContent.split('\n');
  const sections = [];
  // Section 0 is the preamble before the first "## " header (header stays null).
  let currentSection = { header: null, lines: [] };

  // Split into sections by ## headers
  for (const line of lines) {
    if (line.startsWith('## ')) {
      if (currentSection.header !== null || currentSection.lines.length > 0) {
        sections.push(currentSection);
      }
      currentSection = { header: line, lines: [] };
    } else {
      currentSection.lines.push(line);
    }
  }
  // Push last section
  if (currentSection.header !== null || currentSection.lines.length > 0) {
    sections.push(currentSection);
  }

  // Keep first 3 sections (index 0 = preamble, 1-3 = first three ## sections) in full
  // Summarize sections 4+
  const resultParts = [];
  let sectionIndex = 0;

  for (const section of sections) {
    // Only headered sections advance the index; the preamble stays at index 0.
    if (section.header) {
      sectionIndex++;
    }

    if (sectionIndex <= 3) {
      // Keep in full
      if (section.header) resultParts.push(section.header);
      resultParts.push(...section.lines);
    } else {
      // Keep header + first paragraph only
      if (section.header) resultParts.push(section.header);
      let foundContent = false;
      for (const sLine of section.lines) {
        if (sLine.trim() === '') {
          if (foundContent) break; // End of first paragraph
          // Leading blanks before the first non-empty line are preserved.
          resultParts.push(sLine);
        } else {
          foundContent = true;
          resultParts.push(sLine);
        }
      }
    }
  }

  let result = resultParts.join('\n');

  // Final hard truncation if still over limit
  if (result.length > maxChars) {
    result = result.substring(0, maxChars);
  }

  result += '\n\n[... remaining sections summarized for context window limits ...]';

  this.debug('Local LLM truncation complete', {
    originalLength: docContent.length,
    truncatedLength: result.length
  });

  return result;
}
|
|
258
|
+
|
|
259
|
+
/**
|
|
260
|
+
* API call logging with timing
|
|
261
|
+
*/
|
|
262
|
+
async debugApiCall(operation, fn) {
|
|
263
|
+
this.debug(`\n${'='.repeat(50)}`);
|
|
264
|
+
this.debug(`LLM API CALL: ${operation}`);
|
|
265
|
+
this.debug('='.repeat(50));
|
|
266
|
+
this.debug(`Provider: ${this._providerName}`);
|
|
267
|
+
this.debug(`Model: ${this._modelName}`);
|
|
268
|
+
|
|
269
|
+
const startTime = Date.now();
|
|
270
|
+
try {
|
|
271
|
+
const result = await fn();
|
|
272
|
+
const duration = Date.now() - startTime;
|
|
273
|
+
|
|
274
|
+
this.debug(`Response received (${duration}ms)`);
|
|
275
|
+
return result;
|
|
276
|
+
} catch (error) {
|
|
277
|
+
const duration = Date.now() - startTime;
|
|
278
|
+
this.debug(`API call failed after ${duration}ms`, {
|
|
279
|
+
error: error.message,
|
|
280
|
+
stack: error.stack
|
|
281
|
+
});
|
|
282
|
+
throw error;
|
|
283
|
+
}
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
readCeremonyConfig() {
|
|
287
|
+
try {
|
|
288
|
+
const config = JSON.parse(fs.readFileSync(this.avcConfigPath, 'utf8'));
|
|
289
|
+
const ceremony = config.settings?.ceremonies?.find(c => c.name === this.ceremonyName);
|
|
290
|
+
|
|
291
|
+
if (!ceremony) {
|
|
292
|
+
sendWarning(`Ceremony '${this.ceremonyName}' not found in config, using defaults`);
|
|
293
|
+
return {
|
|
294
|
+
provider: 'claude',
|
|
295
|
+
model: 'claude-sonnet-4-5-20250929',
|
|
296
|
+
stagesConfig: null
|
|
297
|
+
};
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
return {
|
|
301
|
+
provider: ceremony.provider || 'claude',
|
|
302
|
+
model: ceremony.defaultModel || 'claude-sonnet-4-5-20250929',
|
|
303
|
+
stagesConfig: ceremony.stages || null
|
|
304
|
+
};
|
|
305
|
+
} catch (error) {
|
|
306
|
+
sendWarning(`Could not read ceremony config: ${error.message}`);
|
|
307
|
+
return {
|
|
308
|
+
provider: 'claude',
|
|
309
|
+
model: 'claude-sonnet-4-5-20250929',
|
|
310
|
+
stagesConfig: null
|
|
311
|
+
};
|
|
312
|
+
}
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
/**
|
|
316
|
+
* Get provider and model for a specific stage
|
|
317
|
+
* Falls back to ceremony-level config if stage-specific config not found
|
|
318
|
+
* @param {string} stageName - Stage name ('decomposition', 'validation', 'doc-distribution')
|
|
319
|
+
* @returns {Object} { provider, model }
|
|
320
|
+
*/
|
|
321
|
+
getProviderForStage(stageName) {
|
|
322
|
+
// Check if stage-specific config exists
|
|
323
|
+
if (this.stagesConfig && this.stagesConfig[stageName]) {
|
|
324
|
+
const stageConfig = this.stagesConfig[stageName];
|
|
325
|
+
return {
|
|
326
|
+
provider: stageConfig.provider || this._providerName,
|
|
327
|
+
model: stageConfig.model || this._modelName
|
|
328
|
+
};
|
|
329
|
+
}
|
|
330
|
+
|
|
331
|
+
// Fall back to ceremony-level config
|
|
332
|
+
return {
|
|
333
|
+
provider: this._providerName,
|
|
334
|
+
model: this._modelName
|
|
335
|
+
};
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
/**
|
|
339
|
+
* Get or create LLM provider for a specific stage
|
|
340
|
+
* @param {string} stageName - Stage name ('decomposition', 'validation', 'doc-distribution')
|
|
341
|
+
* @returns {Promise<LLMProvider>} LLM provider instance
|
|
342
|
+
*/
|
|
343
|
+
async getProviderForStageInstance(stageName) {
|
|
344
|
+
let { provider, model } = this.getProviderForStage(stageName);
|
|
345
|
+
|
|
346
|
+
// Resolve to an available provider if current one has no credentials
|
|
347
|
+
const resolved = await LLMProvider.resolveAvailableProvider(provider, model);
|
|
348
|
+
if (resolved.fellBack) {
|
|
349
|
+
this.debug(`Provider fallback for ${stageName}: ${provider}→${resolved.provider} (${resolved.model})`);
|
|
350
|
+
console.warn(`[WARN] ${provider} has no API key — falling back to ${resolved.provider} for stage "${stageName}"`);
|
|
351
|
+
provider = resolved.provider;
|
|
352
|
+
model = resolved.model;
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
// Check if we already have a provider for this stage
|
|
356
|
+
const cacheKey = `${stageName}:${provider}:${model}`;
|
|
357
|
+
|
|
358
|
+
if (this._stageProviders[cacheKey]) {
|
|
359
|
+
this.debug(`Using cached provider for ${stageName}: ${provider} (${model})`);
|
|
360
|
+
return this._stageProviders[cacheKey];
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
// Create new provider
|
|
364
|
+
this.debug(`Creating new provider for ${stageName}: ${provider} (${model})`);
|
|
365
|
+
const providerInstance = await LLMProvider.create(provider, model);
|
|
366
|
+
this._registerTokenCallback(providerInstance, `${this.ceremonyName}-${stageName}`);
|
|
367
|
+
providerInstance.setPromptLogger(this._promptLogger, stageName);
|
|
368
|
+
this._stageProviders[cacheKey] = providerInstance;
|
|
369
|
+
|
|
370
|
+
return providerInstance;
|
|
371
|
+
}
|
|
372
|
+
|
|
373
|
+
/**
|
|
374
|
+
* Run an async LLM fn with a periodic elapsed-time heartbeat detail message.
|
|
375
|
+
* Keeps the UI updated while waiting for long LLM calls to complete.
|
|
376
|
+
*/
|
|
377
|
+
async _withProgressHeartbeat(fn, getMsg, progressCallback, intervalMs = 5000) {
|
|
378
|
+
const startTime = Date.now();
|
|
379
|
+
let lastMsg = null;
|
|
380
|
+
const timer = setInterval(() => {
|
|
381
|
+
const elapsed = Math.round((Date.now() - startTime) / 1000);
|
|
382
|
+
const msg = getMsg(elapsed);
|
|
383
|
+
if (msg != null && msg !== lastMsg) {
|
|
384
|
+
lastMsg = msg;
|
|
385
|
+
progressCallback?.(null, null, { detail: msg })?.catch?.(() => {});
|
|
386
|
+
}
|
|
387
|
+
}, intervalMs);
|
|
388
|
+
try {
|
|
389
|
+
return await fn();
|
|
390
|
+
} finally {
|
|
391
|
+
clearInterval(timer);
|
|
392
|
+
}
|
|
393
|
+
}
|
|
394
|
+
|
|
395
|
+
/**
|
|
396
|
+
* Run async task functions with bounded concurrency.
|
|
397
|
+
* @param {Array<() => Promise>} tasks - Array of functions that return promises
|
|
398
|
+
* @param {number} concurrency - Max parallel tasks
|
|
399
|
+
* @returns {Promise<Array>} Results in original order
|
|
400
|
+
*/
|
|
401
|
+
async _runWithConcurrency(tasks, concurrency) {
|
|
402
|
+
const results = [];
|
|
403
|
+
const executing = new Set();
|
|
404
|
+
for (let i = 0; i < tasks.length; i++) {
|
|
405
|
+
const p = tasks[i]().then(result => { executing.delete(p); return result; });
|
|
406
|
+
executing.add(p);
|
|
407
|
+
results.push(p);
|
|
408
|
+
if (executing.size >= concurrency) {
|
|
409
|
+
await Promise.race(executing);
|
|
410
|
+
}
|
|
411
|
+
}
|
|
412
|
+
return Promise.all(results);
|
|
413
|
+
}
|
|
414
|
+
|
|
415
|
+
/**
|
|
416
|
+
* Aggregate token usage across all provider instances:
|
|
417
|
+
* - this.llmProvider (Stage 5 validation fallback)
|
|
418
|
+
* - this._stageProviders (Stage 4 decomposition, Stage 7 doc-distribution)
|
|
419
|
+
* - this._validator._validatorProviders (Stage 5 per-validator providers)
|
|
420
|
+
*/
|
|
421
|
+
_aggregateAllTokenUsage() {
|
|
422
|
+
const totals = { inputTokens: 0, outputTokens: 0, totalTokens: 0, totalCalls: 0, estimatedCost: 0 };
|
|
423
|
+
const add = (provider) => {
|
|
424
|
+
if (!provider) return;
|
|
425
|
+
const u = provider.getTokenUsage();
|
|
426
|
+
totals.inputTokens += u.inputTokens || 0;
|
|
427
|
+
totals.outputTokens += u.outputTokens || 0;
|
|
428
|
+
totals.totalTokens += u.totalTokens || (u.inputTokens || 0) + (u.outputTokens || 0);
|
|
429
|
+
totals.totalCalls += u.totalCalls || 0;
|
|
430
|
+
totals.estimatedCost += u.estimatedCost || 0;
|
|
431
|
+
};
|
|
432
|
+
add(this.llmProvider);
|
|
433
|
+
for (const p of Object.values(this._stageProviders)) add(p);
|
|
434
|
+
if (this._validator) {
|
|
435
|
+
for (const p of Object.values(this._validator._validatorProviders)) add(p);
|
|
436
|
+
}
|
|
437
|
+
return totals;
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
/**
|
|
441
|
+
* Register a per-call token callback on a provider instance.
|
|
442
|
+
* Each LLM API call fires addIncremental() so tokens are persisted crash-safely.
|
|
443
|
+
* @param {object} provider - LLM provider instance
|
|
444
|
+
* @param {string} [stageKey] - Stage-specific key (e.g. 'sprint-planning-decomposition').
|
|
445
|
+
* Defaults to this.ceremonyName so the parent roll-up bucket still accumulates totals.
|
|
446
|
+
*/
|
|
447
|
+
_registerTokenCallback(provider, stageKey) {
|
|
448
|
+
if (!provider) return;
|
|
449
|
+
const key = stageKey ?? this.ceremonyName;
|
|
450
|
+
provider.onCall((delta) => {
|
|
451
|
+
this.tokenTracker.addIncremental(key, delta);
|
|
452
|
+
if (delta.model) {
|
|
453
|
+
const cost = this.tokenTracker.calculateCost(delta.input, delta.output, delta.model);
|
|
454
|
+
this._runningCost += cost?.total ?? 0;
|
|
455
|
+
}
|
|
456
|
+
});
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
async initializeLLMProvider() {
|
|
460
|
+
try {
|
|
461
|
+
this.llmProvider = await LLMProvider.create(this._providerName, this._modelName);
|
|
462
|
+
this._registerTokenCallback(this.llmProvider);
|
|
463
|
+
this.llmProvider.setPromptLogger(this._promptLogger, 'main');
|
|
464
|
+
return this.llmProvider;
|
|
465
|
+
} catch (error) {
|
|
466
|
+
this.debug(`Could not initialize ${this._providerName} provider`);
|
|
467
|
+
this.debug(`Error: ${error.message}`);
|
|
468
|
+
return null;
|
|
469
|
+
}
|
|
470
|
+
}
|
|
471
|
+
|
|
472
|
+
async retryWithBackoff(fn, operation, maxRetries = 3, options = {}) {
|
|
473
|
+
const {
|
|
474
|
+
baseDelay = this._providerName === 'local' ? 500 : 1000,
|
|
475
|
+
multiplier = this._providerName === 'local' ? 1.5 : 2,
|
|
476
|
+
maxDelay = this._providerName === 'local' ? 5000 : 30000,
|
|
477
|
+
} = options;
|
|
478
|
+
|
|
479
|
+
for (let attempt = 1; attempt <= maxRetries; attempt++) {
|
|
480
|
+
try {
|
|
481
|
+
return await fn();
|
|
482
|
+
} catch (error) {
|
|
483
|
+
const isLastAttempt = attempt === maxRetries;
|
|
484
|
+
const isRetriable = error.message?.includes('rate limit') ||
|
|
485
|
+
error.message?.includes('timeout') ||
|
|
486
|
+
error.message?.includes('503') ||
|
|
487
|
+
error.message?.toLowerCase().includes('connection error') ||
|
|
488
|
+
error.message?.toLowerCase().includes('econnreset') ||
|
|
489
|
+
error.message?.toLowerCase().includes('network error') ||
|
|
490
|
+
error.message?.toLowerCase().includes('terminated') ||
|
|
491
|
+
error.message?.toLowerCase().includes('econnrefused') ||
|
|
492
|
+
error.message?.toLowerCase().includes('socket hang up');
|
|
493
|
+
|
|
494
|
+
if (isLastAttempt || !isRetriable) {
|
|
495
|
+
throw error;
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
const delay = Math.min(Math.pow(multiplier, attempt) * baseDelay, maxDelay);
|
|
499
|
+
this.debug(`Retry ${attempt}/${maxRetries} in ${delay/1000}s: ${operation}`);
|
|
500
|
+
this.debug(`Error: ${error.message}`);
|
|
501
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
502
|
+
}
|
|
503
|
+
}
|
|
504
|
+
}
|
|
505
|
+
|
|
506
|
+
// STAGE 1: Validate prerequisites
|
|
507
|
+
validatePrerequisites() {
|
|
508
|
+
this.debugStage(1, 'Validate Prerequisites');
|
|
509
|
+
this.debug('Checking prerequisites...');
|
|
510
|
+
|
|
511
|
+
if (!fs.existsSync(this.projectDocPath)) {
|
|
512
|
+
this.debug(`✗ Project doc missing: ${this.projectDocPath}`);
|
|
513
|
+
throw new Error(
|
|
514
|
+
'Project documentation not found. Please run /sponsor-call first.'
|
|
515
|
+
);
|
|
516
|
+
}
|
|
517
|
+
const docSize = fs.statSync(this.projectDocPath).size;
|
|
518
|
+
this.debug(`✓ Project doc exists: ${this.projectDocPath} (${docSize} bytes)`);
|
|
519
|
+
|
|
520
|
+
this.debug('Prerequisites validated successfully');
|
|
521
|
+
}
|
|
522
|
+
|
|
523
|
+
// STAGE 2: Read existing hierarchy
/**
 * Scan this.projectPath for epic/story work.json files produced by prior runs.
 * Epics are top-level directories containing a work.json with type === 'epic';
 * stories are one level deeper with type === 'story'.
 * @returns {{
 *   existingEpics: Map<string, string>,    // lowercased epic name -> epic id
 *   existingStories: Map<string, string>,  // lowercased story name -> story id
 *   maxEpicNum: {value: number},           // highest N seen in context-NNNN ids
 *   maxStoryNums: Map<string, number>,     // epic id -> highest story number
 *   preRunSnapshot: Array<object>          // rich snapshot for cross-run comparison
 * }}
 */
readExistingHierarchy() {
  this.debugStage(2, 'Read Existing Hierarchy');

  const existingEpics = new Map(); // name -> id
  const existingStories = new Map(); // name -> id
  const maxEpicNum = { value: 0 };
  const maxStoryNums = new Map(); // epicId -> maxNum
  const preRunSnapshot = []; // Rich snapshot for cross-run comparison

  // First run: nothing on disk yet — return empty structures.
  if (!fs.existsSync(this.projectPath)) {
    this.debug('Project path does not exist yet (first run)');
    return { existingEpics, existingStories, maxEpicNum, maxStoryNums, preRunSnapshot };
  }

  this.debug(`Scanning directory: ${this.projectPath}`);
  // Sorted for deterministic scan order across runs.
  const dirs = fs.readdirSync(this.projectPath).sort();
  this.debug(`Found ${dirs.length} top-level entries to scan`);

  // Scan top-level directories (epics)
  for (const dir of dirs) {
    const epicWorkJsonPath = path.join(this.projectPath, dir, 'work.json');

    // Directories without a work.json are not work items — skip silently.
    if (!fs.existsSync(epicWorkJsonPath)) continue;

    try {
      const work = JSON.parse(fs.readFileSync(epicWorkJsonPath, 'utf8'));

      if (work.type === 'epic') {
        this.debug(`Found existing Epic: ${work.id} "${work.name}" [status=${work.status}, created=${work.metadata?.created || 'unknown'}]`);
        // Name keys are lowercased so later duplicate checks are case-insensitive.
        existingEpics.set(work.name.toLowerCase(), work.id);

        // Snapshot entry; description is truncated to keep the snapshot compact.
        const epicEntry = {
          id: work.id,
          name: work.name,
          domain: work.domain || '',
          status: work.status || 'unknown',
          created: work.metadata?.created || null,
          ceremony: work.metadata?.ceremony || null,
          description: (work.description || '').substring(0, 120),
          stories: []
        };

        // Track max epic number (context-0001 → 1)
        const match = work.id.match(/^context-(\d+)$/);
        if (match) {
          const num = parseInt(match[1], 10);
          if (num > maxEpicNum.value) maxEpicNum.value = num;
        }

        // Scan for nested stories under this epic
        const epicDir = path.join(this.projectPath, dir);
        const epicSubdirs = fs.readdirSync(epicDir).filter(subdir => {
          const subdirPath = path.join(epicDir, subdir);
          return fs.statSync(subdirPath).isDirectory();
        }).sort();

        this.debug(`Scanning ${epicSubdirs.length} subdirectories under epic ${work.id}`);

        for (const storyDir of epicSubdirs) {
          const storyWorkJsonPath = path.join(epicDir, storyDir, 'work.json');

          if (!fs.existsSync(storyWorkJsonPath)) continue;

          try {
            const storyWork = JSON.parse(fs.readFileSync(storyWorkJsonPath, 'utf8'));

            if (storyWork.type === 'story') {
              this.debug(` Found existing Story: ${storyWork.id} "${storyWork.name}" [status=${storyWork.status}, created=${storyWork.metadata?.created || 'unknown'}]`);
              existingStories.set(storyWork.name.toLowerCase(), storyWork.id);
              epicEntry.stories.push({
                id: storyWork.id,
                name: storyWork.name,
                status: storyWork.status || 'unknown',
                created: storyWork.metadata?.created || null,
                userType: storyWork.userType || ''
              });

              // Track max story number per epic (context-0001-0003 → epic 0001, story 3)
              const storyMatch = storyWork.id.match(/^context-(\d+)-(\d+)$/);
              if (storyMatch) {
                const epicId = `context-${storyMatch[1]}`;
                const storyNum = parseInt(storyMatch[2], 10);

                if (!maxStoryNums.has(epicId)) {
                  maxStoryNums.set(epicId, 0);
                }
                if (storyNum > maxStoryNums.get(epicId)) {
                  maxStoryNums.set(epicId, storyNum);
                }
              }
            }
          } catch (error) {
            // Malformed story file: log and keep scanning the rest of the epic.
            this.debug(`Could not parse ${storyWorkJsonPath}: ${error.message}`);
          }
        }

        preRunSnapshot.push(epicEntry);
      }
    } catch (error) {
      // Malformed epic file: logged AND surfaced to the user (stories are debug-only).
      this.debug(`Could not parse ${epicWorkJsonPath}: ${error.message}`);
      sendWarning(`Could not parse ${epicWorkJsonPath}: ${error.message}`);
    }
  }

  // Log complete pre-run state for cross-run comparison
  this.debugSection('PRE-RUN STATE - Full Existing Hierarchy');
  this.debug('Pre-run counts', {
    epics: existingEpics.size,
    stories: existingStories.size,
    maxEpicNum: maxEpicNum.value,
    maxStoryNums: Object.fromEntries(maxStoryNums)
  });
  this.debugHierarchySnapshot('PRE-RUN', preRunSnapshot);
  this.debug('All existing epic names (for duplicate detection)', Array.from(existingEpics.keys()));
  this.debug('All existing story names (for duplicate detection)', Array.from(existingStories.keys()));

  return { existingEpics, existingStories, maxEpicNum, maxStoryNums, preRunSnapshot };
}
|
|
642
|
+
|
|
643
|
+
// STAGE 3: Collect new scope (optional expansion)
|
|
644
|
+
async collectNewScope() {
|
|
645
|
+
this.debugStage(3, 'Collect New Scope');
|
|
646
|
+
|
|
647
|
+
this.debug(`Reading project doc: ${this.projectDocPath}`);
|
|
648
|
+
const docContent = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
649
|
+
this.debug(`Doc content loaded (${docContent.length} chars)`);
|
|
650
|
+
|
|
651
|
+
this.debugSection('SCOPE TEXT SENT TO LLM (full doc.md)');
|
|
652
|
+
this.debug(`Full doc content (${docContent.length} chars):\n` + docContent);
|
|
653
|
+
|
|
654
|
+
return docContent;
|
|
655
|
+
}
|
|
656
|
+
|
|
657
|
+
// STAGE 4: Decompose into Epics + Stories
/**
 * Ask the LLM to decompose the project scope into NEW epics and stories,
 * excluding anything whose name already exists on disk.
 * @param {string} scope - Full project scope text (doc.md contents)
 * @param {Map<string,string>} existingEpics - lowercased epic name -> id (names fed to the prompt as "do not duplicate")
 * @param {Map<string,string>} existingStories - lowercased story name -> id
 * @param {Function|null} [progressCallback] - Optional progress reporter
 * @param {string[]|null} [previousIssues] - Quality issues from a prior attempt, injected into the prompt on retry
 * @returns {Promise<object>} Parsed hierarchy ({ epics: [...], validation })
 * @throws {Error} When the LLM response has no epics array
 */
async decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback = null, previousIssues = null) {
  this.debugStage(4, 'Decompose into Epics + Stories');

  this.debug('Stage 1/3: Decomposing scope into Epics and Stories');

  // Get stage-specific provider for decomposition
  const provider = await this.getProviderForStageInstance('decomposition');
  const { provider: providerName, model: modelName } = this.getProviderForStage('decomposition');

  this.debug('Using provider for decomposition', { provider: providerName, model: modelName });
  await progressCallback?.(null, `Using model: ${modelName}`, {});

  // Read agent instructions (system prompt for the decomposer)
  const agentPath = path.join(this.agentsPath, 'epic-story-decomposer.md');
  this.debug(`Loading agent: ${agentPath}`);
  const epicStoryDecomposerAgent = fs.readFileSync(agentPath, 'utf8');
  this.debug(`Agent loaded (${epicStoryDecomposerAgent.length} bytes)`);

  // Build prompt with duplicate detection
  this.debug('Constructing decomposition prompt...');
  const existingEpicNames = Array.from(existingEpics.keys());
  const existingStoryNames = Array.from(existingStories.keys());

  let prompt = `Given the following project scope:

**Initial Scope (Features to Implement):**
${scope}
`;

  // Existing names are listed so the model avoids regenerating them.
  if (existingEpicNames.length > 0) {
    prompt += `\n**Existing Epics (DO NOT DUPLICATE):**
${existingEpicNames.map(name => `- ${name}`).join('\n')}
`;
  }

  if (existingStoryNames.length > 0) {
    prompt += `\n**Existing Stories (DO NOT DUPLICATE):**
${existingStoryNames.map(name => `- ${name}`).join('\n')}
`;
  }

  prompt += `\nDecompose this project into NEW Epics (domain-based groupings) and Stories (user-facing capabilities per Epic) — create as many as needed to fully cover the scope.

IMPORTANT: Only generate NEW Epics and Stories. Skip any that match the existing ones.

Return your response as JSON following the exact structure specified in your instructions.`;

  this.debug('Prompt includes', {
    scopeLength: scope.length,
    existingEpics: existingEpicNames.length,
    existingStories: existingStoryNames.length,
    totalPromptSize: prompt.length
  });

  const existingNote = existingEpicNames.length > 0
    ? ` (skipping ${existingEpicNames.length} existing epics)`
    : '';
  await progressCallback?.(null, `Calling LLM to decompose scope${existingNote}…`, {});
  await progressCallback?.(null, null, { detail: `Sending to ${providerName} (${modelName})…` });

  // Inject quality issues from previous attempt for retry
  // NOTE(review): this runs AFTER the 'Prompt includes' debug above, so the
  // logged totalPromptSize does not include the injected issues — confirm if
  // that logging gap is intentional.
  if (previousIssues?.length > 0) {
    prompt += `\n\n**CRITICAL: Your previous decomposition had these quality issues. FIX ALL of them:**\n\n${previousIssues.map((issue, i) => `${i + 1}. ${issue}`).join('\n')}\n\n**Specifically:**\n- Every epic MUST have at least 1 story (most should have 2-5)\n- All epic IDs must match format: context-XXXX (e.g., context-0001)\n- All story IDs must match format: context-XXXX-XXXX (e.g., context-0001-0001)\n- Every story must have 3-8 acceptance criteria\n`;
  }

  // Log full decomposition prompt for duplicate detection analysis
  this.debug('\n' + '='.repeat(80));
  this.debug('FULL DECOMPOSITION PROMPT:');
  this.debug('='.repeat(80));
  this.debug(prompt);
  this.debug('='.repeat(80) + '\n');

  // LLM call with full request/response logging
  let hierarchy = await this.debugApiCall(
    'Epic/Story Decomposition',
    async () => {
      this.debug('Request payload', {
        model: modelName,
        maxTokens: 8000,
        agentInstructions: `${epicStoryDecomposerAgent.substring(0, 100)}...`,
        promptPreview: `${prompt.substring(0, 200)}...`
      });

      this.debug('Sending request to LLM API...');

      // Heartbeat wraps retryWithBackoff wraps the actual generateJSON call.
      const result = await this._withProgressHeartbeat(
        () => this.retryWithBackoff(
          () => provider.generateJSON(prompt, epicStoryDecomposerAgent),
          'Epic/Story decomposition'
        ),
        // Phase messages keyed to elapsed seconds — purely cosmetic progress text.
        (elapsed) => {
          if (elapsed < 20) return 'Reading scope and project context…';
          if (elapsed < 40) return 'Identifying domain boundaries…';
          if (elapsed < 60) return 'Structuring epics and stories…';
          if (elapsed < 80) return 'Refining decomposition…';
          if (elapsed < 100) return 'Finalizing work item hierarchy…';
          return 'Still decomposing…';
        },
        progressCallback,
        20000 // 20s interval — phase messages change each tick
      );

      // Log token usage
      const usage = provider.getTokenUsage();
      this.debug('Response tokens', {
        input: usage.inputTokens,
        output: usage.outputTokens,
        total: usage.totalTokens
      });
      await progressCallback?.(null, null, { detail: `${usage.inputTokens.toLocaleString()} in · ${usage.outputTokens.toLocaleString()} out tokens` });

      this.debug(`Response content (${usage.outputTokens} tokens)`, {
        epicCount: result.epics?.length || 0,
        totalStories: result.epics?.reduce((sum, e) => sum + (e.stories?.length || 0), 0) || 0,
        validation: result.validation
      });

      // Log full LLM response for duplicate detection analysis
      this.debug('\n' + '='.repeat(80));
      this.debug('FULL LLM RESPONSE:');
      this.debug('='.repeat(80));
      this.debug(JSON.stringify(result, null, 2));
      this.debug('='.repeat(80) + '\n');

      return result;
    }
  );

  // NOTE: Deduplication moved to Stage 4.1 (deduplicateEpicsLLM) in process() flow.
  // The inline algorithmic dedup here was insufficient for semantic duplicates.

  if (!hierarchy.epics || !Array.isArray(hierarchy.epics)) {
    this.debug('✗ Invalid decomposition response: missing epics array');
    throw new Error('Invalid decomposition response: missing epics array');
  }

  const totalStories = hierarchy.epics.reduce((sum, e) => sum + (e.stories?.length || 0), 0);
  await progressCallback?.(null, `Decomposed into ${hierarchy.epics.length} epics, ${totalStories} stories`, {});
  for (const epic of hierarchy.epics) {
    await progressCallback?.(null, ` ${epic.name} (${epic.stories?.length || 0} stories)`, {});
  }

  this.debug('Parsed hierarchy', {
    epics: hierarchy.epics.map(e => ({
      id: e.id,
      name: e.name,
      storyCount: e.stories?.length || 0
    })),
    validation: hierarchy.validation
  });

  return hierarchy;
}
|
|
812
|
+
|
|
813
|
+
/**
|
|
814
|
+
* Check decomposition quality — returns array of issues to fix on retry.
|
|
815
|
+
* Empty array = quality is acceptable.
|
|
816
|
+
*/
|
|
817
|
+
_checkDecompositionQuality(hierarchy) {
|
|
818
|
+
const issues = [];
|
|
819
|
+
|
|
820
|
+
if (!hierarchy?.epics?.length) {
|
|
821
|
+
issues.push('No epics generated — the decomposition returned an empty hierarchy.');
|
|
822
|
+
return issues;
|
|
823
|
+
}
|
|
824
|
+
|
|
825
|
+
// Check for epics with 0 stories
|
|
826
|
+
for (const epic of hierarchy.epics) {
|
|
827
|
+
if (!epic.stories || epic.stories.length === 0) {
|
|
828
|
+
issues.push(`Epic "${epic.name}" (${epic.id}) has 0 stories — every epic MUST have at least 1 story. Decompose this epic's features into user-facing stories.`);
|
|
829
|
+
}
|
|
830
|
+
}
|
|
831
|
+
|
|
832
|
+
// Check for invalid story ID formats (must be context-XXXX-XXXX)
|
|
833
|
+
for (const epic of hierarchy.epics) {
|
|
834
|
+
for (const story of epic.stories || []) {
|
|
835
|
+
if (!story.id || !/^context-\d{4}-\d{4}[a-z]?$/.test(story.id)) {
|
|
836
|
+
issues.push(`Story "${story.name}" has invalid ID "${story.id}" — must match format context-XXXX-XXXX (e.g., context-0001-0001).`);
|
|
837
|
+
}
|
|
838
|
+
}
|
|
839
|
+
}
|
|
840
|
+
|
|
841
|
+
// Check for invalid epic ID formats
|
|
842
|
+
for (const epic of hierarchy.epics) {
|
|
843
|
+
if (!epic.id || !/^context-\d{4}$/.test(epic.id)) {
|
|
844
|
+
issues.push(`Epic "${epic.name}" has invalid ID "${epic.id}" — must match format context-XXXX (e.g., context-0001).`);
|
|
845
|
+
}
|
|
846
|
+
}
|
|
847
|
+
|
|
848
|
+
// Check total story count (a calculator should have at least 5 stories)
|
|
849
|
+
const totalStories = hierarchy.epics.reduce((sum, e) => sum + (e.stories?.length || 0), 0);
|
|
850
|
+
if (totalStories < hierarchy.epics.length) {
|
|
851
|
+
issues.push(`Only ${totalStories} total stories for ${hierarchy.epics.length} epics — generate more stories to cover the full scope.`);
|
|
852
|
+
}
|
|
853
|
+
|
|
854
|
+
// Check stories have acceptance criteria
|
|
855
|
+
for (const epic of hierarchy.epics) {
|
|
856
|
+
for (const story of epic.stories || []) {
|
|
857
|
+
if (!story.acceptance || story.acceptance.length === 0) {
|
|
858
|
+
issues.push(`Story "${story.name}" has no acceptance criteria — each story must have 3-8 testable ACs.`);
|
|
859
|
+
}
|
|
860
|
+
}
|
|
861
|
+
}
|
|
862
|
+
|
|
863
|
+
return issues;
|
|
864
|
+
}
|
|
865
|
+
|
|
866
|
+
/**
|
|
867
|
+
* Merge source epic data into target epic (stories, features, dependencies).
|
|
868
|
+
* Shared by both LLM-based and algorithmic dedup paths.
|
|
869
|
+
*/
|
|
870
|
+
_mergeEpicData(target, source) {
|
|
871
|
+
this.debug(`Merging duplicate epic "${source.name}" into "${target.name}"`);
|
|
872
|
+
|
|
873
|
+
// Merge stories, avoiding duplicates by name
|
|
874
|
+
const existingStoryNames = new Set((target.stories || []).map(s => s.name?.toLowerCase()));
|
|
875
|
+
for (const story of (source.stories || [])) {
|
|
876
|
+
if (!existingStoryNames.has(story.name?.toLowerCase())) {
|
|
877
|
+
target.stories = target.stories || [];
|
|
878
|
+
target.stories.push(story);
|
|
879
|
+
existingStoryNames.add(story.name?.toLowerCase());
|
|
880
|
+
}
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
// Merge features (deduplicate)
|
|
884
|
+
if (source.features?.length) {
|
|
885
|
+
const existingFeatures = new Set((target.features || []).map(f => f.toLowerCase()));
|
|
886
|
+
target.features = target.features || [];
|
|
887
|
+
for (const feature of source.features) {
|
|
888
|
+
if (!existingFeatures.has(feature.toLowerCase())) {
|
|
889
|
+
target.features.push(feature);
|
|
890
|
+
existingFeatures.add(feature.toLowerCase());
|
|
891
|
+
}
|
|
892
|
+
}
|
|
893
|
+
}
|
|
894
|
+
|
|
895
|
+
// Merge dependencies (deduplicate)
|
|
896
|
+
if (source.dependencies?.length) {
|
|
897
|
+
const existingDeps = new Set((target.dependencies || []).map(d => d.toLowerCase()));
|
|
898
|
+
target.dependencies = target.dependencies || [];
|
|
899
|
+
for (const dep of source.dependencies) {
|
|
900
|
+
if (!existingDeps.has(dep.toLowerCase())) {
|
|
901
|
+
target.dependencies.push(dep);
|
|
902
|
+
existingDeps.add(dep.toLowerCase());
|
|
903
|
+
}
|
|
904
|
+
}
|
|
905
|
+
}
|
|
906
|
+
}
|
|
907
|
+
|
|
908
|
+
/**
 * Algorithmic fallback: deduplicate epics by Jaccard similarity.
 * Used when LLM-based dedup fails or as a post-LLM cap enforcer.
 * Mutates and returns `hierarchy`; merged epics are folded into the
 * earlier-indexed epic via _mergeEpicData.
 * @param {object} hierarchy - { epics: [...] } to deduplicate in place
 * @returns {object} The same hierarchy object, possibly with fewer epics
 */
_deduplicateEpicsAlgorithmic(hierarchy) {
  if (!hierarchy.epics || hierarchy.epics.length <= 1) return hierarchy;

  // Stop words that inflate Jaccard without carrying domain meaning
  const STOP_WORDS = new Set(['and', 'the', 'for', 'with', 'from', 'into', 'via', 'based', 'system', 'engine', 'management', 'handling', 'service', 'services', 'module', 'platform']);

  // Lowercase, strip punctuation, keep words longer than 2 chars and non-stopwords.
  const tokenize = (text) => {
    return new Set(
      text.toLowerCase().replace(/[^a-z0-9\s]/g, '').split(/\s+/)
        .filter(w => w.length > 2 && !STOP_WORDS.has(w))
    );
  };

  // |A ∩ B| / |A ∪ B|; 0 when both sets are empty.
  const jaccardSimilarity = (setA, setB) => {
    const intersection = [...setA].filter(w => setB.has(w)).length;
    const union = new Set([...setA, ...setB]).size;
    return union === 0 ? 0 : intersection / union;
  };

  // Composite similarity: name (40%) + domain (30%) + features (30%)
  const epicSimilarity = (a, b) => {
    const nameSim = jaccardSimilarity(tokenize(a.name || ''), tokenize(b.name || ''));

    // Domain: exact match = 1.0, Jaccard on tokens otherwise
    const domA = (a.domain || '').toLowerCase();
    const domB = (b.domain || '').toLowerCase();
    const domainSim = domA === domB && domA ? 1.0 : jaccardSimilarity(tokenize(domA), tokenize(domB));

    // Features: extract the feature-name prefix (before parentheses), tokenize all, Jaccard
    const extractFeatureWords = (features) => {
      const words = new Set();
      for (const f of (features || [])) {
        const prefix = f.split('(')[0].trim();
        for (const w of prefix.toLowerCase().replace(/[^a-z0-9\s]/g, ' ').split(/\s+/)) {
          if (w.length > 2 && !STOP_WORDS.has(w)) words.add(w);
        }
      }
      return words;
    };
    const featSim = jaccardSimilarity(extractFeatureWords(a.features), extractFeatureWords(b.features));

    const combined = nameSim * 0.4 + domainSim * 0.3 + featSim * 0.3;
    return { combined, nameSim, domainSim, featSim };
  };

  const originalCount = hierarchy.epics.length;

  // Pass 1: Merge epics that are duplicates or semantically overlapping.
  // The scan restarts from the top after every merge so a merged epic's
  // enlarged story/feature set is re-compared against all remaining epics.
  let merged = true;
  while (merged) {
    merged = false;
    for (let i = 0; i < hierarchy.epics.length && !merged; i++) {
      for (let j = i + 1; j < hierarchy.epics.length && !merged; j++) {
        const sim = epicSimilarity(hierarchy.epics[i], hierarchy.epics[j]);
        // Merge when: strong combined score, OR strongly-matching names,
        // OR moderately-matching names with overlapping feature vocabulary.
        const shouldMerge = sim.combined > 0.5
          || sim.nameSim >= 0.5
          || (sim.nameSim >= 0.35 && sim.featSim >= 0.4);
        if (shouldMerge) {
          this.debug(`Epic similarity ${sim.combined.toFixed(2)} between "${hierarchy.epics[i].name}" and "${hierarchy.epics[j].name}" (name=${sim.nameSim.toFixed(2)}, domain=${sim.domainSim.toFixed(2)}, features=${sim.featSim.toFixed(2)})`);
          // Later epic (j) is folded into the earlier one (i), then removed.
          this._mergeEpicData(hierarchy.epics[i], hierarchy.epics[j]);
          hierarchy.epics.splice(j, 1);
          merged = true;
        }
      }
    }
  }

  if (hierarchy.epics.length < originalCount) {
    this.debug(`Epic deduplication: ${originalCount} → ${hierarchy.epics.length} epics`);
  } else {
    this.debug('Epic deduplication: no duplicates found');
  }

  return hierarchy;
}
|
|
987
|
+
|
|
988
|
+
/**
 * LLM-based semantic duplicate detection for epics and stories.
 * Sends compact summaries of the newly generated epics (plus any epics already
 * on disk from prior runs) to the duplicate-detector agent, then applies the
 * returned merge/skip instructions via _applyDuplicateResults.
 * Falls back to _deduplicateEpicsAlgorithmic on any error.
 * @param {object} hierarchy - Newly generated { epics: [...] }
 * @param {Array<object>} preRunSnapshot - On-disk epic snapshot from readExistingHierarchy
 * @param {Function|null} progressCallback - Optional progress reporter
 * @returns {Promise<object>} Deduplicated hierarchy (never throws to the caller)
 */
async deduplicateEpicsLLM(hierarchy, preRunSnapshot, progressCallback) {
  this.debugStage(4.1, 'LLM-Based Duplicate Detection');

  // With a single epic there is nothing to compare against.
  if (!hierarchy.epics || hierarchy.epics.length <= 1) {
    this.debug('Skipping dedup: ≤1 epic');
    return hierarchy;
  }

  try {
    // Reuses the decomposition stage's provider/model configuration.
    const provider = await this.getProviderForStageInstance('decomposition');
    const { provider: providerName, model: modelName } = this.getProviderForStage('decomposition');

    const agentInstructions = loadAgent('duplicate-detector.md');
    this.debug('Duplicate detector agent loaded', { bytes: agentInstructions.length });

    // Build compact summaries of new epics. `index` is the epic's position in
    // hierarchy.epics — the LLM references these indices in its response.
    // Descriptions/features are truncated to keep the prompt small.
    const newEpicSummaries = hierarchy.epics.map((e, i) => ({
      index: i,
      name: e.name,
      domain: e.domain || '',
      description: (e.description || '').substring(0, 300),
      features: (e.features || []).slice(0, 10),
      storyNames: (e.stories || []).map(s => s.name)
    }));

    // Build existing epics context from preRunSnapshot (no index — these are
    // referenced by name in existingOverlaps).
    const existingEpicSummaries = (preRunSnapshot || []).map(e => ({
      name: e.name,
      domain: e.domain || '',
      description: (e.description || '').substring(0, 300),
      storyNames: (e.stories || []).map(s => s.name)
    }));

    const prompt = `Analyze the following newly generated epics for duplicates and overlaps.

**New Epics (just generated):**
${JSON.stringify(newEpicSummaries, null, 2)}

${existingEpicSummaries.length > 0 ? `**Existing Epics (already on disk from prior runs):**
${JSON.stringify(existingEpicSummaries, null, 2)}` : '**No existing epics on disk.**'}

Detect:
1. New epics that should be merged together (epicMergeGroups)
2. Stories within the same epic that should be merged (storyMergeGroups)
3. New epics that duplicate existing on-disk epics (existingOverlaps)

Return JSON following the exact structure in your instructions.`;

    this.debug('Duplicate detection prompt built', {
      newEpics: newEpicSummaries.length,
      existingEpics: existingEpicSummaries.length,
      promptLength: prompt.length
    });

    await progressCallback?.(null, `Detecting duplicates via ${providerName} (${modelName})…`, {});

    // Heartbeat + retry wrap the actual generateJSON call (default 5s ticks).
    const result = await this.debugApiCall(
      'Duplicate Detection',
      async () => {
        return await this._withProgressHeartbeat(
          () => this.retryWithBackoff(
            () => provider.generateJSON(prompt, agentInstructions),
            'duplicate-detection'
          ),
          (elapsed) => `Analyzing duplicates… ${elapsed}s`,
          progressCallback
        );
      }
    );

    this.debug('LLM duplicate detection result', result);

    // Apply the LLM results
    hierarchy = this._applyDuplicateResults(hierarchy, result);

    const totalStories = hierarchy.epics.reduce((sum, e) => sum + (e.stories?.length || 0), 0);
    await progressCallback?.(null, `After dedup: ${hierarchy.epics.length} epics, ${totalStories} stories`, {});

    return hierarchy;
  } catch (error) {
    // Any failure (provider, agent load, JSON parsing, result application)
    // degrades to the algorithmic path instead of aborting the ceremony.
    this.debug(`LLM duplicate detection failed, falling back to algorithmic: ${error.message}`, {
      stack: error.stack
    });
    sendWarning(`LLM dedup failed (${error.message}), using algorithmic fallback`);
    return this._deduplicateEpicsAlgorithmic(hierarchy);
  }
}
|
|
1079
|
+
|
|
1080
|
+
/**
 * Apply LLM duplicate detection results to the hierarchy.
 * Processes: existingOverlaps → epicMergeGroups → storyMergeGroups (in that order).
 * All indices in the LLM response refer to the ORIGINAL array positions.
 * We maintain an oldOriginal→currentPosition mapping throughout.
 *
 * Mutates `hierarchy.epics` in place (splices epics/stories, merges fields) and
 * returns the same hierarchy object for chaining.
 *
 * @param {Object} hierarchy - Decomposed hierarchy with an `epics` array
 * @param {Object} result - LLM dedup response; invalid/missing fields are tolerated
 * @returns {Object} The (mutated) hierarchy
 */
_applyDuplicateResults(hierarchy, result) {
  // Defensive: a malformed LLM response must never corrupt the hierarchy.
  if (!result || typeof result !== 'object') {
    this.debug('Invalid duplicate detection result, skipping');
    return hierarchy;
  }

  const existingOverlaps = result.existingOverlaps || [];
  const epicMergeGroups = result.epicMergeGroups || [];
  const storyMergeGroups = result.storyMergeGroups || [];

  const originalCount = hierarchy.epics.length;

  // Master mapping: original index → current index (or -1 if removed)
  // Starts as identity mapping, updated after each removal/splice.
  const originalToCurrent = new Map();
  for (let i = 0; i < originalCount; i++) {
    originalToCurrent.set(i, i);
  }

  // Helper: after splicing index `splicedIdx` from the array,
  // decrement all current-index values that are > splicedIdx.
  // The spliced entry itself is marked -1 (removed).
  const adjustAfterSplice = (splicedCurrentIdx) => {
    for (const [origIdx, curIdx] of originalToCurrent) {
      if (curIdx === splicedCurrentIdx) {
        originalToCurrent.set(origIdx, -1);
      } else if (curIdx > splicedCurrentIdx) {
        originalToCurrent.set(origIdx, curIdx - 1);
      }
    }
  };

  // 1. Remove new epics that duplicate existing on-disk epics (process in reverse current-index order)
  if (existingOverlaps.length > 0) {
    // Collect current indices to remove (mapped from original indices).
    // Only overlaps explicitly recommended as 'skip' with a numeric index are honored.
    const toRemove = existingOverlaps
      .filter(o => o.recommendation === 'skip' && typeof o.newEpicIndex === 'number')
      .map(o => ({ origIdx: o.newEpicIndex, curIdx: originalToCurrent.get(o.newEpicIndex), overlap: o }))
      .filter(r => r.curIdx != null && r.curIdx >= 0 && r.curIdx < hierarchy.epics.length)
      .sort((a, b) => b.curIdx - a.curIdx); // reverse for safe splicing

    for (const { origIdx, curIdx, overlap } of toRemove) {
      this.debug(`Removing epic "${hierarchy.epics[curIdx].name}" — duplicates existing "${overlap.existingEpicName}"`);
      hierarchy.epics.splice(curIdx, 1);
      adjustAfterSplice(curIdx);
    }
  }

  // 2. Merge epic groups
  if (epicMergeGroups.length > 0) {
    for (const group of epicMergeGroups) {
      // Remap the LLM's original-position target index to the current array position.
      const targetCur = originalToCurrent.get(group.targetIndex);
      if (targetCur == null || targetCur === -1) {
        this.debug(`Skipping epic merge group: target index ${group.targetIndex} was removed or invalid`);
        continue;
      }

      // Remap and validate every source index; drop removed/out-of-range/self references.
      const sourceCurIndices = (group.sourceIndices || [])
        .map(idx => originalToCurrent.get(idx))
        .filter(idx => idx != null && idx !== -1 && idx >= 0 && idx < hierarchy.epics.length && idx !== targetCur);

      if (sourceCurIndices.length === 0) {
        this.debug(`Skipping epic merge group: no valid source indices`);
        continue;
      }

      // NOTE(review): this bounds check looks redundant with the -1/null check
      // above (no splices occur between them) — kept as a belt-and-braces guard.
      if (targetCur < 0 || targetCur >= hierarchy.epics.length) {
        this.debug(`Skipping epic merge group: remapped target ${targetCur} out of bounds`);
        continue;
      }

      // Merge sources into target
      for (const srcIdx of sourceCurIndices) {
        this._mergeEpicData(hierarchy.epics[targetCur], hierarchy.epics[srcIdx]);
      }

      if (group.mergedName) {
        hierarchy.epics[targetCur].name = group.mergedName;
      }

      // Logged before splicing so source names are still resolvable.
      this.debug(`Merged epics: ${sourceCurIndices.map(i => hierarchy.epics[i]?.name).join(', ')} → "${hierarchy.epics[targetCur].name}" (reason: ${group.reason})`);

      // Remove sources in reverse order, adjusting mapping after each
      for (const srcIdx of sourceCurIndices.sort((a, b) => b - a)) {
        hierarchy.epics.splice(srcIdx, 1);
        adjustAfterSplice(srcIdx);
      }
    }
  }

  // 3. Merge story groups within epics
  // epicIndex from LLM refers to ORIGINAL positions — remap via originalToCurrent.
  // Story indices, by contrast, are positions within the epic's current stories array.
  if (storyMergeGroups.length > 0) {
    for (const group of storyMergeGroups) {
      const epicIdx = originalToCurrent.get(group.epicIndex);
      if (epicIdx == null || epicIdx === -1 || epicIdx < 0 || epicIdx >= hierarchy.epics.length) {
        this.debug(`Skipping story merge group: epic index ${group.epicIndex} (remapped: ${epicIdx}) out of bounds or removed`);
        continue;
      }

      const epic = hierarchy.epics[epicIdx];
      const stories = epic.stories || [];
      const targetIdx = group.targetStoryIndex;

      if (targetIdx < 0 || targetIdx >= stories.length) {
        this.debug(`Skipping story merge group: target story index ${targetIdx} out of bounds in epic "${epic.name}"`);
        continue;
      }

      let sourceIndices = (group.sourceStoryIndices || [])
        .filter(idx => idx >= 0 && idx < stories.length && idx !== targetIdx);

      if (sourceIndices.length === 0) continue;

      // Guard: reject merge when both stories have ≥3 acceptance criteria (both are well-scoped)
      // NOTE(review): this guard runs before the 1-source cap below, so a group can be
      // rejected because of a source that the cap would have dropped anyway — confirm intent.
      const targetAcCount = (stories[targetIdx].acceptance || []).length;
      const sourceAcCounts = sourceIndices.map(idx => (stories[idx]?.acceptance || []).length);
      if (targetAcCount >= 3 && sourceAcCounts.some(c => c >= 3)) {
        this.debug(`Rejecting story merge in "${epic.name}": target "${stories[targetIdx].name}" has ${targetAcCount} ACs and source(s) have ${sourceAcCounts.join(',')} ACs — both are well-scoped, merging would create an oversized story`);
        continue;
      }

      // Cap story merges: merge at most 1 source to prevent over-merging
      if (sourceIndices.length > 1) {
        this.debug(`Story merge group in "${epic.name}" has ${sourceIndices.length} sources — capping to 1 to prevent over-merging`);
        sourceIndices = [sourceIndices[0]];
      }

      // Merge story acceptance criteria and dependencies
      // (case-insensitive dedup: the lowercased form decides membership, but the
      // original casing of the first occurrence is what gets stored)
      const target = stories[targetIdx];
      for (const srcIdx of sourceIndices) {
        const source = stories[srcIdx];

        // Merge acceptance criteria
        if (source.acceptance?.length) {
          const existing = new Set((target.acceptance || []).map(a => a.toLowerCase()));
          target.acceptance = target.acceptance || [];
          for (const ac of source.acceptance) {
            if (!existing.has(ac.toLowerCase())) {
              target.acceptance.push(ac);
              existing.add(ac.toLowerCase());
            }
          }
        }

        // Merge dependencies
        if (source.dependencies?.length) {
          const existing = new Set((target.dependencies || []).map(d => d.toLowerCase()));
          target.dependencies = target.dependencies || [];
          for (const dep of source.dependencies) {
            if (!existing.has(dep.toLowerCase())) {
              target.dependencies.push(dep);
              existing.add(dep.toLowerCase());
            }
          }
        }
      }

      this.debug(`Merged stories in epic "${epic.name}": indices ${sourceIndices.join(',')} → ${targetIdx} (reason: ${group.reason})`);

      // Remap dependency references: any story depending on a merged source now depends on the target.
      // Must run BEFORE the splice below, while stories[srcIdx].id is still readable.
      const targetId = target.id;
      for (const srcIdx of sourceIndices) {
        const sourceId = stories[srcIdx]?.id;
        if (sourceId && targetId) {
          for (const e of hierarchy.epics) {
            for (const s of e.stories || []) {
              if (Array.isArray(s.dependencies)) {
                const depIdx = s.dependencies.indexOf(sourceId);
                if (depIdx !== -1 && s.id !== targetId) {
                  s.dependencies[depIdx] = targetId;
                  this.debug(`Remapped dependency: story "${s.name}" now depends on "${targetId}" (was "${sourceId}")`);
                }
              }
            }
          }
        }
      }

      // Remove source stories in reverse order
      for (const srcIdx of sourceIndices.sort((a, b) => b - a)) {
        epic.stories.splice(srcIdx, 1);
      }
    }
  }

  return hierarchy;
}
|
|
1273
|
+
|
|
1274
|
+
// STAGE 4.2: Review and split wide stories
/**
 * Run one LLM "story scope review" call per epic (with bounded concurrency)
 * and replace each epic's stories with the reviewer's returned list, which may
 * split overly broad stories into several narrower ones.
 *
 * Failure handling: a failed review keeps that epic's original stories; after
 * 3 context-size failures, remaining epics are skipped entirely.
 *
 * @param {Object} hierarchy - Decomposed hierarchy; `epics[].stories` is mutated in place
 * @param {Function|null} progressCallback - Optional async progress reporter
 * @returns {Promise<Object>} The same hierarchy, with stories possibly split
 */
async reviewAndSplitStories(hierarchy, progressCallback = null) {
  this.debugStage(4.2, 'Review and Split Wide Stories');

  const provider = await this.getProviderForStageInstance('decomposition');
  const { provider: providerName, model: modelName } = this.getProviderForStage('decomposition');

  // Reviewer instructions are loaded fresh from the agents directory each call.
  const agentPath = path.join(this.agentsPath, 'story-scope-reviewer.md');
  const reviewerAgent = fs.readFileSync(agentPath, 'utf8');

  this.debug('Story scope reviewer loaded', { agentBytes: reviewerAgent.length, provider: providerName, model: modelName });
  await progressCallback?.(null, `Reviewing story scopes for splits (${providerName} / ${modelName})…`, {});

  let totalSplits = 0;
  // Wrapped in an object so the closures below share one mutable counter.
  const contextSizeFailures = { count: 0 };

  // Limit concurrency for story scope review: local models can't handle unbounded parallelism
  const reviewConcurrency = providerName === 'local' ? 2 : hierarchy.epics.length;

  // Process epics with bounded concurrency — one LLM call per epic
  const epicTasks = hierarchy.epics.map((epic) => async () => {
    // Early bail-out if too many context-size failures
    if (contextSizeFailures.count >= 3) {
      this.debug(`Skipping story review for "${epic.name}" — too many context-size failures`);
      return { epic, stories: epic.stories, splits: [] };
    }

    const prompt = `## Epic
name: ${epic.name}
domain: ${epic.domain || 'unknown'}
description: ${epic.description || ''}
features: ${JSON.stringify(epic.features || [])}

## Stories
${JSON.stringify(epic.stories || [], null, 2)}

Review the stories above and return the complete final story list for this epic, splitting any stories that are too broad according to your instructions.`;

    this.debug(`Reviewing stories for epic: ${epic.name} (${(epic.stories || []).length} stories)`);

    try {
      // Heartbeat wrapper keeps progress messages flowing during a long LLM call;
      // retryWithBackoff handles transient provider failures.
      const result = await this._withProgressHeartbeat(
        () => this.retryWithBackoff(
          () => provider.generateJSON(prompt, reviewerAgent),
          `Story scope review for ${epic.name}`
        ),
        (elapsed) => {
          if (elapsed < 20) return `Reviewing stories in ${epic.name}…`;
          if (elapsed < 45) return `Checking scope boundaries for ${epic.name}…`;
          return `Finalizing splits for ${epic.name}…`;
        },
        progressCallback,
        20000
      );

      const splits = result.splits || [];
      // If the LLM returned no story list, keep the originals unchanged.
      const stories = result.stories || epic.stories;

      if (splits.length > 0) {
        this.debug(`Splits applied in epic "${epic.name}"`, splits.map(s => ({
          original: s.original,
          into: s.into,
          rationale: s.rationale
        })));
        for (const split of splits) {
          await progressCallback?.(null, `  Split: ${split.original} → ${split.into.join(', ')} — ${split.rationale}`, {});
        }
      } else {
        this.debug(`No splits needed for epic "${epic.name}"`);
      }

      return { epic, stories, splits };
    } catch (err) {
      // NOTE(review): matching bare 'exceeded' is broad and may count unrelated
      // errors (e.g. "rate limit exceeded") as context-size failures — confirm.
      if (err.message?.toLowerCase().includes('context size') || err.message?.includes('exceeded')) {
        contextSizeFailures.count++;
        this.debug(`Context-size failure #${contextSizeFailures.count} for epic "${epic.name}"`, { error: err.message });
      }
      this.debug(`Story scope review failed for epic "${epic.name}" — keeping original stories`, { error: err.message });
      return { epic, stories: epic.stories, splits: [] };
    }
  });

  // Run with bounded concurrency
  const results = await this._runWithConcurrency(epicTasks, reviewConcurrency);

  // Apply results back to hierarchy
  for (const { epic, stories, splits } of results) {
    epic.stories = stories;
    totalSplits += splits.length;
  }

  const totalStories = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
  this.debug(`Story scope review complete — ${totalSplits} split(s), ${totalStories} total stories`);
  await progressCallback?.(null, `Story review complete: ${totalSplits} split(s), ${totalStories} total stories`, {});

  return hierarchy;
}
|
|
1371
|
+
|
|
1372
|
+
/**
|
|
1373
|
+
* Filter the decomposed hierarchy to only the epics/stories chosen by the user.
|
|
1374
|
+
* @param {Object} hierarchy - Full decomposed hierarchy
|
|
1375
|
+
* @param {string[]} selectedEpicIds - Epic IDs to keep
|
|
1376
|
+
* @param {string[]} selectedStoryIds - Story IDs to keep
|
|
1377
|
+
* @returns {Object} Filtered hierarchy
|
|
1378
|
+
*/
|
|
1379
|
+
_filterHierarchyBySelection(hierarchy, selectedEpicIds, selectedStoryIds) {
|
|
1380
|
+
const epicIdSet = new Set(selectedEpicIds);
|
|
1381
|
+
const storyIdSet = new Set(selectedStoryIds);
|
|
1382
|
+
const filteredEpics = hierarchy.epics
|
|
1383
|
+
.filter(e => epicIdSet.has(e.id))
|
|
1384
|
+
.map(e => ({
|
|
1385
|
+
...e,
|
|
1386
|
+
stories: (e.stories || []).filter(s => storyIdSet.has(s.id))
|
|
1387
|
+
}));
|
|
1388
|
+
return { ...hierarchy, epics: filteredEpics };
|
|
1389
|
+
}
|
|
1390
|
+
|
|
1391
|
+
/**
 * Phase 1 of contextual selection: extract structured project characteristics from scope text.
 * Called once per sprint-planning run when useContextualSelection is enabled.
 * Prefers the full root doc.md (when present on disk) over the supplied scope
 * text, and sends at most the first 20000 characters to the LLM.
 * @param {string} scope - Project scope text (fallback when doc.md is missing)
 * @param {Function} progressCallback - Optional progress callback (not invoked in this method)
 * @returns {Promise<Object>} ProjectContext JSON (empty object on failure)
 */
async extractProjectContext(scope, progressCallback) {
  this.debug('Extracting project context for contextual agent selection');
  try {
    const provider = await this.getProviderForStageInstance('validation');
    const agent = loadAgent('project-context-extractor.md');
    // Use full doc.md for tech stack extraction — the "Initial Scope" section alone
    // omits tech info from Overview (§1), UI/UX Design (§5), and Technical Architecture (§6).
    let extractionText = scope || '';
    if (fs.existsSync(this.projectDocPath)) {
      extractionText = fs.readFileSync(this.projectDocPath, 'utf8');
      this.debug(`Using full doc.md for context extraction (${extractionText.length} chars)`);
    }
    const prompt = `PROJECT SCOPE:\n\n${extractionText.substring(0, 20000)}\n\nScan the entire document above from start to finish for ALL technology mentions before filling each field. Do not stop at the first occurrence. Extract the structured project context as JSON.`;
    const result = await provider.generateJSON(prompt, agent);
    return result || {};
  } catch (err) {
    // Graceful degradation: any failure (provider, file read, JSON parse)
    // means contextual selection simply proceeds without extracted context.
    this.debug('Project context extraction failed, continuing without context', { error: err.message });
    return {};
  }
}
|
|
1418
|
+
|
|
1419
|
+
/**
|
|
1420
|
+
* Generate canonical root context.md content from extracted project context + doc.md.
|
|
1421
|
+
* Written once per run to {projectPath}/context.md — no LLM call needed.
|
|
1422
|
+
* @param {Object} projectContext - JSON from project-context-extractor
|
|
1423
|
+
* @param {string} docMdContent - Full root doc.md content
|
|
1424
|
+
* @returns {string}
|
|
1425
|
+
*/
|
|
1426
|
+
generateRootContextMd(projectContext, docMdContent, hierarchy = null) {
|
|
1427
|
+
const ctx = projectContext || {};
|
|
1428
|
+
const stack = (ctx.techStack || []).map(t => `- ${t}`).join('\n') || '- (not detected)';
|
|
1429
|
+
|
|
1430
|
+
// Purpose comes from the LLM-extracted field — no regex needed.
|
|
1431
|
+
const purposeText = (ctx.purpose || '').trim();
|
|
1432
|
+
|
|
1433
|
+
// Detect auth mechanism from doc.md to enforce consistency across all contexts
|
|
1434
|
+
const docLower = (docMdContent || '').toLowerCase();
|
|
1435
|
+
let authMechanism = 'session-based (httpOnly cookies)';
|
|
1436
|
+
// Check for "no auth" signals first
|
|
1437
|
+
const noAuthPatterns = /no\s+auth|no\s+login|no\s+account|no\s+user\s+account|publicly\s+accessible|no\s+backend|no\s+server|single.file|static\s+site|client.side\s+only|no\s+authentication/i;
|
|
1438
|
+
if (noAuthPatterns.test(docMdContent)) {
|
|
1439
|
+
authMechanism = 'none (public, no authentication required)';
|
|
1440
|
+
} else if (/jwt\s+token|bearer\s+token|authorization\s+header/i.test(docMdContent) && !/session.based|httponl/i.test(docMdContent)) {
|
|
1441
|
+
authMechanism = 'JWT bearer tokens';
|
|
1442
|
+
}
|
|
1443
|
+
|
|
1444
|
+
const lines = [
|
|
1445
|
+
'# Project Context',
|
|
1446
|
+
'',
|
|
1447
|
+
'## Identity',
|
|
1448
|
+
`- type: ${ctx.projectType || 'web-application'}`,
|
|
1449
|
+
`- deployment: ${ctx.deploymentType || 'local'}`,
|
|
1450
|
+
`- team: ${ctx.teamContext || 'small'}`,
|
|
1451
|
+
'',
|
|
1452
|
+
'## Purpose',
|
|
1453
|
+
purposeText || '(see root doc.md)',
|
|
1454
|
+
'',
|
|
1455
|
+
'## Tech Stack',
|
|
1456
|
+
stack,
|
|
1457
|
+
'',
|
|
1458
|
+
'## Authentication',
|
|
1459
|
+
`- mechanism: ${authMechanism}`,
|
|
1460
|
+
...(authMechanism.startsWith('none')
|
|
1461
|
+
? ['- NOTE: This project has no authentication. Do NOT add auth mechanisms (sessions, JWT, login) to any epic or story.']
|
|
1462
|
+
: ['- IMPORTANT: All epics and stories MUST use this auth mechanism consistently. Do NOT mix session cookies and JWT tokens.']),
|
|
1463
|
+
'',
|
|
1464
|
+
'## Project Characteristics',
|
|
1465
|
+
`- hasCloud: ${ctx.hasCloud ?? false}`,
|
|
1466
|
+
`- hasCI_CD: ${ctx.hasCI_CD ?? false}`,
|
|
1467
|
+
`- hasMobileApp: ${ctx.hasMobileApp ?? false}`,
|
|
1468
|
+
`- hasFrontend: ${ctx.hasFrontend ?? true}`,
|
|
1469
|
+
`- hasPublicAPI: ${ctx.hasPublicAPI ?? false}`,
|
|
1470
|
+
];
|
|
1471
|
+
|
|
1472
|
+
// Add epic map if hierarchy is available
|
|
1473
|
+
if (hierarchy?.epics?.length > 0) {
|
|
1474
|
+
lines.push('', '## Epic Map');
|
|
1475
|
+
for (const epic of hierarchy.epics) {
|
|
1476
|
+
const storyCount = (epic.stories || []).length;
|
|
1477
|
+
lines.push(`- **${epic.id}**: ${epic.name} (${storyCount} stories) — ${epic.domain || 'general'}`);
|
|
1478
|
+
}
|
|
1479
|
+
lines.push('', 'Use the epic IDs above when cross-referencing dependencies between epics and stories.');
|
|
1480
|
+
}
|
|
1481
|
+
|
|
1482
|
+
return lines.join('\n');
|
|
1483
|
+
}
|
|
1484
|
+
|
|
1485
|
+
/**
 * Generate a scaffolding epic by scanning all domain epic/story contexts for tech requirements.
 * Called AFTER generateContextFiles() so all context.md files are in cache.
 * Inserts the epic as the first item in hierarchy.epics with all others depending on it.
 *
 * No-ops (returns early) when: the hierarchy has no epics, a scaffolding epic
 * already exists, no tech requirements are found, or the LLM call fails or
 * returns no epic data.
 *
 * @param {Object} hierarchy - Decomposed hierarchy; `epics` is mutated in place
 * @param {Function|null} progressCallback - Optional async progress reporter
 * @returns {Promise<void>}
 */
async _generateScaffoldingEpic(hierarchy, progressCallback) {
  if (!hierarchy.epics || hierarchy.epics.length === 0) return;

  // Skip if a scaffolding epic already exists (e.g., from a prior run)
  if (hierarchy.epics.some(e => (e.domain || '').toLowerCase() === 'scaffolding')) {
    this.debug('Scaffolding epic already exists — skipping generation');
    return;
  }

  await progressCallback?.(null, 'Generating project scaffolding epic from tech requirements…', {});

  // 1. Extract tech requirements from all cached context.md files
  // NOTE(review): allContexts is populated below but never read afterwards —
  // looks like leftover from an earlier prompt format; confirm before removing.
  const techMentions = new Set();
  const allContexts = [];

  for (const [key, contextMd] of this._epicContextCache) {
    allContexts.push(contextMd);
    this._extractTechFromContext(contextMd, techMentions);
  }
  for (const [key, contextMd] of this._storyContextCache) {
    allContexts.push(contextMd);
    this._extractTechFromContext(contextMd, techMentions);
  }

  // Also extract from root context
  if (this.rootContextMd) {
    this._extractTechFromContext(this.rootContextMd, techMentions);
  }

  const techRequirements = [...techMentions].sort();
  this.debug('Tech requirements extracted for scaffolding', { count: techRequirements.length, items: techRequirements });

  if (techRequirements.length === 0) {
    this.debug('No tech requirements found — skipping scaffolding generation');
    return;
  }

  // 2. Call LLM to generate the scaffolding epic
  const provider = await this.getProviderForStageInstance('context-generation');
  const agentInstructions = loadAgent('scaffolding-generator.md');

  const prompt = `## Project Context\n\n${this.rootContextMd || '(no root context)'}\n\n## Extracted Tech Requirements\n\nThe following technologies, packages, tools, and infrastructure were found across all ${hierarchy.epics.length} domain epics and their stories:\n\n${techRequirements.map(t => `- ${t}`).join('\n')}\n\n## Epic Count\n\n${hierarchy.epics.length} domain epics exist.`;

  let scaffoldingData;
  try {
    scaffoldingData = await provider.generateJSON(prompt, agentInstructions);
  } catch (err) {
    // LLM failure is non-fatal: the hierarchy simply ships without a scaffolding epic.
    this.debug('Scaffolding LLM call failed', { error: err.message });
    return;
  }

  if (!scaffoldingData?.epic) {
    this.debug('Scaffolding LLM returned no epic data');
    return;
  }

  // 3. Build the scaffolding epic object
  // Story IDs use the fixed 'context-0000-NNNN' scheme (epic id + 1-based, zero-padded index).
  const scaffoldStories = (scaffoldingData.epic.stories || []).map((s, i) => {
    const storyId = `context-0000-${String(i + 1).padStart(4, '0')}`;
    const prevId = i > 0 ? `context-0000-${String(i).padStart(4, '0')}` : null;
    // Chain dependencies: each story depends on the previous one
    // (unless the LLM supplied explicit dependencies for that story)
    const deps = s.dependencies && s.dependencies.length > 0
      ? s.dependencies
      : (prevId ? [prevId] : []);
    return {
      id: storyId,
      name: s.name,
      userType: s.userType || 'developers',
      description: s.description || '',
      acceptance: s.acceptance || [],
      dependencies: deps,
    };
  });

  const scaffoldEpic = {
    id: 'context-0000',
    name: scaffoldingData.epic.name || 'Project Scaffolding and Environment Setup',
    domain: 'scaffolding',
    description: scaffoldingData.epic.description || '',
    features: scaffoldingData.epic.features || [],
    dependencies: [],
    stories: scaffoldStories,
  };

  // 4. Insert as first epic
  hierarchy.epics.unshift(scaffoldEpic);

  // 5. Inject dependency: all domain epics depend on scaffolding
  const scaffoldId = scaffoldEpic.id;
  let injected = 0;
  for (const epic of hierarchy.epics) {
    if (epic.id === scaffoldId) continue;
    if (!epic.dependencies) epic.dependencies = [];
    if (!epic.dependencies.includes(scaffoldId)) {
      epic.dependencies.push(scaffoldId);
      injected++;
    }
  }

  // 6. Generate context.md for the scaffolding epic (so validators see it)
  try {
    const scaffoldContext = await this.generateEpicContextMdLLM(scaffoldEpic, provider);
    this._epicContextCache.set(scaffoldEpic.name, scaffoldContext);
    // Also generate story contexts (4th param is the provider)
    for (const story of scaffoldEpic.stories) {
      const storyContext = await this.generateStoryContextMdLLM(story, scaffoldEpic, scaffoldContext, provider);
      this._storyContextCache.set(`${scaffoldEpic.name}::${story.name}`, storyContext);
    }
  } catch (err) {
    // Context generation failing does not undo the epic insertion above.
    this.debug('Scaffolding context generation failed — using fallback', { error: err.message });
  }

  this.debug(`Scaffolding epic generated: "${scaffoldEpic.name}" with ${scaffoldEpic.stories.length} stories, ${injected} epics now depend on it`);
  this.debug('Scaffolding stories', scaffoldEpic.stories.map(s => s.name));
}
|
|
1605
|
+
|
|
1606
|
+
/**
|
|
1607
|
+
* Extract technology mentions from a context.md string.
|
|
1608
|
+
* Looks for package names, frameworks, tools, and infrastructure.
|
|
1609
|
+
*/
|
|
1610
|
+
_extractTechFromContext(contextText, techSet) {
|
|
1611
|
+
if (!contextText) return;
|
|
1612
|
+
const lower = contextText.toLowerCase();
|
|
1613
|
+
|
|
1614
|
+
// Common tech patterns to detect
|
|
1615
|
+
const patterns = [
|
|
1616
|
+
// JS/Node ecosystem
|
|
1617
|
+
/\b(node\.?js|npm|yarn|pnpm)\b/gi,
|
|
1618
|
+
/\b(express\.?js|express|fastify|koa|hapi)\b/gi,
|
|
1619
|
+
/\b(react|vue\.?js|angular|svelte|next\.?js|nuxt)\b/gi,
|
|
1620
|
+
/\b(vite|webpack|rollup|esbuild|parcel)\b/gi,
|
|
1621
|
+
/\b(vitest|jest|mocha|chai|cypress|playwright)\b/gi,
|
|
1622
|
+
/\b(typescript|tsconfig)\b/gi,
|
|
1623
|
+
/\b(tailwind\s*css|bootstrap|material.ui)\b/gi,
|
|
1624
|
+
/\b(prisma|sequelize|typeorm|knex|drizzle)\b/gi,
|
|
1625
|
+
/\b(zustand|redux|mobx|react.query|tanstack)\b/gi,
|
|
1626
|
+
// Databases
|
|
1627
|
+
/\b(sqlite|postgresql|postgres|mysql|mongodb|redis)\b/gi,
|
|
1628
|
+
// Infrastructure
|
|
1629
|
+
/\b(docker|docker.compose|nginx|apache)\b/gi,
|
|
1630
|
+
/\b(kubernetes|k8s|terraform|aws|gcp|azure)\b/gi,
|
|
1631
|
+
// Python
|
|
1632
|
+
/\b(python|pip|poetry|flask|django|fastapi|pytest)\b/gi,
|
|
1633
|
+
// General
|
|
1634
|
+
/\b(git|eslint|prettier|husky)\b/gi,
|
|
1635
|
+
/\b(\.env|environment.variables|dotenv)\b/gi,
|
|
1636
|
+
// File types that indicate tech
|
|
1637
|
+
/\b(html5?|css3?|vanilla\s*javascript)\b/gi,
|
|
1638
|
+
];
|
|
1639
|
+
|
|
1640
|
+
for (const pattern of patterns) {
|
|
1641
|
+
const matches = contextText.match(pattern);
|
|
1642
|
+
if (matches) {
|
|
1643
|
+
for (const m of matches) {
|
|
1644
|
+
techSet.add(m.trim().toLowerCase());
|
|
1645
|
+
}
|
|
1646
|
+
}
|
|
1647
|
+
}
|
|
1648
|
+
}
|
|
1649
|
+
|
|
1650
|
+
/**
|
|
1651
|
+
* Update the status field in a work.json file on disk.
|
|
1652
|
+
*/
|
|
1653
|
+
_setWorkJsonStatus(workJsonPath, status) {
|
|
1654
|
+
try {
|
|
1655
|
+
if (!fs.existsSync(workJsonPath)) return;
|
|
1656
|
+
const workJson = JSON.parse(fs.readFileSync(workJsonPath, 'utf8'));
|
|
1657
|
+
workJson.status = status;
|
|
1658
|
+
fs.writeFileSync(workJsonPath, JSON.stringify(workJson, null, 2), 'utf8');
|
|
1659
|
+
} catch {}
|
|
1660
|
+
}
|
|
1661
|
+
|
|
1662
|
+
/**
|
|
1663
|
+
* Generate canonical context.md string for an epic from its JSON fields.
|
|
1664
|
+
* No LLM call needed — derived directly from decomposition output.
|
|
1665
|
+
* @param {Object} epic
|
|
1666
|
+
* @returns {string}
|
|
1667
|
+
*/
|
|
1668
|
+
generateEpicContextMd(epic) {
|
|
1669
|
+
const features = (epic.features || []).map(f => `- ${f}`).join('\n') || '- (none)';
|
|
1670
|
+
const deps = epic.dependencies || [];
|
|
1671
|
+
const optional = deps.filter(d => /optional/i.test(d));
|
|
1672
|
+
const required = deps.filter(d => !/optional/i.test(d));
|
|
1673
|
+
const reqLines = required.length ? required.map(d => `- ${d}`).join('\n') : '- (none)';
|
|
1674
|
+
const storyCount = (epic.stories || []).length;
|
|
1675
|
+
const lines = [
|
|
1676
|
+
`# Epic: ${epic.name}`,
|
|
1677
|
+
'',
|
|
1678
|
+
'## Identity',
|
|
1679
|
+
`- id: ${epic.id || '(pending)'}`,
|
|
1680
|
+
`- domain: ${epic.domain}`,
|
|
1681
|
+
`- stories: ${storyCount}`,
|
|
1682
|
+
'',
|
|
1683
|
+
'## Summary',
|
|
1684
|
+
epic.description || '(no description)',
|
|
1685
|
+
'',
|
|
1686
|
+
'## Features',
|
|
1687
|
+
features,
|
|
1688
|
+
'',
|
|
1689
|
+
'## Dependencies',
|
|
1690
|
+
'',
|
|
1691
|
+
'### Required',
|
|
1692
|
+
reqLines,
|
|
1693
|
+
];
|
|
1694
|
+
if (optional.length) {
|
|
1695
|
+
lines.push('', '### Optional');
|
|
1696
|
+
optional.forEach(d => lines.push(`- ${d}`));
|
|
1697
|
+
}
|
|
1698
|
+
return lines.join('\n');
|
|
1699
|
+
}
|
|
1700
|
+
|
|
1701
|
+
/**
|
|
1702
|
+
* Generate canonical context.md string for a story from its JSON fields.
|
|
1703
|
+
* No LLM call needed — derived directly from decomposition output.
|
|
1704
|
+
* @param {Object} story
|
|
1705
|
+
* @param {Object} epic - Parent epic for identity context
|
|
1706
|
+
* @returns {string}
|
|
1707
|
+
*/
|
|
1708
|
+
generateStoryContextMd(story, epic) {
|
|
1709
|
+
const ac = (story.acceptance || []).map((a, i) => `${i + 1}. ${a}`).join('\n') || '1. (none)';
|
|
1710
|
+
const deps = (story.dependencies || []).map(d => `- ${d}`).join('\n') || '- (none)';
|
|
1711
|
+
return [
|
|
1712
|
+
`# Story: ${story.name}`,
|
|
1713
|
+
'',
|
|
1714
|
+
'## Identity',
|
|
1715
|
+
`- id: ${story.id || '(pending)'}`,
|
|
1716
|
+
`- epic: ${epic.id || '(pending)'} (${epic.name})`,
|
|
1717
|
+
`- userType: ${story.userType || 'team member'}`,
|
|
1718
|
+
'',
|
|
1719
|
+
'## Summary',
|
|
1720
|
+
story.description || '(no description)',
|
|
1721
|
+
'',
|
|
1722
|
+
'## Acceptance Criteria',
|
|
1723
|
+
ac,
|
|
1724
|
+
'',
|
|
1725
|
+
'## Dependencies',
|
|
1726
|
+
deps,
|
|
1727
|
+
].join('\n');
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
/**
|
|
1731
|
+
* Build deterministic scaffold for epic context.md — Identity, Features, Dependencies, Stories Overview.
|
|
1732
|
+
* LLM only needs to write Purpose, Scope, Data Model, NFRs, Success Criteria.
|
|
1733
|
+
* @param {Object} epicData - Canonical epic JSON
|
|
1734
|
+
* @returns {string} Pre-built markdown sections
|
|
1735
|
+
*/
|
|
1736
|
+
_buildEpicScaffold(epicData) {
|
|
1737
|
+
const lines = [];
|
|
1738
|
+
lines.push(`# Epic: ${epicData.name}`);
|
|
1739
|
+
lines.push('');
|
|
1740
|
+
lines.push('## Identity');
|
|
1741
|
+
lines.push(`- id: ${epicData.id}`);
|
|
1742
|
+
lines.push(`- domain: ${epicData.domain || 'general'}`);
|
|
1743
|
+
lines.push(`- stories: ${(epicData.stories || []).length}`);
|
|
1744
|
+
lines.push('');
|
|
1745
|
+
lines.push('## Features');
|
|
1746
|
+
for (const feature of epicData.features || []) {
|
|
1747
|
+
lines.push(`- ${feature}`);
|
|
1748
|
+
}
|
|
1749
|
+
lines.push('');
|
|
1750
|
+
lines.push('## Dependencies');
|
|
1751
|
+
lines.push('');
|
|
1752
|
+
lines.push('### Required');
|
|
1753
|
+
const deps = epicData.dependencies || [];
|
|
1754
|
+
if (deps.length === 0) {
|
|
1755
|
+
lines.push('- (none)');
|
|
1756
|
+
} else {
|
|
1757
|
+
for (const dep of deps) {
|
|
1758
|
+
lines.push(`- ${dep}`);
|
|
1759
|
+
}
|
|
1760
|
+
}
|
|
1761
|
+
lines.push('');
|
|
1762
|
+
lines.push('### Optional');
|
|
1763
|
+
lines.push('- (none)');
|
|
1764
|
+
lines.push('');
|
|
1765
|
+
lines.push('## Stories Overview');
|
|
1766
|
+
for (const story of epicData.stories || []) {
|
|
1767
|
+
lines.push(`- ${story.id || 'TBD'}: ${story.name}`);
|
|
1768
|
+
}
|
|
1769
|
+
return lines.join('\n');
|
|
1770
|
+
}
|
|
1771
|
+
|
|
1772
|
+
/**
|
|
1773
|
+
* Build deterministic scaffold for story context.md — Identity, Acceptance Criteria, Dependencies.
|
|
1774
|
+
* LLM only needs to write User Story, Summary, Scope, Technical Notes.
|
|
1775
|
+
* @param {Object} storyData - Canonical story JSON
|
|
1776
|
+
* @returns {string} Pre-built markdown sections
|
|
1777
|
+
*/
|
|
1778
|
+
_buildStoryScaffold(storyData) {
|
|
1779
|
+
const lines = [];
|
|
1780
|
+
lines.push(`# Story: ${storyData.name}`);
|
|
1781
|
+
lines.push('');
|
|
1782
|
+
lines.push('## Identity');
|
|
1783
|
+
lines.push(`- id: ${storyData.id || 'TBD'}`);
|
|
1784
|
+
lines.push(`- epic: ${storyData.epicId || 'TBD'} (${storyData.epicName || 'TBD'})`);
|
|
1785
|
+
lines.push(`- userType: ${storyData.userType || 'team member'}`);
|
|
1786
|
+
lines.push('');
|
|
1787
|
+
lines.push('## Acceptance Criteria');
|
|
1788
|
+
const acceptance = storyData.acceptance || [];
|
|
1789
|
+
for (let i = 0; i < acceptance.length; i++) {
|
|
1790
|
+
lines.push(`${i + 1}. ${acceptance[i]}`);
|
|
1791
|
+
}
|
|
1792
|
+
lines.push('');
|
|
1793
|
+
lines.push('## Dependencies');
|
|
1794
|
+
const deps = storyData.dependencies || [];
|
|
1795
|
+
if (deps.length === 0) {
|
|
1796
|
+
lines.push('- (none)');
|
|
1797
|
+
} else {
|
|
1798
|
+
for (const dep of deps) {
|
|
1799
|
+
lines.push(`- ${dep}`);
|
|
1800
|
+
}
|
|
1801
|
+
}
|
|
1802
|
+
return lines.join('\n');
|
|
1803
|
+
}
|
|
1804
|
+
|
|
1805
|
+
/**
|
|
1806
|
+
* Detect domain-specific concerns from project context and work item features.
|
|
1807
|
+
* Returns checklist items that the LLM MUST address in NFRs or Technical Notes.
|
|
1808
|
+
* @param {string} rootContext - Root context.md text
|
|
1809
|
+
* @param {Object} workItem - Epic or story-like object with features/description
|
|
1810
|
+
* @returns {string[]} Domain hints
|
|
1811
|
+
*/
|
|
1812
|
+
_detectDomainHints(rootContext, workItem) {
|
|
1813
|
+
const hints = [];
|
|
1814
|
+
const allText = [
|
|
1815
|
+
rootContext,
|
|
1816
|
+
workItem.description || '',
|
|
1817
|
+
...(workItem.features || []),
|
|
1818
|
+
].join(' ').toLowerCase();
|
|
1819
|
+
|
|
1820
|
+
// Scheduling / time-related → timezone handling
|
|
1821
|
+
if (/schedul|cron|recurring|time.?zone|appointment|booking|remind/i.test(allText)) {
|
|
1822
|
+
hints.push('Timezone handling: specify how dates/times are stored (UTC recommended) and how user-local display is handled');
|
|
1823
|
+
}
|
|
1824
|
+
|
|
1825
|
+
// Auth / passwords → hashing strategy
|
|
1826
|
+
if (/auth|password|login|sign.?up|credential|session|jwt|token/i.test(allText)) {
|
|
1827
|
+
hints.push('Authentication security: specify password hashing algorithm (bcrypt/argon2), token expiry, and session invalidation strategy');
|
|
1828
|
+
}
|
|
1829
|
+
|
|
1830
|
+
// Messaging / notifications → delivery guarantees
|
|
1831
|
+
if (/messag|chat|notification|push|sms|whatsapp|email|webhook/i.test(allText)) {
|
|
1832
|
+
hints.push('Message delivery: specify retry/queue strategy for failed deliveries, idempotency handling, and rate limiting');
|
|
1833
|
+
}
|
|
1834
|
+
|
|
1835
|
+
// File uploads / media → size limits and storage
|
|
1836
|
+
if (/upload|media|image|file|attachment|storage|s3|blob/i.test(allText)) {
|
|
1837
|
+
hints.push('File handling: specify max file size, allowed MIME types, storage backend, and virus/malware scanning if applicable');
|
|
1838
|
+
}
|
|
1839
|
+
|
|
1840
|
+
// Payment / billing → PCI compliance
|
|
1841
|
+
if (/payment|billing|invoice|subscript|stripe|charge|refund/i.test(allText)) {
|
|
1842
|
+
hints.push('Payment security: specify PCI compliance approach (tokenization recommended), refund handling, and receipt/audit trail');
|
|
1843
|
+
}
|
|
1844
|
+
|
|
1845
|
+
// Real-time / websocket → connection management
|
|
1846
|
+
if (/real.?time|websocket|socket\.io|live|stream|sse|event.?source/i.test(allText)) {
|
|
1847
|
+
hints.push('Real-time connections: specify reconnection strategy, heartbeat interval, and graceful degradation when WebSocket is unavailable');
|
|
1848
|
+
}
|
|
1849
|
+
|
|
1850
|
+
return hints;
|
|
1851
|
+
}
|
|
1852
|
+
|
|
1853
|
+
/**
|
|
1854
|
+
* Deterministic quality check for generated context.md — verifies structural completeness
|
|
1855
|
+
* and source fidelity without relying on LLM self-assessment.
|
|
1856
|
+
* @param {string} contextText - Generated context.md content
|
|
1857
|
+
* @param {Object} sourceJson - The canonical source JSON (epic or story)
|
|
1858
|
+
* @param {'epic'|'story'} type
|
|
1859
|
+
* @param {Set<string>} [validIds] - Optional set of all valid epic/story IDs in the hierarchy for dependency cross-validation
|
|
1860
|
+
* @returns {{ score: number, issues: string[] }}
|
|
1861
|
+
*/
|
|
1862
|
+
_computeContextScore(contextText, sourceJson, type, validIds = null) {
|
|
1863
|
+
let score = 100;
|
|
1864
|
+
const issues = [];
|
|
1865
|
+
const lower = contextText.toLowerCase();
|
|
1866
|
+
|
|
1867
|
+
// 1. Required sections present
|
|
1868
|
+
const requiredSections = type === 'epic'
|
|
1869
|
+
? ['## Identity', '## Purpose', '## Scope', '### In Scope', '### Out of Scope',
|
|
1870
|
+
'## Features', '## Non-Functional Requirements', '## Dependencies', '## Success Criteria', '## Stories Overview']
|
|
1871
|
+
: ['## Identity', '## User Story', '## Summary', '## Scope', '### In Scope',
|
|
1872
|
+
'### Out of Scope', '## Acceptance Criteria', '## Technical Notes', '## Dependencies'];
|
|
1873
|
+
|
|
1874
|
+
for (const section of requiredSections) {
|
|
1875
|
+
if (!contextText.includes(section)) {
|
|
1876
|
+
score -= 10;
|
|
1877
|
+
issues.push(`Missing required section: ${section}`);
|
|
1878
|
+
}
|
|
1879
|
+
}
|
|
1880
|
+
|
|
1881
|
+
// 2. Feature / acceptance criteria coverage against source JSON
|
|
1882
|
+
if (type === 'epic') {
|
|
1883
|
+
const features = sourceJson.features || [];
|
|
1884
|
+
let missing = 0;
|
|
1885
|
+
for (const feature of features) {
|
|
1886
|
+
// Extract the key term before parenthetical details
|
|
1887
|
+
const featureKey = feature.split('(')[0].trim().toLowerCase().replace(/[-_]/g, ' ');
|
|
1888
|
+
if (!lower.includes(featureKey) && featureKey.length > 3) {
|
|
1889
|
+
missing++;
|
|
1890
|
+
issues.push(`Feature possibly missing from output: "${feature.slice(0, 60)}"`);
|
|
1891
|
+
}
|
|
1892
|
+
}
|
|
1893
|
+
score -= missing * 5;
|
|
1894
|
+
} else {
|
|
1895
|
+
const acceptance = sourceJson.acceptance || [];
|
|
1896
|
+
if (acceptance.length > 0) {
|
|
1897
|
+
// Strip markdown formatting (backticks, bold, italic) for fuzzy matching
|
|
1898
|
+
const stripMd = (s) => s.toLowerCase().replace(/[`*_~]/g, '').replace(/\s+/g, ' ');
|
|
1899
|
+
const lowerStripped = stripMd(contextText);
|
|
1900
|
+
let matched = 0;
|
|
1901
|
+
for (const ac of acceptance) {
|
|
1902
|
+
const acStripped = stripMd(ac);
|
|
1903
|
+
// Extract key terms (3+ char words) and check if majority appear in context
|
|
1904
|
+
const keyTerms = acStripped.split(/\s+/).filter(w => w.length >= 3 && !/^(the|and|for|with|from|that|this|are|was|has|have|will|can|not|but|its|also|into)$/.test(w));
|
|
1905
|
+
if (keyTerms.length === 0) { matched++; continue; }
|
|
1906
|
+
const found = keyTerms.filter(term => lowerStripped.includes(term)).length;
|
|
1907
|
+
if (found / keyTerms.length >= 0.5) matched++;
|
|
1908
|
+
}
|
|
1909
|
+
const coverage = matched / acceptance.length;
|
|
1910
|
+
if (coverage < 0.7) {
|
|
1911
|
+
const missCount = acceptance.length - matched;
|
|
1912
|
+
score -= missCount * 5;
|
|
1913
|
+
issues.push(`Only ${matched}/${acceptance.length} acceptance criteria found in output`);
|
|
1914
|
+
}
|
|
1915
|
+
}
|
|
1916
|
+
}
|
|
1917
|
+
|
|
1918
|
+
// 3. Non-ASCII anomaly detection (catches Chinese/CJK character leaks from local models)
|
|
1919
|
+
const cjkPattern = /[\u3000-\u9FFF\uF900-\uFAFF\uFE30-\uFE4F]/g;
|
|
1920
|
+
const cjkMatches = contextText.match(cjkPattern);
|
|
1921
|
+
if (cjkMatches && cjkMatches.length > 0) {
|
|
1922
|
+
score -= 15;
|
|
1923
|
+
issues.push(`Non-ASCII anomaly: ${cjkMatches.length} CJK character(s) detected — likely model generation artifact`);
|
|
1924
|
+
}
|
|
1925
|
+
|
|
1926
|
+
// 4. Section minimum substance (headers with < 2 content lines)
|
|
1927
|
+
const sectionBlocks = contextText.split(/^## /m).filter(s => s.trim());
|
|
1928
|
+
for (const block of sectionBlocks) {
|
|
1929
|
+
const lines = block.split('\n').filter(l => l.trim());
|
|
1930
|
+
const sectionName = lines[0]?.trim().split('\n')[0] || 'unknown';
|
|
1931
|
+
// Skip subsections (### In Scope etc.) — they're part of parent
|
|
1932
|
+
if (sectionName.startsWith('#')) continue;
|
|
1933
|
+
if (lines.length < 2) {
|
|
1934
|
+
score -= 5;
|
|
1935
|
+
issues.push(`Section too thin: "## ${sectionName}" has only ${lines.length} content line(s)`);
|
|
1936
|
+
}
|
|
1937
|
+
}
|
|
1938
|
+
|
|
1939
|
+
// 5. Dependencies match source JSON
|
|
1940
|
+
const sourceDeps = sourceJson.dependencies || [];
|
|
1941
|
+
for (const dep of sourceDeps) {
|
|
1942
|
+
const depId = typeof dep === 'string' ? dep : dep.id || dep.name || '';
|
|
1943
|
+
if (depId && !contextText.includes(depId)) {
|
|
1944
|
+
score -= 5;
|
|
1945
|
+
issues.push(`Dependency from source JSON not found in output: "${depId}"`);
|
|
1946
|
+
}
|
|
1947
|
+
}
|
|
1948
|
+
|
|
1949
|
+
// 6. Dependency cross-validation — verify referenced IDs exist in hierarchy
|
|
1950
|
+
if (validIds && sourceDeps.length > 0) {
|
|
1951
|
+
for (const dep of sourceDeps) {
|
|
1952
|
+
const depId = typeof dep === 'string' ? dep : dep.id || dep.name || '';
|
|
1953
|
+
if (depId && /^context-\d{4}(-\d{4})?$/.test(depId) && !validIds.has(depId)) {
|
|
1954
|
+
score -= 10;
|
|
1955
|
+
issues.push(`Broken dependency chain: "${depId}" does not exist in the hierarchy`);
|
|
1956
|
+
}
|
|
1957
|
+
}
|
|
1958
|
+
}
|
|
1959
|
+
|
|
1960
|
+
// 7. Data model presence for data-heavy epics (soft suggestion, no score penalty)
|
|
1961
|
+
if (type === 'epic') {
|
|
1962
|
+
const featureText = (sourceJson.features || []).join(' ').toLowerCase();
|
|
1963
|
+
const hasDataKeywords = /stor|persist|record|track|log|database|schema|model|ingestion/.test(featureText);
|
|
1964
|
+
if (hasDataKeywords && !contextText.includes('## Data Model')) {
|
|
1965
|
+
issues.push('Consider adding "## Data Model Sketch" section — features mention data storage');
|
|
1966
|
+
}
|
|
1967
|
+
}
|
|
1968
|
+
|
|
1969
|
+
// 8. Auth mechanism consistency — check against root context
|
|
1970
|
+
if (this.rootContextMd) {
|
|
1971
|
+
const rootLower = this.rootContextMd.toLowerCase();
|
|
1972
|
+
const isNoAuth = rootLower.includes('none (public') || rootLower.includes('no authentication required');
|
|
1973
|
+
const isSessionAuth = !isNoAuth && (rootLower.includes('session-based') || rootLower.includes('httponly'));
|
|
1974
|
+
const isJwtAuth = !isNoAuth && rootLower.includes('jwt bearer');
|
|
1975
|
+
if (isNoAuth) {
|
|
1976
|
+
// No-auth project — penalize any auth mechanism references
|
|
1977
|
+
if (/\bjwt\b|bearer\s+token|session\s+cookie|httponly|authorization\s*:\s*bearer|login|sign.?in/i.test(contextText)) {
|
|
1978
|
+
score -= 10;
|
|
1979
|
+
issues.push('Auth inconsistency: context references auth mechanisms but project has no authentication');
|
|
1980
|
+
}
|
|
1981
|
+
} else if (isSessionAuth) {
|
|
1982
|
+
// Session auth project — penalize JWT references
|
|
1983
|
+
if (/\bjwt\b|bearer\s+token|authorization\s*:\s*bearer/i.test(contextText)) {
|
|
1984
|
+
score -= 10;
|
|
1985
|
+
issues.push('Auth inconsistency: context references JWT/bearer tokens but project uses session-based auth (httpOnly cookies)');
|
|
1986
|
+
}
|
|
1987
|
+
} else if (isJwtAuth) {
|
|
1988
|
+
// JWT project — penalize session cookie references
|
|
1989
|
+
if (/session\s+cookie|httponly\s+cookie|sessionid\s+cookie/i.test(contextText)) {
|
|
1990
|
+
score -= 10;
|
|
1991
|
+
issues.push('Auth inconsistency: context references session cookies but project uses JWT bearer tokens');
|
|
1992
|
+
}
|
|
1993
|
+
}
|
|
1994
|
+
}
|
|
1995
|
+
|
|
1996
|
+
// 9. Rate limit numeric consistency — detect contradictory values within same context
|
|
1997
|
+
const rateLimitMatches = contextText.match(/(\d+)\s*(requests?|req|messages?|calls?)\s*\/?\s*(second|sec|s|minute|min|m|hour|hr|h)\b/gi) || [];
|
|
1998
|
+
if (rateLimitMatches.length >= 2) {
|
|
1999
|
+
// Normalize to per-minute for comparison
|
|
2000
|
+
const normalize = (match) => {
|
|
2001
|
+
const m = match.match(/(\d+)\s*(?:requests?|req|messages?|calls?)\s*\/?\s*(second|sec|s|minute|min|m|hour|hr|h)/i);
|
|
2002
|
+
if (!m) return null;
|
|
2003
|
+
const val = parseInt(m[1]);
|
|
2004
|
+
const unit = m[2].toLowerCase();
|
|
2005
|
+
if (unit.startsWith('s')) return val * 60; // per sec → per min
|
|
2006
|
+
if (unit.startsWith('h')) return val / 60; // per hour → per min
|
|
2007
|
+
return val; // per min
|
|
2008
|
+
};
|
|
2009
|
+
const normalized = rateLimitMatches.map(normalize).filter(v => v !== null);
|
|
2010
|
+
if (normalized.length >= 2) {
|
|
2011
|
+
const min = Math.min(...normalized);
|
|
2012
|
+
const max = Math.max(...normalized);
|
|
2013
|
+
if (max / min > 10) { // >10x discrepancy
|
|
2014
|
+
score -= 5;
|
|
2015
|
+
issues.push(`Rate limit inconsistency: found values ranging from ${min}/min to ${max}/min (${max/min}x difference)`);
|
|
2016
|
+
}
|
|
2017
|
+
}
|
|
2018
|
+
}
|
|
2019
|
+
|
|
2020
|
+
return { score: Math.max(0, score), issues };
|
|
2021
|
+
}
|
|
2022
|
+
|
|
2023
|
+
/**
|
|
2024
|
+
* Call generateJSON with tool support if the provider has it, otherwise plain generateJSON.
|
|
2025
|
+
* Only provides tools on the first write call (iter 0) — refinement iterations don't need them.
|
|
2026
|
+
* @param {Object} provider - LLM provider instance
|
|
2027
|
+
* @param {string} prompt - User prompt
|
|
2028
|
+
* @param {string} instructions - System/agent instructions
|
|
2029
|
+
* @param {boolean} [useTools=false] - Whether to attempt tool-augmented generation
|
|
2030
|
+
* @returns {Promise<Object>} Parsed JSON result
|
|
2031
|
+
*/
|
|
2032
|
+
async _generateJSONMaybeWithTools(provider, prompt, instructions, useTools = false) {
|
|
2033
|
+
if (useTools && typeof provider.generateJSONWithTools === 'function') {
|
|
2034
|
+
try {
|
|
2035
|
+
return await provider.generateJSONWithTools(
|
|
2036
|
+
prompt, instructions,
|
|
2037
|
+
CONTEXT_GENERATION_TOOLS, dispatchToolCall
|
|
2038
|
+
);
|
|
2039
|
+
} catch (err) {
|
|
2040
|
+
this.debug(`Tool-augmented generation failed, falling back to plain generateJSON: ${err.message}`);
|
|
2041
|
+
}
|
|
2042
|
+
}
|
|
2043
|
+
return provider.generateJSON(prompt, instructions);
|
|
2044
|
+
}
|
|
2045
|
+
|
|
2046
|
+
/**
|
|
2047
|
+
* Generate a complete canonical context.md for an epic using an LLM agent.
|
|
2048
|
+
* Uses deterministic pre-check (not LLM self-score) to decide whether to invoke the reviewer.
|
|
2049
|
+
* Falls back to the structured formatter if the LLM call fails.
|
|
2050
|
+
* @param {Object} epic
|
|
2051
|
+
* @param {LLMProvider} provider
|
|
2052
|
+
* @returns {Promise<string>} context.md text
|
|
2053
|
+
*/
|
|
2054
|
+
async generateEpicContextMdLLM(epic, provider) {
|
|
2055
|
+
const writerInstructions = loadAgent('context-writer-epic.md');
|
|
2056
|
+
const reviewerInstructions = loadAgent('context-reviewer-epic.md');
|
|
2057
|
+
const rootSection = this.rootContextMd ? `## Project Context\n\n${this.rootContextMd}\n\n` : '';
|
|
2058
|
+
|
|
2059
|
+
// Canonical source for both writer and reviewer
|
|
2060
|
+
const epicForContext = {
|
|
2061
|
+
id: epic.id,
|
|
2062
|
+
name: epic.name,
|
|
2063
|
+
domain: epic.domain,
|
|
2064
|
+
description: epic.description,
|
|
2065
|
+
features: epic.features || [],
|
|
2066
|
+
dependencies: epic.dependencies || [],
|
|
2067
|
+
stories: (epic.stories || []).map(s => ({ id: s.id || 'TBD', name: s.name })),
|
|
2068
|
+
};
|
|
2069
|
+
const epicJson = JSON.stringify(epicForContext, null, 2);
|
|
2070
|
+
|
|
2071
|
+
// Template-based scaffolding — pre-generate deterministic sections so the LLM
|
|
2072
|
+
// only needs to write Purpose, Scope, Data Model, NFRs, and Success Criteria.
|
|
2073
|
+
const scaffold = this._buildEpicScaffold(epicForContext);
|
|
2074
|
+
const scaffoldHint = `\n\n## Pre-built Scaffold (use verbatim for these sections, write the rest)\n\n\`\`\`\n${scaffold}\n\`\`\``;
|
|
2075
|
+
|
|
2076
|
+
// Domain-aware prompt injection — detect project features and add mandatory checklist items
|
|
2077
|
+
const domainHints = this._detectDomainHints(this.rootContextMd || '', epicForContext);
|
|
2078
|
+
const domainSection = domainHints.length > 0
|
|
2079
|
+
? `\n\n## Domain-Specific Requirements (MUST address in NFRs or Technical Notes)\n${domainHints.map(h => `- ${h}`).join('\n')}`
|
|
2080
|
+
: '';
|
|
2081
|
+
|
|
2082
|
+
const baseWriterPrompt = `${rootSection}## Epic JSON\n\n\`\`\`json\n${epicJson}\n\`\`\`${scaffoldHint}${domainSection}`;
|
|
2083
|
+
const baseReviewerPrompt = `${rootSection}## Original Epic JSON\n\n\`\`\`json\n${epicJson}\n\`\`\``;
|
|
2084
|
+
|
|
2085
|
+
let bestContext = null;
|
|
2086
|
+
let bestScore = 0;
|
|
2087
|
+
let writerPrompt = `${baseWriterPrompt}\n\nWrite the complete context.md for this epic. Use the pre-built scaffold verbatim for Identity, Features, Dependencies, and Stories Overview sections. Focus your effort on Purpose, Scope, Data Model, NFRs, and Success Criteria.`;
|
|
2088
|
+
|
|
2089
|
+
// Write → Review → Refine loop (max 2 review rounds = max 3 LLM calls total)
|
|
2090
|
+
for (let iter = 0; iter < 3; iter++) {
|
|
2091
|
+
// Step 1: Write (or refine) — use tool-augmented generation on first iteration
|
|
2092
|
+
const writeResult = await this._generateJSONMaybeWithTools(provider, writerPrompt, writerInstructions, iter === 0);
|
|
2093
|
+
const contextText = (typeof writeResult?.context === 'string' && writeResult.context.trim()) ? writeResult.context : null;
|
|
2094
|
+
const writerScore = typeof writeResult?.completenessScore === 'number' ? writeResult.completenessScore : 100;
|
|
2095
|
+
const writerGaps = Array.isArray(writeResult?.gaps) ? writeResult.gaps : [];
|
|
2096
|
+
|
|
2097
|
+
if (!contextText) {
|
|
2098
|
+
this.debug(`[context-writer-epic] iter=${iter + 1} — no context returned, stopping (epic: ${epic.name})`);
|
|
2099
|
+
break;
|
|
2100
|
+
}
|
|
2101
|
+
|
|
2102
|
+
// Deterministic pre-check — verifies structure and source fidelity without LLM
|
|
2103
|
+
const preCheck = this._computeContextScore(contextText, epicForContext, 'epic', this._hierarchyValidIds);
|
|
2104
|
+
const canSkipReview = preCheck.score >= 92;
|
|
2105
|
+
|
|
2106
|
+
if (canSkipReview) {
|
|
2107
|
+
this.debug(`[context-epic] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} — skipping review (deterministic pass) (epic: ${epic.name})`);
|
|
2108
|
+
if (preCheck.score > bestScore) {
|
|
2109
|
+
bestContext = contextText;
|
|
2110
|
+
bestScore = preCheck.score;
|
|
2111
|
+
}
|
|
2112
|
+
break;
|
|
2113
|
+
}
|
|
2114
|
+
|
|
2115
|
+
// Pre-check found issues — run independent LLM review
|
|
2116
|
+
this.debug(`[context-epic] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} preCheckIssues=${preCheck.issues.length} — triggering review (epic: ${epic.name})`);
|
|
2117
|
+
|
|
2118
|
+
// Step 2: Independent review — verifies accuracy against source JSON
|
|
2119
|
+
const reviewPrompt = `${baseReviewerPrompt}\n\n## Generated context.md\n\n${contextText}\n\nAudit this context.md against the source JSON.`;
|
|
2120
|
+
const reviewResult = await provider.generateJSON(reviewPrompt, reviewerInstructions);
|
|
2121
|
+
const reviewScore = typeof reviewResult?.score === 'number' ? reviewResult.score : preCheck.score;
|
|
2122
|
+
const reviewIssues = Array.isArray(reviewResult?.issues) ? reviewResult.issues : [];
|
|
2123
|
+
const accurate = reviewResult?.accurate === true;
|
|
2124
|
+
|
|
2125
|
+
// Combine deterministic issues with LLM reviewer issues (dedup by prefix)
|
|
2126
|
+
const combinedIssues = [...preCheck.issues];
|
|
2127
|
+
for (const ri of reviewIssues) {
|
|
2128
|
+
if (!combinedIssues.some(ci => ci.slice(0, 40) === ri.slice(0, 40))) {
|
|
2129
|
+
combinedIssues.push(ri);
|
|
2130
|
+
}
|
|
2131
|
+
}
|
|
2132
|
+
|
|
2133
|
+
// Keep the best version seen so far (use reviewer score, not self-score)
|
|
2134
|
+
if (reviewScore > bestScore) {
|
|
2135
|
+
bestContext = contextText;
|
|
2136
|
+
bestScore = reviewScore;
|
|
2137
|
+
}
|
|
2138
|
+
|
|
2139
|
+
this.debug(`[context-epic] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} reviewScore=${reviewScore} accurate=${accurate} issues=${combinedIssues.length} (epic: ${epic.name})`);
|
|
2140
|
+
|
|
2141
|
+
// Stop if reviewer confirms accuracy and score is high enough
|
|
2142
|
+
if (accurate && reviewScore >= 85) break;
|
|
2143
|
+
if (iter === 2) break; // max iterations reached
|
|
2144
|
+
|
|
2145
|
+
// Step 3: Build refinement prompt combining deterministic + reviewer + writer issues
|
|
2146
|
+
const allFeedback = [
|
|
2147
|
+
...combinedIssues,
|
|
2148
|
+
...writerGaps.filter(g => !combinedIssues.some(i => i.includes(g.slice(0, 30)))),
|
|
2149
|
+
];
|
|
2150
|
+
const feedbackText = allFeedback.map((f, i) => `${i + 1}. ${f}`).join('\n');
|
|
2151
|
+
writerPrompt = `${baseWriterPrompt}\n\n## Draft Context (Review Score: ${reviewScore}/100)\n\n${contextText}\n\n## Issues to Fix\n\n${feedbackText}\n\nRevise the context.md to address all issues above. Return improved JSON.`;
|
|
2152
|
+
}
|
|
2153
|
+
|
|
2154
|
+
return bestContext || this.generateEpicContextMd(epic);
|
|
2155
|
+
}
|
|
2156
|
+
|
|
2157
|
+
/**
 * Generate a complete canonical context.md for a story using an LLM agent.
 * Uses deterministic pre-check (not LLM self-score) to decide whether to invoke the reviewer.
 * Falls back to the structured formatter if the LLM call fails or returns nothing usable.
 * Provider errors propagate to the caller (no try/catch here).
 * @param {Object} story
 * @param {Object} epic - Parent epic
 * @param {string} epicContextMd - Parent epic's generated context.md (included in the writer prompt when non-empty)
 * @param {LLMProvider} provider
 * @returns {Promise<string>} context.md text
 */
async generateStoryContextMdLLM(story, epic, epicContextMd, provider) {
  const writerInstructions = loadAgent('context-writer-story.md');
  const reviewerInstructions = loadAgent('context-reviewer-story.md');
  const rootSection = this.rootContextMd ? `## Project Context\n\n${this.rootContextMd}\n\n` : '';
  const epicSection = epicContextMd ? `## Parent Epic Context\n\n${epicContextMd}\n\n` : '';

  // Canonical source for both writer and reviewer
  const storyForContext = {
    id: story.id || 'TBD',
    name: story.name,
    userType: story.userType || 'team member',
    description: story.description,
    acceptance: story.acceptance || [],
    dependencies: story.dependencies || [],
    epicId: epic.id || 'TBD',
    epicName: epic.name,
  };
  const storyJson = JSON.stringify(storyForContext, null, 2);

  // Template-based scaffolding for stories
  const scaffold = this._buildStoryScaffold(storyForContext);
  const scaffoldHint = `\n\n## Pre-built Scaffold (use verbatim for these sections, write the rest)\n\n\`\`\`\n${scaffold}\n\`\`\``;

  // Domain-aware hints for stories
  // (acceptance criteria stand in for the "features" field the detector expects on epics)
  const domainHints = this._detectDomainHints(this.rootContextMd || '', { features: story.acceptance || [], description: story.description });
  const domainSection = domainHints.length > 0
    ? `\n\n## Domain-Specific Requirements (MUST address in Technical Notes)\n${domainHints.map(h => `- ${h}`).join('\n')}`
    : '';

  const baseWriterPrompt = `${rootSection}${epicSection}## Story JSON\n\n\`\`\`json\n${storyJson}\n\`\`\`${scaffoldHint}${domainSection}`;
  const baseReviewerPrompt = `${rootSection}## Original Story JSON\n\n\`\`\`json\n${storyJson}\n\`\`\``;

  // Best-seen tracking: the highest-scoring draft wins even if a later iteration regresses.
  let bestContext = null;
  let bestScore = 0;
  let writerPrompt = `${baseWriterPrompt}\n\nWrite the complete context.md for this story. Use the pre-built scaffold verbatim for Identity, Acceptance Criteria, and Dependencies sections. Focus on User Story, Summary, Scope, and Technical Notes.`;

  // Write → Review → Refine loop (max 2 review rounds = max 3 LLM calls total)
  for (let iter = 0; iter < 3; iter++) {
    // Step 1: Write (or refine) — use tool-augmented generation on first iteration
    const writeResult = await this._generateJSONMaybeWithTools(provider, writerPrompt, writerInstructions, iter === 0);
    const contextText = (typeof writeResult?.context === 'string' && writeResult.context.trim()) ? writeResult.context : null;
    // Writer self-score defaults to 100 when absent; only used for debug logging and gap feedback.
    const writerScore = typeof writeResult?.completenessScore === 'number' ? writeResult.completenessScore : 100;
    const writerGaps = Array.isArray(writeResult?.gaps) ? writeResult.gaps : [];

    if (!contextText) {
      this.debug(`[context-writer-story] iter=${iter + 1} — no context returned, stopping (story: ${story.name})`);
      break;
    }

    // Deterministic pre-check — verifies structure and source fidelity without LLM
    const preCheck = this._computeContextScore(contextText, storyForContext, 'story', this._hierarchyValidIds);
    const canSkipReview = preCheck.score >= 92;

    if (canSkipReview) {
      this.debug(`[context-story] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} — skipping review (deterministic pass) (story: ${story.name})`);
      if (preCheck.score > bestScore) {
        bestContext = contextText;
        bestScore = preCheck.score;
      }
      break;
    }

    // Pre-check found issues — run independent LLM review
    this.debug(`[context-story] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} preCheckIssues=${preCheck.issues.length} — triggering review (story: ${story.name})`);

    // Step 2: Independent review — verifies accuracy against source JSON
    const reviewPrompt = `${baseReviewerPrompt}\n\n## Generated context.md\n\n${contextText}\n\nAudit this context.md against the source JSON.`;
    const reviewResult = await provider.generateJSON(reviewPrompt, reviewerInstructions);
    // Reviewer score falls back to the deterministic score when the reviewer omits it.
    const reviewScore = typeof reviewResult?.score === 'number' ? reviewResult.score : preCheck.score;
    const reviewIssues = Array.isArray(reviewResult?.issues) ? reviewResult.issues : [];
    const accurate = reviewResult?.accurate === true;

    // Combine deterministic issues with LLM reviewer issues (dedup by prefix)
    const combinedIssues = [...preCheck.issues];
    for (const ri of reviewIssues) {
      if (!combinedIssues.some(ci => ci.slice(0, 40) === ri.slice(0, 40))) {
        combinedIssues.push(ri);
      }
    }

    // Keep the best version seen so far (use reviewer score, not self-score)
    if (reviewScore > bestScore) {
      bestContext = contextText;
      bestScore = reviewScore;
    }

    this.debug(`[context-story] iter=${iter + 1} writerScore=${writerScore} preCheck=${preCheck.score} reviewScore=${reviewScore} accurate=${accurate} issues=${combinedIssues.length} (story: ${story.name})`);

    // Stop only when the reviewer confirms accuracy AND both the reviewer and the
    // deterministic pre-check scores clear the threshold.
    if (accurate && reviewScore >= 85 && preCheck.score >= 85) break;
    if (iter === 2) break;

    // Step 3: Refinement prompt combining deterministic + reviewer + writer issues
    const allFeedback = [
      ...combinedIssues,
      ...writerGaps.filter(g => !combinedIssues.some(i => i.includes(g.slice(0, 30)))),
    ];
    const feedbackText = allFeedback.map((f, i) => `${i + 1}. ${f}`).join('\n');
    writerPrompt = `${baseWriterPrompt}\n\n## Draft Context (Review Score: ${reviewScore}/100)\n\n${contextText}\n\n## Issues to Fix\n\n${feedbackText}\n\nRevise the context.md to address all issues above. Return improved JSON.`;
  }

  // Structured fallback keeps the pipeline moving when no usable LLM draft was produced.
  return bestContext || this.generateStoryContextMd(story, epic);
}
|
|
2268
|
+
|
|
2269
|
+
/**
|
|
2270
|
+
* Pre-generate LLM context.md for all epics and stories before validation.
|
|
2271
|
+
* Results are cached in _epicContextCache (keyed by epic.name) and
|
|
2272
|
+
* _storyContextCache (keyed by "epicName::storyName").
|
|
2273
|
+
* Uses 'context-generation' stage config if defined; falls back to 'doc-generation'.
|
|
2274
|
+
* @param {Object} hierarchy
|
|
2275
|
+
* @param {Function} progressCallback
|
|
2276
|
+
*/
|
|
2277
|
+
async generateContextFiles(hierarchy, progressCallback = null) {
|
|
2278
|
+
this.debugStage(4.8, 'Pre-generate LLM Context Files');
|
|
2279
|
+
this._epicContextCache = new Map();
|
|
2280
|
+
this._storyContextCache = new Map();
|
|
2281
|
+
|
|
2282
|
+
// Build set of all valid IDs for dependency cross-validation
|
|
2283
|
+
this._hierarchyValidIds = new Set();
|
|
2284
|
+
for (const epic of hierarchy.epics) {
|
|
2285
|
+
if (epic.id) this._hierarchyValidIds.add(epic.id);
|
|
2286
|
+
for (const story of epic.stories || []) {
|
|
2287
|
+
if (story.id) this._hierarchyValidIds.add(story.id);
|
|
2288
|
+
}
|
|
2289
|
+
}
|
|
2290
|
+
this.debug(`Dependency cross-validation: ${this._hierarchyValidIds.size} valid IDs indexed`);
|
|
2291
|
+
|
|
2292
|
+
// Use context-generation stage if configured; fall back to doc-generation then ceremony default
|
|
2293
|
+
const stageName = this.stagesConfig?.['context-generation'] ? 'context-generation' : 'doc-generation';
|
|
2294
|
+
const provider = await this.getProviderForStageInstance(stageName);
|
|
2295
|
+
const { model: modelName } = this.getProviderForStage(stageName);
|
|
2296
|
+
this.debug(`Context generation using model: ${modelName}`);
|
|
2297
|
+
|
|
2298
|
+
const epicCount = hierarchy.epics.length;
|
|
2299
|
+
const storyCount = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
|
|
2300
|
+
await progressCallback?.(null, `Generating context for ${epicCount} epics + ${storyCount} stories…`, {});
|
|
2301
|
+
|
|
2302
|
+
// Concurrency control — limit concurrent context-generation calls to avoid saturating the server.
|
|
2303
|
+
const defaultCtxConcurrency = this._providerName === 'local' ? 2 : hierarchy.epics.length;
|
|
2304
|
+
const ctxConcurrency = this.stagesConfig?.['context-generation']?.concurrency ?? defaultCtxConcurrency;
|
|
2305
|
+
this.debug(`Context generation concurrency: ${ctxConcurrency} (provider: ${this._providerName})`);
|
|
2306
|
+
|
|
2307
|
+
// Helper: run async tasks with concurrency limit
|
|
2308
|
+
const runWithConcurrency = async (items, fn, limit) => {
|
|
2309
|
+
if (limit >= items.length) {
|
|
2310
|
+
return Promise.all(items.map(fn));
|
|
2311
|
+
}
|
|
2312
|
+
const queue = [...items];
|
|
2313
|
+
const running = new Set();
|
|
2314
|
+
let idx = 0;
|
|
2315
|
+
while (idx < queue.length || running.size > 0) {
|
|
2316
|
+
while (idx < queue.length && running.size < limit) {
|
|
2317
|
+
const item = queue[idx++];
|
|
2318
|
+
const p = fn(item).then(() => running.delete(p));
|
|
2319
|
+
running.add(p);
|
|
2320
|
+
}
|
|
2321
|
+
if (running.size > 0) await Promise.race(running);
|
|
2322
|
+
}
|
|
2323
|
+
};
|
|
2324
|
+
|
|
2325
|
+
// Phase 1: Generate epic contexts with concurrency control
|
|
2326
|
+
const epicCtxFailures = { count: 0 };
|
|
2327
|
+
await runWithConcurrency(hierarchy.epics, async (epic) => {
|
|
2328
|
+
if (epicCtxFailures.count >= 3) {
|
|
2329
|
+
this.debug(`Skipping LLM context for epic "${epic.name}" — too many context-size failures, using structured fallback`);
|
|
2330
|
+
this._epicContextCache.set(epic.name, this.generateEpicContextMd(epic));
|
|
2331
|
+
} else {
|
|
2332
|
+
try {
|
|
2333
|
+
this.debug(`Generating context for epic: ${epic.name}`);
|
|
2334
|
+
const epicContextMd = await this.generateEpicContextMdLLM(epic, provider);
|
|
2335
|
+
this._epicContextCache.set(epic.name, epicContextMd);
|
|
2336
|
+
this.debug(`Epic context generated: ${epic.name} (${epicContextMd.length} bytes)`);
|
|
2337
|
+
} catch (err) {
|
|
2338
|
+
if (err.message?.toLowerCase().includes('context size') || err.message?.includes('exceeded')) {
|
|
2339
|
+
epicCtxFailures.count++;
|
|
2340
|
+
this.debug(`Context-size failure #${epicCtxFailures.count} for epic context "${epic.name}"`, { error: err.message });
|
|
2341
|
+
}
|
|
2342
|
+
this.debug(`Epic context generation failed — using structured fallback (${epic.name})`, { error: err.message });
|
|
2343
|
+
this._epicContextCache.set(epic.name, this.generateEpicContextMd(epic));
|
|
2344
|
+
}
|
|
2345
|
+
}
|
|
2346
|
+
// Write context.md immediately using provisional ID — visible on disk during the ceremony.
|
|
2347
|
+
// Stage 6 (writeHierarchyFiles) will rename the folder if IDs change after renumbering.
|
|
2348
|
+
try {
|
|
2349
|
+
const provisionalEpicDir = path.join(this.projectPath, epic.id);
|
|
2350
|
+
if (!fs.existsSync(provisionalEpicDir)) fs.mkdirSync(provisionalEpicDir, { recursive: true });
|
|
2351
|
+
fs.writeFileSync(path.join(provisionalEpicDir, 'context.md'), this._epicContextCache.get(epic.name), 'utf8');
|
|
2352
|
+
this.debug(`Epic context.md written early (provisional): ${epic.id}/context.md`);
|
|
2353
|
+
} catch (err) {
|
|
2354
|
+
this.debug(`Early epic context.md write failed — will retry in Stage 6`, { error: err.message });
|
|
2355
|
+
}
|
|
2356
|
+
}, ctxConcurrency);
|
|
2357
|
+
|
|
2358
|
+
// Phase 2: Generate story contexts with concurrency control.
|
|
2359
|
+
// Stories within each epic run sequentially to maximise OpenAI prefix-cache hits:
|
|
2360
|
+
// all stories of the same epic share an identical system-message prefix
|
|
2361
|
+
// (agentInstructions + epic context.md), so calls 2-N hit the cache at a 90% discount.
|
|
2362
|
+
const storyCtxFailures = { count: 0 };
|
|
2363
|
+
await runWithConcurrency(hierarchy.epics, async (epic) => {
|
|
2364
|
+
const epicContextMd = this._epicContextCache.get(epic.name) || this.generateEpicContextMd(epic);
|
|
2365
|
+
for (const story of (epic.stories || [])) {
|
|
2366
|
+
const cacheKey = `${epic.name}::${story.name}`;
|
|
2367
|
+
if (storyCtxFailures.count >= 3) {
|
|
2368
|
+
this.debug(`Skipping LLM context for story "${story.name}" — too many context-size failures, using structured fallback`);
|
|
2369
|
+
this._storyContextCache.set(cacheKey, this.generateStoryContextMd(story, epic));
|
|
2370
|
+
} else {
|
|
2371
|
+
try {
|
|
2372
|
+
this.debug(`Generating context for story: ${story.name}`);
|
|
2373
|
+
const storyContextMd = await this.generateStoryContextMdLLM(story, epic, epicContextMd, provider);
|
|
2374
|
+
this._storyContextCache.set(cacheKey, storyContextMd);
|
|
2375
|
+
this.debug(`Story context generated: ${story.name} (${storyContextMd.length} bytes)`);
|
|
2376
|
+
} catch (err) {
|
|
2377
|
+
if (err.message?.toLowerCase().includes('context size') || err.message?.includes('exceeded')) {
|
|
2378
|
+
storyCtxFailures.count++;
|
|
2379
|
+
this.debug(`Context-size failure #${storyCtxFailures.count} for story context "${story.name}"`, { error: err.message });
|
|
2380
|
+
}
|
|
2381
|
+
this.debug(`Story context generation failed — using structured fallback (${story.name})`, { error: err.message });
|
|
2382
|
+
this._storyContextCache.set(cacheKey, this.generateStoryContextMd(story, epic));
|
|
2383
|
+
}
|
|
2384
|
+
}
|
|
2385
|
+
// Write context.md immediately using provisional IDs.
|
|
2386
|
+
try {
|
|
2387
|
+
const provisionalStoryDir = path.join(this.projectPath, epic.id, story.id);
|
|
2388
|
+
if (!fs.existsSync(provisionalStoryDir)) fs.mkdirSync(provisionalStoryDir, { recursive: true });
|
|
2389
|
+
fs.writeFileSync(path.join(provisionalStoryDir, 'context.md'), this._storyContextCache.get(cacheKey), 'utf8');
|
|
2390
|
+
this.debug(`Story context.md written early (provisional): ${epic.id}/${story.id}/context.md`);
|
|
2391
|
+
} catch (err) {
|
|
2392
|
+
this.debug(`Early story context.md write failed — will retry in Stage 6`, { error: err.message });
|
|
2393
|
+
}
|
|
2394
|
+
}
|
|
2395
|
+
}, ctxConcurrency);
|
|
2396
|
+
|
|
2397
|
+
// Post-generation dependency validation — verify all context-XXXX references exist
|
|
2398
|
+
this._validateDependencyReferences(hierarchy);
|
|
2399
|
+
|
|
2400
|
+
this.debug(`Context generation complete: ${this._epicContextCache.size} epic contexts, ${this._storyContextCache.size} story contexts`);
|
|
2401
|
+
}
|
|
2402
|
+
|
|
2403
|
+
/**
|
|
2404
|
+
* Validate that all dependency references across the hierarchy point to existing IDs.
|
|
2405
|
+
* Removes broken references and logs warnings. Runs after context generation and after splits.
|
|
2406
|
+
* @param {Object} hierarchy
|
|
2407
|
+
*/
|
|
2408
|
+
_validateDependencyReferences(hierarchy) {
|
|
2409
|
+
const validIds = new Set();
|
|
2410
|
+
for (const epic of hierarchy.epics) {
|
|
2411
|
+
if (epic.id) validIds.add(epic.id);
|
|
2412
|
+
for (const story of epic.stories || []) {
|
|
2413
|
+
if (story.id) validIds.add(story.id);
|
|
2414
|
+
}
|
|
2415
|
+
}
|
|
2416
|
+
|
|
2417
|
+
let brokenCount = 0;
|
|
2418
|
+
let removedCount = 0;
|
|
2419
|
+
|
|
2420
|
+
for (const epic of hierarchy.epics) {
|
|
2421
|
+
if (Array.isArray(epic.dependencies)) {
|
|
2422
|
+
epic.dependencies = epic.dependencies.filter(dep => {
|
|
2423
|
+
if (/^context-\d{4}(-\d{4})?$/.test(dep) && !validIds.has(dep)) {
|
|
2424
|
+
brokenCount++;
|
|
2425
|
+
removedCount++;
|
|
2426
|
+
this.debug(`Broken dependency removed: epic "${epic.name}" referenced non-existent "${dep}"`);
|
|
2427
|
+
return false;
|
|
2428
|
+
}
|
|
2429
|
+
return true;
|
|
2430
|
+
});
|
|
2431
|
+
}
|
|
2432
|
+
for (const story of epic.stories || []) {
|
|
2433
|
+
if (Array.isArray(story.dependencies)) {
|
|
2434
|
+
story.dependencies = story.dependencies.filter(dep => {
|
|
2435
|
+
if (/^context-\d{4}(-\d{4})?$/.test(dep) && !validIds.has(dep)) {
|
|
2436
|
+
brokenCount++;
|
|
2437
|
+
removedCount++;
|
|
2438
|
+
this.debug(`Broken dependency removed: story "${story.name}" referenced non-existent "${dep}"`);
|
|
2439
|
+
return false;
|
|
2440
|
+
}
|
|
2441
|
+
return true;
|
|
2442
|
+
});
|
|
2443
|
+
}
|
|
2444
|
+
}
|
|
2445
|
+
}
|
|
2446
|
+
|
|
2447
|
+
if (brokenCount > 0) {
|
|
2448
|
+
this.debug(`Dependency validation: removed ${removedCount} broken reference(s) across hierarchy`);
|
|
2449
|
+
console.log(` ⚠ Removed ${removedCount} broken dependency reference(s) (hallucinated IDs)`);
|
|
2450
|
+
} else {
|
|
2451
|
+
this.debug('Dependency validation: all references valid');
|
|
2452
|
+
}
|
|
2453
|
+
}
|
|
2454
|
+
|
|
2455
|
+
/**
|
|
2456
|
+
* Generate narrative doc.md files for all epics and stories from their canonical context.md.
|
|
2457
|
+
* Replaces the old doc-distribution stage. Uses doc-writer-epic.md and doc-writer-story.md agents.
|
|
2458
|
+
*
|
|
2459
|
+
* Context chain:
|
|
2460
|
+
* Epic doc.md : root doc.md + epic context.md → narrative
|
|
2461
|
+
* Story doc.md : root doc.md + parent epic context.md + story context.md → narrative
|
|
2462
|
+
*
|
|
2463
|
+
* Strict: agents are instructed not to add scope beyond what is in context.md.
|
|
2464
|
+
*
|
|
2465
|
+
* @param {Object} hierarchy - Hierarchy with final validated epics and stories (real IDs)
|
|
2466
|
+
* @param {string} rootDocContent - Content of root doc.md
|
|
2467
|
+
* @param {Function} progressCallback
|
|
2468
|
+
*/
|
|
2469
|
+
async generateDocFiles(hierarchy, rootDocContent, progressCallback = null) {
|
|
2470
|
+
this.debugStage(6.5, 'Generate Narrative doc.md Files from Canonical context.md');
|
|
2471
|
+
|
|
2472
|
+
const epicAgentInstructions = loadAgent('doc-writer-epic.md');
|
|
2473
|
+
const storyAgentInstructions = loadAgent('doc-writer-story.md');
|
|
2474
|
+
// Uses 'doc-generation' stage config if defined; falls back to ceremony-level provider
|
|
2475
|
+
const provider = await this.getProviderForStageInstance('doc-generation');
|
|
2476
|
+
|
|
2477
|
+
const doGenerate = rootDocContent && rootDocContent.length > 0;
|
|
2478
|
+
if (!doGenerate) {
|
|
2479
|
+
this.debug('No root doc.md content — skipping doc generation, writing minimal stubs');
|
|
2480
|
+
}
|
|
2481
|
+
|
|
2482
|
+
await Promise.all(hierarchy.epics.map(async (epic) => {
|
|
2483
|
+
const epicDir = path.join(this.projectPath, epic.id);
|
|
2484
|
+
const epicContextPath = path.join(epicDir, 'context.md');
|
|
2485
|
+
const epicDocPath = path.join(epicDir, 'doc.md');
|
|
2486
|
+
|
|
2487
|
+
// Read the epic's canonical context.md (written earlier in writeHierarchyFiles)
|
|
2488
|
+
let epicContextMd = '';
|
|
2489
|
+
if (fs.existsSync(epicContextPath)) {
|
|
2490
|
+
epicContextMd = fs.readFileSync(epicContextPath, 'utf8');
|
|
2491
|
+
} else {
|
|
2492
|
+
epicContextMd = this.generateEpicContextMd(epic);
|
|
2493
|
+
}
|
|
2494
|
+
|
|
2495
|
+
// Generate epic doc.md
|
|
2496
|
+
if (doGenerate) {
|
|
2497
|
+
await progressCallback?.(null, `Generating documentation → ${epic.name}`, {});
|
|
2498
|
+
this.debug(`Generating doc.md for epic ${epic.id}: ${epic.name}`);
|
|
2499
|
+
try {
|
|
2500
|
+
const prompt = `## Project Documentation\n\n${rootDocContent}\n\n---\n\n## Epic Canonical Specification\n\n${epicContextMd}\n\nWrite the epic's doc.md. Return JSON with a \`doc\` field.`;
|
|
2501
|
+
const result = await this._withProgressHeartbeat(
|
|
2502
|
+
() => this.retryWithBackoff(
|
|
2503
|
+
() => provider.generateJSON(prompt, epicAgentInstructions),
|
|
2504
|
+
`doc generation for epic: ${epic.name}`
|
|
2505
|
+
),
|
|
2506
|
+
(elapsed) => {
|
|
2507
|
+
if (elapsed < 15) return `Writing ${epic.name} documentation…`;
|
|
2508
|
+
if (elapsed < 40) return `Expanding ${epic.name} narrative…`;
|
|
2509
|
+
return `Still writing…`;
|
|
2510
|
+
},
|
|
2511
|
+
progressCallback,
|
|
2512
|
+
10000
|
|
2513
|
+
);
|
|
2514
|
+
const epicDoc = (typeof result.doc === 'string' && result.doc.trim())
|
|
2515
|
+
? result.doc
|
|
2516
|
+
: `# ${epic.name}\n\n${epic.description || ''}\n`;
|
|
2517
|
+
fs.writeFileSync(epicDocPath, epicDoc, 'utf8');
|
|
2518
|
+
this.debug(`Epic doc.md written: ${epicDoc.length} bytes`);
|
|
2519
|
+
} catch (err) {
|
|
2520
|
+
this.debug(`Epic doc generation failed for ${epic.id} — writing stub`, { error: err.message });
|
|
2521
|
+
fs.writeFileSync(epicDocPath, `# ${epic.name}\n\n${epic.description || ''}\n`, 'utf8');
|
|
2522
|
+
}
|
|
2523
|
+
} else {
|
|
2524
|
+
fs.writeFileSync(epicDocPath, `# ${epic.name}\n\n${epic.description || ''}\n`, 'utf8');
|
|
2525
|
+
}
|
|
2526
|
+
|
|
2527
|
+
// Generate story doc.md files in parallel within this epic
|
|
2528
|
+
await Promise.all((epic.stories || []).map(async (story) => {
|
|
2529
|
+
const storyDir = path.join(epicDir, story.id);
|
|
2530
|
+
const storyContextPath = path.join(storyDir, 'context.md');
|
|
2531
|
+
const storyDocPath = path.join(storyDir, 'doc.md');
|
|
2532
|
+
|
|
2533
|
+
let storyContextMd = '';
|
|
2534
|
+
if (fs.existsSync(storyContextPath)) {
|
|
2535
|
+
storyContextMd = fs.readFileSync(storyContextPath, 'utf8');
|
|
2536
|
+
} else {
|
|
2537
|
+
storyContextMd = this.generateStoryContextMd(story, epic);
|
|
2538
|
+
}
|
|
2539
|
+
|
|
2540
|
+
if (doGenerate) {
|
|
2541
|
+
await progressCallback?.(null, ` Generating documentation → ${story.name}`, {});
|
|
2542
|
+
this.debug(`Generating doc.md for story ${story.id}: ${story.name}`);
|
|
2543
|
+
try {
|
|
2544
|
+
const prompt = `## Project Documentation\n\n${rootDocContent}\n\n---\n\n## Parent Epic Canonical Specification\n\n${epicContextMd}\n\n---\n\n## Story Canonical Specification\n\n${storyContextMd}\n\nWrite the story's doc.md. Return JSON with a \`doc\` field.`;
|
|
2545
|
+
const result = await this._withProgressHeartbeat(
|
|
2546
|
+
() => this.retryWithBackoff(
|
|
2547
|
+
() => provider.generateJSON(prompt, storyAgentInstructions),
|
|
2548
|
+
`doc generation for story: ${story.name}`
|
|
2549
|
+
),
|
|
2550
|
+
(elapsed) => {
|
|
2551
|
+
if (elapsed < 15) return `Writing ${story.name} documentation…`;
|
|
2552
|
+
if (elapsed < 40) return `Expanding ${story.name} narrative…`;
|
|
2553
|
+
return `Still writing…`;
|
|
2554
|
+
},
|
|
2555
|
+
progressCallback,
|
|
2556
|
+
10000
|
|
2557
|
+
);
|
|
2558
|
+
const storyDoc = (typeof result.doc === 'string' && result.doc.trim())
|
|
2559
|
+
? result.doc
|
|
2560
|
+
: `# ${story.name}\n\n${story.description || ''}\n`;
|
|
2561
|
+
fs.writeFileSync(storyDocPath, storyDoc, 'utf8');
|
|
2562
|
+
this.debug(`Story doc.md written: ${storyDoc.length} bytes`);
|
|
2563
|
+
} catch (err) {
|
|
2564
|
+
this.debug(`Story doc generation failed for ${story.id} — writing stub`, { error: err.message });
|
|
2565
|
+
fs.writeFileSync(storyDocPath, `# ${story.name}\n\n${story.description || ''}\n`, 'utf8');
|
|
2566
|
+
}
|
|
2567
|
+
} else {
|
|
2568
|
+
fs.writeFileSync(storyDocPath, `# ${story.name}\n\n${story.description || ''}\n`, 'utf8');
|
|
2569
|
+
}
|
|
2570
|
+
}));
|
|
2571
|
+
}));
|
|
2572
|
+
|
|
2573
|
+
const epicCount = hierarchy.epics.length;
|
|
2574
|
+
const storyCount = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
|
|
2575
|
+
this.debug(`Doc generation complete: ${epicCount} epics + ${storyCount} stories`);
|
|
2576
|
+
}
|
|
2577
|
+
|
|
2578
|
+
// STAGE 5: Multi-Agent Validation
|
|
2579
|
+
async validateHierarchy(hierarchy, progressCallback = null, scope = null) {
|
|
2580
|
+
this.debugStage(5, 'Multi-Agent Validation');
|
|
2581
|
+
|
|
2582
|
+
// Initialize default LLM provider if not already done (for fallback)
|
|
2583
|
+
if (!this.llmProvider) {
|
|
2584
|
+
await this.initializeLLMProvider();
|
|
2585
|
+
}
|
|
2586
|
+
|
|
2587
|
+
// Check if smart selection is enabled
|
|
2588
|
+
const useSmartSelection = this.stagesConfig?.validation?.useSmartSelection || false;
|
|
2589
|
+
|
|
2590
|
+
if (useSmartSelection) {
|
|
2591
|
+
this.debug('Smart validator selection enabled');
|
|
2592
|
+
}
|
|
2593
|
+
|
|
2594
|
+
// Phase 1: Extract project context if contextual selection is enabled
|
|
2595
|
+
const useContextualSelection = this.stagesConfig?.validation?.useContextualSelection || false;
|
|
2596
|
+
this.debug(`Contextual agent selection: useContextualSelection=${useContextualSelection}, stagesConfig.validation=${JSON.stringify(this.stagesConfig?.validation)}`);
|
|
2597
|
+
let projectContext = null;
|
|
2598
|
+
|
|
2599
|
+
if (useContextualSelection && scope) {
|
|
2600
|
+
await progressCallback?.(null, 'Analyzing project context for agent selection…', {});
|
|
2601
|
+
projectContext = await this.extractProjectContext(scope, progressCallback);
|
|
2602
|
+
this._projectContext = projectContext;
|
|
2603
|
+
this.debug('Project context extracted', projectContext);
|
|
2604
|
+
} else if (useContextualSelection) {
|
|
2605
|
+
this.debug('useContextualSelection=true but no scope available — skipping context extraction');
|
|
2606
|
+
}
|
|
2607
|
+
|
|
2608
|
+
// Generate and write root context.md (canonical project representation)
|
|
2609
|
+
let rootContextMd = null;
|
|
2610
|
+
try {
|
|
2611
|
+
let docMdContent = '';
|
|
2612
|
+
if (fs.existsSync(this.projectDocPath)) {
|
|
2613
|
+
docMdContent = fs.readFileSync(this.projectDocPath, 'utf8');
|
|
2614
|
+
}
|
|
2615
|
+
rootContextMd = this.generateRootContextMd(projectContext || {}, docMdContent, hierarchy);
|
|
2616
|
+
const rootContextPath = path.join(this.projectPath, 'context.md');
|
|
2617
|
+
fs.writeFileSync(rootContextPath, rootContextMd, 'utf8');
|
|
2618
|
+
this.debug(`Root context.md written (${rootContextMd.length} bytes)`);
|
|
2619
|
+
} catch (err) {
|
|
2620
|
+
this.debug('Failed to write root context.md — continuing without it', { error: err.message });
|
|
2621
|
+
}
|
|
2622
|
+
|
|
2623
|
+
// Store root context on this instance so LLM context writers can access it
|
|
2624
|
+
this.rootContextMd = rootContextMd;
|
|
2625
|
+
|
|
2626
|
+
// Pre-generate LLM context.md for all epics and stories before validation
|
|
2627
|
+
// This gives validators rich, complete context rather than sparse structured stubs
|
|
2628
|
+
await progressCallback?.(null, 'Generating canonical context for epics and stories…', {});
|
|
2629
|
+
const _tsCtx = Date.now();
|
|
2630
|
+
await this.generateContextFiles(hierarchy, progressCallback);
|
|
2631
|
+
this.debugTiming('generateContextFiles', _tsCtx);
|
|
2632
|
+
|
|
2633
|
+
// Generate scaffolding epic AFTER all domain contexts are written — it reads them
|
|
2634
|
+
// to know the exact tech requirements (packages, infra, test frameworks, etc.)
|
|
2635
|
+
try {
|
|
2636
|
+
const _tsScaffold = Date.now();
|
|
2637
|
+
await this._generateScaffoldingEpic(hierarchy, progressCallback);
|
|
2638
|
+
this.debugTiming('generateScaffoldingEpic', _tsScaffold);
|
|
2639
|
+
} catch (err) {
|
|
2640
|
+
this.debug(`Scaffolding epic generation failed (non-critical): ${err.message}`);
|
|
2641
|
+
}
|
|
2642
|
+
|
|
2643
|
+
const validator = new EpicStoryValidator(
|
|
2644
|
+
this.llmProvider,
|
|
2645
|
+
this.verificationTracker,
|
|
2646
|
+
this.stagesConfig,
|
|
2647
|
+
useSmartSelection,
|
|
2648
|
+
progressCallback,
|
|
2649
|
+
projectContext
|
|
2650
|
+
);
|
|
2651
|
+
this._validator = validator;
|
|
2652
|
+
if (this._quotaExceededCallback) {
|
|
2653
|
+
this._validator.setQuotaExceededCallback(this._quotaExceededCallback);
|
|
2654
|
+
}
|
|
2655
|
+
if (rootContextMd) this._validator.setRootContextMd(rootContextMd);
|
|
2656
|
+
this._validator.setPromptLogger(this._promptLogger);
|
|
2657
|
+
this._validator.setTokenCallback((delta, stageHint) => {
|
|
2658
|
+
const key = stageHint
|
|
2659
|
+
? `${this.ceremonyName}-${stageHint}`
|
|
2660
|
+
: this.ceremonyName;
|
|
2661
|
+
this.tokenTracker.addIncremental(key, delta);
|
|
2662
|
+
if (delta.model) {
|
|
2663
|
+
const cost = this.tokenTracker.calculateCost(delta.input, delta.output, delta.model);
|
|
2664
|
+
this._runningCost += cost?.total ?? 0;
|
|
2665
|
+
}
|
|
2666
|
+
});
|
|
2667
|
+
|
|
2668
|
+
// ── Validate epics with concurrency control ──────────────────────────────
|
|
2669
|
+
// Each epic (+ its stories) is validated independently. We run up to
|
|
2670
|
+
// EPIC_CONCURRENCY epics in parallel to reduce wall-clock time while
|
|
2671
|
+
// respecting API rate limits. Stories within each epic remain sequential
|
|
2672
|
+
// because split-story splice logic requires index stability.
|
|
2673
|
+
const defaultConcurrency = 2;
|
|
2674
|
+
const EPIC_CONCURRENCY = this.stagesConfig?.validation?.epicConcurrency ?? defaultConcurrency;
|
|
2675
|
+
|
|
2676
|
+
const validateSingleEpic = async (epic) => {
|
|
2677
|
+
this.debug(`\nValidating Epic: ${epic.id} "${epic.name}"`);
|
|
2678
|
+
await progressCallback?.(null, `Validating Epic: ${epic.name}`, {});
|
|
2679
|
+
|
|
2680
|
+
// Use LLM-generated context if available; fall back to structured format
|
|
2681
|
+
const epicContext = this._epicContextCache.get(epic.name) || this.generateEpicContextMd(epic);
|
|
2682
|
+
|
|
2683
|
+
// Validate epic with multiple domain validators
|
|
2684
|
+
const _tsEpic = Date.now();
|
|
2685
|
+
const epicValidation = await validator.validateEpic(epic, epicContext);
|
|
2686
|
+
this.debugTiming(` validateEpic: ${epic.id} "${epic.name}"`, _tsEpic);
|
|
2687
|
+
|
|
2688
|
+
// Display validation summary
|
|
2689
|
+
this.displayValidationSummary('Epic', epic.name, epicValidation);
|
|
2690
|
+
|
|
2691
|
+
// Handle validation result
|
|
2692
|
+
if (epicValidation.overallStatus === 'needs-improvement') {
|
|
2693
|
+
this.debug(`Epic "${epic.name}" needs improvement - showing issues`);
|
|
2694
|
+
this.displayValidationIssues(epicValidation);
|
|
2695
|
+
}
|
|
2696
|
+
|
|
2697
|
+
// Validate each story under this epic.
|
|
2698
|
+
// Use index-based loop so split stories inserted via splice() are validated in-place.
|
|
2699
|
+
// Keep in sync with STORY_AC_CAP in epic-story-validator.js
|
|
2700
|
+
const STORY_AC_CAP = 20;
|
|
2701
|
+
const MAX_SPLIT_DEPTH = 1;
|
|
2702
|
+
let si = 0;
|
|
2703
|
+
while (si < (epic.stories || []).length) {
|
|
2704
|
+
const story = epic.stories[si];
|
|
2705
|
+
const splitDepth = story._splitDepth || 0;
|
|
2706
|
+
|
|
2707
|
+
this.debug(`\nValidating Story: ${story.id} "${story.name}"`);
|
|
2708
|
+
await progressCallback?.(null, ` Validating story: ${story.name}`, {});
|
|
2709
|
+
|
|
2710
|
+
// Use LLM-generated context if available; fall back to structured format
|
|
2711
|
+
const storyContext = this._storyContextCache.get(`${epic.name}::${story.name}`) || this.generateStoryContextMd(story, epic);
|
|
2712
|
+
|
|
2713
|
+
// Validate story with multiple domain validators
|
|
2714
|
+
const _tsStory = Date.now();
|
|
2715
|
+
const storyValidation = await validator.validateStory(story, storyContext, epic);
|
|
2716
|
+
this.debugTiming(` validateStory: ${story.id} "${story.name}"`, _tsStory);
|
|
2717
|
+
|
|
2718
|
+
// Display validation summary
|
|
2719
|
+
this.displayValidationSummary('Story', story.name, storyValidation);
|
|
2720
|
+
|
|
2721
|
+
// Handle validation result
|
|
2722
|
+
if (storyValidation.overallStatus === 'needs-improvement') {
|
|
2723
|
+
this.debug(`Story "${story.name}" needs improvement - showing issues`);
|
|
2724
|
+
this.displayValidationIssues(storyValidation);
|
|
2725
|
+
}
|
|
2726
|
+
|
|
2727
|
+
// ── Split detection ────────────────────────────────────────────────────────
|
|
2728
|
+
// Trigger split when:
|
|
2729
|
+
// (a) AC cap reached — story too large for solver to improve further, OR
|
|
2730
|
+
// (b) SA issued a SPLIT RECOMMENDATION — story has too many concerns to resolve
|
|
2731
|
+
// by adding ACs regardless of current AC count
|
|
2732
|
+
const acCount = (story.acceptance || []).length;
|
|
2733
|
+
|
|
2734
|
+
const splitRecommended = storyValidation._splitRecommended === true
|
|
2735
|
+
|| storyValidation.microCheckDetails?.splitRecommendation === true;
|
|
2736
|
+
const shouldSplit =
|
|
2737
|
+
storyValidation.overallStatus === 'needs-improvement' &&
|
|
2738
|
+
(acCount >= STORY_AC_CAP || splitRecommended) &&
|
|
2739
|
+
splitDepth < MAX_SPLIT_DEPTH;
|
|
2740
|
+
|
|
2741
|
+
if (shouldSplit) {
|
|
2742
|
+
const allIssues = [
|
|
2743
|
+
...(storyValidation.criticalIssues || []),
|
|
2744
|
+
...(storyValidation.majorIssues || []),
|
|
2745
|
+
];
|
|
2746
|
+
|
|
2747
|
+
const splitReason = splitRecommended
|
|
2748
|
+
? `SA split recommendation (${acCount} ACs)`
|
|
2749
|
+
: `too large (${acCount} ACs)`;
|
|
2750
|
+
await progressCallback?.(null, ` Splitting story: ${story.name}`, {});
|
|
2751
|
+
await validator._detail(`✂ [${story.id}] ${splitReason} — attempting split…`);
|
|
2752
|
+
console.log(` ✂ Splitting story "${story.name}" — ${splitReason}`);
|
|
2753
|
+
|
|
2754
|
+
const splitStories = await validator._splitStory(story, epic, allIssues);
|
|
2755
|
+
|
|
2756
|
+
if (splitStories) {
|
|
2757
|
+
// Tag split stories with depth guard and parent reference (runtime-only, prefixed _)
|
|
2758
|
+
for (const s of splitStories) {
|
|
2759
|
+
s._splitDepth = splitDepth + 1;
|
|
2760
|
+
s._splitFrom = story.id;
|
|
2761
|
+
}
|
|
2762
|
+
|
|
2763
|
+
// Replace the original story with the split stories in-place
|
|
2764
|
+
epic.stories.splice(si, 1, ...splitStories);
|
|
2765
|
+
|
|
2766
|
+
// Invalidate stale context cache entry for the original story name
|
|
2767
|
+
this._storyContextCache.delete(`${epic.name}::${story.name}`);
|
|
2768
|
+
|
|
2769
|
+
// Pre-populate structured context for split stories so they don't start with nothing.
|
|
2770
|
+
// This avoids an expensive LLM context-gen call for each split story — the structured
|
|
2771
|
+
// fallback is sufficient since validators will refine the story content anyway.
|
|
2772
|
+
for (const splitStory of splitStories) {
|
|
2773
|
+
const splitKey = `${epic.name}::${splitStory.name}`;
|
|
2774
|
+
if (!this._storyContextCache.has(splitKey)) {
|
|
2775
|
+
this._storyContextCache.set(splitKey, this.generateStoryContextMd(splitStory, epic));
|
|
2776
|
+
}
|
|
2777
|
+
}
|
|
2778
|
+
|
|
2779
|
+
// Post-split dependency reconciliation: any story that depended on the
|
|
2780
|
+
// original (now-removed) story ID should depend on the first split story instead.
|
|
2781
|
+
const originalId = story.id;
|
|
2782
|
+
const replacementId = splitStories[0]?.id;
|
|
2783
|
+
if (originalId && replacementId) {
|
|
2784
|
+
for (const e of hierarchy.epics) {
|
|
2785
|
+
for (const s of e.stories || []) {
|
|
2786
|
+
if (Array.isArray(s.dependencies)) {
|
|
2787
|
+
const depIdx = s.dependencies.indexOf(originalId);
|
|
2788
|
+
if (depIdx !== -1 && !splitStories.some(sp => sp.id === s.id)) {
|
|
2789
|
+
s.dependencies[depIdx] = replacementId;
|
|
2790
|
+
this.debug(`Post-split dep remap: story "${s.name}" now depends on "${replacementId}" (was "${originalId}")`);
|
|
2791
|
+
}
|
|
2792
|
+
}
|
|
2793
|
+
}
|
|
2794
|
+
}
|
|
2795
|
+
}
|
|
2796
|
+
|
|
2797
|
+
const names = splitStories.map(s => `"${s.name}"`).join(' + ');
|
|
2798
|
+
await validator._detail(`✂ [${story.id}] split into ${splitStories.length}: ${names}`);
|
|
2799
|
+
console.log(` ✂ Split into ${splitStories.length} stories: ${names}`);
|
|
2800
|
+
|
|
2801
|
+
// Do NOT increment si — the loop re-enters at position si
|
|
2802
|
+
// which now holds the first split story
|
|
2803
|
+
continue;
|
|
2804
|
+
} else {
|
|
2805
|
+
console.log(` ⚠ Split failed for "${story.name}" — keeping original story`);
|
|
2806
|
+
}
|
|
2807
|
+
}
|
|
2808
|
+
|
|
2809
|
+
si++;
|
|
2810
|
+
}
|
|
2811
|
+
};
|
|
2812
|
+
|
|
2813
|
+
// Run epics with concurrency limit
|
|
2814
|
+
if (EPIC_CONCURRENCY <= 1 || hierarchy.epics.length <= 1) {
|
|
2815
|
+
// Sequential fallback for local LLMs or single epic
|
|
2816
|
+
for (const epic of hierarchy.epics) {
|
|
2817
|
+
await validateSingleEpic(epic);
|
|
2818
|
+
}
|
|
2819
|
+
} else {
|
|
2820
|
+
// Concurrency-limited parallel execution
|
|
2821
|
+
const queue = [...hierarchy.epics];
|
|
2822
|
+
const running = new Set();
|
|
2823
|
+
let idx = 0;
|
|
2824
|
+
|
|
2825
|
+
while (idx < queue.length || running.size > 0) {
|
|
2826
|
+
// Launch tasks up to concurrency limit
|
|
2827
|
+
while (idx < queue.length && running.size < EPIC_CONCURRENCY) {
|
|
2828
|
+
const epic = queue[idx++];
|
|
2829
|
+
const promise = validateSingleEpic(epic).then(() => running.delete(promise));
|
|
2830
|
+
running.add(promise);
|
|
2831
|
+
}
|
|
2832
|
+
// Wait for at least one to complete before launching more
|
|
2833
|
+
if (running.size > 0) {
|
|
2834
|
+
await Promise.race(running);
|
|
2835
|
+
}
|
|
2836
|
+
}
|
|
2837
|
+
}
|
|
2838
|
+
|
|
2839
|
+
// Post-validation dependency cleanup — splits may have introduced broken references
|
|
2840
|
+
this._validateDependencyReferences(hierarchy);
|
|
2841
|
+
|
|
2842
|
+
return hierarchy;
|
|
2843
|
+
}
|
|
2844
|
+
|
|
2845
|
+
/**
|
|
2846
|
+
* Display validation summary
|
|
2847
|
+
*/
|
|
2848
|
+
displayValidationSummary(type, name, validation) {
|
|
2849
|
+
const statusPrefix = {
|
|
2850
|
+
'excellent': 'SUCCESS:',
|
|
2851
|
+
'acceptable': 'WARNING:',
|
|
2852
|
+
'needs-improvement': 'ERROR:'
|
|
2853
|
+
};
|
|
2854
|
+
|
|
2855
|
+
const prefix = statusPrefix[validation.overallStatus] || '';
|
|
2856
|
+
sendOutput(`${prefix} ${type}: ${name}\n`);
|
|
2857
|
+
sendIndented(`Overall Score: ${validation.averageScore}/100`, 1);
|
|
2858
|
+
const agentCount = validation.validatorCount ?? validation.validatorResults?.length ?? 0;
|
|
2859
|
+
sendIndented(`Validators: ${agentCount} agents`, 1);
|
|
2860
|
+
sendIndented(`Issues: ${validation.criticalIssues?.length || 0} critical, ${validation.majorIssues?.length || 0} major, ${validation.minorIssues?.length || 0} minor`, 1);
|
|
2861
|
+
|
|
2862
|
+
// Show strengths if excellent or acceptable
|
|
2863
|
+
if (validation.overallStatus !== 'needs-improvement' && validation.strengths?.length > 0) {
|
|
2864
|
+
sendIndented(`Strengths: ${validation.strengths.slice(0, 2).join(', ')}`, 1);
|
|
2865
|
+
}
|
|
2866
|
+
|
|
2867
|
+
sendOutput('\n');
|
|
2868
|
+
}
|
|
2869
|
+
|
|
2870
|
+
/**
|
|
2871
|
+
* Display validation issues
|
|
2872
|
+
*/
|
|
2873
|
+
displayValidationIssues(validation) {
|
|
2874
|
+
// Show critical issues
|
|
2875
|
+
if (validation.criticalIssues?.length > 0) {
|
|
2876
|
+
this.debug('Critical Issues', validation.criticalIssues.slice(0, 3).map(issue => ({
|
|
2877
|
+
domain: issue.domain,
|
|
2878
|
+
description: issue.description,
|
|
2879
|
+
suggestion: issue.suggestion
|
|
2880
|
+
})));
|
|
2881
|
+
}
|
|
2882
|
+
|
|
2883
|
+
// Show major issues (micro-check format)
|
|
2884
|
+
if (validation.majorIssues?.length > 0) {
|
|
2885
|
+
this.debug('Major Issues', validation.majorIssues.slice(0, 3).map(issue => ({
|
|
2886
|
+
description: issue.description,
|
|
2887
|
+
suggestion: issue.suggestion
|
|
2888
|
+
})));
|
|
2889
|
+
}
|
|
2890
|
+
|
|
2891
|
+
// Show improvement priorities (monolithic format, if present)
|
|
2892
|
+
if (validation.improvementPriorities?.length > 0) {
|
|
2893
|
+
this.debug('Improvement Priorities', validation.improvementPriorities.slice(0, 3).map((priority, i) => ({
|
|
2894
|
+
rank: i + 1,
|
|
2895
|
+
priority: priority.priority,
|
|
2896
|
+
mentionedBy: priority.mentionedBy
|
|
2897
|
+
})));
|
|
2898
|
+
}
|
|
2899
|
+
}
|
|
2900
|
+
|
|
2901
|
+
  /**
   * Analyze duplicate detection decisions
   * Logs which epics/stories should have been skipped by LLM vs which are truly new
   *
   * Debug-only instrumentation: nothing here mutates the hierarchy; the method
   * reports (via this.debug) which generated items collide with items from
   * previous runs, so operators can judge the LLM's duplicate-skipping quality.
   *
   * @param {Object} hierarchy - Decomposition result with an `epics` array;
   *   each epic may carry a `stories` array.
   * @param {Map} existingEpics - lowercased epic name → existing epic id (previous runs)
   * @param {Map} existingStories - lowercased story name → existing story id (previous runs)
   * @returns {{skippedEpics: Array<{name, existingId}>, createdEpics: string[],
   *            skippedStories: Array<{name, existingId}>, createdStories: string[]}}
   */
  analyzeDuplicates(hierarchy, existingEpics, existingStories) {
    this.debug('\n' + '='.repeat(80));
    this.debug('DUPLICATE DETECTION ANALYSIS');
    this.debug('='.repeat(80));

    const skippedEpics = [];
    const createdEpics = [];

    // Analyze epics
    for (const epic of hierarchy.epics || []) {
      // Matching is case-insensitive on the exact name string.
      const normalized = epic.name.toLowerCase();
      const isDuplicate = existingEpics.has(normalized);

      this.debug(`\nEpic: "${epic.name}"`);
      this.debug(`  Normalized: "${normalized}"`);
      this.debug(`  Exists in previous runs: ${isDuplicate}`);

      if (isDuplicate) {
        const existingId = existingEpics.get(normalized);
        this.debug(`  ⚠️ Match found: ${existingId}`);
        this.debug(`  Action: SHOULD HAVE BEEN SKIPPED BY LLM`);
        this.debug(`  Reason: LLM generated duplicate that already exists`);
        skippedEpics.push({ name: epic.name, existingId });
      } else {
        // Check for potential semantic duplicates (similar names)
        const similarEpics = [];
        for (const [existingName, existingId] of existingEpics.entries()) {
          // Simple similarity: check if one name contains the other
          if (normalized.includes(existingName) || existingName.includes(normalized)) {
            similarEpics.push({ name: existingName, id: existingId });
          }
        }

        if (similarEpics.length > 0) {
          // Near-miss names are still created — flagged for human review only.
          this.debug(`  ⚠️ Possible semantic duplicates found:`);
          for (const similar of similarEpics) {
            this.debug(`    - "${similar.name}" (${similar.id})`);
          }
          this.debug(`  Action: CREATE NEW (but user should review for duplicates)`);
        } else {
          this.debug(`  ✓ Match found: NONE`);
          this.debug(`  Action: CREATE NEW`);
          this.debug(`  Reason: Genuinely new epic not in existing list`);
        }
        createdEpics.push(epic.name);
      }
    }

    // Analyze stories
    // NOTE: stories only get the exact-match check; the containment-based
    // similarity scan above is applied to epics only.
    const skippedStories = [];
    const createdStories = [];

    for (const epic of hierarchy.epics || []) {
      for (const story of epic.stories || []) {
        const normalized = story.name.toLowerCase();
        const isDuplicate = existingStories.has(normalized);

        this.debug(`\nStory: "${story.name}" (under epic "${epic.name}")`);
        this.debug(`  Normalized: "${normalized}"`);
        this.debug(`  Exists in previous runs: ${isDuplicate}`);

        if (isDuplicate) {
          const existingId = existingStories.get(normalized);
          this.debug(`  ⚠️ Match found: ${existingId}`);
          this.debug(`  Action: SHOULD HAVE BEEN SKIPPED BY LLM`);
          skippedStories.push({ name: story.name, existingId });
        } else {
          this.debug(`  ✓ Match found: NONE`);
          this.debug(`  Action: CREATE NEW`);
          createdStories.push(story.name);
        }
      }
    }

    // Summary
    this.debug('\n' + '='.repeat(80));
    this.debug('DUPLICATE ANALYSIS SUMMARY');
    this.debug('='.repeat(80));
    this.debug('Epics:', {
      shouldBeSkipped: skippedEpics.length,
      willCreate: createdEpics.length,
      skippedNames: skippedEpics.map(s => s.name),
      createdNames: createdEpics
    });
    this.debug('Stories:', {
      shouldBeSkipped: skippedStories.length,
      willCreate: createdStories.length,
      skippedNames: skippedStories.map(s => s.name),
      createdNames: createdStories
    });

    if (skippedEpics.length > 0 || skippedStories.length > 0) {
      this.debug('\n⚠️ WARNING: LLM generated duplicates that should have been skipped!');
      this.debug('This indicates LLM non-determinism or insufficient duplicate detection.');
    } else {
      this.debug('\n✓ Result: LLM correctly identified all items as duplicates or genuinely new');
    }

    this.debug('='.repeat(80) + '\n');

    return { skippedEpics, createdEpics, skippedStories, createdStories };
  }
|
|
3007
|
+
|
|
3008
|
+
// STAGE 6: Renumber IDs to avoid collisions
|
|
3009
|
+
renumberHierarchy(hierarchy, maxEpicNum, maxStoryNums) {
|
|
3010
|
+
this.debugStage(6, 'Renumber IDs');
|
|
3011
|
+
this.debug('Renumbering hierarchy to avoid ID collisions...');
|
|
3012
|
+
|
|
3013
|
+
// Build old→new ID mapping for dependency remapping
|
|
3014
|
+
const idMap = new Map();
|
|
3015
|
+
|
|
3016
|
+
let nextEpicNum = maxEpicNum.value + 1;
|
|
3017
|
+
this.debug(`Next epic number: ${nextEpicNum} (after existing ${maxEpicNum.value})`);
|
|
3018
|
+
|
|
3019
|
+
for (const epic of hierarchy.epics) {
|
|
3020
|
+
const oldEpicId = epic.id;
|
|
3021
|
+
const newEpicId = `context-${String(nextEpicNum).padStart(4, '0')}`;
|
|
3022
|
+
idMap.set(oldEpicId, newEpicId);
|
|
3023
|
+
epic.id = newEpicId;
|
|
3024
|
+
|
|
3025
|
+
this.debug(`ID mapping - Epic "${epic.name}": ${oldEpicId} -> ${newEpicId}`);
|
|
3026
|
+
|
|
3027
|
+
let nextStoryNum = (maxStoryNums.get(newEpicId) || 0) + 1;
|
|
3028
|
+
|
|
3029
|
+
for (const story of epic.stories || []) {
|
|
3030
|
+
const oldStoryId = story.id;
|
|
3031
|
+
const newStoryId = `${newEpicId}-${String(nextStoryNum).padStart(4, '0')}`;
|
|
3032
|
+
idMap.set(oldStoryId, newStoryId);
|
|
3033
|
+
story.id = newStoryId;
|
|
3034
|
+
|
|
3035
|
+
this.debug(`ID mapping - Story "${story.name}": ${oldStoryId} -> ${newStoryId}`);
|
|
3036
|
+
nextStoryNum++;
|
|
3037
|
+
}
|
|
3038
|
+
|
|
3039
|
+
nextEpicNum++;
|
|
3040
|
+
}
|
|
3041
|
+
|
|
3042
|
+
// Remap all dependency references using the old→new ID map
|
|
3043
|
+
let remappedCount = 0;
|
|
3044
|
+
for (const epic of hierarchy.epics) {
|
|
3045
|
+
if (Array.isArray(epic.dependencies)) {
|
|
3046
|
+
epic.dependencies = epic.dependencies.map(dep => {
|
|
3047
|
+
const mapped = idMap.get(dep);
|
|
3048
|
+
if (mapped) { remappedCount++; return mapped; }
|
|
3049
|
+
return dep;
|
|
3050
|
+
});
|
|
3051
|
+
}
|
|
3052
|
+
for (const story of epic.stories || []) {
|
|
3053
|
+
if (Array.isArray(story.dependencies)) {
|
|
3054
|
+
story.dependencies = story.dependencies.map(dep => {
|
|
3055
|
+
const mapped = idMap.get(dep);
|
|
3056
|
+
if (mapped) { remappedCount++; return mapped; }
|
|
3057
|
+
return dep;
|
|
3058
|
+
});
|
|
3059
|
+
}
|
|
3060
|
+
}
|
|
3061
|
+
}
|
|
3062
|
+
|
|
3063
|
+
if (remappedCount > 0) {
|
|
3064
|
+
this.debug(`Remapped ${remappedCount} dependency reference(s) to new IDs`);
|
|
3065
|
+
}
|
|
3066
|
+
|
|
3067
|
+
this.debug('Renumbered hierarchy', {
|
|
3068
|
+
epics: hierarchy.epics.map(e => ({ id: e.id, name: e.name, storyCount: e.stories?.length || 0 }))
|
|
3069
|
+
});
|
|
3070
|
+
|
|
3071
|
+
return hierarchy;
|
|
3072
|
+
}
|
|
3073
|
+
|
|
3074
|
+
  // STAGE 7: Write hierarchy files with distributed documentation
  /**
   * Persist the renumbered hierarchy to disk: one folder per epic containing
   * work.json + context.md, with nested story folders each containing their
   * own work.json + context.md. Also renames provisional folders left by
   * earlier stages, normalizes dependency names to IDs, and promotes
   * dependency-free items from 'planned' to 'ready'.
   *
   * @param {Object} hierarchy - Renumbered hierarchy ({ epics: [...] })
   * @param {Function|null} progressCallback - Optional progress callback (not used directly here)
   * @returns {Promise<{epicCount: number, storyCount: number}>}
   */
  async writeHierarchyFiles(hierarchy, progressCallback = null) {
    this.debugStage(7, 'Write Hierarchy Files + Distribute Documentation');
    this.debug('Writing hierarchy files with documentation distribution');

    // Phase -1: Build name→id map and normalize all dependencies to IDs
    // (the LLM may emit dependencies by name; work.json must reference IDs).
    const nameToId = {};
    for (const epic of hierarchy.epics) {
      nameToId[epic.name] = epic.id;
      nameToId[epic.name.toLowerCase()] = epic.id;
      for (const story of epic.stories || []) {
        nameToId[story.name] = story.id;
        nameToId[story.name.toLowerCase()] = story.id;
      }
    }
    // Unknown names pass through unchanged (may reference items outside this run).
    const normalizeDeps = (deps) => (deps || []).map(d => nameToId[d] || nameToId[d?.toLowerCase?.()] || d);
    for (const epic of hierarchy.epics) {
      epic.dependencies = normalizeDeps(epic.dependencies);
      for (const story of epic.stories || []) {
        story.dependencies = normalizeDeps(story.dependencies);
      }
    }

    // Phase 0: Rename all provisional epic folders in REVERSE order to avoid collisions
    // (e.g., 0001→0002 must happen after 0002→0003, not before)
    const epicsToRename = hierarchy.epics
      .filter(e => e._provisionalId && e._provisionalId !== e.id)
      .reverse();
    for (const epic of epicsToRename) {
      const provisionalDir = path.join(this.projectPath, epic._provisionalId);
      const targetDir = path.join(this.projectPath, epic.id);
      // Only rename when the source exists and the target is free; otherwise
      // the write phase below creates the target directory from scratch.
      if (fs.existsSync(provisionalDir) && !fs.existsSync(targetDir)) {
        fs.renameSync(provisionalDir, targetDir);
        this.debug(`Renamed provisional epic folder: ${epic._provisionalId} → ${epic.id}`);
      }
    }

    // Phase 1 (sync): Create all directories, write work.json and context.md files
    for (const epic of hierarchy.epics) {
      const epicDir = path.join(this.projectPath, epic.id);
      if (!fs.existsSync(epicDir)) fs.mkdirSync(epicDir, { recursive: true });

      // Use LLM-generated context if cached; patch the id line since IDs may have changed after renumbering
      let epicContextMd = this._epicContextCache.get(epic.name) || this.generateEpicContextMd(epic);
      epicContextMd = epicContextMd.replace(/^(- id: ).+$/m, `$1${epic.id}`);
      const epicContextPath = path.join(epicDir, 'context.md');
      fs.writeFileSync(epicContextPath, epicContextMd, 'utf8');
      this.debug(`Writing ${epicContextPath} (${epicContextMd.length} bytes)`);

      const epicWorkJson = {
        id: epic.id,
        name: epic.name,
        type: 'epic',
        domain: epic.domain,
        description: epic.description,
        features: epic.features,
        status: 'planned',
        dependencies: epic.dependencies || [],
        children: (epic.stories || []).map(s => s.id),
        metadata: {
          ...(epic.metadata || {}),
          created: localISO(),
          ceremony: this.ceremonyName
        }
      };
      const workJsonPath = path.join(epicDir, 'work.json');
      const workJsonContent = JSON.stringify(epicWorkJson, null, 2);
      fs.writeFileSync(workJsonPath, workJsonContent, 'utf8');
      this.debug(`Writing ${workJsonPath} (${workJsonContent.length} bytes)`);
      // Notify observers (e.g. UI) that an item landed on disk, if registered.
      await this._itemWrittenCallback?.({ itemId: epic.id, itemType: 'epic' });

      // Rename provisional story folders in reverse order to avoid collisions
      // (e.g., 0001b→0002 must happen after 0002→0003, not before)
      const storiesToRename = (epic.stories || [])
        .filter(s => s._provisionalId && s._provisionalId !== s.id)
        .reverse();
      for (const story of storiesToRename) {
        const provisionalStoryDir = path.join(epicDir, story._provisionalId);
        const targetDir = path.join(epicDir, story.id);
        if (fs.existsSync(provisionalStoryDir) && !fs.existsSync(targetDir)) {
          fs.renameSync(provisionalStoryDir, targetDir);
          this.debug(`Renamed provisional story folder: ${story._provisionalId} → ${story.id}`);
        }
      }

      for (const story of epic.stories || []) {
        const storyDir = path.join(epicDir, story.id);
        if (!fs.existsSync(storyDir)) fs.mkdirSync(storyDir, { recursive: true });

        // Use LLM-generated context if cached; patch id and epic-ref lines after renumbering
        let storyContextMd = this._storyContextCache.get(`${epic.name}::${story.name}`) || this.generateStoryContextMd(story, epic);
        storyContextMd = storyContextMd.replace(/^(- id: ).+$/m, `$1${story.id}`);
        storyContextMd = storyContextMd.replace(/^(- epic: ).+$/m, `$1${epic.id} (${epic.name})`);
        const storyContextPath = path.join(storyDir, 'context.md');
        fs.writeFileSync(storyContextPath, storyContextMd, 'utf8');
        this.debug(`Writing ${storyContextPath} (${storyContextMd.length} bytes)`);

        const storyWorkJson = {
          id: story.id,
          name: story.name,
          type: 'story',
          userType: story.userType,
          description: story.description,
          acceptance: story.acceptance,
          status: 'planned',
          dependencies: story.dependencies || [],
          children: [],
          metadata: {
            ...(story.metadata || {}),
            created: localISO(),
            ceremony: this.ceremonyName
          }
        };
        const storyWorkJsonPath = path.join(storyDir, 'work.json');
        const storyWorkJsonContent = JSON.stringify(storyWorkJson, null, 2);
        fs.writeFileSync(storyWorkJsonPath, storyWorkJsonContent, 'utf8');
        this.debug(`Writing ${storyWorkJsonPath} (${storyWorkJsonContent.length} bytes)`);
        await this._itemWrittenCallback?.({ itemId: story.id, itemType: 'story' });
      }
    }

    // Set dependency-ready items to 'ready' status instead of 'planned'.
    // Items whose dependencies are all within this run (and thus all "planned") are ready
    // if those dependencies have no external blockers. Phase 1 epics/stories with no
    // dependencies are always ready. Others stay 'planned' until their deps are completed.
    try {
      const { checkDependenciesReady } = await import('./dependency-checker.js');

      // Build lookup of all items in this hierarchy
      const lookup = {};
      for (const epic of hierarchy.epics) {
        lookup[epic.id] = {
          id: epic.id, name: epic.name, type: 'epic',
          status: 'planned', dependencies: epic.dependencies || [],
        };
        for (const story of epic.stories || []) {
          lookup[story.id] = {
            id: story.id, name: story.name, type: 'story',
            status: 'planned', dependencies: story.dependencies || [],
          };
        }
      }

      // Items with no dependencies (or all deps are outside this hierarchy and assumed done) → ready
      let readyCount = 0;
      for (const epic of hierarchy.epics) {
        const epicDeps = (epic.dependencies || []).filter(d => lookup[d]);
        if (epicDeps.length === 0) {
          // Epic has no deps within this hierarchy → ready
          this._setWorkJsonStatus(path.join(this.projectPath, epic.id, 'work.json'), 'ready');
          lookup[epic.id].status = 'ready';
          readyCount++;

          // Its stories with no deps (or deps only on this now-ready epic) → ready
          for (const story of epic.stories || []) {
            const storyResult = checkDependenciesReady(story.id, lookup);
            if (storyResult.ready) {
              this._setWorkJsonStatus(path.join(this.projectPath, epic.id, story.id, 'work.json'), 'ready');
              lookup[story.id].status = 'ready';
              readyCount++;
            }
          }
        }
      }
      this.debug(`Set ${readyCount} items to 'ready' status (dependency-free)`);
    } catch (err) {
      // Best-effort: files stay 'planned' if the checker can't be loaded/run.
      this.debug(`Failed to compute ready status (non-critical): ${err.message}`);
    }

    // Phase 2 (doc.md): Handled by generateDocFiles() called after this method.
    // context.md files written in Phase 1 are the canonical source for doc generation.

    const epicCount = hierarchy.epics.length;
    const storyCount = hierarchy.epics.reduce((sum, epic) => sum + (epic.stories || []).length, 0);

    // Log all files written this run for cross-run comparison
    this.debugSection('FILES WRITTEN THIS RUN (Phase 1 — work.json + context.md)');
    const filesWritten = [];
    filesWritten.push('context.md');
    for (const epic of hierarchy.epics) {
      filesWritten.push(`${epic.id}/work.json`);
      filesWritten.push(`${epic.id}/context.md`);
      for (const story of epic.stories || []) {
        filesWritten.push(`${epic.id}/${story.id}/work.json`);
        filesWritten.push(`${epic.id}/${story.id}/context.md`);
      }
    }
    this.debug('Files written this run (Phase 1)', filesWritten);
    this.debug(`Total files written: ${filesWritten.length} (${epicCount} epics x 2 + ${storyCount} stories x 2)`);

    // Display clean summary of created epics and stories
    if (hierarchy.epics.length > 0) {
      for (const epic of hierarchy.epics) {
        sendOutput(`${epic.id}: ${epic.name}\n`);
        for (const story of epic.stories || []) {
          sendIndented(`${story.id}: ${story.name}`, 1);
        }
        sendOutput('\n');
      }
    }

    return { epicCount, storyCount };
  }
|
|
3277
|
+
|
|
3278
|
+
  /**
   * Stage 7: Enrich story doc.md files with implementation-specific detail.
   *
   * After doc distribution, story docs may still have vague acceptance criteria
   * that lack concrete API contracts, error tables, DB field names, and business rules.
   * This stage runs the story-doc-enricher agent on each story doc to fill those gaps.
   *
   * All stories are enriched in parallel; a failure for one story is non-fatal
   * (the original doc.md is kept and the story is counted as skipped).
   *
   * @param {Object} hierarchy - Hierarchy with epics and stories (post-renumbering, with real IDs)
   * @param {Function} progressCallback - Optional progress callback
   * @returns {Promise<void>} Results are written to disk; nothing is returned.
   */
  async enrichStoryDocs(hierarchy, progressCallback = null) {
    this.debugStage(8, 'Enrich Story Docs with Implementation Detail');

    const agentInstructions = loadAgent('story-doc-enricher.md');
    const provider = await this.getProviderForStageInstance('enrichment');
    const { model: modelName } = this.getProviderForStage('enrichment');

    this.debug(`Using model for enrichment: ${modelName}`);

    // Collect all story tasks and run all enrichments in parallel
    const tasks = hierarchy.epics.flatMap(epic =>
      (epic.stories || []).map(story => ({ epic, story }))
    );

    const results = await Promise.all(tasks.map(async ({ epic, story }) => {
      const storyDir = path.join(this.projectPath, epic.id, story.id);
      const storyDocPath = path.join(storyDir, 'doc.md');

      // No doc.md means doc distribution skipped this story — nothing to enrich.
      if (!fs.existsSync(storyDocPath)) {
        this.debug(`Skipping enrichment for ${story.id} — doc.md not found`);
        return 'skipped';
      }

      const storyDocContent = fs.readFileSync(storyDocPath, 'utf8');
      const acceptance = (story.acceptance || []).map((a, i) => `${i + 1}. ${a}`).join('\n') || 'none specified';
      const prompt = `## Existing Story Doc

${storyDocContent}

---

## Story Work Item

**Name:** ${story.name}
**User Type:** ${story.userType || 'team member'}
**Description:** ${story.description || ''}
**Acceptance Criteria:**
${acceptance}

---

## Parent Epic Context

**Epic:** ${epic.name}
**Domain:** ${epic.domain || 'general'}
**Description:** ${epic.description || ''}

---

Enrich the existing story doc to be fully implementation-ready. Fill any gaps in API contracts, error tables, data model fields, business rules, and authorization. Return JSON with \`enriched_doc\` and \`gaps_filled\` fields.`;

      this.debug(`Enriching story doc: ${story.id} (${story.name})`);
      await progressCallback?.(null, `  Enriching documentation → ${story.name}`, {});

      const _tsEnrich = Date.now();
      try {
        // Heartbeat keeps the UI alive during the (potentially slow) LLM call;
        // retryWithBackoff absorbs transient provider failures.
        const result = await this._withProgressHeartbeat(
          () => this.retryWithBackoff(
            () => provider.generateJSON(prompt, agentInstructions),
            `enrichment for story: ${story.name}`
          ),
          (elapsed) => {
            if (elapsed < 15) return `Enriching ${story.name}…`;
            if (elapsed < 40) return `Adding implementation detail to ${story.name}…`;
            return `Still enriching…`;
          },
          progressCallback,
          10000
        );

        // Malformed agent output falls back to the original doc unchanged.
        const enrichedDoc = (typeof result.enriched_doc === 'string' && result.enriched_doc.trim())
          ? result.enriched_doc
          : storyDocContent;

        const gapsFilled = Array.isArray(result.gaps_filled) ? result.gaps_filled : [];

        fs.writeFileSync(storyDocPath, enrichedDoc, 'utf8');

        this.debugTiming(`  enrichStory: ${story.id} "${story.name}"`, _tsEnrich);
        if (gapsFilled.length > 0) {
          this.debug(`Story ${story.id} enriched: ${gapsFilled.length} gaps filled`, gapsFilled);
        } else {
          this.debug(`Story ${story.id}: already implementation-ready, no gaps filled`);
        }
        return 'enriched';
      } catch (err) {
        // Non-fatal: keep the pre-enrichment doc and continue with other stories.
        this.debugTiming(`  enrichStory FAILED: ${story.id} "${story.name}"`, _tsEnrich);
        this.debug(`Story enrichment failed for ${story.id} — keeping original doc`, { error: err.message });
        return 'skipped';
      }
    }));

    const enrichedCount = results.filter(r => r === 'enriched').length;
    const skippedCount = results.filter(r => r === 'skipped').length;
    this.debug(`Story enrichment complete: ${enrichedCount} enriched, ${skippedCount} skipped`);
  }
|
|
3384
|
+
|
|
3385
|
+
  /**
   * Distribute documentation content from a parent doc.md to a child item's doc.md.
   *
   * Calls the doc-distributor LLM agent which extracts content specifically about
   * the child from the parent document and composes the child's doc.md with the
   * extracted content plus elaboration.
   *
   * NOTE: the current implementation returns only the child doc — the parent
   * document is not rewritten or returned, despite the agent's naming.
   *
   * @param {string} parentDocContent - Current content of the parent doc.md
   * @param {Object} childItem - Epic or story object from decomposition result
   * @param {'epic'|'story'} childType - Whether the child is an epic or story
   * @param {Function} progressCallback - Optional progress callback
   * @returns {Promise<{childDoc: string}>}
   */
  async distributeDocContent(parentDocContent, childItem, childType, progressCallback = null) {
    this.debugSection(`DOC DISTRIBUTION: ${childType.toUpperCase()} "${childItem.name}"`);

    const agentPath = path.join(this.agentsPath, 'doc-distributor.md');
    this.debug(`Loading doc-distributor agent: ${agentPath}`);
    const agentInstructions = fs.readFileSync(agentPath, 'utf8');

    // Build child item description for the prompt
    // (epics include their features and planned stories; stories include acceptance criteria)
    let itemDescription;
    if (childType === 'epic') {
      const features = (childItem.features || []).join(', ') || 'none specified';
      const stories = (childItem.stories || []).map(s => `- ${s.name}: ${s.description || ''}`).join('\n') || 'none yet';
      itemDescription = `Type: epic
Name: ${childItem.name}
Domain: ${childItem.domain || 'general'}
Description: ${childItem.description || ''}
Features: ${features}
Stories that will belong to this epic:
${stories}`;
    } else {
      const acceptance = (childItem.acceptance || []).map(a => `- ${a}`).join('\n') || 'none specified';
      itemDescription = `Type: story
Name: ${childItem.name}
User type: ${childItem.userType || 'team member'}
Description: ${childItem.description || ''}
Acceptance criteria:
${acceptance}`;
    }

    const prompt = `## Parent Document

${parentDocContent}

---

## Child Item to Create Documentation For

${itemDescription}

---

Extract and synthesize content from the parent document that is specifically relevant to this ${childType}, then compose the child's \`doc.md\`. Return JSON with a \`child_doc\` field.`;

    this.debug(`Prompt length: ${prompt.length} chars (parent: ${parentDocContent.length}, item: ${itemDescription.length})`);

    const provider = await this.getProviderForStageInstance('doc-distribution');

    // Heartbeat keeps the UI alive during the LLM call; retryWithBackoff
    // absorbs transient provider failures.
    const result = await this._withProgressHeartbeat(
      () => this.retryWithBackoff(
        () => provider.generateJSON(prompt, agentInstructions),
        `doc distribution for ${childType}: ${childItem.name}`
      ),
      (elapsed) => {
        if (elapsed < 15) return `Extracting ${childType}-specific content…`;
        if (elapsed < 40) return `Building ${childItem.name} documentation…`;
        if (elapsed < 65) return `Refining parent document…`;
        return `Still distributing…`;
      },
      progressCallback,
      10000
    );

    const usage = provider.getTokenUsage();
    this.debug(`Doc distribution tokens: ${usage.inputTokens} in · ${usage.outputTokens} out`);

    // Validate response shape and fall back gracefully on malformed output
    // (fallback is a minimal doc built from the child's own name/description).
    const childDoc = (typeof result.child_doc === 'string' && result.child_doc.trim())
      ? result.child_doc
      : `# ${childItem.name}\n\n${childItem.description || ''}\n`;

    this.debug(`Distribution result: child_doc ${childDoc.length} bytes`);

    return { childDoc };
  }
|
|
3472
|
+
|
|
3473
|
+
// Count total hierarchy (nested structure)
|
|
3474
|
+
countTotalHierarchy() {
|
|
3475
|
+
let totalEpics = 0;
|
|
3476
|
+
let totalStories = 0;
|
|
3477
|
+
|
|
3478
|
+
if (!fs.existsSync(this.projectPath)) {
|
|
3479
|
+
return { totalEpics, totalStories };
|
|
3480
|
+
}
|
|
3481
|
+
|
|
3482
|
+
const dirs = fs.readdirSync(this.projectPath);
|
|
3483
|
+
|
|
3484
|
+
// Scan top-level directories (epics)
|
|
3485
|
+
for (const dir of dirs) {
|
|
3486
|
+
const epicWorkJsonPath = path.join(this.projectPath, dir, 'work.json');
|
|
3487
|
+
|
|
3488
|
+
if (!fs.existsSync(epicWorkJsonPath)) continue;
|
|
3489
|
+
|
|
3490
|
+
try {
|
|
3491
|
+
const work = JSON.parse(fs.readFileSync(epicWorkJsonPath, 'utf8'));
|
|
3492
|
+
|
|
3493
|
+
if (work.type === 'epic') {
|
|
3494
|
+
totalEpics++;
|
|
3495
|
+
|
|
3496
|
+
// Count nested stories under this epic
|
|
3497
|
+
const epicDir = path.join(this.projectPath, dir);
|
|
3498
|
+
const epicSubdirs = fs.readdirSync(epicDir).filter(subdir => {
|
|
3499
|
+
const subdirPath = path.join(epicDir, subdir);
|
|
3500
|
+
return fs.statSync(subdirPath).isDirectory();
|
|
3501
|
+
});
|
|
3502
|
+
|
|
3503
|
+
for (const storyDir of epicSubdirs) {
|
|
3504
|
+
const storyWorkJsonPath = path.join(epicDir, storyDir, 'work.json');
|
|
3505
|
+
|
|
3506
|
+
if (!fs.existsSync(storyWorkJsonPath)) continue;
|
|
3507
|
+
|
|
3508
|
+
try {
|
|
3509
|
+
const storyWork = JSON.parse(fs.readFileSync(storyWorkJsonPath, 'utf8'));
|
|
3510
|
+
if (storyWork.type === 'story') {
|
|
3511
|
+
totalStories++;
|
|
3512
|
+
}
|
|
3513
|
+
} catch (error) {
|
|
3514
|
+
// Ignore parse errors
|
|
3515
|
+
}
|
|
3516
|
+
}
|
|
3517
|
+
}
|
|
3518
|
+
} catch (error) {
|
|
3519
|
+
// Ignore parse errors
|
|
3520
|
+
}
|
|
3521
|
+
}
|
|
3522
|
+
|
|
3523
|
+
return { totalEpics, totalStories };
|
|
3524
|
+
}
|
|
3525
|
+
|
|
3526
|
+
/**
|
|
3527
|
+
* Read the full on-disk hierarchy after writing files.
|
|
3528
|
+
* Returns the same shape as preRunSnapshot for direct comparison.
|
|
3529
|
+
*/
|
|
3530
|
+
readPostRunSnapshot() {
|
|
3531
|
+
if (!fs.existsSync(this.projectPath)) return [];
|
|
3532
|
+
|
|
3533
|
+
const snapshot = [];
|
|
3534
|
+
const dirs = fs.readdirSync(this.projectPath).sort();
|
|
3535
|
+
|
|
3536
|
+
for (const dir of dirs) {
|
|
3537
|
+
const epicWorkJsonPath = path.join(this.projectPath, dir, 'work.json');
|
|
3538
|
+
if (!fs.existsSync(epicWorkJsonPath)) continue;
|
|
3539
|
+
|
|
3540
|
+
try {
|
|
3541
|
+
const work = JSON.parse(fs.readFileSync(epicWorkJsonPath, 'utf8'));
|
|
3542
|
+
if (work.type !== 'epic') continue;
|
|
3543
|
+
|
|
3544
|
+
const epicEntry = {
|
|
3545
|
+
id: work.id,
|
|
3546
|
+
name: work.name,
|
|
3547
|
+
domain: work.domain || '',
|
|
3548
|
+
status: work.status || 'unknown',
|
|
3549
|
+
created: work.metadata?.created || null,
|
|
3550
|
+
ceremony: work.metadata?.ceremony || null,
|
|
3551
|
+
stories: []
|
|
3552
|
+
};
|
|
3553
|
+
|
|
3554
|
+
const epicDir = path.join(this.projectPath, dir);
|
|
3555
|
+
const epicSubdirs = fs.readdirSync(epicDir).filter(subdir =>
|
|
3556
|
+
fs.statSync(path.join(epicDir, subdir)).isDirectory()
|
|
3557
|
+
).sort();
|
|
3558
|
+
|
|
3559
|
+
for (const storyDir of epicSubdirs) {
|
|
3560
|
+
const storyWorkJsonPath = path.join(epicDir, storyDir, 'work.json');
|
|
3561
|
+
if (!fs.existsSync(storyWorkJsonPath)) continue;
|
|
3562
|
+
try {
|
|
3563
|
+
const storyWork = JSON.parse(fs.readFileSync(storyWorkJsonPath, 'utf8'));
|
|
3564
|
+
if (storyWork.type === 'story') {
|
|
3565
|
+
epicEntry.stories.push({
|
|
3566
|
+
id: storyWork.id,
|
|
3567
|
+
name: storyWork.name,
|
|
3568
|
+
status: storyWork.status || 'unknown',
|
|
3569
|
+
created: storyWork.metadata?.created || null
|
|
3570
|
+
});
|
|
3571
|
+
}
|
|
3572
|
+
} catch (e) { /* ignore */ }
|
|
3573
|
+
}
|
|
3574
|
+
|
|
3575
|
+
snapshot.push(epicEntry);
|
|
3576
|
+
} catch (e) { /* ignore */ }
|
|
3577
|
+
}
|
|
3578
|
+
|
|
3579
|
+
return snapshot;
|
|
3580
|
+
}
|
|
3581
|
+
|
|
3582
|
+
/**
|
|
3583
|
+
* Rebuild the hierarchy object from work.json files on disk.
|
|
3584
|
+
* Used by resume mode to skip stages 1-6 and jump straight to doc/enrichment stages.
|
|
3585
|
+
* @returns {{ hierarchy: object, epicCount: number, storyCount: number }}
|
|
3586
|
+
*/
|
|
3587
|
+
_rebuildHierarchyFromDisk() {
|
|
3588
|
+
this.debugStage('R', 'Rebuild Hierarchy From Disk (Resume Mode)');
|
|
3589
|
+
|
|
3590
|
+
const epics = [];
|
|
3591
|
+
if (!fs.existsSync(this.projectPath)) {
|
|
3592
|
+
throw new Error('No project directory found — cannot resume');
|
|
3593
|
+
}
|
|
3594
|
+
|
|
3595
|
+
const dirs = fs.readdirSync(this.projectPath).filter(d =>
|
|
3596
|
+
d.startsWith('context-') && fs.existsSync(path.join(this.projectPath, d, 'work.json'))
|
|
3597
|
+
).sort();
|
|
3598
|
+
|
|
3599
|
+
let storyCount = 0;
|
|
3600
|
+
for (const dir of dirs) {
|
|
3601
|
+
const epicWorkPath = path.join(this.projectPath, dir, 'work.json');
|
|
3602
|
+
const epicWork = JSON.parse(fs.readFileSync(epicWorkPath, 'utf8'));
|
|
3603
|
+
|
|
3604
|
+
const epicEntry = {
|
|
3605
|
+
id: epicWork.id,
|
|
3606
|
+
name: epicWork.name,
|
|
3607
|
+
description: epicWork.description || '',
|
|
3608
|
+
domain: epicWork.domain || '',
|
|
3609
|
+
acceptanceCriteria: epicWork.acceptanceCriteria || [],
|
|
3610
|
+
dependencies: epicWork.dependencies || [],
|
|
3611
|
+
metadata: epicWork.metadata || {},
|
|
3612
|
+
stories: [],
|
|
3613
|
+
};
|
|
3614
|
+
|
|
3615
|
+
// Read cached context.md for this epic (used by doc generation)
|
|
3616
|
+
const epicCtxPath = path.join(this.projectPath, dir, 'context.md');
|
|
3617
|
+
if (fs.existsSync(epicCtxPath)) {
|
|
3618
|
+
this._epicContextCache.set(epicWork.name, fs.readFileSync(epicCtxPath, 'utf8'));
|
|
3619
|
+
}
|
|
3620
|
+
|
|
3621
|
+
// Scan story subdirectories
|
|
3622
|
+
const epicDir = path.join(this.projectPath, dir);
|
|
3623
|
+
const storyDirs = fs.readdirSync(epicDir).filter(sd => {
|
|
3624
|
+
const sdPath = path.join(epicDir, sd);
|
|
3625
|
+
return fs.statSync(sdPath).isDirectory() && fs.existsSync(path.join(sdPath, 'work.json'));
|
|
3626
|
+
}).sort();
|
|
3627
|
+
|
|
3628
|
+
for (const sd of storyDirs) {
|
|
3629
|
+
const storyWorkPath = path.join(epicDir, sd, 'work.json');
|
|
3630
|
+
const storyWork = JSON.parse(fs.readFileSync(storyWorkPath, 'utf8'));
|
|
3631
|
+
|
|
3632
|
+
epicEntry.stories.push({
|
|
3633
|
+
id: storyWork.id,
|
|
3634
|
+
name: storyWork.name,
|
|
3635
|
+
description: storyWork.description || '',
|
|
3636
|
+
acceptanceCriteria: storyWork.acceptanceCriteria || [],
|
|
3637
|
+
dependencies: storyWork.dependencies || [],
|
|
3638
|
+
metadata: storyWork.metadata || {},
|
|
3639
|
+
});
|
|
3640
|
+
|
|
3641
|
+
// Read cached story context.md
|
|
3642
|
+
const storyCtxPath = path.join(epicDir, sd, 'context.md');
|
|
3643
|
+
if (fs.existsSync(storyCtxPath)) {
|
|
3644
|
+
this._storyContextCache.set(`${epicWork.name}::${storyWork.name}`, fs.readFileSync(storyCtxPath, 'utf8'));
|
|
3645
|
+
}
|
|
3646
|
+
|
|
3647
|
+
storyCount++;
|
|
3648
|
+
}
|
|
3649
|
+
|
|
3650
|
+
epics.push(epicEntry);
|
|
3651
|
+
}
|
|
3652
|
+
|
|
3653
|
+
const hierarchy = { epics, validation: null };
|
|
3654
|
+
this.debug(`Rebuilt hierarchy from disk: ${epics.length} epics, ${storyCount} stories`);
|
|
3655
|
+
return { hierarchy, epicCount: epics.length, storyCount };
|
|
3656
|
+
}
|
|
3657
|
+
|
|
3658
|
+
/**
 * Main execution method — runs the sprint-planning ceremony pipeline:
 * validate prerequisites → read existing hierarchy → collect scope →
 * decompose (with quality-gate retries) → dedupe → split wide stories →
 * optional user selection → domain-expert validation → renumber →
 * coding-order stamping → write files → generate docs → enrich docs →
 * summary. When `resumeFrom` names a prior checkpoint, stages 1-6 are
 * skipped and the hierarchy is rebuilt from disk instead.
 *
 * @param {Function|null} progressCallback - optional async callback invoked
 *   with human-readable stage messages; may be wrapped here to enforce the
 *   cost threshold before each progress report.
 * @param {{ resumeFrom?: string|null }} [options] - `resumeFrom` is a
 *   checkpoint name recorded by the `checkpoint` helper ('files-written',
 *   'docs-generated', 'enrichment-complete').
 * @returns {Promise<object>} summary result: epic/story counts, token usage,
 *   model/provider, validation issues, and (resume mode only) resumed flags.
 * @throws rethrows any stage error after recording 'abrupt-termination' in
 *   ceremony history; throws `COST_LIMIT_EXCEEDED:<cost>` when the running
 *   cost passes the threshold and no cost-limit callback is registered.
 */
async execute(progressCallback = null, { resumeFrom = null } = {}) {
  // Cost threshold protection — wrap callback to check running cost before each progress call
  if (this._costThreshold != null && progressCallback) {
    const _origCallback = progressCallback;
    progressCallback = async (...args) => {
      if (this._costThreshold != null && this._runningCost >= this._costThreshold) {
        if (this._costLimitReachedCallback) {
          this._costThreshold = null; // disable re-triggering
          await this._costLimitReachedCallback(this._runningCost);
          // returns → ceremony continues with limit disabled
        } else {
          throw new Error(`COST_LIMIT_EXCEEDED:${this._runningCost.toFixed(6)}`);
        }
      }
      return _origCallback(...args);
    };
  }

  // Initialize ceremony history
  const { CeremonyHistory } = await import('./ceremony-history.js');
  const history = new CeremonyHistory(this.avcPath);
  history.init();

  // Start execution tracking
  const executionId = history.startExecution('sprint-planning', 'decomposition');

  // Stage checkpoint helper — updates ceremony history with current stage.
  // Non-fatal: never throws. The recorded stage names are the valid values
  // for `resumeFrom` on a later run.
  const checkpoint = (stage) => {
    try { history.updateExecution('sprint-planning', executionId, { stage, lastCheckpoint: localISO() }); }
    catch {}
  };

  try {
    // Log ceremony execution metadata
    const runId = Date.now();
    const runTimestamp = localISO();
    this.debug('='.repeat(80));
    this.debug('SPRINT PLANNING CEREMONY - EXECUTION START');
    this.debug('='.repeat(80));
    this.debug('Run ID (ms epoch):', runId);
    this.debug('Timestamp:', runTimestamp);
    this.debug('Execution ID:', executionId);
    this.debug('Config', {
      provider: this._providerName,
      model: this._modelName,
      stagesConfig: this.stagesConfig ? JSON.stringify(this.stagesConfig) : 'using defaults',
      projectPath: this.projectPath,
      cwd: process.cwd(),
      nodeVersion: process.version
    });

    const header = getCeremonyHeader('sprint-planning');
    sendCeremonyHeader(header.title);

    const _t0run = Date.now();

    // ── Resume fast-path ─────────────────────────────────────────────
    // When resumeFrom is set, skip stages 1-6 and jump directly to the
    // doc-generation / enrichment stages using hierarchy rebuilt from disk.
    // Note: this branch returns early with its own summary/history handling.
    if (resumeFrom) {
      this.debug(`RESUME MODE — resuming from checkpoint: ${resumeFrom}`);
      await progressCallback?.(`Resuming from ${resumeFrom}…`);

      const { hierarchy, epicCount, storyCount } = this._rebuildHierarchyFromDisk();

      let rootDocContent = '';
      if (fs.existsSync(this.projectDocPath)) {
        rootDocContent = fs.readFileSync(this.projectDocPath, 'utf8');
      }

      if (resumeFrom === 'files-written') {
        // Run stages 7, 8, 9
        sendProgress('Generating documentation from canonical context...');
        await progressCallback?.(`Stage 7/8: Generating documentation (${epicCount} epics, ${storyCount} stories)…`);
        await this.generateDocFiles(hierarchy, rootDocContent, progressCallback);
        checkpoint('docs-generated');

        sendProgress('Enriching story documentation with implementation detail...');
        await progressCallback?.(`Stage 8/8: Enriching story documentation (${storyCount} stories)…`);
        await this.enrichStoryDocs(hierarchy, progressCallback);
        checkpoint('enrichment-complete');
      } else if (resumeFrom === 'docs-generated') {
        // Run stages 8, 9
        sendProgress('Enriching story documentation with implementation detail...');
        await progressCallback?.(`Stage 8/8: Enriching story documentation (${storyCount} stories)…`);
        await this.enrichStoryDocs(hierarchy, progressCallback);
        checkpoint('enrichment-complete');
      }
      // 'enrichment-complete' → only summary (stage 9)

      // Stage 9: Summary
      const { totalEpics, totalStories } = this.countTotalHierarchy();
      const returnResult = {
        epicsCreated: epicCount,
        storiesCreated: storyCount,
        totalEpics,
        totalStories,
        tokenUsage: { input: 0, output: 0, total: 0 },
        model: this._modelName,
        provider: this._providerName,
        validationIssues: [],
        resumed: true,
        resumedFrom: resumeFrom,
      };

      // Token aggregation is best-effort — a failure here must not fail the resume.
      try {
        const aggregated = this._aggregateAllTokenUsage();
        if (aggregated.totalCalls > 0 || aggregated.inputTokens > 0) {
          this.tokenTracker.finalizeRun(this.ceremonyName);
          returnResult.tokenUsage = {
            input: aggregated.inputTokens || 0,
            output: aggregated.outputTokens || 0,
            total: aggregated.totalTokens || 0,
          };
        }
      } catch {}

      // History completion is likewise non-fatal.
      try {
        history.completeExecution('sprint-planning', executionId, 'success', {
          stage: 'completed',
          resumed: true,
          resumedFrom: resumeFrom,
          metrics: { epicsCreated: epicCount, storiesCreated: storyCount, totalEpics, totalStories }
        });
      } catch {}

      sendOutput(`Resume complete — ${totalEpics} Epics, ${totalStories} Stories.`);
      this.debugTiming('TOTAL resume end-to-end', _t0run);
      return returnResult;
    }
    // ── End resume fast-path ─────────────────────────────────────────

    // Stage 1: Validate
    sendProgress('Validating prerequisites...');
    await progressCallback?.('Stage 1/6: Validating prerequisites…');
    let _ts = Date.now();
    this.validatePrerequisites();
    this.debugTiming('Stage 1 — validatePrerequisites', _ts);

    // Stage 2: Read existing hierarchy
    sendProgress('Analyzing existing project structure...');
    await progressCallback?.('Stage 2/6: Analyzing existing project structure…');
    _ts = Date.now();
    const { existingEpics, existingStories, maxEpicNum, maxStoryNums, preRunSnapshot } = this.readExistingHierarchy();
    this.debugTiming('Stage 2 — readExistingHierarchy', _ts);

    if (existingEpics.size > 0) {
      this.debug(`Found ${existingEpics.size} existing Epics, ${existingStories.size} existing Stories`);
      sendInfo(`Found ${existingEpics.size} existing Epics, ${existingStories.size} existing Stories`);
    } else {
      this.debug('No existing Epics/Stories found (first expansion)');
    }

    // Stage 3: Collect scope
    sendProgress('Collecting project scope...');
    await progressCallback?.('Stage 3/6: Collecting project scope…');
    _ts = Date.now();
    const scope = await this.collectNewScope();
    this.debugTiming('Stage 3 — collectNewScope', _ts);

    // Clear screen before decomposition phase ('\x1bc' = terminal full reset)
    process.stdout.write('\x1bc');
    outputBuffer.clear();

    // Stage 4: Decompose
    sendProgress('Decomposing scope into Epics and Stories...');
    await progressCallback?.('Stage 4/6: Decomposing scope into Epics and Stories…');
    _ts = Date.now();
    let hierarchy = await this.decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback);

    // Quality gate: retry decomposition if structural issues detected.
    // On the final attempt the result is accepted with warnings rather than failing.
    const maxDecompRetries = 3;
    for (let retry = 1; retry <= maxDecompRetries; retry++) {
      const issues = this._checkDecompositionQuality(hierarchy);
      if (issues.length === 0) break;

      if (retry === maxDecompRetries) {
        this.debug(`Decomposition quality issues persist after ${maxDecompRetries} retries — proceeding with warnings`, { issues });
        break;
      }

      this.debug(`Decomposition quality gate failed (attempt ${retry}/${maxDecompRetries})`, { issues });
      await progressCallback?.(`Decomposition has ${issues.length} issue(s) — retrying (${retry}/${maxDecompRetries})…`);

      // Retry with violation feedback
      hierarchy = await this.decomposeIntoEpicsStories(scope, existingEpics, existingStories, progressCallback, issues);
    }

    this.debugTiming('Stage 4 — decomposeIntoEpicsStories', _ts);
    checkpoint('decomposition-complete');

    // Log raw LLM output before any validation/modification
    this.debugSection('POST-DECOMPOSE: Raw LLM Output (before validation)');
    this.debugHierarchySnapshot('POST-DECOMPOSE', hierarchy.epics.map(e => ({
      id: e.id || '(no-id)',
      name: e.name,
      stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
    })));
    this.debug('LLM validation field', hierarchy.validation || null);

    // NOTE: Scaffolding epic is generated AFTER context generation (in validateHierarchy)
    // so it can read tech requirements from all domain epic/story contexts.
    // Dependency injection happens inside _generateScaffoldingEpic().

    // Stage 4.1: LLM-based duplicate detection
    sendProgress('Detecting duplicate epics and stories...');
    await progressCallback?.('Stage 4.1/8: Detecting duplicates…');
    _ts = Date.now();
    hierarchy = await this.deduplicateEpicsLLM(hierarchy, preRunSnapshot, progressCallback);
    this.debugTiming('Stage 4.1 — deduplicateEpicsLLM', _ts);

    // Log post-dedup snapshot
    this.debugSection('POST-DEDUP: Hierarchy after duplicate detection');
    this.debugHierarchySnapshot('POST-DEDUP', hierarchy.epics.map(e => ({
      id: e.id || '(no-id)',
      name: e.name,
      stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
    })));

    // Stage 4.2: Review and split wide stories
    sendProgress('Reviewing story scopes for splits...');
    await progressCallback?.('Stage 4.2/8: Reviewing story scopes for required splits…');
    _ts = Date.now();
    hierarchy = await this.reviewAndSplitStories(hierarchy, progressCallback);
    this.debugTiming('Stage 4.2 — reviewAndSplitStories', _ts);

    // Log post-split snapshot
    this.debugSection('POST-SPLIT: Hierarchy after story scope review');
    this.debugHierarchySnapshot('POST-SPLIT', hierarchy.epics.map(e => ({
      id: e.id || '(no-id)',
      name: e.name,
      stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
    })));

    // Stage 4.5: User selection gate (Kanban UI only; null = run straight through)
    if (this._selectionCallback) {
      await progressCallback?.('Stage 4.5/6: Waiting for epic/story selection…');
      const selection = await this._selectionCallback(hierarchy);
      if (selection) {
        const { selectedEpicIds, selectedStoryIds } = selection;
        hierarchy = this._filterHierarchyBySelection(hierarchy, selectedEpicIds, selectedStoryIds);
        const epicCount = hierarchy.epics.length;
        const storyCount = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
        this.debug(`Selection applied: ${epicCount} epics, ${storyCount} stories selected`);
        await progressCallback?.(null, `Confirmed: ${epicCount} epics, ${storyCount} stories selected`, {});
      }
    }

    // Clear screen before validation phase
    process.stdout.write('\x1bc');
    outputBuffer.clear();

    // Stage 5: Multi-Agent Validation
    // Counts are captured here (post-selection) and reused in later stage messages.
    const epicCount5 = hierarchy.epics.length;
    const storyCount5 = hierarchy.epics.reduce((s, e) => s + (e.stories?.length || 0), 0);
    sendProgress('Validating Epics and Stories with domain experts...');
    await progressCallback?.(`Stage 5/6: Validating with domain experts (${epicCount5} epics, ${storyCount5} stories)…`);
    _ts = Date.now();
    hierarchy = await this.validateHierarchy(hierarchy, progressCallback, scope);
    checkpoint('validation-complete');
    this.debugTiming(`Stage 5 — validateHierarchy (${epicCount5} epics, ${storyCount5} stories)`, _ts);

    // Log hierarchy after validation (may have been modified)
    this.debugSection('POST-VALIDATION: Hierarchy after domain-expert validation');
    this.debugHierarchySnapshot('POST-VALIDATION', hierarchy.epics.map(e => ({
      id: e.id || '(no-id)',
      name: e.name,
      stories: (e.stories || []).map(s => ({ id: s.id || '(no-id)', name: s.name }))
    })));

    // Snapshot provisional IDs before renumbering so writeHierarchyFiles can rename
    // any provisional folders that were written early during context generation.
    for (const epic of hierarchy.epics) {
      epic._provisionalId = epic.id;
      for (const story of epic.stories || []) {
        story._provisionalId = story.id;
      }
    }

    // Renumber IDs
    hierarchy = this.renumberHierarchy(hierarchy, maxEpicNum, maxStoryNums);

    // Clear screen before file writing phase
    process.stdout.write('\x1bc');
    outputBuffer.clear();

    // Compute coding order from dependency graph and stamp phase/order onto hierarchy
    // (before writeHierarchyFiles so work.json includes codingPhase and codingOrder)
    try {
      _ts = Date.now();
      const codingOrder = computeCodingOrder(hierarchy);

      // Stamp phase and order onto each epic and story in the hierarchy
      const epicPhaseMap = new Map(); // epicId → phase number (1-based)
      for (const phase of codingOrder.phases) {
        for (const epicId of phase.epicIds) {
          epicPhaseMap.set(epicId, phase.phase + 1);
        }
      }
      let globalOrder = 0;
      for (const phase of codingOrder.json.phases) {
        for (const epicEntry of phase.epics) {
          const epic = hierarchy.epics.find(e => e.id === epicEntry.id);
          if (epic) {
            globalOrder++;
            epic.metadata = { ...(epic.metadata || {}), codingPhase: phase.phase, codingOrder: globalOrder };
          }
          for (const storyEntry of epicEntry.stories) {
            const epic2 = hierarchy.epics.find(e => e.id === epicEntry.id);
            const story = (epic2?.stories || []).find(s => s.id === storyEntry.id);
            if (story) {
              globalOrder++;
              story.metadata = { ...(story.metadata || {}), codingPhase: phase.phase, codingOrder: globalOrder };
            }
          }
        }
      }

      // Write coding-order files
      const codingOrderMdPath = path.join(this.projectPath, 'coding-order.md');
      const codingOrderJsonPath = path.join(this.projectPath, 'coding-order.json');
      fs.writeFileSync(codingOrderMdPath, codingOrder.md, 'utf8');
      fs.writeFileSync(codingOrderJsonPath, JSON.stringify(codingOrder.json, null, 2), 'utf8');
      this.debug(`Coding order generated: ${codingOrder.phases.length} phases, critical path length ${codingOrder.criticalPath.length}`);
      this.debugTiming('Stage 6 — computeCodingOrder', _ts);
    } catch (err) {
      // Coding order is an enhancement — failure here must not stop the ceremony.
      this.debug(`Coding order generation failed (non-critical): ${err.message}`);
    }

    // Stage 6: Write hierarchy files (work.json + context.md — no LLM)
    sendProgress('Writing files and canonical context...');
    await progressCallback?.(`Stage 6/8: Writing files and canonical context (${epicCount5} epics, ${storyCount5} stories)…`);
    _ts = Date.now();
    const { epicCount, storyCount } = await this.writeHierarchyFiles(hierarchy, progressCallback);
    checkpoint('files-written');
    this.debugTiming(`Stage 6 — writeHierarchyFiles (${epicCount5} epics, ${storyCount5} stories)`, _ts);

    // Notify listeners (e.g. Kanban board) that work.json files are now on disk.
    // This fires BEFORE Stage 7/8 so the board can list new epics/stories immediately.
    await this._hierarchyWrittenCallback?.({ epicCount, storyCount });

    // Stage 7: Generate narrative doc.md from canonical context.md (replaces doc-distribution)
    sendProgress('Generating documentation from canonical context...');
    await progressCallback?.(`Stage 7/8: Generating documentation (${epicCount5} epics, ${storyCount5} stories)…`);
    _ts = Date.now();
    let rootDocContent = '';
    if (fs.existsSync(this.projectDocPath)) {
      rootDocContent = fs.readFileSync(this.projectDocPath, 'utf8');
    }
    await this.generateDocFiles(hierarchy, rootDocContent, progressCallback);
    checkpoint('docs-generated');
    this.debugTiming(`Stage 7 — generateDocFiles (${epicCount5} epics, ${storyCount5} stories)`, _ts);

    // Stage 8: Enrich story docs with implementation detail
    sendProgress('Enriching story documentation with implementation detail...');
    await progressCallback?.(`Stage 8/8: Enriching story documentation (${storyCount5} stories)…`);
    _ts = Date.now();
    await this.enrichStoryDocs(hierarchy, progressCallback);
    checkpoint('enrichment-complete');
    this.debugTiming(`Stage 8 — enrichStoryDocs (${storyCount5} stories)`, _ts);

    // Stage 9: Summary & Cleanup
    this.debugStage(9, 'Summary & Cleanup');

    const { totalEpics, totalStories } = this.countTotalHierarchy();

    // ====================================================================
    // Stage 9: Summary & Cleanup (non-fatal — all real work is done)
    // Errors here must not crash the ceremony since files are already on disk.
    // ====================================================================
    let tokenUsageSummary = null;
    const returnResult = {
      epicsCreated: epicCount,
      storiesCreated: storyCount,
      totalEpics,
      totalStories,
      tokenUsage: { input: 0, output: 0, total: 0 },
      model: this._modelName,
      provider: this._providerName,
      validationIssues: [],
    };

    try {
      // Post-run snapshot
      const postRunSnapshot = this.readPostRunSnapshot();
      this.debugHierarchySnapshot('POST-RUN', postRunSnapshot);
      this.debugTiming('TOTAL run() end-to-end', _t0run);
      sendOutput(`Created ${epicCount} Epics, ${storyCount} Stories. Total: ${totalEpics} Epics, ${totalStories} Stories.`);

      // Token usage
      const aggregated = this._aggregateAllTokenUsage();
      if (aggregated.totalCalls > 0 || aggregated.inputTokens > 0) {
        tokenUsageSummary = aggregated;
        this.debug('Token usage (all providers)', tokenUsageSummary);
        this.tokenTracker.finalizeRun(this.ceremonyName);
        returnResult.tokenUsage = {
          input: aggregated.inputTokens || 0,
          output: aggregated.outputTokens || 0,
          total: aggregated.totalTokens || 0,
        };
      }

      sendOutput('Run /seed <story-id> to decompose a Story into Tasks.');

      // Comparison summary log
      const runDuration = Date.now() - runId;
      this.debug('\n' + '='.repeat(80));
      this.debug('SPRINT PLANNING CEREMONY - EXECUTION END');
      this.debug('='.repeat(80));
      this.debug('Run ID:', runId);
      this.debug('Duration:', `${Math.round(runDuration / 1000)} seconds`);

      const duplicateAnalysis = this._lastDuplicateAnalysis || { skippedEpics: [], skippedStories: [] };
      this.debug('Duplicate detection', {
        epicsSkipped: duplicateAnalysis.skippedEpics.length,
        storiesSkipped: duplicateAnalysis.skippedStories.length,
      });
      if (tokenUsageSummary) this.debug('Token usage this run', tokenUsageSummary);
      this.debug('='.repeat(80) + '\n');
    } catch (summaryErr) {
      // Summary is non-fatal — log and continue
      this.debug('Summary logging failed (non-fatal, all files are on disk)', { error: summaryErr.message });
    }

    // Complete ceremony history (also non-fatal)
    try {
      const filesGenerated = [];
      filesGenerated.push(path.join(this.projectPath, 'context.md'));
      for (const epic of hierarchy.epics) {
        filesGenerated.push(path.join(this.projectPath, epic.id, 'work.json'));
        filesGenerated.push(path.join(this.projectPath, epic.id, 'context.md'));
        filesGenerated.push(path.join(this.projectPath, epic.id, 'doc.md'));
        for (const story of epic.stories || []) {
          filesGenerated.push(path.join(this.projectPath, epic.id, story.id, 'work.json'));
          filesGenerated.push(path.join(this.projectPath, epic.id, story.id, 'context.md'));
          filesGenerated.push(path.join(this.projectPath, epic.id, story.id, 'doc.md'));
        }
      }
      history.completeExecution('sprint-planning', executionId, 'success', {
        filesGenerated,
        tokenUsage: tokenUsageSummary ? {
          input: tokenUsageSummary.inputTokens,
          output: tokenUsageSummary.outputTokens,
          total: tokenUsageSummary.totalTokens
        } : null,
        model: this._modelName,
        provider: this._providerName,
        stage: 'completed',
        metrics: {
          epicsCreated: epicCount, storiesCreated: storyCount,
          totalEpics, totalStories
        }
      });
    } catch (historyErr) {
      this.debug('Ceremony history update failed (non-fatal)', { error: historyErr.message });
    }

    return returnResult;
  } catch (error) {
    const isCancelled = error.message === 'CEREMONY_CANCELLED';

    // Track tokens even for cancelled/error runs — tokens were spent up to this point
    try {
      const aggregated = this._aggregateAllTokenUsage();
      if (aggregated.totalCalls > 0 || aggregated.inputTokens > 0) {
        this.tokenTracker.finalizeRun(this.ceremonyName);
        this.debug('Token tracking finalized (partial run)', aggregated);
      }
    } catch (trackErr) {
      this.debug('Could not save token tracking on error', { error: trackErr.message });
    }

    // Cancellation is a user action, not a failure — skip the error dump for it.
    if (!isCancelled) {
      this.debug('\n========== ERROR OCCURRED ==========');
      this.debug('Error details', {
        message: error.message,
        stack: error.stack,
        name: error.name
      });
      this.debug('Application state at failure', {
        ceremonyName: this.ceremonyName,
        provider: this._providerName,
        model: this._modelName,
        projectPath: this.projectPath,
        currentWorkingDir: process.cwd(),
        nodeVersion: process.version,
        platform: process.platform
      });
      sendError(`Project expansion failed: ${error.message}`);
    }

    // Mark execution as aborted on error
    history.completeExecution('sprint-planning', executionId, 'abrupt-termination', {
      stage: isCancelled ? 'cancelled' : 'error',
      error: error.message
    });

    throw error;
  }
}
|
|
4160
|
+
}
|
|
4161
|
+
|
|
4162
|
+
export { SprintPlanningProcessor };
|