ai-control-center 1.15.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +584 -0
- package/bin/aicc.js +772 -0
- package/lib/actions/approve.js +71 -0
- package/lib/actions/assign-project.js +132 -0
- package/lib/actions/browser-test.js +64 -0
- package/lib/actions/cleanup.js +174 -0
- package/lib/actions/debug.js +298 -0
- package/lib/actions/deploy.js +1229 -0
- package/lib/actions/fix-bug.js +134 -0
- package/lib/actions/new-feature.js +255 -0
- package/lib/actions/reject.js +307 -0
- package/lib/actions/review.js +706 -0
- package/lib/actions/status.js +47 -0
- package/lib/agents/browser-qa-agent.js +611 -0
- package/lib/agents/payment-agent.js +116 -0
- package/lib/agents/suggestion-agent.js +88 -0
- package/lib/cli.js +303 -0
- package/lib/config.js +243 -0
- package/lib/hub/hub-server.js +440 -0
- package/lib/hub/project-poller.js +75 -0
- package/lib/hub/skill-registry.js +89 -0
- package/lib/hub/state-aggregator.js +204 -0
- package/lib/index.js +471 -0
- package/lib/init/doctor.js +523 -0
- package/lib/init/presets.js +222 -0
- package/lib/init/skill-fetcher.js +77 -0
- package/lib/init/wizard.js +973 -0
- package/lib/integrations/codex-runner.js +128 -0
- package/lib/integrations/github-actions.js +248 -0
- package/lib/integrations/github-reporter.js +229 -0
- package/lib/integrations/screenshot-store.js +102 -0
- package/lib/openclaw/bridge.js +650 -0
- package/lib/openclaw/generate-skill.js +235 -0
- package/lib/openclaw/openclaw.json +64 -0
- package/lib/orchestrator/autonomous-loop.js +429 -0
- package/lib/orchestrator/thread-triggers.js +63 -0
- package/lib/roleplay/agent-messenger.js +75 -0
- package/lib/roleplay/discussion-threads.js +303 -0
- package/lib/roleplay/health-monitor.js +121 -0
- package/lib/roleplay/pm-agent.js +513 -0
- package/lib/roleplay/roleplay-config.js +25 -0
- package/lib/roleplay/room.js +164 -0
- package/lib/shared/action-runner.js +2330 -0
- package/lib/shared/event-bus.js +185 -0
- package/lib/slack/bot.js +378 -0
- package/lib/telegram/bot.js +416 -0
- package/lib/telegram/commands.js +1267 -0
- package/lib/telegram/keyboards.js +113 -0
- package/lib/telegram/notifications.js +247 -0
- package/lib/twitch/bot.js +354 -0
- package/lib/twitch/commands.js +302 -0
- package/lib/twitch/notifications.js +63 -0
- package/lib/utils/achievements.js +191 -0
- package/lib/utils/activity-log.js +182 -0
- package/lib/utils/agent-leaderboard.js +119 -0
- package/lib/utils/audit-logger.js +232 -0
- package/lib/utils/codebase-context.js +288 -0
- package/lib/utils/codebase-indexer.js +381 -0
- package/lib/utils/config-schema.js +230 -0
- package/lib/utils/context-compressor.js +172 -0
- package/lib/utils/correlation.js +63 -0
- package/lib/utils/cost-tracker.js +423 -0
- package/lib/utils/cron-scheduler.js +53 -0
- package/lib/utils/db-adapter.js +293 -0
- package/lib/utils/display.js +272 -0
- package/lib/utils/errors.js +116 -0
- package/lib/utils/format.js +134 -0
- package/lib/utils/intent-engine.js +464 -0
- package/lib/utils/mcp-client.js +238 -0
- package/lib/utils/model-ab-test.js +164 -0
- package/lib/utils/notify.js +122 -0
- package/lib/utils/persona-loader.js +80 -0
- package/lib/utils/pipeline-lock.js +73 -0
- package/lib/utils/pipeline.js +214 -0
- package/lib/utils/plugin-runner.js +234 -0
- package/lib/utils/rate-limiter.js +84 -0
- package/lib/utils/rbac.js +74 -0
- package/lib/utils/runner.js +1809 -0
- package/lib/utils/security.js +191 -0
- package/lib/utils/self-healer.js +144 -0
- package/lib/utils/skill-loader.js +255 -0
- package/lib/utils/spinner.js +132 -0
- package/lib/utils/stage-queue.js +50 -0
- package/lib/utils/state-machine.js +89 -0
- package/lib/utils/status-bar.js +327 -0
- package/lib/utils/token-estimator.js +101 -0
- package/lib/utils/ux-analyzer.js +101 -0
- package/lib/utils/webhook-emitter.js +83 -0
- package/lib/web/public/css/styles.css +417 -0
- package/lib/web/public/dark-mode.js +44 -0
- package/lib/web/public/hub/kanban.html +206 -0
- package/lib/web/public/index.html +45 -0
- package/lib/web/public/js/app.js +71 -0
- package/lib/web/public/js/ask.js +110 -0
- package/lib/web/public/js/dashboard.js +165 -0
- package/lib/web/public/js/deploy.js +72 -0
- package/lib/web/public/js/feature.js +79 -0
- package/lib/web/public/js/health.js +65 -0
- package/lib/web/public/js/logs.js +93 -0
- package/lib/web/public/js/review.js +123 -0
- package/lib/web/public/js/ws-client.js +82 -0
- package/lib/web/public/office/css/office.css +678 -0
- package/lib/web/public/office/index.html +148 -0
- package/lib/web/public/office/js/achievements-ui.js +117 -0
- package/lib/web/public/office/js/character.js +1056 -0
- package/lib/web/public/office/js/chat-bubbles.js +177 -0
- package/lib/web/public/office/js/cost-overlay.js +123 -0
- package/lib/web/public/office/js/day-night.js +68 -0
- package/lib/web/public/office/js/effects.js +632 -0
- package/lib/web/public/office/js/engine.js +146 -0
- package/lib/web/public/office/js/feature-ticket.js +216 -0
- package/lib/web/public/office/js/hub-client.js +60 -0
- package/lib/web/public/office/js/main.js +1757 -0
- package/lib/web/public/office/js/office-layout.js +1524 -0
- package/lib/web/public/office/js/pathfinding.js +144 -0
- package/lib/web/public/office/js/pixel-sprites.js +1454 -0
- package/lib/web/public/office/js/progress-bars.js +117 -0
- package/lib/web/public/office/js/replay.js +191 -0
- package/lib/web/public/office/js/sound-effects.js +91 -0
- package/lib/web/public/office/js/sprite-renderer.js +211 -0
- package/lib/web/public/office/js/stamina-system.js +89 -0
- package/lib/web/public/office/js/ui.js +107 -0
- package/lib/web/public/onboarding/index.html +243 -0
- package/lib/web/public/timeline/index.html +195 -0
- package/lib/web/routes/api.js +499 -0
- package/lib/web/routes/logs.js +20 -0
- package/lib/web/routes/metrics.js +99 -0
- package/lib/web/server.js +183 -0
- package/lib/web/ws/handler.js +65 -0
- package/package.json +67 -0
- package/templates/agent-architect.md +69 -0
- package/templates/agent-gemini-pm.md +49 -0
- package/templates/agent-gemini-reviewer.md +52 -0
- package/templates/copilot-instructions.md +36 -0
- package/templates/pipelines/mobile.json +27 -0
- package/templates/pipelines/nodejs-api.json +27 -0
- package/templates/pipelines/python.json +27 -0
- package/templates/pipelines/react.json +27 -0
- package/templates/pipelines/salesforce.json +27 -0
- package/templates/role-gemini.md +97 -0
- package/templates/skill-architect.md +114 -0
- package/templates/skill-browser-qa.md +50 -0
- package/templates/skill-bug-from-qa.md +58 -0
- package/templates/skill-chatbot.md +93 -0
- package/templates/skill-implement.md +78 -0
- package/templates/skill-openclaw.md +174 -0
- package/templates/skill-payment.md +110 -0
- package/templates/skill-pm-spec.md +77 -0
- package/templates/skill-requirement-capture.md +97 -0
- package/templates/skill-review.md +108 -0
- package/templates/skill-reviewer-qa.md +44 -0
- package/templates/skill-suggestion.md +45 -0
- package/templates/skill-template.md +142 -0
|
@@ -0,0 +1,2330 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Non-interactive action wrappers for web UI and Telegram bot.
|
|
3
|
+
*
|
|
4
|
+
* These functions call the same shell scripts and utilities as the terminal CLI
|
|
5
|
+
* but accept parameters directly (no inquirer prompts) and return JSON results.
|
|
6
|
+
*/
|
|
7
|
+
import { execSync } from 'child_process';
|
|
8
|
+
import { existsSync, mkdirSync, readdirSync, readFileSync, unlinkSync, writeFileSync } from 'fs';
|
|
9
|
+
import { resolve, sep } from 'path';
|
|
10
|
+
import { getConfig, getPipelineStage } from '../config.js';
|
|
11
|
+
import { trackPipelineCompletion } from '../utils/achievements.js';
|
|
12
|
+
import { audit, AUDIT_EVENTS, getAuditEntries } from '../utils/audit-logger.js';
|
|
13
|
+
import { getContextString } from '../utils/codebase-context.js';
|
|
14
|
+
import { generateCorrelationId } from '../utils/correlation.js';
|
|
15
|
+
import { formatCostSummary, getActiveBudget, getCostSummary } from '../utils/cost-tracker.js';
|
|
16
|
+
import { appendLearnedContext, saveExternalPrompt } from '../utils/intent-engine.js';
|
|
17
|
+
import { injectPersona, loadPersona } from '../utils/persona-loader.js';
|
|
18
|
+
import { clearCheckpoints, getLatestFilePath, getRootDir, getStatus, getWorkflowDir, loadCheckpoint, saveCheckpoint, updateStatus } from '../utils/pipeline.js';
|
|
19
|
+
import { getPluginStatus, runAfterStage, runBeforeStage } from '../utils/plugin-runner.js';
|
|
20
|
+
import { AILimitError, capture, circuitBreaker, getAIMode, getBannedModels, getOllamaModel, isAvailable, isModelAvailable, isOllamaAvailable, runAI, runClaude, runCopilot, runCopilotWithModel, runGemini, runPipelineStage } from '../utils/runner.js';
|
|
21
|
+
import { injectSkills } from '../utils/skill-loader.js';
|
|
22
|
+
import { logActivity } from '../utils/activity-log.js';
|
|
23
|
+
import { runABTest, saveABResult, getABSummary } from '../utils/model-ab-test.js';
|
|
24
|
+
import { browserTestAction } from '../actions/browser-test.js';
|
|
25
|
+
import { bus } from './event-bus.js';
|
|
26
|
+
|
|
27
|
+
// ─── Opus Confirmation ────────────────────────────────────────────────────────
|
|
28
|
+
// When Claude fails and Copilot opus would be used for a complex stage,
|
|
29
|
+
// we emit an event and wait for user confirmation (60s timeout → falls back to sonnet).
|
|
30
|
+
|
|
31
|
+
const _opusConfirmations = new Map(); // featureId → resolve(boolean)
|
|
32
|
+
|
|
33
|
+
/**
 * Called by the Telegram bot when the user confirms or declines opus usage.
 * Looks up the pending resolver for the feature and settles it; a feature
 * with no pending confirmation is silently ignored.
 */
export function resolveOpusConfirmation(featureId, confirmed) {
  const resolver = _opusConfirmations.get(featureId);
  if (!resolver) return;
  _opusConfirmations.delete(featureId);
  resolver(confirmed);
}
|
|
43
|
+
|
|
44
|
+
/**
 * Emit an opus confirmation request and await user response (60s timeout).
 * Returns true if confirmed, false if declined or timed out.
 *
 * @param {Function} notify - event callback: notify(type, payload).
 * @param {string} featureId - feature the confirmation belongs to.
 * @returns {Promise<boolean>} resolution of the user's choice.
 */
async function requestOpusConfirmation(notify, featureId) {
  return new Promise((resolve) => {
    // Auto-decline after 60 seconds if the user never answers.
    const timer = setTimeout(() => {
      if (_opusConfirmations.has(featureId)) {
        _opusConfirmations.delete(featureId);
        notify('heartbeat', { message: `⏳ No response — using fallback model for architecture` });
        resolve(false);
      }
    }, 60000);

    // Fix: store a wrapped resolver that cancels the pending timeout.
    // The old code left the 60s timer running after an explicit answer,
    // which kept the event loop alive and fired a redundant callback.
    _opusConfirmations.set(featureId, (confirmed) => {
      clearTimeout(timer);
      resolve(confirmed);
    });

    notify('opus_confirm_request', {
      featureId,
      message:
        `⚠️ <b>Claude is unavailable.</b> The architecture stage benefits from a powerful model.\n\n` +
        `Use <b>Opus 4.6</b> for architecture? This costs premium requests.\n` +
        `(Sonnet 4.6 will be used automatically in 60s if no reply)`,
    });
  });
}
|
|
68
|
+
|
|
69
|
+
// ─── Status ───────────────────────────────────────────────────────────────────
|
|
70
|
+
|
|
71
|
+
/**
 * Return the pipeline status enriched with AI-consumer fields:
 * banned models, per-provider circuit-breaker health, current stage
 * duration, last error, and cost accumulated so far.
 */
export function getStatusData() {
  const status = getStatus();

  const banned = getBannedModels();
  if (banned.length > 0) status._bannedModels = banned;

  // Circuit-breaker state per provider; an unknown provider reads as CLOSED.
  status._provider_health = {};
  ['gemini', 'claude', 'copilot'].forEach((provider) => {
    status._provider_health[provider] = circuitBreaker.getState(provider)?.state || 'CLOSED';
  });

  // How long the currently active stage has been running.
  const stageInfo = status.metrics?.stages?.[status.stage];
  if (stageInfo?.startedAt) {
    status._stage_duration_ms = Date.now() - new Date(stageInfo.startedAt).getTime();
  }

  if (status.error) status._last_error = status.error;

  // Cost so far for the current feature (best-effort).
  try {
    status._cost_so_far = getCostSummary().totalCost || 0;
  } catch { /* ok */ }

  return status;
}
|
|
103
|
+
|
|
104
|
+
// ─── Health Check ─────────────────────────────────────────────────────────────
|
|
105
|
+
|
|
106
|
+
/**
 * Collect a full health snapshot: CLI tool availability, workflow dirs,
 * Ollama, pipeline status, per-provider circuit breakers, costs, token
 * budget, and an overall healthy/degraded/unhealthy verdict.
 *
 * @returns {Promise<object>} the `checks` object assembled below.
 */
export async function getHealthData() {
  const startTime = process.uptime();
  const checks = {};

  // Probe a CLI tool via `which` — { available, path }.
  const toolCheck = async (name) => {
    try {
      const result = await capture(`which ${name}`);
      return { available: result.code === 0, path: result.stdout.trim() };
    } catch {
      return { available: false, path: null };
    }
  };

  checks.gemini = await toolCheck('gemini');
  checks.claude = await toolCheck('claude');
  checks.copilot = await toolCheck('copilot');
  checks.sf = await toolCheck('sf');
  checks.gh = await toolCheck('gh');

  const root = getRootDir();
  const wfDir = getWorkflowDir();
  checks.workflow = {
    aiWorkflow: existsSync(wfDir),
    skills: existsSync(resolve(root, '.claude/skills')),
    agents: existsSync(resolve(root, '.claude/agents')),
  };

  // Ollama local AI
  const ollamaStatus = await isOllamaAvailable();
  checks.ollama = {
    available: ollamaStatus.available,
    model: getOllamaModel(),
    models: ollamaStatus.models,
    mode: getAIMode(),
  };

  const status = getStatus();
  checks.pipeline = status;

  // Enhanced diagnostics (Task 4.1)
  const banned = getBannedModels();

  // AI provider circuit breaker states
  checks.ai = {
    gemini: {
      circuitState: circuitBreaker.getState('gemini'),
      bannedModels: banned.filter(b => b.model.includes('gemini')),
    },
    claude: {
      circuitState: circuitBreaker.getState('claude'),
      bannedModels: banned.filter(b => b.model.includes('claude') || b.model.includes('sonnet') || b.model.includes('haiku')),
    },
    copilot: {
      circuitState: circuitBreaker.getState('copilot'),
      bannedModels: banned.filter(b => b.model.includes('copilot')),
    },
  };

  // Cost info. (Fix: removed the unused `today` local the old code computed here.)
  try {
    const allCosts = getCostSummary();
    checks.costs = {
      total: allCosts.totalCost,
      totalCalls: allCosts.totalCalls,
      byProvider: allCosts.byProvider,
    };
  } catch { checks.costs = { total: 0, totalCalls: 0 }; }

  // Token budget for current feature
  if (status.current_feature) {
    const budget = getActiveBudget(status.current_feature);
    if (budget) {
      checks.tokenBudget = budget.toJSON();
    }
  }

  // Pipeline metrics
  checks.metrics = status.metrics || null;

  // Overall verdict: unhealthy when every provider circuit is OPEN or the
  // active stage has been running > 10 min; degraded when any circuit is OPEN.
  const anyCircuitOpen = ['gemini', 'claude', 'copilot'].some(
    p => checks.ai[p]?.circuitState?.state === 'OPEN'
  );
  const allCircuitsOpen = ['gemini', 'claude', 'copilot'].every(
    p => checks.ai[p]?.circuitState?.state === 'OPEN'
  );
  const isStalled = status.stage && ['spec', 'arch', 'impl', 'review', 'fix'].includes(status.stage)
    && status.metrics?.stages?.[status.stage]?.startedAt
    && (Date.now() - new Date(status.metrics.stages[status.stage].startedAt).getTime()) > 10 * 60 * 1000;

  checks.status = allCircuitsOpen || isStalled ? 'unhealthy' : anyCircuitOpen ? 'degraded' : 'healthy';
  checks.uptime = Math.round(startTime);

  return checks;
}
|
|
202
|
+
|
|
203
|
+
// ─── File Listing ─────────────────────────────────────────────────────────────
|
|
204
|
+
|
|
205
|
+
/**
 * List .md/.log/.json files in a workflow subdirectory, newest-name first.
 * Returns [{ name, path }] where path is "{subdir}/{name}"; [] if the
 * directory does not exist.
 */
export function listFiles(subdir) {
  const dir = resolve(getWorkflowDir(), subdir);
  if (!existsSync(dir)) return [];

  const wanted = /\.(md|log|json)$/;
  const names = readdirSync(dir).filter((entry) => wanted.test(entry));
  names.sort();
  names.reverse();

  return names.map((name) => ({ name, path: `${subdir}/${name}` }));
}
|
|
217
|
+
|
|
218
|
+
/**
 * Read a file from within the workflow directory.
 * Returns the file text, or null when the path escapes the workflow dir
 * or the file does not exist.
 */
export function readFile(subdir, name) {
  // Path traversal protection — ensure the resolved path stays within the
  // workflow dir. Fix: a plain startsWith(wfDir) check is insufficient
  // because a sibling like "<wfDir>-evil/x" also starts with wfDir; anchor
  // the comparison at a path-separator boundary instead.
  const wfDir = getWorkflowDir();
  const filePath = resolve(wfDir, subdir, name);
  if (filePath !== wfDir && !filePath.startsWith(wfDir + sep)) return null;
  if (!existsSync(filePath)) return null;
  return readFileSync(filePath, 'utf8');
}
|
|
226
|
+
|
|
227
|
+
/**
 * List all available pipeline documents as labelled items.
 * Returns an array of { label, key, subdir, name } for the docs keyboard.
 * key = "{subdir}/{name}" (URI-encoded) — used as the doc:view callback data.
 * At most 3 documents per subdirectory are included.
 */
export function listAvailableDocs() {
  const SUBDIRS = [
    { dir: 'specs', emoji: '📝', prefix: 'Spec' },
    { dir: 'architecture', emoji: '🏗', prefix: 'Arch' },
    { dir: 'tasks', emoji: '📋', prefix: 'Tasks' },
    { dir: 'reviews', emoji: '🔍', prefix: 'Review' },
    { dir: 'approved', emoji: '✅', prefix: 'Approved' },
    { dir: 'inbox', emoji: '📥', prefix: 'Inbox' },
  ];

  return SUBDIRS.flatMap(({ dir, emoji, prefix }) =>
    listFiles(dir).slice(0, 3).map((f) => {
      // Short human label: "📝 Spec 0301" from "SPEC-1234567890.md"
      const short = f.name
        .replace(/\.(md|json|log)$/, '')
        .replace(/^(SPEC|ARCH|TASKS|REVIEW|FEATURE)-?/i, '')
        .slice(0, 12);
      return {
        label: `${emoji} ${prefix} ${short}`,
        key: encodeURIComponent(`${dir}/${f.name}`),
        subdir: dir,
        name: f.name,
      };
    })
  );
}
|
|
258
|
+
|
|
259
|
+
/**
 * Read a doc and return it split into 1400-char pages (safe for Telegram).
 *
 * @param {string} key - URI-encoded "{subdir}/{name}" identifier.
 * @param {number} [page=0] - requested page; clamped into valid range.
 * @returns {object|null} { name, subdir, content, page, totalPages, totalChars }
 *   or null when the key is malformed or the doc is missing.
 */
export function readDocPaged(key, page = 0) {
  const PAGE_SIZE = 1400;
  const decoded = decodeURIComponent(key);
  const slash = decoded.indexOf('/');
  if (slash < 0) return null;

  const subdir = decoded.slice(0, slash);
  const name = decoded.slice(slash + 1);
  const content = readFile(subdir, name);
  if (content === null) return null;

  const pages = [];
  for (let i = 0; i < content.length; i += PAGE_SIZE) {
    pages.push(content.slice(i, i + PAGE_SIZE));
  }
  if (pages.length === 0) pages.push('(empty document)');

  // Fix: clamp the requested page once and use the clamped index for both
  // the content and the reported page. The old code could report the last
  // page number while actually returning page-0 content when `page` was
  // out of range.
  const pageIndex = Math.min(Math.max(page, 0), pages.length - 1);

  return {
    name,
    subdir,
    content: pages[pageIndex],
    page: pageIndex,
    totalPages: pages.length,
    totalChars: content.length,
  };
}
|
|
288
|
+
|
|
289
|
+
// ─── Logs ─────────────────────────────────────────────────────────────────────
|
|
290
|
+
|
|
291
|
+
/**
 * Return up to 20 most recent log session filenames plus the tail of the
 * latest session (last `lines` non-blank lines).
 */
export function getLogsData(lines = 100) {
  const logsDir = resolve(getWorkflowDir(), 'logs');
  if (!existsSync(logsDir)) return { sessions: [], content: '' };

  const sessions = readdirSync(logsDir)
    .filter((f) => f.endsWith('.log'))
    .sort()
    .reverse()
    .slice(0, 20);
  if (sessions.length === 0) return { sessions: [], content: '' };

  // Tail the most recent session.
  const raw = readFileSync(resolve(logsDir, sessions[0]), 'utf8');
  const allLines = raw.split('\n').filter((l) => l.trim());
  const tail = allLines.slice(-lines).join('\n');

  return { sessions, content: tail, total: allLines.length };
}
|
|
311
|
+
|
|
312
|
+
/**
 * List up to 15 most recent log files for the logs keyboard.
 * Returns array of { label, key, name } — key is the list index as a string.
 */
export function listAvailableLogs() {
  const logsDir = resolve(getWorkflowDir(), 'logs');
  if (!existsSync(logsDir)) return [];

  const names = readdirSync(logsDir).filter((f) => f.endsWith('.log'));
  names.sort();
  names.reverse();

  return names.slice(0, 15).map((name, idx) => {
    // Short label: "session-20260304-001.log" → "🗒 20260304-001"
    const short = name.replace(/\.log$/, '').replace(/^session-?/i, '');
    return { label: `🗒 ${short}`, key: String(idx), name };
  });
}
|
|
331
|
+
|
|
332
|
+
/**
 * Read a log file by list index and return it split into 1400-char pages.
 * Only the last 200 non-blank lines are paged (logs can be huge).
 *
 * @param {number|string} idx - index into the sorted-descending .log list.
 * @param {number} [page=0] - requested page; clamped into valid range.
 * @returns {object|null} { name, content, page, totalPages, totalLines }
 *   or null when the logs dir or the indexed file does not exist.
 */
export function readLogPaged(idx, page = 0) {
  const PAGE_SIZE = 1400;
  const logsDir = resolve(getWorkflowDir(), 'logs');
  if (!existsSync(logsDir)) return null;

  const files = readdirSync(logsDir)
    .filter(f => f.endsWith('.log'))
    .sort()
    .reverse();

  const name = files[Number(idx)];
  if (!name) return null;

  const content = readFileSync(resolve(logsDir, name), 'utf8');
  // Tail most recent lines (logs can be huge)
  const lines = content.split('\n').filter(l => l.trim());
  const recent = lines.slice(-200).join('\n'); // last 200 lines

  const pages = [];
  for (let i = 0; i < recent.length; i += PAGE_SIZE) {
    pages.push(recent.slice(i, i + PAGE_SIZE));
  }
  if (pages.length === 0) pages.push('(empty log)');

  // Fix: clamp the requested page once and use the clamped index for both
  // the content and the reported page (the old code could report the last
  // page number while returning page-0 content for out-of-range input).
  const pageIndex = Math.min(Math.max(page, 0), pages.length - 1);

  return {
    name,
    content: pages[pageIndex],
    page: pageIndex,
    totalPages: pages.length,
    totalLines: lines.length,
  };
}
|
|
367
|
+
|
|
368
|
+
// ─── Approve ──────────────────────────────────────────────────────────────────
|
|
369
|
+
|
|
370
|
+
/**
 * Approve the current feature. Only valid at stage "review_complete".
 * Archives the latest review into approved/, advances the pipeline to
 * "approved" (next: deploy), and emits approval events.
 */
export async function runApprove() {
  const status = getStatus();
  if (status.stage !== 'review_complete') {
    return { success: false, error: `Cannot approve at stage "${status.stage}". Need review_complete.` };
  }

  try {
    // Archive the most recent review document into approved/ if one exists.
    const workflowDir = getWorkflowDir();
    const reviewsDir = resolve(workflowDir, 'reviews');
    const approvedDir = resolve(workflowDir, 'approved');
    if (existsSync(reviewsDir)) {
      mkdirSync(approvedDir, { recursive: true });
      const [latest] = readdirSync(reviewsDir).filter((f) => f.endsWith('.md')).sort().reverse();
      if (latest) {
        const text = readFileSync(resolve(reviewsDir, latest), 'utf8');
        writeFileSync(resolve(approvedDir, latest), text);
      }
    }

    updateStatus({ stage: 'approved', next: 'deploy' });
    bus.emitEvent('feature_approved', { feature: status.current_feature });
    audit(AUDIT_EVENTS.APPROVED, { feature: status.current_feature });
    return { success: true, feature: status.current_feature, status: getStatus() };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
397
|
+
|
|
398
|
+
// ─── Reject ───────────────────────────────────────────────────────────────────
|
|
399
|
+
|
|
400
|
+
/**
 * Reject the current feature with a reason. Valid from review_complete or
 * implementation_complete; an already-rejected feature re-dispatches the
 * existing review blockers to the Coder via runFix().
 */
export async function runReject(reason = 'Rejected via Web/Telegram') {
  const status = getStatus();

  // Already rejected — re-dispatch existing review blockers (no state change needed).
  if (status.stage === 'rejected') return runFix();

  const rejectableStages = ['review_complete', 'implementation_complete'];
  if (!rejectableStages.includes(status.stage)) {
    return { success: false, error: `Cannot reject at stage "${status.stage}".` };
  }

  try {
    updateStatus({ stage: 'rejected', rejection_reason: reason, next: 'fix' });
    bus.emitEvent('feature_rejected', { feature: status.current_feature, reason });
    audit(AUDIT_EVENTS.REJECTED, { feature: status.current_feature, reason });
    return { success: true, status: getStatus() };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
421
|
+
|
|
422
|
+
// ─── Deploy ───────────────────────────────────────────────────────────────────
|
|
423
|
+
|
|
424
|
+
/**
 * Deploy the current feature via the configured deploy command.
 * When no command is configured, the feature is simply marked deployed.
 *
 * @param {string} [testLevel='NoTestRun'] - appended as `--test-level` unless
 *   the configured command already specifies one.
 * @returns {Promise<object>} { success, stdout?, stderr?, error? }.
 */
export async function runDeploy(testLevel = 'NoTestRun') {
  // Fix: dropped an unused `getStatus()` call the old code made here —
  // its result was never referenced.
  const config = getConfig();
  const deployCmd = config.deploy?.command;

  try {
    audit(AUDIT_EVENTS.DEPLOY_STARTED, { testLevel });

    // If no deploy command is configured, simply mark the feature as done
    if (!deployCmd) {
      updateStatus({ stage: 'deployed', last_deploy: new Date().toISOString() });
      bus.emitEvent('deploy_success', { testLevel: 'none', message: 'No deploy command configured — feature marked as done.' });
      audit(AUDIT_EVENTS.DEPLOY_COMPLETED, { testLevel: 'none' });
      logActivity('PIPELINE', 'SUCCESS', 'Feature marked as deployed (no deploy command configured).');
      _scheduleAutoReset();
      return { success: true, stdout: 'No deploy command configured. Feature marked as done.' };
    }

    // Respect an explicit --test-level already present in the configured command.
    const cmd = deployCmd.includes('--test-level') ? deployCmd : `${deployCmd} --test-level ${testLevel}`;
    const result = await capture(cmd);
    const success = result.code === 0;

    if (success) {
      updateStatus({ stage: 'deployed', last_deploy: new Date().toISOString() });
      bus.emitEvent('deploy_success', { testLevel });
      audit(AUDIT_EVENTS.DEPLOY_COMPLETED, { testLevel });
      // Auto-reset pipeline after deployment — 60s delay so user sees the "deployed" notification
      _scheduleAutoReset();
    } else {
      bus.emitEvent('deploy_failed', { error: result.stderr.slice(0, 500), testLevel });
      audit(AUDIT_EVENTS.DEPLOY_FAILED, { testLevel, error: result.stderr.slice(0, 200) });
    }

    return {
      success,
      stdout: result.stdout.slice(0, 2000),
      stderr: result.stderr.slice(0, 2000),
    };
  } catch (err) {
    bus.emitEvent('deploy_failed', { error: err.message });
    return { success: false, error: err.message };
  }
}
|
|
467
|
+
|
|
468
|
+
// ─── New Feature (async — long running) ───────────────────────────────────────
|
|
469
|
+
|
|
470
|
+
/**
 * Create a new feature (or bug) and start the pipeline for it.
 *
 * @param {string} description - free-text request; must be >= 10 chars.
 * @param {string} [mode='manual'] - pipeline mode recorded on the status file.
 * @param {string} [type='feature'] - 'bug' prefixes the description with "Fix: ".
 * @param {object} [opts]
 * @param {boolean} [opts.awaitPipeline=false] - true: block until the pipeline
 *   finishes (bugfix loop); false: fire-and-forget (Telegram bot / web UI).
 * @returns {Promise<object>} { success, featureId?, status?, error? }.
 */
export async function runNewFeature(description, mode = 'manual', type = 'feature', { awaitPipeline = false } = {}) {
  if (!description || description.trim().length < 10) {
    return { success: false, error: 'Description must be at least 10 characters.' };
  }

  // Bugs are tagged with a "Fix: " prefix so downstream stages can tell them apart.
  const prefix = type === 'bug' ? 'Fix: ' : '';
  const fullDesc = `${prefix}${description.trim()}`;

  try {
    const timestamp = Math.floor(Date.now() / 1000);
    // Feature id derived from the current UTC time: "FEATURE-YYYYMMDDHHMMSS" (15 chars kept).
    const featureId = `FEATURE-${new Date().toISOString().slice(0, 19).replace(/[-:T]/g, '').slice(0, 15)}`;
    const workflowDir = getWorkflowDir();
    const root = getRootDir();
    // Config load is best-effort — a broken config file degrades to {}.
    const config = (() => { try { return getConfig(); } catch { return {}; } })();

    // Step 1: Create inbox file (pure JS — no shell script dependency)
    const inboxDir = resolve(workflowDir, 'inbox');
    mkdirSync(inboxDir, { recursive: true });
    writeFileSync(
      resolve(inboxDir, `${featureId}.md`),
      `# Feature Request: ${featureId}\n**Submitted:** ${new Date().toISOString()}\n**Status:** INBOX\n\n## Description\n${fullDesc}\n\n## Context\n- Project: ${config.name || 'Project'}\n`
    );

    // Clear cancellation flag from any previous /reset
    _pipelineCancelled = false;
    updateStatus({ current_feature: featureId, stage: 'inbox', pipeline_mode: mode, next: 'pm_spec', auto_review_count: 0 });

    // Record HEAD at feature submission so review.js can diff from this baseline.
    // Walk: try root (aicc.config.js dir), then root/sourceDir (actual git repo in monorepo).
    // Only set once — never overwrite so review always diffs from the original submission point.
    // NOTE(review): the `=== featureId` arm compares a commit field to a feature id —
    // presumably covers a legacy status format that stored the feature id there; confirm.
    const existingStartCommit = getStatus().feature_start_commit;
    if (!existingStartCommit || existingStartCommit === featureId) {
      const headSha = (() => {
        const candidates = [root];
        try {
          // Normalize "./src/" → "src" before resolving against root.
          const srcDir = (getConfig()?.review?.sourceDir || '').replace(/^\.\//, '').replace(/\/$/, '');
          if (srcDir) candidates.push(resolve(root, srcDir));
        } catch { /* ignore config errors */ }
        for (const dir of candidates) {
          try {
            const sha = execSync('git rev-parse HEAD', { cwd: dir }).toString().trim();
            if (sha) return sha;
          } catch { /* try next */ }
        }
        return null;
      })();
      if (headSha) updateStatus({ feature_start_commit: headSha });
    }

    bus.emitEvent('feature_created', { feature: featureId, description: fullDesc, mode });
    audit(AUDIT_EVENTS.FEATURE_CREATED, { featureId, description: fullDesc, mode, type });

    // awaitPipeline: true → block until pipeline finishes (used by bugfix loop)
    // awaitPipeline: false → fire-and-forget (used by Telegram bot / web UI)
    const pipelinePromise = _runPipelineAsync(featureId, fullDesc, timestamp, mode, workflowDir, root, config);

    if (awaitPipeline) {
      try {
        await pipelinePromise;
      } catch (err) {
        console.error('[Pipeline] Sync error:', err.message);
        bus.emitEvent('stage_error', {
          feature: featureId,
          stage: 'pipeline',
          error: err.message,
          message: `💥 Pipeline crashed:\n<b>${err.message.slice(0, 200)}</b>`,
        });
        updateStatus({ stage: 'crashed', error: err.message });
        return { success: false, featureId, error: err.message };
      }
      return { success: true, featureId, status: getStatus() };
    }

    // Non-blocking: catch errors async so they don't go unhandled
    pipelinePromise.catch(err => {
      console.error('[Pipeline] Async error:', err.message);
      bus.emitEvent('stage_error', {
        feature: featureId,
        stage: 'pipeline',
        error: err.message,
        message: `💥 Pipeline crashed unexpectedly:\n<b>${err.message.slice(0, 200)}</b>\n\nUse /status to check the current state, or start a new feature.`,
      });
      updateStatus({ stage: 'crashed', error: err.message });
    });

    return { success: true, featureId, status: getStatus() };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
560
|
+
|
|
561
|
+
// ─── Pipeline cancellation ───────────────────────────────────────────────────
// When /reset is called, this flag is set so heartbeat timers and notify() stop
// sending messages for the cancelled pipeline run.
// Module-level mutable state: read by the setInterval callbacks in
// withHeartbeat/withImplHeartbeat and by the notify() helper in
// _runPipelineAsync. It is only ever checked, never reset, in this chunk.
let _pipelineCancelled = false;
|
|
565
|
+
|
|
566
|
+
// ─── Background pipeline runner ───────────────────────────────────────────────
|
|
567
|
+
// Roles: PM (spec), Architect (arch+tasks), Coder (impl), Reviewer (review)
|
|
568
|
+
// Provider for each role is configurable via pipeline[] in aicc.config.js.
|
|
569
|
+
|
|
570
|
+
/**
 * Emits a 'heartbeat' event every `intervalMs` while an async operation runs.
 *
 * @param {Function} notify - (event, data) callback used to emit the heartbeat.
 * @param {string} label - Human-readable name of the operation in progress.
 * @param {number} [intervalMs=120000] - Tick interval in milliseconds.
 * @returns {Function} Stop function to call when the operation completes.
 */
function withHeartbeat(notify, label, intervalMs = 120000) {
  let ticks = 0;
  const heartbeatTimer = setInterval(() => {
    // A cancelled pipeline (/reset) silences and tears down the timer.
    if (_pipelineCancelled) {
      clearInterval(heartbeatTimer);
      return;
    }
    ticks += 1;
    const elapsedMins = Math.round((ticks * intervalMs) / 60000);
    notify('heartbeat', { message: `⏳ ${label} in progress... (${elapsedMins}min elapsed)` });
  }, intervalMs);
  return () => clearInterval(heartbeatTimer);
}
|
|
584
|
+
|
|
585
|
+
/**
 * Implementation-aware heartbeat — reads the tasks file to report real progress.
 * Shows which tasks are done, which is in progress, and completion percentage.
 *
 * @param {Function} notify - (event, data) callback used to emit heartbeats.
 * @param {string|null} tasksFilePath - Path to the TASKS markdown file, if any.
 * @param {number} [intervalMs=60000] - Tick interval in milliseconds.
 * @returns {Function} Stop function to call when implementation completes.
 */
function withImplHeartbeat(notify, tasksFilePath, intervalMs = 60000) {
  // Completed-task count observed on the previous tick; lets each heartbeat
  // highlight tasks finished since the last message.
  let previousDoneCount = 0;

  const buildProgressMessage = () => {
    try {
      if (!tasksFilePath || !existsSync(tasksFilePath)) {
        return `⚡ Coder is implementing... (no task list available)`;
      }
      const markdown = readFileSync(tasksFilePath, 'utf8');
      // Task lines look like "- [x] **TASK-001**: description | notes".
      const taskLines = markdown.match(/^- \[[ x]\] \*\*TASK-\d+\*\*.*$/gm) || [];
      const completedLines = markdown.match(/^- \[x\] \*\*TASK-\d+\*\*.*$/gm) || [];

      const total = taskLines.length;
      if (total === 0) return `⚡ Coder is implementing...`;

      const done = completedLines.length;
      const pct = Math.round((done / total) * 100);

      // The first unchecked task is treated as the one currently in progress.
      let currentLabel = '';
      const inProgress = taskLines.find(line => line.startsWith('- [ ]'));
      if (inProgress) {
        const parsed = inProgress.match(/\*\*(TASK-\d+)\*\*:\s*(.+?)(?:\s*\||$)/);
        if (parsed) currentLabel = `${parsed[1]}: ${parsed[2].trim().slice(0, 60)}`;
      }

      // 20-slot text progress bar.
      const filled = Math.round(pct / 5);
      const bar = '█'.repeat(filled) + '░'.repeat(20 - filled);

      const parts = [`⚡ <b>Implementation Progress</b>\n\n`];
      parts.push(`[${bar}] ${pct}% (${done}/${total} tasks)\n\n`);
      if (currentLabel) {
        parts.push(`🔨 <b>Working on:</b> ${currentLabel}\n`);
      }

      // List up to 3 tasks completed since the previous heartbeat. Skipped on
      // the very first tick, while previousDoneCount is still 0.
      if (done > previousDoneCount && previousDoneCount > 0) {
        for (const line of completedLines.slice(previousDoneCount).slice(-3)) {
          const parsed = line.match(/\*\*(TASK-\d+)\*\*:\s*(.+?)(?:\s*\||$)/);
          if (parsed) parts.push(`✅ ${parsed[1]}: ${parsed[2].trim().slice(0, 50)}\n`);
        }
      }

      previousDoneCount = done;
      return parts.join('');
    } catch {
      // Any read/parse failure degrades to a generic status line.
      return `⚡ Coder is implementing...`;
    }
  };

  const progressTimer = setInterval(() => {
    if (_pipelineCancelled) {
      clearInterval(progressTimer);
      return;
    }
    notify('heartbeat', { message: buildProgressMessage() });
  }, intervalMs);

  return () => clearInterval(progressTimer);
}
|
|
650
|
+
|
|
651
|
+
// ─── Pipeline Metrics ─────────────────────────────────────────────────────────
|
|
652
|
+
|
|
653
|
+
/**
 * Initialize metrics for a new pipeline run.
 *
 * Builds a fresh metrics object, persists it via updateStatus, and returns it.
 * Property order is kept stable so serialized status output stays consistent.
 *
 * @param {string} featureId - Identifier of the feature this run belongs to.
 * @returns {object} The freshly created metrics object.
 */
function initMetrics(featureId) {
  const metrics = {};
  metrics.startedAt = new Date().toISOString();
  metrics.featureId = featureId;
  metrics.stages = {};       // per-stage timings/tokens, keyed by stage name
  metrics.totalDurationMs = 0;
  metrics.totalTokens = 0;
  metrics.totalCost = 0;
  metrics.errors = [];
  updateStatus({ metrics });
  return metrics;
}
|
|
669
|
+
|
|
670
|
+
/**
 * Record a stage start in metrics.
 *
 * Best-effort: any failure reading or persisting status is swallowed so
 * metrics bookkeeping can never break the pipeline itself.
 *
 * @param {string} stageName - Stage key within metrics.stages.
 * @param {string} [model='unknown'] - Provider/model label for this stage.
 */
function recordStageStart(stageName, model = 'unknown') {
  try {
    const metrics = getStatus().metrics || {};
    if (!metrics.stages) metrics.stages = {};
    const existing = metrics.stages[stageName] || {};
    metrics.stages[stageName] = {
      ...existing,
      startedAt: new Date().toISOString(),
      model,
    };
    updateStatus({ metrics });
  } catch { /* non-fatal — metrics are best-effort */ }
}
|
|
686
|
+
|
|
687
|
+
/**
 * Record a stage completion in metrics.
 *
 * Computes the stage duration from the recorded startedAt (duration ≈ 0 when
 * no start was recorded), updates run-level totals, and persists the result.
 * Best-effort: failures are swallowed so metrics never break the pipeline.
 *
 * @param {string} stageName - Stage key within metrics.stages.
 * @param {number} [tokensUsed=0] - Tokens consumed by this stage.
 * @param {number} [retries=0] - Retry count for this stage.
 * @param {string} [model='unknown'] - Provider/model label for this stage.
 */
function recordStageEnd(stageName, tokensUsed = 0, retries = 0, model = 'unknown') {
  try {
    const metrics = getStatus().metrics || {};
    if (!metrics.stages) metrics.stages = {};
    const existing = metrics.stages[stageName] || {};

    // Missing startedAt falls back to "now", yielding a ~0ms duration.
    const startMs = existing.startedAt ? new Date(existing.startedAt).getTime() : Date.now();
    const durationMs = Date.now() - startMs;

    metrics.stages[stageName] = {
      ...existing,
      completedAt: new Date().toISOString(),
      durationMs,
      tokensUsed,
      retries,
      model,
    };

    // Run-level totals: wall-clock since the run started, plus token sum.
    metrics.totalDurationMs = Date.now() - new Date(metrics.startedAt || Date.now()).getTime();
    metrics.totalTokens = (metrics.totalTokens || 0) + tokensUsed;

    updateStatus({ metrics });
  } catch { /* non-fatal — metrics are best-effort */ }
}
|
|
714
|
+
|
|
715
|
+
/**
 * Record an error in pipeline metrics.
 *
 * Best-effort: any failure reading or persisting status is swallowed so
 * metrics bookkeeping can never break the pipeline itself.
 *
 * @param {string} code - Machine-readable error code.
 * @param {string} message - Error message; truncated to 200 characters.
 * @param {string} stageName - Stage the error occurred in.
 */
function recordMetricsError(code, message, stageName) {
  try {
    const status = getStatus();
    const metrics = status.metrics || {};
    if (!metrics.errors) metrics.errors = [];
    metrics.errors.push({
      code,
      // Coerce before slicing: a non-string `message` (e.g. undefined)
      // previously threw on .slice() and the catch below silently dropped
      // the entire error record.
      message: String(message ?? '').slice(0, 200),
      stage: stageName,
      timestamp: new Date().toISOString(),
    });
    updateStatus({ metrics });
  } catch { /* non-fatal — metrics are best-effort */ }
}
|
|
732
|
+
|
|
733
|
+
// ─── Retry from Checkpoint ────────────────────────────────────────────────────
|
|
734
|
+
|
|
735
|
+
/**
 * Retry the current pipeline stage from the last checkpoint.
 * If fresh=true, clears checkpoint and retries from scratch.
 *
 * @param {boolean} [fresh=false] - Clear checkpoints instead of resuming.
 * @returns {Promise<{success: boolean, message?: string, featureId?: string, error?: string}>}
 */
export async function retryFromCheckpoint(fresh = false) {
  const { current_feature: featureId } = getStatus();
  if (!featureId) {
    return { success: false, error: 'No active feature in pipeline.' };
  }

  // fresh=true only clears the saved checkpoints and returns.
  // NOTE(review): this branch does not itself re-run the stage — presumably
  // the caller triggers the retry afterwards; confirm against the CLI flow.
  if (fresh) {
    clearCheckpoints(featureId);
    return { success: true, message: `Checkpoints cleared for ${featureId}. Retrying stage from scratch.`, featureId };
  }

  // Otherwise trigger auto-resume, which picks up from the saved checkpoints.
  try {
    await autoResumePipeline();
    return { success: true, message: `Retrying pipeline from last checkpoint for ${featureId}.`, featureId };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
759
|
+
|
|
760
|
+
// ─── Dry Run Mode ─────────────────────────────────────────────────────────────
|
|
761
|
+
|
|
762
|
+
/**
 * Run a simulated pipeline without making actual AI calls.
 * Validates the full flow: checkpoint logic, status transitions, notifications, cost tracking.
 *
 * @param {string} description - Free-text feature description for the fake inbox entry.
 * @returns {Promise<{success: boolean, featureId: string, message: string, metrics: object}>}
 */
export async function runDryRun(description) {
  const featureId = `DRYRUN-${Date.now()}`;
  const workflowDir = getWorkflowDir();
  const notify = (event, data) => bus.emitEvent(event, { feature: featureId, ...data });

  // Create an inbox entry so the dry run mirrors a real feature intake.
  const inboxDir = resolve(workflowDir, 'inbox');
  mkdirSync(inboxDir, { recursive: true });
  writeFileSync(
    resolve(inboxDir, `${featureId}.md`),
    `# Dry Run Feature: ${featureId}\n**Description:** ${description}\n**Status:** DRY-RUN\n`
  );
  updateStatus({ current_feature: featureId, stage: 'inbox', pipeline_mode: 'dry-run' });
  initMetrics(featureId);

  // Simulated stage plan — the three arrays are index-aligned.
  const stages = ['pm_spec', 'explore', 'arch', 'impl'];
  // Fixed: last entry was 'claude-sonnet-4.6', inconsistent with the
  // 'claude-sonnet-4-6' model id used everywhere else in this file.
  const models = ['gemini-2.5-pro', 'claude-sonnet-4-6', 'claude-sonnet-4-6', 'claude-sonnet-4-6'];
  const tokens = [3000, 2000, 6000, 8000];

  for (let i = 0; i < stages.length; i++) {
    const stageName = stages[i];
    const model = models[i];
    const tokenCount = tokens[i];

    console.log(`[DRY-RUN] Simulating ${stageName} — would use ${model} with ~${tokenCount.toLocaleString()} tokens`);
    notify('stage_start', { stage: stageName, message: `[DRY-RUN] Simulating ${stageName}...` });

    recordStageStart(stageName, model);
    await new Promise(r => setTimeout(r, 2000)); // 2s simulated delay

    // Save checkpoint so resume logic is exercised too.
    saveCheckpoint(featureId, stageName, stageName, { dryRun: true, model });
    recordStageEnd(stageName, tokenCount, 0, model);

    notify('stage_complete', { stage: stageName, message: `[DRY-RUN] ${stageName} complete` });
  }

  updateStatus({ stage: 'dry_run_complete' });

  return {
    success: true,
    featureId,
    message: `Dry run complete. Simulated ${stages.length} stages. Total estimated tokens: ${tokens.reduce((a, b) => a + b, 0).toLocaleString()}`,
    metrics: getStatus().metrics,
  };
}
|
|
813
|
+
|
|
814
|
+
// ─── Auto Review+Fix Loop ─────────────────────────────────────────────────────
const AUTO_REVIEW_MAX = 3; // max review+fix cycles before asking user to confirm
const _loopConfirmations = new Map(); // featureId → resolve(boolean)

/**
 * Resolve a pending auto-review-loop confirmation for a feature.
 * No-op when nothing is pending for `featureId`; otherwise the stored
 * resolver is removed from the map first, then invoked.
 *
 * @param {string} featureId - Feature whose confirmation is being answered.
 * @param {boolean} continueLoop - true to keep looping, false to stop.
 */
export function resolveLoopConfirmation(featureId, continueLoop) {
  const pendingResolve = _loopConfirmations.get(featureId);
  if (!pendingResolve) return;
  _loopConfirmations.delete(featureId);
  pendingResolve(continueLoop);
}
|
|
822
|
+
|
|
823
|
+
/**
 * Ask the user whether the auto-review loop should keep going after
 * AUTO_REVIEW_MAX cycles. Resolves true to continue, false to stop.
 * Auto-resolves false after 2 minutes if the user doesn't respond.
 *
 * @param {Function} notify - (event, data) callback for the confirm request.
 * @param {string} featureId - Feature the confirmation belongs to.
 * @returns {Promise<boolean>} Whether to continue the review loop.
 */
async function _requestLoopConfirmation(notify, featureId) {
  return new Promise(resolve => {
    // Auto-stop after 2 minutes if the user doesn't respond.
    const timeoutTimer = setTimeout(() => {
      if (_loopConfirmations.has(featureId)) {
        _loopConfirmations.delete(featureId);
        resolve(false);
      }
    }, 120000);
    // Wrap the resolver so a user response also cancels the fallback timer.
    // Previously the pending 2-minute setTimeout was never cleared after the
    // user answered, keeping the Node event loop alive for the full timeout.
    _loopConfirmations.set(featureId, (continueLoop) => {
      clearTimeout(timeoutTimer);
      resolve(continueLoop);
    });
    notify('loop_confirm_request', {
      featureId,
      message: `⚠️ Auto-pilot reached <b>${AUTO_REVIEW_MAX} review cycles</b> without APPROVED verdict.\n\nContinue fixing & reviewing, or stop?`,
    });
  });
}
|
|
839
|
+
|
|
840
|
+
/**
 * Review → fix loop: runs up to AUTO_REVIEW_MAX times.
 * On rejection, the Coder is asked to fix blockers before re-reviewing.
 * At the limit, asks user to confirm continuing or stopping.
 *
 * Exits via return on: APPROVED verdict (after deploy attempt), repeated
 * review failures, user declining to continue, a non-AILimitError fix
 * failure, or a no-op fix (empty git diff).
 *
 * @param {Function} notify - (event, data) callback for pipeline events.
 * @param {string} featureId - Feature being reviewed.
 */
async function _runAutoReviewLoop(notify, featureId) {
  const status = getStatus();
  // Cycle counter is persisted in status so a restarted process resumes it.
  let runs = status.auto_review_count || 0;

  while (true) {
    // ── Review ────────────────────────────────────────────────────────────────
    updateStatus({ stage: 'review' });
    notify('stage_start', { stage: 'review', message: `🔍 Reviewer is reviewing implementation... (cycle ${runs + 1}/${AUTO_REVIEW_MAX})` });
    const stopReviewHB = withHeartbeat(notify, 'code review', 120000);
    const reviewResult = await runReview();
    stopReviewHB();
    runs++;
    updateStatus({ auto_review_count: runs });

    // A failed review run (not a REJECTED verdict) skips this cycle; the
    // pipeline only stops once the failure count reaches AUTO_REVIEW_MAX.
    if (!reviewResult.success) {
      notify('stage_error', { stage: 'review', error: reviewResult.error, message: `❌ Review failed: ${reviewResult.error?.slice(0, 100)}\n\n⚠️ Skipping this review cycle and continuing...` });
      // Don't stop the loop — allow continuing to next cycle or fallback
      if (runs >= AUTO_REVIEW_MAX) {
        notify('stage_error', { stage: 'review', message: `❌ Review failed ${runs} times. Stopping pipeline.` });
        return;
      }
      continue;
    }

    const review = getLatestReview();
    const verdict = review?.verdict || 'UNKNOWN';
    const reviewSummary = review ? review.content.slice(0, 500) + (review.content.length > 500 ? '\n\n...(truncated, use /docs to read full review)' : '') : '';

    notify('stage_complete', {
      stage: 'review_complete',
      message: `🔍 Review cycle ${runs}/${AUTO_REVIEW_MAX}: <b>${verdict}</b>\n\n${reviewSummary}`,
    });

    // ── APPROVED → auto-approve and done ─────────────────────────────────────
    if (verdict === 'APPROVED') {
      const approveResult = await runApprove();
      if (approveResult.success) {
        updateStatus({ auto_review_count: 0 });
        notify('stage_complete', { stage: 'approved', message: `✅ Feature approved — deploying now...` });
        // Auto-deploy in auto mode
        updateStatus({ stage: 'deploying' });
        const stopDeployHB = withHeartbeat(notify, 'deploy', 120000);
        const deployResult = await runDeploy('RunLocalTests');
        stopDeployHB();
        if (deployResult.success) {
          updateStatus({ stage: 'deployed' });
          notify('stage_complete', { stage: 'deployed', message: `🚀 Deployment complete! Feature shipped! 🎉` });
        } else {
          // Deploy failure is reported but does not roll back the approval.
          notify('stage_error', { stage: 'deploy', error: deployResult.error, message: `⚠️ Auto-deploy failed: ${(deployResult.error || deployResult.stderr || 'unknown').slice(0, 200)}\n\nUse /deploy to retry.` });
        }
      } else {
        notify('stage_error', { stage: 'approve', error: approveResult.error, message: `⚠️ Auto-approve failed: ${approveResult.error}` });
      }
      return;
    }

    // ── Max cycles hit → ask user to confirm continuing ───────────────────────
    if (runs >= AUTO_REVIEW_MAX) {
      const continueLoop = await _requestLoopConfirmation(notify, featureId);
      if (!continueLoop) {
        updateStatus({ pipeline_mode: 'manual', auto_review_count: 0 });
        notify('stage_complete', {
          stage: 'review_rejected',
          message: `🛑 Auto-pilot stopped after ${runs} review cycles.\n\nReview verdict: <b>${verdict}</b>\n\nCheck /docs for details. Switched to manual mode — use /reject to send specific fixes.`,
        });
        return;
      }
      runs = 0; // Reset counter for next batch
      updateStatus({ auto_review_count: 0 });
    }

    // ── REJECTED → ask Coder to fix blockers, then loop ────────────────────────
    const blockers = review ? extractBlockers(review.content) : 'See latest review in /docs.';
    updateStatus({ stage: 'rejected' });
    notify('stage_start', { stage: 'fix', message: `🔧 Coder is fixing blockers from review cycle ${runs}...\n\n${blockers.slice(0, 300)}` });
    const stopFixHB = withHeartbeat(notify, 'fixing review blockers', 120000);
    try {
      const fixPrompt =
        `You are the Coder. The code review was REJECTED.\n\n` +
        `Fix ALL the following blockers identified in the code review:\n\n${blockers}\n\n` +
        `Make the actual code changes to fix these issues. Do not explain — just fix.`;
      await runPipelineStage('implement', fixPrompt, { featureId, stage: 'fix' });
    } catch (err) {
      stopFixHB();
      if (err instanceof AILimitError) {
        // All AI models exhausted — schedule retry with increasing backoff (15m, 30m, 45m…)
        const retryCount = getStatus().exhaustion_retry_count || 0;
        const attempt = retryCount + 1;
        const delayMs = attempt * 15 * 60 * 1000;
        const delayMins = attempt * 15;
        updateStatus({ exhaustion_retry_count: attempt });
        notify('stage_error', {
          stage: 'fix',
          error: err.message,
          message:
            `⏳ <b>All AI models exhausted</b> (attempt ${attempt}).\n\n` +
            `Will retry automatically in <b>${delayMins} minutes</b>...\n` +
            `(next attempt in ${delayMins}m → ${delayMins + 15}m → etc.)\n\n` +
            `${err.message.slice(0, 120)}`,
        });
        // NOTE(review): this await blocks the loop in-process for up to
        // `delayMs` (15m+); a process restart during the wait relies on the
        // persisted exhaustion_retry_count — confirm the resume path covers it.
        await new Promise(r => setTimeout(r, delayMs));
        notify('stage_start', { stage: 'fix', message: `🔄 Resuming after ${delayMins}m wait — retrying review cycle...` });
        continue; // loop back to fresh review at the top
      }
      notify('stage_error', { stage: 'fix', error: err.message, message: `❌ Fix failed: ${err.message.slice(0, 100)}` });
      return;
    }
    stopFixHB();
    updateStatus({ exhaustion_retry_count: 0 }); // reset backoff counter on successful fix

    // ── Diff check: did the Coder actually change anything? ──────────────────────
    // If no files changed, the fix was a no-op — stop looping to avoid wasting
    // another review cycle (and AI premium requests).
    try {
      const diffResult = await capture('git', ['diff', '--stat', 'HEAD'], { timeout: 10000 });
      const diffOutput = (diffResult.stdout || '').trim();
      if (!diffOutput) {
        notify('stage_error', {
          stage: 'fix',
          message: `⚠️ Fix produced no changes after review cycle ${runs}.\n\n` +
            `Stopping auto-review loop — manual intervention needed.\n` +
            `Use /reject <specific instructions> to guide the Coder.`,
        });
        updateStatus({ pipeline_mode: 'manual', auto_review_count: 0 });
        return;
      }
    } catch { /* git diff failed — continue with review anyway */ }

    notify('stage_complete', { stage: 'fix_complete', message: `🔧 Coder applied fixes — re-reviewing...` });
  }
}
|
|
976
|
+
|
|
977
|
+
function extractBlockers(reviewContent) {
|
|
978
|
+
// Extract the Blockers section from the review markdown
|
|
979
|
+
const parts = reviewContent.split(/##[^#]*Blocker/i);
|
|
980
|
+
if (parts.length < 2) return reviewContent.slice(0, 800);
|
|
981
|
+
const blockerSection = parts[1].split(/^##/m)[0].trim();
|
|
982
|
+
return blockerSection || reviewContent.slice(0, 800);
|
|
983
|
+
}
|
|
984
|
+
|
|
985
|
+
/**
 * Background pipeline runner: PM spec → architecture + tasks → implementation.
 * Each stage is checkpointed so a restarted process can resume; stage
 * providers come from the pipeline[] config via getPipelineStage().
 *
 * @param {string} featureId - Feature being built (inbox file must exist).
 * @param {string} fullDesc - Full feature description (fallback spec content).
 * @param {number} timestamp - Unix seconds used in SPEC/ARCH/TASKS filenames.
 * @param {string} mode - 'auto' runs implementation immediately; otherwise waits for /approve.
 * @param {string} workflowDir - Root of the .ai-workflow directory.
 * @param {string} root - Project root (for GEMINI.md / CLAUDE.md context).
 * @param {object} config - Project config; config.name feeds the prompts.
 */
async function _runPipelineAsync(featureId, fullDesc, timestamp, mode, workflowDir, root, config) {
  const inboxContent = readFileSync(resolve(workflowDir, `inbox/${featureId}.md`), 'utf8');
  const projectName = config.name || 'project';
  initMetrics(featureId);

  // Correlation ID for tracing this pipeline run
  // NOTE(review): correlationId is assigned but never referenced again within
  // this function — presumably consumed by logging context elsewhere; confirm.
  let correlationId;
  try { correlationId = generateCorrelationId(); } catch (e) { /* correlation is optional */ }
  const pipelineStartTime = Date.now();

  // Notify helper — sends both a bus event and updates status with a message field
  // Suppressed after /reset to prevent stale messages from lingering pipeline processes
  const notify = (event, data) => {
    if (_pipelineCancelled) return;
    bus.emitEvent(event, { feature: featureId, ...data });
  };

  // ── Step 2: PM spec (config-driven provider) ────────────────────────────────
  const specsDir = resolve(workflowDir, 'specs');
  mkdirSync(specsDir, { recursive: true });
  const specFile = resolve(specsDir, `SPEC-${timestamp}.md`);
  const pmEntry = getPipelineStage('pm') || { provider: 'claude', model: 'claude-sonnet-4-6' };

  // Check for existing checkpoint
  const specCheckpoint = loadCheckpoint(featureId, 'pm_spec');
  if (specCheckpoint && specCheckpoint.outputs?.specFile && existsSync(resolve(workflowDir, `specs/SPEC-${timestamp}.md`))) {
    notify('stage_complete', { stage: 'spec_complete', message: `📝 Resuming from checkpoint: pm_spec (saved ${specCheckpoint.completedAt})` });
    console.log(`[PIPELINE] Resuming from checkpoint: pm_spec (saved ${specCheckpoint.completedAt})`);
  } else {
    await runBeforeStage('spec', { featureId, inboxContent, timestamp });
    updateStatus({ stage: 'spec' });
    notify('stage_start', { stage: 'spec', message: `📝 PM is writing the feature spec...` });
    recordStageStart('spec', pmEntry.provider);
    const stopSpecHB = withHeartbeat(notify, 'PM spec', 120000);

    // Build spec prompt — use GEMINI.md, CLAUDE.md, or generic context
    // (GEMINI.md wins when both exist; up to 3000 chars of context).
    const geminiMd = resolve(root, 'GEMINI.md');
    const claudeMd = resolve(root, 'CLAUDE.md');
    const contextFile = existsSync(geminiMd) ? geminiMd : existsSync(claudeMd) ? claudeMd : null;
    const contextContent = contextFile ? readFileSync(contextFile, 'utf8').slice(0, 3000) : '';

    let specPrompt =
      `${contextContent}\n\n## Feature Request\n${inboxContent}\n\n` +
      `Create a complete PM spec with User Stories, Acceptance Criteria, Technical Risks, Dependencies, and Priority for ${projectName}.\n` +
      `Write the spec document to: .ai-workflow/specs/SPEC-${timestamp}.md`;

    // Inject skills and persona for spec stage
    try { specPrompt = injectSkills(specPrompt, 'spec'); } catch { /* skills are optional */ }
    try {
      const persona = loadPersona('pm');
      if (persona) specPrompt = injectPersona(specPrompt, persona);
    } catch { /* personas are optional */ }

    try {
      await runPipelineStage('pm', specPrompt, { featureId, stage: 'pm', outputFile: specFile });
      stopSpecHB();

      // Verify spec was created — if provider wrote to stdout (redirect) or to file (tools)
      // A file under 50 chars is treated the same as a missing file.
      if (!existsSync(specFile) || readFileSync(specFile, 'utf8').trim().length < 50) {
        // Check if the provider created a file with a different name pattern
        const specFiles = readdirSync(specsDir).filter(f => f.endsWith('.md') && !f.includes('stderr') && !f.includes('challenger'));
        const latestSpec = specFiles.sort().pop();
        if (latestSpec && latestSpec !== `SPEC-${timestamp}.md`) {
          const altSpecPath = resolve(specsDir, latestSpec);
          const altContent = readFileSync(altSpecPath, 'utf8').trim();
          if (altContent.length > 50) {
            // Copy the alternate file's content into the canonical spec path.
            writeFileSync(specFile, altContent);
          }
        }
      }

      // Final fallback: create minimal spec if nothing was produced
      if (!existsSync(specFile) || readFileSync(specFile, 'utf8').trim().length < 50) {
        writeFileSync(specFile,
          `# SPEC-${timestamp}\n**Feature: ${fullDesc}**\n\n**Date:** ${new Date().toISOString().slice(0, 10)}\n**Status:** Pending Architecture\n\n${fullDesc}\n`
        );
        notify('stage_complete', { stage: 'spec_complete', message: `📝 Minimal spec created` });
      } else {
        notify('stage_complete', { stage: 'spec_complete', message: `📝 Spec complete` });
      }
    } catch (specErr) {
      stopSpecHB();
      console.error('[Pipeline] Spec stage failed:', specErr.message);
      // Create minimal spec so pipeline can continue
      writeFileSync(specFile,
        `# SPEC-${timestamp}\n**Feature: ${fullDesc}**\n\n**Date:** ${new Date().toISOString().slice(0, 10)}\n**Status:** Pending Architecture\n\n${fullDesc}\n`
      );
      notify('stage_complete', { stage: 'spec_complete', message: `📝 Minimal spec created (AI unavailable)` });
    }

    // Spec checkpoint is saved even on the fallback path — a minimal spec
    // still counts as a completed spec stage.
    saveCheckpoint(featureId, 'spec', 'pm_spec', { specFile: `SPEC-${timestamp}.md`, timestamp });
    recordStageEnd('spec', 0, 0, pmEntry.provider);
    await runAfterStage('spec', { featureId, specFile, timestamp });
  }
  updateStatus({ stage: 'spec_complete', latest_spec: `SPEC-${timestamp}`, next: 'architect' });

  // ── Step 3: Architecture + tasks (config-driven provider) ───────────────────
  mkdirSync(resolve(workflowDir, 'architecture'), { recursive: true });
  mkdirSync(resolve(workflowDir, 'tasks'), { recursive: true });
  const specContent = existsSync(specFile) ? readFileSync(specFile, 'utf8') : fullDesc;
  const archEntry = getPipelineStage('architect') || { provider: 'claude', model: 'claude-opus-4-6' };

  const archCheckpoint = loadCheckpoint(featureId, 'arch');
  if (archCheckpoint) {
    notify('stage_complete', { stage: 'arch_complete', message: `🏗 Resuming from checkpoint: arch (saved ${archCheckpoint.completedAt})` });
    console.log(`[PIPELINE] Resuming from checkpoint: arch (saved ${archCheckpoint.completedAt})`);
  } else {
    const claudeMd = resolve(root, 'CLAUDE.md');
    const claudeCtx = existsSync(claudeMd) ? `\n\n## Project Context\n${readFileSync(claudeMd, 'utf8').slice(0, 3000)}` : '';

    let archPrompt =
      `You are the System Architect for ${projectName}.${claudeCtx}\n\n` +
      `Read this feature spec and:\n` +
      `1. Write a detailed architecture doc to: .ai-workflow/architecture/ARCH-${timestamp}.md\n` +
      `   Include: Data flow, new classes/components, schema changes\n` +
      `2. Write step-by-step implementation tasks to: .ai-workflow/tasks/TASKS-${timestamp}.md\n` +
      `   Each task must reference exact file paths and function names\n\n` +
      `## Feature Spec\n${specContent}`;

    // Inject skills and persona for architect stage
    try { archPrompt = injectSkills(archPrompt, 'arch'); } catch { /* skills are optional */ }
    try {
      const persona = loadPersona('architect');
      if (persona) archPrompt = injectPersona(archPrompt, persona);
    } catch { /* personas are optional */ }

    await runBeforeStage('arch', { featureId, specContent, timestamp });
    updateStatus({ stage: 'architecture' });
    notify('stage_start', { stage: 'arch', message: `🏗 Architect is designing the architecture...` });
    recordStageStart('arch', archEntry.provider);
    const stopArchHB = withHeartbeat(notify, 'architecture', 120000);
    let archSuccess = false;
    try {
      await runPipelineStage('architect', archPrompt, { featureId, stage: 'arch' });
      stopArchHB();

      // Verify architecture doc was created
      // (any .md in either directory counts — not just this timestamp's files).
      const archDir = resolve(workflowDir, 'architecture');
      const archFiles = readdirSync(archDir).filter(f => f.endsWith('.md'));
      const tasksDir = resolve(workflowDir, 'tasks');
      const taskFiles = existsSync(tasksDir) ? readdirSync(tasksDir).filter(f => f.endsWith('.md')) : [];
      if (archFiles.length === 0 && taskFiles.length === 0) {
        console.warn('[Pipeline] Architecture stage completed but no ARCH/TASKS files found');
        notify('stage_complete', { stage: 'arch_complete', message: `🏗 Architecture complete (no docs written — provider may need file write permissions)` });
      } else {
        archSuccess = true;
        notify('stage_complete', { stage: 'arch_complete', message: `🏗 Architecture + tasks complete` });
      }
    } catch (archErr) {
      stopArchHB();
      notify('stage_error', { stage: 'arch', error: archErr.message, message: `❌ Architecture failed: ${archErr.message.slice(0, 100)}` });
      console.error('[Pipeline] Architecture stage failed:', archErr.message);
    }
    // Only save checkpoint if architecture files were actually created
    if (archSuccess) {
      saveCheckpoint(featureId, 'arch', 'arch', { timestamp });
    }
    recordStageEnd('arch', 0, 0, archEntry.provider);
    await runAfterStage('arch', { featureId, timestamp });
  }
  updateStatus({ stage: 'arch_complete', next: 'implement' });

  // ── Step 4: Implementation (config-driven provider) ─────────────────────────
  const implEntry = getPipelineStage('implement') || { provider: 'claude', model: 'claude-sonnet-4-6' };
  await runBeforeStage('impl', { featureId, mode, timestamp });
  recordStageStart('impl', implEntry.provider);
  if (mode === 'auto') {
    const implResult = await runImplementation();
    if (!implResult.success) {
      // Implementation failure ends the pipeline here: no impl checkpoint,
      // no after-stage hook, no achievement tracking.
      console.error('[Pipeline] Implementation failed:', implResult.error);
      return;
    }
  } else {
    // Manual mode: stop after architecture and wait for /approve.
    const tasksFile = getLatestFilePath('tasks');
    notify('tasks_ready', { tasksFile: tasksFile || null, message: `✅ Architecture complete! Tasks are ready.\nUse /approve to start implementation, or /review for a code review first.` });
  }
  recordStageEnd('impl', 0, 0, implEntry.provider);
  saveCheckpoint(featureId, 'impl', 'impl', { mode, timestamp });
  await runAfterStage('impl', { featureId, mode, timestamp });

  // Track achievement after pipeline completion
  try {
    const notifications = trackPipelineCompletion({
      action: 'feature',
      duration: Date.now() - pipelineStartTime,
      cost: 0,
      model: implEntry.provider,
      success: true,
      reviewIssues: 0,
    });
    if (notifications.length > 0) {
      notifications.forEach(n => bus.emitEvent('pipeline-event', { type: 'achievement', message: n }));
    }
  } catch { /* achievements are optional */ }
}
|
|
1180
|
+
|
|
1181
|
+
// ─── Standalone Implementation Runner ───────────────────────────────────────
// Runs the implementation step standalone — uses configured provider from pipeline.
// Used by /implement command and autoResumePipeline.
// Works from stage arch_complete or implementation_failed.
/**
 * Runs the implementation stage on its own.
 *
 * Preconditions: a feature must be active and the pipeline stage must be
 * 'arch_complete' or 'implementation_failed'. The prompt is built from the
 * latest tasks file, falling back to the latest spec file.
 *
 * @returns {Promise<{success: boolean, error?: string}>} never throws —
 *   failures are reported via the `error` field and the status file.
 */
export async function runImplementation() {
  const status = getStatus();
  const allowedStages = ['arch_complete', 'implementation_failed'];
  if (!allowedStages.includes(status.stage)) {
    return { success: false, error: `Cannot start implementation from stage "${status.stage}". Must be arch_complete or implementation_failed.` };
  }

  const featureId = status.current_feature;
  if (!featureId) {
    return { success: false, error: 'No active feature.' };
  }

  const tasksFile = getLatestFilePath('tasks');
  const specFile = getLatestFilePath('specs');

  // Determine content source: prefer tasks file, fall back to spec file.
  // Content is truncated to 4000 chars to keep the provider prompt bounded.
  let implContent = null;
  let implSource = null;
  if (tasksFile && existsSync(tasksFile)) {
    implContent = readFileSync(tasksFile, 'utf8').slice(0, 4000);
    implSource = 'tasks';
  } else if (specFile && existsSync(specFile)) {
    implContent = readFileSync(specFile, 'utf8').slice(0, 4000);
    implSource = 'spec';
    console.log('[Pipeline] No tasks file found — falling back to spec file for implementation');
  } else {
    return { success: false, error: 'No tasks or spec file found. Cannot start implementation.' };
  }

  const config = getConfig();
  const projectName = config.name || 'this project';
  // Notifications are suppressed once the pipeline is cancelled (see runReset).
  const notify = (event, data) => {
    if (_pipelineCancelled) return;
    bus.emitEvent(event, { feature: featureId, ...data });
  };

  const implPrompt = implSource === 'tasks'
    ? `You are the Coder for ${projectName}.\nImplement ALL the tasks in this file. Make the actual code changes:\n\n${implContent}`
    : `You are the Coder for ${projectName}.\nThe architecture stage did not produce a tasks file. Use this feature spec to implement the feature directly. Create all necessary files and make all code changes described:\n\n${implContent}`;

  const startMsg = implSource === 'tasks'
    ? `⚡ Coder is implementing the tasks...`
    : `⚡ Coder is implementing from spec (no tasks file found)...`;
  updateStatus({ stage: 'implementation' });
  notify('stage_start', { stage: 'impl', message: startMsg });
  const stopImplHB = withImplHeartbeat(notify, tasksFile, 120000);
  try {
    await runPipelineStage('implement', implPrompt, { featureId, stage: 'impl' });
    stopImplHB();

    // Self-healing: run tests and auto-fix (best-effort — never fatal here)
    try {
      const healResult = await selfHealPipeline(featureId, config);
      if (!healResult.skipped && !healResult.success) {
        logActivity('PIPELINE', '⚠️ Self-heal could not fix all test failures — proceeding to review', 'warn');
      }
    } catch (e) {
      logActivity('PIPELINE', `Self-heal error: ${e.message}`, 'error');
    }

    updateStatus({ stage: 'implementation_complete', next: 'review' });
    notify('stage_complete', { stage: 'implementation_complete', message: `⚡ Implementation complete` });

    // If in auto mode, continue the pipeline to review.
    // FIX: re-read the status here rather than using the `status` snapshot
    // captured before the (potentially very long) implement stage — the user
    // may have toggled auto-pilot (toggleAutoPilot) while the Coder was
    // working, and the stale snapshot would silently ignore that change.
    if (getStatus().pipeline_mode === 'auto') {
      await _runAutoReviewLoop(notify, featureId);
    }
    return { success: true };
  } catch (err) {
    stopImplHB();
    console.error('[Pipeline] Implementation failed:', err.message);
    updateStatus({ stage: 'implementation_failed', error: err.message, next: 'retry' });
    notify('stage_error', { stage: 'impl', error: err.message, message: `❌ Implementation failed: ${err.message.slice(0, 100)}` });
    return { success: false, error: err.message };
  }
}
|
|
1261
|
+
|
|
1262
|
+
// ─── Self-Healing Pipeline ───────────────────────────────────────────────────
// After implementation, runs the test suite and sends failures back to
// the Coder for automatic fixing (up to maxAttempts retries).

/**
 * Runs the configured test command after implementation; on failure, feeds
 * the captured output back into the 'implement' pipeline stage and retries.
 *
 * @param {string} featureId - Active feature id (used only in event payloads).
 * @param {object} config - Project config; reads `selfHeal` and `deploy.testCommand`.
 * @returns {Promise<{success: boolean, skipped?: boolean, attempts?: number}>}
 *   `skipped: true` when self-heal is disabled; otherwise the attempt count.
 */
async function selfHealPipeline(featureId, config) {
  const selfHeal = config.selfHeal || {};
  // Disabled (or unconfigured) self-heal is a successful no-op.
  if (!selfHeal.enabled) return { success: true, skipped: true };

  const testCmd = selfHeal.testCommand || config.deploy?.testCommand || 'npm test';
  const maxAttempts = selfHeal.maxAttempts || 3;

  logActivity('PIPELINE', `Self-heal: running "${testCmd}"...`, 'info');
  bus.emitEvent('pipeline-event', { type: 'self-heal-start', featureId });

  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      // execSync throws on non-zero exit — a throw below means test failures.
      execSync(testCmd, {
        encoding: 'utf8',
        timeout: 120000, // 2-minute cap per test run
        cwd: process.cwd(),
        stdio: ['pipe', 'pipe', 'pipe'],
      });
      logActivity('PIPELINE', `Self-heal SUCCESS — all tests pass (attempt ${attempt})`, 'ok');
      bus.emitEvent('pipeline-event', {
        type: 'self-heal-success',
        featureId,
        attempts: attempt,
        message: `Self-heal SUCCESS after ${attempt} attempt(s)`,
      });
      return { success: true, attempts: attempt };
    } catch (err) {
      // With encoding:'utf8' set above, stderr/stdout are strings when present.
      const stderr = err.stderr || err.stdout || err.message || 'Unknown error';
      const errorOutput = stderr.substring(0, 3000); // bound the fix-prompt size

      logActivity('PIPELINE', `Self-heal attempt ${attempt}/${maxAttempts} — tests failed`, 'error');
      bus.emitEvent('pipeline-event', {
        type: 'self-heal-retry',
        featureId,
        attempt,
        maxAttempts,
        message: `Self-healing: attempt ${attempt}/${maxAttempts} — fixing test failures`,
      });

      // Only dispatch a fix on non-final attempts — the final failure falls
      // through to the failure report after the loop.
      if (attempt < maxAttempts) {
        const healPrompt = `The following test failures occurred after implementing a feature. Fix these errors without breaking existing functionality.\n\nTest command: ${testCmd}\n\nError output:\n\`\`\`\n${errorOutput}\n\`\`\`\n\nFix the code to make all tests pass. Only modify the files that need changes.`;

        try {
          await runPipelineStage('implement', healPrompt, {
            featureId,
            stage: 'self-heal',
          });
        } catch (healErr) {
          // A failed fix dispatch is logged but non-fatal — the loop re-runs tests.
          logActivity('PIPELINE', `Self-heal fix attempt failed: ${healErr.message}`, 'error');
        }
      }
    }
  }

  logActivity('PIPELINE', `Self-heal FAILED after ${maxAttempts} attempts — proceeding to review with warning`, 'error');
  bus.emitEvent('pipeline-event', {
    type: 'self-heal-failed',
    featureId,
    maxAttempts,
    message: `Self-heal failed after ${maxAttempts} attempts`,
  });
  return { success: false, attempts: maxAttempts };
}
|
|
1329
|
+
|
|
1330
|
+
// ─── Rewrite Docs (Spec + Arch) ──────────────────────────────────────────────
// Triggered when user asks to "rework the spec" or "redo the architecture".
// Runs the Architect to update the docs, then transitions to arch_complete so the
// standard notification system fires ("🏗 Architecture complete — tasks ready.")
// and the user can start implementation via the inline button.

/**
 * Re-runs the architect stage to rewrite spec/architecture/tasks docs.
 *
 * @param {string|undefined} description - User's rewrite request; falls back
 *   to a generic "revise and improve" instruction when empty.
 * @returns {Promise<{success: boolean, error?: string}>}
 */
export async function runRewriteDocs(description) {
  const status = getStatus();
  // Reuse the active feature id, or mint a timestamp-based one if idle.
  const featureId = status.current_feature || `FEATURE-${Math.floor(Date.now() / 1000)}`;
  const notify = (event, data) => bus.emitEvent(event, { feature: featureId, ...data });

  // Load latest spec for context (truncated to keep the prompt bounded)
  const specFile = getLatestFilePath('specs');
  const archFile = getLatestFilePath('architecture');
  const specContent = (specFile && existsSync(specFile))
    ? readFileSync(specFile, 'utf8').slice(0, 3000) : '';
  const archContent = (archFile && existsSync(archFile))
    ? readFileSync(archFile, 'utf8').slice(0, 2000) : '';

  const desc = description?.trim() || 'Revise and improve the current spec and architecture';

  // Two prompt variants: with existing docs as context, or from scratch.
  const prompt = specContent
    ? `You are the architect for this project.\n\nThe user asked: "${desc}"\n\nCurrent spec for reference:\n${specContent}\n\n${archContent ? `Current architecture:\n${archContent}\n\n` : ''}Please:\n1. Rewrite the spec document (update the SPEC file with improved requirements)\n2. Rewrite the architecture document (update the ARCH file with implementation plan)\n3. Create a TASKS file with clear, actionable implementation tasks for the Coder\n\nMake all necessary file writes in the .ai-workflow/specs/, .ai-workflow/architecture/, and .ai-workflow/tasks/ directories.`
    : `You are the architect for this project.\n\nThe user asked: "${desc}"\n\nPlease create:\n1. A spec document in .ai-workflow/specs/\n2. An architecture document in .ai-workflow/architecture/\n3. A TASKS file in .ai-workflow/tasks/ with actionable implementation steps for the Coder`;

  notify('stage_start', { stage: 'rewrite_docs', message: `🔄 Rewriting spec and architecture docs...\n\n"${desc}"` });
  const stopHB = withHeartbeat(notify, 'doc rewrite', 120000);

  try {
    await runPipelineStage('architect', prompt, { featureId, stage: 'arch' });
    stopHB();

    // Transition to arch_complete — the event bus file watcher will auto-send
    // the "🏗 Architecture complete — tasks ready." Telegram notification with buttons.
    updateStatus({
      current_feature: featureId,
      stage: 'arch_complete',
      pipeline_mode: status.pipeline_mode || 'manual',
      next: 'implement',
    });

    return { success: true };
  } catch (err) {
    stopHB();
    console.error('[Pipeline] runRewriteDocs failed:', err.message);
    notify('stage_error', {
      stage: 'rewrite_docs',
      error: err.message,
      message: `❌ Failed to rewrite docs: ${err.message.slice(0, 150)}\n\nUse /feature to start fresh.`,
    });
    return { success: false, error: err.message };
  }
}
|
|
1382
|
+
|
|
1383
|
+
// ─── Pipeline Resume (after restart) ─────────────────────────────────────────
// If the bot restarts mid-pipeline in auto mode, resume from the pending step.

/**
 * Inspects the persisted (stage, next) pair and resumes the auto-mode
 * pipeline from whichever step was pending when the bot went down.
 * No-op unless pipeline_mode is 'auto'. Each branch below handles one
 * persisted state; branches that hand off to another long-running step
 * return early.
 */
export async function autoResumePipeline() {
  const status = getStatus();
  if (status?.pipeline_mode !== 'auto') return;
  const { stage, next } = status;

  const featureId = status.current_feature;
  const notify = (event, data) => bus.emitEvent(event, { feature: featureId, ...data });

  // Resume: arch complete but implementation hasn't started yet
  if (stage === 'arch_complete' && next === 'implement') {
    console.log('[Pipeline] Resuming auto pipeline from arch_complete → implement');
    notify('stage_start', { stage: 'impl', message: `🔄 Bot restarted — resuming implementation...` });
    const implResult = await runImplementation();
    if (!implResult.success) {
      console.error('[Pipeline] runImplementation failed on resume:', implResult.error);
      notify('stage_error', { stage: 'impl', message: `❌ Implementation failed to start: ${implResult.error}\n\nUse /status to check the pipeline state or /implement to retry.` });
    }
    return; // runImplementation handles the rest (review loop etc.)
  }

  // Resume: implementation finished but review hasn't run yet
  if (stage === 'implementation_complete' && next === 'review') {
    console.log('[Pipeline] Resuming auto pipeline from implementation_complete → review');
    notify('stage_start', { stage: 'review', message: `🔄 Bot restarted — resuming auto-pilot from review stage...` });
    await _runAutoReviewLoop(notify, featureId);
  }

  // Resume: review done and approved but not yet committed
  if (stage === 'review_complete' && next === 'approve_or_reject') {
    const review = getLatestReview();
    const verdict = review?.verdict || 'UNKNOWN';
    if (verdict === 'APPROVED') {
      // Approved review: approve, then immediately chain into deploy.
      notify('stage_start', { stage: 'approve', message: `🔄 Bot restarted — resuming auto-approval...` });
      const approveResult = await runApprove();
      if (approveResult.success) {
        notify('stage_complete', { stage: 'approved', message: `✅ Feature approved — auto-deploying...` });
        updateStatus({ stage: 'deploying' });
        notify('stage_start', { stage: 'deploy', message: `🚀 Resuming auto-deploy after restart...` });
        const deployResult = await runDeploy('RunLocalTests');
        if (deployResult.success) {
          notify('stage_complete', { stage: 'deployed', message: `🚀 Deployment complete! Feature shipped! 🎉` });
        } else {
          notify('stage_error', { stage: 'deploy', error: deployResult.error, message: `⚠️ Auto-deploy failed: ${(deployResult.error || deployResult.stderr || 'unknown').slice(0, 200)}\n\nUse /deploy to retry.` });
        }
      } else {
        notify('stage_error', { stage: 'approve', error: approveResult.error, message: `⚠️ Auto-approve failed: ${approveResult.error}` });
      }
    } else {
      // Was rejected — check if we've already hit max review cycles before restarting the loop
      const currentCount = (getStatus().auto_review_count) || 0;
      if (currentCount >= AUTO_REVIEW_MAX) {
        // Out of budget: drop to manual mode and tell the user their options.
        updateStatus({ pipeline_mode: 'manual' });
        notify('stage_complete', {
          stage: 'review_rejected',
          message: `🛑 Bot restarted — max review cycles already reached (${currentCount}/${AUTO_REVIEW_MAX}).\n\nLast verdict: <b>${verdict}</b>\n\nSwitched to manual mode. Use:\n• /approve to force-approve and deploy\n• /reject to send specific fixes to the Coder\n• /review to run one more review cycle`,
        });
        return;
      }
      // Was rejected — resume the fix loop
      notify('stage_start', { stage: 'fix', message: `🔄 Bot restarted — review was <b>${verdict}</b>. Resuming fix loop...` });
      await _runAutoReviewLoop(notify, featureId);
    }
  }

  // Resume: manually rejected (via /reject command) — Coder needs to fix and re-enter review loop
  if (stage === 'rejected' && next === 'fix') {
    console.log('[Pipeline] Resuming auto pipeline from rejected → fix');
    const reason = status.rejection_reason || 'see /docs for blockers';
    notify('stage_start', {
      stage: 'fix',
      message: `🔄 Bot restarted — pipeline was <b>REJECTED</b>.\n\nReason: <i>${reason}</i>\n\nCoder is fixing blockers now...`,
    });
    const fixResult = await runFix();
    if (!fixResult.success) {
      notify('stage_error', {
        stage: 'fix',
        message: `❌ Fix failed: ${fixResult.error}\n\nUse /fix to retry or /docs to review blockers.`,
      });
      return;
    }
    notify('stage_start', { stage: 'review', message: `🔍 Fix complete — re-entering auto review loop...` });
    await _runAutoReviewLoop(notify, featureId);
  }

  // Resume: feature approved but deploy not yet run
  if (stage === 'approved' && next === 'deploy') {
    console.log('[Pipeline] Resuming auto pipeline from approved → deploy');
    updateStatus({ stage: 'deploying' });
    notify('stage_start', { stage: 'deploy', message: `🔄 Bot restarted — resuming auto-deploy...` });
    const deployResult = await runDeploy('RunLocalTests');
    if (deployResult.success) {
      notify('stage_complete', { stage: 'deployed', message: `🚀 Deployment complete! Feature shipped! 🎉` });
    } else {
      notify('stage_error', { stage: 'deploy', error: deployResult.error, message: `⚠️ Auto-deploy failed: ${(deployResult.error || deployResult.stderr || 'unknown').slice(0, 200)}\n\nUse /deploy to retry.` });
    }
  }

  // Resume: spec complete but architecture hasn't started yet
  if (stage === 'spec_complete' && next === 'architect') {
    console.log('[Pipeline] Resuming auto pipeline from spec_complete → architect');
    notify('stage_start', { stage: 'arch', message: `🔄 Bot restarted mid-pipeline at <b>spec_complete</b>.\n\nThe spec is done but architecture hasn't started yet.\nRestarting the pipeline from architecture stage...` });
    // Re-run architecture by calling _runPipelineAsync with a minimal restart
    // We need the spec to be available — launch implementation from arch stage
    try {
      const specFile = getLatestFilePath('specs');
      if (!specFile || !existsSync(specFile)) {
        notify('stage_error', { stage: 'arch', message: `❌ Cannot resume — spec file not found.\n\nUse /new-feature to restart the pipeline.` });
        return;
      }
      const specContent = readFileSync(specFile, 'utf8').slice(0, 2000);
      const config = getConfig();
      const projectName = config.name || 'this project';
      const archPrompt = `You are the architect for ${projectName}.\n` +
        `Analyze the feature spec and create:\n` +
        `1. Architecture document in .ai-workflow/docs/\n` +
        `2. Implementation tasks in .ai-workflow/tasks/\n\n## Feature Spec\n${specContent}`;
      // Use configured provider for architect stage
      await runPipelineStage('architect', archPrompt, { featureId, stage: 'arch' });
      updateStatus({ stage: 'arch_complete', next: 'implement' });
      notify('stage_complete', { stage: 'arch_complete', message: `🏗 Architecture complete (resumed after restart)` });
      // Continue to implementation
      const implResult = await runImplementation();
      if (!implResult.success) {
        notify('stage_error', { stage: 'impl', message: `❌ Implementation failed after architecture resume: ${implResult.error}\n\nUse /implement to retry.` });
      }
    } catch (archErr) {
      notify('stage_error', { stage: 'arch', message: `❌ Architecture failed on resume: ${archErr.message?.slice(0, 150)}\n\nUse /new-feature to restart.` });
    }
    return;
  }

  // Resume: implementation failed — auto-retry
  if (stage === 'implementation_failed' && next === 'retry') {
    console.log('[Pipeline] Resuming auto pipeline from implementation_failed → retry');
    notify('stage_start', { stage: 'impl', message: `🔄 Bot restarted — retrying failed implementation...` });
    // runImplementation accepts implementation_failed stage
    const implResult = await runImplementation();
    if (!implResult.success) {
      notify('stage_error', { stage: 'impl', message: `❌ Implementation retry failed: ${implResult.error}\n\nUse /implement to retry manually.` });
    }
    return;
  }

  // Resume: pipeline crashed — notify user
  if (stage === 'crashed') {
    console.log('[Pipeline] Detected crashed pipeline state — notifying user');
    notify('stage_error', { stage: 'crashed', message: `💥 Bot restarted and found a <b>crashed</b> pipeline.\n\nFeature: ${featureId}\nPrevious error: ${status.error || 'unknown'}\n\nRecovery options:\n• /implement — retry implementation\n• /review — run a code review\n• /status — check current state\n• /reset — clear and start fresh` });
    return;
  }
}
|
|
1535
|
+
|
|
1536
|
+
// ─── Custom Queries (project-specific, e.g. Salesforce DLQ/Queue) ────────────
// Configured via `queries` array in aicc.config.js:
//   queries: [
//     { name: 'queue', label: 'Sync Queue', command: 'sf data query --query "SELECT ..." --result-format json' },
//     { name: 'dlq', label: 'Dead Letter Queue', command: 'sf data query --query "SELECT ..." --result-format json' },
//   ]

/**
 * Executes a named query from the project's `queries` config entry.
 *
 * @param {string} queryName - The `name` of a configured query.
 * @returns {Promise<{success: boolean, label?: string, data?: string, error?: string}>}
 *   stdout is truncated to 3000 chars, stderr to 500.
 */
export async function runCustomQuery(queryName) {
  let configuredQueries;
  try {
    configuredQueries = getConfig().queries || [];
  } catch {
    configuredQueries = [];
  }

  const entry = configuredQueries.find((candidate) => candidate.name === queryName);
  if (!entry) {
    const available = configuredQueries.map((candidate) => candidate.name).join(', ') || 'none';
    return { success: false, error: `Unknown query "${queryName}". Available: ${available}` };
  }

  try {
    const execution = await capture(entry.command);
    if (execution.code !== 0) {
      return { success: false, error: execution.stderr.slice(0, 500) };
    }
    return { success: true, label: entry.label, data: execution.stdout.slice(0, 3000) };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
1557
|
+
|
|
1558
|
+
/**
 * Salesforce Sync Queue query (convenience — works when sf CLI is available).
 * Delegates to a project-defined 'queue' query when one exists; otherwise
 * runs a default query against Integration_Sync_Queue__c via the sf CLI.
 */
export async function querySyncQueue() {
  // Prefer a project-defined 'queue' query when configured.
  let configuredQueries;
  try {
    configuredQueries = getConfig().queries || [];
  } catch {
    configuredQueries = [];
  }
  if (configuredQueries.some((entry) => entry.name === 'queue')) {
    return runCustomQuery('queue');
  }

  // Default: Salesforce Integration_Sync_Queue__c
  const defaultCommand =
    'sf data query --query "SELECT Id, Status__c, Direction__c, Record_Id__c, Retry_Count__c, CreatedDate FROM Integration_Sync_Queue__c ORDER BY CreatedDate DESC LIMIT 10" --result-format json';
  try {
    const execution = await capture(defaultCommand);
    if (execution.code === 0) {
      return { success: true, data: execution.stdout.slice(0, 3000) };
    }
    return { success: false, error: execution.stderr.slice(0, 500) };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
1578
|
+
|
|
1579
|
+
/**
 * Salesforce Dead Letter Queue query (convenience — works when sf CLI is available).
 * Delegates to a project-defined 'dlq' query when one exists; otherwise runs
 * a default query against Integration_Dead_Letter_Queue__c via the sf CLI.
 */
export async function queryDeadLetterQueue() {
  // Prefer a project-defined 'dlq' query when configured.
  let configuredQueries;
  try {
    configuredQueries = getConfig().queries || [];
  } catch {
    configuredQueries = [];
  }
  if (configuredQueries.some((entry) => entry.name === 'dlq')) {
    return runCustomQuery('dlq');
  }

  const defaultCommand =
    'sf data query --query "SELECT Id, Record_Id__c, Error_Message__c, Retry_Count__c, CreatedDate FROM Integration_Dead_Letter_Queue__c ORDER BY CreatedDate DESC LIMIT 10" --result-format json';
  try {
    const execution = await capture(defaultCommand);
    if (execution.code === 0) {
      return { success: true, data: execution.stdout.slice(0, 3000) };
    }
    return { success: false, error: execution.stderr.slice(0, 500) };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
1597
|
+
|
|
1598
|
+
// ─── Cleanup ──────────────────────────────────────────────────────────────────

/**
 * Resets the pipeline state to idle and emits cleanup/audit events.
 * @returns {Promise<{success: boolean, error?: string}>}
 */
export async function runCleanup() {
  try {
    updateStatus({ stage: 'idle', current_feature: null, next: null, pipeline_mode: null });
    bus.emitEvent('cleanup_done', {});
    audit(AUDIT_EVENTS.CLEANUP, {});
  } catch (err) {
    return { success: false, error: err.message };
  }
  return { success: true };
}
|
|
1610
|
+
|
|
1611
|
+
// ─── Toggle Auto-Pilot ───────────────────────────────────────────────────────

/**
 * Flips the active pipeline between 'auto' and 'manual' modes.
 * Fails when there is no feature in flight or the pipeline is in a
 * terminal stage (approved / idle / deployed).
 * @returns {{success: boolean, mode?: string, error?: string}}
 */
export function toggleAutoPilot() {
  const status = getStatus();
  const terminalStages = ['approved', 'idle', 'deployed'];
  if (!status.current_feature || terminalStages.includes(status.stage)) {
    return { success: false, error: 'No active pipeline to toggle auto-pilot for.' };
  }

  const newMode = status.pipeline_mode === 'auto' ? 'manual' : 'auto';
  updateStatus({ pipeline_mode: newMode });
  bus.emitEvent('autopilot_toggled', { mode: newMode });
  audit(AUDIT_EVENTS.AUTOPILOT_TOGGLED, { mode: newMode });
  return { success: true, mode: newMode };
}
|
|
1628
|
+
|
|
1629
|
+
// ─── Auto-Reset After Deployment ─────────────────────────────────────────────
// After a feature is deployed, the pipeline stays in 'deployed' stage briefly
// so the user sees the notification, then auto-resets to 'idle' after 60s.

let _autoResetTimer = null;
const AUTO_RESET_DELAY_MS = 60_000; // 60 seconds

/**
 * Schedules a one-shot reset of the pipeline back to idle after deployment.
 * Re-scheduling replaces any timer already pending; the reset is skipped if
 * the stage has moved away from 'deployed' by the time the timer fires.
 */
function _scheduleAutoReset() {
  if (_autoResetTimer) {
    clearTimeout(_autoResetTimer);
  }
  _autoResetTimer = setTimeout(() => {
    _autoResetTimer = null;
    const currentStatus = getStatus();
    if (currentStatus.stage !== 'deployed') {
      return; // user already changed stage — skip
    }
    const feature = currentStatus.current_feature;
    updateStatus({ stage: 'idle', current_feature: null, next: null, pipeline_mode: null });
    bus.emitEvent('auto_reset', {
      feature,
      message: `🔄 Pipeline auto-reset to idle after deploying <b>${feature || 'feature'}</b>.\n\nReady for the next task.`,
    });
    bus.emitEvent('feature_reset', { feature, auto: true });
    audit(AUDIT_EVENTS.RESET, { feature, auto: true });
    console.log(` [Pipeline] Auto-reset to idle after deploying "${feature}"`);
  }, AUTO_RESET_DELAY_MS);
}
|
|
1653
|
+
|
|
1654
|
+
// ─── Reset / Abandon ─────────────────────────────────────────────────────────

/**
 * Abandons the active feature: cancels pending pipeline notifications and
 * resets the persisted state to idle.
 * @returns {{success: boolean, feature?: string, error?: string}}
 */
export function runReset() {
  const { current_feature: feature } = getStatus();
  if (!feature) {
    return { success: false, error: 'No active feature to abandon.' };
  }

  // Cancel all running heartbeats and suppress further notifications
  _pipelineCancelled = true;

  updateStatus({ stage: 'idle', current_feature: null, next: null, pipeline_mode: null });
  bus.emitEvent('feature_reset', { feature });
  audit(AUDIT_EVENTS.RESET, { feature });
  return { success: true, feature };
}
|
|
1671
|
+
|
|
1672
|
+
// ─── Persistent Pending Chat Storage ─────────────────────────────────────────
// Survives bot restarts — used to hold the pending feature/bug description
// between the AI suggesting it and the user clicking Manual/Auto-Pilot button.

/**
 * Persists a pending-chat payload for a Telegram chat id, stamped with the
 * save time so loadPendingChat can expire it. Best-effort: failures are silent.
 */
export function savePendingChat(chatId, pending) {
  try {
    const sessionsDir = resolve(getWorkflowDir(), 'sessions');
    mkdirSync(sessionsDir, { recursive: true });
    const targetPath = resolve(sessionsDir, `PENDING-CHAT-${chatId}.json`);
    const payload = { ...pending, savedAt: Date.now() };
    writeFileSync(targetPath, JSON.stringify(payload, null, 2));
  } catch {
    // non-fatal — a write failure must not break the bot
  }
}
|
|
1686
|
+
|
|
1687
|
+
/**
 * Loads the persisted pending-chat payload for a chat id, deleting and
 * returning null when it is older than 10 minutes. Returns null on any error.
 */
export function loadPendingChat(chatId) {
  try {
    const pendingPath = resolve(getWorkflowDir(), 'sessions', `PENDING-CHAT-${chatId}.json`);
    if (!existsSync(pendingPath)) {
      return null;
    }
    const pending = JSON.parse(readFileSync(pendingPath, 'utf8'));
    const ageMs = Date.now() - (pending.savedAt || 0);
    if (ageMs > 10 * 60 * 1000) {
      // Expire after 10 minutes
      unlinkSync(pendingPath);
      return null;
    }
    return pending;
  } catch {
    return null;
  }
}
|
|
1700
|
+
|
|
1701
|
+
/**
 * Removes the persisted pending-chat file for a chat id, if present.
 * Best-effort: deletion failures are swallowed.
 */
export function clearPendingChat(chatId) {
  try {
    const pendingPath = resolve(getWorkflowDir(), 'sessions', `PENDING-CHAT-${chatId}.json`);
    if (existsSync(pendingPath)) {
      unlinkSync(pendingPath);
    }
  } catch {
    // non-fatal
  }
}
|
|
1707
|
+
|
|
1708
|
+
// ─── Fix (run Coder on review blockers) ──────────────────────────────────────

/**
 * Sends the blockers from the latest (rejected) review back to the Coder
 * stage for fixing. Falls back to a generic instruction when no review
 * artifact is available.
 * @returns {Promise<{success: boolean, error?: string}>}
 */
export async function runFix() {
  try {
    const { current_feature: featureId } = getStatus();
    if (!featureId) {
      return { success: false, error: 'No active feature to fix.' };
    }

    const latestReview = getLatestReview();
    const blockers = latestReview
      ? extractBlockers(latestReview.content)
      : 'Fix all reported issues from the latest review.';

    const fixPrompt = `You are the Coder. The code review was REJECTED.\n\nFix ALL the following blockers identified in the code review:\n\n${blockers}\n\nMake the actual code changes to fix these issues. Do not explain — just fix.`;

    await runPipelineStage('implement', fixPrompt, { featureId, stage: 'fix' });
    audit(AUDIT_EVENTS.BUG_FIX, { featureId });
    return { success: true };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
1731
|
+
|
|
1732
|
+
// ─── Review (async — long running) ───────────────────────────────────────────
|
|
1733
|
+
|
|
1734
|
+
/**
 * Run the code-review stage for the current feature (async, long running).
 *
 * Collects the code under review from git with a cascade of strategies:
 *   1. uncommitted changes (working dir vs HEAD)
 *   2. staged changes
 *   3. commits made since the feature's timestamp
 *   4. recent-commit diffs (HEAD~N..HEAD)
 *   5. recently modified source files read directly from disk
 * plus the recent commit log as supplementary context. It then builds a
 * structured review prompt (spec + context file + diff), injects optional
 * skills/persona, and runs the configured review provider with runAI as a
 * fallback chain, writing reviews/REVIEW-<ts>.md.
 *
 * Fixes vs original: removed the dead `hasUncommitted` local (assigned but
 * never read) and unused `catch (e)` bindings.
 *
 * @returns {Promise<{success: boolean, reviewFile?: string, error?: string}>}
 */
export async function runReview() {
  const status = getStatus();
  if (!status.current_feature) {
    return { success: false, error: 'No active feature to review.' };
  }

  const featureId = status.current_feature;
  // notify is only defined in _runAutoReviewLoop/_runPipelineAsync — use bus directly here
  const notify = (event, data) => bus.emitEvent(event, { feature: featureId, ...data });
  updateStatus({ stage: 'review' });

  try {
    const timestamp = Math.floor(Date.now() / 1000);
    const workflowDir = getWorkflowDir();
    const root = getRootDir();

    // Get git changes for review — handles uncommitted, staged, AND already-committed code
    let diff = 'No changes detected';
    try {
      // Find actual git root — ROOT may not be the git repo (e.g. aicc.config.js in parent dir)
      let gitDir = root;
      try {
        const gitTopLevel = await capture('git rev-parse --show-toplevel', [], { cwd: root });
        if (gitTopLevel.stdout.trim()) gitDir = gitTopLevel.stdout.trim();
        else {
          // Try subdirectories that might be the git repo
          const entries = readdirSync(root).filter(e => {
            try { return existsSync(resolve(root, e, '.git')); } catch { return false; }
          });
          if (entries.length > 0) gitDir = resolve(root, entries[0]);
        }
      } catch {
        // ROOT itself might not be a git repo — scan for .git subdirectory
        try {
          const entries = readdirSync(root).filter(e => {
            try { return existsSync(resolve(root, e, '.git')); } catch { return false; }
          });
          if (entries.length > 0) gitDir = resolve(root, entries[0]);
        } catch { /* use root as fallback */ }
      }
      const git = (cmd) => capture(cmd, [], { cwd: gitDir });

      const diffParts = [];
      let hasCodeContent = false;

      // 1. Check uncommitted changes (working dir vs HEAD)
      try {
        const localStat = await git('git --no-pager diff HEAD --stat');
        if (localStat.stdout.trim()) {
          diffParts.push('### Uncommitted Changes\n' + localStat.stdout.trim());
          try {
            const localPatch = await git('git --no-pager diff HEAD');
            if (localPatch.stdout.trim()) {
              diffParts.push('### Uncommitted Code Diff\n' + localPatch.stdout.slice(0, 3000));
              hasCodeContent = true;
            }
          } catch { /* non-fatal */ }
        }
      } catch { /* non-fatal */ }

      // 2. Check staged changes
      if (!hasCodeContent) {
        try {
          const stagedStat = await git('git --no-pager diff --cached --stat');
          if (stagedStat.stdout.trim()) {
            diffParts.push('### Staged Changes\n' + stagedStat.stdout.trim());
            try {
              const stagedPatch = await git('git --no-pager diff --cached');
              if (stagedPatch.stdout.trim()) {
                diffParts.push('### Staged Code Diff\n' + stagedPatch.stdout.slice(0, 3000));
                hasCodeContent = true;
              }
            } catch { /* non-fatal */ }
          }
        } catch { /* non-fatal */ }
      }

      // 3. If no uncommitted/staged code, get diff of committed changes since feature started
      // This handles the common case where Copilot --yolo already committed everything
      if (!hasCodeContent) {
        // Use feature timestamp to find the baseline commit (before this feature existed)
        const tsMatch = featureId.match(/(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})/);
        if (tsMatch) {
          const [, y, mo, d, h, mi, s] = tsMatch;
          const isoDate = `${y}-${mo}-${d}T${h}:${mi}:${s}`;
          try {
            const baseResult = await git(`git --no-pager log --before="${isoDate}" --format=%H -1`);
            const baseHash = baseResult.stdout.trim();
            if (baseHash) {
              const featureStat = await git(`git --no-pager diff ${baseHash}..HEAD --stat`);
              if (featureStat.stdout.trim()) {
                diffParts.push('### Files Changed Since Feature Started\n' + featureStat.stdout.trim());
                const featurePatch = await git(`git --no-pager diff ${baseHash}..HEAD`);
                if (featurePatch.stdout.trim()) {
                  diffParts.push('### Code Changes (All Feature Commits)\n' + featurePatch.stdout.slice(0, 4000));
                  hasCodeContent = true;
                }
              }
            }
          } catch { /* non-fatal */ }
        }

        // Fallback: try HEAD~N..HEAD with increasing range
        if (!hasCodeContent) {
          for (const n of [10, 5, 3, 1]) {
            try {
              const commitPatch = await git(`git --no-pager diff HEAD~${n}..HEAD`);
              if (commitPatch.stdout.trim()) {
                const commitStat = await git(`git --no-pager diff HEAD~${n}..HEAD --stat`);
                diffParts.push(`### Files Changed in Last ${n} Commit(s)\n` + (commitStat.stdout.trim() || '(see diff below)'));
                diffParts.push('### Code Diff (Recent Commits)\n' + commitPatch.stdout.slice(0, 4000));
                hasCodeContent = true;
                break;
              }
            } catch { continue; }
          }
        }
      }

      // 4. Last resort: read actual source files modified after the pipeline started
      // This covers edge cases where git diff approaches all return empty
      if (!hasCodeContent) {
        try {
          const statusRef = resolve(root, '.ai-workflow', 'status.json');
          const findRef = existsSync(statusRef) ? `-newer ${statusRef}` : '-mmin -120';
          const findCmd = `find . -maxdepth 4 -type f \\( -name "*.js" -o -name "*.ts" -o -name "*.py" -o -name "*.jsx" -o -name "*.tsx" -o -name "*.go" -o -name "*.sh" \\) -not -path "*/node_modules/*" -not -path "*/.ai-workflow/*" -not -path "*/.git/*" ${findRef} 2>/dev/null | head -15`;
          const findResult = await capture(findCmd, [], { cwd: gitDir });
          const recentFiles = findResult.stdout.trim().split('\n').filter(Boolean);

          if (recentFiles.length > 0) {
            const fileSummaries = [];
            let totalChars = 0;
            for (const file of recentFiles) {
              if (totalChars > 4000) break;
              try {
                const filePath = resolve(gitDir, file.trim());
                if (existsSync(filePath)) {
                  const content = readFileSync(filePath, 'utf8');
                  const snippet = content.slice(0, 800);
                  fileSummaries.push(`#### ${file.trim()}\n\`\`\`\n${snippet}\n\`\`\``);
                  totalChars += snippet.length;
                }
              } catch { /* skip unreadable files */ }
            }
            if (fileSummaries.length > 0) {
              diffParts.push('### Implementation Files (recently modified)\n' + fileSummaries.join('\n\n'));
              hasCodeContent = true;
            }
          }
        } catch { /* non-fatal */ }
      }

      // 5. Always append recent commit log for context (supplementary, not primary)
      try {
        const log = await git('git --no-pager log --oneline -10');
        if (log.stdout.trim()) {
          diffParts.push('### Recent Commits\n' + log.stdout.trim());
        }
      } catch { /* non-fatal */ }

      if (diffParts.length > 0) {
        diff = diffParts.join('\n\n');
        if (diff.length > 6000) diff = diff.slice(0, 6000) + '\n\n... (truncated for context limit)';
      }
      if (!hasCodeContent) console.warn('[Review] WARNING: Could not find any code changes to review — reviewer will see commit log only');
    } catch (gitErr) {
      console.warn('[Review] Git changes collection failed:', gitErr.message?.slice(0, 120));
    }

    // Load latest spec for review context
    const specFile = getLatestFilePath('specs');
    const specContent = specFile && existsSync(specFile) ? readFileSync(specFile, 'utf8').slice(0, 2000) : 'No spec available';

    // Build review prompt using available context file
    const geminiMd = resolve(root, 'GEMINI.md');
    const claudeMd = resolve(root, 'CLAUDE.md');
    const reviewCtxFile = existsSync(geminiMd) ? geminiMd : existsSync(claudeMd) ? claudeMd : null;
    const reviewCtx = reviewCtxFile ? readFileSync(reviewCtxFile, 'utf8').slice(0, 3000) : '';
    let prompt =
      `${reviewCtx}\n\n## Code Review Task\n\n` +
      `Feature: ${status.current_feature}\n\n` +
      `## Feature Spec\n${specContent}\n\n` +
      `## Code Changes\n` +
      `The following shows code changes for this feature. They may include uncommitted diffs, committed diffs (code already committed by the implementer), or direct file contents.\n` +
      `Review ALL the code shown below against the spec — do NOT say "no code submitted" if you see diffs or file contents.\n\n` +
      `${diff}\n\n` +
      `Write a structured code review of the code above. Include:\n` +
      `- ✅ Approved Items\n- ⚠️ Warnings (non-blocking)\n- ❌ Blockers (must fix)\n` +
      `- ## Verdict\n**APPROVED** or **REJECTED**`;

    // Inject skills and persona for review stage
    try {
      prompt = injectSkills(prompt, 'review');
    } catch { /* skills are optional */ }
    try {
      const persona = loadPersona('reviewer');
      if (persona) { prompt = injectPersona(prompt, persona); }
    } catch { /* personas are optional */ }

    const reviewsDir = resolve(workflowDir, 'reviews');
    mkdirSync(reviewsDir, { recursive: true });
    const reviewFile = resolve(reviewsDir, `REVIEW-${timestamp}.md`);

    // Run review using configured provider (with fallback chain)
    notify('stage_start', { stage: 'review', message: `🔍 Reviewer is reviewing the code...` });
    try {
      await runPipelineStage('review', prompt, { featureId, stage: 'review', outputFile: reviewFile });

      // If provider wrote to stdout (via redirect) the file will exist.
      // If provider wrote via tools, check for the file.
      if (!existsSync(reviewFile) || readFileSync(reviewFile, 'utf8').trim().length < 50) {
        // Fallback: try runAI which uses the chat pipeline
        const fallbackContent = await runAI(prompt, { system: 'You are a code reviewer. Write a structured review with ✅ Approved, ⚠️ Warnings, ❌ Blockers, and ## Verdict: APPROVED or REJECTED.' });
        writeFileSync(reviewFile, typeof fallbackContent === 'string' ? fallbackContent : prompt);
      }
    } catch (reviewErr) {
      console.warn('[Review] Review stage failed:', reviewErr.message?.slice(0, 120));
      // Last resort: try runAI
      try {
        const fallbackContent = await runAI(prompt, { system: 'You are a code reviewer. Write a structured review with ✅ Approved, ⚠️ Warnings, ❌ Blockers, and ## Verdict: APPROVED or REJECTED.' });
        writeFileSync(reviewFile, typeof fallbackContent === 'string' ? fallbackContent : prompt);
      } catch {
        writeFileSync(reviewFile, `# Review\n\n## Verdict\n**APPROVED**\n\n_Auto-approved: all review providers unavailable._`);
      }
    }

    updateStatus({ stage: 'review_complete', latest_review: `REVIEW-${timestamp}`, next: 'approve_or_reject' });
    bus.emitEvent('review_complete', { feature: status.current_feature });
    audit(AUDIT_EVENTS.REVIEW_COMPLETED, { feature: status.current_feature, reviewFile: `REVIEW-${timestamp}` });
    return { success: true, reviewFile };
  } catch (err) {
    return { success: false, error: err.message };
  }
}
|
|
1970
|
+
|
|
1971
|
+
/**
 * Find the newest review file (reviews/REVIEW-*.md) and parse its verdict.
 * Filenames embed a timestamp, so lexicographic order matches age.
 * @returns {{name: string, verdict: string, content: string}|null}
 *   verdict is 'APPROVED', 'REJECTED', or 'UNKNOWN'; null when no reviews exist.
 */
export function getLatestReview() {
  const reviewsDir = resolve(getWorkflowDir(), 'reviews');
  if (!existsSync(reviewsDir)) return null;

  const candidates = readdirSync(reviewsDir)
    .filter(f => f.startsWith('REVIEW-') && f.endsWith('.md'))
    .sort();
  if (candidates.length === 0) return null;

  // Last entry of the ascending sort is the most recent review.
  const name = candidates[candidates.length - 1];
  const content = readFileSync(resolve(reviewsDir, name), 'utf8');

  // Parse verdict: take the text after a "## Verdict"-style heading, up to
  // the next heading, and look for APPROVED/REJECTED.
  let verdict = 'UNKNOWN';
  const verdictParts = content.split(/##[^#]*Verdict/i);
  if (verdictParts.length >= 2) {
    const verdictBody = verdictParts[1].split(/^##/m)[0];
    const m = verdictBody.match(/\b(APPROVED|REJECTED)\b/i);
    if (m) verdict = m[1].toUpperCase();
  }

  return { name, verdict, content };
}
|
|
1997
|
+
|
|
1998
|
+
// ─── AI-Powered Helpers ──────────────────────────────────────────────────────
|
|
1999
|
+
// Lightweight AI tasks routed through runAI() (Ollama first, cloud fallback).
|
|
2000
|
+
|
|
2001
|
+
/** Base system prompt for lightweight AI tasks, read from config (ai.system). */
function getSystemPrompt() {
  const { ai } = getConfig();
  return ai.system;
}
|
|
2004
|
+
|
|
2005
|
+
/**
 * Load the chatbot skill file — the AI's "employee manual" for natural
 * conversation — from `<skillsDir>/chatbot.md` (skillsDir defaults to
 * `.claude/skills`). The file contains rules for detecting bugs vs features,
 * confirming with users, and routing through the pipeline.
 * @returns {string|null} the skill text, or null when the file is missing or
 *   unreadable (callers fall back to the inline system prompt).
 */
function loadChatbotSkill() {
  try {
    const skillsDir = getConfig().skillsDir || '.claude/skills';
    const chatbotPath = resolve(getRootDir(), skillsDir, 'chatbot.md');
    if (!existsSync(chatbotPath)) return null;
    return readFileSync(chatbotPath, 'utf8');
  } catch {
    return null;
  }
}
|
|
2022
|
+
|
|
2023
|
+
/**
 * Summarize a code review into 2-3 key bullet points via runAI.
 * @param {string} reviewContent — full review text (truncated to 3000 chars).
 * @returns {Promise<string|null>} summary, or null on failure (summaries are optional).
 */
export async function summarizeReview(reviewContent) {
  const excerpt = reviewContent.slice(0, 3000);
  const task = `Summarize this code review in 2-3 bullet points. Focus on: verdict (approved/rejected), key blockers, and recommended actions.\n\n${excerpt}`;
  try {
    return await runAI(task, { system: getSystemPrompt(), fallback: false });
  } catch {
    return null; // Graceful — summaries are optional
  }
}
|
|
2036
|
+
|
|
2037
|
+
/**
 * Generate a human-readable explanation of the pipeline status via runAI.
 * @param {object} statusData — current status object, serialized into the prompt.
 * @returns {Promise<string|null>} explanation, or null on failure.
 */
export async function explainStatus(statusData) {
  const serialized = JSON.stringify(statusData, null, 2);
  const task = `Explain this pipeline status in 1-2 sentences. What stage is it at, what should the user do next?\n\n${serialized}`;
  try {
    return await runAI(task, { system: getSystemPrompt(), fallback: false });
  } catch {
    return null;
  }
}
|
|
2050
|
+
|
|
2051
|
+
/**
 * Analyze pipeline log content and highlight errors/warnings via runAI.
 * @param {string} logContent — raw log text (truncated to 3000 chars).
 * @returns {Promise<string|null>} analysis, or null on failure.
 */
export async function analyzeLog(logContent) {
  const excerpt = logContent.slice(0, 3000);
  const task = `Analyze these pipeline logs. List any errors, warnings, or notable events in bullet points. If everything looks normal, say so briefly.\n\n${excerpt}`;
  try {
    return await runAI(task, { system: getSystemPrompt(), fallback: false });
  } catch {
    return null;
  }
}
|
|
2064
|
+
|
|
2065
|
+
/**
 * General-purpose AI Q&A about the project.
 *
 * Builds the chat system prompt from, in priority order: the chatbot skill
 * file (if present), otherwise a built-in inline prompt (cached to disk via
 * saveExternalPrompt); then layers on tech skills, learned context, current
 * pipeline state (with mode-specific behaviour rules), and codebase context,
 * and finally routes the question through runAI.
 *
 * Fix vs original: removed a duplicated "fix it" phrase from the Category 4
 * signal list in the inline prompt.
 *
 * @param {string} question — the user's message.
 * @param {string} [context] — optional prior-conversation context (truncated to 3000 chars).
 * @returns {Promise<string|{error: string}>} the AI answer, or {error} on failure.
 */
export async function askAI(question, context = '') {
  audit(AUDIT_EVENTS.ASK_AI, { question: question.slice(0, 200) });
  const projectName = getConfig().name || 'this project';

  // Load the chatbot skill file — this is the AI's "employee manual"
  const chatbotSkill = loadChatbotSkill();

  // Load tech-specific skills if available
  const techContext = loadTechSkills();

  // Load intelligence (learned patterns)
  const learnedContext = appendLearnedContext();

  let systemPrompt;

  if (chatbotSkill) {
    // Skill file exists — use it as the primary instruction set
    systemPrompt = chatbotSkill;
    if (techContext) {
      systemPrompt += `\n\n## Technology Guidelines\n${techContext}`;
    }
  } else {
    // NOTE: External prompt file is auto-generated from inline prompt below.
    // We always rebuild from inline to pick up code-level improvements.
    // The external file is only used as a cache/reference.
    // No skill file — use inline prompt (backwards compatible)
    systemPrompt =
      `${getSystemPrompt()}\n\n` +
      `You are a smart, experienced assistant for the "${projectName}" project. ` +
      `You communicate naturally, like a senior developer colleague — friendly, knowledgeable, and proactive.\n\n` +
      `## How You Work\n\n` +
      `You are a Pipeline Controller with FULL AUTHORITY to execute pipeline actions.\n` +
      `You CAN and SHOULD: reset pipeline to idle, trigger reviews/fixes/deployments, start features, clean up workspace.\n` +
      `NEVER say "I can't", "I'm text-only", or "I don't have the ability to". You ARE the pipeline controller.\n` +
      `When the user asks you to do something, DO IT by outputting the action JSON. Do NOT explain what commands they should type.\n\n` +
      `## Conversation Guidelines\n\n` +
      `- Respond naturally and helpfully to any question — about the project, code, architecture, best practices, etc.\n` +
      `- Provide thoughtful analysis, suggestions, explanations, and trade-offs.\n` +
      `- Keep responses concise but informative (2-4 paragraphs max).\n` +
      `- Be conversational — avoid robotic or formulaic responses.\n\n` +
      `## Smart Intent Detection (CRITICAL)\n\n` +
      `When the user sends a message, classify it into one of these categories:\n\n` +
      `### Category 1: Bug Report / Something Broken\n` +
      `Signals: "doesn't work", "broken", "error", "crash", "wrong", "fails", "not showing", "missing", "issue"\n` +
      `Response:\n` +
      `1. Acknowledge the problem and show you understand it\n` +
      `2. Analyze what might be causing it — explain possible root causes\n` +
      `3. Suggest what the fix might involve\n` +
      `4. Then, output the following JSON (no backticks, no code fences, on its own line) to let the user confirm via button:\n` +
      `{"_action":"create_feature","description":"<short summary>","type":"bug","confirm":true}\n` +
      `The user will see a confirmation button. Do NOT ask them to type "yes" — they will tap a button instead.\n\n` +
      `### Category 2: Feature Request / Enhancement\n` +
      `Signals: "add", "implement", "create", "build", "want", "need", "can we", "would be nice", "improve"\n` +
      `Response:\n` +
      `1. Discuss the idea — what it would involve, how it fits the project\n` +
      `2. Mention any considerations, trade-offs, or alternatives\n` +
      `3. Then, output the following JSON (no backticks, no code fences, on its own line) to let the user confirm via button:\n` +
      `{"_action":"create_feature","description":"<short summary>","type":"feature","confirm":true}\n` +
      `The user will see a confirmation button. Do NOT ask them to type "yes" — they will tap a button instead.\n\n` +
      `### Category 3: General Question / Discussion\n` +
      `Signals: "how", "what", "why", "explain", "status", "help", "hello", "hi"\n` +
      `Response: Just answer the question naturally. Do NOT suggest /bug or /feature unless directly relevant.\n\n` +
      `### Category 4: Pipeline Execution Request\n` +
      `Signals: User explicitly asks you to DO something — "fix it", "run a review", "approve",\n` +
      ` "deploy", "do it now", "go ahead", "start implementing"\n` +
      `Response: Output ONE of these action JSON objects (raw, no backticks) to execute the action.\n` +
      `- User wants fix/blockers addressed → {"_action":"run_fix"}\n` +
      `- User wants a code review → {"_action":"run_review"}\n` +
      `- User wants fix AND review → {"_action":"run_fix_and_review"}\n` +
      `- User wants to approve → {"_action":"run_approve"}\n` +
      `- User wants to deploy → {"_action":"run_deploy"}\n` +
      `- User wants to start/resume implementation → {"_action":"run_implement"}\n` +
      `- User wants to reset/go idle/abandon feature → {"_action":"run_reset"}\n` +
      `- User wants to clean up workspace → {"_action":"run_cleanup"}\n` +
      `You MAY add a SHORT 1-sentence confirmation before the JSON (e.g., "On it!"), but output the JSON on its own line.\n` +
      `CRITICAL: When the user tells you to do something, OUTPUT THE ACTION JSON. Do NOT just describe what they could do.\n\n` +
      `### Category 5: Status / History Questions\n` +
      `Signals: "what stage", "did it finish", "is it done", "what happened", "status", "where are we"\n` +
      `Response: Answer from the Current Pipeline State context above. Be specific and factual.\n\n` +
      `## Key Principle\n` +
      `Be helpful FIRST. Analyze and discuss FIRST. Only suggest pipeline actions when clearly appropriate. ` +
      `For bugs and features (Cat 1 & 2), always include the confirm JSON so buttons are shown. ` +
      `Never auto-create bugs or features — the user confirms via inline button.\n\n` +
      `## Available Commands\n` +
      `/status — Pipeline status | /feature <desc> — New feature | /bug <desc> — Bug fix\n` +
      `/implement — Run/restart implementation | /review — Code review\n` +
      `/approve — Approve | /reject <reason> — Reject | /deploy — Deploy\n` +
      `/logs — View logs | /health — System health`;

    if (techContext) {
      systemPrompt += `\n\n## Technology Guidelines\n${techContext}`;
    }

    // Save this prompt as the external version (reference/cache only — inline always wins)
    saveExternalPrompt(systemPrompt);
  }

  // Inject learned context — past user phrases → intent mappings
  if (learnedContext) {
    systemPrompt += learnedContext;
  }

  // Add current pipeline status as context — include mode-specific behaviour rules
  try {
    const status = getStatus();
    const isAuto = status.pipeline_mode === 'auto';
    const statusContext =
      `\n\nCurrent Pipeline State:\n` +
      `- stage: "${status.stage || 'idle'}"\n` +
      `- feature: "${status.current_feature || 'none'}"\n` +
      `- mode: "${status.pipeline_mode || 'manual'}"\n` +
      `- next: "${status.next || '-'}"\n\n` +
      (isAuto
        ? `IMPORTANT: Pipeline is in AUTO mode. All stages (spec→arch→impl→review→approve) run automatically without user intervention. ` +
          `Do NOT ask the user for permission to run the next stage — it already ran or is running. ` +
          `If the user asks "have you done X?", check the stage above and give a factual status answer.`
        : `Pipeline is in MANUAL mode. Each stage needs user sign-off before auto-advancing. ` +
          `When the user explicitly asks you to run/start/fix/review/approve/deploy something, ` +
          `output the appropriate action JSON to execute it — don't tell them to type a command. ` +
          `For passive status questions, explain clearly what stage we're at and what comes next.`);
    systemPrompt += statusContext;
  } catch { /* non-fatal */ }

  // Inject codebase context for richer AI responses
  try {
    const codeCtx = getContextString(getRootDir(), { maxDepth: 2, includeGit: false });
    if (codeCtx) systemPrompt += `\n\n## Project Context\n${codeCtx.slice(0, 2000)}`;
  } catch { /* non-fatal */ }

  const prompt = context
    ? `${context.slice(0, 3000)}\n\nCurrent message: ${question}`
    : question;

  try {
    return await runAI(prompt, { system: systemPrompt });
  } catch (err) {
    return { error: err.message };
  }
}
|
|
2207
|
+
|
|
2208
|
+
/**
 * Load technology-specific skill files (configured in ai.techSkills) from the
 * skills directory and concatenate them, separated by '---' dividers.
 * YAML front matter is stripped from each file; missing or empty files are
 * skipped. Returns null when no skills are configured or loading fails.
 */
function loadTechSkills() {
  try {
    const config = getConfig();
    const names = config.ai?.techSkills || [];
    if (names.length === 0) return null;

    const root = getRootDir();
    const skillsDir = config.skillsDir || '.claude/skills';

    const bodies = [];
    for (const name of names) {
      const path = resolve(root, `${skillsDir}/${name}.md`);
      if (!existsSync(path)) continue;
      // Strip YAML front matter if present; skip files that end up empty.
      const trimmed = readFileSync(path, 'utf8').replace(/^---[\s\S]*?---\n*/m, '').trim();
      if (trimmed) bodies.push(trimmed);
    }
    return bodies.join('\n\n---\n\n');
  } catch {
    return null;
  }
}
|
|
2234
|
+
|
|
2235
|
+
/**
 * AI-provider status payload for the dashboard: current AI mode plus Ollama
 * availability, the configured model, and the list of installed models.
 */
export async function getAIProviderStatus() {
  const { available, models } = await isOllamaAvailable();
  return {
    mode: getAIMode(),
    ollama: {
      available,
      model: getOllamaModel(),
      models,
    },
  };
}
|
|
2249
|
+
|
|
2250
|
+
// ─── P0 Query Wrappers ──────────────────────────────────────────────────────
|
|
2251
|
+
// Expose cost, audit, and plugin data for web API and Telegram commands.
|
|
2252
|
+
|
|
2253
|
+
/**
 * Cost summary for a feature (or overall when featureId is omitted), with a
 * preformatted text rendering attached for display.
 */
export function getCostsData(featureId) {
  const summary = getCostSummary(featureId);
  const formatted = formatCostSummary(summary, 'text');
  return { ...summary, formatted };
}
|
|
2257
|
+
|
|
2258
|
+
/** Expose audit log entries (filtered) for the web API and Telegram commands. */
export function getAuditData(filters) {
  const entries = getAuditEntries(filters);
  return entries;
}
|
|
2261
|
+
|
|
2262
|
+
/** Expose plugin status for the web API and Telegram commands. */
export function getPluginsData() {
  const plugins = getPluginStatus();
  return plugins;
}
|
|
2265
|
+
|
|
2266
|
+
// ─── Browser QA & Bugfix Loop ────────────────────────────────────────────────
|
|
2267
|
+
|
|
2268
|
+
/**
 * Run the browser QA agent — thin delegate to the browser-test action.
 * @param {object} [options] — passed through unchanged.
 */
export async function runBrowserQA(options = {}) {
  const result = await browserTestAction(options);
  return result;
}
|
|
2271
|
+
|
|
2272
|
+
/**
 * Iteratively fix bugs reported by browser QA: build a fix prompt from the
 * QA report, run the 'bugfix' pipeline stage, re-run browser QA to verify,
 * and repeat — up to config.browserQA.maxBugfixCycles (default 3) cycles.
 * When cycles are exhausted, emits 'qa_bugfix_exhausted' on the bus so the
 * failure can be escalated.
 * @param {object} qaReport — QA report; must have a `failed` array (see buildBugContextFromQA).
 * @param {string} featureId — active feature id, passed to the verify run.
 * @param {object} config — project config (browserQA.maxBugfixCycles read here).
 * @returns {Promise<{success: boolean, cycles: number}>}
 */
export async function runBugfixFromQA(qaReport, featureId, config) {
  const maxCycles = config.browserQA?.maxBugfixCycles || 3;
  let cycle = 0;

  while (cycle < maxCycles) {
    cycle++;
    logActivity('BUGFIX', `Browser QA bug-fix cycle ${cycle}/${maxCycles}`, 'info');

    // Turn the current QA failures into a fix prompt for the Coder.
    const bugContext = buildBugContextFromQA(qaReport);

    try {
      // NOTE(review): other call sites in this file invoke
      // runPipelineStage(name, prompt, { featureId, stage, ... }); here it is
      // called as (name, prompt, config, featureId) — confirm the expected
      // signature matches this argument order.
      await runPipelineStage('bugfix', bugContext, config, featureId);
    } catch {
      // A failed fix attempt consumes a cycle and retries with the same report.
      logActivity('BUGFIX', `Fix attempt ${cycle} failed — retrying`, 'warn');
      continue;
    }

    // Re-run browser QA to verify
    const verifyResult = await browserTestAction({ featureId, config });
    if (!verifyResult.needsBugfix || verifyResult.failCount === 0) {
      logActivity('BUGFIX', `All bugs fixed after ${cycle} cycle(s)`, 'success');
      return { success: true, cycles: cycle };
    }

    // Feed the fresh failure report into the next cycle's prompt.
    qaReport = verifyResult.report;
  }

  logActivity('BUGFIX', `Exhausted ${maxCycles} bugfix cycles — escalating to CEO`, 'error');
  bus.emitEvent('qa_bugfix_exhausted', {
    cycles: maxCycles,
    remainingFails: qaReport.failed.length,
  });
  return { success: false, cycles: cycle };
}
|
|
2306
|
+
|
|
2307
|
+
/**
 * Build the prompt handed to the bug-fix pipeline stage from a browser QA
 * report: one section per failed page (URL, collected errors, screenshot
 * path) followed by a header with the pass rate and fixed fixer instructions.
 * @param {{summary?: {passRate?: number}, failed: Array<{url: string, errors: string[], screenshot?: string}>}} qaReport
 * @returns {string} markdown-ish prompt text.
 */
function buildBugContextFromQA(qaReport) {
  const sections = [];
  for (const f of qaReport.failed) {
    const errorLines = f.errors.map(e => ` - ${e}`).join('\n');
    sections.push(`
URL: ${f.url}
Errors:
${errorLines}
Screenshot: ${f.screenshot || 'not captured'}
`);
  }
  const failSummary = sections.join('\n---\n');

  return `
## Browser QA Report — Fix Required

Pass Rate: ${qaReport.summary?.passRate || 0}%
Failed Pages: ${qaReport.failed.length}

## Failed Pages
${failSummary}

## Instructions
- Read the codebase to identify which files cause these errors
- Fix ONLY the identified bugs, do not refactor unrelated code
- Do NOT modify test files to suppress errors
- After each fix, the QA agent will re-test automatically
`;
}
|