@fermindi/pwn-cli 0.6.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli/backlog.js +73 -0
- package/cli/batch.js +106 -14
- package/cli/index.js +7 -29
- package/cli/inject.js +8 -33
- package/cli/update.js +31 -24
- package/package.json +6 -3
- package/src/core/inject.js +18 -39
- package/src/core/state.js +0 -1
- package/src/core/validate.js +1 -3
- package/src/index.js +0 -1
- package/src/services/batch-runner.js +860 -0
- package/src/services/batch-service.js +115 -21
- package/src/ui/backlog-viewer.js +394 -0
- package/templates/workspace/.ai/README.md +20 -0
- package/templates/workspace/.ai/batch/prompt.md +36 -0
- package/templates/workspace/.ai/batch/tasks/.gitkeep +0 -0
- package/cli/codespaces.js +0 -303
- package/cli/migrate.js +0 -466
- package/cli/mode.js +0 -206
- package/cli/notify.js +0 -135
- package/src/services/notification-service.js +0 -342
- package/templates/codespaces/devcontainer.json +0 -52
- package/templates/codespaces/setup.sh +0 -70
- package/templates/workspace/.ai/config/notifications.template.json +0 -20
- package/templates/workspace/.claude/commands/mode.md +0 -104
- package/templates/workspace/.claude/settings.json +0 -24
|
@@ -0,0 +1,860 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* PWN Batch Runner — Node.js TUI with chalk
|
|
3
|
+
*
|
|
4
|
+
* Two-phase execution model:
|
|
5
|
+
* Phase 1: Planning — Claude estimates time and creates action plan
|
|
6
|
+
* Phase 2: Execution — Dynamic timeout from complexity tier + 5% headroom
|
|
7
|
+
*
|
|
8
|
+
* Task files (.ai/batch/tasks/{US-ID}.json) track status per story.
|
|
9
|
+
* Completed files are cleaned up at the end; failed are kept for review.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { spawn } from 'child_process';
|
|
13
|
+
import { existsSync, readFileSync, writeFileSync, mkdirSync, createWriteStream, appendFileSync, unlinkSync, readdirSync } from 'fs';
|
|
14
|
+
import { join } from 'path';
|
|
15
|
+
import ora from 'ora';
|
|
16
|
+
import chalk from 'chalk';
|
|
17
|
+
import {
|
|
18
|
+
parsePrdTasks,
|
|
19
|
+
selectNextTask,
|
|
20
|
+
markStoryDone,
|
|
21
|
+
runQualityGate,
|
|
22
|
+
loadConfig,
|
|
23
|
+
commitTask,
|
|
24
|
+
updateBatchState,
|
|
25
|
+
clearBatchState
|
|
26
|
+
} from './batch-service.js';
|
|
27
|
+
|
|
28
|
+
// --- Constants ---
const RUNNER_VERSION = '2.1'; // runner revision tag (consumers not visible in this chunk)
const DEFAULT_TIMEOUT_MS = 900_000; // 15 minutes fallback
const MIN_TIMEOUT_MS = 300_000; // 5 minutes minimum (claude init ~30-40s + real work)

// Complexity → timeout mapping (based on real-world execution data)
// AI is bad at estimating seconds but decent at classifying complexity
const COMPLEXITY_TIMEOUT = {
  low: { seconds: 300, label: '5m' }, // config change, small fix
  medium: { seconds: 600, label: '10m' }, // new function, 1-3 files
  high: { seconds: 900, label: '15m' }, // new module, multi-file refactor
};
const PLAN_TIMEOUT_MS = 120_000; // 2 minutes for planning phase (claude init ~30s)
const DEFAULT_RATE_LIMIT_WAIT = 1800; // 30 minutes (seconds)
const MAX_RETRIES = 2; // execution retries per story before it is marked failed
const MAX_NO_PROGRESS = 3; // circuit breaker
// Heuristic match for API rate-limit / quota errors in claude CLI output
const RATE_LIMIT_RE = /rate.?limit|429|overloaded|hit your limit|quota exceeded|resets|too many requests/i;
// Braille frames shared by the \r-based status spinners
const SPINNER_FRAMES = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
|
|
46
|
+
|
|
47
|
+
// --- Task File CRUD ---
|
|
48
|
+
|
|
49
|
+
/**
 * Resolve the directory that holds per-story task files
 * (.ai/batch/tasks) under the given workspace root.
 */
function getTasksDir(cwd) {
  const segments = ['.ai', 'batch', 'tasks'];
  return join(cwd, ...segments);
}
|
|
52
|
+
|
|
53
|
+
/**
 * Persist a task record as pretty-printed JSON at
 * .ai/batch/tasks/{id}.json, creating the directory if needed.
 */
export function saveTaskFile(taskData, cwd) {
  const dir = getTasksDir(cwd);
  mkdirSync(dir, { recursive: true });
  const target = join(dir, `${taskData.id}.json`);
  writeFileSync(target, JSON.stringify(taskData, null, 2));
}
|
|
59
|
+
|
|
60
|
+
/**
 * Read a task record back from disk.
 * @returns {object|null} the parsed record, or null when no file exists.
 */
export function loadTaskFile(taskId, cwd) {
  const file = join(getTasksDir(cwd), `${taskId}.json`);
  return existsSync(file) ? JSON.parse(readFileSync(file, 'utf8')) : null;
}
|
|
65
|
+
|
|
66
|
+
/**
 * Remove the on-disk record for a task id.
 * @returns {boolean} true when a file was actually deleted.
 */
export function deleteTaskFile(taskId, cwd) {
  const file = join(getTasksDir(cwd), `${taskId}.json`);
  if (!existsSync(file)) return false;
  unlinkSync(file);
  return true;
}
|
|
74
|
+
|
|
75
|
+
/**
 * Load every task record under .ai/batch/tasks.
 * @param {{statusFilter?: string}} [opts] - when statusFilter is set,
 *   only records whose status matches are returned.
 * @returns {object[]} parsed task records ([] when the directory is absent).
 */
export function listTaskFiles(cwd, { statusFilter } = {}) {
  const dir = getTasksDir(cwd);
  if (!existsSync(dir)) return [];
  const tasks = readdirSync(dir)
    .filter((name) => name.endsWith('.json'))
    .map((name) => JSON.parse(readFileSync(join(dir, name), 'utf8')));
  return statusFilter ? tasks.filter((t) => t.status === statusFilter) : tasks;
}
|
|
83
|
+
|
|
84
|
+
// --- Rate Limit Handling ---
|
|
85
|
+
|
|
86
|
+
/**
 * Heuristic: does CLI output look like an API rate-limit / quota error?
 * Fix: coerce to a strict boolean — the original returned the raw falsy
 * `output` value ('' / null / undefined) instead of false, leaking the
 * argument through the return. Callers only test truthiness, so this is
 * backward compatible.
 */
function isRateLimitError(output) {
  return Boolean(output) && RATE_LIMIT_RE.test(output);
}
|
|
89
|
+
|
|
90
|
+
/**
 * Block until a rate-limit cooldown elapses, drawing a one-line countdown
 * spinner on stdout. Resolves once the full wait has passed.
 * @param {number} waitSeconds - cooldown length in seconds.
 * @param {number} attempt - 1-based count of rate-limit waits so far.
 */
async function waitForRateLimit(waitSeconds, attempt) {
  const resumeAt = new Date(Date.now() + waitSeconds * 1000);
  console.log(chalk.yellow(` Rate limited (wait #${attempt}). Cooling down ${formatDuration(waitSeconds)}...`));
  console.log(chalk.dim(` Resumes at: ${resumeAt.toLocaleTimeString()}`));

  // Count down in 1-second ticks, redrawing the status line each tick.
  for (let remaining = waitSeconds; remaining > 0; ) {
    const frame = SPINNER_FRAMES[Math.floor(Date.now() / 80) % SPINNER_FRAMES.length];
    process.stdout.write(`\r\x1b[K ${chalk.yellow(frame)} Rate limit cooldown: ${chalk.bold(formatDuration(remaining))} remaining`);
    const step = Math.min(remaining, 1);
    await sleep(step * 1000);
    remaining -= step;
  }
  process.stdout.write('\r\x1b[K');
  console.log(chalk.green(' Cooldown complete. Retrying...'));
}
|
|
106
|
+
|
|
107
|
+
// --- Planning Phase ---
|
|
108
|
+
|
|
109
|
+
/**
 * Build the Phase-1 planning prompt for a story.
 *
 * Looks the story up in .ai/tasks/prd.json and asks the model to classify
 * complexity, recommend an execution model, and list likely-affected files
 * as a bare JSON object (parsed later by planTask).
 *
 * @param {{id: string, title: string}} task - story selected for execution.
 * @param {string} cwd - workspace root containing .ai/tasks/prd.json.
 * @param {{previousComplexity: string, failureReason: string}|null} replanContext
 *   - when set, appends a replanning section describing the prior failure.
 * @returns {string} the prompt text, or '' when the story id is not in prd.json.
 *   NOTE(review): throws if prd.json is missing or invalid JSON — runBatch
 *   pre-flight-checks the file's existence before calling into planning.
 */
function buildPlanPrompt(task, cwd, replanContext = null) {
  const prdPath = join(cwd, '.ai', 'tasks', 'prd.json');
  const prd = JSON.parse(readFileSync(prdPath, 'utf8'));
  const story = prd.stories.find(s => s.id === task.id);
  if (!story) return '';

  // Bullet list of acceptance criteria; 'None' when the list is empty/absent.
  const acList = (story.acceptance_criteria || []).map(ac => `- ${ac}`).join('\n') || 'None';

  let replanSection = '';
  if (replanContext) {
    replanSection = `

IMPORTANT — REPLANNING: A previous attempt FAILED.
- Previous complexity: ${replanContext.previousComplexity}
- Failure reason: ${replanContext.failureReason}
Re-evaluate the complexity. If it timed out, it's likely MORE complex than you initially thought.`;
  }

  return `You are analyzing task ${task.id}: ${task.title}

Acceptance criteria:
${acList}

Notes: ${story.notes || 'None'}
${replanSection}
Analyze this task and respond with ONLY a JSON object (no markdown, no code fences):
{
  "plan": ["step 1", "step 2", ...],
  "complexity": "low|medium|high",
  "recommended_model": "opus|sonnet|haiku",
  "files_likely_affected": ["path1", "path2"]
}

Classify complexity:
- "low": config change, small fix, single file tweak
- "medium": new function, simple feature, 1-3 files
- "high": new module, multi-file refactor, architecture change, 4+ files

Recommend a model for execution:
- "haiku": trivial (config change, typo, single-line fix)
- "sonnet": low-medium complexity (new function, simple feature, 1-3 files)
- "opus": high complexity (new module, multi-file refactor, architecture)`;
}
|
|
152
|
+
|
|
153
|
+
/**
 * Phase 1: ask Claude to produce an action plan for a story.
 *
 * Spawns `claude --model opus --print` through bash, feeding the prompt on
 * stdin via "$(cat)" to sidestep shell escaping. The planning call is
 * read-only: it deliberately omits --dangerously-skip-permissions.
 *
 * @param {{id: string, title: string}} task - story to plan.
 * @param {string} cwd - workspace root.
 * @param {object|null} replanContext - forwarded to buildPlanPrompt.
 * @returns {Promise<object|null>} the parsed plan JSON, or null on any
 *   failure (timeout/kill, non-zero exit, spawn error, unparseable output);
 *   the caller falls back to a default timeout in that case. Never rejects.
 */
async function planTask(task, cwd, replanContext = null) {
  const prompt = buildPlanPrompt(task, cwd, replanContext);
  if (!prompt) return null;

  const env = { ...process.env };
  // Remove all Claude session markers to avoid "nested session" detection
  Object.keys(env).forEach(k => { if (k.startsWith('CLAUDE')) delete env[k]; });

  return new Promise((resolve) => {
    let output = '';

    // Planning uses --print WITHOUT --dangerously-skip-permissions (read-only)
    const child = spawn('bash', [
      '-c',
      `claude --model opus --print -p "$(cat)"`,
    ], {
      cwd,
      stdio: ['pipe', 'pipe', 'pipe'],
      env,
    });

    // Pass prompt via stdin
    child.stdin.write(prompt);
    child.stdin.end();

    // Hard-kill the planner if it exceeds the planning budget.
    const timeoutId = setTimeout(() => {
      child.kill('SIGTERM');
    }, PLAN_TIMEOUT_MS);

    // stdout and stderr are merged: the JSON extraction below scans both.
    child.stdout.on('data', (data) => { output += data.toString(); });
    child.stderr.on('data', (data) => { output += data.toString(); });

    child.on('close', (code, signal) => {
      clearTimeout(timeoutId);

      if (signal) {
        // Killed (most likely our own SIGTERM timeout) — no plan available.
        console.log(chalk.dim(` Planning killed by ${signal} (timeout=${signal === 'SIGTERM' ? 'likely' : 'no'})`));
        resolve(null);
        return;
      }
      if (code !== 0) {
        console.log(chalk.dim(` Planning exited with code ${code}`));
        resolve(null);
        return;
      }

      // Try to parse JSON from output
      try {
        let jsonStr = output.trim();
        // Extract the outermost {...} span in case the model wrapped the
        // JSON in prose despite the "ONLY a JSON object" instruction.
        const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
        if (jsonMatch) jsonStr = jsonMatch[0];
        const parsed = JSON.parse(jsonStr);
        resolve(parsed);
      } catch (e) {
        console.log(chalk.dim(` Planning JSON parse failed: ${e.message}`));
        console.log(chalk.dim(` Raw output (first 200 chars): ${output.slice(0, 200)}`));
        resolve(null);
      }
    });

    child.on('error', (err) => {
      clearTimeout(timeoutId);
      console.log(chalk.dim(` Planning spawn error: ${err.message}`));
      resolve(null);
    });
  });
}
|
|
220
|
+
|
|
221
|
+
/**
 * Convert a planner estimate (seconds) into a hard timeout in milliseconds:
 * estimate + 5% headroom, never below MIN_TIMEOUT_MS. Missing or
 * non-positive estimates fall back to DEFAULT_TIMEOUT_MS.
 */
function computeTimeout(estimatedSeconds) {
  const noEstimate = !estimatedSeconds || estimatedSeconds <= 0;
  return noEstimate
    ? DEFAULT_TIMEOUT_MS
    : Math.max(Math.ceil(estimatedSeconds * 1.05) * 1000, MIN_TIMEOUT_MS);
}
|
|
225
|
+
|
|
226
|
+
/**
 * Main entry point for the TUI batch runner.
 *
 * Loop: pick the next eligible story (dependencies satisfied, phase/id
 * filters respected), optionally run a planning phase to size its timeout
 * and model, execute it via the claude CLI, then verify with quality gates.
 * Stops when no eligible story remains, maxIterations is reached, or
 * MAX_NO_PROGRESS consecutive stories fail (circuit breaker).
 *
 * @param {object} [options] - noPlan, rateLimitWait, phase, filter,
 *   maxIterations, dryRun (all optional).
 * @param {string} [cwd] - workspace root, defaults to process.cwd().
 *   Exits the process with code 1 when prd.json or prompt.md is missing.
 */
export async function runBatch(options = {}, cwd = process.cwd()) {
  const prdPath = join(cwd, '.ai', 'tasks', 'prd.json');
  const promptPath = join(cwd, '.ai', 'batch', 'prompt.md');
  const logDir = join(cwd, 'logs');
  const progressPath = join(cwd, '.ai', 'batch', 'progress.txt');
  const noPlan = options.noPlan || false;
  const rateLimitWait = options.rateLimitWait || DEFAULT_RATE_LIMIT_WAIT;

  // --- Pre-flight checks ---
  if (!existsSync(prdPath)) {
    console.log(chalk.red('Error:') + ` ${prdPath} not found`);
    process.exit(1);
  }
  if (!existsSync(promptPath)) {
    console.log(chalk.red('Error:') + ` ${promptPath} not found`);
    process.exit(1);
  }

  mkdirSync(logDir, { recursive: true });
  mkdirSync(getTasksDir(cwd), { recursive: true });

  const stories = parsePrdTasks(cwd);
  const totalStories = stories.length;
  const doneAtStart = stories.filter(s => s.passes).length;
  const phaseFilter = options.phase ? `Phase ${options.phase}` : undefined;
  const taskFilter = options.filter || null;

  // Count remaining eligible stories (respecting filters)
  const doneIds = stories.filter(s => s.passes).map(s => s.id);
  const filterRe = taskFilter ? new RegExp(taskFilter, 'i') : null;
  const eligibleCount = stories.filter(s =>
    !s.passes &&
    s.dependencies.every(dep => doneIds.includes(dep)) &&
    (!phaseFilter || s.phase === phaseFilter) &&
    (!filterRe || filterRe.test(s.id) || filterRe.test(s.title))
  ).length;
  const maxIterations = options.maxIterations || eligibleCount;

  // --- Dry run ---
  if (options.dryRun) {
    return dryRunPreview(cwd, phaseFilter, maxIterations, taskFilter);
  }

  // --- Print header ---
  printHeader(maxIterations, phaseFilter, totalStories, doneAtStart, noPlan, cwd, taskFilter);

  // NO custom SIGINT handler — Ctrl+C uses default Node.js behavior (kills process group)

  // --- Main loop ---
  let iteration = 0;
  let noProgressCount = 0; // consecutive failed stories (circuit-breaker input)
  let storiesCompleted = 0;
  const batchStart = Date.now();

  updateBatchState({
    started_at: new Date().toISOString(),
    status: 'running',
    completed: [],
    max_tasks: maxIterations
  }, cwd);

  while (iteration < maxIterations) {
    iteration++;

    const task = selectNextTask(cwd, { phase: phaseFilter, filter: taskFilter });
    if (!task) {
      console.log(chalk.green('\nAll eligible stories completed!'));
      break;
    }

    // Re-read the PRD each iteration: earlier iterations mark stories done.
    const currentDone = parsePrdTasks(cwd).filter(s => s.passes).length;

    console.log(chalk.dim(`\n--- Iteration ${iteration}/${maxIterations} ---`));
    console.log(`${chalk.cyan(`[${currentDone + 1}/${totalStories}]`)} ${chalk.bold(task.id)}: ${task.title}`);

    // --- Phase 1: Planning (skip if already planned) ---
    let taskTimeoutMs = DEFAULT_TIMEOUT_MS;
    let taskFile = null;

    if (!noPlan) {
      const existing = loadTaskFile(task.id, cwd);

      // A prior failure invalidates a saved plan — force a fresh one.
      const needsReplan = existing && existing.last_failure_type && existing.status !== 'completed';
      if (existing && existing.status === 'planned' && existing.complexity !== 'unknown' && !needsReplan) {
        // Reuse previous plan (only if it hasn't failed before)
        taskFile = existing;
        taskTimeoutMs = computeTimeout(existing.estimated_time_seconds);
        console.log(chalk.dim(` Phase 1: Reusing plan for ${task.id} (${existing.complexity}, ~${formatDuration(existing.estimated_time_seconds)}, model: ${existing.recommended_model || 'sonnet'})`));
      } else {
        console.log(chalk.blue(` Phase 1: Planning ${task.id}...`));
        const planSpinner = ora({ text: `Planning ${task.id}...`, indent: 2 }).start();

        const planResult = await planTask(task, cwd);

        if (planResult) {
          // Timeout comes from the complexity tier, not the model's raw
          // estimate (the model classifies better than it estimates).
          const complexity = planResult.complexity || 'medium';
          const tier = COMPLEXITY_TIMEOUT[complexity] || COMPLEXITY_TIMEOUT.medium;
          const estimatedSeconds = tier.seconds;
          const timeoutSeconds = Math.ceil(estimatedSeconds * 1.05); // +5% headroom
          taskTimeoutMs = timeoutSeconds * 1000;

          const recommendedModel = planResult.recommended_model || 'sonnet';
          taskFile = {
            id: task.id,
            title: task.title,
            status: 'planned',
            estimated_time_seconds: estimatedSeconds,
            timeout_seconds: timeoutSeconds,
            plan: planResult.plan || [],
            complexity,
            recommended_model: recommendedModel,
            files_likely_affected: planResult.files_likely_affected || [],
            created_at: new Date().toISOString(),
            completed_at: null,
            failure_reason: null
          };
          saveTaskFile(taskFile, cwd);

          planSpinner.succeed(chalk.green(
            `Planned: ${complexity} complexity, timeout ${tier.label}, model: ${recommendedModel}`
          ));
        } else {
          // Fallback when planning fails
          const fallbackSeconds = DEFAULT_TIMEOUT_MS / 1000;
          taskFile = {
            id: task.id,
            title: task.title,
            status: 'planned',
            estimated_time_seconds: fallbackSeconds,
            timeout_seconds: fallbackSeconds,
            plan: ['fallback - no plan available'],
            complexity: 'unknown', // marks this plan as non-reusable next time
            recommended_model: 'sonnet',
            files_likely_affected: [],
            created_at: new Date().toISOString(),
            completed_at: null,
            failure_reason: null
          };
          saveTaskFile(taskFile, cwd);
          taskTimeoutMs = DEFAULT_TIMEOUT_MS;

          planSpinner.warn(chalk.yellow(`Planning failed, using default timeout (${formatDuration(fallbackSeconds)})`));
        }
      }
    }

    // --- Phase 2: Execution ---
    const phaseLabel = noPlan ? '' : 'Phase 2';
    console.log(chalk.blue(` ${noPlan ? 'Executing' : 'Phase 2: Executing'} ${task.id}...`));

    let retry = 0;
    let rateLimitAttempts = 0;
    let storyDone = false;
    let errorContext = ''; // fed back into the next attempt's prompt

    while (retry <= MAX_RETRIES && !storyDone) {
      if (retry > 0) {
        console.log(chalk.yellow(` Retry ${retry}/${MAX_RETRIES}`));
      }

      const prompt = buildPrompt(task.id, cwd, prdPath, promptPath, errorContext);
      if (!prompt) {
        console.log(chalk.red(` Cannot build prompt for ${task.id} — skipping`));
        break;
      }
      const logFile = join(logDir, `${task.id}_${timestamp()}.log`);

      const estimatedSeconds = taskFile?.estimated_time_seconds || null;
      const executionModel = taskFile?.recommended_model || null;
      const result = await spawnClaude(prompt, task, iteration, maxIterations, currentDone, totalStories, phaseFilter, logFile, cwd, taskTimeoutMs, estimatedSeconds, phaseLabel, executionModel);

      // Killed by signal (user did kill or Ctrl+C) — don't retry, exit
      if (result.signal) {
        console.log(chalk.yellow(` Killed by ${result.signal}`));
        clearBatchState(cwd);
        printSummary(cwd, iteration, storiesCompleted, batchStart);
        return;
      }

      // Timeout
      if (result.timedOut) {
        // Timeout can also be caused by rate limit — check output
        if (isRateLimitError(result.output)) {
          rateLimitAttempts++;
          await waitForRateLimit(rateLimitWait, rateLimitAttempts);
          continue; // rate-limit waits do not consume a retry
        }

        const prevTimeout = Math.round(taskTimeoutMs / 1000);
        console.log(chalk.yellow(` Timed out after ${prevTimeout}s`));

        // --- Replan on timeout: bump complexity tier ---
        if (taskFile) {
          const prevComplexity = taskFile.complexity;
          const prevEstimate = taskFile.estimated_time_seconds;

          // Escalate complexity: low → medium → high, high stays high but doubles
          const escalation = { low: 'medium', medium: 'high' };
          const newComplexity = escalation[prevComplexity] || 'high';
          const tier = COMPLEXITY_TIMEOUT[newComplexity] || COMPLEXITY_TIMEOUT.high;

          // If already high, double the previous timeout
          const newEstimate = prevComplexity === 'high'
            ? prevEstimate * 2
            : tier.seconds;
          const newTimeout = Math.ceil(newEstimate * 1.05);
          taskTimeoutMs = newTimeout * 1000;

          taskFile.estimated_time_seconds = newEstimate;
          taskFile.timeout_seconds = newTimeout;
          taskFile.complexity = newComplexity;
          taskFile.recommended_model = 'opus'; // upgrade model on timeout
          taskFile.replanned_at = new Date().toISOString();
          taskFile.replan_reason = `timeout after ${prevTimeout}s (${prevComplexity} → ${newComplexity})`;
          saveTaskFile(taskFile, cwd);

          console.log(chalk.blue(
            ` Escalated: ${prevComplexity} → ${newComplexity}, timeout ${formatDuration(newTimeout)}, model: opus`
          ));
        }

        errorContext = `Session timed out after ${prevTimeout}s. The task was replanned with more time. Focus on core acceptance criteria first, then iterate.`;
        retry++;
        continue;
      }

      // Non-zero exit (not from signal) — check if it's a rate limit
      // Rate limit detection only on FAILURES. Successful output (exit 0) may
      // mention "rate limit" as a feature description, not an actual API error.
      if (result.exitCode !== 0) {
        if (isRateLimitError(result.output)) {
          rateLimitAttempts++;
          await waitForRateLimit(rateLimitWait, rateLimitAttempts);
          continue; // no retry increment — wait and try again
        }
        console.log(chalk.yellow(` Claude exited with code ${result.exitCode}`));
        errorContext = `Claude session failed with exit code ${result.exitCode}.`;

        // Save error output snippet to task file for debugging
        if (taskFile) {
          taskFile.last_error_output = (result.output || '').slice(-2000);
          taskFile.last_failure_type = 'crash';
          saveTaskFile(taskFile, cwd);
        }

        retry++;
        continue;
      }

      // Quality gates
      const gatesResult = await runGatesWithStatus(cwd);

      if (gatesResult.success) {
        console.log(chalk.green(` Quality gates PASSED`));
        markStoryDone(task.id, cwd);
        appendProgress(progressPath, task.id, 'All quality gates passed');

        const config = loadConfig(cwd);
        if (config.auto_commit) {
          await commitTask(task, {}, cwd);
        }

        storyDone = true;
        storiesCompleted++;
        noProgressCount = 0; // any success resets the circuit breaker

        // Update task file status
        if (taskFile) {
          taskFile.status = 'completed';
          taskFile.completed_at = new Date().toISOString();
          saveTaskFile(taskFile, cwd);
        }

        updateBatchState({
          completed: [task.id],
          current_task: null,
          last_completed_at: new Date().toISOString()
        }, cwd);
      } else {
        console.log(chalk.red(` Quality gates FAILED`));
        errorContext = gatesResult.errorOutput;

        // Save gate failure details to task file
        if (taskFile) {
          taskFile.last_error_output = (gatesResult.errorOutput || '').slice(-2000);
          taskFile.last_failure_type = 'quality_gate';
          saveTaskFile(taskFile, cwd);
        }

        retry++;
      }
    }

    if (!storyDone) {
      const failureType = taskFile?.last_failure_type || 'unknown';
      console.log(chalk.red(` FAILED: ${task.id} after ${MAX_RETRIES} retries (${failureType})`));
      appendProgress(progressPath, task.id, `FAILED after ${MAX_RETRIES} retries (${failureType}). Skipping.`);

      // Update task file with failure (kept on disk for post-mortem review)
      if (taskFile) {
        taskFile.status = 'failed';
        taskFile.failure_reason = errorContext || `Failed after ${MAX_RETRIES} retries`;
        taskFile.completed_at = new Date().toISOString();
        saveTaskFile(taskFile, cwd);
      }

      noProgressCount++;
    }

    if (noProgressCount >= MAX_NO_PROGRESS) {
      console.log(chalk.red(`\nCIRCUIT BREAKER: ${MAX_NO_PROGRESS} consecutive failures. Stopping.`));
      break;
    }
  }

  clearBatchState(cwd);
  printSummary(cwd, iteration, storiesCompleted, batchStart);
}
|
|
547
|
+
|
|
548
|
+
/**
 * Spawn claude with piped stdio + spinner status line.
 * The prompt is streamed over stdin via "$(cat)" to avoid shell escaping
 * issues (no temp file involved).
 * Ctrl+C works because: no custom SIGINT handler + pipe mode (child not in foreground).
 *
 * @returns {Promise<{exitCode: number, output: string, timedOut: boolean,
 *   signal: string|null}>} never rejects — spawn errors resolve with
 *   exitCode 1. `signal` is non-null only for kills NOT attributed to our
 *   own timeout.
 */
function spawnClaude(prompt, task, iteration, maxIter, done, total, phase, logFile, cwd, timeoutMs = DEFAULT_TIMEOUT_MS, estimatedSeconds = null, phaseTag = '', model = null) {
  return new Promise((resolve) => {
    let bytesReceived = 0;
    let output = '';
    const logStream = createWriteStream(logFile);
    const startTime = Date.now();

    const env = { ...process.env };
    // Remove all Claude session markers to avoid "nested session" detection
    Object.keys(env).forEach(k => { if (k.startsWith('CLAUDE')) delete env[k]; });

    const modelFlag = model ? `--model ${model} ` : '';
    const child = spawn('bash', [
      '-c',
      `claude ${modelFlag}--print --dangerously-skip-permissions -p "$(cat)"`,
    ], {
      cwd,
      stdio: ['pipe', 'pipe', 'pipe'],
      env,
    });

    // Pass prompt via stdin to avoid shell escaping issues and file race conditions
    child.stdin.write(prompt);
    child.stdin.end();

    const modelLabel = model ? chalk.magenta(model) : chalk.dim('default');
    console.log(chalk.dim(` Log: tail -f ${logFile}`));
    console.log(chalk.dim(` PID: ${child.pid} | Model: `) + modelLabel + chalk.dim(` | Prompt: ${prompt.length} chars | Timeout: ${formatDuration(Math.round(timeoutMs / 1000))}`));

    // Spinner via \r — no ora, no SIGINT hijack, no hidden cursor
    let frame = 0;
    const timer = setInterval(() => {
      const spinner = chalk.cyan(SPINNER_FRAMES[frame++ % SPINNER_FRAMES.length]);
      const elapsed = Math.round((Date.now() - startTime) / 1000);
      const bytesLabel = bytesReceived === 0
        ? chalk.dim('waiting (~2min)...')
        : chalk.green(formatBytes(bytesReceived));
      const phaseLabel = phase ? chalk.dim(` | ${phase}`) : '';
      const estLabel = estimatedSeconds
        ? ` / ${chalk.dim('~' + formatDuration(estimatedSeconds))}`
        : '';
      const phaseTagLabel = phaseTag ? chalk.dim(` | ${phaseTag}`) : '';
      const modelIndicator = model ? chalk.dim(` | ${model}`) : '';
      const line = ` ${spinner} ${chalk.cyan(`[${done + 1}/${total}]`)} ${task.id}: Executing | Iter ${iteration}/${maxIter} | ${chalk.yellow(formatDuration(elapsed))}${estLabel} | ${bytesLabel}${phaseLabel}${phaseTagLabel}${modelIndicator}`;
      process.stdout.write(`\r\x1b[K${line}`);
    }, 80);

    // Hard kill when the per-task timeout elapses.
    const timeoutId = setTimeout(() => {
      child.kill('SIGTERM');
    }, timeoutMs);

    // stdout and stderr are merged into `output` and tee'd to the log file.
    child.stdout.on('data', (data) => {
      bytesReceived += data.length;
      output += data.toString();
      logStream.write(data);
    });
    child.stderr.on('data', (data) => {
      bytesReceived += data.length;
      output += data.toString();
      logStream.write(data);
    });

    child.on('close', (code, signal) => {
      clearTimeout(timeoutId);
      clearInterval(timer);
      logStream.end();

      // Clear spinner line
      process.stdout.write('\r\x1b[K');

      const elapsed = Math.round((Date.now() - startTime) / 1000);
      // Timeout detection: SIGTERM signal, OR exit code 143 (128+SIGTERM) near the timeout boundary
      const nearTimeout = elapsed >= Math.floor(timeoutMs / 1000) - 2;
      const timedOut = nearTimeout && (signal === 'SIGTERM' || code === 143);

      if (signal && !timedOut) {
        // External kill (e.g. user SIGKILL/SIGINT) — surface the signal so
        // the caller aborts the whole batch instead of retrying.
        console.log(chalk.dim(` Claude killed (${signal}) after ${formatDuration(elapsed)}`));
        resolve({ exitCode: code, output, timedOut: false, signal });
        return;
      }

      console.log(chalk.dim(` Claude finished in ${formatDuration(elapsed)} | ${formatBytes(bytesReceived)}`));
      resolve({ exitCode: code ?? 1, output, timedOut, signal: null });
    });

    child.on('error', (err) => {
      clearTimeout(timeoutId);
      clearInterval(timer);
      logStream.end();
      process.stdout.write('\r\x1b[K');
      console.log(chalk.red(` Spawn error: ${err.message}`));
      resolve({ exitCode: 1, output: '', timedOut: false, signal: null });
    });
  });
}
|
|
648
|
+
|
|
649
|
+
/**
 * Run quality gates with real-time PASS/FAIL display.
 * Gates listed in config.skip_gates are skipped; each remaining gate is
 * executed sequentially with a spinner.
 * @returns {Promise<{success: boolean, errorOutput: string}>} success is
 *   true only when no gate failed; errorOutput concatenates failure details.
 */
async function runGatesWithStatus(cwd) {
  const config = loadConfig(cwd);
  const skipSet = new Set(config.skip_gates || []);
  const failures = [];

  for (const gate of config.quality_gates || []) {
    if (skipSet.has(gate)) {
      console.log(chalk.dim(` ${gate}: SKIP`));
      continue;
    }

    const spinner = ora({ text: `Running ${gate}...`, indent: 2 }).start();
    const result = await runQualityGate(gate, cwd);

    if (result.success || result.skipped) {
      spinner.succeed(chalk.green(`${gate}: PASS`));
    } else {
      spinner.fail(chalk.red(`${gate}: FAIL`));
      failures.push(`=== ${gate} ===\n${result.error || result.stderr || ''}\n\n`);
    }
  }

  return { success: failures.length === 0, errorOutput: failures.join('') };
}
|
|
679
|
+
|
|
680
|
+
/**
 * Build the prompt from template, substituting placeholders.
 * Renders .ai/batch/prompt.md with the story's id, title, acceptance
 * criteria, notes and completed dependencies; optionally appends the
 * previous attempt's error output.
 * NOTE(review): the cwd parameter is unused in the body — kept for
 * call-site compatibility.
 * @returns {string} the rendered prompt, or '' when the story id is no
 *   longer present in prd.json.
 */
function buildPrompt(storyId, cwd, prdPath, promptPath, extraContext) {
  const prd = JSON.parse(readFileSync(prdPath, 'utf8'));
  const story = prd.stories.find((s) => s.id === storyId);
  if (!story) {
    console.log(chalk.yellow(` Warning: story ${storyId} not found in prd.json — may have been modified during execution`));
    return '';
  }

  const doneIds = prd.stories.filter((s) => s.passes).map((s) => s.id);
  const acList = (story.acceptance_criteria || []).map((ac) => `- ${ac}`).join('\n') || 'None';

  // Only dependencies that are already done are listed.
  let depsList = 'None';
  if (story.dependencies?.length > 0) {
    const lines = [];
    for (const dep of story.dependencies) {
      if (!doneIds.includes(dep)) continue;
      const depStory = prd.stories.find((s) => s.id === dep);
      lines.push(`- ${dep}: ${depStory?.title || 'unknown'} (done)`);
    }
    depsList = lines.join('\n') || 'None';
  }

  const substitutions = {
    '{STORY_ID}': storyId,
    '{STORY_TITLE}': story.title || '',
    '{ACCEPTANCE_CRITERIA}': acList,
    '{NOTES}': story.notes || '',
    '{DEPENDENCIES}': depsList,
  };
  let prompt = readFileSync(promptPath, 'utf8');
  for (const [token, value] of Object.entries(substitutions)) {
    prompt = prompt.replaceAll(token, value);
  }

  if (extraContext) {
    prompt += `\n\n## Previous Attempt Failed\nThe previous attempt failed quality gates. Here is the error output:\n\n\`\`\`\n${extraContext}\n\`\`\`\n\nFix these issues before committing.`;
  }

  return prompt;
}
|
|
718
|
+
|
|
719
|
+
/**
 * Dry run: print a formatted preview of the stories that would be executed,
 * without running anything.
 *
 * A story is eligible when it is not done, all of its dependencies are done,
 * and it matches the optional phase / task filters.
 *
 * @param {string} cwd - Project root.
 * @param {string|null} phaseFilter - Only include stories in this phase.
 * @param {number} maxIterations - Cap on how many stories are listed.
 * @param {string|null} taskFilter - Case-insensitive regex matched against
 *   story id or title; an invalid pattern is warned about and ignored.
 */
function dryRunPreview(cwd, phaseFilter, maxIterations, taskFilter = null) {
  const stories = parsePrdTasks(cwd);
  const total = stories.length;
  const done = stories.filter(s => s.passes).length;
  const remaining = total - done;

  console.log(chalk.bold('\nDry Run Preview\n'));
  console.log(` Progress: ${chalk.green(done)}/${total} done, ${chalk.yellow(remaining)} remaining`);
  console.log(` Max iterations: ${maxIterations}`);
  if (phaseFilter) console.log(` Phase filter: ${chalk.blue(phaseFilter)}`);
  if (taskFilter) console.log(` Task filter: ${chalk.blue(taskFilter)}`);

  const doneIds = stories.filter(s => s.passes).map(s => s.id);

  // Guard against an invalid user-supplied pattern instead of crashing
  // with a raw SyntaxError from the RegExp constructor.
  let filterRe = null;
  if (taskFilter) {
    try {
      filterRe = new RegExp(taskFilter, 'i');
    } catch (err) {
      console.log(chalk.yellow(` Invalid task filter regex (${err.message}) — ignoring filter`));
    }
  }

  const eligible = stories.filter(s =>
    !s.passes &&
    s.dependencies.every(dep => doneIds.includes(dep)) &&
    (!phaseFilter || s.phase === phaseFilter) &&
    (!filterRe || filterRe.test(s.id) || filterRe.test(s.title))
  );

  if (eligible.length === 0) {
    console.log(chalk.yellow('\n No eligible stories found.'));
    return;
  }

  console.log(chalk.bold('\n Eligible stories:\n'));
  for (let i = 0; i < Math.min(eligible.length, maxIterations); i++) {
    const s = eligible[i];
    const phaseLabel = s.phase ? chalk.dim(` (${s.phase})`) : '';
    const effort = s.effort ? chalk.dim(` [${s.effort}]`) : '';

    // Show planning info if a task file from a previous planning pass exists
    const existing = loadTaskFile(s.id, cwd);
    const planInfo = existing
      ? chalk.dim(` — ${existing.complexity}, ~${formatDuration(existing.estimated_time_seconds)}, model: ${existing.recommended_model || 'sonnet'}`)
      : '';

    console.log(` ${chalk.cyan(`${i + 1}.`)} ${chalk.bold(s.id)}: ${s.title}${phaseLabel}${effort}${planInfo}`);
  }

  // Tolerate missing config keys, consistent with runGatesWithStatus and
  // printHeader (which both fall back to [] for these fields).
  const config = loadConfig(cwd);
  const gates = config.quality_gates || [];
  const skipGates = config.skip_gates || [];
  console.log(chalk.bold('\n Quality gates:'));
  for (const gate of gates) {
    if (skipGates.includes(gate)) {
      console.log(chalk.dim(` ${gate} (skip)`));
    } else {
      console.log(` ${chalk.green('>')} ${gate}`);
    }
  }

  console.log(chalk.dim('\n Run without --dry-run to execute.\n'));
}
|
|
775
|
+
|
|
776
|
+
/**
 * Print the runner banner: version, iteration budget, progress, planning
 * mode, active filters, and any gates skipped due to missing tooling.
 *
 * @param {number} maxIter - Maximum iterations for this run.
 * @param {string|null} phase - Optional phase filter to display.
 * @param {number} total - Total number of stories.
 * @param {number} done - Number of stories already done.
 * @param {boolean} [noPlan=false] - True when the planning phase is disabled.
 * @param {string} [cwd=process.cwd()] - Project root used to load config.
 * @param {string|null} [taskFilter=null] - Optional task filter to display.
 */
function printHeader(maxIter, phase, total, done, noPlan = false, cwd = process.cwd(), taskFilter = null) {
  const skippedGates = loadConfig(cwd).skip_gates || [];
  const rule = chalk.dim('─'.repeat(40));
  const planLabel = noPlan ? chalk.yellow('disabled') : chalk.green('enabled');

  const lines = [
    '',
    chalk.bold('PWN Batch Runner') + chalk.dim(` (v${RUNNER_VERSION})`),
    rule,
    ` Max iterations: ${chalk.cyan(maxIter)}`,
    ` Progress: ${chalk.green(done)}/${total} done`,
    ` Planning: ${planLabel}`,
  ];
  if (phase) lines.push(` Phase filter: ${chalk.blue(phase)}`);
  if (taskFilter) lines.push(` Task filter: ${chalk.blue(taskFilter)}`);
  if (skippedGates.length > 0) {
    lines.push(` ${chalk.yellow('⚠️ Skipping gates (no tooling):')} ${skippedGates.join(', ')}`);
  }
  lines.push(rule);

  for (const line of lines) console.log(line);
}
|
|
793
|
+
|
|
794
|
+
/**
 * Print the end-of-run summary and perform task-file cleanup: completed
 * task files are deleted, failed ones are kept on disk for review.
 *
 * @param {string} cwd - Project root.
 * @param {number} iterations - Number of iterations executed.
 * @param {number} completed - Stories completed during this run.
 * @param {number} startTime - Run start time (ms epoch) used for duration.
 */
function printSummary(cwd, iterations, completed, startTime) {
  const stories = parsePrdTasks(cwd);
  const total = stories.length;
  const doneCount = stories.filter(s => s.passes).length;
  const remaining = total - doneCount;
  const elapsedSeconds = Math.round((Date.now() - startTime) / 1000);

  // Cleanup pass: remove completed task files, keep failed ones for review.
  let removed = 0;
  let keptFailed = 0;
  const tasksDir = getTasksDir(cwd);
  if (existsSync(tasksDir)) {
    const taskFiles = readdirSync(tasksDir).filter(name => name.endsWith('.json'));
    for (const name of taskFiles) {
      const fullPath = join(tasksDir, name);
      try {
        const task = JSON.parse(readFileSync(fullPath, 'utf8'));
        if (task.status === 'completed') {
          unlinkSync(fullPath);
          removed++;
        } else if (task.status === 'failed') {
          keptFailed++;
        }
      } catch {
        // Unreadable/corrupt task file: cleanup is best-effort, leave it in place.
      }
    }
  }

  const rule = chalk.dim('─'.repeat(40));
  console.log('');
  console.log(chalk.bold('Batch Runner Complete'));
  console.log(rule);
  console.log(` Iterations: ${iterations}`);
  console.log(` Completed: ${chalk.green(completed)}`);
  console.log(` Total progress: ${chalk.green(doneCount)}/${total} done, ${chalk.yellow(remaining)} remaining`);
  console.log(` Duration: ${formatDuration(elapsedSeconds)}`);
  console.log(` Logs: logs/`);
  if (removed > 0 || keptFailed > 0) {
    console.log(` Cleanup: ${chalk.green(`${removed} completed`)} removed, ${keptFailed > 0 ? chalk.red(`${keptFailed} failed`) : '0 failed'} kept for review`);
  }
  console.log(rule);
  console.log('');
}
|
|
834
|
+
|
|
835
|
+
// --- Utilities ---
|
|
836
|
+
|
|
837
|
+
/**
 * Format a byte count as a human-readable size (B, KB, or MB).
 * @param {number} bytes - Non-negative byte count.
 * @returns {string} e.g. "512 B", "1.5 KB", "2.0 MB".
 */
function formatBytes(bytes) {
  const KB = 1024;
  const MB = KB * 1024;
  if (bytes < KB) return `${bytes} B`;
  return bytes < MB ? `${(bytes / KB).toFixed(1)} KB` : `${(bytes / MB).toFixed(1)} MB`;
}
|
|
842
|
+
|
|
843
|
+
/**
 * Format a duration in seconds as "Xm YYs" (or "Ys" under a minute).
 *
 * Fractional input is floored first: planning estimates such as
 * estimated_time_seconds can be non-integer, and without flooring the
 * modulo leaks float noise into the output (e.g. "1m 30.700000000000003s").
 *
 * @param {number} seconds - Duration in seconds (may be fractional).
 * @returns {string} e.g. "45s", "2m 05s".
 */
function formatDuration(seconds) {
  const whole = Math.floor(seconds);
  const m = Math.floor(whole / 60);
  const s = whole % 60;
  return m > 0 ? `${m}m ${String(s).padStart(2, '0')}s` : `${s}s`;
}
|
|
848
|
+
|
|
849
|
+
/**
 * Minute-resolution timestamp for log filenames, e.g. "2024-01-15T1030".
 * Built from the current UTC time in ISO form with ':' and '.' stripped,
 * truncated to 15 characters.
 * @returns {string} 15-character timestamp string.
 */
function timestamp() {
  const iso = new Date().toISOString();
  return iso.replaceAll(':', '').replaceAll('.', '').slice(0, 15);
}
|
|
852
|
+
|
|
853
|
+
/**
 * Async delay helper.
 * @param {number} ms - Milliseconds to wait.
 * @returns {Promise<void>} Resolves after approximately ms milliseconds.
 */
function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
|
|
856
|
+
|
|
857
|
+
/**
 * Append a completion record for a story to the progress log file.
 * @param {string} progressPath - Path to the progress file.
 * @param {string} storyId - Story that just finished.
 * @param {string} notes - One-line summary of what was done.
 */
function appendProgress(progressPath, storyId, notes) {
  const when = new Date().toISOString();
  const entry = `\n=== ${storyId} completed at ${when} ===\n- ${notes}\n`;
  appendFileSync(progressPath, entry);
}
|