mstro-app 0.3.9 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/LICENSE +191 -21
  2. package/PRIVACY.md +286 -62
  3. package/README.md +81 -58
  4. package/bin/commands/status.js +1 -1
  5. package/dist/server/cli/headless/claude-invoker.d.ts.map +1 -1
  6. package/dist/server/cli/headless/claude-invoker.js +4 -3
  7. package/dist/server/cli/headless/claude-invoker.js.map +1 -1
  8. package/dist/server/cli/headless/stall-assessor.d.ts.map +1 -1
  9. package/dist/server/cli/headless/stall-assessor.js +30 -5
  10. package/dist/server/cli/headless/stall-assessor.js.map +1 -1
  11. package/dist/server/cli/improvisation-session-manager.js +2 -2
  12. package/dist/server/cli/improvisation-session-manager.js.map +1 -1
  13. package/dist/server/services/plan/dependency-resolver.d.ts.map +1 -1
  14. package/dist/server/services/plan/dependency-resolver.js +2 -0
  15. package/dist/server/services/plan/dependency-resolver.js.map +1 -1
  16. package/dist/server/services/plan/executor.d.ts +27 -8
  17. package/dist/server/services/plan/executor.d.ts.map +1 -1
  18. package/dist/server/services/plan/executor.js +176 -80
  19. package/dist/server/services/plan/executor.js.map +1 -1
  20. package/dist/server/services/plan/parser.d.ts.map +1 -1
  21. package/dist/server/services/plan/parser.js +39 -9
  22. package/dist/server/services/plan/parser.js.map +1 -1
  23. package/dist/server/services/plan/state-reconciler.d.ts.map +1 -1
  24. package/dist/server/services/plan/state-reconciler.js +41 -1
  25. package/dist/server/services/plan/state-reconciler.js.map +1 -1
  26. package/dist/server/services/plan/types.d.ts +1 -0
  27. package/dist/server/services/plan/types.d.ts.map +1 -1
  28. package/dist/server/services/websocket/quality-handlers.js +14 -6
  29. package/dist/server/services/websocket/quality-handlers.js.map +1 -1
  30. package/dist/server/services/websocket/quality-service.d.ts +10 -0
  31. package/dist/server/services/websocket/quality-service.d.ts.map +1 -1
  32. package/dist/server/services/websocket/quality-service.js +105 -11
  33. package/dist/server/services/websocket/quality-service.js.map +1 -1
  34. package/package.json +2 -2
  35. package/server/cli/headless/claude-invoker.ts +4 -3
  36. package/server/cli/headless/stall-assessor.ts +34 -5
  37. package/server/cli/improvisation-session-manager.ts +2 -2
  38. package/server/services/plan/dependency-resolver.ts +3 -0
  39. package/server/services/plan/executor.ts +176 -80
  40. package/server/services/plan/parser.ts +41 -9
  41. package/server/services/plan/state-reconciler.ts +44 -2
  42. package/server/services/plan/types.ts +2 -0
  43. package/server/services/websocket/quality-handlers.ts +15 -7
  44. package/server/services/websocket/quality-service.ts +123 -11
@@ -49,6 +49,35 @@ export interface StallVerdict {
49
49
  reason: string;
50
50
  }
51
51
 
52
+ /** Check if Task/Agent subagents are currently pending (producing expected silence) */
53
+ function hasSubagentPending(pendingNames: Set<string>, lastToolName: string | undefined, hasPendingTools: boolean): boolean {
54
+ return pendingNames.has('Task') || pendingNames.has('Agent')
55
+ || ((lastToolName === 'Task' || lastToolName === 'Agent') && hasPendingTools);
56
+ }
57
+
58
+ /**
59
+ * Check if an Agent Teams lead is idle-waiting for teammate notifications.
60
+ * After spawning teammates (Agent tool calls complete), the lead has no pending
61
+ * tools but is legitimately waiting for teammate idle events.
62
+ */
63
+ function checkAgentTeamsWaiting(ctx: StallContext, hasPendingTools: boolean): StallVerdict | null {
64
+ // The lead may use any tool while waiting (Glob to verify outputs, Bash to
65
+ // check disk, ToolSearch, etc.), so don't gate on lastToolName. The key
66
+ // signal is: prompt contains team_name, tools were called, nothing pending.
67
+ if (
68
+ !hasPendingTools &&
69
+ ctx.totalToolCalls > 0 &&
70
+ ctx.originalPrompt.includes('team_name')
71
+ ) {
72
+ return {
73
+ action: 'extend',
74
+ extensionMs: 30 * 60_000,
75
+ reason: 'Agent Teams lead waiting for teammate idle notifications — extending 30 min',
76
+ };
77
+ }
78
+ return null;
79
+ }
80
+
52
81
  /**
53
82
  * Fast heuristic for known long-running patterns.
54
83
  * Returns a verdict immediately if the pattern is recognized, null otherwise.
@@ -87,11 +116,7 @@ function quickHeuristic(ctx: StallContext, toolWatchdogActive = false): StallVer
87
116
 
88
117
  // Task/subagent launches are known to produce long silence periods.
89
118
  // The parent Claude process emits nothing while waiting for subagent results.
90
- // Check pendingToolNames (reliable) first, fall back to lastToolName (legacy).
91
- // Claude Code renamed Task → Agent; check both for backward compatibility
92
- const hasTaskPending = pendingNames.has('Task') || pendingNames.has('Agent')
93
- || ((ctx.lastToolName === 'Task' || ctx.lastToolName === 'Agent') && hasPendingTools);
94
- if (hasTaskPending) {
119
+ if (hasSubagentPending(pendingNames, ctx.lastToolName, hasPendingTools)) {
95
120
  const extensionMin = Math.min(30, 10 + ctx.pendingToolCount * 5);
96
121
  return {
97
122
  action: 'extend',
@@ -100,6 +125,10 @@ function quickHeuristic(ctx: StallContext, toolWatchdogActive = false): StallVer
100
125
  };
101
126
  }
102
127
 
128
+ // Agent Teams lead waiting for teammate idle notifications (extracted for complexity)
129
+ const agentTeamsVerdict = checkAgentTeamsWaiting(ctx, hasPendingTools);
130
+ if (agentTeamsVerdict) return agentTeamsVerdict;
131
+
103
132
  // Multiple parallel tool calls (e.g., parallel Bash, parallel Read/Grep)
104
133
  if (ctx.pendingToolCount >= 3) {
105
134
  return {
@@ -1089,7 +1089,7 @@ export class ImprovisationSessionManager extends EventEmitter {
1089
1089
  isMaxTokens: boolean,
1090
1090
  ): void {
1091
1091
  state.retryNumber++;
1092
- const reason = isMaxTokens ? 'max_tokens hit' : 'incomplete end_turn (Haiku assessment)';
1092
+ const reason = isMaxTokens ? 'Output limit reached' : 'Task appears unfinished (AI assessment)';
1093
1093
 
1094
1094
  state.retryLog.push({
1095
1095
  retryNumber: state.retryNumber,
@@ -1113,7 +1113,7 @@ export class ImprovisationSessionManager extends EventEmitter {
1113
1113
  });
1114
1114
 
1115
1115
  this.queueOutput(
1116
- `\n[[MSTRO_AUTO_CONTINUE]] Task incomplete (${reason}) — resuming session (retry ${state.retryNumber}/${maxRetries}).\n`
1116
+ `\n[[MSTRO_AUTO_CONTINUE]] ${reason} — resuming session (retry ${state.retryNumber}/${maxRetries}).\n`
1117
1117
  );
1118
1118
  this.flushOutputQueue();
1119
1119
 
@@ -158,6 +158,9 @@ export function computeCriticalPath(issues: Issue[]): Issue[] {
158
158
  return [];
159
159
  }
160
160
 
161
+ // Set sentinel before recursing to break cycles
162
+ longestFrom.set(path, [issue]);
163
+
161
164
  let best: Issue[] = [];
162
165
  for (const dep of issue.blocks) {
163
166
  const sub = getLongest(dep);
@@ -10,8 +10,8 @@
10
10
  */
11
11
 
12
12
  import { EventEmitter } from 'node:events';
13
- import { existsSync, mkdirSync, readdirSync, readFileSync, unlinkSync, writeFileSync } from 'node:fs';
14
- import { join } from 'node:path';
13
+ import { copyFileSync, existsSync, mkdirSync, readdirSync, readFileSync, unlinkSync, writeFileSync } from 'node:fs';
14
+ import { join, resolve } from 'node:path';
15
15
  import { runWithFileLogger } from '../../cli/headless/headless-logger.js';
16
16
  import { HeadlessRunner } from '../../cli/headless/index.js';
17
17
  import { generateMcpConfig } from '../../cli/headless/mcp-config.js';
@@ -121,11 +121,19 @@ export class PlanExecutor extends EventEmitter {
121
121
  // ── Wave execution (Agent Teams) ──────────────────────────────
122
122
 
123
123
  private async executeWave(issues: Issue[]): Promise<void> {
124
+ const waveStart = Date.now();
124
125
  const waveIds = issues.map(i => i.id);
125
126
  this.metrics.currentWaveIds = waveIds;
126
127
  this.metrics.issuesAttempted += issues.length;
127
128
  this.emit('waveStarted', { issueIds: waveIds });
128
129
 
130
+ // Ensure .pm/out/ exists for execution output
131
+ const pmDir = resolvePmDir(this.workingDir);
132
+ if (pmDir) {
133
+ const outDir = join(pmDir, 'out');
134
+ if (!existsSync(outDir)) mkdirSync(outDir, { recursive: true });
135
+ }
136
+
129
137
  // Pre-approve tools so teammates don't hit interactive permission prompts
130
138
  this.installTeammatePermissions();
131
139
 
@@ -171,22 +179,30 @@ export class PlanExecutor extends EventEmitter {
171
179
  error: error instanceof Error ? error.message : String(error),
172
180
  });
173
181
  this.revertIncompleteIssues(issues);
182
+ } finally {
183
+ // Clean up temporary configs — must run even if wave throws
184
+ this.uninstallBouncerForSubagents();
185
+ this.uninstallTeammatePermissions();
174
186
  }
175
187
 
176
- // Clean up temporary configs
177
- this.uninstallBouncerForSubagents();
178
- this.uninstallTeammatePermissions();
179
-
180
- // Reconcile STATE.md after wave
188
+ // Reconcile STATE.md and sprint statuses after wave
181
189
  reconcileState(this.workingDir);
182
190
  this.emit('stateUpdated');
191
+
192
+ // Copy confirmed-done outputs to user-specified output_file paths
193
+ this.publishOutputs(issues);
194
+
195
+ // Append progress log entry
196
+ this.appendProgressEntry(issues, waveStart);
197
+
183
198
  this.metrics.currentWaveIds = [];
184
199
  }
185
200
 
186
201
  /**
187
202
  * After a wave, check each issue's status on disk.
188
- * The coordinator agent is instructed to mark issues as done via front matter,
189
- * so we trust the disk state and update metrics accordingly.
203
+ * `status: done` in issue front matter is the single completion signal.
204
+ * Output doc existence is NOT used as a proxy — code-focused issues
205
+ * (bug fixes, refactors) don't produce docs but are still valid completions.
190
206
  */
191
207
  private reconcileWaveResults(issues: Issue[]): void {
192
208
  const pmDir = resolvePmDir(this.workingDir);
@@ -202,49 +218,20 @@ export class PlanExecutor extends EventEmitter {
202
218
  if (currentStatus === 'done') {
203
219
  this.metrics.issuesCompleted++;
204
220
  this.emit('issueCompleted', issue);
205
- } else if (currentStatus === 'in_progress') {
206
- // Agent didn't finish — check if output doc exists (partial completion)
207
- const outputDoc = this.findOutputDoc(issue.id);
208
- if (outputDoc) {
209
- // Output was written but status not updated — mark done
210
- this.updateIssueFrontMatter(issue.path, 'done');
211
- this.metrics.issuesCompleted++;
212
- this.emit('issueCompleted', issue);
213
- } else {
214
- // Genuinely incomplete — revert to prior status
215
- this.updateIssueFrontMatter(issue.path, issue.status);
216
- this.emit('issueError', {
217
- issueId: issue.id,
218
- error: 'Issue did not complete during wave execution',
219
- });
220
- }
221
+ } else {
222
+ // Not done — revert to prior status
223
+ this.updateIssueFrontMatter(issue.path, issue.status);
224
+ this.emit('issueError', {
225
+ issueId: issue.id,
226
+ error: 'Issue did not complete during wave execution',
227
+ });
221
228
  }
222
229
  } catch {
223
- // File read error — treat as incomplete
224
230
  this.emit('issueError', { issueId: issue.id, error: 'Could not read issue file after wave' });
225
231
  }
226
232
  }
227
233
  }
228
234
 
229
- /**
230
- * Look for an output document matching an issue ID in .pm/docs/.
231
- */
232
- private findOutputDoc(issueId: string): string | null {
233
- const pmDir = resolvePmDir(this.workingDir);
234
- if (!pmDir) return null;
235
- const docsDir = join(pmDir, 'docs');
236
- if (!existsSync(docsDir)) return null;
237
-
238
- try {
239
- const files = readdirSync(docsDir);
240
- const prefix = issueId.toLowerCase();
241
- const match = files.find(f => f.toLowerCase().startsWith(prefix) && f.endsWith('.md'));
242
- return match ? join(docsDir, match) : null;
243
- } catch {
244
- return null;
245
- }
246
- }
247
-
248
235
  // ── Issue picking ─────────────────────────────────────────────
249
236
 
250
237
  private pickReadyIssues(): Issue[] {
@@ -268,12 +255,13 @@ export class PlanExecutor extends EventEmitter {
268
255
 
269
256
  /**
270
257
  * Build the team lead prompt for a wave of issues.
271
- * Uses Agent Teams (TeamCreate/SendMessage) for true parallel execution
272
- * as separate processes — each teammate gets its own context window.
258
+ * Uses Agent Teams for true parallel execution as separate processes —
259
+ * each teammate gets its own context window and sends idle notifications
260
+ * when done. The team is created implicitly by the first Agent(team_name=...) call.
273
261
  */
274
262
  private buildCoordinatorPrompt(issues: Issue[]): string {
275
263
  const pmDir = resolvePmDir(this.workingDir);
276
- const docsDir = pmDir ? join(pmDir, 'docs') : '.pm/docs';
264
+ const outDir = pmDir ? join(pmDir, 'out') : join(this.workingDir, '.pm', 'out');
277
265
 
278
266
  // Collect existing output docs that issues may need as input
279
267
  const existingDocs = this.listExistingDocs();
@@ -313,10 +301,9 @@ ${criteria || 'No specific criteria defined.'}
313
301
  ${issue.technicalNotes || 'None'}
314
302
  ${files}${predecessorSection}
315
303
 
316
- **Output file**: ${docsDir}/${issue.id}-${this.slugify(issue.title)}.md`;
304
+ **Output file**: ${this.resolveOutputPath(issue)}`;
317
305
  }).join('\n\n---\n\n');
318
306
 
319
- const teammateNames = issues.map(i => i.id.toLowerCase()).join(', ');
320
307
  const teamName = `pm-wave-${Date.now()}`;
321
308
 
322
309
  const teammateSpawns = issues.map(issue => {
@@ -331,7 +318,7 @@ ${files}${predecessorSection}
331
318
  ? `Read these predecessor output docs before starting: ${predecessorDocs.join(', ')}. `
332
319
  : '';
333
320
 
334
- const outputFile = `${docsDir}/${issue.id}-${this.slugify(issue.title)}.md`;
321
+ const outputFile = this.resolveOutputPath(issue);
335
322
 
336
323
  return `Spawn teammate **${issue.id.toLowerCase()}** using the **Agent** tool with \`team_name: "${teamName}"\` and \`name: "${issue.id.toLowerCase()}"\`:
337
324
  > ${predInstr}Work on issue ${issue.id}: ${issue.title}.
@@ -354,40 +341,41 @@ ${issueBlocks}
354
341
 
355
342
  ## Execution Protocol — Agent Teams
356
343
 
357
- ### Step 1: Create the team
344
+ ### Step 1: Spawn teammates
358
345
 
359
- Use **TeamCreate** to create a team named \`${teamName}\`.
346
+ Spawn all ${issues.length} teammates in parallel by sending a single message with ${issues.length} **Agent** tool calls. Each call must include \`team_name: "${teamName}"\` and a unique \`name\`. The team is created automatically when you spawn the first teammate with \`team_name\` — no separate setup step is needed.
360
347
 
361
- ### Step 2: Spawn teammates
348
+ ${teammateSpawns}
362
349
 
363
- Spawn all ${issues.length} teammates in parallel using the **Agent** tool with \`team_name\` and \`name\` parameters. Send a single message with ${issues.length} Agent tool calls.
350
+ ### Step 2: Wait for ALL teammates to complete
364
351
 
365
- ${teammateSpawns}
352
+ CRITICAL: After spawning, you MUST remain active and wait for every single teammate to finish. Each teammate automatically sends you an **idle notification** when they complete their work.
353
+
354
+ Track completion against this checklist — ALL must report idle before you proceed:
355
+ ${issues.map(i => `- [ ] ${i.id.toLowerCase()}`).join('\n')}
366
356
 
367
- ### Step 3: Monitor completion
357
+ While waiting:
358
+ - As each teammate goes idle, verify their output file exists on disk using the **Read** tool
359
+ - If a teammate has not gone idle after 15 minutes, use **SendMessage** to check on them
360
+ - Do NOT proceed to Step 3 until you have received idle notifications from ALL ${issues.length} teammates
368
361
 
369
- After spawning all teammates, poll for completion:
370
- 1. Use **SendMessage** to each teammate (${teammateNames}) asking for status
371
- 2. A teammate is done when its output file exists on disk AND the issue status is \`done\`
372
- 3. If a teammate reports completion, verify by reading the output file yourself
373
- 4. If a teammate is struggling, provide guidance via SendMessage
362
+ WARNING: The #1 failure mode is exiting before all teammates finish. If you exit early, all teammate processes are killed and their work is permanently lost. When in doubt, keep waiting. Err on the side of waiting too long rather than exiting too early.
374
363
 
375
- ### Step 4: Verify and clean up
364
+ ### Step 3: Verify outputs
376
365
 
377
- Once all teammates report done:
378
- 1. Verify each output file exists in ${docsDir}/
366
+ Once every teammate has gone idle:
367
+ 1. Verify each output file exists in ${outDir}/ using **Read** or **Glob**
379
368
  2. Verify each issue's front matter status is \`done\`
380
369
  3. If any teammate failed to write output or update status, do it yourself
381
- 4. Use **TeamDelete** to clean up the team \`${teamName}\`
382
- 5. Do NOT modify STATE.md — the orchestrator handles that
370
+ 4. Do NOT modify STATE.md — the orchestrator handles that
383
371
 
384
372
  ## Critical Rules
385
373
 
386
- - Create ONE team with TeamCreate, then spawn teammates with Agent(team_name="${teamName}", name="...").
387
- - Each teammate MUST write its output to disk. Research only in conversation is LOST.
374
+ - The team is created implicitly when you spawn the first teammate with \`team_name\`, and cleaned up automatically when all teammates exit. Your only job is to spawn teammates, wait, and verify.
375
+ - You MUST wait for idle notifications from ALL ${issues.length} teammates before exiting. Exiting early kills all teammate processes and permanently loses their work.
376
+ - Each teammate MUST write its output to disk — research only in conversation is LOST.
388
377
  - Each teammate MUST update the issue front matter status to \`done\`.
389
- - One issue per teammate — no cross-issue work.
390
- - Do not exit until ALL teammates have completed and output files are verified.`;
378
+ - One issue per teammate — no cross-issue work.`;
391
379
  }
392
380
 
393
381
  /**
@@ -509,10 +497,7 @@ Once all teammates report done:
509
497
  private installBouncerForSubagents(): void {
510
498
  const mcpJsonPath = join(this.workingDir, '.mcp.json');
511
499
 
512
- // Generate the standard MCP config (for parent --mcp-config)
513
- generateMcpConfig(this.workingDir);
514
-
515
- // Read the generated config and write it as .mcp.json for sub-agent discovery
500
+ // Generate the standard MCP config (for parent --mcp-config) and reuse for sub-agents
516
501
  try {
517
502
  const generatedPath = generateMcpConfig(this.workingDir);
518
503
  if (!generatedPath) return;
@@ -566,21 +551,82 @@ Once all teammates report done:
566
551
 
567
552
  // ── Helpers ───────────────────────────────────────────────────
568
553
 
554
+ /**
555
+ * Resolve the canonical output path for an issue in .pm/out/.
556
+ * This is the PM system's internal execution artifact — always under
557
+ * PM control. User-facing delivery to output_file happens via publishOutputs().
558
+ */
559
+ private resolveOutputPath(issue: Issue): string {
560
+ const pmDir = resolvePmDir(this.workingDir);
561
+ const outDir = pmDir ? join(pmDir, 'out') : join(this.workingDir, '.pm', 'out');
562
+ return join(outDir, `${issue.id}-${this.slugify(issue.title)}.md`);
563
+ }
564
+
565
+ /**
566
+ * List existing execution output docs in .pm/out/.
567
+ * Single canonical location — no split-brain lookup.
568
+ */
569
569
  private listExistingDocs(): string[] {
570
570
  const pmDir = resolvePmDir(this.workingDir);
571
571
  if (!pmDir) return [];
572
- const docsDir = join(pmDir, 'docs');
573
- if (!existsSync(docsDir)) return [];
572
+ const outDir = join(pmDir, 'out');
573
+ if (!existsSync(outDir)) return [];
574
574
 
575
575
  try {
576
- return readdirSync(docsDir, { recursive: true })
577
- .filter((f): f is string => typeof f === 'string' && f.endsWith('.md'))
578
- .map(f => join(docsDir, f));
576
+ return readdirSync(outDir)
577
+ .filter(f => f.endsWith('.md'))
578
+ .map(f => join(outDir, f));
579
579
  } catch {
580
580
  return [];
581
581
  }
582
582
  }
583
583
 
584
+ /**
585
+ * Copy confirmed-done outputs from .pm/out/ to user-specified output_file paths.
586
+ * Only copies for issues that completed successfully and have output_file set.
587
+ * Failures are non-fatal — the canonical artifact in .pm/out/ is always safe.
588
+ */
589
+ private publishOutputs(issues: Issue[]): void {
590
+ const pmDir = resolvePmDir(this.workingDir);
591
+ if (!pmDir) return;
592
+
593
+ for (const issue of issues) {
594
+ if (!issue.outputFile) continue;
595
+
596
+ // Only publish for confirmed-done issues
597
+ try {
598
+ const content = readFileSync(join(pmDir, issue.path), 'utf-8');
599
+ if (!content.match(/^status:\s*done$/m)) continue;
600
+ } catch { continue; }
601
+
602
+ const srcPath = this.resolveOutputPath(issue);
603
+ if (!existsSync(srcPath)) continue;
604
+
605
+ // Guard against path traversal — output_file must resolve within workingDir
606
+ const destPath = resolve(this.workingDir, issue.outputFile);
607
+ if (!destPath.startsWith(this.workingDir + '/') && destPath !== this.workingDir) {
608
+ this.emit('output', {
609
+ issueId: issue.id,
610
+ text: `Warning: output_file "${issue.outputFile}" escapes project directory — skipping`,
611
+ });
612
+ continue;
613
+ }
614
+
615
+ try {
616
+ // Ensure destination directory exists
617
+ const destDir = join(destPath, '..');
618
+ if (!existsSync(destDir)) mkdirSync(destDir, { recursive: true });
619
+ copyFileSync(srcPath, destPath);
620
+ } catch {
621
+ // Non-fatal — canonical artifact is safe in .pm/out/
622
+ this.emit('output', {
623
+ issueId: issue.id,
624
+ text: `Warning: could not copy output to ${issue.outputFile}`,
625
+ });
626
+ }
627
+ }
628
+ }
629
+
584
630
  private slugify(text: string): string {
585
631
  return text
586
632
  .toLowerCase()
@@ -601,4 +647,54 @@ Once all teammates report done:
601
647
  // Ignore errors — file may have been moved
602
648
  }
603
649
  }
650
+
651
+ /**
652
+ * Append a progress log entry after a wave completes.
653
+ * Re-reads issue files from disk to determine which actually completed.
654
+ */
655
+ private appendProgressEntry(issues: Issue[], waveStart: number): void {
656
+ const pmDir = resolvePmDir(this.workingDir);
657
+ if (!pmDir) return;
658
+ const progressPath = join(pmDir, 'progress.md');
659
+ if (!existsSync(progressPath)) return;
660
+
661
+ const durationMin = Math.round((Date.now() - waveStart) / 60_000);
662
+ const timestamp = new Date().toISOString().replace('T', ' ').slice(0, 16);
663
+
664
+ // Re-read issue statuses from disk to get accurate completion count
665
+ const completed: string[] = [];
666
+ const failed: string[] = [];
667
+ for (const issue of issues) {
668
+ try {
669
+ const content = readFileSync(join(pmDir, issue.path), 'utf-8');
670
+ const statusMatch = content.match(/^status:\s*(\S+)/m);
671
+ if (statusMatch?.[1] === 'done') {
672
+ completed.push(issue.id);
673
+ } else {
674
+ failed.push(issue.id);
675
+ }
676
+ } catch {
677
+ failed.push(issue.id);
678
+ }
679
+ }
680
+
681
+ const lines = [
682
+ '',
683
+ `## ${timestamp} — Wave [${issues.map(i => i.id).join(', ')}]`,
684
+ '',
685
+ `- **Duration**: ${durationMin} min`,
686
+ `- **Completed**: ${completed.length}/${issues.length}${completed.length > 0 ? ` (${completed.join(', ')})` : ''}`,
687
+ ];
688
+ if (failed.length > 0) {
689
+ lines.push(`- **Failed**: ${failed.join(', ')}`);
690
+ }
691
+ lines.push('');
692
+
693
+ try {
694
+ const existing = readFileSync(progressPath, 'utf-8');
695
+ writeFileSync(progressPath, existing.trimEnd() + '\n' + lines.join('\n'), 'utf-8');
696
+ } catch {
697
+ // Non-fatal
698
+ }
699
+ }
604
700
  }
@@ -60,13 +60,29 @@ function parseFrontMatter(content: string): ParsedFile {
60
60
  return { frontMatter: {}, body: content };
61
61
  }
62
62
  const frontMatter: Record<string, unknown> = {};
63
+ const lines = match[1].split('\n');
63
64
 
64
- for (const line of match[1].split('\n')) {
65
- const trimmed = line.trim();
65
+ for (let i = 0; i < lines.length; i++) {
66
+ const trimmed = lines[i].trim();
66
67
  if (!trimmed || trimmed.startsWith('#')) continue;
67
68
  const colonIdx = trimmed.indexOf(':');
68
69
  if (colonIdx === -1) continue;
69
- frontMatter[trimmed.slice(0, colonIdx).trim()] = parseYamlValue(trimmed.slice(colonIdx + 1).trim());
70
+
71
+ const key = trimmed.slice(0, colonIdx).trim();
72
+ const rawValue = trimmed.slice(colonIdx + 1).trim();
73
+
74
+ // Handle multi-line indented YAML lists (key:\n - item1\n - item2)
75
+ if (!rawValue) {
76
+ const items: string[] = [];
77
+ while (i + 1 < lines.length && /^\s+-\s/.test(lines[i + 1])) {
78
+ i++;
79
+ const item = lines[i].trim().replace(/^-\s+/, '');
80
+ items.push(stripQuotes(item));
81
+ }
82
+ frontMatter[key] = items.length > 0 ? items : null;
83
+ } else {
84
+ frontMatter[key] = parseYamlValue(rawValue);
85
+ }
70
86
  }
71
87
 
72
88
  return { frontMatter, body: match[2] };
@@ -241,7 +257,9 @@ function toStringArray(val: unknown): string[] {
241
257
  }
242
258
 
243
259
  function optionalString(val: unknown): string | null {
244
- return (val as string) || null;
260
+ if (val == null) return null;
261
+ const s = String(val);
262
+ return s === '' ? null : s;
245
263
  }
246
264
 
247
265
  function parseIssue(content: string, filePath: string): Issue {
@@ -273,6 +291,7 @@ function parseIssue(content: string, filePath: string): Issue {
273
291
  technicalNotes: sections.get('Technical Notes') || null,
274
292
  filesToModify: parseListItems(sections.get('Files to Modify') || ''),
275
293
  activity: parseListItems(sections.get('Activity') || ''),
294
+ outputFile: optionalString(fm.output_file),
276
295
  body,
277
296
  path: filePath,
278
297
  };
@@ -304,17 +323,28 @@ function parseSprint(content: string, filePath: string): Sprint {
304
323
  const { frontMatter: fm, body } = parseFrontMatter(content);
305
324
  const sections = extractSections(body);
306
325
 
326
+ // Table-based parsing (markdown links in table rows)
327
+ let issues = parseSprintIssues(sections.get('Issues'));
328
+
329
+ // Fallback: front matter issues array (e.g., ["backlog/IS-001.md", ...])
330
+ if (issues.length === 0 && Array.isArray(fm.issues)) {
331
+ issues = (fm.issues as string[]).map(path => {
332
+ const id = path.replace(/^backlog\//, '').replace(/\.md$/, '');
333
+ return { id, path, title: '', points: null, status: '' };
334
+ });
335
+ }
336
+
307
337
  return {
308
338
  id: String(fm.id || ''),
309
339
  title: String(fm.title || ''),
310
340
  status: (fm.status as Sprint['status']) || 'planned',
311
- start: String(fm.start || ''),
312
- end: String(fm.end || ''),
313
- goal: String(fm.goal || sections.get('Goal') || ''),
341
+ start: String(fm.start || fm.start_date || ''),
342
+ end: String(fm.end || fm.end_date || ''),
343
+ goal: String(fm.goal || sections.get('Goal') || sections.get('Sprint Goal') || ''),
314
344
  capacity: optionalNumber(fm.capacity),
315
345
  committed: optionalNumber(fm.committed),
316
346
  completed: optionalNumber(fm.completed),
317
- issues: parseSprintIssues(sections.get('Issues')),
347
+ issues,
318
348
  path: filePath,
319
349
  };
320
350
  }
@@ -447,9 +477,11 @@ export function parseSingleMilestone(workingDir: string, milestonePath: string):
447
477
 
448
478
  /** Compute the next available ID for a given prefix (e.g., "IS" → "IS-004") */
449
479
  export function getNextId(issues: Issue[], prefix: string): string {
480
+ const escaped = prefix.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
481
+ const pattern = new RegExp(`^${escaped}-(\\d+)$`);
450
482
  let max = 0;
451
483
  for (const issue of issues) {
452
- const match = issue.id.match(new RegExp(`^${prefix}-(\\d+)$`));
484
+ const match = issue.id.match(pattern);
453
485
  if (match) {
454
486
  const num = Number.parseInt(match[1], 10);
455
487
  if (num > max) max = num;
@@ -12,7 +12,7 @@ import { existsSync, readFileSync, writeFileSync } from 'node:fs';
12
12
  import { join } from 'node:path';
13
13
  import { resolveReadyToWork } from './dependency-resolver.js';
14
14
  import { parsePlanDirectory, resolvePmDir } from './parser.js';
15
- import type { Issue } from './types.js';
15
+ import type { Issue, Sprint } from './types.js';
16
16
 
17
17
  interface CategorizedIssues {
18
18
  inProgress: Issue[];
@@ -107,6 +107,45 @@ function buildStateMarkdown(
107
107
  return `---\n${frontMatter}\n---\n\n${sections.join('\n')}`;
108
108
  }
109
109
 
110
+ /**
111
+ * Derive sprint status from its issues' actual statuses.
112
+ * - All issues done/cancelled → completed
113
+ * - Any issue in_progress/in_review → active
114
+ * - Otherwise → planned (unchanged)
115
+ */
116
+ function deriveSprintStatus(sprint: Sprint, issueByPath: Map<string, Issue>): Sprint['status'] | null {
117
+ // Sprint references issues by path (e.g., "backlog/IS-001.md")
118
+ const issuePaths = sprint.issues.map(si => si.path);
119
+ if (issuePaths.length === 0) return null;
120
+
121
+ const statuses = issuePaths.map(p => issueByPath.get(p)?.status).filter(Boolean) as string[];
122
+ if (statuses.length === 0) return null;
123
+
124
+ const allFinished = statuses.every(s => s === 'done' || s === 'cancelled');
125
+ if (allFinished) return 'completed';
126
+
127
+ const anyStarted = statuses.some(s => s === 'in_progress' || s === 'in_review');
128
+ if (anyStarted) return 'active';
129
+
130
+ return null;
131
+ }
132
+
133
+ function reconcileSprintStatuses(pmDir: string, sprints: Sprint[], issueByPath: Map<string, Issue>): void {
134
+ for (const sprint of sprints) {
135
+ const derived = deriveSprintStatus(sprint, issueByPath);
136
+ if (!derived || derived === sprint.status) continue;
137
+
138
+ const sprintPath = join(pmDir, sprint.path);
139
+ try {
140
+ let content = readFileSync(sprintPath, 'utf-8');
141
+ content = content.replace(/^(status:\s*).+$/m, `$1${derived}`);
142
+ writeFileSync(sprintPath, content, 'utf-8');
143
+ } catch {
144
+ // Sprint file may be missing or unwritable
145
+ }
146
+ }
147
+ }
148
+
110
149
  export function reconcileState(workingDir: string): void {
111
150
  const pmDir = resolvePmDir(workingDir);
112
151
  if (!pmDir) return;
@@ -116,7 +155,7 @@ export function reconcileState(workingDir: string): void {
116
155
  const fullState = parsePlanDirectory(workingDir);
117
156
  if (!fullState) return;
118
157
 
119
- const { issues, project } = fullState;
158
+ const { issues, sprints, project } = fullState;
120
159
 
121
160
  const issueByPath = new Map(issues.map(i => [i.path, i]));
122
161
  const categories = categorizeIssues(issues, issueByPath);
@@ -129,4 +168,7 @@ export function reconcileState(workingDir: string): void {
129
168
 
130
169
  const newContent = buildStateMarkdown(frontMatter, categories, warnings, issueByPath);
131
170
  writeFileSync(statePath, newContent, 'utf-8');
171
+
172
+ // Reconcile sprint statuses from actual issue statuses
173
+ reconcileSprintStatuses(pmDir, sprints, issueByPath);
132
174
  }
@@ -92,6 +92,8 @@ export interface Issue {
92
92
  children: string[];
93
93
  // Progress (for epics)
94
94
  progress: string | null;
95
+ // Planned output file path (from front matter output_file, relative to working dir)
96
+ outputFile: string | null;
95
97
  // Full markdown body
96
98
  body: string;
97
99
  // File path relative to .pm/