claude-code-workflow 7.2.14 → 7.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/.claude/commands/workflow/analyze-with-file.md +7 -0
  2. package/.codex/skills/analyze-with-file/SKILL.md +1181 -1182
  3. package/.codex/skills/brainstorm/SKILL.md +723 -725
  4. package/.codex/skills/brainstorm-with-file/SKILL.md +10 -5
  5. package/.codex/skills/clean/SKILL.md +33 -26
  6. package/.codex/skills/collaborative-plan-with-file/SKILL.md +830 -831
  7. package/.codex/skills/csv-wave-pipeline/SKILL.md +906 -906
  8. package/.codex/skills/issue-discover/SKILL.md +57 -50
  9. package/.codex/skills/issue-discover/phases/01-issue-new.md +18 -11
  10. package/.codex/skills/issue-discover/phases/02-discover.md +31 -26
  11. package/.codex/skills/issue-discover/phases/03-discover-by-prompt.md +13 -11
  12. package/.codex/skills/issue-discover/phases/04-quick-execute.md +32 -27
  13. package/.codex/skills/parallel-dev-cycle/SKILL.md +402 -402
  14. package/.codex/skills/project-documentation-workflow/SKILL.md +13 -3
  15. package/.codex/skills/roadmap-with-file/SKILL.md +901 -897
  16. package/.codex/skills/session-sync/SKILL.md +222 -212
  17. package/.codex/skills/spec-add/SKILL.md +620 -613
  18. package/.codex/skills/spec-generator/SKILL.md +2 -2
  19. package/.codex/skills/spec-generator/phases/01-5-requirement-clarification.md +10 -10
  20. package/.codex/skills/spec-generator/phases/01-discovery.md +11 -18
  21. package/.codex/skills/spec-generator/phases/02-product-brief.md +5 -5
  22. package/.codex/skills/spec-generator/phases/03-requirements.md +7 -7
  23. package/.codex/skills/spec-generator/phases/04-architecture.md +4 -4
  24. package/.codex/skills/spec-generator/phases/05-epics-stories.md +5 -6
  25. package/.codex/skills/spec-generator/phases/06-readiness-check.md +10 -17
  26. package/.codex/skills/spec-generator/phases/07-issue-export.md +326 -329
  27. package/.codex/skills/spec-setup/SKILL.md +669 -657
  28. package/.codex/skills/team-arch-opt/SKILL.md +50 -50
  29. package/.codex/skills/team-arch-opt/agents/completion-handler.md +3 -3
  30. package/.codex/skills/team-brainstorm/SKILL.md +724 -725
  31. package/.codex/skills/team-coordinate/SKILL.md +51 -51
  32. package/.codex/skills/team-coordinate/agents/completion-handler.md +3 -3
  33. package/.codex/skills/team-coordinate/agents/plan-reviewer.md +4 -4
  34. package/.codex/skills/team-designer/SKILL.md +691 -691
  35. package/.codex/skills/team-designer/agents/requirement-clarifier.md +11 -12
  36. package/.codex/skills/team-executor/SKILL.md +45 -45
  37. package/.codex/skills/team-frontend/SKILL.md +45 -45
  38. package/.codex/skills/team-frontend/agents/completion-handler.md +3 -3
  39. package/.codex/skills/team-frontend/agents/qa-gate-reviewer.md +4 -4
  40. package/.codex/skills/team-frontend-debug/SKILL.md +50 -50
  41. package/.codex/skills/team-frontend-debug/agents/completion-handler.md +3 -3
  42. package/.codex/skills/team-frontend-debug/agents/conditional-skip-gate.md +4 -4
  43. package/.codex/skills/team-issue/SKILL.md +751 -740
  44. package/.codex/skills/team-iterdev/SKILL.md +825 -826
  45. package/.codex/skills/team-lifecycle-v4/SKILL.md +775 -775
  46. package/.codex/skills/team-lifecycle-v4/agents/quality-gate.md +165 -165
  47. package/.codex/skills/team-lifecycle-v4/agents/requirement-clarifier.md +163 -163
  48. package/.codex/skills/team-perf-opt/SKILL.md +50 -50
  49. package/.codex/skills/team-perf-opt/agents/completion-handler.md +3 -3
  50. package/.codex/skills/team-planex-v2/SKILL.md +652 -637
  51. package/.codex/skills/team-quality-assurance/SKILL.md +51 -52
  52. package/.codex/skills/team-review/SKILL.md +40 -40
  53. package/.codex/skills/team-roadmap-dev/SKILL.md +51 -51
  54. package/.codex/skills/team-roadmap-dev/agents/roadmap-discusser.md +8 -8
  55. package/.codex/skills/team-tech-debt/SKILL.md +50 -50
  56. package/.codex/skills/team-tech-debt/agents/plan-approver.md +5 -5
  57. package/.codex/skills/team-testing/SKILL.md +51 -52
  58. package/.codex/skills/team-uidesign/SKILL.md +40 -40
  59. package/.codex/skills/team-uidesign/agents/completion-handler.md +177 -177
  60. package/.codex/skills/team-ultra-analyze/SKILL.md +786 -787
  61. package/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md +8 -8
  62. package/.codex/skills/team-ux-improve/SKILL.md +51 -52
  63. package/.codex/skills/team-ux-improve/agents/ux-designer.md +2 -2
  64. package/.codex/skills/team-ux-improve/agents/ux-explorer.md +1 -1
  65. package/.codex/skills/unified-execute-with-file/SKILL.md +797 -796
  66. package/.codex/skills/workflow-execute/SKILL.md +1117 -1118
  67. package/.codex/skills/workflow-lite-planex/SKILL.md +1144 -1141
  68. package/.codex/skills/workflow-plan/SKILL.md +631 -636
  69. package/.codex/skills/workflow-tdd-plan/SKILL.md +753 -759
  70. package/.codex/skills/workflow-test-fix-cycle/SKILL.md +402 -392
  71. package/README.md +25 -0
  72. package/ccw/dist/commands/install.d.ts.map +1 -1
  73. package/ccw/dist/commands/install.js +12 -0
  74. package/ccw/dist/commands/install.js.map +1 -1
  75. package/package.json +1 -1
@@ -1,1118 +1,1117 @@
1
- ---
2
- name: workflow-execute
3
- description: |
4
- Autonomous workflow execution pipeline with CSV wave engine.
5
- Session discovery → plan validation → IMPL-*.json → CSV conversion →
6
- wave execution via spawn_agents_on_csv → results sync.
7
- Task JSONs remain the rich data source; CSV is brief + execution state.
8
- argument-hint: "[-y|--yes] [-c|--concurrency N] [--resume-session=ID] [--with-commit]"
9
- allowed-tools: spawn_agents_on_csv, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep
10
- ---
11
-
12
- ## Auto Mode
13
-
14
- When `--yes` or `-y`: Auto-select first session, auto-complete session after all tasks, skip all confirmations.
15
-
16
- # Workflow Execute
17
-
18
- ## Usage
19
-
20
- ```bash
21
- $workflow-execute
22
- $workflow-execute --yes
23
- $workflow-execute --resume-session=WFS-auth
24
- $workflow-execute -y --with-commit
25
- $workflow-execute -y -c 4 --with-commit
26
- $workflow-execute -y --with-commit --resume-session=WFS-auth
27
- ```
28
-
29
- **Flags**:
30
- - `-y, --yes`: Skip all confirmations (auto mode)
31
- - `-c, --concurrency N`: Max concurrent agents per wave (default: 4)
32
- - `--resume-session=ID`: Resume specific session (skip Phase 1-2)
33
- - `--with-commit`: Auto-commit after each task completion
34
-
35
- ---
36
-
37
- ## Overview
38
-
39
- Autonomous execution pipeline using `spawn_agents_on_csv` wave engine. Converts planning artifacts (IMPL-*.json + plan.json) into CSV for wave-based parallel execution, with full task JSON available via `task_json_path` column.
40
-
41
- ```
42
- ┌──────────────────────────────────────────────────────────────────┐
43
- │ WORKFLOW EXECUTE PIPELINE │
44
- ├──────────────────────────────────────────────────────────────────┤
45
- │ │
46
- │ Phase 1: Session Discovery │
47
- │ ├─ Find active sessions │
48
- │ ├─ Auto-select (1 session) or prompt (multiple) │
49
- │ └─ Load session metadata │
50
- │ │
51
- │ Phase 2: Planning Document Validation │
52
- │ ├─ Verify IMPL_PLAN.md exists │
53
- │ ├─ Verify TODO_LIST.md exists │
54
- │ └─ Verify .task/ contains IMPL-*.json │
55
- │ │
56
- │ Phase 3: JSON → CSV Conversion │
57
- │ ├─ Read all IMPL-*.json + plan.json │
58
- │ ├─ Skip already-completed tasks (resume support) │
59
- │ ├─ Compute waves via Kahn's BFS (deps + plan hints) │
60
- │ ├─ Generate tasks.csv (21 cols) + context.csv │
61
- │ └─ Initialize discoveries.ndjson │
62
- │ │
63
- │ Phase 4: Wave Execute (spawn_agents_on_csv) │
64
- │ ├─ Per wave: build prev_context → wave-{N}.csv │
65
- │ ├─ spawn_agents_on_csv with execute instruction │
66
- │ ├─ Merge results → tasks.csv + task JSON status │
67
- │ ├─ Auto-commit per task (if --with-commit) │
68
- │ └─ Cleanup temp wave CSVs │
69
- │ │
70
- │ Phase 5: Results Sync │
71
- │ ├─ Export results.csv │
72
- │ ├─ Reconcile TODO_LIST.md with tasks.csv status │
73
- │ └─ User choice: Review | Complete Session │
74
- │ │
75
- │ Phase 6: Post-Implementation Review (Optional) │
76
- │ ├─ Select review type (quality/security/architecture) │
77
- │ ├─ CLI-assisted analysis │
78
- │ └─ Generate REVIEW-{type}.md │
79
- │ │
80
- │ Resume Mode (--resume-session): │
81
- │ └─ Skip Phase 1-2 → enter Phase 3 (skip completed tasks) │
82
- │ │
83
- └──────────────────────────────────────────────────────────────────┘
84
- ```
85
-
86
- ---
87
-
88
- ## CSV Schemas
89
-
90
- ### tasks.csv (21 columns)
91
-
92
- ```csv
93
- id,title,description,agent,scope,deps,execution_group,context_from,wave,task_json_path,hints,execution_directives,acceptance_criteria,prev_context,status,findings,files_modified,tests_passed,acceptance_met,summary_path,error
94
- ```
95
-
96
- | Column | Phase | Source | Description |
97
- |--------|-------|--------|-------------|
98
- | `id` | Input | task.id | IMPL-001 etc |
99
- | `title` | Input | task.title | Short title |
100
- | `description` | Input | task.description | Full description |
101
- | `agent` | Input | meta.agent or inferred | @code-developer etc |
102
- | `scope` | Input | task.scope / focus_paths | File scope glob |
103
- | `deps` | Input | depends_on.join(';') | Dependency IDs (semicolon-separated) |
104
- | `execution_group` | Input | meta.execution_group | Parallel group identifier |
105
- | `context_from` | Computed | deps + completed predecessors | Context source IDs |
106
- | `wave` | Computed | Kahn's BFS | Wave number (1-based) |
107
- | `task_json_path` | Input | relative path | `.task/IMPL-001.json` (agent reads full JSON) |
108
- | `hints` | Input | artifacts + pre_analysis refs | `tips \|\| file1;file2` |
109
- | `execution_directives` | Input | convergence.verification | Verification commands |
110
- | `acceptance_criteria` | Input | convergence.criteria.join | Acceptance conditions |
111
- | `prev_context` | Computed(per-wave) | context_from findings lookup | Predecessor task findings |
112
- | `status` | Output | agent result | pending→completed/failed/skipped |
113
- | `findings` | Output | agent result | Key findings (max 500 chars) |
114
- | `files_modified` | Output | agent result | Modified files (semicolon-separated) |
115
- | `tests_passed` | Output | agent result | true/false |
116
- | `acceptance_met` | Output | agent result | Acceptance status |
117
- | `summary_path` | Output | generated | .summaries/IMPL-X-summary.md |
118
- | `error` | Output | agent result | Error message |
119
-
120
- **Key design**: `task_json_path` lets agents read the full task JSON (with pre_analysis, flow_control, convergence etc). CSV is "brief + execution state".
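For illustration, a single pending row under this schema might look like the following (hypothetical values, not taken from a real session):

```csv
"IMPL-002","Add login form","Implement the login form component","code-developer","src/auth/**","IMPL-001","","IMPL-001","2",".task/IMPL-002.json","Reuse existing form hooks || src/auth/api.ts","npm test -- auth","Form validates credentials","","pending","","","","",".summaries/IMPL-002-summary.md",""
```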
121
-
122
- ### context.csv (4 columns)
123
-
124
- ```csv
125
- key,type,value,source
126
- "tech_stack","array","TypeScript;React 18;Zustand","plan.json"
127
- "conventions","array","Use useIntl;Barrel exports","plan.json"
128
- "context_package_path","path",".process/context-package.json","session"
129
- "discoveries_path","path","discoveries.ndjson","session"
130
- ```
131
-
132
- Injected into instruction template as static context — avoids each agent rediscovering project basics.
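For instance, the sample rows above would be rendered into the instruction template roughly as follows (a sketch of the `contextBlock` mapping used in Phase 4):

```
- **tech_stack** (array): TypeScript;React 18;Zustand
- **conventions** (array): Use useIntl;Barrel exports
- **context_package_path** (path): .process/context-package.json
- **discoveries_path** (path): discoveries.ndjson
```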
133
-
134
- ---
135
-
136
- ## Session Structure
137
-
138
- ```
139
- .workflow/active/WFS-{session}/
140
- ├── workflow-session.json # Session state
141
- ├── plan.json # Structured plan (machine-readable)
142
- ├── IMPL_PLAN.md # Implementation plan (human-readable)
143
- ├── TODO_LIST.md # Progress tracking (Phase 5 sync)
144
- ├── tasks.csv # Phase 3 generated, Phase 4 updated
145
- ├── context.csv # Phase 3 generated
146
- ├── results.csv # Phase 5 exported
147
- ├── discoveries.ndjson # Phase 3 initialized, Phase 4 agents append
148
- ├── .task/ # Task definitions (unchanged)
149
- │ ├── IMPL-1.json
150
- │ └── IMPL-N.json
151
- ├── .summaries/ # Agent-generated summaries
152
- │ ├── IMPL-1-summary.md
153
- │ └── IMPL-N-summary.md
154
- ├── .process/context-package.json  # Unchanged
155
- └── wave-{N}.csv # Phase 4 temporary (cleaned after each wave)
156
- ```
157
-
158
- ---
159
-
160
- ## Implementation
161
-
162
- ### Session Initialization
163
-
164
- ```javascript
165
- const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
166
-
167
- // Parse flags
168
- const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
169
- const withCommit = $ARGUMENTS.includes('--with-commit')
170
- const resumeMatch = $ARGUMENTS.match(/--resume-session[=\s]+(\S+)/)
171
- const resumeSessionId = resumeMatch ? resumeMatch[1] : null
172
- const isResumeMode = !!resumeSessionId
173
- const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
174
- const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
175
- ```
176
-
177
- ---
178
-
179
- ### Phase 1: Session Discovery
180
-
181
- **Applies to**: Normal mode only (skipped if `--resume-session`).
182
-
183
- ```javascript
184
- let sessionId, sessionFolder
185
-
186
- if (isResumeMode) {
187
- sessionId = resumeSessionId
188
- sessionFolder = `.workflow/active/${sessionId}`
189
- // Skip to Phase 3
190
- } else {
191
- const sessions = Bash(`ls -d .workflow/active/WFS-* 2>/dev/null`).trim().split('\n').filter(Boolean)
192
-
193
- if (sessions.length === 0) {
194
- console.log('ERROR: No active workflow sessions found.')
195
- console.log('Run $workflow-plan "task description" to create a session.')
196
- return
197
- }
198
-
199
- if (sessions.length === 1) {
200
- sessionFolder = sessions[0]
201
- sessionId = sessionFolder.split('/').pop()
202
- console.log(`Auto-selected session: ${sessionId}`)
203
- } else {
204
- if (AUTO_YES) {
205
- sessionFolder = sessions[0]
206
- sessionId = sessionFolder.split('/').pop()
207
- console.log(`[--yes] Auto-selected: ${sessionId}`)
208
- } else {
209
- const sessionInfos = sessions.slice(0, 4).map(s => {
210
- const id = s.split('/').pop()
211
- const total = parseInt(Bash(`grep -c '^- \\[' "${s}/TODO_LIST.md" 2>/dev/null || echo 0`).trim()) || 0
212
- const done = parseInt(Bash(`grep -c '^- \\[x\\]' "${s}/TODO_LIST.md" 2>/dev/null || echo 0`).trim()) || 0
213
- return { id, path: s, progress: `${done}/${total} tasks` }
214
- })
215
-
216
- const answer = AskUserQuestion({
217
- questions: [{
218
- question: "Select session to execute:",
219
- header: "Session",
220
- multiSelect: false,
221
- options: sessionInfos.map(s => ({
222
- label: s.id,
223
- description: s.progress
224
- }))
225
- }]
226
- })
227
- sessionId = answer.Session
228
- sessionFolder = `.workflow/active/${sessionId}`
229
- }
230
- }
231
- }
232
- ```
233
-
234
- ---
235
-
236
- ### Phase 2: Planning Document Validation
237
-
238
- **Applies to**: Normal mode only.
239
-
240
- ```javascript
241
- if (!isResumeMode) {
242
- const checks = {
243
- 'IMPL_PLAN.md': Bash(`test -f "${sessionFolder}/IMPL_PLAN.md" && echo yes`).trim() === 'yes',
244
- 'TODO_LIST.md': Bash(`test -f "${sessionFolder}/TODO_LIST.md" && echo yes`).trim() === 'yes',
245
- '.task/ has files': parseInt(Bash(`ls ${sessionFolder}/.task/IMPL-*.json 2>/dev/null | wc -l`).trim()) > 0
246
- }
247
-
248
- const missing = Object.entries(checks).filter(([_, ok]) => !ok).map(([name]) => name)
249
- if (missing.length > 0) {
250
- console.log(`ERROR: Missing planning documents: ${missing.join(', ')}`)
251
- console.log(`Run $workflow-plan --session ${sessionId} to generate plan.`)
252
- return
253
- }
254
-
255
- console.log(`Planning documents validated.`)
256
- }
257
- ```
258
-
259
- ---
260
-
261
- ### Phase 3: JSON → CSV Conversion
262
-
263
- **Applies to**: Both normal and resume modes (resume entry point).
264
-
265
- **Objective**: Convert IMPL-*.json + plan.json into tasks.csv + context.csv with computed waves.
266
-
267
- ```javascript
268
- console.log(`\n## Phase 3: JSON → CSV Conversion\n`)
269
-
270
- // Update session status to active
271
- Bash(`cd "${sessionFolder}" && jq '.status = "active" | .execution_started_at = (.execution_started_at // "'"$(date -Iseconds)"'")' workflow-session.json > tmp.json && mv tmp.json workflow-session.json 2>/dev/null || true`)
272
- Bash(`mkdir -p "${sessionFolder}/.summaries"`)
273
-
274
- // 3.1: Read all IMPL-*.json
275
- const taskFiles = Bash(`ls ${sessionFolder}/.task/IMPL-*.json 2>/dev/null`).trim().split('\n').filter(Boolean)
276
- if (taskFiles.length === 0) {
277
- console.log('ERROR: No task JSONs found in .task/')
278
- return
279
- }
280
-
281
- const taskJsons = taskFiles.map(f => {
282
- const content = Read(f)
283
- const json = JSON.parse(content)
284
- json._filePath = f
285
- // Fallback: derive id from filename if missing
286
- if (!json.id) {
287
- json.id = f.split('/').pop().replace('.json', '')
288
- }
289
- return json
290
- })
291
-
292
- // 3.2: Skip completed tasks (resume support)
293
- const todoContent = Read(`${sessionFolder}/TODO_LIST.md`)
294
- const completedIds = new Set()
295
- const todoLines = todoContent.match(/^- \[x\] (IMPL-\d+(?:\.\d+)?)/gm) || []
296
- todoLines.forEach(line => {
297
- const match = line.match(/IMPL-\d+(?:\.\d+)?/)
298
- if (match) completedIds.add(match[0])
299
- })
300
-
301
- // Also check task JSON status field
302
- taskJsons.forEach(tj => {
303
- if (tj.status === 'completed') completedIds.add(tj.id)
304
- })
305
-
306
- const pendingJsons = taskJsons.filter(tj => !completedIds.has(tj.id))
307
-
308
- console.log(` Total tasks: ${taskJsons.length}`)
309
- console.log(` Already completed: ${completedIds.size}`)
310
- console.log(` Pending: ${pendingJsons.length}`)
311
-
312
- if (pendingJsons.length === 0) {
313
- console.log(`\nAll tasks already completed. Proceeding to Phase 5.`)
314
- // → Jump to Phase 5
315
- }
316
-
317
- // 3.3: Read plan.json for execution hints
318
- const planJsonPath = `${sessionFolder}/plan.json`
319
- const planJsonExists = Bash(`test -f "${planJsonPath}" && echo yes`).trim() === 'yes'
320
- const planJson = planJsonExists ? JSON.parse(Read(planJsonPath) || '{}') : {}
321
-
322
- // 3.4: Extract fields from task JSONs (handles two schema variants)
323
- function resolveAgent(tj) {
324
- if (tj.meta?.agent) return tj.meta.agent
325
- const typeMap = {
326
- 'feature': 'code-developer',
327
- 'test-gen': 'code-developer',
328
- 'test-fix': 'test-fix-agent',
329
- 'review': 'universal-executor',
330
- 'docs': 'doc-generator'
331
- }
332
- return typeMap[tj.meta?.type] || 'code-developer'
333
- }
334
-
335
- function extractDeps(tj) {
336
- return tj.depends_on || tj.context?.depends_on || []
337
- }
338
-
339
- function buildHints(tj) {
340
- const tips = []
341
- const files = []
342
- // Gather artifact references
343
- if (tj.artifacts) {
344
- tj.artifacts.forEach(a => { if (a.path) files.push(a.path) })
345
- }
346
- // Gather pre_analysis Read references
347
- if (tj.pre_analysis) {
348
- tj.pre_analysis.forEach(step => {
349
- if (step.tool === 'Read' && step.path) files.push(step.path)
350
- })
351
- }
352
- // Gather tips from meta or context
353
- if (tj.meta?.hints) tips.push(tj.meta.hints)
354
- if (tj.context?.tips) tips.push(tj.context.tips)
355
-
356
- const tipsStr = tips.join('; ')
357
- const filesStr = files.join(';')
358
- if (tipsStr && filesStr) return `${tipsStr} || ${filesStr}`
359
- if (tipsStr) return tipsStr
360
- if (filesStr) return `|| ${filesStr}`
361
- return ''
362
- }
363
-
364
- function extractDirectives(tj) {
365
- if (tj.convergence?.verification) {
366
- return Array.isArray(tj.convergence.verification)
367
- ? tj.convergence.verification.join('; ')
368
- : tj.convergence.verification
369
- }
370
- if (tj.execution_config?.verification_command) return tj.execution_config.verification_command
371
- return ''
372
- }
373
-
374
- function extractAcceptance(tj) {
375
- if (tj.convergence?.criteria) {
376
- return Array.isArray(tj.convergence.criteria)
377
- ? tj.convergence.criteria.join('; ')
378
- : tj.convergence.criteria
379
- }
380
- if (tj.context?.acceptance) {
381
- return Array.isArray(tj.context.acceptance)
382
- ? tj.context.acceptance.join('; ')
383
- : tj.context.acceptance
384
- }
385
- return ''
386
- }
387
-
388
- function extractScope(tj) {
389
- if (tj.scope) return tj.scope
390
- if (tj.focus_paths) {
391
- return Array.isArray(tj.focus_paths) ? tj.focus_paths.join(';') : tj.focus_paths
392
- }
393
- return ''
394
- }
395
-
396
- // Build task rows (all tasks — completed ones carry status forward)
397
- const taskRows = taskJsons.map(tj => ({
398
- id: tj.id,
399
- title: tj.title || '',
400
- description: tj.description || '',
401
- agent: resolveAgent(tj),
402
- scope: extractScope(tj),
403
- deps: extractDeps(tj).join(';'),
404
- execution_group: tj.meta?.execution_group || '',
405
- context_from: '', // computed after wave assignment
406
- task_json_path: `.task/${tj.id}.json`,
407
- hints: buildHints(tj),
408
- execution_directives: extractDirectives(tj),
409
- acceptance_criteria: extractAcceptance(tj),
410
- prev_context: '', // computed per-wave in Phase 4
411
- status: completedIds.has(tj.id) ? 'completed' : 'pending',
412
- findings: '',
413
- files_modified: '',
414
- tests_passed: '',
415
- acceptance_met: '',
416
- summary_path: `.summaries/${tj.id}-summary.md`,
417
- error: ''
418
- }))
419
-
420
- // 3.5: Compute waves via Kahn's BFS with plan.json hints
421
- function computeWaves(rows, planJson) {
422
- const taskMap = new Map(rows.map(r => [r.id, r]))
423
- const inDegree = new Map(rows.map(r => [r.id, 0]))
424
- const adjList = new Map(rows.map(r => [r.id, []]))
425
-
426
- for (const row of rows) {
427
- const deps = row.deps.split(';').filter(Boolean)
428
- for (const dep of deps) {
429
- if (taskMap.has(dep)) {
430
- adjList.get(dep).push(row.id)
431
- inDegree.set(row.id, inDegree.get(row.id) + 1)
432
- }
433
- }
434
- }
435
-
436
- // BFS
437
- const queue = []
438
- const waveMap = new Map()
439
-
440
- for (const [id, deg] of inDegree) {
441
- if (deg === 0) {
442
- queue.push([id, 1])
443
- waveMap.set(id, 1)
444
- }
445
- }
446
-
447
- let maxWave = 1
448
- let idx = 0
449
- while (idx < queue.length) {
450
- const [current, depth] = queue[idx++]
451
- for (const next of adjList.get(current)) {
452
- const newDeg = inDegree.get(next) - 1
453
- inDegree.set(next, newDeg)
454
- const nextDepth = Math.max(waveMap.get(next) || 0, depth + 1)
455
- waveMap.set(next, nextDepth)
456
- if (newDeg === 0) {
457
- queue.push([next, nextDepth])
458
- maxWave = Math.max(maxWave, nextDepth)
459
- }
460
- }
461
- }
462
-
463
- // Check for unassigned (circular deps)
464
- for (const row of rows) {
465
- if (!waveMap.has(row.id)) {
466
- console.log(`WARNING: Circular dependency involving ${row.id}, assigning to wave ${maxWave + 1}`)
467
- waveMap.set(row.id, maxWave + 1)
468
- maxWave = maxWave + 1
469
- }
470
- }
471
-
472
- // Apply plan.json execution_graph hints if available
473
- if (planJson.execution_graph?.phases) {
474
- planJson.execution_graph.phases.forEach((phase, idx) => {
475
- const phaseWave = idx + 1
476
- const taskIds = phase.tasks || phase.task_ids || []
477
- taskIds.forEach(id => {
478
- if (waveMap.has(id)) {
479
- // Only shift to later wave (never earlier — respect deps)
480
- if (phaseWave > waveMap.get(id)) {
481
- waveMap.set(id, phaseWave)
482
- }
483
- }
484
- })
485
- })
486
- maxWave = Math.max(maxWave, ...waveMap.values())
487
- }
488
-
489
- return { waveMap, maxWave }
490
- }
491
-
492
- const { waveMap, maxWave } = computeWaves(taskRows, planJson)
493
-
494
- // Assign wave + context_from
495
- taskRows.forEach(row => {
496
- row.wave = waveMap.get(row.id) || 1
497
- // context_from = deps + already-completed IDs for resume context
498
- const depIds = row.deps.split(';').filter(Boolean)
499
- const contextIds = [...new Set([...depIds, ...[...completedIds].filter(id => id !== row.id)])]
500
- row.context_from = contextIds.join(';')
501
- })
502
-
503
- // 3.6: Write tasks.csv
504
- function csvEscape(val) {
505
- return `"${String(val).replace(/"/g, '""')}"`
506
- }
507
-
508
- const tasksCsvHeader = 'id,title,description,agent,scope,deps,execution_group,context_from,wave,task_json_path,hints,execution_directives,acceptance_criteria,prev_context,status,findings,files_modified,tests_passed,acceptance_met,summary_path,error'
509
- const tasksCsvRows = taskRows.map(r =>
510
- [r.id, r.title, r.description, r.agent, r.scope, r.deps, r.execution_group,
511
- r.context_from, r.wave, r.task_json_path, r.hints, r.execution_directives,
512
- r.acceptance_criteria, r.prev_context, r.status, r.findings, r.files_modified,
513
- r.tests_passed, r.acceptance_met, r.summary_path, r.error]
514
- .map(csvEscape).join(',')
515
- )
516
- Write(`${sessionFolder}/tasks.csv`, [tasksCsvHeader, ...tasksCsvRows].join('\n'))
517
-
518
- // 3.7: Write context.csv
519
- const contextRows = ['key,type,value,source']
520
- if (planJson.tech_stack) {
521
- const stack = Array.isArray(planJson.tech_stack) ? planJson.tech_stack.join(';') : planJson.tech_stack
522
- contextRows.push(`"tech_stack","array","${stack}","plan.json"`)
523
- }
524
- if (planJson.conventions) {
525
- const conv = Array.isArray(planJson.conventions) ? planJson.conventions.join(';') : planJson.conventions
526
- contextRows.push(`"conventions","array","${conv}","plan.json"`)
527
- }
528
- const ctxPkgExists = Bash(`test -f "${sessionFolder}/.process/context-package.json" && echo yes`).trim() === 'yes'
529
- if (ctxPkgExists) {
530
- contextRows.push(`"context_package_path","path",".process/context-package.json","session"`)
531
- }
532
- contextRows.push(`"discoveries_path","path","discoveries.ndjson","session"`)
533
- Write(`${sessionFolder}/context.csv`, contextRows.join('\n'))
534
-
535
- // 3.8: Initialize discoveries.ndjson
536
- Bash(`touch "${sessionFolder}/discoveries.ndjson"`)
537
-
538
- // 3.9: User validation (skip if AUTO_YES)
539
- if (!AUTO_YES) {
540
- const pendingRows = taskRows.filter(r => r.status === 'pending')
541
- console.log(`\n## Wave Execution Plan\n`)
542
- console.log(` Tasks: ${pendingRows.length} pending across ${maxWave} waves\n`)
543
- for (let w = 1; w <= maxWave; w++) {
544
- const waveTasks = pendingRows.filter(r => r.wave === w)
545
- if (waveTasks.length === 0) continue
546
- console.log(` Wave ${w}: ${waveTasks.map(t => `${t.id}(${t.agent})`).join(', ')}`)
547
- }
548
-
549
- const answer = AskUserQuestion({
550
- questions: [{
551
- question: `Proceed with ${pendingRows.length} tasks across ${maxWave} waves?`,
552
- header: "Confirm",
553
- multiSelect: false,
554
- options: [
555
- { label: "Execute", description: "Proceed with wave execution" },
556
- { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv then --resume-session` },
557
- { label: "Cancel", description: "Abort" }
558
- ]
559
- }]
560
- })
561
-
562
- if (answer.Confirm === "Modify") {
563
- console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $workflow-execute --resume-session=${sessionId}`)
564
- return
565
- } else if (answer.Confirm === "Cancel") {
566
- return
567
- }
568
- }
569
-
570
- console.log(`\n tasks.csv: ${taskRows.length} rows (${pendingJsons.length} pending)`)
571
- console.log(` context.csv: ${contextRows.length - 1} entries`)
572
- console.log(` Wave plan: ${maxWave} waves`)
573
- ```
574
-
575
- ---
576
-
577
- ### Phase 4: Wave Execute (spawn_agents_on_csv)
578
-
579
- **Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave builds `prev_context` from completed predecessors.
580
-
581
- ```javascript
582
- console.log(`\n## Phase 4: Wave Execute\n`)
583
-
584
- // Determine concurrency from plan.json or flag
585
- let effectiveConcurrency = maxConcurrency
586
- if (planJson.recommended_execution === 'Sequential') {
587
- effectiveConcurrency = 1
588
- console.log(` Sequential mode (from plan.json), concurrency: 1`)
589
- } else {
590
- console.log(` Parallel mode, concurrency: ${effectiveConcurrency}`)
591
- }
592
-
593
- // Read context.csv for instruction injection
594
- const contextCsvContent = Read(`${sessionFolder}/context.csv`)
595
- const contextEntries = parseCsv(contextCsvContent)
596
- const contextBlock = contextEntries.map(e => `- **${e.key}** (${e.type}): ${e.value}`).join('\n')
597
-
598
- const failedIds = new Set()
599
- const skippedIds = new Set()
600
-
601
- for (let wave = 1; wave <= maxWave; wave++) {
602
- console.log(`\n### Wave ${wave}/${maxWave}\n`)
603
-
604
- // Re-read master CSV for current state
605
- const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
606
- const waveTasks = masterCsv.filter(row =>
607
- parseInt(row.wave) === wave && row.status === 'pending'
608
- )
609
-
610
- if (waveTasks.length === 0) {
611
- console.log(` No pending tasks in wave ${wave}`)
612
- continue
613
- }
614
-
615
- // Skip tasks whose deps failed/skipped
616
- const executableTasks = []
617
- for (const task of waveTasks) {
618
- const deps = (task.deps || '').split(';').filter(Boolean)
619
- if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
620
- skippedIds.add(task.id)
621
- updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, {
622
- status: 'skipped',
623
- error: 'Dependency failed or skipped'
624
- })
625
- console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
626
- continue
627
- }
628
- executableTasks.push(task)
629
- }
630
-
631
- if (executableTasks.length === 0) {
632
- console.log(` No executable tasks in wave ${wave}`)
633
- continue
634
- }
635
-
636
- // Build prev_context for each task
637
- for (const task of executableTasks) {
638
- task.prev_context = buildPrevContext(task.context_from, masterCsv)
639
- }
640
-
641
- // Write wave CSV (input columns + prev_context)
642
- const waveHeader = 'id,title,description,agent,scope,deps,execution_group,context_from,wave,task_json_path,hints,execution_directives,acceptance_criteria,prev_context'
643
- const waveRows = executableTasks.map(t =>
644
- [t.id, t.title, t.description, t.agent, t.scope, t.deps, t.execution_group,
645
- t.context_from, t.wave, t.task_json_path, t.hints, t.execution_directives,
646
- t.acceptance_criteria, t.prev_context]
647
- .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
648
- .join(',')
649
- )
650
- Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
651
-
652
- // Execute wave
653
- console.log(` Executing ${executableTasks.length} tasks (concurrency: ${effectiveConcurrency})...`)
654
-
655
- spawn_agents_on_csv({
656
- csv_path: `${sessionFolder}/wave-${wave}.csv`,
657
- id_column: "id",
658
- instruction: buildExecuteInstruction(sessionFolder, contextBlock),
659
- max_concurrency: effectiveConcurrency,
660
- max_runtime_seconds: 600,
661
- output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
662
- output_schema: {
663
- type: "object",
664
- properties: {
665
- id: { type: "string" },
666
- status: { type: "string", enum: ["completed", "failed"] },
667
- findings: { type: "string" },
668
- files_modified: { type: "array", items: { type: "string" } },
669
- tests_passed: { type: "boolean" },
670
- acceptance_met: { type: "string" },
671
- error: { type: "string" }
672
- },
673
- required: ["id", "status", "findings", "tests_passed"]
674
- }
675
- })
676
-
677
- // Merge results into master CSV + update task JSONs
678
- const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
679
- for (const result of waveResults) {
680
- const filesModified = Array.isArray(result.files_modified)
681
- ? result.files_modified.join(';')
682
- : (result.files_modified || '')
683
-
684
- updateMasterCsvRow(`${sessionFolder}/tasks.csv`, result.id, {
685
- status: result.status,
686
- findings: result.findings || '',
687
- files_modified: filesModified,
688
- tests_passed: String(result.tests_passed ?? ''),
689
- acceptance_met: result.acceptance_met || '',
690
- error: result.error || ''
691
- })
692
-
693
- // Update task JSON status
694
- if (result.status === 'completed' || result.status === 'failed') {
695
- Bash(`cd "${sessionFolder}/.task" && jq '.status="${result.status}" | .status_history=(.status_history // [])+[{"from":"in_progress","to":"${result.status}","changed_at":"'"$(date -Iseconds)"'"}]' "${result.id}.json" > tmp.json && mv tmp.json "${result.id}.json" 2>/dev/null || true`)
696
- }
697
-
698
- if (result.status === 'failed') {
699
- failedIds.add(result.id)
700
- console.log(` [${result.id}] → FAILED: ${result.error}`)
701
- } else {
702
- console.log(` [${result.id}] → COMPLETED${result.tests_passed ? ' (tests passed)' : ''}`)
703
- }
704
-
705
- // Auto-commit per completed task
706
- if (withCommit && result.status === 'completed' && filesModified) {
707
- const files = filesModified.split(';').filter(Boolean)
708
- if (files.length > 0) {
709
- const taskJson = JSON.parse(Read(`${sessionFolder}/.task/${result.id}.json`) || '{}')
710
- const typeMap = { feature: 'feat', bugfix: 'fix', refactor: 'refactor', 'test-gen': 'test', docs: 'docs' }
711
- const type = typeMap[taskJson.meta?.type] || 'chore'
712
- const title = taskJson.title || result.id
713
- const msg = `${type}: ${title}`
714
- Bash(`git add ${files.map(f => '"' + f + '"').join(' ')} && git commit -m "${msg}" 2>/dev/null || true`)
715
- console.log(` Committed: ${msg}`)
716
- }
717
- }
718
- }
719
-
720
- // Cleanup temp wave CSVs
721
- Bash(`rm -f "${sessionFolder}/wave-${wave}.csv" "${sessionFolder}/wave-${wave}-results.csv"`)
722
-
723
- const completedCount = waveResults.filter(r => r.status === 'completed').length
724
- const failedCount = waveResults.filter(r => r.status === 'failed').length
725
- console.log(` Wave ${wave} done: ${completedCount} completed, ${failedCount} failed`)
726
- }
727
- ```
728
-
729
- **prev_context Builder**
730
-
731
- ```javascript
732
- function buildPrevContext(contextFrom, masterCsv) {
733
- if (!contextFrom) return 'No previous context available'
734
-
735
- const ids = contextFrom.split(';').filter(Boolean)
736
- const entries = []
737
-
738
- ids.forEach(id => {
739
- const row = masterCsv.find(r => r.id === id)
740
- if (row && row.status === 'completed' && row.findings) {
741
- entries.push(`[${row.id}: ${row.title}] ${row.findings}`)
742
- if (row.files_modified) entries.push(` Modified: ${row.files_modified}`)
743
- }
744
- })
745
-
746
- return entries.length > 0 ? entries.join('\n') : 'No previous context available'
747
- }
748
- ```
749
-
750
- **Execute Instruction Template**
751
-
752
- ```javascript
753
- function buildExecuteInstruction(sessionFolder, contextBlock) {
754
- return `
755
- ## TASK ASSIGNMENT
756
-
757
- ### MANDATORY FIRST STEPS
758
- 1. Read your FULL task JSON: ${sessionFolder}/{task_json_path}
759
- - CSV row is a brief — task JSON has pre_analysis, flow_control, convergence, and full context
760
- 2. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists)
761
- 3. Read project context: .workflow/project-tech.json (if exists)
762
-
763
- ---
764
-
765
- ## Your Task
766
-
767
- **Task ID**: {id}
768
- **Title**: {title}
769
- **Description**: {description}
770
- **Agent Type**: {agent}
771
- **Scope**: {scope}
772
-
773
- ### Task JSON (full details)
774
- Read: ${sessionFolder}/{task_json_path}
775
-
776
- ### Implementation Hints & Reference Files
777
- {hints}
778
-
779
- > Format: \`tips text || file1;file2\`. Read ALL reference files (after ||) before starting. Apply tips (before ||) as guidance.
780
-
781
- ### Execution Directives
782
- {execution_directives}
783
-
784
- > Commands to run for verification, tool restrictions, or environment requirements.
785
-
786
- ### Acceptance Criteria
787
- {acceptance_criteria}
788
-
789
- ### Previous Context (from predecessor tasks)
790
- {prev_context}
791
-
792
- ### Project Context
793
- ${contextBlock}
794
-
795
- ---
796
-
797
- ## Execution Protocol
798
-
799
- 1. **Read task JSON**: Load ${sessionFolder}/{task_json_path} for full task details including pre_analysis steps and flow_control
800
- 2. **Check execution method**: If task JSON has \`execution_config.method\`, follow it (agent vs cli mode)
801
- 3. **Execute pre_analysis**: If task JSON has \`pre_analysis\` steps, run them first to gather context
802
- 4. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
803
- 5. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared findings
804
- 6. **Use context**: Apply predecessor tasks' findings from prev_context above
805
- 7. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
806
- 8. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
807
- 9. **Execute**: Implement the task as described in the task JSON
808
- 10. **Generate summary**: Write execution summary to ${sessionFolder}/.summaries/{id}-summary.md with sections:
809
- ## Summary, ## Files Modified (as \`- \\\`path\\\`\` list), ## Key Decisions, ## Tests
810
- 11. **Run directives**: Execute commands from {execution_directives} to verify your work
811
- 12. **Update TODO**: In ${sessionFolder}/TODO_LIST.md, change \`- [ ] {id}\` to \`- [x] {id}\`
812
- 13. **Share discoveries**: Append findings to shared board:
813
- \`\`\`bash
814
- echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
815
- \`\`\`
816
- 14. **Report result**: Return JSON via report_agent_job_result
817
-
818
- ### Discovery Types to Share
819
- - \`code_pattern\`: {name, file, description} — reusable patterns found
820
- - \`integration_point\`: {file, description, exports[]} — module connection points
821
- - \`convention\`: {naming, imports, formatting} — code style conventions
822
- - \`blocker\`: {issue, severity, impact} — blocking issues encountered
823
-
824
- ---
825
-
826
- ## Output (report_agent_job_result)
827
-
828
- Return JSON:
829
- {
830
- "id": "{id}",
831
- "status": "completed" | "failed",
832
- "findings": "Key discoveries and implementation notes (max 500 chars)",
833
- "files_modified": ["path1", "path2"],
834
- "tests_passed": true | false,
835
- "acceptance_met": "Summary of which acceptance criteria were met/unmet",
836
- "error": ""
837
- }
838
-
839
- **IMPORTANT**: Set status to "completed" ONLY if:
840
- - All acceptance criteria are met
841
- - Verification directives pass (if any)
842
- Otherwise set status to "failed" with details in error field.
843
- `
844
- }
845
- ```
846
-
847
- ---
848
-
849
- ### Phase 5: Results Sync
850
-
851
- **Objective**: Export results, reconcile TODO_LIST.md, update session status.
852
-
853
- ```javascript
854
- console.log(`\n## Phase 5: Results Sync\n`)
855
-
856
- // 5.1: Export results.csv (final copy of tasks.csv)
857
- const finalCsvContent = Read(`${sessionFolder}/tasks.csv`)
858
- Write(`${sessionFolder}/results.csv`, finalCsvContent)
859
-
860
- // 5.2: Reconcile TODO_LIST.md with tasks.csv status
861
- const finalTasks = parseCsv(finalCsvContent)
862
- let todoMd = Read(`${sessionFolder}/TODO_LIST.md`)
863
-
864
- for (const task of finalTasks) {
865
- if (task.status === 'completed') {
866
- // Ensure marked as [x] in TODO_LIST.md
867
- const uncheckedPattern = new RegExp(`^(- \\[ \\] ${task.id.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}(:.*)?)$`, 'm')
868
- todoMd = todoMd.replace(uncheckedPattern, (match, line) => line.replace('- [ ]', '- [x]'))
869
- }
870
- }
871
- Write(`${sessionFolder}/TODO_LIST.md`, todoMd)
872
-
873
- // 5.3: Summary
874
- const completed = finalTasks.filter(t => t.status === 'completed')
875
- const failed = finalTasks.filter(t => t.status === 'failed')
876
- const skipped = finalTasks.filter(t => t.status === 'skipped')
877
- const pending = finalTasks.filter(t => t.status === 'pending')
878
-
879
- console.log(` Results:`)
880
- console.log(` Completed: ${completed.length}`)
881
- console.log(` Failed: ${failed.length}`)
882
- console.log(` Skipped: ${skipped.length}`)
883
- console.log(` Pending: ${pending.length}`)
884
-
885
- // 5.4: Update session status
886
- const allDone = failed.length === 0 && skipped.length === 0 && pending.length === 0
887
- const sessionStatus = allDone ? 'completed' : 'partial'
888
- Bash(`cd "${sessionFolder}" && jq '.status = "${sessionStatus}" | .completed_at = "'"$(date -Iseconds)"'"' workflow-session.json > tmp.json && mv tmp.json workflow-session.json 2>/dev/null || true`)
889
-
890
- // 5.5: User next step
891
- if (AUTO_YES) {
892
- console.log(` [--yes] Session ${sessionId} ${sessionStatus}.`)
893
- } else {
894
- const nextStep = AskUserQuestion({
895
- questions: [{
896
- question: "Execution complete. What's next?",
897
- header: "Next Step",
898
- multiSelect: false,
899
- options: [
900
- { label: "Enter Review", description: "Run post-implementation review (security/quality/architecture)" },
901
- { label: "Complete Session", description: "Archive session and finalize" }
902
- ]
903
- }]
904
- })
905
-
906
- if (nextStep['Next Step'] === 'Enter Review') {
907
- // → Phase 6
908
- } else {
909
- console.log(` Session ${sessionId} ${sessionStatus}.`)
910
- }
911
- }
912
- ```
913
-
914
- ---
915
-
916
- ### Phase 6: Post-Implementation Review (Optional)
917
-
918
- **Objective**: CLI-assisted specialized review of implemented code.
919
-
920
- ```javascript
921
- // Phase 6 entry (from Phase 5 "Enter Review" or direct invocation)
922
- console.log(`\n## Phase 6: Post-Implementation Review\n`)
923
-
924
- const reviewType = AUTO_YES ? 'quality' : (() => {
925
- const answer = AskUserQuestion({
926
- questions: [{
927
- question: "Select review type:",
928
- header: "Review",
929
- multiSelect: false,
930
- options: [
931
- { label: "Quality", description: "Code quality, best practices, maintainability" },
932
- { label: "Security", description: "Security vulnerabilities, OWASP Top 10" },
933
- { label: "Architecture", description: "Architecture decisions, scalability, patterns" },
934
- { label: "Action Items", description: "TODO items, tech debt, follow-ups" }
935
- ]
936
- }]
937
- })
938
- return answer.Review.toLowerCase()
939
- })()
940
-
941
- // Get list of modified files from tasks.csv
942
- const reviewTasks = parseCsv(Read(`${sessionFolder}/tasks.csv`))
943
- const allModifiedFiles = new Set()
944
- reviewTasks.forEach(t => {
945
- (t.files_modified || '').split(';').filter(Boolean).forEach(f => allModifiedFiles.add(f))
946
- })
947
-
948
- const fileList = [...allModifiedFiles].join(', ')
949
-
950
- Bash({
951
- command: `ccw cli -p "PURPOSE: Post-implementation ${reviewType} review of modified files. Identify issues and generate actionable report.
952
- TASK:
953
- Review all modified files for ${reviewType} concerns
954
- Assess overall ${reviewType} posture
955
- Generate prioritized issue list with severity
956
- Provide remediation recommendations
957
- MODE: analysis
958
- CONTEXT: @${[...allModifiedFiles].map(f => f).join(' @')}
959
- EXPECTED: Structured ${reviewType} review report with: summary, issue list (severity, file, line, description, fix), overall score
960
- CONSTRAINTS: Focus on ${reviewType} | Review only modified files: ${fileList}" --tool gemini --mode analysis --rule analysis-review-code-quality`,
961
- run_in_background: true
962
- })
963
- // Wait for CLI → review report
964
-
965
- Write(`${sessionFolder}/REVIEW-${reviewType}.md`, reviewReport)
966
- console.log(` Review complete: ${sessionFolder}/REVIEW-${reviewType}.md`)
967
-
968
- // Post-review options
969
- if (!AUTO_YES) {
970
- const postReview = AskUserQuestion({
971
- questions: [{
972
- question: "Review complete. What's next?",
973
- header: "Post-Review",
974
- multiSelect: false,
975
- options: [
976
- { label: "Another Review", description: "Run a different review type" },
977
- { label: "Complete Session", description: "Archive and finalize" }
978
- ]
979
- }]
980
- })
981
-
982
- if (postReview['Post-Review'] === 'Another Review') {
983
- // Loop back to Phase 6 review type selection
984
- }
985
- }
986
-
987
- console.log(`\nSession ${sessionId} execution complete.`)
988
- ```
989
-
990
- ---
991
-
992
- ## CSV Helpers
993
-
994
- ```javascript
995
- function parseCsv(content) {
996
- const lines = content.trim().split('\n')
997
- if (lines.length < 2) return []
998
- const header = parseCsvLine(lines[0])
999
- return lines.slice(1).map(line => {
1000
- const cells = parseCsvLine(line)
1001
- const obj = {}
1002
- header.forEach((col, i) => { obj[col] = cells[i] || '' })
1003
- return obj
1004
- })
1005
- }
1006
-
1007
- function parseCsvLine(line) {
1008
- const cells = []
1009
- let current = ''
1010
- let inQuotes = false
1011
- for (let i = 0; i < line.length; i++) {
1012
- const ch = line[i]
1013
- if (inQuotes) {
1014
- if (ch === '"' && line[i + 1] === '"') {
1015
- current += '"'
1016
- i++
1017
- } else if (ch === '"') {
1018
- inQuotes = false
1019
- } else {
1020
- current += ch
1021
- }
1022
- } else {
1023
- if (ch === '"') {
1024
- inQuotes = true
1025
- } else if (ch === ',') {
1026
- cells.push(current)
1027
- current = ''
1028
- } else {
1029
- current += ch
1030
- }
1031
- }
1032
- }
1033
- cells.push(current)
1034
- return cells
1035
- }
1036
-
1037
- function updateMasterCsvRow(csvPath, taskId, updates) {
1038
- const content = Read(csvPath)
1039
- const lines = content.split('\n')
1040
- const header = parseCsvLine(lines[0])
1041
-
1042
- for (let i = 1; i < lines.length; i++) {
1043
- const cells = parseCsvLine(lines[i])
1044
- if (cells[0] === taskId) {
1045
- for (const [col, val] of Object.entries(updates)) {
1046
- const colIdx = header.indexOf(col)
1047
- if (colIdx >= 0) {
1048
- cells[colIdx] = String(val)
1049
- }
1050
- }
1051
- lines[i] = cells.map(csvEscape).join(',')  // re-quote every cell, including unchanged ones that contain quotes
1052
- break
1053
- }
1054
- }
1055
-
1056
- Write(csvPath, lines.join('\n'))
1057
- }
1058
-
1059
- function csvEscape(val) {
1060
- return `"${String(val).replace(/"/g, '""')}"`
1061
- }
1062
- ```
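Typical usage mirrors the Phase 4 merge step, e.g. marking a task as failed (illustrative values):

```javascript
// Update a single row in the master CSV; only the named columns change.
updateMasterCsvRow(`${sessionFolder}/tasks.csv`, 'IMPL-003', {
  status: 'failed',
  error: 'Verification command exited non-zero'
})
```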
1063
-
1064
- ---
1065
-
1066
- ## Agent Assignment Rules
1067
-
1068
- ```
1069
- meta.agent specified → Use specified agent file
1070
- meta.agent missing   → Infer from meta.type:
1071
- feature    → code-developer
1072
- test-gen   → code-developer
1073
- test-fix   → test-fix-agent
1074
- review     → universal-executor
1075
- docs       → doc-generator
1076
- default    → code-developer
1077
- ```
1078
-
1079
- ---
1080
-
1081
- ## Error Handling
1082
-
1083
- | Error | Recovery |
1084
- |-------|----------|
1085
- | No active sessions | Guide: run `$workflow-plan "description"` first |
1086
- | Missing planning docs | Guide: run `$workflow-plan --session ID` |
1087
- | Agent failure | Mark failed in tasks.csv, skip dependents, continue wave |
1088
- | Dependency cascade | Skip dependent tasks, report at Phase 5 |
1089
- | CSV parse error | Validate CSV format, report line number |
1090
- | Task JSON corruption | Skip task, mark failed in CSV |
1091
- | Commit failure | Log error, continue execution (non-blocking) |
1092
- | Circular dependency | Warn and assign to last wave |
1093
- | All wave tasks skipped | Log and continue to next wave |
1094
-
1095
- ---
1096
-
1097
- ## Core Rules
1098
-
1099
- 1. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results merge
1100
- 2. **CSV is Execution State**: tasks.csv holds all execution state; always re-read it before each wave
1101
- 3. **Task JSON is Rich Data**: agents read the full task JSON via `task_json_path`; the CSV is only a brief
1102
- 4. **Skip on Failure**: If a dependency failed, skip the dependent task (cascade)
1103
- 5. **Cleanup Temp Files**: Remove wave CSVs after results are merged
1104
- 6. **TODO_LIST.md Sync**: Phase 5 reconciles TODO_LIST.md with tasks.csv final state
1105
- 7. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
1106
- 8. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
1107
- 9. **Planning Artifacts Unchanged**: IMPL-*.json, plan.json, IMPL_PLAN.md formats are preserved
1108
-
1109
- ---
1110
-
1111
- ## Compact Protection
1112
-
1113
- > **COMPACT DIRECTIVE**: If context compression occurs during execution:
1114
- > - Re-read `tasks.csv` for current execution state (source of truth)
1115
- > - Re-read `TODO_LIST.md` for progress display
1116
- > - Re-read `plan.json` for execution hints (concurrency, phases)
1117
- > - Task JSONs are read per-wave via `task_json_path`, so compression has minimal impact
1118
- > - Wave loop state (`failedIds`, `skippedIds`, `wave`) can be reconstructed from the tasks.csv status column
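A minimal recovery sketch, assuming the `parseCsv` helper above and the pseudo-`Read` tool used throughout this skill:

```javascript
// Rebuild in-memory wave-loop state from tasks.csv after a context compaction.
function recoverWaveState(sessionFolder) {
  const rows = parseCsv(Read(`${sessionFolder}/tasks.csv`))
  const failedIds = new Set(rows.filter(r => r.status === 'failed').map(r => r.id))
  const skippedIds = new Set(rows.filter(r => r.status === 'skipped').map(r => r.id))
  const pendingWaves = rows.filter(r => r.status === 'pending').map(r => parseInt(r.wave))
  const nextWave = pendingWaves.length > 0 ? Math.min(...pendingWaves) : null
  return { failedIds, skippedIds, nextWave }
}
```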
1
+ ---
2
+ name: workflow-execute
3
+ description: |
4
+ Autonomous workflow execution pipeline with CSV wave engine.
5
+ Session discovery → plan validation → IMPL-*.json → CSV conversion →
6
+ wave execution via spawn_agents_on_csv → results sync.
7
+ Task JSONs remain the rich data source; CSV is brief + execution state.
8
+ argument-hint: "[-y|--yes] [-c|--concurrency N] [--resume-session=ID] [--with-commit]"
9
+ allowed-tools: spawn_agents_on_csv, request_user_input, Read, Write, Edit, Bash, Glob, Grep
10
+ ---
11
+
12
+ ## Auto Mode
13
+
14
+ When `--yes` or `-y`: Auto-select first session, auto-complete session after all tasks, skip all confirmations.
15
+
16
+ # Workflow Execute
17
+
18
+ ## Usage
19
+
20
+ ```bash
21
+ $workflow-execute
22
+ $workflow-execute --yes
23
+ $workflow-execute --resume-session=WFS-auth
24
+ $workflow-execute -y --with-commit
25
+ $workflow-execute -y -c 4 --with-commit
26
+ $workflow-execute -y --with-commit --resume-session=WFS-auth
27
+ ```
28
+
29
+ **Flags**:
30
+ - `-y, --yes`: Skip all confirmations (auto mode)
31
+ - `-c, --concurrency N`: Max concurrent agents per wave (default: 4)
32
+ - `--resume-session=ID`: Resume specific session (skip Phase 1-2)
33
+ - `--with-commit`: Auto-commit after each task completion
34
+
35
+ ---
36
+
37
+ ## Overview
38
+
39
+ Autonomous execution pipeline using `spawn_agents_on_csv` wave engine. Converts planning artifacts (IMPL-*.json + plan.json) into CSV for wave-based parallel execution, with full task JSON available via `task_json_path` column.
40
+
41
+ ```
42
+ ┌──────────────────────────────────────────────────────────────────┐
43
+ │ WORKFLOW EXECUTE PIPELINE │
44
+ ├──────────────────────────────────────────────────────────────────┤
45
+ │ │
46
+ │ Phase 1: Session Discovery │
47
+ │ ├─ Find active sessions │
48
+ │ ├─ Auto-select (1 session) or prompt (multiple) │
49
+ │ └─ Load session metadata │
50
+ │ │
51
+ │ Phase 2: Planning Document Validation │
52
+ │ ├─ Verify IMPL_PLAN.md exists │
53
+ │ ├─ Verify TODO_LIST.md exists │
54
+ │ └─ Verify .task/ contains IMPL-*.json │
55
+ │ │
56
+ │ Phase 3: JSON → CSV Conversion │
57
+ │ ├─ Read all IMPL-*.json + plan.json │
58
+ │ ├─ Skip already-completed tasks (resume support) │
59
+ │ ├─ Compute waves via Kahn's BFS (deps + plan hints) │
60
+ │ ├─ Generate tasks.csv (21 cols) + context.csv │
61
+ │ └─ Initialize discoveries.ndjson │
62
+ │ │
63
+ │ Phase 4: Wave Execute (spawn_agents_on_csv) │
64
+ │ ├─ Per wave: build prev_context → wave-{N}.csv │
65
+ │ ├─ spawn_agents_on_csv with execute instruction │
66
+ │ ├─ Merge results → tasks.csv + task JSON status │
67
+ │ ├─ Auto-commit per task (if --with-commit) │
68
+ │ └─ Cleanup temp wave CSVs │
69
+ │ │
70
+ │ Phase 5: Results Sync │
71
+ │ ├─ Export results.csv │
72
+ │ ├─ Reconcile TODO_LIST.md with tasks.csv status │
73
+ │ └─ User choice: Review | Complete Session │
74
+ │ │
75
+ │ Phase 6: Post-Implementation Review (Optional) │
76
+ │ ├─ Select review type (quality/security/architecture) │
77
+ │ ├─ CLI-assisted analysis │
78
+ │ └─ Generate REVIEW-{type}.md │
79
+ │ │
80
+ │ Resume Mode (--resume-session): │
81
+ │ └─ Skip Phase 1-2 → enter Phase 3 (skip completed tasks) │
82
+ │ │
83
+ └──────────────────────────────────────────────────────────────────┘
84
+ ```
85
+
86
+ ---
87
+
88
+ ## CSV Schemas
89
+
90
+ ### tasks.csv (21 columns)
91
+
92
+ ```csv
93
+ id,title,description,agent,scope,deps,execution_group,context_from,wave,task_json_path,hints,execution_directives,acceptance_criteria,prev_context,status,findings,files_modified,tests_passed,acceptance_met,summary_path,error
94
+ ```
95
+
96
+ | Column | Phase | Source | Description |
97
+ |--------|-------|--------|-------------|
98
+ | `id` | Input | task.id | IMPL-001 etc |
99
+ | `title` | Input | task.title | Short title |
100
+ | `description` | Input | task.description | Full description |
101
+ | `agent` | Input | meta.agent or inferred | @code-developer etc |
102
+ | `scope` | Input | task.scope / focus_paths | File scope glob |
103
+ | `deps` | Input | depends_on.join(';') | Dependency IDs (semicolon-separated) |
104
+ | `execution_group` | Input | meta.execution_group | Parallel group identifier |
105
+ | `context_from` | Computed | deps + completed predecessors | Context source IDs |
106
+ | `wave` | Computed | Kahn's BFS | Wave number (1-based) |
107
+ | `task_json_path` | Input | relative path | `.task/IMPL-001.json` (agent reads full JSON) |
108
+ | `hints` | Input | artifacts + pre_analysis refs | `tips \|\| file1;file2` |
109
+ | `execution_directives` | Input | convergence.verification | Verification commands |
110
+ | `acceptance_criteria` | Input | convergence.criteria.join | Acceptance conditions |
111
+ | `prev_context` | Computed(per-wave) | context_from findings lookup | Predecessor task findings |
112
+ | `status` | Output | agent result | pending→completed/failed/skipped |
113
+ | `findings` | Output | agent result | Key findings (max 500 chars) |
114
+ | `files_modified` | Output | agent result | Modified files (semicolon-separated) |
115
+ | `tests_passed` | Output | agent result | true/false |
116
+ | `acceptance_met` | Output | agent result | Acceptance status |
117
+ | `summary_path` | Output | generated | .summaries/IMPL-X-summary.md |
118
+ | `error` | Output | agent result | Error message |
119
+
120
+ **Key design**: `task_json_path` lets agents read the full task JSON (with pre_analysis, flow_control, convergence etc). CSV is "brief + execution state".
121
+
122
+ ### context.csv (4 columns)
123
+
124
+ ```csv
125
+ key,type,value,source
126
+ "tech_stack","array","TypeScript;React 18;Zustand","plan.json"
127
+ "conventions","array","Use useIntl;Barrel exports","plan.json"
128
+ "context_package_path","path",".process/context-package.json","session"
129
+ "discoveries_path","path","discoveries.ndjson","session"
130
+ ```
131
+
132
+ Injected into instruction template as static context — avoids each agent rediscovering project basics.
133
+
134
+ ---
135
+
136
+ ## Session Structure
137
+
138
+ ```
139
+ .workflow/active/WFS-{session}/
140
+ ├── workflow-session.json # Session state
141
+ ├── plan.json # Structured plan (machine-readable)
142
+ ├── IMPL_PLAN.md # Implementation plan (human-readable)
143
+ ├── TODO_LIST.md # Progress tracking (Phase 5 sync)
144
+ ├── tasks.csv # Phase 3 generated, Phase 4 updated
145
+ ├── context.csv # Phase 3 generated
146
+ ├── results.csv # Phase 5 exported
147
+ ├── discoveries.ndjson # Phase 3 initialized, Phase 4 agents append
148
+ ├── .task/ # Task definitions (unchanged)
149
+ │ ├── IMPL-1.json
150
+ │ └── IMPL-N.json
151
+ ├── .summaries/ # Agent-generated summaries
152
+ │ ├── IMPL-1-summary.md
153
+ │ └── IMPL-N-summary.md
154
+ ├── .process/context-package.json  # Unchanged
155
+ └── wave-{N}.csv # Phase 4 temporary (cleaned after each wave)
156
+ ```
157
+
158
+ ---
159
+
160
+ ## Implementation
161
+
162
+ ### Session Initialization
163
+
164
+ ```javascript
165
+ const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
166
+
167
+ // Parse flags
168
+ const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
169
+ const withCommit = $ARGUMENTS.includes('--with-commit')
170
+ const resumeMatch = $ARGUMENTS.match(/--resume-session[=\s]+(\S+)/)
171
+ const resumeSessionId = resumeMatch ? resumeMatch[1] : null
172
+ const isResumeMode = !!resumeSessionId
173
+ const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
174
+ const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
175
+ ```
176
+
177
+ ---
178
+
179
+ ### Phase 1: Session Discovery
180
+
181
+ **Applies to**: Normal mode only (skipped if `--resume-session`).
182
+
183
+ ```javascript
184
+ let sessionId, sessionFolder
185
+
186
+ if (isResumeMode) {
187
+ sessionId = resumeSessionId
188
+ sessionFolder = `.workflow/active/${sessionId}`
189
+ // Skip to Phase 3
190
+ } else {
191
+ const sessions = Bash(`ls -d .workflow/active/WFS-* 2>/dev/null`).trim().split('\n').filter(Boolean)
192
+
193
+ if (sessions.length === 0) {
194
+ console.log('ERROR: No active workflow sessions found.')
195
+ console.log('Run $workflow-plan "task description" to create a session.')
196
+ return
197
+ }
198
+
199
+ if (sessions.length === 1) {
200
+ sessionFolder = sessions[0]
201
+ sessionId = sessionFolder.split('/').pop()
202
+ console.log(`Auto-selected session: ${sessionId}`)
203
+ } else {
204
+ if (AUTO_YES) {
205
+ sessionFolder = sessions[0]
206
+ sessionId = sessionFolder.split('/').pop()
207
+ console.log(`[--yes] Auto-selected: ${sessionId}`)
208
+ } else {
209
+ const sessionInfos = sessions.slice(0, 4).map(s => {
210
+ const id = s.split('/').pop()
211
+ const total = parseInt(Bash(`grep -c '^- \\[' "${s}/TODO_LIST.md" 2>/dev/null || echo 0`).trim()) || 0
212
+ const done = parseInt(Bash(`grep -c '^- \\[x\\]' "${s}/TODO_LIST.md" 2>/dev/null || echo 0`).trim()) || 0
213
+ return { id, path: s, progress: `${done}/${total} tasks` }
214
+ })
215
+
216
+ const answer = request_user_input({
217
+ questions: [{
218
+ header: "Session",
219
+ id: "session",
220
+ question: "Select session to execute.",
221
+ options: sessionInfos.map(s => ({
222
+ label: s.id,
223
+ description: s.progress
224
+ }))
225
+ }]
226
+ })
227
+ sessionId = answer.answers.session.answers[0]
228
+ sessionFolder = `.workflow/active/${sessionId}`
229
+ }
230
+ }
231
+ }
232
+ ```
233
+
234
+ ---
235
+
236
+ ### Phase 2: Planning Document Validation
237
+
238
+ **Applies to**: Normal mode only.
239
+
240
+ ```javascript
241
+ if (!isResumeMode) {
242
+ const checks = {
243
+ 'IMPL_PLAN.md': Bash(`test -f "${sessionFolder}/IMPL_PLAN.md" && echo yes`).trim() === 'yes',
244
+ 'TODO_LIST.md': Bash(`test -f "${sessionFolder}/TODO_LIST.md" && echo yes`).trim() === 'yes',
245
+ '.task/ has files': parseInt(Bash(`ls ${sessionFolder}/.task/IMPL-*.json 2>/dev/null | wc -l`).trim()) > 0
246
+ }
247
+
248
+ const missing = Object.entries(checks).filter(([_, ok]) => !ok).map(([name]) => name)
249
+ if (missing.length > 0) {
250
+ console.log(`ERROR: Missing planning documents: ${missing.join(', ')}`)
251
+ console.log(`Run $workflow-plan --session ${sessionId} to generate plan.`)
252
+ return
253
+ }
254
+
255
+ console.log(`Planning documents validated.`)
256
+ }
257
+ ```
258
+
259
+ ---
260
+
261
+ ### Phase 3: JSON → CSV Conversion
262
+
263
+ **Applies to**: Both normal and resume modes (resume entry point).
264
+
265
+ **Objective**: Convert IMPL-*.json + plan.json into tasks.csv + context.csv with computed waves.
266
+
267
+ ```javascript
268
+ console.log(`\n## Phase 3: JSON → CSV Conversion\n`)
269
+
270
+ // Update session status to active
271
+ Bash(`cd "${sessionFolder}" && jq '.status = "active" | .execution_started_at = (.execution_started_at // "'"$(date -Iseconds)"'")' workflow-session.json > tmp.json && mv tmp.json workflow-session.json 2>/dev/null || true`)
272
+ Bash(`mkdir -p "${sessionFolder}/.summaries"`)
273
+
274
+ // 3.1: Read all IMPL-*.json
275
+ const taskFiles = Bash(`ls ${sessionFolder}/.task/IMPL-*.json 2>/dev/null`).trim().split('\n').filter(Boolean)
276
+ if (taskFiles.length === 0) {
277
+ console.log('ERROR: No task JSONs found in .task/')
278
+ return
279
+ }
280
+
281
+ const taskJsons = taskFiles.map(f => {
282
+ const content = Read(f)
283
+ const json = JSON.parse(content)
284
+ json._filePath = f
285
+ // Fallback: derive id from filename if missing
286
+ if (!json.id) {
287
+ json.id = f.split('/').pop().replace('.json', '')
288
+ }
289
+ return json
290
+ })
291
+
292
+ // 3.2: Skip completed tasks (resume support)
293
+ const todoContent = Read(`${sessionFolder}/TODO_LIST.md`)
294
+ const completedIds = new Set()
295
+ const todoLines = todoContent.match(/^- \[x\] (IMPL-\d+(?:\.\d+)?)/gm) || []
296
+ todoLines.forEach(line => {
297
+ const match = line.match(/IMPL-\d+(?:\.\d+)?/)
298
+ if (match) completedIds.add(match[0])
299
+ })
300
+
301
+ // Also check task JSON status field
302
+ taskJsons.forEach(tj => {
303
+ if (tj.status === 'completed') completedIds.add(tj.id)
304
+ })
305
+
306
+ const pendingJsons = taskJsons.filter(tj => !completedIds.has(tj.id))
307
+
308
+ console.log(` Total tasks: ${taskJsons.length}`)
309
+ console.log(` Already completed: ${completedIds.size}`)
310
+ console.log(` Pending: ${pendingJsons.length}`)
311
+
312
+ if (pendingJsons.length === 0) {
313
+ console.log(`\nAll tasks already completed. Proceeding to Phase 5.`)
314
+ // → Jump to Phase 5
315
+ }
316
+
317
+ // 3.3: Read plan.json for execution hints
318
+ const planJsonPath = `${sessionFolder}/plan.json`
319
+ const planJsonExists = Bash(`test -f "${planJsonPath}" && echo yes`).trim() === 'yes'
320
+ const planJson = planJsonExists ? JSON.parse(Read(planJsonPath) || '{}') : {}
321
+
322
+ // 3.4: Extract fields from task JSONs (handles two schema variants)
323
+ function resolveAgent(tj) {
324
+ if (tj.meta?.agent) return tj.meta.agent
325
+ const typeMap = {
326
+ 'feature': 'code-developer',
327
+ 'test-gen': 'code-developer',
328
+ 'test-fix': 'test-fix-agent',
329
+ 'review': 'universal-executor',
330
+ 'docs': 'doc-generator'
331
+ }
332
+ return typeMap[tj.meta?.type] || 'code-developer'
333
+ }
334
+
335
+ function extractDeps(tj) {
336
+ return tj.depends_on || tj.context?.depends_on || []
337
+ }
338
+
339
+ function buildHints(tj) {
340
+ const tips = []
341
+ const files = []
342
+ // Gather artifact references
343
+ if (tj.artifacts) {
344
+ tj.artifacts.forEach(a => { if (a.path) files.push(a.path) })
345
+ }
346
+ // Gather pre_analysis Read references
347
+ if (tj.pre_analysis) {
348
+ tj.pre_analysis.forEach(step => {
349
+ if (step.tool === 'Read' && step.path) files.push(step.path)
350
+ })
351
+ }
352
+ // Gather tips from meta or context
353
+ if (tj.meta?.hints) tips.push(tj.meta.hints)
354
+ if (tj.context?.tips) tips.push(tj.context.tips)
355
+
356
+ const tipsStr = tips.join('; ')
357
+ const filesStr = files.join(';')
358
+ if (tipsStr && filesStr) return `${tipsStr} || ${filesStr}`
359
+ if (tipsStr) return tipsStr
360
+ if (filesStr) return `|| ${filesStr}`
361
+ return ''
362
+ }
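+ // Illustration of the hints format (task JSON fields here are hypothetical):
+ //   meta.hints    = "Reuse the existing fetch wrapper"
+ //   artifacts     = [{ path: 'docs/api-notes.md' }]
+ //   pre_analysis  = [{ tool: 'Read', path: 'src/api/client.ts' }]
+ // buildHints(tj) → "Reuse the existing fetch wrapper || docs/api-notes.md;src/api/client.ts"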
363
+
364
+ function extractDirectives(tj) {
365
+ if (tj.convergence?.verification) {
366
+ return Array.isArray(tj.convergence.verification)
367
+ ? tj.convergence.verification.join('; ')
368
+ : tj.convergence.verification
369
+ }
370
+ if (tj.execution_config?.verification_command) return tj.execution_config.verification_command
371
+ return ''
372
+ }
373
+
374
+ function extractAcceptance(tj) {
375
+ if (tj.convergence?.criteria) {
376
+ return Array.isArray(tj.convergence.criteria)
377
+ ? tj.convergence.criteria.join('; ')
378
+ : tj.convergence.criteria
379
+ }
380
+ if (tj.context?.acceptance) {
381
+ return Array.isArray(tj.context.acceptance)
382
+ ? tj.context.acceptance.join('; ')
383
+ : tj.context.acceptance
384
+ }
385
+ return ''
386
+ }
387
+
388
+ function extractScope(tj) {
389
+ if (tj.scope) return tj.scope
390
+ if (tj.focus_paths) {
391
+ return Array.isArray(tj.focus_paths) ? tj.focus_paths.join(';') : tj.focus_paths
392
+ }
393
+ return ''
394
+ }
395
+
396
+ // Build task rows (all tasks — completed ones carry status forward)
397
+ const taskRows = taskJsons.map(tj => ({
398
+ id: tj.id,
399
+ title: tj.title || '',
400
+ description: tj.description || '',
401
+ agent: resolveAgent(tj),
402
+ scope: extractScope(tj),
403
+ deps: extractDeps(tj).join(';'),
404
+ execution_group: tj.meta?.execution_group || '',
405
+ context_from: '', // computed after wave assignment
406
+ task_json_path: `.task/${tj.id}.json`,
407
+ hints: buildHints(tj),
408
+ execution_directives: extractDirectives(tj),
409
+ acceptance_criteria: extractAcceptance(tj),
410
+ prev_context: '', // computed per-wave in Phase 4
411
+ status: completedIds.has(tj.id) ? 'completed' : 'pending',
412
+ findings: '',
413
+ files_modified: '',
414
+ tests_passed: '',
415
+ acceptance_met: '',
416
+ summary_path: `.summaries/${tj.id}-summary.md`,
417
+ error: ''
418
+ }))
419
+
420
+ // 3.5: Compute waves via Kahn's BFS with plan.json hints
421
+ function computeWaves(rows, planJson) {
422
+ const taskMap = new Map(rows.map(r => [r.id, r]))
423
+ const inDegree = new Map(rows.map(r => [r.id, 0]))
424
+ const adjList = new Map(rows.map(r => [r.id, []]))
425
+
426
+ for (const row of rows) {
427
+ const deps = row.deps.split(';').filter(Boolean)
428
+ for (const dep of deps) {
429
+ if (taskMap.has(dep)) {
430
+ adjList.get(dep).push(row.id)
431
+ inDegree.set(row.id, inDegree.get(row.id) + 1)
432
+ }
433
+ }
434
+ }
435
+
436
+ // BFS
437
+ const queue = []
438
+ const waveMap = new Map()
439
+
440
+ for (const [id, deg] of inDegree) {
441
+ if (deg === 0) {
442
+ queue.push([id, 1])
443
+ waveMap.set(id, 1)
444
+ }
445
+ }
446
+
447
+ let maxWave = 1
448
+ let idx = 0
449
+ while (idx < queue.length) {
450
+ const [current, depth] = queue[idx++]
451
+ for (const next of adjList.get(current)) {
452
+ const newDeg = inDegree.get(next) - 1
453
+ inDegree.set(next, newDeg)
454
+ const nextDepth = Math.max(waveMap.get(next) || 0, depth + 1)
455
+ waveMap.set(next, nextDepth)
456
+ if (newDeg === 0) {
457
+ queue.push([next, nextDepth])
458
+ maxWave = Math.max(maxWave, nextDepth)
459
+ }
460
+ }
461
+ }
462
+
463
+ // Check for unassigned (circular deps)
464
+ for (const row of rows) {
465
+ if (!waveMap.has(row.id)) {
466
+ console.log(`WARNING: Circular dependency involving ${row.id}, assigning to wave ${maxWave + 1}`)
467
+ waveMap.set(row.id, maxWave + 1)
468
+ maxWave = maxWave + 1
469
+ }
470
+ }
471
+
472
+ // Apply plan.json execution_graph hints if available
473
+ if (planJson.execution_graph?.phases) {
474
+ planJson.execution_graph.phases.forEach((phase, idx) => {
475
+ const phaseWave = idx + 1
476
+ const taskIds = phase.tasks || phase.task_ids || []
477
+ taskIds.forEach(id => {
478
+ if (waveMap.has(id)) {
479
+ // Only shift to later wave (never earlier — respect deps)
480
+ if (phaseWave > waveMap.get(id)) {
481
+ waveMap.set(id, phaseWave)
482
+ }
483
+ }
484
+ })
485
+ })
486
+ maxWave = Math.max(maxWave, ...waveMap.values())
487
+ }
488
+
489
+ return { waveMap, maxWave }
490
+ }
491
+
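+ // Worked example (hypothetical graph): IMPL-1 (no deps), IMPL-2 (deps IMPL-1),
+ // IMPL-3 (deps IMPL-1), IMPL-4 (deps IMPL-2;IMPL-3)
+ // → waves: IMPL-1 = 1, IMPL-2 = 2, IMPL-3 = 2, IMPL-4 = 3 (maxWave = 3)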
492
+ const { waveMap, maxWave } = computeWaves(taskRows, planJson)
493
+
494
+ // Assign wave + context_from
495
+ taskRows.forEach(row => {
496
+ row.wave = waveMap.get(row.id) || 1
497
+ // context_from = deps + already-completed IDs for resume context
498
+ const depIds = row.deps.split(';').filter(Boolean)
499
+ const contextIds = [...new Set([...depIds, ...[...completedIds].filter(id => id !== row.id)])]
500
+ row.context_from = contextIds.join(';')
501
+ })
502
+
503
+ // 3.6: Write tasks.csv
504
+ function csvEscape(val) {
505
+ return `"${String(val).replace(/"/g, '""')}"`
506
+ }
507
+
508
+ const tasksCsvHeader = 'id,title,description,agent,scope,deps,execution_group,context_from,wave,task_json_path,hints,execution_directives,acceptance_criteria,prev_context,status,findings,files_modified,tests_passed,acceptance_met,summary_path,error'
509
+ const tasksCsvRows = taskRows.map(r =>
510
+ [r.id, r.title, r.description, r.agent, r.scope, r.deps, r.execution_group,
511
+ r.context_from, r.wave, r.task_json_path, r.hints, r.execution_directives,
512
+ r.acceptance_criteria, r.prev_context, r.status, r.findings, r.files_modified,
513
+ r.tests_passed, r.acceptance_met, r.summary_path, r.error]
514
+ .map(csvEscape).join(',')
515
+ )
516
+ Write(`${sessionFolder}/tasks.csv`, [tasksCsvHeader, ...tasksCsvRows].join('\n'))
517
+
518
+ // 3.7: Write context.csv
519
+ const contextRows = ['key,type,value,source']
520
+ if (planJson.tech_stack) {
521
+ const stack = Array.isArray(planJson.tech_stack) ? planJson.tech_stack.join(';') : planJson.tech_stack
522
+ contextRows.push(`"tech_stack","array","${stack}","plan.json"`)
523
+ }
524
+ if (planJson.conventions) {
525
+ const conv = Array.isArray(planJson.conventions) ? planJson.conventions.join(';') : planJson.conventions
526
+ contextRows.push(`"conventions","array","${conv}","plan.json"`)
527
+ }
528
+ const ctxPkgExists = Bash(`test -f "${sessionFolder}/.process/context-package.json" && echo yes`).trim() === 'yes'
529
+ if (ctxPkgExists) {
530
+ contextRows.push(`"context_package_path","path",".process/context-package.json","session"`)
531
+ }
532
+ contextRows.push(`"discoveries_path","path","discoveries.ndjson","session"`)
533
+ Write(`${sessionFolder}/context.csv`, contextRows.join('\n'))
534
+
535
+ // 3.8: Initialize discoveries.ndjson
536
+ Bash(`touch "${sessionFolder}/discoveries.ndjson"`)
537
+
538
+ // 3.9: User validation (skip if AUTO_YES)
539
+ if (!AUTO_YES) {
540
+ const pendingRows = taskRows.filter(r => r.status === 'pending')
541
+ console.log(`\n## Wave Execution Plan\n`)
542
+ console.log(` Tasks: ${pendingRows.length} pending across ${maxWave} waves\n`)
543
+ for (let w = 1; w <= maxWave; w++) {
544
+ const waveTasks = pendingRows.filter(r => r.wave === w)
545
+ if (waveTasks.length === 0) continue
546
+ console.log(` Wave ${w}: ${waveTasks.map(t => `${t.id}(${t.agent})`).join(', ')}`)
547
+ }
548
+
549
+ const answer = request_user_input({
550
+ questions: [{
551
+ header: "Confirm",
552
+ id: "confirm_execute",
553
+ question: `Proceed with ${pendingRows.length} tasks across ${maxWave} waves?`,
554
+ options: [
555
+ { label: "Execute (Recommended)", description: "Proceed with wave execution" },
556
+ { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv then --resume-session` },
557
+ { label: "Cancel", description: "Abort" }
558
+ ]
559
+ }]
560
+ })
561
+
562
+ if (answer.answers.confirm_execute.answers[0] === "Modify") {
563
+ console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $workflow-execute --resume-session=${sessionId}`)
564
+ return
565
+ } else if (answer.answers.confirm_execute.answers[0] === "Cancel") {
566
+ return
567
+ }
568
+ }
569
+
570
+ console.log(`\n tasks.csv: ${taskRows.length} rows (${pendingJsons.length} pending)`)
571
+ console.log(` context.csv: ${contextRows.length - 1} entries`)
572
+ console.log(` Wave plan: ${maxWave} waves`)
573
+ ```
574
+
575
+ ---
576
+
577
+ ### Phase 4: Wave Execute (spawn_agents_on_csv)
578
+
579
+ **Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave builds `prev_context` from completed predecessors.
580
+
581
+ ```javascript
582
+ console.log(`\n## Phase 4: Wave Execute\n`)
583
+
584
+ // Determine concurrency from plan.json or flag
585
+ let effectiveConcurrency = maxConcurrency
586
+ if (planJson.recommended_execution === 'Sequential') {
587
+ effectiveConcurrency = 1
588
+ console.log(` Sequential mode (from plan.json), concurrency: 1`)
589
+ } else {
590
+ console.log(` Parallel mode, concurrency: ${effectiveConcurrency}`)
591
+ }
592
+
593
+ // Read context.csv for instruction injection
594
+ const contextCsvContent = Read(`${sessionFolder}/context.csv`)
595
+ const contextEntries = parseCsv(contextCsvContent)
596
+ const contextBlock = contextEntries.map(e => `- **${e.key}** (${e.type}): ${e.value}`).join('\n')
597
+
598
+ const failedIds = new Set()
599
+ const skippedIds = new Set()
600
+
601
+ for (let wave = 1; wave <= maxWave; wave++) {
602
+ console.log(`\n### Wave ${wave}/${maxWave}\n`)
603
+
604
+ // Re-read master CSV for current state
605
+ const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
606
+ const waveTasks = masterCsv.filter(row =>
607
+ parseInt(row.wave) === wave && row.status === 'pending'
608
+ )
609
+
610
+ if (waveTasks.length === 0) {
611
+ console.log(` No pending tasks in wave ${wave}`)
612
+ continue
613
+ }
614
+
615
+ // Skip tasks whose deps failed/skipped
616
+ const executableTasks = []
617
+ for (const task of waveTasks) {
618
+ const deps = (task.deps || '').split(';').filter(Boolean)
619
+ if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
620
+ skippedIds.add(task.id)
621
+ updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, {
622
+ status: 'skipped',
623
+ error: 'Dependency failed or skipped'
624
+ })
625
+ console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
626
+ continue
627
+ }
628
+ executableTasks.push(task)
629
+ }
630
+
631
+ if (executableTasks.length === 0) {
632
+ console.log(` No executable tasks in wave ${wave}`)
633
+ continue
634
+ }
635
+
636
+ // Build prev_context for each task
637
+ for (const task of executableTasks) {
638
+ task.prev_context = buildPrevContext(task.context_from, masterCsv)
639
+ }
640
+
641
+ // Write wave CSV (input columns + prev_context)
642
+ const waveHeader = 'id,title,description,agent,scope,deps,execution_group,context_from,wave,task_json_path,hints,execution_directives,acceptance_criteria,prev_context'
643
+ const waveRows = executableTasks.map(t =>
644
+ [t.id, t.title, t.description, t.agent, t.scope, t.deps, t.execution_group,
645
+ t.context_from, t.wave, t.task_json_path, t.hints, t.execution_directives,
646
+ t.acceptance_criteria, t.prev_context]
647
+ .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
648
+ .join(',')
649
+ )
650
+ Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
651
+
652
+ // Execute wave
653
+ console.log(` Executing ${executableTasks.length} tasks (concurrency: ${effectiveConcurrency})...`)
654
+
655
+ spawn_agents_on_csv({
656
+ csv_path: `${sessionFolder}/wave-${wave}.csv`,
657
+ id_column: "id",
658
+ instruction: buildExecuteInstruction(sessionFolder, contextBlock),
659
+ max_concurrency: effectiveConcurrency,
660
+ max_runtime_seconds: 600,
661
+ output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
662
+ output_schema: {
663
+ type: "object",
664
+ properties: {
665
+ id: { type: "string" },
666
+ status: { type: "string", enum: ["completed", "failed"] },
667
+ findings: { type: "string" },
668
+ files_modified: { type: "array", items: { type: "string" } },
669
+ tests_passed: { type: "boolean" },
670
+ acceptance_met: { type: "string" },
671
+ error: { type: "string" }
672
+ },
673
+ required: ["id", "status", "findings", "tests_passed"]
674
+ }
675
+ })
676
+
677
+ // Merge results into master CSV + update task JSONs
678
+ const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
679
+ for (const result of waveResults) {
680
+ const filesModified = Array.isArray(result.files_modified)
681
+ ? result.files_modified.join(';')
682
+ : (result.files_modified || '')
683
+
684
+ updateMasterCsvRow(`${sessionFolder}/tasks.csv`, result.id, {
685
+ status: result.status,
686
+ findings: result.findings || '',
687
+ files_modified: filesModified,
688
+ tests_passed: String(result.tests_passed ?? ''),
689
+ acceptance_met: result.acceptance_met || '',
690
+ error: result.error || ''
691
+ })
692
+
693
+ // Update task JSON status
694
+ if (result.status === 'completed' || result.status === 'failed') {
695
+ Bash(`cd "${sessionFolder}/.task" && jq '.status="${result.status}" | .status_history=(.status_history // [])+[{"from":"in_progress","to":"${result.status}","changed_at":"'"$(date -Iseconds)"'"}]' "${result.id}.json" > tmp.json && mv tmp.json "${result.id}.json" 2>/dev/null || true`)
696
+ }
697
+
698
+ if (result.status === 'failed') {
699
+ failedIds.add(result.id)
700
+ console.log(` [${result.id}] → FAILED: ${result.error}`)
701
+ } else {
702
+ console.log(` [${result.id}] → COMPLETED${result.tests_passed ? ' (tests passed)' : ''}`)
703
+ }
704
+
705
+ // Auto-commit per completed task
706
+ if (withCommit && result.status === 'completed' && filesModified) {
707
+ const files = filesModified.split(';').filter(Boolean)
708
+ if (files.length > 0) {
709
+ const taskJson = JSON.parse(Read(`${sessionFolder}/.task/${result.id}.json`) || '{}')
710
+ const typeMap = { feature: 'feat', bugfix: 'fix', refactor: 'refactor', 'test-gen': 'test', docs: 'docs' }
711
+ const type = typeMap[taskJson.meta?.type] || 'chore'
712
+ const title = taskJson.title || result.id
713
+ const msg = `${type}: ${title}`
714
+ Bash(`git add ${files.map(f => '"' + f + '"').join(' ')} && git commit -m "${msg}" 2>/dev/null || true`)
715
+ console.log(` Committed: ${msg}`)
716
+ }
717
+ }
718
+ }
719
+
720
+ // Cleanup temp wave CSVs
721
+ Bash(`rm -f "${sessionFolder}/wave-${wave}.csv" "${sessionFolder}/wave-${wave}-results.csv"`)
722
+
723
+ const completedCount = waveResults.filter(r => r.status === 'completed').length
724
+ const failedCount = waveResults.filter(r => r.status === 'failed').length
725
+ console.log(` Wave ${wave} done: ${completedCount} completed, ${failedCount} failed`)
726
+ }
727
+ ```
728
+
729
+ **prev_context Builder**
730
+
731
+ ```javascript
732
+ function buildPrevContext(contextFrom, masterCsv) {
733
+ if (!contextFrom) return 'No previous context available'
734
+
735
+ const ids = contextFrom.split(';').filter(Boolean)
736
+ const entries = []
737
+
738
+ ids.forEach(id => {
739
+ const row = masterCsv.find(r => r.id === id)
740
+ if (row && row.status === 'completed' && row.findings) {
741
+ entries.push(`[${row.id}: ${row.title}] ${row.findings}`)
742
+ if (row.files_modified) entries.push(` Modified: ${row.files_modified}`)
743
+ }
744
+ })
745
+
746
+ return entries.length > 0 ? entries.join('\n') : 'No previous context available'
747
+ }
748
+ ```
749
+
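+ For example, given a completed predecessor row (values are hypothetical), the builder returns roughly:
+
+ ```javascript
+ const masterCsv = [{ id: 'IMPL-1', title: 'Add auth store', status: 'completed',
+   findings: 'Created Zustand auth store with token refresh', files_modified: 'src/stores/auth.ts' }]
+ buildPrevContext('IMPL-1', masterCsv)
+ // → "[IMPL-1: Add auth store] Created Zustand auth store with token refresh
+ //     Modified: src/stores/auth.ts"
+ ```
+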
750
+ **Execute Instruction Template**
751
+
752
+ ```javascript
753
+ function buildExecuteInstruction(sessionFolder, contextBlock) {
754
+ return `
755
+ ## TASK ASSIGNMENT
756
+
757
+ ### MANDATORY FIRST STEPS
758
+ 1. Read your FULL task JSON: ${sessionFolder}/{task_json_path}
759
+ - CSV row is a brief — task JSON has pre_analysis, flow_control, convergence, and full context
760
+ 2. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists)
761
+ 3. Read project context: .workflow/project-tech.json (if exists)
762
+
763
+ ---
764
+
765
+ ## Your Task
766
+
767
+ **Task ID**: {id}
768
+ **Title**: {title}
769
+ **Description**: {description}
770
+ **Agent Type**: {agent}
771
+ **Scope**: {scope}
772
+
773
+ ### Task JSON (full details)
774
+ Read: ${sessionFolder}/{task_json_path}
775
+
776
+ ### Implementation Hints & Reference Files
777
+ {hints}
778
+
779
+ > Format: \`tips text || file1;file2\`. Read ALL reference files (after ||) before starting. Apply tips (before ||) as guidance.
780
+
781
+ ### Execution Directives
782
+ {execution_directives}
783
+
784
+ > Commands to run for verification, tool restrictions, or environment requirements.
785
+
786
+ ### Acceptance Criteria
787
+ {acceptance_criteria}
788
+
789
+ ### Previous Context (from predecessor tasks)
790
+ {prev_context}
791
+
792
+ ### Project Context
793
+ ${contextBlock}
794
+
795
+ ---
796
+
797
+ ## Execution Protocol
798
+
799
+ 1. **Read task JSON**: Load ${sessionFolder}/{task_json_path} for full task details including pre_analysis steps and flow_control
800
+ 2. **Check execution method**: If task JSON has \`execution_config.method\`, follow it (agent vs cli mode)
801
+ 3. **Execute pre_analysis**: If task JSON has \`pre_analysis\` steps, run them first to gather context
802
+ 4. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
803
+ 5. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared findings
804
+ 6. **Use context**: Apply predecessor tasks' findings from prev_context above
805
+ 7. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
806
+ 8. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
807
+ 9. **Execute**: Implement the task as described in the task JSON
808
+ 10. **Generate summary**: Write execution summary to ${sessionFolder}/.summaries/{id}-summary.md with sections:
809
+ ## Summary, ## Files Modified (as \`- \\\`path\\\`\` list), ## Key Decisions, ## Tests
810
+ 11. **Run directives**: Execute commands from {execution_directives} to verify your work
811
+ 12. **Update TODO**: In ${sessionFolder}/TODO_LIST.md, change \`- [ ] {id}\` to \`- [x] {id}\`
812
+ 13. **Share discoveries**: Append findings to shared board:
813
+ \`\`\`bash
814
+ echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
815
+ \`\`\`
816
+ 14. **Report result**: Return JSON via report_agent_job_result
817
+
818
+ ### Discovery Types to Share
819
+ - \`code_pattern\`: {name, file, description} — reusable patterns found
820
+ - \`integration_point\`: {file, description, exports[]} — module connection points
821
+ - \`convention\`: {naming, imports, formatting} — code style conventions
822
+ - \`blocker\`: {issue, severity, impact} — blocking issues encountered
823
+
824
+ ---
825
+
826
+ ## Output (report_agent_job_result)
827
+
828
+ Return JSON:
829
+ {
830
+ "id": "{id}",
831
+ "status": "completed" | "failed",
832
+ "findings": "Key discoveries and implementation notes (max 500 chars)",
833
+ "files_modified": ["path1", "path2"],
834
+ "tests_passed": true | false,
835
+ "acceptance_met": "Summary of which acceptance criteria were met/unmet",
836
+ "error": ""
837
+ }
838
+
839
+ **IMPORTANT**: Set status to "completed" ONLY if:
840
+ - All acceptance criteria are met
841
+ - Verification directives pass (if any)
842
+ Otherwise set status to "failed" with details in error field.
843
+ `
844
+ }
845
+ ```
846
+
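+ For illustration, one appended discoveries.ndjson line might look like this (values are hypothetical; the shape follows the Discovery Types listed in the template above):
+
+ ```
+ {"ts":"2025-01-15T10:42:00+08:00","worker":"IMPL-2","type":"code_pattern","data":{"name":"useApiQuery wrapper","file":"src/hooks/useApiQuery.ts","description":"Shared fetch + cache hook"}}
+ ```
+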
847
+ ---
848
+
849
+ ### Phase 5: Results Sync
850
+
851
+ **Objective**: Export results, reconcile TODO_LIST.md, update session status.
852
+
853
+ ```javascript
854
+ console.log(`\n## Phase 5: Results Sync\n`)
855
+
856
+ // 5.1: Export results.csv (final copy of tasks.csv)
857
+ const finalCsvContent = Read(`${sessionFolder}/tasks.csv`)
858
+ Write(`${sessionFolder}/results.csv`, finalCsvContent)
859
+
860
+ // 5.2: Reconcile TODO_LIST.md with tasks.csv status
861
+ const finalTasks = parseCsv(finalCsvContent)
862
+ let todoMd = Read(`${sessionFolder}/TODO_LIST.md`)
863
+
864
+ for (const task of finalTasks) {
865
+ if (task.status === 'completed') {
866
+ // Ensure marked as [x] in TODO_LIST.md
867
+ const uncheckedPattern = new RegExp(`^(- \\[ \\] ${task.id.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}(:.*)?)$`, 'm')
868
+ todoMd = todoMd.replace(uncheckedPattern, (match, line) => line.replace('- [ ]', '- [x]'))
869
+ }
870
+ }
871
+ Write(`${sessionFolder}/TODO_LIST.md`, todoMd)
872
+
873
+ // 5.3: Summary
874
+ const completed = finalTasks.filter(t => t.status === 'completed')
875
+ const failed = finalTasks.filter(t => t.status === 'failed')
876
+ const skipped = finalTasks.filter(t => t.status === 'skipped')
877
+ const pending = finalTasks.filter(t => t.status === 'pending')
878
+
879
+ console.log(` Results:`)
880
+ console.log(` Completed: ${completed.length}`)
881
+ console.log(` Failed: ${failed.length}`)
882
+ console.log(` Skipped: ${skipped.length}`)
883
+ console.log(` Pending: ${pending.length}`)
884
+
885
+ // 5.4: Update session status
886
+ const allDone = failed.length === 0 && skipped.length === 0 && pending.length === 0
887
+ const sessionStatus = allDone ? 'completed' : 'partial'
888
+ Bash(`cd "${sessionFolder}" && jq '.status = "${sessionStatus}" | .completed_at = "'"$(date -Iseconds)"'"' workflow-session.json > tmp.json && mv tmp.json workflow-session.json 2>/dev/null || true`)
889
+
890
+ // 5.5: User next step
891
+ if (AUTO_YES) {
892
+ console.log(` [--yes] Session ${sessionId} ${sessionStatus}.`)
893
+ } else {
894
+ const nextStep = request_user_input({
895
+ questions: [{
896
+ header: "Next Step",
897
+ id: "next_step",
898
+ question: "Execution complete. What is next?",
899
+ options: [
900
+ { label: "Enter Review (Recommended)", description: "Run post-implementation review (security/quality/architecture)" },
901
+ { label: "Complete Session", description: "Archive session and finalize" }
902
+ ]
903
+ }]
904
+ })
905
+
906
+ if (nextStep.answers.next_step.answers[0] === 'Enter Review (Recommended)') {
907
+ // → Phase 6
908
+ } else {
909
+ console.log(` Session ${sessionId} ${sessionStatus}.`)
910
+ }
911
+ }
912
+ ```
913
+
914
+ ---
915
+
916
+ ### Phase 6: Post-Implementation Review (Optional)
917
+
918
+ **Objective**: CLI-assisted specialized review of implemented code.
919
+
920
+ ```javascript
921
+ // Phase 6 entry (from Phase 5 "Enter Review" or direct invocation)
922
+ console.log(`\n## Phase 6: Post-Implementation Review\n`)
923
+
924
+ const reviewType = AUTO_YES ? 'quality' : (() => {
925
+ const answer = request_user_input({
926
+ questions: [{
927
+ header: "Review Type",
928
+ id: "review_type",
929
+ question: "Select review type.",
930
+ options: [
931
+ { label: "Quality (Recommended)", description: "Code quality, best practices, maintainability" },
932
+ { label: "Security", description: "Security vulnerabilities, OWASP Top 10" },
933
+ { label: "Architecture", description: "Architecture decisions, scalability, patterns" }
934
+ ]
935
+ }]
936
+ })
937
+ // Normalize the selected label (e.g., "Quality (Recommended)") to a bare type: quality/security/architecture
+ return answer.answers.review_type.answers[0].split(' ')[0].toLowerCase()
938
+ })()
939
+
940
+ // Get list of modified files from tasks.csv
941
+ const reviewTasks = parseCsv(Read(`${sessionFolder}/tasks.csv`))
942
+ const allModifiedFiles = new Set()
943
+ reviewTasks.forEach(t => {
944
+ (t.files_modified || '').split(';').filter(Boolean).forEach(f => allModifiedFiles.add(f))
945
+ })
946
+
947
+ const fileList = [...allModifiedFiles].join(', ')
948
+
949
+ Bash({
950
+ command: `ccw cli -p "PURPOSE: Post-implementation ${reviewType} review of modified files. Identify issues and generate actionable report.
951
+ TASK:
952
+ • Review all modified files for ${reviewType} concerns
953
+ • Assess overall ${reviewType} posture
954
+ • Generate prioritized issue list with severity
955
+ • Provide remediation recommendations
956
+ MODE: analysis
957
+ CONTEXT: @${[...allModifiedFiles].map(f => f).join(' @')}
958
+ EXPECTED: Structured ${reviewType} review report with: summary, issue list (severity, file, line, description, fix), overall score
959
+ CONSTRAINTS: Focus on ${reviewType} | Review only modified files: ${fileList}" --tool gemini --mode analysis --rule analysis-review-code-quality`,
960
+ run_in_background: true
961
+ })
962
+ // Wait for the background CLI job to finish, then capture its output as reviewReport
963
+
964
+ Write(`${sessionFolder}/REVIEW-${reviewType}.md`, reviewReport)
965
+ console.log(` Review complete: ${sessionFolder}/REVIEW-${reviewType}.md`)
966
+
967
+ // Post-review options
968
+ if (!AUTO_YES) {
969
+ const postReview = request_user_input({
970
+ questions: [{
971
+ header: "Post Review",
972
+ id: "post_review",
973
+ question: "Review complete. What is next?",
974
+ options: [
975
+ { label: "Complete Session (Recommended)", description: "Archive and finalize" },
976
+ { label: "Another Review", description: "Run a different review type" }
977
+ ]
978
+ }]
979
+ })
980
+
981
+ if (postReview.answers.post_review.answers[0] === 'Another Review') {
982
+ // Loop back to Phase 6 review type selection
983
+ }
984
+ }
985
+
986
+ console.log(`\nSession ${sessionId} execution complete.`)
987
+ ```
988
+
989
+ ---
990
+
991
+ ## CSV Helpers
992
+
993
+ ```javascript
994
+ function parseCsv(content) {
995
+ const lines = content.trim().split('\n')
996
+ if (lines.length < 2) return []
997
+ const header = parseCsvLine(lines[0])
998
+ return lines.slice(1).map(line => {
999
+ const cells = parseCsvLine(line)
1000
+ const obj = {}
1001
+ header.forEach((col, i) => { obj[col] = cells[i] || '' })
1002
+ return obj
1003
+ })
1004
+ }
1005
+
1006
+ function parseCsvLine(line) {
1007
+ const cells = []
1008
+ let current = ''
1009
+ let inQuotes = false
1010
+ for (let i = 0; i < line.length; i++) {
1011
+ const ch = line[i]
1012
+ if (inQuotes) {
1013
+ if (ch === '"' && line[i + 1] === '"') {
1014
+ current += '"'
1015
+ i++
1016
+ } else if (ch === '"') {
1017
+ inQuotes = false
1018
+ } else {
1019
+ current += ch
1020
+ }
1021
+ } else {
1022
+ if (ch === '"') {
1023
+ inQuotes = true
1024
+ } else if (ch === ',') {
1025
+ cells.push(current)
1026
+ current = ''
1027
+ } else {
1028
+ current += ch
1029
+ }
1030
+ }
1031
+ }
1032
+ cells.push(current)
1033
+ return cells
1034
+ }
1035
+
1036
+ function updateMasterCsvRow(csvPath, taskId, updates) {
1037
+ const content = Read(csvPath)
1038
+ const lines = content.split('\n')
1039
+ const header = parseCsvLine(lines[0])
1040
+
1041
+ for (let i = 1; i < lines.length; i++) {
1042
+ const cells = parseCsvLine(lines[i])
1043
+ if (cells[0] === taskId) {
1044
+ for (const [col, val] of Object.entries(updates)) {
1045
+ const colIdx = header.indexOf(col)
1046
+ if (colIdx >= 0) {
1047
+ cells[colIdx] = String(val).replace(/"/g, '""')
1048
+ }
1049
+ }
1050
+ lines[i] = cells.map(c => `"${c}"`).join(',')
1051
+ break
1052
+ }
1053
+ }
1054
+
1055
+ Write(csvPath, lines.join('\n'))
1056
+ }
1057
+
1058
+ function csvEscape(val) {
1059
+ return `"${String(val).replace(/"/g, '""')}"`
1060
+ }
1061
+ ```
1062
+
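+ A quick sanity check of the quoting behavior (the cell values are arbitrary):
+
+ ```javascript
+ // csvEscape doubles embedded quotes and wraps the cell; parseCsvLine reverses this,
+ // so commas and quotes inside cells survive the round trip.
+ const line = ['IMPL-2', 'Fix "edge" case, part 2'].map(csvEscape).join(',')
+ // line === '"IMPL-2","Fix ""edge"" case, part 2"'
+ parseCsvLine(line)
+ // → ['IMPL-2', 'Fix "edge" case, part 2']
+ ```
+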
1063
+ ---
1064
+
1065
+ ## Agent Assignment Rules
1066
+
1067
+ ```
1068
+ meta.agent specified → Use specified agent file
1069
+ meta.agent missing   → Infer from meta.type:
1070
+   feature   → code-developer
1071
+   test-gen  → code-developer
1072
+   test-fix  → test-fix-agent
1073
+   review    → universal-executor
1074
+   docs      → doc-generator
1075
+   default   → code-developer
1076
+ ```
1077
+
1078
+ ---
1079
+
1080
+ ## Error Handling
1081
+
1082
+ | Error | Recovery |
1083
+ |-------|----------|
1084
+ | No active sessions | Guide: run `$workflow-plan "description"` first |
1085
+ | Missing planning docs | Guide: run `$workflow-plan --session ID` |
1086
+ | Agent failure | Mark failed in tasks.csv, skip dependents, continue wave |
1087
+ | Dependency cascade | Skip dependent tasks, report at Phase 5 |
1088
+ | CSV parse error | Validate CSV format, report line number |
1089
+ | Task JSON corruption | Skip task, mark failed in CSV |
1090
+ | Commit failure | Log error, continue execution (non-blocking) |
1091
+ | Circular dependency | Warn and assign to last wave |
1092
+ | All wave tasks skipped | Log and continue to next wave |
1093
+
1094
+ ---
1095
+
1096
+ ## Core Rules
1097
+
1098
+ 1. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results merge
1099
+ 2. **CSV is Execution State**: tasks.csv holds all execution state; always re-read it before each wave
1100
+ 3. **Task JSON is Rich Data**: agents read the full task JSON via `task_json_path`; the CSV row is only a brief
1101
+ 4. **Skip on Failure**: If a dependency failed, skip the dependent task (cascade)
1102
+ 5. **Cleanup Temp Files**: Remove wave CSVs after results are merged
1103
+ 6. **TODO_LIST.md Sync**: Phase 5 reconciles TODO_LIST.md with tasks.csv final state
1104
+ 7. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
1105
+ 8. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
1106
+ 9. **Planning Artifacts Unchanged**: IMPL-*.json, plan.json, IMPL_PLAN.md formats are preserved
1107
+
1108
+ ---
1109
+
1110
+ ## Compact Protection
1111
+
1112
+ > **COMPACT DIRECTIVE**: If context compression occurs during execution:
1113
+ > - Re-read `tasks.csv` for current execution state (source of truth)
1114
+ > - Re-read `TODO_LIST.md` for progress display
1115
+ > - Re-read `plan.json` for execution hints (concurrency, phases)
1116
+ > - Task JSONs are read per-wave via `task_json_path`, compression has minimal impact
1117
+ > - Wave loop state (`failedIds`, `skippedIds`, `wave`) can be reconstructed from tasks.csv status column
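+
+ A minimal sketch of that reconstruction, reusing the parseCsv helper above (variable names are illustrative):
+
+ ```javascript
+ // Rebuild in-memory wave-loop state from tasks.csv after a context compression
+ const rows = parseCsv(Read(`${sessionFolder}/tasks.csv`))
+ const failedIds = new Set(rows.filter(r => r.status === 'failed').map(r => r.id))
+ const skippedIds = new Set(rows.filter(r => r.status === 'skipped').map(r => r.id))
+ const pendingWaves = rows.filter(r => r.status === 'pending').map(r => parseInt(r.wave))
+ const resumeWave = pendingWaves.length > 0 ? Math.min(...pendingWaves) : null  // null → nothing left to run
+ ```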