claude-code-workflow 7.2.13 → 7.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. package/.claude/commands/workflow/analyze-with-file.md +61 -40
  2. package/.claude/skills/workflow-lite-plan/SKILL.md +3 -7
  3. package/.codex/skills/analyze-with-file/SKILL.md +1181 -1182
  4. package/.codex/skills/brainstorm/SKILL.md +723 -725
  5. package/.codex/skills/brainstorm-with-file/SKILL.md +10 -5
  6. package/.codex/skills/clean/SKILL.md +33 -26
  7. package/.codex/skills/collaborative-plan-with-file/SKILL.md +830 -831
  8. package/.codex/skills/csv-wave-pipeline/SKILL.md +906 -906
  9. package/.codex/skills/issue-discover/SKILL.md +57 -50
  10. package/.codex/skills/issue-discover/phases/01-issue-new.md +18 -11
  11. package/.codex/skills/issue-discover/phases/02-discover.md +31 -26
  12. package/.codex/skills/issue-discover/phases/03-discover-by-prompt.md +13 -11
  13. package/.codex/skills/issue-discover/phases/04-quick-execute.md +32 -27
  14. package/.codex/skills/parallel-dev-cycle/SKILL.md +402 -402
  15. package/.codex/skills/project-documentation-workflow/SKILL.md +13 -3
  16. package/.codex/skills/roadmap-with-file/SKILL.md +901 -897
  17. package/.codex/skills/session-sync/SKILL.md +222 -212
  18. package/.codex/skills/spec-add/SKILL.md +620 -613
  19. package/.codex/skills/spec-generator/SKILL.md +2 -2
  20. package/.codex/skills/spec-generator/phases/01-5-requirement-clarification.md +10 -10
  21. package/.codex/skills/spec-generator/phases/01-discovery.md +11 -18
  22. package/.codex/skills/spec-generator/phases/02-product-brief.md +5 -5
  23. package/.codex/skills/spec-generator/phases/03-requirements.md +7 -7
  24. package/.codex/skills/spec-generator/phases/04-architecture.md +4 -4
  25. package/.codex/skills/spec-generator/phases/05-epics-stories.md +5 -6
  26. package/.codex/skills/spec-generator/phases/06-readiness-check.md +10 -17
  27. package/.codex/skills/spec-generator/phases/07-issue-export.md +326 -329
  28. package/.codex/skills/spec-setup/SKILL.md +669 -657
  29. package/.codex/skills/team-arch-opt/SKILL.md +50 -50
  30. package/.codex/skills/team-arch-opt/agents/completion-handler.md +3 -3
  31. package/.codex/skills/team-brainstorm/SKILL.md +724 -725
  32. package/.codex/skills/team-coordinate/SKILL.md +51 -51
  33. package/.codex/skills/team-coordinate/agents/completion-handler.md +3 -3
  34. package/.codex/skills/team-coordinate/agents/plan-reviewer.md +4 -4
  35. package/.codex/skills/team-designer/SKILL.md +691 -691
  36. package/.codex/skills/team-designer/agents/requirement-clarifier.md +11 -12
  37. package/.codex/skills/team-executor/SKILL.md +45 -45
  38. package/.codex/skills/team-frontend/SKILL.md +45 -45
  39. package/.codex/skills/team-frontend/agents/completion-handler.md +3 -3
  40. package/.codex/skills/team-frontend/agents/qa-gate-reviewer.md +4 -4
  41. package/.codex/skills/team-frontend-debug/SKILL.md +50 -50
  42. package/.codex/skills/team-frontend-debug/agents/completion-handler.md +3 -3
  43. package/.codex/skills/team-frontend-debug/agents/conditional-skip-gate.md +4 -4
  44. package/.codex/skills/team-issue/SKILL.md +751 -740
  45. package/.codex/skills/team-iterdev/SKILL.md +825 -826
  46. package/.codex/skills/team-lifecycle-v4/SKILL.md +775 -775
  47. package/.codex/skills/team-lifecycle-v4/agents/quality-gate.md +165 -165
  48. package/.codex/skills/team-lifecycle-v4/agents/requirement-clarifier.md +163 -163
  49. package/.codex/skills/team-perf-opt/SKILL.md +50 -50
  50. package/.codex/skills/team-perf-opt/agents/completion-handler.md +3 -3
  51. package/.codex/skills/team-planex-v2/SKILL.md +652 -637
  52. package/.codex/skills/team-quality-assurance/SKILL.md +51 -52
  53. package/.codex/skills/team-review/SKILL.md +40 -40
  54. package/.codex/skills/team-roadmap-dev/SKILL.md +51 -51
  55. package/.codex/skills/team-roadmap-dev/agents/roadmap-discusser.md +8 -8
  56. package/.codex/skills/team-tech-debt/SKILL.md +50 -50
  57. package/.codex/skills/team-tech-debt/agents/plan-approver.md +5 -5
  58. package/.codex/skills/team-testing/SKILL.md +51 -52
  59. package/.codex/skills/team-uidesign/SKILL.md +40 -40
  60. package/.codex/skills/team-uidesign/agents/completion-handler.md +177 -177
  61. package/.codex/skills/team-ultra-analyze/SKILL.md +786 -787
  62. package/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md +8 -8
  63. package/.codex/skills/team-ux-improve/SKILL.md +51 -52
  64. package/.codex/skills/team-ux-improve/agents/ux-designer.md +2 -2
  65. package/.codex/skills/team-ux-improve/agents/ux-explorer.md +1 -1
  66. package/.codex/skills/unified-execute-with-file/SKILL.md +797 -796
  67. package/.codex/skills/workflow-execute/SKILL.md +1117 -1118
  68. package/.codex/skills/workflow-lite-planex/SKILL.md +1144 -1141
  69. package/.codex/skills/workflow-plan/SKILL.md +631 -636
  70. package/.codex/skills/workflow-tdd-plan/SKILL.md +753 -759
  71. package/.codex/skills/workflow-test-fix-cycle/SKILL.md +402 -392
  72. package/README.md +25 -0
  73. package/ccw/dist/commands/install.d.ts.map +1 -1
  74. package/ccw/dist/commands/install.js +12 -0
  75. package/ccw/dist/commands/install.js.map +1 -1
  76. package/package.json +1 -1
@@ -1,906 +1,906 @@
1
- ---
2
- name: csv-wave-pipeline
3
- description: Requirement planning to wave-based CSV execution pipeline. Decomposes requirement into dependency-sorted CSV tasks, computes execution waves, runs wave-by-wave via spawn_agents_on_csv with cross-wave context propagation.
4
- argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] \"requirement description\""
5
- allowed-tools: spawn_agents_on_csv, Read, Write, Edit, Bash, Glob, Grep, AskUserQuestion
6
- ---
7
-
8
- ## Auto Mode
9
-
10
- When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
11
-
12
- # CSV Wave Pipeline
13
-
14
- ## Usage
15
-
16
- ```bash
17
- $csv-wave-pipeline "Implement user authentication with OAuth, JWT, and 2FA"
18
- $csv-wave-pipeline -c 4 "Refactor payment module with Stripe and PayPal"
19
- $csv-wave-pipeline -y "Build notification system with email and SMS"
20
- $csv-wave-pipeline --continue "auth-20260228"
21
- ```
22
-
23
- **Flags**:
24
- - `-y, --yes`: Skip all confirmations (auto mode)
25
- - `-c, --concurrency N`: Max concurrent agents within each wave (default: 4)
26
- - `--continue`: Resume existing session
27
-
28
- **Output Directory**: `.workflow/.csv-wave/{session-id}/`
29
- **Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
30
-
31
- ---
32
-
33
- ## Overview
34
-
35
- Wave-based batch execution using `spawn_agents_on_csv` with **cross-wave context propagation**. Tasks are grouped into dependency waves; each wave executes concurrently, and its results feed into the next wave.
36
-
37
- **Core workflow**: Decompose → Compute Waves → Execute Wave-by-Wave → Aggregate
38
-
39
- ```
40
- ┌─────────────────────────────────────────────────────────────────────────┐
41
- │ CSV BATCH EXECUTION WORKFLOW │
42
- ├─────────────────────────────────────────────────────────────────────────┤
43
- │ │
44
- │ Phase 1: Requirement → CSV │
45
- │ ├─ Parse requirement into subtasks (3-10 tasks) │
46
- │ ├─ Identify dependencies (deps column) │
47
- │ ├─ Compute dependency waves (topological sort → depth grouping) │
48
- │ ├─ Generate tasks.csv with wave column │
49
- │ └─ User validates task breakdown (skip if -y) │
50
- │ │
51
- │ Phase 2: Wave Execution Engine │
52
- │ ├─ For each wave (1..N): │
53
- │ │ ├─ Build wave CSV (filter rows for this wave) │
54
- │ │ ├─ Inject previous wave findings into prev_context column │
55
- │ │ ├─ spawn_agents_on_csv(wave CSV) │
56
- │ │ ├─ Collect results, merge into master tasks.csv │
57
- │ │ └─ Check: any failed? → skip dependents or retry │
58
- │ └─ discoveries.ndjson shared across all waves (append-only) │
59
- │ │
60
- │ Phase 3: Results Aggregation │
61
- │ ├─ Export final results.csv │
62
- │ ├─ Generate context.md with all findings │
63
- │ ├─ Display summary: completed/failed/skipped per wave │
64
- │ └─ Offer: view results | retry failed | done │
65
- │ │
66
- └─────────────────────────────────────────────────────────────────────────┘
67
- ```
68
-
69
- ---
70
-
71
- ## CSV Schema
72
-
73
- ### tasks.csv (Master State)
74
-
75
- ```csv
76
- id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error
77
- "1","Setup auth module","Create auth directory structure and base files","Verify directory exists and base files export expected interfaces","auth/ dir created; index.ts and types.ts export AuthProvider interface","src/auth/**","Follow monorepo module pattern || package.json;src/shared/types.ts","","","","1","","","","","",""
78
- "2","Implement OAuth","Add OAuth provider integration with Google and GitHub","Unit test: mock OAuth callback returns valid token; Integration test: verify redirect URL generation","OAuth login redirects to provider; callback returns JWT; supports Google and GitHub","src/auth/oauth/**","Use passport.js strategy pattern || src/auth/index.ts;docs/oauth-flow.md","Run npm test -- --grep oauth before completion","1","1","2","","","","","",""
79
- "3","Add JWT tokens","Implement JWT generation and validation","Unit test: sign/verify round-trip; Edge test: expired token returns 401","generateToken() returns valid JWT; verifyToken() rejects expired/tampered tokens","src/auth/jwt/**","Use jsonwebtoken library; Set default expiry 1h || src/config/auth.ts","Ensure tsc --noEmit passes","1","1","2","","","","","",""
80
- "4","Setup 2FA","Add TOTP-based 2FA with QR code generation","Unit test: TOTP verify with correct code; Test: QR data URL is valid","QR code generates scannable image; TOTP verification succeeds within time window","src/auth/2fa/**","Use speakeasy + qrcode libraries || src/auth/oauth/strategy.ts;src/auth/jwt/token.ts","Run full test suite: npm test","2;3","1;2;3","3","","","","","",""
81
- ```
82
-
83
- **Columns**:
84
-
85
- | Column | Phase | Description |
86
- |--------|-------|-------------|
87
- | `id` | Input | Unique task identifier (string) |
88
- | `title` | Input | Short task title |
89
- | `description` | Input | Detailed task description — what to implement |
90
- | `test` | Input | Test cases: what tests to write and how to verify (unit/integration/edge) |
91
- | `acceptance_criteria` | Input | Acceptance criteria: measurable conditions that define "done" |
92
- | `scope` | Input | Target file/directory glob — constrains agent work area, prevents cross-task file conflicts |
93
- | `hints` | Input | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. Before `\|\|` = how to implement; after `\|\|` = existing files to read before starting. Either part is optional |
94
- | `execution_directives` | Input | Execution constraints: commands to run for verification, tool restrictions, environment requirements |
95
- | `deps` | Input | Semicolon-separated dependency task IDs (empty = no deps) |
96
- | `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
97
- | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
98
- | `status` | Output | `pending` → `completed` / `failed` / `skipped` |
99
- | `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
100
- | `files_modified` | Output | Semicolon-separated file paths |
101
- | `tests_passed` | Output | Whether all defined test cases passed (true/false) |
102
- | `acceptance_met` | Output | Summary of which acceptance criteria were met/unmet |
103
- | `error` | Output | Error message if failed (empty if success) |
104
-
105
- ### Per-Wave CSV (Temporary)
106
-
107
- Each wave generates a temporary `wave-{N}.csv` with an extra `prev_context` column:
108
-
109
- ```csv
110
- id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context
111
- "2","Implement OAuth","Add OAuth integration","Unit test: mock OAuth callback returns valid token","OAuth login redirects to provider; callback returns JWT","src/auth/oauth/**","Use passport.js strategy pattern || src/auth/index.ts;docs/oauth-flow.md","Run npm test -- --grep oauth","1","1","2","[Task 1] Created auth/ with index.ts and types.ts"
112
- "3","Add JWT tokens","Implement JWT","Unit test: sign/verify round-trip; Edge test: expired token returns 401","generateToken() returns valid JWT; verifyToken() rejects expired/tampered tokens","src/auth/jwt/**","Use jsonwebtoken library; Set default expiry 1h || src/config/auth.ts","Ensure tsc --noEmit passes","1","1","2","[Task 1] Created auth/ with index.ts and types.ts"
113
- ```
114
-
115
- The `prev_context` column is built from `context_from` by looking up completed tasks' `findings` in the master CSV.
116
-
117
- ---
118
-
119
- ## Output Artifacts
120
-
121
- | File | Purpose | Lifecycle |
122
- |------|---------|-----------|
123
- | `tasks.csv` | Master state — all tasks with status/findings | Updated after each wave |
124
- | `wave-{N}.csv` | Per-wave input (temporary) | Created before wave, deleted after |
125
- | `results.csv` | Final export of all task results | Created in Phase 3 |
126
- | `discoveries.ndjson` | Shared exploration board across all agents | Append-only, carries across waves |
127
- | `context.md` | Human-readable execution report | Created in Phase 3 |
128
-
129
- ---
130
-
131
- ## Session Structure
132
-
133
- ```
134
- .workflow/.csv-wave/{session-id}/
135
- ├── tasks.csv # Master state (updated per wave)
136
- ├── results.csv # Final results export
137
- ├── discoveries.ndjson # Shared discovery board (all agents)
138
- ├── context.md # Human-readable report
139
- └── wave-{N}.csv # Temporary per-wave input (cleaned up)
140
- ```
141
-
142
- ---
143
-
144
- ## Implementation
145
-
146
- ### Session Initialization
147
-
148
- ```javascript
149
- const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
150
-
151
- // Parse flags
152
- const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
153
- const continueMode = $ARGUMENTS.includes('--continue')
154
- const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
155
- const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
156
-
157
- // Clean requirement text (remove flags)
158
- const requirement = $ARGUMENTS
159
- .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
160
- .trim()
161
-
162
- const slug = requirement.toLowerCase()
163
- .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
164
- .substring(0, 40)
165
- const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
166
- const sessionId = `cwp-${slug}-${dateStr}`
167
- const sessionFolder = `.workflow/.csv-wave/${sessionId}`
168
-
169
- // Continue mode: find existing session
170
- if (continueMode) {
171
- const existing = Bash(`ls -t .workflow/.csv-wave/ 2>/dev/null | head -1`).trim()
172
- if (existing) {
173
- sessionId = existing
174
- sessionFolder = `.workflow/.csv-wave/${sessionId}`
175
- // Read existing tasks.csv, find incomplete waves, resume from there
176
- const existingCsv = Read(`${sessionFolder}/tasks.csv`)
177
- // → jump to Phase 2 with remaining waves
178
- }
179
- }
180
-
181
- Bash(`mkdir -p ${sessionFolder}`)
182
- ```
183
-
184
- ---
185
-
186
- ### Phase 1: Requirement → CSV
187
-
188
- **Objective**: Decompose requirement into tasks, compute dependency waves, generate tasks.csv.
189
-
190
- **Steps**:
191
-
192
- 1. **Decompose Requirement**
193
-
194
- ```javascript
195
- // Use ccw cli to decompose requirement into subtasks
196
- Bash({
197
- command: `ccw cli -p "PURPOSE: Decompose requirement into 3-10 atomic tasks for batch agent execution. Each task must include implementation description, test cases, and acceptance criteria.
198
- TASK:
199
- • Parse requirement into independent subtasks
200
- • Identify dependencies between tasks (which must complete before others)
201
- • Identify context flow (which tasks need previous tasks' findings)
202
- • For each task, define concrete test cases (unit/integration/edge)
203
- • For each task, define measurable acceptance criteria (what defines 'done')
204
- • Each task must be executable by a single agent with file read/write access
205
- MODE: analysis
206
- CONTEXT: @**/*
207
- EXPECTED: JSON object with tasks array. Each task: {id: string, title: string, description: string, test: string, acceptance_criteria: string, scope: string, hints: string, execution_directives: string, deps: string[], context_from: string[]}.
208
- - description: what to implement (specific enough for an agent to execute independently)
209
- - test: what tests to write and how to verify (e.g. 'Unit test: X returns Y; Edge test: handles Z')
210
- - acceptance_criteria: measurable conditions that define done (e.g. 'API returns 200; token expires after 1h')
211
- - scope: target file/directory glob (e.g. 'src/auth/**') — tasks in same wave MUST have non-overlapping scopes
212
- - hints: implementation tips + reference files, format '<tips> || <ref_file1>;<ref_file2>' (e.g. 'Use strategy pattern || src/base/Strategy.ts;docs/design.md')
213
- - execution_directives: commands to run for verification or tool constraints (e.g. 'Run npm test --bail; Ensure tsc passes')
214
- - deps: task IDs that must complete first
215
- - context_from: task IDs whose findings are needed
216
- CONSTRAINTS: 3-10 tasks | Each task is atomic | No circular deps | test and acceptance_criteria must be concrete and verifiable | Same-wave tasks must have non-overlapping scopes
217
-
218
- REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
219
- run_in_background: true
220
- })
221
- // Wait for CLI completion via hook callback
222
- // Parse JSON from CLI output → decomposedTasks[]
223
- ```
224
-
225
- 2. **Compute Waves** (Topological Sort → Depth Grouping)
226
-
227
- ```javascript
228
- function computeWaves(tasks) {
229
- // Build adjacency: task.deps → predecessors
230
- const taskMap = new Map(tasks.map(t => [t.id, t]))
231
- const inDegree = new Map(tasks.map(t => [t.id, 0]))
232
- const adjList = new Map(tasks.map(t => [t.id, []]))
233
-
234
- for (const task of tasks) {
235
- for (const dep of task.deps) {
236
- if (taskMap.has(dep)) {
237
- adjList.get(dep).push(task.id)
238
- inDegree.set(task.id, inDegree.get(task.id) + 1)
239
- }
240
- }
241
- }
242
-
243
- // BFS-based topological sort with depth tracking
244
- const queue = [] // [taskId, depth]
245
- const waveAssignment = new Map()
246
-
247
- for (const [id, deg] of inDegree) {
248
- if (deg === 0) {
249
- queue.push([id, 1])
250
- waveAssignment.set(id, 1)
251
- }
252
- }
253
-
254
- let maxWave = 1
255
- let idx = 0
256
- while (idx < queue.length) {
257
- const [current, depth] = queue[idx++]
258
- for (const next of adjList.get(current)) {
259
- const newDeg = inDegree.get(next) - 1
260
- inDegree.set(next, newDeg)
261
- const nextDepth = Math.max(waveAssignment.get(next) || 0, depth + 1)
262
- waveAssignment.set(next, nextDepth)
263
- if (newDeg === 0) {
264
- queue.push([next, nextDepth])
265
- maxWave = Math.max(maxWave, nextDepth)
266
- }
267
- }
268
- }
269
-
270
- // Detect cycles: any task without wave assignment
271
- for (const task of tasks) {
272
- if (!waveAssignment.has(task.id)) {
273
- throw new Error(`Circular dependency detected involving task ${task.id}`)
274
- }
275
- }
276
-
277
- return { waveAssignment, maxWave }
278
- }
279
-
280
- const { waveAssignment, maxWave } = computeWaves(decomposedTasks)
281
- ```
282
-
283
- 3. **Generate tasks.csv**
284
-
285
- ```javascript
286
- const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error'
287
- const rows = decomposedTasks.map(task => {
288
- const wave = waveAssignment.get(task.id)
289
- return [
290
- task.id,
291
- csvEscape(task.title),
292
- csvEscape(task.description),
293
- csvEscape(task.test),
294
- csvEscape(task.acceptance_criteria),
295
- csvEscape(task.scope),
296
- csvEscape(task.hints),
297
- csvEscape(task.execution_directives),
298
- task.deps.join(';'),
299
- task.context_from.join(';'),
300
- wave,
301
- 'pending', // status
302
- '', // findings
303
- '', // files_modified
304
- '', // tests_passed
305
- '', // acceptance_met
306
- '' // error
307
- ].map(cell => `"${String(cell).replace(/"/g, '""')}"`).join(',')
308
- })
309
-
310
- Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n'))
311
- ```
312
-
313
- 4. **User Validation** (skip if AUTO_YES)
314
-
315
- ```javascript
316
- if (!AUTO_YES) {
317
- // Display task breakdown with wave assignment
318
- console.log(`\n## Task Breakdown (${decomposedTasks.length} tasks, ${maxWave} waves)\n`)
319
- for (let w = 1; w <= maxWave; w++) {
320
- const waveTasks = decomposedTasks.filter(t => waveAssignment.get(t.id) === w)
321
- console.log(`### Wave ${w} (${waveTasks.length} tasks, concurrent)`)
322
- waveTasks.forEach(t => console.log(` - [${t.id}] ${t.title}`))
323
- }
324
-
325
- const answer = AskUserQuestion({
326
- questions: [{
327
- question: "Approve task breakdown?",
328
- header: "Validation",
329
- multiSelect: false,
330
- options: [
331
- { label: "Approve", description: "Proceed with wave execution" },
332
- { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv manually, then --continue` },
333
- { label: "Cancel", description: "Abort" }
334
- ]
335
- }]
336
- }) // BLOCKS
337
-
338
- if (answer.Validation === "Modify") {
339
- console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $csv-wave-pipeline --continue`)
340
- return
341
- } else if (answer.Validation === "Cancel") {
342
- return
343
- }
344
- }
345
- ```
346
-
347
- **Success Criteria**:
348
- - tasks.csv created with valid schema and wave assignments
349
- - No circular dependencies
350
- - User approved (or AUTO_YES)
351
-
352
- ---
353
-
354
- ### Phase 2: Wave Execution Engine
355
-
356
- **Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave sees previous waves' results.
357
-
358
- **Steps**:
359
-
360
- 1. **Wave Loop**
361
-
362
- ```javascript
363
- const failedIds = new Set()
364
- const skippedIds = new Set()
365
-
366
- for (let wave = 1; wave <= maxWave; wave++) {
367
- console.log(`\n## Wave ${wave}/${maxWave}\n`)
368
-
369
- // 1. Read current master CSV
370
- const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
371
-
372
- // 2. Filter tasks for this wave
373
- const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
374
-
375
- // 3. Skip tasks whose deps failed
376
- const executableTasks = []
377
- for (const task of waveTasks) {
378
- const deps = task.deps.split(';').filter(Boolean)
379
- if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
380
- skippedIds.add(task.id)
381
- // Update master CSV: mark as skipped
382
- updateMasterCsvRow(sessionFolder, task.id, {
383
- status: 'skipped',
384
- error: 'Dependency failed or skipped'
385
- })
386
- console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
387
- continue
388
- }
389
- executableTasks.push(task)
390
- }
391
-
392
- if (executableTasks.length === 0) {
393
- console.log(` No executable tasks in wave ${wave}`)
394
- continue
395
- }
396
-
397
- // 4. Build prev_context for each task
398
- for (const task of executableTasks) {
399
- const contextIds = task.context_from.split(';').filter(Boolean)
400
- const prevFindings = contextIds
401
- .map(id => {
402
- const prevRow = masterCsv.find(r => r.id === id)
403
- if (prevRow && prevRow.status === 'completed' && prevRow.findings) {
404
- return `[Task ${id}: ${prevRow.title}] ${prevRow.findings}`
405
- }
406
- return null
407
- })
408
- .filter(Boolean)
409
- .join('\n')
410
- task.prev_context = prevFindings || 'No previous context available'
411
- }
412
-
413
- // 5. Write wave CSV
414
- const waveHeader = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context'
415
- const waveRows = executableTasks.map(t =>
416
- [t.id, t.title, t.description, t.test, t.acceptance_criteria, t.scope, t.hints, t.execution_directives, t.deps, t.context_from, t.wave, t.prev_context]
417
- .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
418
- .join(',')
419
- )
420
- Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
421
-
422
- // 6. Execute wave
423
- console.log(` Executing ${executableTasks.length} tasks (concurrency: ${maxConcurrency})...`)
424
-
425
- const waveResult = spawn_agents_on_csv({
426
- csv_path: `${sessionFolder}/wave-${wave}.csv`,
427
- id_column: "id",
428
- instruction: buildInstructionTemplate(sessionFolder, wave),
429
- max_concurrency: maxConcurrency,
430
- max_runtime_seconds: 600,
431
- output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
432
- output_schema: {
433
- type: "object",
434
- properties: {
435
- id: { type: "string" },
436
- status: { type: "string", enum: ["completed", "failed"] },
437
- findings: { type: "string" },
438
- files_modified: { type: "array", items: { type: "string" } },
439
- tests_passed: { type: "boolean" },
440
- acceptance_met: { type: "string" },
441
- error: { type: "string" }
442
- },
443
- required: ["id", "status", "findings", "tests_passed"]
444
- }
445
- })
446
- // ↑ Blocks until all agents in this wave complete
447
-
448
- // 7. Merge results into master CSV
449
- const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
450
- for (const result of waveResults) {
451
- updateMasterCsvRow(sessionFolder, result.id, {
452
- status: result.status,
453
- findings: result.findings || '',
454
- files_modified: (result.files_modified || []).join(';'),
455
- tests_passed: String(result.tests_passed ?? ''),
456
- acceptance_met: result.acceptance_met || '',
457
- error: result.error || ''
458
- })
459
-
460
- if (result.status === 'failed') {
461
- failedIds.add(result.id)
462
- console.log(` [${result.id}] ${result.title} → FAILED: ${result.error}`)
463
- } else {
464
- console.log(` [${result.id}] ${result.title} → COMPLETED`)
465
- }
466
- }
467
-
468
- // 8. Cleanup temporary wave CSV
469
- Bash(`rm -f "${sessionFolder}/wave-${wave}.csv"`)
470
-
471
- console.log(` Wave ${wave} done: ${waveResults.filter(r => r.status === 'completed').length} completed, ${waveResults.filter(r => r.status === 'failed').length} failed`)
472
- }
473
- ```
474
-
475
- 2. **Instruction Template Builder**
476
-
477
- ```javascript
478
- function buildInstructionTemplate(sessionFolder, wave) {
479
- return `
480
- ## TASK ASSIGNMENT
481
-
482
- ### MANDATORY FIRST STEPS
483
- 1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
484
- 2. Read project context: .workflow/project-tech.json (if exists)
485
-
486
- ---
487
-
488
- ## Your Task
489
-
490
- **Task ID**: {id}
491
- **Title**: {title}
492
- **Description**: {description}
493
- **Scope**: {scope}
494
-
495
- ### Implementation Hints & Reference Files
496
- {hints}
497
-
498
- > Format: \`<tips> || <ref_file1>;<ref_file2>\`. Read ALL reference files (after ||) before starting implementation. Apply tips (before ||) as implementation guidance.
499
-
500
- ### Execution Directives
501
- {execution_directives}
502
-
503
- > Commands to run for verification, tool restrictions, or environment requirements. Follow these constraints during and after implementation.
504
-
505
- ### Test Cases
506
- {test}
507
-
508
- ### Acceptance Criteria
509
- {acceptance_criteria}
510
-
511
- ### Previous Tasks' Findings (Context)
512
- {prev_context}
513
-
514
- ---
515
-
516
- ## Execution Protocol
517
-
518
- 1. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
519
- 2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings
520
- 3. **Use context**: Apply previous tasks' findings from prev_context above
521
- 4. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
522
- 5. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
523
- 6. **Execute**: Implement the task as described
524
- 7. **Write tests**: Implement the test cases defined above
525
- 8. **Run directives**: Execute commands from {execution_directives} to verify your work
526
- 9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion
527
- 10. **Share discoveries**: Append exploration findings to shared board:
528
- \`\`\`bash
529
- echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
530
- \`\`\`
531
- 11. **Report result**: Return JSON via report_agent_job_result
532
-
533
- ### Discovery Types to Share
534
- - \`code_pattern\`: {name, file, description} — reusable patterns found
535
- - \`integration_point\`: {file, description, exports[]} — module connection points
536
- - \`convention\`: {naming, imports, formatting} — code style conventions
537
- - \`blocker\`: {issue, severity, impact} — blocking issues encountered
538
-
539
- ---
540
-
541
- ## Output (report_agent_job_result)
542
-
543
- Return JSON:
544
- {
545
- "id": "{id}",
546
- "status": "completed" | "failed",
547
- "findings": "Key discoveries and implementation notes (max 500 chars)",
548
- "files_modified": ["path1", "path2"],
549
- "tests_passed": true | false,
550
- "acceptance_met": "Summary of which acceptance criteria were met/unmet",
551
- "error": ""
552
- }
553
-
554
- **IMPORTANT**: Set status to "completed" ONLY if:
555
- - All test cases pass
556
- - All acceptance criteria are met
557
- Otherwise set status to "failed" with details in error field.
558
- `
559
- }
560
- ```
561
-
562
- 3. **Master CSV Update Helper**
563
-
564
- ```javascript
565
- function updateMasterCsvRow(sessionFolder, taskId, updates) {
566
- const csvPath = `${sessionFolder}/tasks.csv`
567
- const content = Read(csvPath)
568
- const lines = content.split('\n')
569
- const header = lines[0].split(',')
570
-
571
- for (let i = 1; i < lines.length; i++) {
572
- const cells = parseCsvLine(lines[i])
573
- if (cells[0] === taskId || cells[0] === `"${taskId}"`) {
574
- // Update specified columns
575
- for (const [col, val] of Object.entries(updates)) {
576
- const colIdx = header.indexOf(col)
577
- if (colIdx >= 0) {
578
- cells[colIdx] = `"${String(val).replace(/"/g, '""')}"`
579
- }
580
- }
581
- lines[i] = cells.join(',')
582
- break
583
- }
584
- }
585
-
586
- Write(csvPath, lines.join('\n'))
587
- }
588
- ```
589
-
590
- **Success Criteria**:
591
- - All waves executed in order
592
- - Each wave's results merged into master CSV before next wave starts
593
- - Dependent tasks skipped when predecessor failed
594
- - discoveries.ndjson accumulated across all waves
595
-
596
- ---
597
-
598
- ### Phase 3: Results Aggregation
599
-
600
- **Objective**: Generate final results and human-readable report.
601
-
602
- **Steps**:
603
-
604
- 1. **Export results.csv**
605
-
606
- ```javascript
607
- const masterCsv = Read(`${sessionFolder}/tasks.csv`)
608
- // results.csv = master CSV (already has all results populated)
609
- Write(`${sessionFolder}/results.csv`, masterCsv)
610
- ```
611
-
612
- 2. **Generate context.md**
613
-
614
- ```javascript
615
- const tasks = parseCsv(masterCsv)
616
- const completed = tasks.filter(t => t.status === 'completed')
617
- const failed = tasks.filter(t => t.status === 'failed')
618
- const skipped = tasks.filter(t => t.status === 'skipped')
619
-
620
- const contextContent = `# CSV Batch Execution Report
621
-
622
- **Session**: ${sessionId}
623
- **Requirement**: ${requirement}
624
- **Completed**: ${getUtc8ISOString()}
625
- **Waves**: ${maxWave} | **Concurrency**: ${maxConcurrency}
626
-
627
- ---
628
-
629
- ## Summary
630
-
631
- | Metric | Count |
632
- |--------|-------|
633
- | Total Tasks | ${tasks.length} |
634
- | Completed | ${completed.length} |
635
- | Failed | ${failed.length} |
636
- | Skipped | ${skipped.length} |
637
- | Waves | ${maxWave} |
638
-
639
- ---
640
-
641
- ## Wave Execution
642
-
643
- ${Array.from({ length: maxWave }, (_, i) => i + 1).map(w => {
644
- const waveTasks = tasks.filter(t => parseInt(t.wave) === w)
645
- return `### Wave ${w}
646
- ${waveTasks.map(t => `- **[${t.id}] ${t.title}**: ${t.status}${t.tests_passed ? ' ✓tests' : ''}${t.error ? ' — ' + t.error : ''}
647
- ${t.findings ? 'Findings: ' + t.findings : ''}`).join('\n')}`
648
- }).join('\n\n')}
649
-
650
- ---
651
-
652
- ## Task Details
653
-
654
- ${tasks.map(t => `### ${t.id}: ${t.title}
655
-
656
- | Field | Value |
657
- |-------|-------|
658
- | Status | ${t.status} |
659
- | Wave | ${t.wave} |
660
- | Scope | ${t.scope || 'none'} |
661
- | Dependencies | ${t.deps || 'none'} |
662
- | Context From | ${t.context_from || 'none'} |
663
- | Tests Passed | ${t.tests_passed || 'N/A'} |
664
- | Acceptance Met | ${t.acceptance_met || 'N/A'} |
665
- | Error | ${t.error || 'none'} |
666
-
667
- **Description**: ${t.description}
668
-
669
- **Test Cases**: ${t.test || 'N/A'}
670
-
671
- **Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'}
672
-
673
- **Hints**: ${t.hints || 'N/A'}
674
-
675
- **Execution Directives**: ${t.execution_directives || 'N/A'}
676
-
677
- **Findings**: ${t.findings || 'N/A'}
678
-
679
- **Files Modified**: ${t.files_modified || 'none'}
680
- `).join('\n---\n')}
681
-
682
- ---
683
-
684
- ## All Modified Files
685
-
686
- ${[...new Set(tasks.flatMap(t => (t.files_modified || '').split(';')).filter(Boolean))].map(f => '- ' + f).join('\n') || 'None'}
687
- `
688
-
689
- Write(`${sessionFolder}/context.md`, contextContent)
690
- ```
691
-
692
- 3. **Display Summary**
693
-
694
- ```javascript
695
- console.log(`
696
- ## Execution Complete
697
-
698
- - **Session**: ${sessionId}
699
- - **Waves**: ${maxWave}
700
- - **Completed**: ${completed.length}/${tasks.length}
701
- - **Failed**: ${failed.length}
702
- - **Skipped**: ${skipped.length}
703
-
704
- **Results**: ${sessionFolder}/results.csv
705
- **Report**: ${sessionFolder}/context.md
706
- **Discoveries**: ${sessionFolder}/discoveries.ndjson
707
- `)
708
- ```
709
-
710
- 4. **Offer Next Steps** (skip if AUTO_YES)
711
-
712
- ```javascript
713
- if (!AUTO_YES && failed.length > 0) {
714
- const answer = AskUserQuestion({
715
- questions: [{
716
- question: `${failed.length} tasks failed. Next action?`,
717
- header: "Next Step",
718
- multiSelect: false,
719
- options: [
720
- { label: "Retry Failed", description: `Re-execute ${failed.length} failed tasks with updated context` },
721
- { label: "View Report", description: "Display context.md" },
722
- { label: "Done", description: "Complete session" }
723
- ]
724
- }]
725
- }) // BLOCKS
726
-
727
- if (answer['Next Step'] === "Retry Failed") {
728
- // Reset failed tasks to pending, re-run Phase 2 for their waves
729
- for (const task of failed) {
730
- updateMasterCsvRow(sessionFolder, task.id, { status: 'pending', error: '' })
731
- }
732
- // Also reset skipped tasks whose deps are now retrying
733
- for (const task of skipped) {
734
- updateMasterCsvRow(sessionFolder, task.id, { status: 'pending', error: '' })
735
- }
736
- // Re-execute Phase 2 (loop will skip already-completed tasks)
737
- // → goto Phase 2
738
- } else if (answer['Next Step'] === "View Report") {
739
- console.log(Read(`${sessionFolder}/context.md`))
740
- }
741
- }
742
- ```
743
-
744
- **Success Criteria**:
745
- - results.csv exported
746
- - context.md generated
747
- - Summary displayed to user
748
-
749
- ---
750
-
751
- ## Shared Discovery Board Protocol
752
-
753
- All agents across all waves share `discoveries.ndjson`. This eliminates redundant codebase exploration.
754
-
755
- **Lifecycle**:
756
- - Created by the first agent to write a discovery
757
- - Carries over across waves — never cleared
758
- - Agents append via `echo '...' >> discoveries.ndjson`
759
-
760
- **Format**: NDJSON, each line is a self-contained JSON:
761
-
762
- ```jsonl
763
- {"ts":"2026-02-28T10:00:00+08:00","worker":"1","type":"code_pattern","data":{"name":"repository-pattern","file":"src/repos/Base.ts","description":"Abstract CRUD repository"}}
764
- {"ts":"2026-02-28T10:01:00+08:00","worker":"2","type":"integration_point","data":{"file":"src/auth/index.ts","description":"Auth module entry","exports":["authenticate","authorize"]}}
765
- ```
766
-
767
- **Discovery Types**:
768
-
769
- | type | Dedup Key | Description |
770
- |------|-----------|-------------|
771
- | `code_pattern` | `data.name` | Reusable code pattern found |
772
- | `integration_point` | `data.file` | Module connection point |
773
- | `convention` | singleton | Code style conventions |
774
- | `blocker` | `data.issue` | Blocking issue encountered |
775
- | `tech_stack` | singleton | Project technology stack |
776
- | `test_command` | singleton | Test commands discovered |
777
-
778
- **Protocol Rules**:
779
- 1. Read board before own exploration → skip covered areas
780
- 2. Write discoveries immediately via `echo >>` → don't batch
781
- 3. Deduplicate — check existing entries; skip if same type + dedup key exists
782
- 4. Append-only — never modify or delete existing lines
783
-
784
- ---
785
-
786
- ## Wave Computation Details
787
-
788
- ### Algorithm
789
-
790
- Kahn's BFS topological sort with depth tracking:
791
-
792
- ```
793
- Input: tasks[] with deps[]
794
- Output: waveAssignment (taskId → wave number)
795
-
796
- 1. Build in-degree map and adjacency list from deps
797
- 2. Enqueue all tasks with in-degree 0 at wave 1
798
- 3. BFS: for each dequeued task at wave W:
799
- - For each dependent task D:
800
- - Decrement D's in-degree
801
- - D.wave = max(D.wave, W + 1)
802
- - If D's in-degree reaches 0, enqueue D
803
- 4. Any task without wave assignment → circular dependency error
804
- ```
805
-
806
- ### Wave Properties
807
-
808
- - **Wave 1**: No dependencies — all tasks in wave 1 are fully independent
809
- - **Wave N**: All dependencies are in waves 1..(N-1) — guaranteed completed before wave N starts
810
- - **Within a wave**: Tasks are independent of each other → safe for concurrent execution
811
-
812
- ### Example
813
-
814
- ```
815
- Task A (no deps) → Wave 1
816
- Task B (no deps) → Wave 1
817
- Task C (deps: A) → Wave 2
818
- Task D (deps: A, B) → Wave 2
819
- Task E (deps: C, D) → Wave 3
820
-
821
- Execution:
822
- Wave 1: [A, B] ← concurrent
823
- Wave 2: [C, D] ← concurrent, sees A+B findings
824
- Wave 3: [E] ← sees A+B+C+D findings
825
- ```
826
-
827
- ---
828
-
829
- ## Context Propagation Flow
830
-
831
- ```
832
- Wave 1 agents:
833
- ├─ Execute tasks (no prev_context)
834
- ├─ Write findings to report_agent_job_result
835
- └─ Append discoveries to discoveries.ndjson
836
-
837
- ↓ merge results into master CSV
838
-
839
- Wave 2 agents:
840
- ├─ Read discoveries.ndjson (exploration sharing)
841
- ├─ Read prev_context column (wave 1 findings from context_from)
842
- ├─ Execute tasks with full upstream context
843
- ├─ Write findings to report_agent_job_result
844
- └─ Append new discoveries to discoveries.ndjson
845
-
846
- ↓ merge results into master CSV
847
-
848
- Wave 3 agents:
849
- ├─ Read discoveries.ndjson (accumulated from waves 1+2)
850
- ├─ Read prev_context column (wave 1+2 findings from context_from)
851
- ├─ Execute tasks
852
- └─ ...
853
- ```
854
-
855
- **Two context channels**:
856
- 1. **CSV findings** (structured): `context_from` column → `prev_context` injection — task-specific directed context
857
- 2. **NDJSON discoveries** (broadcast): `discoveries.ndjson` — general exploration findings available to all
858
-
859
- ---
860
-
861
- ## Error Handling
862
-
863
- | Error | Resolution |
864
- |-------|------------|
865
- | Circular dependency | Detect in wave computation, abort with error message |
866
- | Agent timeout | Mark as failed in results, continue with wave |
867
- | Agent failed | Mark as failed, skip dependent tasks in later waves |
868
- | All agents in wave failed | Log error, offer retry or abort |
869
- | CSV parse error | Validate CSV format before execution, show line number |
870
- | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
871
- | Continue mode: no session found | List available sessions, prompt user to select |
872
-
873
- ---
874
-
875
- ## Core Rules
876
-
877
- 1. **Start Immediately**: First action is session initialization, then Phase 1
878
- 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
879
- 3. **CSV is Source of Truth**: Master tasks.csv holds all state — always read before wave, always write after
880
- 4. **Context Propagation**: prev_context built from master CSV, not from memory
881
- 5. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
882
- 6. **Skip on Failure**: If a dependency failed, skip the dependent task (don't attempt)
883
- 7. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
884
- 8. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
885
-
886
- ---
887
-
888
- ## Best Practices
889
-
890
- 1. **Task Granularity**: 3-10 tasks optimal; too many = overhead, too few = no parallelism benefit
891
- 2. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism
892
- 3. **Specific Descriptions**: Agent sees only its CSV row + prev_context — make description self-contained
893
- 4. **Context From ≠ Deps**: `deps` = execution order constraint; `context_from` = information flow. A task can have `context_from` without `deps` (it just reads previous findings but doesn't require them to be done first in its wave)
894
- 5. **Concurrency Tuning**: `-c 1` for serial execution (maximum context sharing); `-c 8` for I/O-bound tasks
895
-
896
- ---
897
-
898
- ## Usage Recommendations
899
-
900
- | Scenario | Recommended Approach |
901
- |----------|---------------------|
902
- | Independent parallel tasks (no deps) | `$csv-wave-pipeline -c 8` — single wave, max parallelism |
903
- | Linear pipeline (A→B→C) | `$csv-wave-pipeline -c 1` — 3 waves, serial, full context |
904
- | Diamond dependency (A→B,C→D) | `$csv-wave-pipeline` — 3 waves, B+C concurrent in wave 2 |
905
- | Complex requirement, unclear tasks | Use `$roadmap-with-file` first for planning, then feed issues here |
906
- | Single complex task | Use `$workflow-lite-plan` instead |
1
+ ---
2
+ name: csv-wave-pipeline
3
+ description: Requirement planning to wave-based CSV execution pipeline. Decomposes requirement into dependency-sorted CSV tasks, computes execution waves, runs wave-by-wave via spawn_agents_on_csv with cross-wave context propagation.
4
+ argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] \"requirement description\""
5
+ allowed-tools: spawn_agents_on_csv, Read, Write, Edit, Bash, Glob, Grep, request_user_input
6
+ ---
7
+
8
+ ## Auto Mode
9
+
10
+ When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
11
+
12
+ # CSV Wave Pipeline
13
+
14
+ ## Usage
15
+
16
+ ```bash
17
+ $csv-wave-pipeline "Implement user authentication with OAuth, JWT, and 2FA"
18
+ $csv-wave-pipeline -c 4 "Refactor payment module with Stripe and PayPal"
19
+ $csv-wave-pipeline -y "Build notification system with email and SMS"
20
+ $csv-wave-pipeline --continue "auth-20260228"
21
+ ```
22
+
23
+ **Flags**:
24
+ - `-y, --yes`: Skip all confirmations (auto mode)
25
+ - `-c, --concurrency N`: Max concurrent agents within each wave (default: 4)
26
+ - `--continue`: Resume an existing session (the most recently modified session under `.workflow/.csv-wave/` is selected)
27
+
28
+ **Output Directory**: `.workflow/.csv-wave/{session-id}/`
29
+ **Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
30
+
31
+ ---
32
+
33
+ ## Overview
34
+
35
+ Wave-based batch execution using `spawn_agents_on_csv` with **cross-wave context propagation**. Tasks are grouped into dependency waves; each wave executes concurrently, and its results feed into the next wave.
36
+
37
+ **Core workflow**: Decompose → Compute Waves → Execute Wave-by-Wave → Aggregate
38
+
39
+ ```
40
+ ┌─────────────────────────────────────────────────────────────────────────┐
41
+ │ CSV BATCH EXECUTION WORKFLOW │
42
+ ├─────────────────────────────────────────────────────────────────────────┤
43
+ │ │
44
+ │ Phase 1: Requirement → CSV │
45
+ │ ├─ Parse requirement into subtasks (3-10 tasks) │
46
+ │ ├─ Identify dependencies (deps column) │
47
+ │ ├─ Compute dependency waves (topological sort → depth grouping) │
48
+ │ ├─ Generate tasks.csv with wave column │
49
+ │ └─ User validates task breakdown (skip if -y) │
50
+ │ │
51
+ │ Phase 2: Wave Execution Engine │
52
+ │ ├─ For each wave (1..N): │
53
+ │ │ ├─ Build wave CSV (filter rows for this wave) │
54
+ │ │ ├─ Inject previous wave findings into prev_context column │
55
+ │ │ ├─ spawn_agents_on_csv(wave CSV) │
56
+ │ │ ├─ Collect results, merge into master tasks.csv │
57
+ │ │ └─ Check: any failed? → skip dependents or retry │
58
+ │ └─ discoveries.ndjson shared across all waves (append-only) │
59
+ │ │
60
+ │ Phase 3: Results Aggregation │
61
+ │ ├─ Export final results.csv │
62
+ │ ├─ Generate context.md with all findings │
63
+ │ ├─ Display summary: completed/failed/skipped per wave │
64
+ │ └─ Offer: view results | retry failed | done │
65
+ │ │
66
+ └─────────────────────────────────────────────────────────────────────────┘
67
+ ```
68
+
69
+ ---
70
+
71
+ ## CSV Schema
72
+
73
+ ### tasks.csv (Master State)
74
+
75
+ ```csv
76
+ id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error
77
+ "1","Setup auth module","Create auth directory structure and base files","Verify directory exists and base files export expected interfaces","auth/ dir created; index.ts and types.ts export AuthProvider interface","src/auth/**","Follow monorepo module pattern || package.json;src/shared/types.ts","","","","1","","","","","",""
78
+ "2","Implement OAuth","Add OAuth provider integration with Google and GitHub","Unit test: mock OAuth callback returns valid token; Integration test: verify redirect URL generation","OAuth login redirects to provider; callback returns JWT; supports Google and GitHub","src/auth/oauth/**","Use passport.js strategy pattern || src/auth/index.ts;docs/oauth-flow.md","Run npm test -- --grep oauth before completion","1","1","2","","","","","",""
79
+ "3","Add JWT tokens","Implement JWT generation and validation","Unit test: sign/verify round-trip; Edge test: expired token returns 401","generateToken() returns valid JWT; verifyToken() rejects expired/tampered tokens","src/auth/jwt/**","Use jsonwebtoken library; Set default expiry 1h || src/config/auth.ts","Ensure tsc --noEmit passes","1","1","2","","","","","",""
80
+ "4","Setup 2FA","Add TOTP-based 2FA with QR code generation","Unit test: TOTP verify with correct code; Test: QR data URL is valid","QR code generates scannable image; TOTP verification succeeds within time window","src/auth/2fa/**","Use speakeasy + qrcode libraries || src/auth/oauth/strategy.ts;src/auth/jwt/token.ts","Run full test suite: npm test","2;3","1;2;3","3","","","","","",""
81
+ ```
82
+
83
+ **Columns**:
84
+
85
+ | Column | Phase | Description |
86
+ |--------|-------|-------------|
87
+ | `id` | Input | Unique task identifier (string) |
88
+ | `title` | Input | Short task title |
89
+ | `description` | Input | Detailed task description — what to implement |
90
+ | `test` | Input | Test cases: what tests to write and how to verify (unit/integration/edge) |
91
+ | `acceptance_criteria` | Input | Acceptance criteria: measurable conditions that define "done" |
92
+ | `scope` | Input | Target file/directory glob — constrains agent work area, prevents cross-task file conflicts |
93
+ | `hints` | Input | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. Before `\|\|` = how to implement; after `\|\|` = existing files to read before starting. Either part is optional |
94
+ | `execution_directives` | Input | Execution constraints: commands to run for verification, tool restrictions, environment requirements |
95
+ | `deps` | Input | Semicolon-separated dependency task IDs (empty = no deps) |
96
+ | `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
97
+ | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
98
+ | `status` | Output | `pending` → `completed` / `failed` / `skipped` |
99
+ | `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
100
+ | `files_modified` | Output | Semicolon-separated file paths |
101
+ | `tests_passed` | Output | Whether all defined test cases passed (true/false) |
102
+ | `acceptance_met` | Output | Summary of which acceptance criteria were met/unmet |
103
+ | `error` | Output | Error message if failed (empty if success) |
104
+
105
+ ### Per-Wave CSV (Temporary)
106
+
107
+ Each wave generates a temporary `wave-{N}.csv` with an extra `prev_context` column:
108
+
109
+ ```csv
110
+ id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context
111
+ "2","Implement OAuth","Add OAuth integration","Unit test: mock OAuth callback returns valid token","OAuth login redirects to provider; callback returns JWT","src/auth/oauth/**","Use passport.js strategy pattern || src/auth/index.ts;docs/oauth-flow.md","Run npm test -- --grep oauth","1","1","2","[Task 1] Created auth/ with index.ts and types.ts"
112
+ "3","Add JWT tokens","Implement JWT","Unit test: sign/verify round-trip; Edge test: expired token returns 401","generateToken() returns valid JWT; verifyToken() rejects expired/tampered tokens","src/auth/jwt/**","Use jsonwebtoken library; Set default expiry 1h || src/config/auth.ts","Ensure tsc --noEmit passes","1","1","2","[Task 1] Created auth/ with index.ts and types.ts"
113
+ ```
114
+
115
+ The `prev_context` column is built from `context_from` by looking up completed tasks' `findings` in the master CSV.
116
+
117
+ ---
118
+
119
+ ## Output Artifacts
120
+
121
+ | File | Purpose | Lifecycle |
122
+ |------|---------|-----------|
123
+ | `tasks.csv` | Master state — all tasks with status/findings | Updated after each wave |
124
+ | `wave-{N}.csv` | Per-wave input (temporary) | Created before wave, deleted after |
125
+ | `results.csv` | Final export of all task results | Created in Phase 3 |
126
+ | `discoveries.ndjson` | Shared exploration board across all agents | Append-only, carries across waves |
127
+ | `context.md` | Human-readable execution report | Created in Phase 3 |
128
+
129
+ ---
130
+
131
+ ## Session Structure
132
+
133
+ ```
134
+ .workflow/.csv-wave/{session-id}/
135
+ ├── tasks.csv # Master state (updated per wave)
136
+ ├── results.csv # Final results export
137
+ ├── discoveries.ndjson # Shared discovery board (all agents)
138
+ ├── context.md # Human-readable report
139
+ └── wave-{N}.csv # Temporary per-wave input (cleaned up)
140
+ ```
141
+
142
+ ---
143
+
144
+ ## Implementation
145
+
146
+ ### Session Initialization
147
+
148
+ ```javascript
149
+ const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
150
+
151
+ // Parse flags
152
+ const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
153
+ const continueMode = $ARGUMENTS.includes('--continue')
154
+ const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
155
+ const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
156
+
157
+ // Clean requirement text (remove flags)
158
+ const requirement = $ARGUMENTS
159
+ .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
160
+ .trim()
161
+
162
+ const slug = requirement.toLowerCase()
163
+ .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
164
+ .substring(0, 40)
165
+ const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
166
+ const sessionId = `cwp-${slug}-${dateStr}`
167
+ const sessionFolder = `.workflow/.csv-wave/${sessionId}`
168
+
169
+ // Continue mode: find existing session
170
+ if (continueMode) {
171
+ const existing = Bash(`ls -t .workflow/.csv-wave/ 2>/dev/null | head -1`).trim()
172
+ if (existing) {
173
+ sessionId = existing
174
+ sessionFolder = `.workflow/.csv-wave/${sessionId}`
175
+ // Read existing tasks.csv, find incomplete waves, resume from there
176
+ const existingCsv = Read(`${sessionFolder}/tasks.csv`)
177
+ // → jump to Phase 2 with remaining waves
178
+ }
179
+ }
180
+
181
+ Bash(`mkdir -p ${sessionFolder}`)
182
+ ```
183
+
184
+ ---
185
+
186
+ ### Phase 1: Requirement → CSV
187
+
188
+ **Objective**: Decompose requirement into tasks, compute dependency waves, generate tasks.csv.
189
+
190
+ **Steps**:
191
+
192
+ 1. **Decompose Requirement**
193
+
194
+ ```javascript
195
+ // Use ccw cli to decompose requirement into subtasks
196
+ Bash({
197
+ command: `ccw cli -p "PURPOSE: Decompose requirement into 3-10 atomic tasks for batch agent execution. Each task must include implementation description, test cases, and acceptance criteria.
198
+ TASK:
199
+ • Parse requirement into independent subtasks
200
+ • Identify dependencies between tasks (which must complete before others)
201
+ • Identify context flow (which tasks need previous tasks' findings)
202
+ • For each task, define concrete test cases (unit/integration/edge)
203
+ • For each task, define measurable acceptance criteria (what defines 'done')
204
+ • Each task must be executable by a single agent with file read/write access
205
+ MODE: analysis
206
+ CONTEXT: @**/*
207
+ EXPECTED: JSON object with tasks array. Each task: {id: string, title: string, description: string, test: string, acceptance_criteria: string, scope: string, hints: string, execution_directives: string, deps: string[], context_from: string[]}.
208
+ - description: what to implement (specific enough for an agent to execute independently)
209
+ - test: what tests to write and how to verify (e.g. 'Unit test: X returns Y; Edge test: handles Z')
210
+ - acceptance_criteria: measurable conditions that define done (e.g. 'API returns 200; token expires after 1h')
211
+ - scope: target file/directory glob (e.g. 'src/auth/**') — tasks in same wave MUST have non-overlapping scopes
212
+ - hints: implementation tips + reference files, format '<tips> || <ref_file1>;<ref_file2>' (e.g. 'Use strategy pattern || src/base/Strategy.ts;docs/design.md')
213
+ - execution_directives: commands to run for verification or tool constraints (e.g. 'Run npm test --bail; Ensure tsc passes')
214
+ - deps: task IDs that must complete first
215
+ - context_from: task IDs whose findings are needed
216
+ CONSTRAINTS: 3-10 tasks | Each task is atomic | No circular deps | test and acceptance_criteria must be concrete and verifiable | Same-wave tasks must have non-overlapping scopes
217
+
218
+ REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
219
+ run_in_background: true
220
+ })
221
+ // Wait for CLI completion via hook callback
222
+ // Parse JSON from CLI output → decomposedTasks[]
223
+ ```
224
+
225
+ 2. **Compute Waves** (Topological Sort → Depth Grouping)
226
+
227
+ ```javascript
228
+ function computeWaves(tasks) {
229
+ // Build adjacency: task.deps → predecessors
230
+ const taskMap = new Map(tasks.map(t => [t.id, t]))
231
+ const inDegree = new Map(tasks.map(t => [t.id, 0]))
232
+ const adjList = new Map(tasks.map(t => [t.id, []]))
233
+
234
+ for (const task of tasks) {
235
+ for (const dep of task.deps) {
236
+ if (taskMap.has(dep)) {
237
+ adjList.get(dep).push(task.id)
238
+ inDegree.set(task.id, inDegree.get(task.id) + 1)
239
+ }
240
+ }
241
+ }
242
+
243
+ // BFS-based topological sort with depth tracking
244
+ const queue = [] // [taskId, depth]
245
+ const waveAssignment = new Map()
246
+
247
+ for (const [id, deg] of inDegree) {
248
+ if (deg === 0) {
249
+ queue.push([id, 1])
250
+ waveAssignment.set(id, 1)
251
+ }
252
+ }
253
+
254
+ let maxWave = 1
255
+ let idx = 0
256
+ while (idx < queue.length) {
257
+ const [current, depth] = queue[idx++]
258
+ for (const next of adjList.get(current)) {
259
+ const newDeg = inDegree.get(next) - 1
260
+ inDegree.set(next, newDeg)
261
+ const nextDepth = Math.max(waveAssignment.get(next) || 0, depth + 1)
262
+ waveAssignment.set(next, nextDepth)
263
+ if (newDeg === 0) {
264
+ queue.push([next, nextDepth])
265
+ maxWave = Math.max(maxWave, nextDepth)
266
+ }
267
+ }
268
+ }
269
+
270
+ // Detect cycles: any task without wave assignment
271
+ for (const task of tasks) {
272
+ if (!waveAssignment.has(task.id)) {
273
+ throw new Error(`Circular dependency detected involving task ${task.id}`)
274
+ }
275
+ }
276
+
277
+ return { waveAssignment, maxWave }
278
+ }
279
+
280
+ const { waveAssignment, maxWave } = computeWaves(decomposedTasks)
281
+ ```
282
+
283
+ 3. **Generate tasks.csv**
284
+
285
+ ```javascript
286
+ const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error'
287
+ const rows = decomposedTasks.map(task => {
288
+ const wave = waveAssignment.get(task.id)
289
+ return [
290
+ task.id,
291
+ csvEscape(task.title),
292
+ csvEscape(task.description),
293
+ csvEscape(task.test),
294
+ csvEscape(task.acceptance_criteria),
295
+ csvEscape(task.scope),
296
+ csvEscape(task.hints),
297
+ csvEscape(task.execution_directives),
298
+ task.deps.join(';'),
299
+ task.context_from.join(';'),
300
+ wave,
301
+ 'pending', // status
302
+ '', // findings
303
+ '', // files_modified
304
+ '', // tests_passed
305
+ '', // acceptance_met
306
+ '' // error
307
+ ].map(cell => `"${String(cell).replace(/"/g, '""')}"`).join(',')
308
+ })
309
+
310
+ Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n'))
311
+ ```
312
+
313
+ 4. **User Validation** (skip if AUTO_YES)
314
+
315
+ ```javascript
316
+ if (!AUTO_YES) {
317
+ // Display task breakdown with wave assignment
318
+ console.log(`\n## Task Breakdown (${decomposedTasks.length} tasks, ${maxWave} waves)\n`)
319
+ for (let w = 1; w <= maxWave; w++) {
320
+ const waveTasks = decomposedTasks.filter(t => waveAssignment.get(t.id) === w)
321
+ console.log(`### Wave ${w} (${waveTasks.length} tasks, concurrent)`)
322
+ waveTasks.forEach(t => console.log(` - [${t.id}] ${t.title}`))
323
+ }
324
+
325
+ const answer = request_user_input({
326
+ questions: [{
327
+ header: "验证",
328
+ id: "validation",
329
+ question: "Approve task breakdown?",
330
+ options: [
331
+ { label: "Approve(Recommended)", description: "Proceed with wave execution" },
332
+ { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv manually, then --continue` },
333
+ { label: "Cancel", description: "Abort" }
334
+ ]
335
+ }]
336
+ }) // BLOCKS
337
+
338
+ if (answer.answers.validation.answers[0] === "Modify") {
339
+ console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $csv-wave-pipeline --continue`)
340
+ return
341
+ } else if (answer.answers.validation.answers[0] === "Cancel") {
342
+ return
343
+ }
344
+ }
345
+ ```
346
+
347
+ **Success Criteria**:
348
+ - tasks.csv created with valid schema and wave assignments
349
+ - No circular dependencies
350
+ - User approved (or AUTO_YES)
351
+
352
+ ---
353
+
354
+ ### Phase 2: Wave Execution Engine
355
+
356
+ **Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave sees previous waves' results.
357
+
358
+ **Steps**:
359
+
360
+ 1. **Wave Loop**
361
+
362
+ ```javascript
363
+ const failedIds = new Set()
364
+ const skippedIds = new Set()
365
+
366
+ for (let wave = 1; wave <= maxWave; wave++) {
367
+ console.log(`\n## Wave ${wave}/${maxWave}\n`)
368
+
369
+ // 1. Read current master CSV
370
+ const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
371
+
372
+ // 2. Filter tasks for this wave
373
+ const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
374
+
375
+ // 3. Skip tasks whose deps failed
376
+ const executableTasks = []
377
+ for (const task of waveTasks) {
378
+ const deps = task.deps.split(';').filter(Boolean)
379
+ if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
380
+ skippedIds.add(task.id)
381
+ // Update master CSV: mark as skipped
382
+ updateMasterCsvRow(sessionFolder, task.id, {
383
+ status: 'skipped',
384
+ error: 'Dependency failed or skipped'
385
+ })
386
+ console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
387
+ continue
388
+ }
389
+ executableTasks.push(task)
390
+ }
391
+
392
+ if (executableTasks.length === 0) {
393
+ console.log(` No executable tasks in wave ${wave}`)
394
+ continue
395
+ }
396
+
397
+ // 4. Build prev_context for each task
398
+ for (const task of executableTasks) {
399
+ const contextIds = task.context_from.split(';').filter(Boolean)
400
+ const prevFindings = contextIds
401
+ .map(id => {
402
+ const prevRow = masterCsv.find(r => r.id === id)
403
+ if (prevRow && prevRow.status === 'completed' && prevRow.findings) {
404
+ return `[Task ${id}: ${prevRow.title}] ${prevRow.findings}`
405
+ }
406
+ return null
407
+ })
408
+ .filter(Boolean)
409
+ .join('\n')
410
+ task.prev_context = prevFindings || 'No previous context available'
411
+ }
412
+
413
+ // 5. Write wave CSV
414
+ const waveHeader = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context'
415
+ const waveRows = executableTasks.map(t =>
416
+ [t.id, t.title, t.description, t.test, t.acceptance_criteria, t.scope, t.hints, t.execution_directives, t.deps, t.context_from, t.wave, t.prev_context]
417
+ .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
418
+ .join(',')
419
+ )
420
+ Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
421
+
422
+ // 6. Execute wave
423
+ console.log(` Executing ${executableTasks.length} tasks (concurrency: ${maxConcurrency})...`)
424
+
425
+ const waveResult = spawn_agents_on_csv({
426
+ csv_path: `${sessionFolder}/wave-${wave}.csv`,
427
+ id_column: "id",
428
+ instruction: buildInstructionTemplate(sessionFolder, wave),
429
+ max_concurrency: maxConcurrency,
430
+ max_runtime_seconds: 600,
431
+ output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
432
+ output_schema: {
433
+ type: "object",
434
+ properties: {
435
+ id: { type: "string" },
436
+ status: { type: "string", enum: ["completed", "failed"] },
437
+ findings: { type: "string" },
438
+ files_modified: { type: "array", items: { type: "string" } },
439
+ tests_passed: { type: "boolean" },
440
+ acceptance_met: { type: "string" },
441
+ error: { type: "string" }
442
+ },
443
+ required: ["id", "status", "findings", "tests_passed"]
444
+ }
445
+ })
446
+ // ↑ Blocks until all agents in this wave complete
447
+
448
+ // 7. Merge results into master CSV
449
+ const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
450
+ for (const result of waveResults) {
451
+ updateMasterCsvRow(sessionFolder, result.id, {
452
+ status: result.status,
453
+ findings: result.findings || '',
454
+ files_modified: (result.files_modified || []).join(';'),
455
+ tests_passed: String(result.tests_passed ?? ''),
456
+ acceptance_met: result.acceptance_met || '',
457
+ error: result.error || ''
458
+ })
459
+
460
+ if (result.status === 'failed') {
461
+ failedIds.add(result.id)
462
+ console.log(` [${result.id}] ${result.title} → FAILED: ${result.error}`)
463
+ } else {
464
+ console.log(` [${result.id}] ${result.title} → COMPLETED`)
465
+ }
466
+ }
467
+
468
+ // 8. Cleanup temporary wave CSV
469
+ Bash(`rm -f "${sessionFolder}/wave-${wave}.csv"`)
470
+
471
+ console.log(` Wave ${wave} done: ${waveResults.filter(r => r.status === 'completed').length} completed, ${waveResults.filter(r => r.status === 'failed').length} failed`)
472
+ }
473
+ ```
474
+
475
+ 2. **Instruction Template Builder**
476
+
477
+ ```javascript
478
+ function buildInstructionTemplate(sessionFolder, wave) {
479
+ return `
480
+ ## TASK ASSIGNMENT
481
+
482
+ ### MANDATORY FIRST STEPS
483
+ 1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
484
+ 2. Read project context: .workflow/project-tech.json (if exists)
485
+
486
+ ---
487
+
488
+ ## Your Task
489
+
490
+ **Task ID**: {id}
491
+ **Title**: {title}
492
+ **Description**: {description}
493
+ **Scope**: {scope}
494
+
495
+ ### Implementation Hints & Reference Files
496
+ {hints}
497
+
498
+ > Format: \`<tips> || <ref_file1>;<ref_file2>\`. Read ALL reference files (after ||) before starting implementation. Apply tips (before ||) as implementation guidance.
499
+
500
+ ### Execution Directives
501
+ {execution_directives}
502
+
503
+ > Commands to run for verification, tool restrictions, or environment requirements. Follow these constraints during and after implementation.
504
+
505
+ ### Test Cases
506
+ {test}
507
+
508
+ ### Acceptance Criteria
509
+ {acceptance_criteria}
510
+
511
+ ### Previous Tasks' Findings (Context)
512
+ {prev_context}
513
+
514
+ ---
515
+
516
+ ## Execution Protocol
517
+
518
+ 1. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
519
+ 2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings
520
+ 3. **Use context**: Apply previous tasks' findings from prev_context above
521
+ 4. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
522
+ 5. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
523
+ 6. **Execute**: Implement the task as described
524
+ 7. **Write tests**: Implement the test cases defined above
525
+ 8. **Run directives**: Execute commands from {execution_directives} to verify your work
526
+ 9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion
527
+ 10. **Share discoveries**: Append exploration findings to shared board:
528
+ \`\`\`bash
529
+ echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
530
+ \`\`\`
531
+ 11. **Report result**: Return JSON via report_agent_job_result
532
+
533
+ ### Discovery Types to Share
534
+ - \`code_pattern\`: {name, file, description} — reusable patterns found
535
+ - \`integration_point\`: {file, description, exports[]} — module connection points
536
+ - \`convention\`: {naming, imports, formatting} — code style conventions
537
+ - \`blocker\`: {issue, severity, impact} — blocking issues encountered
538
+
539
+ ---
540
+
541
+ ## Output (report_agent_job_result)
542
+
543
+ Return JSON:
544
+ {
545
+ "id": "{id}",
546
+ "status": "completed" | "failed",
547
+ "findings": "Key discoveries and implementation notes (max 500 chars)",
548
+ "files_modified": ["path1", "path2"],
549
+ "tests_passed": true | false,
550
+ "acceptance_met": "Summary of which acceptance criteria were met/unmet",
551
+ "error": ""
552
+ }
553
+
554
+ **IMPORTANT**: Set status to "completed" ONLY if:
555
+ - All test cases pass
556
+ - All acceptance criteria are met
557
+ Otherwise set status to "failed" with details in error field.
558
+ `
559
+ }
560
+ ```
561
+
562
+ 3. **Master CSV Update Helper**
563
+
564
+ ```javascript
565
+ function updateMasterCsvRow(sessionFolder, taskId, updates) {
566
+ const csvPath = `${sessionFolder}/tasks.csv`
567
+ const content = Read(csvPath)
568
+ const lines = content.split('\n')
569
+ const header = lines[0].split(',')
570
+
571
+ for (let i = 1; i < lines.length; i++) {
572
+ const cells = parseCsvLine(lines[i])
573
+ if (cells[0] === taskId || cells[0] === `"${taskId}"`) {
574
+ // Update specified columns
575
+ for (const [col, val] of Object.entries(updates)) {
576
+ const colIdx = header.indexOf(col)
577
+ if (colIdx >= 0) {
578
+ cells[colIdx] = `"${String(val).replace(/"/g, '""')}"`
579
+ }
580
+ }
581
+ lines[i] = cells.join(',')
582
+ break
583
+ }
584
+ }
585
+
586
+ Write(csvPath, lines.join('\n'))
587
+ }
588
+ ```
589
+
590
+ **Success Criteria**:
591
+ - All waves executed in order
592
+ - Each wave's results merged into master CSV before next wave starts
593
+ - Dependent tasks skipped when predecessor failed
594
+ - discoveries.ndjson accumulated across all waves
595
+
596
+ ---
597
+
598
+ ### Phase 3: Results Aggregation
599
+
600
+ **Objective**: Generate final results and human-readable report.
601
+
602
+ **Steps**:
603
+
604
+ 1. **Export results.csv**
605
+
606
+ ```javascript
607
+ const masterCsv = Read(`${sessionFolder}/tasks.csv`)
608
+ // results.csv = master CSV (already has all results populated)
609
+ Write(`${sessionFolder}/results.csv`, masterCsv)
610
+ ```
611
+
612
+ 2. **Generate context.md**
613
+
614
+ ```javascript
615
+ const tasks = parseCsv(masterCsv)
616
+ const completed = tasks.filter(t => t.status === 'completed')
617
+ const failed = tasks.filter(t => t.status === 'failed')
618
+ const skipped = tasks.filter(t => t.status === 'skipped')
619
+
620
+ const contextContent = `# CSV Batch Execution Report
621
+
622
+ **Session**: ${sessionId}
623
+ **Requirement**: ${requirement}
624
+ **Completed**: ${getUtc8ISOString()}
625
+ **Waves**: ${maxWave} | **Concurrency**: ${maxConcurrency}
626
+
627
+ ---
628
+
629
+ ## Summary
630
+
631
+ | Metric | Count |
632
+ |--------|-------|
633
+ | Total Tasks | ${tasks.length} |
634
+ | Completed | ${completed.length} |
635
+ | Failed | ${failed.length} |
636
+ | Skipped | ${skipped.length} |
637
+ | Waves | ${maxWave} |
638
+
639
+ ---
640
+
641
+ ## Wave Execution
642
+
643
+ ${Array.from({ length: maxWave }, (_, i) => i + 1).map(w => {
644
+ const waveTasks = tasks.filter(t => parseInt(t.wave) === w)
645
+ return `### Wave ${w}
646
+ ${waveTasks.map(t => `- **[${t.id}] ${t.title}**: ${t.status}${t.tests_passed ? ' ✓tests' : ''}${t.error ? ' — ' + t.error : ''}
647
+ ${t.findings ? 'Findings: ' + t.findings : ''}`).join('\n')}`
648
+ }).join('\n\n')}
649
+
650
+ ---
651
+
652
+ ## Task Details
653
+
654
+ ${tasks.map(t => `### ${t.id}: ${t.title}
655
+
656
+ | Field | Value |
657
+ |-------|-------|
658
+ | Status | ${t.status} |
659
+ | Wave | ${t.wave} |
660
+ | Scope | ${t.scope || 'none'} |
661
+ | Dependencies | ${t.deps || 'none'} |
662
+ | Context From | ${t.context_from || 'none'} |
663
+ | Tests Passed | ${t.tests_passed || 'N/A'} |
664
+ | Acceptance Met | ${t.acceptance_met || 'N/A'} |
665
+ | Error | ${t.error || 'none'} |
666
+
667
+ **Description**: ${t.description}
668
+
669
+ **Test Cases**: ${t.test || 'N/A'}
670
+
671
+ **Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'}
672
+
673
+ **Hints**: ${t.hints || 'N/A'}
674
+
675
+ **Execution Directives**: ${t.execution_directives || 'N/A'}
676
+
677
+ **Findings**: ${t.findings || 'N/A'}
678
+
679
+ **Files Modified**: ${t.files_modified || 'none'}
680
+ `).join('\n---\n')}
681
+
682
+ ---
683
+
684
+ ## All Modified Files
685
+
686
+ ${[...new Set(tasks.flatMap(t => (t.files_modified || '').split(';')).filter(Boolean))].map(f => '- ' + f).join('\n') || 'None'}
687
+ `
688
+
689
+ Write(`${sessionFolder}/context.md`, contextContent)
690
+ ```
691
+
692
+ 3. **Display Summary**
693
+
694
+ ```javascript
695
+ console.log(`
696
+ ## Execution Complete
697
+
698
+ - **Session**: ${sessionId}
699
+ - **Waves**: ${maxWave}
700
+ - **Completed**: ${completed.length}/${tasks.length}
701
+ - **Failed**: ${failed.length}
702
+ - **Skipped**: ${skipped.length}
703
+
704
+ **Results**: ${sessionFolder}/results.csv
705
+ **Report**: ${sessionFolder}/context.md
706
+ **Discoveries**: ${sessionFolder}/discoveries.ndjson
707
+ `)
708
+ ```
709
+
710
+ 4. **Offer Next Steps** (skip if AUTO_YES)
711
+
712
+ ```javascript
713
+ if (!AUTO_YES && failed.length > 0) {
714
+ const answer = request_user_input({
715
+ questions: [{
716
+ header: "下一步",
717
+ id: "next_step",
718
+ question: `${failed.length} tasks failed. Next action?`,
719
+ options: [
720
+ { label: "Retry Failed(Recommended)", description: `Re-execute ${failed.length} failed tasks with updated context` },
721
+ { label: "View Report", description: "Display context.md" },
722
+ { label: "Done", description: "Complete session" }
723
+ ]
724
+ }]
725
+ }) // BLOCKS
726
+
727
+ if (answer.answers.next_step.answers[0] === "Retry Failed(Recommended)") {
728
+ // Reset failed tasks to pending, re-run Phase 2 for their waves
729
+ for (const task of failed) {
730
+ updateMasterCsvRow(sessionFolder, task.id, { status: 'pending', error: '' })
731
+ }
732
+ // Also reset skipped tasks whose deps are now retrying
733
+ for (const task of skipped) {
734
+ updateMasterCsvRow(sessionFolder, task.id, { status: 'pending', error: '' })
735
+ }
736
+ // Re-execute Phase 2 (loop will skip already-completed tasks)
737
+ // → goto Phase 2
738
+ } else if (answer.answers.next_step.answers[0] === "View Report") {
739
+ console.log(Read(`${sessionFolder}/context.md`))
740
+ }
741
+ }
742
+ ```
743
+
744
+ **Success Criteria**:
745
+ - results.csv exported
746
+ - context.md generated
747
+ - Summary displayed to user
748
+
749
+ ---
750
+
751
+ ## Shared Discovery Board Protocol
752
+
753
+ All agents across all waves share `discoveries.ndjson`. This eliminates redundant codebase exploration.
754
+
755
+ **Lifecycle**:
756
+ - Created by the first agent to write a discovery
757
+ - Carries over across waves — never cleared
758
+ - Agents append via `echo '...' >> discoveries.ndjson`
759
+
760
+ **Format**: NDJSON, each line is a self-contained JSON:
761
+
762
+ ```jsonl
763
+ {"ts":"2026-02-28T10:00:00+08:00","worker":"1","type":"code_pattern","data":{"name":"repository-pattern","file":"src/repos/Base.ts","description":"Abstract CRUD repository"}}
764
+ {"ts":"2026-02-28T10:01:00+08:00","worker":"2","type":"integration_point","data":{"file":"src/auth/index.ts","description":"Auth module entry","exports":["authenticate","authorize"]}}
765
+ ```
766
+
767
+ **Discovery Types**:
768
+
769
+ | type | Dedup Key | Description |
770
+ |------|-----------|-------------|
771
+ | `code_pattern` | `data.name` | Reusable code pattern found |
772
+ | `integration_point` | `data.file` | Module connection point |
773
+ | `convention` | singleton | Code style conventions |
774
+ | `blocker` | `data.issue` | Blocking issue encountered |
775
+ | `tech_stack` | singleton | Project technology stack |
776
+ | `test_command` | singleton | Test commands discovered |
777
+
778
+ **Protocol Rules**:
779
+ 1. Read board before own exploration → skip covered areas
780
+ 2. Write discoveries immediately via `echo >>` → don't batch
781
+ 3. Deduplicate — check existing entries; skip if same type + dedup key exists
782
+ 4. Append-only — never modify or delete existing lines
783
+
784
+ ---
785
+
786
+ ## Wave Computation Details
787
+
788
+ ### Algorithm
789
+
790
+ Kahn's BFS topological sort with depth tracking:
791
+
792
+ ```
793
+ Input: tasks[] with deps[]
794
+ Output: waveAssignment (taskId → wave number)
795
+
796
+ 1. Build in-degree map and adjacency list from deps
797
+ 2. Enqueue all tasks with in-degree 0 at wave 1
798
+ 3. BFS: for each dequeued task at wave W:
799
+ - For each dependent task D:
800
+ - Decrement D's in-degree
801
+ - D.wave = max(D.wave, W + 1)
802
+ - If D's in-degree reaches 0, enqueue D
803
+ 4. Any task without wave assignment → circular dependency error
804
+ ```
805
+
806
+ ### Wave Properties
807
+
808
+ - **Wave 1**: No dependencies — all tasks in wave 1 are fully independent
809
+ - **Wave N**: All dependencies are in waves 1..(N-1) — guaranteed completed before wave N starts
810
+ - **Within a wave**: Tasks are independent of each other → safe for concurrent execution
811
+
812
+ ### Example
813
+
814
+ ```
815
+ Task A (no deps) → Wave 1
816
+ Task B (no deps) → Wave 1
817
+ Task C (deps: A) → Wave 2
818
+ Task D (deps: A, B) → Wave 2
819
+ Task E (deps: C, D) → Wave 3
820
+
821
+ Execution:
822
+ Wave 1: [A, B] ← concurrent
823
+ Wave 2: [C, D] ← concurrent, sees A+B findings
824
+ Wave 3: [E] ← sees A+B+C+D findings
825
+ ```
826
+
827
+ ---
828
+
829
+ ## Context Propagation Flow
830
+
831
+ ```
832
+ Wave 1 agents:
833
+ ├─ Execute tasks (no prev_context)
834
+ ├─ Write findings to report_agent_job_result
835
+ └─ Append discoveries to discoveries.ndjson
836
+
837
+ ↓ merge results into master CSV
838
+
839
+ Wave 2 agents:
840
+ ├─ Read discoveries.ndjson (exploration sharing)
841
+ ├─ Read prev_context column (wave 1 findings from context_from)
842
+ ├─ Execute tasks with full upstream context
843
+ ├─ Write findings to report_agent_job_result
844
+ └─ Append new discoveries to discoveries.ndjson
845
+
846
+ ↓ merge results into master CSV
847
+
848
+ Wave 3 agents:
849
+ ├─ Read discoveries.ndjson (accumulated from waves 1+2)
850
+ ├─ Read prev_context column (wave 1+2 findings from context_from)
851
+ ├─ Execute tasks
852
+ └─ ...
853
+ ```
854
+
855
+ **Two context channels**:
856
+ 1. **CSV findings** (structured): `context_from` column → `prev_context` injection — task-specific directed context
857
+ 2. **NDJSON discoveries** (broadcast): `discoveries.ndjson` — general exploration findings available to all
858
+
859
+ ---
860
+
861
+ ## Error Handling
862
+
863
+ | Error | Resolution |
864
+ |-------|------------|
865
+ | Circular dependency | Detect in wave computation, abort with error message |
866
+ | Agent timeout | Mark as failed in results, continue with wave |
867
+ | Agent failed | Mark as failed, skip dependent tasks in later waves |
868
+ | All agents in wave failed | Log error, offer retry or abort |
869
+ | CSV parse error | Validate CSV format before execution, show line number |
870
+ | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
871
+ | Continue mode: no session found | List available sessions, prompt user to select |
872
+
873
+ ---
874
+
875
+ ## Core Rules
876
+
877
+ 1. **Start Immediately**: First action is session initialization, then Phase 1
878
+ 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
879
+ 3. **CSV is Source of Truth**: Master tasks.csv holds all state — always read before wave, always write after
880
+ 4. **Context Propagation**: prev_context built from master CSV, not from memory
881
+ 5. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
882
+ 6. **Skip on Failure**: If a dependency failed, skip the dependent task (don't attempt)
883
+ 7. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
884
+ 8. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
885
+
886
+ ---
887
+
888
+ ## Best Practices
889
+
890
+ 1. **Task Granularity**: 3–10 tasks is optimal; too many tasks create orchestration overhead, too few yield no parallelism benefit
891
+ 2. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism
892
+ 3. **Specific Descriptions**: Agent sees only its CSV row + prev_context — make description self-contained
893
+ 4. **Context From ≠ Deps**: `deps` = execution-order constraint; `context_from` = information flow. A task can declare `context_from` without `deps` — it will read the referenced tasks' findings if those tasks have already completed, but no ordering is enforced, so that context may be empty if they run in the same or a later wave
894
+ 5. **Concurrency Tuning**: `-c 1` for serial execution (maximum context sharing); `-c 8` for I/O-bound tasks
895
+
896
+ ---
897
+
898
+ ## Usage Recommendations
899
+
900
+ | Scenario | Recommended Approach |
901
+ |----------|---------------------|
902
+ | Independent parallel tasks (no deps) | `$csv-wave-pipeline -c 8` — single wave, max parallelism |
903
+ | Linear pipeline (A→B→C) | `$csv-wave-pipeline -c 1` — 3 waves, serial, full context |
904
+ | Diamond dependency (A→B,C→D) | `$csv-wave-pipeline` — 3 waves, B+C concurrent in wave 2 |
905
+ | Complex requirement, unclear tasks | Use `$roadmap-with-file` first for planning, then feed issues here |
906
+ | Single complex task | Use `$workflow-lite-plan` instead |