claude-code-workflow 7.2.14 → 7.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. package/.claude/commands/workflow/analyze-with-file.md +7 -0
  2. package/.codex/skills/analyze-with-file/SKILL.md +1181 -1182
  3. package/.codex/skills/brainstorm/SKILL.md +723 -725
  4. package/.codex/skills/brainstorm-with-file/SKILL.md +10 -5
  5. package/.codex/skills/clean/SKILL.md +33 -26
  6. package/.codex/skills/collaborative-plan-with-file/SKILL.md +830 -831
  7. package/.codex/skills/csv-wave-pipeline/SKILL.md +906 -906
  8. package/.codex/skills/issue-discover/SKILL.md +57 -50
  9. package/.codex/skills/issue-discover/phases/01-issue-new.md +18 -11
  10. package/.codex/skills/issue-discover/phases/02-discover.md +31 -26
  11. package/.codex/skills/issue-discover/phases/03-discover-by-prompt.md +13 -11
  12. package/.codex/skills/issue-discover/phases/04-quick-execute.md +32 -27
  13. package/.codex/skills/parallel-dev-cycle/SKILL.md +402 -402
  14. package/.codex/skills/project-documentation-workflow/SKILL.md +13 -3
  15. package/.codex/skills/roadmap-with-file/SKILL.md +901 -897
  16. package/.codex/skills/session-sync/SKILL.md +222 -212
  17. package/.codex/skills/spec-add/SKILL.md +620 -613
  18. package/.codex/skills/spec-generator/SKILL.md +2 -2
  19. package/.codex/skills/spec-generator/phases/01-5-requirement-clarification.md +10 -10
  20. package/.codex/skills/spec-generator/phases/01-discovery.md +11 -18
  21. package/.codex/skills/spec-generator/phases/02-product-brief.md +5 -5
  22. package/.codex/skills/spec-generator/phases/03-requirements.md +7 -7
  23. package/.codex/skills/spec-generator/phases/04-architecture.md +4 -4
  24. package/.codex/skills/spec-generator/phases/05-epics-stories.md +5 -6
  25. package/.codex/skills/spec-generator/phases/06-readiness-check.md +10 -17
  26. package/.codex/skills/spec-generator/phases/07-issue-export.md +326 -329
  27. package/.codex/skills/spec-setup/SKILL.md +669 -657
  28. package/.codex/skills/team-arch-opt/SKILL.md +50 -50
  29. package/.codex/skills/team-arch-opt/agents/completion-handler.md +3 -3
  30. package/.codex/skills/team-brainstorm/SKILL.md +724 -725
  31. package/.codex/skills/team-coordinate/SKILL.md +51 -51
  32. package/.codex/skills/team-coordinate/agents/completion-handler.md +3 -3
  33. package/.codex/skills/team-coordinate/agents/plan-reviewer.md +4 -4
  34. package/.codex/skills/team-designer/SKILL.md +691 -691
  35. package/.codex/skills/team-designer/agents/requirement-clarifier.md +11 -12
  36. package/.codex/skills/team-executor/SKILL.md +45 -45
  37. package/.codex/skills/team-frontend/SKILL.md +45 -45
  38. package/.codex/skills/team-frontend/agents/completion-handler.md +3 -3
  39. package/.codex/skills/team-frontend/agents/qa-gate-reviewer.md +4 -4
  40. package/.codex/skills/team-frontend-debug/SKILL.md +50 -50
  41. package/.codex/skills/team-frontend-debug/agents/completion-handler.md +3 -3
  42. package/.codex/skills/team-frontend-debug/agents/conditional-skip-gate.md +4 -4
  43. package/.codex/skills/team-issue/SKILL.md +751 -740
  44. package/.codex/skills/team-iterdev/SKILL.md +825 -826
  45. package/.codex/skills/team-lifecycle-v4/SKILL.md +775 -775
  46. package/.codex/skills/team-lifecycle-v4/agents/quality-gate.md +165 -165
  47. package/.codex/skills/team-lifecycle-v4/agents/requirement-clarifier.md +163 -163
  48. package/.codex/skills/team-perf-opt/SKILL.md +50 -50
  49. package/.codex/skills/team-perf-opt/agents/completion-handler.md +3 -3
  50. package/.codex/skills/team-planex-v2/SKILL.md +652 -637
  51. package/.codex/skills/team-quality-assurance/SKILL.md +51 -52
  52. package/.codex/skills/team-review/SKILL.md +40 -40
  53. package/.codex/skills/team-roadmap-dev/SKILL.md +51 -51
  54. package/.codex/skills/team-roadmap-dev/agents/roadmap-discusser.md +8 -8
  55. package/.codex/skills/team-tech-debt/SKILL.md +50 -50
  56. package/.codex/skills/team-tech-debt/agents/plan-approver.md +5 -5
  57. package/.codex/skills/team-testing/SKILL.md +51 -52
  58. package/.codex/skills/team-uidesign/SKILL.md +40 -40
  59. package/.codex/skills/team-uidesign/agents/completion-handler.md +177 -177
  60. package/.codex/skills/team-ultra-analyze/SKILL.md +786 -787
  61. package/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md +8 -8
  62. package/.codex/skills/team-ux-improve/SKILL.md +51 -52
  63. package/.codex/skills/team-ux-improve/agents/ux-designer.md +2 -2
  64. package/.codex/skills/team-ux-improve/agents/ux-explorer.md +1 -1
  65. package/.codex/skills/unified-execute-with-file/SKILL.md +797 -796
  66. package/.codex/skills/workflow-execute/SKILL.md +1117 -1118
  67. package/.codex/skills/workflow-lite-planex/SKILL.md +1144 -1141
  68. package/.codex/skills/workflow-plan/SKILL.md +631 -636
  69. package/.codex/skills/workflow-tdd-plan/SKILL.md +753 -759
  70. package/.codex/skills/workflow-test-fix-cycle/SKILL.md +402 -392
  71. package/README.md +25 -0
  72. package/ccw/dist/commands/install.d.ts.map +1 -1
  73. package/ccw/dist/commands/install.js +12 -0
  74. package/ccw/dist/commands/install.js.map +1 -1
  75. package/package.json +1 -1
@@ -1,1141 +1,1144 @@
1
- ---
2
- name: workflow-lite-plan
3
- description: Explore-first wave pipeline. Decomposes requirement into exploration angles, runs wave exploration via spawn_agents_on_csv, synthesizes findings into execution tasks with cross-phase context linking (E*→T*), then wave-executes via spawn_agents_on_csv.
4
- argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] \"requirement description\""
5
- allowed-tools: spawn_agents_on_csv, Read, Write, Edit, Bash, Glob, Grep, AskUserQuestion
6
- ---
7
-
8
- ## Auto Mode
9
-
10
- When `--yes` or `-y`: Auto-confirm decomposition, skip interactive validation, use defaults.
11
-
12
- # Workflow Lite Planex
13
-
14
- ## Usage
15
-
16
- ```bash
17
- $workflow-lite-plan "Implement user authentication with OAuth, JWT, and 2FA"
18
- $workflow-lite-plan -c 4 "Refactor payment module with Stripe and PayPal"
19
- $workflow-lite-plan -y "Build notification system with email and SMS"
20
- $workflow-lite-plan --continue "auth-20260228"
21
- ```
22
-
23
- **Flags**:
24
- - `-y, --yes`: Skip all confirmations (auto mode)
25
- - `-c, --concurrency N`: Max concurrent agents within each wave (default: 4)
26
- - `--continue`: Resume existing session
27
-
28
- **Output Directory**: `.workflow/.lite-plan/{session-id}/`
29
-
30
- ---
31
-
32
- ## Overview
33
-
34
- Explore-first wave-based pipeline using `spawn_agents_on_csv`. Two-stage CSV execution: **explore.csv** (codebase discovery) → **tasks.csv** (implementation), with cross-phase context propagation via `context_from` linking (`E*` → `T*`).
35
-
36
- **Core workflow**: Decompose → Wave Explore → Synthesize & Plan → Wave Execute → Aggregate
37
-
38
- ```
39
- ┌──────────────────────────────────────────────────────────────────────┐
40
- │ WORKFLOW LITE PLANEX │
41
- ├──────────────────────────────────────────────────────────────────────┤
42
- │ │
43
- │ Phase 1: Requirement → explore.csv │
44
- │ ├─ Analyze complexity → select exploration angles (1-4) │
45
- │ ├─ Generate explore.csv (1 row per angle) │
46
- │ └─ User validates (skip if -y)
47
- │ │
48
- │ Phase 2: Wave Explore (spawn_agents_on_csv) │
49
- │ ├─ For each explore wave: │
50
- │ │ ├─ Build wave CSV from explore.csv │
51
- │ │ ├─ spawn_agents_on_csv(explore instruction template) │
52
- │ │ └─ Merge findings/key_files into explore.csv │
53
- │ └─ discoveries.ndjson shared across agents │
54
- │ │
55
- │ Phase 3: Synthesize & Plan → tasks.csv │
56
- │ ├─ Read all explore findings → cross-reference │
57
- │ ├─ Resolve conflicts between angles │
58
- │ ├─ Decompose into execution tasks with context_from: E*;T* │
59
- │ ├─ Compute dependency waves (topological sort) │
60
- │ └─ User validates (skip if -y)
61
- │ │
62
- │ Phase 4: Wave Execute (spawn_agents_on_csv) │
63
- │ ├─ For each task wave: │
64
- │ │ ├─ Build prev_context from explore.csv + tasks.csv │
65
- │ │ ├─ Build wave CSV with prev_context column │
66
- │ │ ├─ spawn_agents_on_csv(execute instruction template) │
67
- │ │ └─ Merge results into tasks.csv │
68
- │ └─ discoveries.ndjson carries across all waves │
69
- │ │
70
- │ Phase 5: Aggregate │
71
- │ ├─ Export results.csv │
72
- │ ├─ Generate context.md with all findings │
73
- │ └─ Display summary │
74
- │ │
75
- └──────────────────────────────────────────────────────────────────────┘
76
- ```
77
-
78
- ---
79
-
80
- ## Context Flow
81
-
82
- ```
83
- explore.csv tasks.csv
84
- ┌──────────┐ ┌──────────┐
85
- │ E1: arch │──────────→│ T1: setup│ context_from: E1;E2
86
- │ findings │ │ prev_ctx │← E1+E2 findings
87
- ├──────────┤ ├──────────┤
88
- │ E2: deps │──────────→│ T2: impl │ context_from: E1;T1
89
- │ findings │ │ prev_ctx │← E1+T1 findings
90
- ├──────────┤ ├──────────┤
91
- │ E3: test │──┐ ┌───→│ T3: test │ context_from: E3;T2
92
- │ findings │ └───┘ │ prev_ctx │← E3+T2 findings
93
- └──────────┘ └──────────┘
94
-
95
- Two context channels:
96
- 1. Directed: context_from → prev_context (CSV findings lookup)
97
- 2. Broadcast: discoveries.ndjson (append-only shared board)
98
-
99
- context_from prefix: E* → explore.csv lookup, T* → tasks.csv lookup
100
- ```
101
-
102
- ---
103
-
104
- ## CSV Schemas
105
-
106
- ### explore.csv
107
-
108
- ```csv
109
- id,angle,description,focus,deps,wave,status,findings,key_files,error
110
- "E1","architecture","Explore codebase architecture for: auth system","architecture","","1","pending","","",""
111
- "E2","dependencies","Explore dependency landscape for: auth system","dependencies","","1","pending","","",""
112
- "E3","testing","Explore test infrastructure for: auth system","testing","","1","pending","","",""
113
- ```
114
-
115
- **Columns**:
116
-
117
- | Column | Phase | Description |
118
- |--------|-------|-------------|
119
- | `id` | Input | Exploration ID: E1, E2, ... |
120
- | `angle` | Input | Exploration angle name |
121
- | `description` | Input | What to explore from this angle |
122
- | `focus` | Input | Keywords and focus areas |
123
- | `deps` | Input | Semicolon-separated dep IDs (usually empty — all wave 1) |
124
- | `wave` | Computed | Wave number (usually 1 for all explorations) |
125
- | `status` | Output | `pending` → `completed` / `failed` |
126
- | `findings` | Output | Discoveries (max 800 chars) |
127
- | `key_files` | Output | Relevant files (semicolon-separated) |
128
- | `error` | Output | Error message if failed |
129
-
130
- ### tasks.csv
131
-
132
- ```csv
133
- id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error
134
- "T1","Setup types","Create type definitions","Verify types compile with tsc","All interfaces exported","src/types/**","Follow existing patterns || src/types/index.ts","tsc --noEmit","","E1;E2","1","pending","","","","",""
135
- "T2","Implement core","Implement core auth logic","Unit test: login returns token","Login flow works end-to-end","src/auth/**","Reuse BaseService || src/services/Base.ts","npm test -- --grep auth","T1","E1;E2;T1","2","pending","","","","",""
136
- ```
137
-
138
- **Columns**:
139
-
140
- | Column | Phase | Description |
141
- |--------|-------|-------------|
142
- | `id` | Input | Task ID: T1, T2, ... |
143
- | `title` | Input | Short task title |
144
- | `description` | Input | Self-contained task description — what to implement |
145
- | `test` | Input | Test cases: what tests to write and how to verify (unit/integration/edge) |
146
- | `acceptance_criteria` | Input | Measurable conditions that define "done" |
147
- | `scope` | Input | Target file/directory glob — constrains agent write area, prevents cross-task file conflicts |
148
- | `hints` | Input | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. Either part is optional |
149
- | `execution_directives` | Input | Execution constraints: commands to run for verification, tool restrictions |
150
- | `deps` | Input | Dependency task IDs: T1;T2 (semicolon-separated) |
151
- | `context_from` | Input | Context source IDs: **E1;E2;T1** — `E*` lookups in explore.csv, `T*` in tasks.csv |
152
- | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
153
- | `status` | Output | `pending` → `completed` / `failed` / `skipped` |
154
- | `findings` | Output | Execution findings (max 500 chars) |
155
- | `files_modified` | Output | Semicolon-separated file paths |
156
- | `tests_passed` | Output | Whether all defined test cases passed (true/false) |
157
- | `acceptance_met` | Output | Summary of which acceptance criteria were met/unmet |
158
- | `error` | Output | Error message if failed (empty if success) |
159
-
160
- ### Per-Wave CSV (Temporary)
161
-
162
- Each wave generates a temporary CSV with an extra `prev_context` column.
163
-
164
- **Explore wave**: `explore-wave-{N}.csv` — same columns as explore.csv (no prev_context, explorations are independent).
165
-
166
- **Execute wave**: `task-wave-{N}.csv` — all task columns + `prev_context`:
167
-
168
- ```csv
169
- id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context
170
- "T2","Implement core","Implement core auth logic","Unit test: login returns token","Login flow works end-to-end","src/auth/**","Reuse BaseService || src/services/Base.ts","npm test -- --grep auth","T1","E1;E2;T1","2","[Explore architecture] Found BaseService pattern in src/services/\n[Task T1] Created types at src/types/auth.ts"
171
- ```
172
-
173
- The `prev_context` column is built from `context_from` by looking up completed rows' `findings` in both explore.csv (`E*`) and tasks.csv (`T*`).
174
-
175
- ---
176
-
177
- ## Output Artifacts
178
-
179
- | File | Purpose | Lifecycle |
180
- |------|---------|-----------|
181
- | `explore.csv` | Exploration state — angles with findings/key_files | Updated after Phase 2 |
182
- | `tasks.csv` | Execution state — tasks with results | Updated after each wave in Phase 4 |
183
- | `explore-wave-{N}.csv` | Per-wave explore input (temporary) | Created before wave, deleted after |
184
- | `task-wave-{N}.csv` | Per-wave execute input (temporary) | Created before wave, deleted after |
185
- | `results.csv` | Final results export | Created in Phase 5 |
186
- | `discoveries.ndjson` | Shared discovery board (all agents, all phases) | Append-only |
187
- | `context.md` | Human-readable execution report | Created in Phase 5 |
188
-
189
- ---
190
-
191
- ## Session Structure
192
-
193
- ```
194
- .workflow/.lite-plan/{session-id}/
195
- ├── explore.csv # Exploration state
196
- ├── tasks.csv # Execution state
197
- ├── results.csv # Final results export
198
- ├── discoveries.ndjson # Shared discovery board
199
- ├── context.md # Full context summary
200
- ├── explore-wave-{N}.csv # Temporary per-wave explore input (cleaned up)
201
- └── task-wave-{N}.csv # Temporary per-wave execute input (cleaned up)
202
- ```
203
-
204
- ---
205
-
206
- ## Implementation
207
-
208
- ### Session Initialization
209
-
210
- ```javascript
211
- const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
212
-
213
- // Parse flags
214
- const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
215
- const continueMode = $ARGUMENTS.includes('--continue')
216
- const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
217
- const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
218
-
219
- const requirement = $ARGUMENTS
220
- .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
221
- .trim()
222
-
223
- const slug = requirement.toLowerCase()
224
- .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
225
- .substring(0, 40)
226
- const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
227
- const sessionId = `wpp-${slug}-${dateStr}`
228
- const sessionFolder = `.workflow/.lite-plan/${sessionId}`
229
-
230
- // Continue mode: find existing session
231
- if (continueMode) {
232
- const existing = Bash(`ls -t .workflow/.lite-plan/ 2>/dev/null | head -1`).trim()
233
- if (existing) {
234
- sessionId = existing
235
- sessionFolder = `.workflow/.lite-plan/${sessionId}`
236
- // Check which phase to resume: if tasks.csv exists → Phase 4, else → Phase 2
237
- }
238
- }
239
-
240
- Bash(`mkdir -p ${sessionFolder}`)
241
- ```
242
-
243
- ---
244
-
245
- ### Phase 1: Requirement → explore.csv
246
-
247
- **Objective**: Analyze requirement complexity, select exploration angles, generate explore.csv.
248
-
249
- **Steps**:
250
-
251
- 1. **Analyze & Decompose**
252
-
253
- ```javascript
254
- Bash({
255
- command: `ccw cli -p "PURPOSE: Analyze requirement complexity and select 1-4 exploration angles for codebase discovery before implementation.
256
- TASK:
257
- • Classify requirement type (feature/bugfix/refactor/security/performance)
258
- • Assess complexity (Low: 1 angle, Medium: 2-3, High: 3-4)
259
- • Select exploration angles from: architecture, dependencies, integration-points, testing, patterns, security, performance, state-management, error-handling, edge-cases
260
- • For each angle, define focus keywords and what to discover
261
- MODE: analysis
262
- CONTEXT: @**/*
263
- EXPECTED: JSON object: {type: string, complexity: string, angles: [{id: string, angle: string, description: string, focus: string}]}. Each angle id = E1, E2, etc.
264
- CONSTRAINTS: 1-4 angles | Angles must be distinct | Each angle must have clear focus
265
-
266
- REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
267
- run_in_background: true
268
- })
269
- // Wait for CLI completion via hook callback
270
- // Parse JSON from CLI output → { type, complexity, angles[] }
271
- ```
272
-
273
- 2. **Generate explore.csv**
274
-
275
- ```javascript
276
- const header = 'id,angle,description,focus,deps,wave,status,findings,key_files,error'
277
- const rows = angles.map(a =>
278
- [a.id, a.angle, a.description, a.focus, '', '1', 'pending', '', '', '']
279
- .map(v => `"${String(v).replace(/"/g, '""')}"`)
280
- .join(',')
281
- )
282
-
283
- Write(`${sessionFolder}/explore.csv`, [header, ...rows].join('\n'))
284
- ```
285
-
286
- 3. **User Validation** (skip if AUTO_YES)
287
-
288
- ```javascript
289
- if (!AUTO_YES) {
290
- console.log(`\n## Exploration Plan (${angles.length} angles)\n`)
291
- angles.forEach(a => console.log(` - [${a.id}] ${a.angle}: ${a.focus}`))
292
-
293
- const answer = AskUserQuestion({
294
- questions: [{
295
- question: "Approve exploration angles?",
296
- header: "Validation",
297
- multiSelect: false,
298
- options: [
299
- { label: "Approve", description: "Proceed with wave exploration" },
300
- { label: "Modify", description: `Edit ${sessionFolder}/explore.csv manually, then --continue` },
301
- { label: "Cancel", description: "Abort" }
302
- ]
303
- }]
304
- })
305
-
306
- if (answer.Validation === "Modify") {
307
- console.log(`Edit: ${sessionFolder}/explore.csv\nResume: $workflow-lite-plan --continue`)
308
- return
309
- } else if (answer.Validation === "Cancel") {
310
- return
311
- }
312
- }
313
- ```
314
-
315
- **Success Criteria**:
316
- - explore.csv created with 1-4 exploration angles
317
- - User approved (or AUTO_YES)
318
-
319
- ---
320
-
321
- ### Phase 2: Wave Explore (spawn_agents_on_csv)
322
-
323
- **Objective**: Execute exploration via `spawn_agents_on_csv`. Each angle produces findings and key_files.
324
-
325
- **Steps**:
326
-
327
- 1. **Explore Wave Loop**
328
-
329
- ```javascript
330
- const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
331
- const maxExploreWave = Math.max(...exploreCSV.map(r => parseInt(r.wave)))
332
-
333
- for (let wave = 1; wave <= maxExploreWave; wave++) {
334
- const waveTasks = exploreCSV.filter(r =>
335
- parseInt(r.wave) === wave && r.status === 'pending'
336
- )
337
- if (waveTasks.length === 0) continue
338
-
339
- // Skip rows with failed dependencies
340
- const executableTasks = []
341
- for (const task of waveTasks) {
342
- const deps = (task.deps || '').split(';').filter(Boolean)
343
- if (deps.some(d => {
344
- const dep = exploreCSV.find(r => r.id === d)
345
- return !dep || dep.status !== 'completed'
346
- })) {
347
- task.status = 'skipped'
348
- task.error = 'Dependency failed/skipped'
349
- continue
350
- }
351
- executableTasks.push(task)
352
- }
353
-
354
- if (executableTasks.length === 0) continue
355
-
356
- // Write explore wave CSV
357
- const waveHeader = 'id,angle,description,focus,deps,wave'
358
- const waveRows = executableTasks.map(t =>
359
- [t.id, t.angle, t.description, t.focus, t.deps, t.wave]
360
- .map(v => `"${String(v).replace(/"/g, '""')}"`)
361
- .join(',')
362
- )
363
- Write(`${sessionFolder}/explore-wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
364
-
365
- // Execute explore wave
366
- console.log(` Exploring ${executableTasks.length} angles (wave ${wave})...`)
367
-
368
- spawn_agents_on_csv({
369
- csv_path: `${sessionFolder}/explore-wave-${wave}.csv`,
370
- id_column: "id",
371
- instruction: buildExploreInstruction(sessionFolder),
372
- max_concurrency: maxConcurrency,
373
- max_runtime_seconds: 300,
374
- output_csv_path: `${sessionFolder}/explore-wave-${wave}-results.csv`,
375
- output_schema: {
376
- type: "object",
377
- properties: {
378
- id: { type: "string" },
379
- status: { type: "string", enum: ["completed", "failed"] },
380
- findings: { type: "string" },
381
- key_files: { type: "array", items: { type: "string" } },
382
- error: { type: "string" }
383
- },
384
- required: ["id", "status", "findings"]
385
- }
386
- })
387
-
388
- // Merge results into explore.csv
389
- const waveResults = parseCsv(Read(`${sessionFolder}/explore-wave-${wave}-results.csv`))
390
- for (const result of waveResults) {
391
- updateMasterCsvRow(`${sessionFolder}/explore.csv`, result.id, {
392
- status: result.status,
393
- findings: result.findings || '',
394
- key_files: Array.isArray(result.key_files) ? result.key_files.join(';') : (result.key_files || ''),
395
- error: result.error || ''
396
- })
397
- }
398
-
399
- // Cleanup temporary wave CSV
400
- Bash(`rm -f "${sessionFolder}/explore-wave-${wave}.csv" "${sessionFolder}/explore-wave-${wave}-results.csv"`)
401
- }
402
- ```
403
-
404
- 2. **Explore Instruction Template**
405
-
406
- ```javascript
407
- function buildExploreInstruction(sessionFolder) {
408
- return `
409
- ## EXPLORATION ASSIGNMENT
410
-
411
- ### MANDATORY FIRST STEPS
412
- 1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
413
- 2. Read project context: .workflow/project-tech.json (if exists)
414
-
415
- ---
416
-
417
- ## Your Exploration
418
-
419
- **Exploration ID**: {id}
420
- **Angle**: {angle}
421
- **Description**: {description}
422
- **Focus**: {focus}
423
-
424
- ---
425
-
426
- ## Exploration Protocol
427
-
428
- 1. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared findings
429
- 2. **Explore**: Search the codebase from the {angle} perspective
430
- 3. **Discover**: Find relevant files, patterns, integration points, constraints
431
- 4. **Share discoveries**: Append findings to shared board:
432
- \`\`\`bash
433
- echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
434
- \`\`\`
435
- 5. **Report result**: Return JSON via report_agent_job_result
436
-
437
- ### What to Look For
438
- - Existing patterns and conventions to follow
439
- - Integration points and module boundaries
440
- - Dependencies and constraints
441
- - Test infrastructure and coverage
442
- - Risks and potential blockers
443
-
444
- ### Discovery Types to Share
445
- - \`code_pattern\`: {name, file, description} — reusable patterns found
446
- - \`integration_point\`: {file, description, exports[]} — module connection points
447
- - \`convention\`: {naming, imports, formatting} — code style conventions
448
- - \`tech_stack\`: {framework, version, config} — technology stack details
449
-
450
- ---
451
-
452
- ## Output (report_agent_job_result)
453
-
454
- Return JSON:
455
- {
456
- "id": "{id}",
457
- "status": "completed" | "failed",
458
- "findings": "Concise summary of ${'{'}angle{'}'} discoveries (max 800 chars)",
459
- "key_files": ["relevant/file1.ts", "relevant/file2.ts"],
460
- "error": ""
461
- }
462
- `
463
- }
464
- ```
465
-
466
- **Success Criteria**:
467
- - All explore angles executed
468
- - explore.csv updated with findings and key_files
469
- - discoveries.ndjson accumulated
470
-
471
- ---
472
-
473
- ### Phase 3: Synthesize & Plan → tasks.csv
474
-
475
- **Objective**: Read exploration findings, cross-reference, resolve conflicts, generate tasks.csv with context_from linking to E* rows.
476
-
477
- **Steps**:
478
-
479
- 1. **Synthesize Exploration Findings**
480
-
481
- ```javascript
482
- const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
483
- const completed = exploreCSV.filter(r => r.status === 'completed')
484
-
485
- // Cross-reference: find shared files across angles
486
- const fileRefs = {}
487
- completed.forEach(r => {
488
- (r.key_files || '').split(';').filter(Boolean).forEach(f => {
489
- if (!fileRefs[f]) fileRefs[f] = []
490
- fileRefs[f].push({ angle: r.angle, id: r.id })
491
- })
492
- })
493
- const sharedFiles = Object.entries(fileRefs).filter(([_, refs]) => refs.length > 1)
494
-
495
- // Build synthesis context for task decomposition
496
- const synthesisContext = completed.map(r =>
497
- `[${r.id}: ${r.angle}] ${r.findings}\n Key files: ${r.key_files || 'none'}`
498
- ).join('\n\n')
499
-
500
- const sharedFilesContext = sharedFiles.length > 0
501
- ? `\nShared files (referenced by multiple angles):\n${sharedFiles.map(([f, refs]) =>
502
- ` ${f} ${refs.map(r => r.id).join(', ')}`
503
- ).join('\n')}`
504
- : ''
505
- ```
506
-
507
- 2. **Decompose into Tasks**
508
-
509
- ```javascript
510
- Bash({
511
- command: `ccw cli -p "PURPOSE: Based on exploration findings, decompose requirement into 3-10 atomic execution tasks. Each task must include test cases, acceptance criteria, and link to relevant exploration findings.
512
- TASK:
513
- • Use exploration findings to inform task decomposition
514
- Each task must be self-contained with specific implementation instructions
515
- Link tasks to exploration rows via context_from (E1, E2, etc.)
516
- Define dependencies between tasks (T1 must finish before T2, etc.)
517
- For each task: define test cases, acceptance criteria, scope, hints, and execution directives
518
- Ensure same-wave tasks have non-overlapping scopes
519
- MODE: analysis
520
- CONTEXT: @**/*
521
- EXPECTED: JSON object with tasks array. Each task: {id: string, title: string, description: string, test: string, acceptance_criteria: string, scope: string, hints: string, execution_directives: string, deps: string[], context_from: string[]}.
522
- - id: T1, T2, etc.
523
- - description: what to implement (specific enough for an agent)
524
- - test: what tests to write (e.g. 'Unit test: X returns Y')
525
- - acceptance_criteria: what defines done (e.g. 'API returns 200')
526
- - scope: target glob (e.g. 'src/auth/**') — non-overlapping within same wave
527
- - hints: tips + ref files (format: 'tips || file1;file2')
528
- - execution_directives: verification commands (e.g. 'npm test --bail')
529
- - deps: task IDs that must complete first (T*)
530
- - context_from: explore (E*) and task (T*) IDs whose findings are needed
531
- CONSTRAINTS: 3-10 tasks | Atomic | No circular deps | Concrete test/acceptance_criteria | Non-overlapping scopes per wave
532
-
533
- EXPLORATION FINDINGS:
534
- ${synthesisContext}
535
- ${sharedFilesContext}
536
-
537
- REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
538
- run_in_background: true
539
- })
540
- // Wait for CLI completion → decomposedTasks[]
541
- ```
542
-
543
- 3. **Compute Waves & Write tasks.csv**
544
-
545
- ```javascript
546
- const { waveAssignment, maxWave } = computeWaves(decomposedTasks)
547
-
548
- const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error'
549
- const rows = decomposedTasks.map(task => {
550
- const wave = waveAssignment.get(task.id)
551
- return [
552
- task.id,
553
- csvEscape(task.title),
554
- csvEscape(task.description),
555
- csvEscape(task.test),
556
- csvEscape(task.acceptance_criteria),
557
- csvEscape(task.scope),
558
- csvEscape(task.hints),
559
- csvEscape(task.execution_directives),
560
- task.deps.join(';'),
561
- task.context_from.join(';'),
562
- wave,
563
- 'pending', '', '', '', '', ''
564
- ].map(cell => `"${String(cell).replace(/"/g, '""')}"`).join(',')
565
- })
566
-
567
- Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n'))
568
- ```
569
-
570
- 4. **User Validation** (skip if AUTO_YES)
571
-
572
- ```javascript
573
- if (!AUTO_YES) {
574
- console.log(`
575
- ## Execution Plan
576
-
577
- Explore: ${completed.length} angles completed
578
- Shared files: ${sharedFiles.length}
579
- Tasks: ${decomposedTasks.length} across ${maxWave} waves
580
-
581
- ${Array.from({length: maxWave}, (_, i) => i + 1).map(w => {
582
- const wt = decomposedTasks.filter(t => waveAssignment.get(t.id) === w)
583
- return `### Wave ${w} (${wt.length} tasks, concurrent)
584
- ${wt.map(t => ` - [${t.id}] ${t.title} (scope: ${t.scope}, from: ${t.context_from.join(';')})`).join('\n')}`
585
- }).join('\n')}
586
- `)
587
-
588
- const answer = AskUserQuestion({
589
- questions: [{
590
- question: `Proceed with ${decomposedTasks.length} tasks across ${maxWave} waves?`,
591
- header: "Confirm",
592
- multiSelect: false,
593
- options: [
594
- { label: "Execute", description: "Proceed with wave execution" },
595
- { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv then --continue` },
596
- { label: "Cancel", description: "Abort" }
597
- ]
598
- }]
599
- })
600
-
601
- if (answer.Confirm === "Modify") {
602
- console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $workflow-lite-plan --continue`)
603
- return
604
- } else if (answer.Confirm === "Cancel") {
605
- return
606
- }
607
- }
608
- ```
609
-
610
- **Success Criteria**:
611
- - tasks.csv created with context_from linking to E* rows
612
- - No circular dependencies
613
- - User approved (or AUTO_YES)
614
-
615
- ---
616
-
617
- ### Phase 4: Wave Execute (spawn_agents_on_csv)
618
-
619
- **Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave's prev_context is built from both explore.csv and tasks.csv.
620
-
621
- **Steps**:
622
-
623
- 1. **Wave Loop**
624
-
625
- ```javascript
626
- const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
627
- const failedIds = new Set()
628
- const skippedIds = new Set()
629
-
630
- for (let wave = 1; wave <= maxWave; wave++) {
631
- console.log(`\n## Wave ${wave}/${maxWave}\n`)
632
-
633
- // Re-read master CSV
634
- const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
635
- const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
636
-
637
- // Skip tasks whose deps failed
638
- const executableTasks = []
639
- for (const task of waveTasks) {
640
- const deps = (task.deps || '').split(';').filter(Boolean)
641
- if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
642
- skippedIds.add(task.id)
643
- updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, {
644
- status: 'skipped',
645
- error: 'Dependency failed or skipped'
646
- })
647
- console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
648
- continue
649
- }
650
- executableTasks.push(task)
651
- }
652
-
653
- if (executableTasks.length === 0) {
654
- console.log(` No executable tasks in wave ${wave}`)
655
- continue
656
- }
657
-
658
- // Build prev_context for each task (cross-phase: E* + T*)
659
- for (const task of executableTasks) {
660
- task.prev_context = buildPrevContext(task.context_from, exploreCSV, masterCsv)
661
- }
662
-
663
- // Write wave CSV
664
- const waveHeader = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context'
665
- const waveRows = executableTasks.map(t =>
666
- [t.id, t.title, t.description, t.test, t.acceptance_criteria, t.scope, t.hints, t.execution_directives, t.deps, t.context_from, t.wave, t.prev_context]
667
- .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
668
- .join(',')
669
- )
670
- Write(`${sessionFolder}/task-wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
671
-
672
- // Execute wave
673
- console.log(` Executing ${executableTasks.length} tasks (concurrency: ${maxConcurrency})...`)
674
-
675
- spawn_agents_on_csv({
676
- csv_path: `${sessionFolder}/task-wave-${wave}.csv`,
677
- id_column: "id",
678
- instruction: buildExecuteInstruction(sessionFolder, wave),
679
- max_concurrency: maxConcurrency,
680
- max_runtime_seconds: 600,
681
- output_csv_path: `${sessionFolder}/task-wave-${wave}-results.csv`,
682
- output_schema: {
683
- type: "object",
684
- properties: {
685
- id: { type: "string" },
686
- status: { type: "string", enum: ["completed", "failed"] },
687
- findings: { type: "string" },
688
- files_modified: { type: "array", items: { type: "string" } },
689
- tests_passed: { type: "boolean" },
690
- acceptance_met: { type: "string" },
691
- error: { type: "string" }
692
- },
693
- required: ["id", "status", "findings", "tests_passed"]
694
- }
695
- })
696
-
697
- // Merge results into master CSV
698
- const waveResults = parseCsv(Read(`${sessionFolder}/task-wave-${wave}-results.csv`))
699
- for (const result of waveResults) {
700
- updateMasterCsvRow(`${sessionFolder}/tasks.csv`, result.id, {
701
- status: result.status,
702
- findings: result.findings || '',
703
- files_modified: Array.isArray(result.files_modified) ? result.files_modified.join(';') : (result.files_modified || ''),
704
- tests_passed: String(result.tests_passed ?? ''),
705
- acceptance_met: result.acceptance_met || '',
706
- error: result.error || ''
707
- })
708
-
709
- if (result.status === 'failed') {
710
- failedIds.add(result.id)
711
- console.log(` [${result.id}] → FAILED: ${result.error}`)
712
- } else {
713
- console.log(` [${result.id}] → COMPLETED${result.tests_passed ? ' ✓tests' : ''}`)
714
- }
715
- }
716
-
717
- // Cleanup
718
- Bash(`rm -f "${sessionFolder}/task-wave-${wave}.csv" "${sessionFolder}/task-wave-${wave}-results.csv"`)
719
-
720
- console.log(` Wave ${wave} done: ${waveResults.filter(r => r.status === 'completed').length} completed, ${waveResults.filter(r => r.status === 'failed').length} failed`)
721
- }
722
- ```
723
-
724
- 2. **prev_context Builder (Cross-Phase)**
725
-
726
- The key function linking exploration context to execution:
727
-
728
- ```javascript
729
- function buildPrevContext(contextFrom, exploreCSV, tasksCSV) {
730
- if (!contextFrom) return 'No previous context available'
731
-
732
- const ids = contextFrom.split(';').filter(Boolean)
733
- const entries = []
734
-
735
- ids.forEach(id => {
736
- if (id.startsWith('E')) {
737
- // ← Look up in explore.csv (cross-phase link)
738
- const row = exploreCSV.find(r => r.id === id)
739
- if (row && row.status === 'completed' && row.findings) {
740
- entries.push(`[Explore ${row.angle}] ${row.findings}`)
741
- if (row.key_files) entries.push(` Key files: ${row.key_files}`)
742
- }
743
- } else if (id.startsWith('T')) {
744
- // Look up in tasks.csv (same-phase link)
745
- const row = tasksCSV.find(r => r.id === id)
746
- if (row && row.status === 'completed' && row.findings) {
747
- entries.push(`[Task ${row.id}: ${row.title}] ${row.findings}`)
748
- if (row.files_modified) entries.push(` Modified: ${row.files_modified}`)
749
- }
750
- }
751
- })
752
-
753
- return entries.length > 0 ? entries.join('\n') : 'No previous context available'
754
- }
755
- ```
756
-
757
- 3. **Execute Instruction Template**
758
-
759
- ```javascript
760
- function buildExecuteInstruction(sessionFolder, wave) {
761
- return `
762
- ## TASK ASSIGNMENT
763
-
764
- ### MANDATORY FIRST STEPS
765
- 1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
766
- 2. Read project context: .workflow/project-tech.json (if exists)
767
-
768
- ---
769
-
770
- ## Your Task
771
-
772
- **Task ID**: {id}
773
- **Title**: {title}
774
- **Description**: {description}
775
- **Scope**: {scope}
776
-
777
- ### Implementation Hints & Reference Files
778
- {hints}
779
-
780
- > Format: \`tips text || file1;file2\`. Read ALL reference files (after ||) before starting. Apply tips (before ||) as guidance.
781
-
782
- ### Execution Directives
783
- {execution_directives}
784
-
785
- > Commands to run for verification, tool restrictions, or environment requirements.
786
-
787
- ### Test Cases
788
- {test}
789
-
790
- ### Acceptance Criteria
791
- {acceptance_criteria}
792
-
793
- ### Previous Context (from exploration and predecessor tasks)
794
- {prev_context}
795
-
796
- ---
797
-
798
- ## Execution Protocol
799
-
800
- 1. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
801
- 2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings
802
- 3. **Use context**: Apply previous tasks' findings from prev_context above
803
- 4. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
804
- 5. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
805
- 6. **Execute**: Implement the task as described
806
- 7. **Write tests**: Implement the test cases defined above
807
- 8. **Run directives**: Execute commands from {execution_directives} to verify your work
808
- 9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion
809
- 10. **Share discoveries**: Append exploration findings to shared board:
810
- \`\`\`bash
811
- echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
812
- \`\`\`
813
- 11. **Report result**: Return JSON via report_agent_job_result
814
-
815
- ### Discovery Types to Share
816
- - \`code_pattern\`: {name, file, description} reusable patterns found
817
- - \`integration_point\`: {file, description, exports[]} — module connection points
818
- - \`convention\`: {naming, imports, formatting} — code style conventions
819
- - \`blocker\`: {issue, severity, impact} — blocking issues encountered
820
-
821
- ---
822
-
823
- ## Output (report_agent_job_result)
824
-
825
- Return JSON:
826
- {
827
- "id": "{id}",
828
- "status": "completed" | "failed",
829
- "findings": "Key discoveries and implementation notes (max 500 chars)",
830
- "files_modified": ["path1", "path2"],
831
- "tests_passed": true | false,
832
- "acceptance_met": "Summary of which acceptance criteria were met/unmet",
833
- "error": ""
834
- }
835
-
836
- **IMPORTANT**: Set status to "completed" ONLY if:
837
- - All test cases pass
838
- - All acceptance criteria are met
839
- Otherwise set status to "failed" with details in error field.
840
- `
841
- }
842
- ```
843
-
844
- 4. **Master CSV Update Helper**
845
-
846
- ```javascript
847
- function updateMasterCsvRow(csvPath, taskId, updates) {
848
- const content = Read(csvPath)
849
- const lines = content.split('\n')
850
- const header = lines[0].split(',')
851
-
852
- for (let i = 1; i < lines.length; i++) {
853
- const cells = parseCsvLine(lines[i])
854
- if (cells[0] === taskId || cells[0] === `"${taskId}"`) {
855
- for (const [col, val] of Object.entries(updates)) {
856
- const colIdx = header.indexOf(col)
857
- if (colIdx >= 0) {
858
- cells[colIdx] = `"${String(val).replace(/"/g, '""')}"`
859
- }
860
- }
861
- lines[i] = cells.join(',')
862
- break
863
- }
864
- }
865
-
866
- Write(csvPath, lines.join('\n'))
867
- }
868
- ```
869
-
870
- **Success Criteria**:
871
- - All waves executed in order
872
- - Each wave's results merged into master CSV before next wave starts
873
- - Dependent tasks skipped when predecessor failed
874
- - discoveries.ndjson accumulated across all phases
875
-
876
- ---
877
-
878
- ### Phase 5: Results Aggregation
879
-
880
- **Objective**: Generate final results and human-readable report.
881
-
882
- **Steps**:
883
-
884
- 1. **Export results.csv**
885
-
886
- ```javascript
887
- const masterCsv = Read(`${sessionFolder}/tasks.csv`)
888
- Write(`${sessionFolder}/results.csv`, masterCsv)
889
- ```
890
-
891
- 2. **Generate context.md**
892
-
893
- ```javascript
894
- const finalTasks = parseCsv(masterCsv)
895
- const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
896
- const completed = finalTasks.filter(t => t.status === 'completed')
897
- const failed = finalTasks.filter(t => t.status === 'failed')
898
- const skipped = finalTasks.filter(t => t.status === 'skipped')
899
-
900
- const contextContent = `# Lite Planex Execution Report
901
-
902
- **Session**: ${sessionId}
903
- **Requirement**: ${requirement}
904
- **Completed**: ${getUtc8ISOString()}
905
- **Waves**: ${maxWave} | **Concurrency**: ${maxConcurrency}
906
-
907
- ---
908
-
909
- ## Summary
910
-
911
- | Metric | Count |
912
- |--------|-------|
913
- | Explore Angles | ${exploreCSV.length} |
914
- | Total Tasks | ${finalTasks.length} |
915
- | Completed | ${completed.length} |
916
- | Failed | ${failed.length} |
917
- | Skipped | ${skipped.length} |
918
- | Waves | ${maxWave} |
919
-
920
- ---
921
-
922
- ## Exploration Results
923
-
924
- ${exploreCSV.map(e => `### ${e.id}: ${e.angle} (${e.status})
925
- ${e.findings || 'N/A'}
926
- Key files: ${e.key_files || 'none'}`).join('\n\n')}
927
-
928
- ---
929
-
930
- ## Task Results
931
-
932
- ${finalTasks.map(t => `### ${t.id}: ${t.title} (${t.status})
933
-
934
- | Field | Value |
935
- |-------|-------|
936
- | Wave | ${t.wave} |
937
- | Scope | ${t.scope || 'none'} |
938
- | Dependencies | ${t.deps || 'none'} |
939
- | Context From | ${t.context_from || 'none'} |
940
- | Tests Passed | ${t.tests_passed || 'N/A'} |
941
- | Acceptance Met | ${t.acceptance_met || 'N/A'} |
942
- | Error | ${t.error || 'none'} |
943
-
944
- **Description**: ${t.description}
945
-
946
- **Test Cases**: ${t.test || 'N/A'}
947
-
948
- **Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'}
949
-
950
- **Hints**: ${t.hints || 'N/A'}
951
-
952
- **Execution Directives**: ${t.execution_directives || 'N/A'}
953
-
954
- **Findings**: ${t.findings || 'N/A'}
955
-
956
- **Files Modified**: ${t.files_modified || 'none'}`).join('\n\n---\n\n')}
957
-
958
- ---
959
-
960
- ## All Modified Files
961
-
962
- ${[...new Set(finalTasks.flatMap(t => (t.files_modified || '').split(';')).filter(Boolean))].map(f => '- ' + f).join('\n') || 'None'}
963
- `
964
-
965
- Write(`${sessionFolder}/context.md`, contextContent)
966
- ```
967
-
968
- 3. **Display Summary**
969
-
970
- ```javascript
971
- console.log(`
972
- ## Lite Planex Complete
973
-
974
- - **Session**: ${sessionId}
975
- - **Explore**: ${exploreCSV.filter(r => r.status === 'completed').length}/${exploreCSV.length} angles
976
- - **Tasks**: ${completed.length}/${finalTasks.length} completed, ${failed.length} failed, ${skipped.length} skipped
977
- - **Waves**: ${maxWave}
978
-
979
- **Results**: ${sessionFolder}/results.csv
980
- **Report**: ${sessionFolder}/context.md
981
- **Discoveries**: ${sessionFolder}/discoveries.ndjson
982
- `)
983
- ```
984
-
985
- 4. **Offer Next Steps** (skip if AUTO_YES)
986
-
987
- ```javascript
988
- if (!AUTO_YES && failed.length > 0) {
989
- const answer = AskUserQuestion({
990
- questions: [{
991
- question: `${failed.length} tasks failed. Next action?`,
992
- header: "Next Step",
993
- multiSelect: false,
994
- options: [
995
- { label: "Retry Failed", description: `Re-execute ${failed.length} failed tasks with updated context` },
996
- { label: "View Report", description: "Display context.md" },
997
- { label: "Done", description: "Complete session" }
998
- ]
999
- }]
1000
- })
1001
-
1002
- if (answer['Next Step'] === "Retry Failed") {
1003
- for (const task of failed) {
1004
- updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, { status: 'pending', error: '' })
1005
- }
1006
- for (const task of skipped) {
1007
- updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, { status: 'pending', error: '' })
1008
- }
1009
- // Re-execute Phase 4
1010
- } else if (answer['Next Step'] === "View Report") {
1011
- console.log(Read(`${sessionFolder}/context.md`))
1012
- }
1013
- }
1014
- ```
1015
-
1016
- **Success Criteria**:
1017
- - results.csv exported
1018
- - context.md generated with full field coverage
1019
- - Summary displayed to user
1020
-
1021
- ---
1022
-
1023
- ## Wave Computation (Kahn's BFS)
1024
-
1025
- ```javascript
1026
- function computeWaves(tasks) {
1027
- const taskMap = new Map(tasks.map(t => [t.id, t]))
1028
- const inDegree = new Map(tasks.map(t => [t.id, 0]))
1029
- const adjList = new Map(tasks.map(t => [t.id, []]))
1030
-
1031
- for (const task of tasks) {
1032
- for (const dep of task.deps) {
1033
- if (taskMap.has(dep)) {
1034
- adjList.get(dep).push(task.id)
1035
- inDegree.set(task.id, inDegree.get(task.id) + 1)
1036
- }
1037
- }
1038
- }
1039
-
1040
- const queue = []
1041
- const waveAssignment = new Map()
1042
-
1043
- for (const [id, deg] of inDegree) {
1044
- if (deg === 0) {
1045
- queue.push([id, 1])
1046
- waveAssignment.set(id, 1)
1047
- }
1048
- }
1049
-
1050
- let maxWave = 1
1051
- let idx = 0
1052
- while (idx < queue.length) {
1053
- const [current, depth] = queue[idx++]
1054
- for (const next of adjList.get(current)) {
1055
- const newDeg = inDegree.get(next) - 1
1056
- inDegree.set(next, newDeg)
1057
- const nextDepth = Math.max(waveAssignment.get(next) || 0, depth + 1)
1058
- waveAssignment.set(next, nextDepth)
1059
- if (newDeg === 0) {
1060
- queue.push([next, nextDepth])
1061
- maxWave = Math.max(maxWave, nextDepth)
1062
- }
1063
- }
1064
- }
1065
-
1066
- for (const task of tasks) {
1067
- if (!waveAssignment.has(task.id)) {
1068
- throw new Error(`Circular dependency detected involving task ${task.id}`)
1069
- }
1070
- }
1071
-
1072
- return { waveAssignment, maxWave }
1073
- }
1074
- ```
1075
-
1076
- ---
1077
-
1078
- ## Shared Discovery Board Protocol
1079
-
1080
- All agents across all phases share `discoveries.ndjson`. This eliminates redundant codebase exploration.
1081
-
1082
- ```jsonl
1083
- {"ts":"2026-02-28T10:00:00+08:00","worker":"E1","type":"code_pattern","data":{"name":"repository-pattern","file":"src/repos/Base.ts","description":"Abstract CRUD repository"}}
1084
- {"ts":"2026-02-28T10:01:00+08:00","worker":"T2","type":"integration_point","data":{"file":"src/auth/index.ts","description":"Auth module entry","exports":["authenticate","authorize"]}}
1085
- ```
1086
-
1087
- **Types**: `code_pattern`, `integration_point`, `convention`, `blocker`, `tech_stack`, `test_command`
1088
- **Rules**: Read first → write immediately → deduplicate → append-only
1089
-
1090
- ---
1091
-
1092
- ## Error Handling
1093
-
1094
- | Error | Resolution |
1095
- |-------|------------|
1096
- | Explore agent failure | Mark as failed in explore.csv, exclude from planning |
1097
- | All explores failed | Fallback: plan directly from requirement without exploration |
1098
- | Circular dependency | Abort wave computation, report cycle |
1099
- | Execute agent timeout | Mark as failed in results, continue with wave |
1100
- | Execute agent failed | Mark as failed, skip dependent tasks in later waves |
1101
- | CSV parse error | Validate CSV format before execution, show line number |
1102
- | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
1103
- | Continue mode: no session | List available sessions, prompt user to select |
1104
-
1105
- ---
1106
-
1107
- ## Core Rules
1108
-
1109
- 1. **Explore Before Execute**: Phase 2 completes before Phase 4 starts
1110
- 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
1111
- 3. **CSV is Source of Truth**: Master CSVs hold all state — always read before wave, always write after
1112
- 4. **Cross-Phase Context**: prev_context built from both explore.csv (E*) and tasks.csv (T*), not from memory
1113
- 5. **E* T* Linking**: tasks.csv `context_from` references explore.csv rows for cross-phase context
1114
- 6. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
1115
- 7. **Skip on Failure**: If a dependency failed, skip the dependent task (cascade)
1116
- 8. **Cleanup Temp Files**: Remove wave CSVs after results are merged
1117
- 9. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
1118
-
1119
- ---
1120
-
1121
- ## Best Practices
1122
-
1123
- 1. **Exploration Angles**: 1 for simple, 3-4 for complex; avoid redundant angles
1124
- 2. **Context Linking**: Link every task to at least one explore row (E*) — exploration was done for a reason
1125
- 3. **Task Granularity**: 3-10 tasks optimal; too many = overhead, too few = no parallelism
1126
- 4. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism
1127
- 5. **Specific Descriptions**: Agent sees only its CSV row + prev_context make description self-contained
1128
- 6. **Non-Overlapping Scopes**: Same-wave tasks must not write to the same files
1129
- 7. **Concurrency Tuning**: `-c 1` for serial (max context sharing); `-c 8` for I/O-bound tasks
1130
-
1131
- ---
1132
-
1133
- ## Usage Recommendations
1134
-
1135
- | Scenario | Recommended Approach |
1136
- |----------|---------------------|
1137
- | Complex feature (unclear architecture) | `$workflow-lite-plan` — explore first, then plan |
1138
- | Simple known-pattern task | `$workflow-lite-plan` skip exploration, direct execution |
1139
- | Independent parallel tasks | `$workflow-lite-plan -c 8` — single wave, max parallelism |
1140
- | Diamond dependency (A→B,C→D) | `$workflow-lite-plan` — 3 waves with context propagation |
1141
- | Unknown codebase | `$workflow-lite-plan` — exploration phase is essential |
1
+ ---
2
+ name: workflow-lite-plan
3
+ description: Explore-first wave pipeline. Decomposes requirement into exploration angles, runs wave exploration via spawn_agents_on_csv, synthesizes findings into execution tasks with cross-phase context linking (E*→T*), then wave-executes via spawn_agents_on_csv.
4
+ argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] \"requirement description\""
5
+ allowed-tools: spawn_agents_on_csv, Read, Write, Edit, Bash, Glob, Grep, request_user_input
6
+ ---
7
+
8
+ ## Auto Mode
9
+
10
+ When `--yes` or `-y`: Auto-confirm decomposition, skip interactive validation, use defaults.
11
+
12
+ # Workflow Lite Planex
13
+
14
+ ## Usage
15
+
16
+ ```bash
17
+ $workflow-lite-plan "Implement user authentication with OAuth, JWT, and 2FA"
18
+ $workflow-lite-plan -c 4 "Refactor payment module with Stripe and PayPal"
19
+ $workflow-lite-plan -y "Build notification system with email and SMS"
20
+ $workflow-lite-plan --continue "auth-20260228"
21
+ ```
22
+
23
+ **Flags**:
24
+ - `-y, --yes`: Skip all confirmations (auto mode)
25
+ - `-c, --concurrency N`: Max concurrent agents within each wave (default: 4)
26
+ - `--continue`: Resume existing session
27
+
28
+ **Output Directory**: `.workflow/.lite-plan/{session-id}/`
29
+
30
+ ---
31
+
32
+ ## Overview
33
+
34
+ Explore-first wave-based pipeline using `spawn_agents_on_csv`. Two-stage CSV execution: **explore.csv** (codebase discovery) → **tasks.csv** (implementation), with cross-phase context propagation via `context_from` linking (`E*` → `T*`).
35
+
36
+ **Core workflow**: Decompose → **[Confirm]** → Wave Explore → Synthesize & Plan → **[Confirm]** → Wave Execute → Aggregate
37
+
38
+ ```
39
+ ┌──────────────────────────────────────────────────────────────────────┐
40
+ │ WORKFLOW LITE PLANEX │
41
+ ├──────────────────────────────────────────────────────────────────────┤
42
+ │ │
43
+ │ Phase 1: Requirement → explore.csv │
44
+ │ ├─ Analyze complexity → select exploration angles (1-4) │
45
+ │ ├─ Generate explore.csv (1 row per angle) │
46
+ │ └─ ⛔ MANDATORY: User validates (skip ONLY if -y)                    │
47
+ │ │
48
+ │ Phase 2: Wave Explore (spawn_agents_on_csv) │
49
+ │ ├─ For each explore wave: │
50
+ │ │ ├─ Build wave CSV from explore.csv │
51
+ │ │ ├─ spawn_agents_on_csv(explore instruction template) │
52
+ │ │ └─ Merge findings/key_files into explore.csv │
53
+ │ └─ discoveries.ndjson shared across agents │
54
+ │ │
55
+ │ Phase 3: Synthesize & Plan → tasks.csv │
56
+ │ ├─ Read all explore findings → cross-reference │
57
+ │ ├─ Resolve conflicts between angles │
58
+ │ ├─ Decompose into execution tasks with context_from: E*;T* │
59
+ │ ├─ Compute dependency waves (topological sort) │
60
+ │ └─ ⛔ MANDATORY: User validates (skip ONLY if -y)                    │
61
+ │ │
62
+ │ Phase 4: Wave Execute (spawn_agents_on_csv) │
63
+ │ ├─ For each task wave: │
64
+ │ │ ├─ Build prev_context from explore.csv + tasks.csv │
65
+ │ │ ├─ Build wave CSV with prev_context column │
66
+ │ │ ├─ spawn_agents_on_csv(execute instruction template) │
67
+ │ │ └─ Merge results into tasks.csv │
68
+ │ └─ discoveries.ndjson carries across all waves │
69
+ │ │
70
+ │ Phase 5: Aggregate │
71
+ │ ├─ Export results.csv │
72
+ │ ├─ Generate context.md with all findings │
73
+ │ └─ Display summary │
74
+ │ │
75
+ └──────────────────────────────────────────────────────────────────────┘
76
+ ```
77
+
78
+ ---
79
+
80
+ ## Context Flow
81
+
82
+ ```
83
+ explore.csv tasks.csv
84
+ ┌──────────┐ ┌──────────┐
85
+ │ E1: arch │──────────→│ T1: setup│ context_from: E1;E2
86
+ │ findings │ │ prev_ctx │← E1+E2 findings
87
+ ├──────────┤ ├──────────┤
88
+ │ E2: deps │──────────→│ T2: impl │ context_from: E1;T1
89
+ │ findings │ │ prev_ctx │← E1+T1 findings
90
+ ├──────────┤ ├──────────┤
91
+ │ E3: test │──┐ ┌───→│ T3: test │ context_from: E3;T2
92
+ │ findings │ └───┘ │ prev_ctx │← E3+T2 findings
93
+ └──────────┘ └──────────┘
94
+
95
+ Two context channels:
96
+ 1. Directed: context_from → prev_context (CSV findings lookup)
97
+ 2. Broadcast: discoveries.ndjson (append-only shared board)
98
+
99
+ context_from prefix: E* → explore.csv lookup, T* → tasks.csv lookup
100
+ ```
101
+
102
+ ---
103
+
104
+ ## CSV Schemas
105
+
106
+ ### explore.csv
107
+
108
+ ```csv
109
+ id,angle,description,focus,deps,wave,status,findings,key_files,error
110
+ "E1","architecture","Explore codebase architecture for: auth system","architecture","","1","pending","","",""
111
+ "E2","dependencies","Explore dependency landscape for: auth system","dependencies","","1","pending","","",""
112
+ "E3","testing","Explore test infrastructure for: auth system","testing","","1","pending","","",""
113
+ ```
114
+
115
+ **Columns**:
116
+
117
+ | Column | Phase | Description |
118
+ |--------|-------|-------------|
119
+ | `id` | Input | Exploration ID: E1, E2, ... |
120
+ | `angle` | Input | Exploration angle name |
121
+ | `description` | Input | What to explore from this angle |
122
+ | `focus` | Input | Keywords and focus areas |
123
+ | `deps` | Input | Semicolon-separated dep IDs (usually empty — all wave 1) |
124
+ | `wave` | Computed | Wave number (usually 1 for all explorations) |
125
+ | `status` | Output | `pending` → `completed` / `failed` |
126
+ | `findings` | Output | Discoveries (max 800 chars) |
127
+ | `key_files` | Output | Relevant files (semicolon-separated) |
128
+ | `error` | Output | Error message if failed |
129
+
130
+ ### tasks.csv
131
+
132
+ ```csv
133
+ id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error
134
+ "T1","Setup types","Create type definitions","Verify types compile with tsc","All interfaces exported","src/types/**","Follow existing patterns || src/types/index.ts","tsc --noEmit","","E1;E2","1","pending","","","","",""
135
+ "T2","Implement core","Implement core auth logic","Unit test: login returns token","Login flow works end-to-end","src/auth/**","Reuse BaseService || src/services/Base.ts","npm test -- --grep auth","T1","E1;E2;T1","2","pending","","","","",""
136
+ ```
137
+
138
+ **Columns**:
139
+
140
+ | Column | Phase | Description |
141
+ |--------|-------|-------------|
142
+ | `id` | Input | Task ID: T1, T2, ... |
143
+ | `title` | Input | Short task title |
144
+ | `description` | Input | Self-contained task description — what to implement |
145
+ | `test` | Input | Test cases: what tests to write and how to verify (unit/integration/edge) |
146
+ | `acceptance_criteria` | Input | Measurable conditions that define "done" |
147
+ | `scope` | Input | Target file/directory glob — constrains agent write area, prevents cross-task file conflicts |
148
+ | `hints` | Input | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. Either part is optional |
149
+ | `execution_directives` | Input | Execution constraints: commands to run for verification, tool restrictions |
150
+ | `deps` | Input | Dependency task IDs: T1;T2 (semicolon-separated) |
151
+ | `context_from` | Input | Context source IDs: **E1;E2;T1** — `E*` lookups in explore.csv, `T*` in tasks.csv |
152
+ | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
153
+ | `status` | Output | `pending` → `completed` / `failed` / `skipped` |
154
+ | `findings` | Output | Execution findings (max 500 chars) |
155
+ | `files_modified` | Output | Semicolon-separated file paths |
156
+ | `tests_passed` | Output | Whether all defined test cases passed (true/false) |
157
+ | `acceptance_met` | Output | Summary of which acceptance criteria were met/unmet |
158
+ | `error` | Output | Error message if failed (empty if success) |
159
+
160
+ ### Per-Wave CSV (Temporary)
161
+
162
+ Each wave generates a temporary CSV with an extra `prev_context` column.
163
+
164
+ **Explore wave**: `explore-wave-{N}.csv` — same columns as explore.csv (no prev_context, explorations are independent).
165
+
166
+ **Execute wave**: `task-wave-{N}.csv` — all task columns + `prev_context`:
167
+
168
+ ```csv
169
+ id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context
170
+ "T2","Implement core","Implement core auth logic","Unit test: login returns token","Login flow works end-to-end","src/auth/**","Reuse BaseService || src/services/Base.ts","npm test -- --grep auth","T1","E1;E2;T1","2","[Explore architecture] Found BaseService pattern in src/services/\n[Task T1] Created types at src/types/auth.ts"
171
+ ```
172
+
173
+ The `prev_context` column is built from `context_from` by looking up completed rows' `findings` in both explore.csv (`E*`) and tasks.csv (`T*`).
174
+
175
+ ---
176
+
177
+ ## Output Artifacts
178
+
179
+ | File | Purpose | Lifecycle |
180
+ |------|---------|-----------|
181
+ | `explore.csv` | Exploration state — angles with findings/key_files | Updated after Phase 2 |
182
+ | `tasks.csv` | Execution state — tasks with results | Updated after each wave in Phase 4 |
183
+ | `explore-wave-{N}.csv` | Per-wave explore input (temporary) | Created before wave, deleted after |
184
+ | `task-wave-{N}.csv` | Per-wave execute input (temporary) | Created before wave, deleted after |
185
+ | `results.csv` | Final results export | Created in Phase 5 |
186
+ | `discoveries.ndjson` | Shared discovery board (all agents, all phases) | Append-only |
187
+ | `context.md` | Human-readable execution report | Created in Phase 5 |
188
+
189
+ ---
190
+
191
+ ## Session Structure
192
+
193
+ ```
194
+ .workflow/.lite-plan/{session-id}/
195
+ ├── explore.csv # Exploration state
196
+ ├── tasks.csv # Execution state
197
+ ├── results.csv # Final results export
198
+ ├── discoveries.ndjson # Shared discovery board
199
+ ├── context.md # Full context summary
200
+ ├── explore-wave-{N}.csv # Temporary per-wave explore input (cleaned up)
201
+ └── task-wave-{N}.csv # Temporary per-wave execute input (cleaned up)
202
+ ```
203
+
204
+ ---
205
+
206
+ ## Implementation
207
+
208
+ ### Session Initialization
209
+
210
+ ```javascript
211
+ const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
212
+
213
+ // Parse flags
214
+ const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
215
+ const continueMode = $ARGUMENTS.includes('--continue')
216
+ const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
217
+ const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
218
+
219
+ const requirement = $ARGUMENTS
220
+ .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
221
+ .trim()
222
+
223
+ const slug = requirement.toLowerCase()
224
+ .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
225
+ .substring(0, 40)
226
+ const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
227
+ let sessionId = `wpp-${slug}-${dateStr}`
228
+ let sessionFolder = `.workflow/.lite-plan/${sessionId}`
229
+
230
+ // Continue mode: find existing session
231
+ if (continueMode) {
232
+ const existing = Bash(`ls -t .workflow/.lite-plan/ 2>/dev/null | head -1`).trim()
233
+ if (existing) {
234
+ sessionId = existing
235
+ sessionFolder = `.workflow/.lite-plan/${sessionId}`
236
+ // Check which phase to resume: if tasks.csv exists → Phase 4, else → Phase 2
237
+ }
238
+ }
239
+
240
+ Bash(`mkdir -p ${sessionFolder}`)
241
+ ```
242
+
243
+ ---
244
+
245
+ ### Phase 1: Requirement → explore.csv
246
+
247
+ **Objective**: Analyze requirement complexity, select exploration angles, generate explore.csv.
248
+
249
+ **Steps**:
250
+
251
+ 1. **Analyze & Decompose**
252
+
253
+ ```javascript
254
+ Bash({
255
+ command: `ccw cli -p "PURPOSE: Analyze requirement complexity and select 1-4 exploration angles for codebase discovery before implementation.
256
+ TASK:
257
+ • Classify requirement type (feature/bugfix/refactor/security/performance)
258
+ • Assess complexity (Low: 1 angle, Medium: 2-3, High: 3-4)
259
+ • Select exploration angles from: architecture, dependencies, integration-points, testing, patterns, security, performance, state-management, error-handling, edge-cases
260
+ • For each angle, define focus keywords and what to discover
261
+ MODE: analysis
262
+ CONTEXT: @**/*
263
+ EXPECTED: JSON object: {type: string, complexity: string, angles: [{id: string, angle: string, description: string, focus: string}]}. Each angle id = E1, E2, etc.
264
+ CONSTRAINTS: 1-4 angles | Angles must be distinct | Each angle must have clear focus
265
+
266
+ REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
267
+ run_in_background: true
268
+ })
269
+ // Wait for CLI completion via hook callback
270
+ // Parse JSON from CLI output → { type, complexity, angles[] }
271
+ ```
272
+
273
+ 2. **Generate explore.csv**
274
+
275
+ ```javascript
276
+ const header = 'id,angle,description,focus,deps,wave,status,findings,key_files,error'
277
+ const rows = angles.map(a =>
278
+ [a.id, a.angle, a.description, a.focus, '', '1', 'pending', '', '', '']
279
+ .map(v => `"${String(v).replace(/"/g, '""')}"`)
280
+ .join(',')
281
+ )
282
+
283
+ Write(`${sessionFolder}/explore.csv`, [header, ...rows].join('\n'))
284
+ ```
285
+
286
+ 3. **User Validation — MANDATORY CONFIRMATION GATE** (skip ONLY if AUTO_YES)
287
+
288
+ **CRITICAL: You MUST stop here and wait for user confirmation before proceeding to Phase 2. DO NOT skip this step. DO NOT auto-proceed.**
289
+
290
+ ```javascript
291
+ if (!AUTO_YES) {
292
+ console.log(`\n## Exploration Plan (${angles.length} angles)\n`)
293
+ angles.forEach(a => console.log(` - [${a.id}] ${a.angle}: ${a.focus}`))
294
+
295
+ const answer = request_user_input({
296
+ questions: [{
297
+ question: "Approve exploration angles?",
298
+ header: "Validation",
299
+ options: [
300
+ { label: "Approve", description: "Proceed with wave exploration" },
301
+ { label: "Modify", description: `Edit ${sessionFolder}/explore.csv manually, then --continue` },
302
+ { label: "Cancel", description: "Abort" }
303
+ ]
304
+ }]
305
+ })
306
+
307
+ if (answer.Validation === "Modify") {
308
+ console.log(`Edit: ${sessionFolder}/explore.csv\nResume: $workflow-lite-plan --continue`)
309
+ return
310
+ } else if (answer.Validation === "Cancel") {
311
+ return
312
+ }
313
+ }
314
+ ```
315
+
316
+ **Success Criteria**:
317
+ - explore.csv created with 1-4 exploration angles
318
+ - User approved (or AUTO_YES)
319
+
320
+ ---
321
+
322
+ ### Phase 2: Wave Explore (spawn_agents_on_csv)
323
+
324
+ **Objective**: Execute exploration via `spawn_agents_on_csv`. Each angle produces findings and key_files.
325
+
326
+ **Steps**:
327
+
328
+ 1. **Explore Wave Loop**
329
+
330
+ ```javascript
331
+ const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
332
+ const maxExploreWave = Math.max(...exploreCSV.map(r => parseInt(r.wave)))
333
+
334
+ for (let wave = 1; wave <= maxExploreWave; wave++) {
335
+ const waveTasks = exploreCSV.filter(r =>
336
+ parseInt(r.wave) === wave && r.status === 'pending'
337
+ )
338
+ if (waveTasks.length === 0) continue
339
+
340
+ // Skip rows with failed dependencies
341
+ const executableTasks = []
342
+ for (const task of waveTasks) {
343
+ const deps = (task.deps || '').split(';').filter(Boolean)
344
+ if (deps.some(d => {
345
+ const dep = exploreCSV.find(r => r.id === d)
346
+ return !dep || dep.status !== 'completed'
347
+ })) {
348
+ task.status = 'skipped'
349
+ task.error = 'Dependency failed/skipped'
350
+ continue
351
+ }
352
+ executableTasks.push(task)
353
+ }
354
+
355
+ if (executableTasks.length === 0) continue
356
+
357
+ // Write explore wave CSV
358
+ const waveHeader = 'id,angle,description,focus,deps,wave'
359
+ const waveRows = executableTasks.map(t =>
360
+ [t.id, t.angle, t.description, t.focus, t.deps, t.wave]
361
+ .map(v => `"${String(v).replace(/"/g, '""')}"`)
362
+ .join(',')
363
+ )
364
+ Write(`${sessionFolder}/explore-wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
365
+
366
+ // Execute explore wave
367
+ console.log(` Exploring ${executableTasks.length} angles (wave ${wave})...`)
368
+
369
+ spawn_agents_on_csv({
370
+ csv_path: `${sessionFolder}/explore-wave-${wave}.csv`,
371
+ id_column: "id",
372
+ instruction: buildExploreInstruction(sessionFolder),
373
+ max_concurrency: maxConcurrency,
374
+ max_runtime_seconds: 300,
375
+ output_csv_path: `${sessionFolder}/explore-wave-${wave}-results.csv`,
376
+ output_schema: {
377
+ type: "object",
378
+ properties: {
379
+ id: { type: "string" },
380
+ status: { type: "string", enum: ["completed", "failed"] },
381
+ findings: { type: "string" },
382
+ key_files: { type: "array", items: { type: "string" } },
383
+ error: { type: "string" }
384
+ },
385
+ required: ["id", "status", "findings"]
386
+ }
387
+ })
388
+
389
+ // Merge results into explore.csv
390
+ const waveResults = parseCsv(Read(`${sessionFolder}/explore-wave-${wave}-results.csv`))
391
+ for (const result of waveResults) {
392
+ updateMasterCsvRow(`${sessionFolder}/explore.csv`, result.id, {
393
+ status: result.status,
394
+ findings: result.findings || '',
395
+ key_files: Array.isArray(result.key_files) ? result.key_files.join(';') : (result.key_files || ''),
396
+ error: result.error || ''
397
+ })
398
+ }
399
+
400
+ // Cleanup temporary wave CSV
401
+ Bash(`rm -f "${sessionFolder}/explore-wave-${wave}.csv" "${sessionFolder}/explore-wave-${wave}-results.csv"`)
402
+ }
403
+ ```
404
+
405
+ 2. **Explore Instruction Template**
406
+
407
+ ```javascript
408
+ function buildExploreInstruction(sessionFolder) {
409
+ return `
410
+ ## EXPLORATION ASSIGNMENT
411
+
412
+ ### MANDATORY FIRST STEPS
413
+ 1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
414
+ 2. Read project context: .workflow/project-tech.json (if exists)
415
+
416
+ ---
417
+
418
+ ## Your Exploration
419
+
420
+ **Exploration ID**: {id}
421
+ **Angle**: {angle}
422
+ **Description**: {description}
423
+ **Focus**: {focus}
424
+
425
+ ---
426
+
427
+ ## Exploration Protocol
428
+
429
+ 1. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared findings
430
+ 2. **Explore**: Search the codebase from the {angle} perspective
431
+ 3. **Discover**: Find relevant files, patterns, integration points, constraints
432
+ 4. **Share discoveries**: Append findings to shared board:
433
+ \`\`\`bash
434
+ echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
435
+ \`\`\`
436
+ 5. **Report result**: Return JSON via report_agent_job_result
437
+
438
+ ### What to Look For
439
+ - Existing patterns and conventions to follow
440
+ - Integration points and module boundaries
441
+ - Dependencies and constraints
442
+ - Test infrastructure and coverage
443
+ - Risks and potential blockers
444
+
445
+ ### Discovery Types to Share
446
+ - \`code_pattern\`: {name, file, description} — reusable patterns found
447
+ - \`integration_point\`: {file, description, exports[]} — module connection points
448
+ - \`convention\`: {naming, imports, formatting} — code style conventions
449
+ - \`tech_stack\`: {framework, version, config} — technology stack details
450
+
451
+ ---
452
+
453
+ ## Output (report_agent_job_result)
454
+
455
+ Return JSON:
456
+ {
457
+ "id": "{id}",
458
+ "status": "completed" | "failed",
459
+ "findings": "Concise summary of ${'{'}angle{'}'} discoveries (max 800 chars)",
460
+ "key_files": ["relevant/file1.ts", "relevant/file2.ts"],
461
+ "error": ""
462
+ }
463
+ `
464
+ }
465
+ ```
466
+
467
+ **Success Criteria**:
468
+ - All explore angles executed
469
+ - explore.csv updated with findings and key_files
470
+ - discoveries.ndjson accumulated
471
+
472
+ ---
473
+
474
+ ### Phase 3: Synthesize & Plan → tasks.csv
475
+
476
+ **Objective**: Read exploration findings, cross-reference, resolve conflicts, generate tasks.csv with context_from linking to E* rows.
477
+
478
+ **Steps**:
479
+
480
+ 1. **Synthesize Exploration Findings**
481
+
482
+ ```javascript
483
+ const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
484
+ const completed = exploreCSV.filter(r => r.status === 'completed')
485
+
486
+ // Cross-reference: find shared files across angles
487
+ const fileRefs = {}
488
+ completed.forEach(r => {
489
+ (r.key_files || '').split(';').filter(Boolean).forEach(f => {
490
+ if (!fileRefs[f]) fileRefs[f] = []
491
+ fileRefs[f].push({ angle: r.angle, id: r.id })
492
+ })
493
+ })
494
+ const sharedFiles = Object.entries(fileRefs).filter(([_, refs]) => refs.length > 1)
495
+
496
+ // Build synthesis context for task decomposition
497
+ const synthesisContext = completed.map(r =>
498
+ `[${r.id}: ${r.angle}] ${r.findings}\n Key files: ${r.key_files || 'none'}`
499
+ ).join('\n\n')
500
+
501
+ const sharedFilesContext = sharedFiles.length > 0
502
+ ? `\nShared files (referenced by multiple angles):\n${sharedFiles.map(([f, refs]) =>
503
+ ` ${f} ← ${refs.map(r => r.id).join(', ')}`
504
+ ).join('\n')}`
505
+ : ''
506
+ ```
507
+
508
+ 2. **Decompose into Tasks**
509
+
510
+ ```javascript
511
+ Bash({
512
+ command: `ccw cli -p "PURPOSE: Based on exploration findings, decompose requirement into 3-10 atomic execution tasks. Each task must include test cases, acceptance criteria, and link to relevant exploration findings.
513
+ TASK:
514
+ Use exploration findings to inform task decomposition
515
+ Each task must be self-contained with specific implementation instructions
516
+ Link tasks to exploration rows via context_from (E1, E2, etc.)
517
+ Define dependencies between tasks (T1 must finish before T2, etc.)
518
+ For each task: define test cases, acceptance criteria, scope, hints, and execution directives
519
+ Ensure same-wave tasks have non-overlapping scopes
520
+ MODE: analysis
521
+ CONTEXT: @**/*
522
+ EXPECTED: JSON object with tasks array. Each task: {id: string, title: string, description: string, test: string, acceptance_criteria: string, scope: string, hints: string, execution_directives: string, deps: string[], context_from: string[]}.
523
+ - id: T1, T2, etc.
524
+ - description: what to implement (specific enough for an agent)
525
+ - test: what tests to write (e.g. 'Unit test: X returns Y')
526
+ - acceptance_criteria: what defines done (e.g. 'API returns 200')
527
+ - scope: target glob (e.g. 'src/auth/**') non-overlapping within same wave
528
+ - hints: tips + ref files (format: 'tips || file1;file2')
529
+ - execution_directives: verification commands (e.g. 'npm test --bail')
530
+ - deps: task IDs that must complete first (T*)
531
+ - context_from: explore (E*) and task (T*) IDs whose findings are needed
532
+ CONSTRAINTS: 3-10 tasks | Atomic | No circular deps | Concrete test/acceptance_criteria | Non-overlapping scopes per wave
533
+
534
+ EXPLORATION FINDINGS:
535
+ ${synthesisContext}
536
+ ${sharedFilesContext}
537
+
538
+ REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
539
+ run_in_background: true
540
+ })
541
+ // Wait for CLI completion → decomposedTasks[]
542
+ ```
543
+
544
+ 3. **Compute Waves & Write tasks.csv**
545
+
546
+ ```javascript
547
+ const { waveAssignment, maxWave } = computeWaves(decomposedTasks)
548
+
549
+ const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error'
550
+ const rows = decomposedTasks.map(task => {
551
+ const wave = waveAssignment.get(task.id)
552
+ return [
553
+ task.id,
554
+ csvEscape(task.title),
555
+ csvEscape(task.description),
556
+ csvEscape(task.test),
557
+ csvEscape(task.acceptance_criteria),
558
+ csvEscape(task.scope),
559
+ csvEscape(task.hints),
560
+ csvEscape(task.execution_directives),
561
+ task.deps.join(';'),
562
+ task.context_from.join(';'),
563
+ wave,
564
+ 'pending', '', '', '', '', ''
565
+ ].map(cell => `"${String(cell).replace(/"/g, '""')}"`).join(',')
566
+ })
567
+
568
+ Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n'))
569
+ ```
570
+
571
+ 4. **User Validation — MANDATORY CONFIRMATION GATE** (skip ONLY if AUTO_YES)
572
+
573
+ **CRITICAL: You MUST stop here and wait for user confirmation before proceeding to Phase 4. DO NOT skip this step. DO NOT auto-proceed.**
574
+
575
+ ```javascript
576
+ if (!AUTO_YES) {
577
+ console.log(`
578
+ ## Execution Plan
579
+
580
+ Explore: ${completed.length} angles completed
581
+ Shared files: ${sharedFiles.length}
582
+ Tasks: ${decomposedTasks.length} across ${maxWave} waves
583
+
584
+ ${Array.from({length: maxWave}, (_, i) => i + 1).map(w => {
585
+ const wt = decomposedTasks.filter(t => waveAssignment.get(t.id) === w)
586
+ return `### Wave ${w} (${wt.length} tasks, concurrent)
587
+ ${wt.map(t => ` - [${t.id}] ${t.title} (scope: ${t.scope}, from: ${t.context_from.join(';')})`).join('\n')}`
588
+ }).join('\n')}
589
+ `)
590
+
591
+ const answer = request_user_input({
592
+ questions: [{
593
+ question: `Proceed with ${decomposedTasks.length} tasks across ${maxWave} waves?`,
594
+ header: "Confirm",
595
+ options: [
596
+ { label: "Execute", description: "Proceed with wave execution" },
597
+ { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv then --continue` },
598
+ { label: "Cancel", description: "Abort" }
599
+ ]
600
+ }]
601
+ })
602
+
603
+ if (answer.Confirm === "Modify") {
604
+ console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $workflow-lite-plan --continue`)
605
+ return // STOP — do not proceed to Phase 4
606
+ } else if (answer.Confirm === "Cancel") {
607
+ return // STOP — do not proceed to Phase 4
608
+ }
609
+ // Only reach here if user selected "Execute"
610
+ }
611
+ ```
612
+
613
+ **Success Criteria**:
614
+ - tasks.csv created with context_from linking to E* rows
615
+ - No circular dependencies
616
+ - User explicitly approved (or AUTO_YES) — Phase 4 MUST NOT start without this
617
+
618
+ ---
619
+
620
+ ### Phase 4: Wave Execute (spawn_agents_on_csv)
621
+
622
+ **Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave's prev_context is built from both explore.csv and tasks.csv.
623
+
624
+ **Steps**:
625
+
626
+ 1. **Wave Loop**
627
+
628
+ ```javascript
629
+ const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
630
+ const failedIds = new Set()
631
+ const skippedIds = new Set()
632
+
633
+ for (let wave = 1; wave <= maxWave; wave++) {
634
+ console.log(`\n## Wave ${wave}/${maxWave}\n`)
635
+
636
+ // Re-read master CSV
637
+ const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
638
+ const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
639
+
640
+ // Skip tasks whose deps failed
641
+ const executableTasks = []
642
+ for (const task of waveTasks) {
643
+ const deps = (task.deps || '').split(';').filter(Boolean)
644
+ if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
645
+ skippedIds.add(task.id)
646
+ updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, {
647
+ status: 'skipped',
648
+ error: 'Dependency failed or skipped'
649
+ })
650
+ console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
651
+ continue
652
+ }
653
+ executableTasks.push(task)
654
+ }
655
+
656
+ if (executableTasks.length === 0) {
657
+ console.log(` No executable tasks in wave ${wave}`)
658
+ continue
659
+ }
660
+
661
+ // Build prev_context for each task (cross-phase: E* + T*)
662
+ for (const task of executableTasks) {
663
+ task.prev_context = buildPrevContext(task.context_from, exploreCSV, masterCsv)
664
+ }
665
+
666
+ // Write wave CSV
667
+ const waveHeader = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context'
668
+ const waveRows = executableTasks.map(t =>
669
+ [t.id, t.title, t.description, t.test, t.acceptance_criteria, t.scope, t.hints, t.execution_directives, t.deps, t.context_from, t.wave, t.prev_context]
670
+ .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
671
+ .join(',')
672
+ )
673
+ Write(`${sessionFolder}/task-wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
674
+
675
+ // Execute wave
676
+ console.log(` Executing ${executableTasks.length} tasks (concurrency: ${maxConcurrency})...`)
677
+
678
+ spawn_agents_on_csv({
679
+ csv_path: `${sessionFolder}/task-wave-${wave}.csv`,
680
+ id_column: "id",
681
+ instruction: buildExecuteInstruction(sessionFolder, wave),
682
+ max_concurrency: maxConcurrency,
683
+ max_runtime_seconds: 600,
684
+ output_csv_path: `${sessionFolder}/task-wave-${wave}-results.csv`,
685
+ output_schema: {
686
+ type: "object",
687
+ properties: {
688
+ id: { type: "string" },
689
+ status: { type: "string", enum: ["completed", "failed"] },
690
+ findings: { type: "string" },
691
+ files_modified: { type: "array", items: { type: "string" } },
692
+ tests_passed: { type: "boolean" },
693
+ acceptance_met: { type: "string" },
694
+ error: { type: "string" }
695
+ },
696
+ required: ["id", "status", "findings", "tests_passed"]
697
+ }
698
+ })
699
+
700
+ // Merge results into master CSV
701
+ const waveResults = parseCsv(Read(`${sessionFolder}/task-wave-${wave}-results.csv`))
702
+ for (const result of waveResults) {
703
+ updateMasterCsvRow(`${sessionFolder}/tasks.csv`, result.id, {
704
+ status: result.status,
705
+ findings: result.findings || '',
706
+ files_modified: Array.isArray(result.files_modified) ? result.files_modified.join(';') : (result.files_modified || ''),
707
+ tests_passed: String(result.tests_passed ?? ''),
708
+ acceptance_met: result.acceptance_met || '',
709
+ error: result.error || ''
710
+ })
711
+
712
+ if (result.status === 'failed') {
713
+ failedIds.add(result.id)
714
+ console.log(` [${result.id}] → FAILED: ${result.error}`)
715
+ } else {
716
+ console.log(` [${result.id}] → COMPLETED${result.tests_passed ? ' ✓tests' : ''}`)
717
+ }
718
+ }
719
+
720
+ // Cleanup
721
+ Bash(`rm -f "${sessionFolder}/task-wave-${wave}.csv" "${sessionFolder}/task-wave-${wave}-results.csv"`)
722
+
723
+ console.log(` Wave ${wave} done: ${waveResults.filter(r => r.status === 'completed').length} completed, ${waveResults.filter(r => r.status === 'failed').length} failed`)
724
+ }
725
+ ```
726
+
727
+ 2. **prev_context Builder (Cross-Phase)**
728
+
729
+ The key function linking exploration context to execution:
730
+
731
+ ```javascript
732
+ function buildPrevContext(contextFrom, exploreCSV, tasksCSV) {
733
+ if (!contextFrom) return 'No previous context available'
734
+
735
+ const ids = contextFrom.split(';').filter(Boolean)
736
+ const entries = []
737
+
738
+ ids.forEach(id => {
739
+ if (id.startsWith('E')) {
740
+ // ← Look up in explore.csv (cross-phase link)
741
+ const row = exploreCSV.find(r => r.id === id)
742
+ if (row && row.status === 'completed' && row.findings) {
743
+ entries.push(`[Explore ${row.angle}] ${row.findings}`)
744
+ if (row.key_files) entries.push(` Key files: ${row.key_files}`)
745
+ }
746
+ } else if (id.startsWith('T')) {
747
+ // Look up in tasks.csv (same-phase link)
748
+ const row = tasksCSV.find(r => r.id === id)
749
+ if (row && row.status === 'completed' && row.findings) {
750
+ entries.push(`[Task ${row.id}: ${row.title}] ${row.findings}`)
751
+ if (row.files_modified) entries.push(` Modified: ${row.files_modified}`)
752
+ }
753
+ }
754
+ })
755
+
756
+ return entries.length > 0 ? entries.join('\n') : 'No previous context available'
757
+ }
758
+ ```
759
+
760
+ 3. **Execute Instruction Template**
761
+
762
+ ```javascript
763
+ function buildExecuteInstruction(sessionFolder, wave) {
764
+ return `
765
+ ## TASK ASSIGNMENT
766
+
767
+ ### MANDATORY FIRST STEPS
768
+ 1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
769
+ 2. Read project context: .workflow/project-tech.json (if exists)
770
+
771
+ ---
772
+
773
+ ## Your Task
774
+
775
+ **Task ID**: {id}
776
+ **Title**: {title}
777
+ **Description**: {description}
778
+ **Scope**: {scope}
779
+
780
+ ### Implementation Hints & Reference Files
781
+ {hints}
782
+
783
+ > Format: \`tips text || file1;file2\`. Read ALL reference files (after ||) before starting. Apply tips (before ||) as guidance.
784
+
785
+ ### Execution Directives
786
+ {execution_directives}
787
+
788
+ > Commands to run for verification, tool restrictions, or environment requirements.
789
+
790
+ ### Test Cases
791
+ {test}
792
+
793
+ ### Acceptance Criteria
794
+ {acceptance_criteria}
795
+
796
+ ### Previous Context (from exploration and predecessor tasks)
797
+ {prev_context}
798
+
799
+ ---
800
+
801
+ ## Execution Protocol
802
+
803
+ 1. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
804
+ 2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings
805
+ 3. **Use context**: Apply previous tasks' findings from prev_context above
806
+ 4. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
807
+ 5. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
808
+ 6. **Execute**: Implement the task as described
809
+ 7. **Write tests**: Implement the test cases defined above
810
+ 8. **Run directives**: Execute commands from {execution_directives} to verify your work
811
+ 9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion
812
+ 10. **Share discoveries**: Append exploration findings to shared board:
813
+ \`\`\`bash
814
+ echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
815
+ \`\`\`
816
+ 11. **Report result**: Return JSON via report_agent_job_result
817
+
818
+ ### Discovery Types to Share
819
+ - \`code_pattern\`: {name, file, description} — reusable patterns found
820
+ - \`integration_point\`: {file, description, exports[]} — module connection points
821
+ - \`convention\`: {naming, imports, formatting} — code style conventions
822
+ - \`blocker\`: {issue, severity, impact} — blocking issues encountered
823
+
824
+ ---
825
+
826
+ ## Output (report_agent_job_result)
827
+
828
+ Return JSON:
829
+ {
830
+ "id": "{id}",
831
+ "status": "completed" | "failed",
832
+ "findings": "Key discoveries and implementation notes (max 500 chars)",
833
+ "files_modified": ["path1", "path2"],
834
+ "tests_passed": true | false,
835
+ "acceptance_met": "Summary of which acceptance criteria were met/unmet",
836
+ "error": ""
837
+ }
838
+
839
+ **IMPORTANT**: Set status to "completed" ONLY if:
840
+ - All test cases pass
841
+ - All acceptance criteria are met
842
+ Otherwise set status to "failed" with details in error field.
843
+ `
844
+ }
845
+ ```
846
+
847
+ 4. **Master CSV Update Helper**
848
+
849
+ ```javascript
850
+ function updateMasterCsvRow(csvPath, taskId, updates) {
851
+ const content = Read(csvPath)
852
+ const lines = content.split('\n')
853
+ const header = lines[0].split(',')
854
+
855
+ for (let i = 1; i < lines.length; i++) {
856
+ const cells = parseCsvLine(lines[i])
857
+ if (cells[0] === taskId || cells[0] === `"${taskId}"`) {
858
+ for (const [col, val] of Object.entries(updates)) {
859
+ const colIdx = header.indexOf(col)
860
+ if (colIdx >= 0) {
861
+ cells[colIdx] = `"${String(val).replace(/"/g, '""')}"`
862
+ }
863
+ }
864
+ lines[i] = cells.join(',')
865
+ break
866
+ }
867
+ }
868
+
869
+ Write(csvPath, lines.join('\n'))
870
+ }
871
+ ```
872
+
873
+ **Success Criteria**:
874
+ - All waves executed in order
875
+ - Each wave's results merged into master CSV before next wave starts
876
+ - Dependent tasks skipped when predecessor failed
877
+ - discoveries.ndjson accumulated across all phases
878
+
879
+ ---
880
+
881
+ ### Phase 5: Results Aggregation
882
+
883
+ **Objective**: Generate final results and human-readable report.
884
+
885
+ **Steps**:
886
+
887
+ 1. **Export results.csv**
888
+
889
+ ```javascript
890
+ const masterCsv = Read(`${sessionFolder}/tasks.csv`)
891
+ Write(`${sessionFolder}/results.csv`, masterCsv)
892
+ ```
893
+
894
+ 2. **Generate context.md**
895
+
896
+ ```javascript
897
+ const finalTasks = parseCsv(masterCsv)
898
+ const exploreCSV = parseCsv(Read(`${sessionFolder}/explore.csv`))
899
+ const completed = finalTasks.filter(t => t.status === 'completed')
900
+ const failed = finalTasks.filter(t => t.status === 'failed')
901
+ const skipped = finalTasks.filter(t => t.status === 'skipped')
902
+
903
+ const contextContent = `# Lite Planex Execution Report
904
+
905
+ **Session**: ${sessionId}
906
+ **Requirement**: ${requirement}
907
+ **Completed**: ${getUtc8ISOString()}
908
+ **Waves**: ${maxWave} | **Concurrency**: ${maxConcurrency}
909
+
910
+ ---
911
+
912
+ ## Summary
913
+
914
+ | Metric | Count |
915
+ |--------|-------|
916
+ | Explore Angles | ${exploreCSV.length} |
917
+ | Total Tasks | ${finalTasks.length} |
918
+ | Completed | ${completed.length} |
919
+ | Failed | ${failed.length} |
920
+ | Skipped | ${skipped.length} |
921
+ | Waves | ${maxWave} |
922
+
923
+ ---
924
+
925
+ ## Exploration Results
926
+
927
+ ${exploreCSV.map(e => `### ${e.id}: ${e.angle} (${e.status})
928
+ ${e.findings || 'N/A'}
929
+ Key files: ${e.key_files || 'none'}`).join('\n\n')}
930
+
931
+ ---
932
+
933
+ ## Task Results
934
+
935
+ ${finalTasks.map(t => `### ${t.id}: ${t.title} (${t.status})
936
+
937
+ | Field | Value |
938
+ |-------|-------|
939
+ | Wave | ${t.wave} |
940
+ | Scope | ${t.scope || 'none'} |
941
+ | Dependencies | ${t.deps || 'none'} |
942
+ | Context From | ${t.context_from || 'none'} |
943
+ | Tests Passed | ${t.tests_passed || 'N/A'} |
944
+ | Acceptance Met | ${t.acceptance_met || 'N/A'} |
945
+ | Error | ${t.error || 'none'} |
946
+
947
+ **Description**: ${t.description}
948
+
949
+ **Test Cases**: ${t.test || 'N/A'}
950
+
951
+ **Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'}
952
+
953
+ **Hints**: ${t.hints || 'N/A'}
954
+
955
+ **Execution Directives**: ${t.execution_directives || 'N/A'}
956
+
957
+ **Findings**: ${t.findings || 'N/A'}
958
+
959
+ **Files Modified**: ${t.files_modified || 'none'}`).join('\n\n---\n\n')}
960
+
961
+ ---
962
+
963
+ ## All Modified Files
964
+
965
+ ${[...new Set(finalTasks.flatMap(t => (t.files_modified || '').split(';')).filter(Boolean))].map(f => '- ' + f).join('\n') || 'None'}
966
+ `
967
+
968
+ Write(`${sessionFolder}/context.md`, contextContent)
969
+ ```
970
+
971
+ 3. **Display Summary**
972
+
973
+ ```javascript
974
+ console.log(`
975
+ ## Lite Planex Complete
976
+
977
+ - **Session**: ${sessionId}
978
+ - **Explore**: ${exploreCSV.filter(r => r.status === 'completed').length}/${exploreCSV.length} angles
979
+ - **Tasks**: ${completed.length}/${finalTasks.length} completed, ${failed.length} failed, ${skipped.length} skipped
980
+ - **Waves**: ${maxWave}
981
+
982
+ **Results**: ${sessionFolder}/results.csv
983
+ **Report**: ${sessionFolder}/context.md
984
+ **Discoveries**: ${sessionFolder}/discoveries.ndjson
985
+ `)
986
+ ```
987
+
988
+ 4. **Offer Next Steps** (skip if AUTO_YES)
989
+
990
+ ```javascript
991
+ if (!AUTO_YES && failed.length > 0) {
992
+ const answer = request_user_input({
993
+ questions: [{
994
+ question: `${failed.length} tasks failed. Next action?`,
995
+ header: "Next Step",
996
+ options: [
997
+ { label: "Retry Failed", description: `Re-execute ${failed.length} failed tasks with updated context` },
998
+ { label: "View Report", description: "Display context.md" },
999
+ { label: "Done", description: "Complete session" }
1000
+ ]
1001
+ }]
1002
+ })
1003
+
1004
+ if (answer['Next Step'] === "Retry Failed") {
1005
+ for (const task of failed) {
1006
+ updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, { status: 'pending', error: '' })
1007
+ }
1008
+ for (const task of skipped) {
1009
+ updateMasterCsvRow(`${sessionFolder}/tasks.csv`, task.id, { status: 'pending', error: '' })
1010
+ }
1011
+ // Re-execute Phase 4
1012
+ } else if (answer['Next Step'] === "View Report") {
1013
+ console.log(Read(`${sessionFolder}/context.md`))
1014
+ }
1015
+ }
1016
+ ```
1017
+
1018
+ **Success Criteria**:
1019
+ - results.csv exported
1020
+ - context.md generated with full field coverage
1021
+ - Summary displayed to user
1022
+
1023
+ ---
1024
+
1025
+ ## Wave Computation (Kahn's BFS)
1026
+
1027
+ ```javascript
1028
+ function computeWaves(tasks) {
1029
+ const taskMap = new Map(tasks.map(t => [t.id, t]))
1030
+ const inDegree = new Map(tasks.map(t => [t.id, 0]))
1031
+ const adjList = new Map(tasks.map(t => [t.id, []]))
1032
+
1033
+ for (const task of tasks) {
1034
+ for (const dep of task.deps) {
1035
+ if (taskMap.has(dep)) {
1036
+ adjList.get(dep).push(task.id)
1037
+ inDegree.set(task.id, inDegree.get(task.id) + 1)
1038
+ }
1039
+ }
1040
+ }
1041
+
1042
+ const queue = []
1043
+ const waveAssignment = new Map()
1044
+
1045
+ for (const [id, deg] of inDegree) {
1046
+ if (deg === 0) {
1047
+ queue.push([id, 1])
1048
+ waveAssignment.set(id, 1)
1049
+ }
1050
+ }
1051
+
1052
+ let maxWave = 1
1053
+ let idx = 0
1054
+ while (idx < queue.length) {
1055
+ const [current, depth] = queue[idx++]
1056
+ for (const next of adjList.get(current)) {
1057
+ const newDeg = inDegree.get(next) - 1
1058
+ inDegree.set(next, newDeg)
1059
+ const nextDepth = Math.max(waveAssignment.get(next) || 0, depth + 1)
1060
+ waveAssignment.set(next, nextDepth)
1061
+ if (newDeg === 0) {
1062
+ queue.push([next, nextDepth])
1063
+ maxWave = Math.max(maxWave, nextDepth)
1064
+ }
1065
+ }
1066
+ }
1067
+
1068
+ for (const task of tasks) {
1069
+ if (!waveAssignment.has(task.id)) {
1070
+ throw new Error(`Circular dependency detected involving task ${task.id}`)
1071
+ }
1072
+ }
1073
+
1074
+ return { waveAssignment, maxWave }
1075
+ }
1076
+ ```
1077
+
1078
+ ---
1079
+
1080
+ ## Shared Discovery Board Protocol
1081
+
1082
+ All agents across all phases share `discoveries.ndjson`. This eliminates redundant codebase exploration.
1083
+
1084
+ ```jsonl
1085
+ {"ts":"2026-02-28T10:00:00+08:00","worker":"E1","type":"code_pattern","data":{"name":"repository-pattern","file":"src/repos/Base.ts","description":"Abstract CRUD repository"}}
1086
+ {"ts":"2026-02-28T10:01:00+08:00","worker":"T2","type":"integration_point","data":{"file":"src/auth/index.ts","description":"Auth module entry","exports":["authenticate","authorize"]}}
1087
+ ```
1088
+
1089
+ **Types**: `code_pattern`, `integration_point`, `convention`, `blocker`, `tech_stack`, `test_command`
1090
+ **Rules**: Read first → write immediately → deduplicate → append-only
1091
+
1092
+ ---
1093
+
1094
+ ## Error Handling
1095
+
1096
+ | Error | Resolution |
1097
+ |-------|------------|
1098
+ | Explore agent failure | Mark as failed in explore.csv, exclude from planning |
1099
+ | All explores failed | Fallback: plan directly from requirement without exploration |
1100
+ | Circular dependency | Abort wave computation, report cycle |
1101
+ | Execute agent timeout | Mark as failed in results, continue with wave |
1102
+ | Execute agent failed | Mark as failed, skip dependent tasks in later waves |
1103
+ | CSV parse error | Validate CSV format before execution, show line number |
1104
+ | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
1105
+ | Continue mode: no session | List available sessions, prompt user to select |
1106
+
1107
+ ---
1108
+
1109
+ ## Core Rules
1110
+
1111
+ 1. **Explore Before Execute**: Phase 2 completes before Phase 4 starts
1112
+ 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
1113
+ 3. **CSV is Source of Truth**: Master CSVs hold all state — always read before each wave, always write after
1114
+ 4. **Cross-Phase Context**: prev_context built from both explore.csv (E*) and tasks.csv (T*), not from memory
1115
+ 5. **E* → T* Linking**: tasks.csv `context_from` references explore.csv rows for cross-phase context
1116
+ 6. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
1117
+ 7. **Skip on Failure**: If a dependency failed, skip the dependent task (cascade)
1118
+ 8. **Cleanup Temp Files**: Remove wave CSVs after results are merged
1119
+ 9. **MANDATORY CONFIRMATION GATES**: Unless `-y`/`--yes` is set, you MUST stop and wait for user confirmation after Phase 1 (exploration plan) and Phase 3 (execution plan) before proceeding. NEVER skip these gates. Phase 4 execution MUST NOT begin until user explicitly approves
1120
+ 10. **Continuous Within Phase**: Within a phase (e.g., wave loop in Phase 4), execute continuously until all waves complete or all remaining tasks are skipped — but NEVER cross a confirmation gate without user approval
1121
+
1122
+ ---
1123
+
1124
+ ## Best Practices
1125
+
1126
+ 1. **Exploration Angles**: 1 for simple, 3-4 for complex; avoid redundant angles
1127
+ 2. **Context Linking**: Link every task to at least one explore row (E*) — exploration was done for a reason
1128
+ 3. **Task Granularity**: 3-10 tasks optimal; too many = overhead, too few = no parallelism
1129
+ 4. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism
1130
+ 5. **Specific Descriptions**: Agent sees only its CSV row + prev_context — make description self-contained
1131
+ 6. **Non-Overlapping Scopes**: Same-wave tasks must not write to the same files
1132
+ 7. **Concurrency Tuning**: `-c 1` for serial (max context sharing); `-c 8` for I/O-bound tasks
1133
+
1134
+ ---
1135
+
1136
+ ## Usage Recommendations
1137
+
1138
+ | Scenario | Recommended Approach |
1139
+ |----------|---------------------|
1140
+ | Complex feature (unclear architecture) | `$workflow-lite-plan` — explore first, then plan |
1141
+ | Simple known-pattern task | `$workflow-lite-plan` — skip exploration, direct execution |
1142
+ | Independent parallel tasks | `$workflow-lite-plan -c 8` — single wave, max parallelism |
1143
+ | Diamond dependency (A→B,C→D) | `$workflow-lite-plan` — 3 waves with context propagation |
1144
+ | Unknown codebase | `$workflow-lite-plan` — exploration phase is essential |