claude-code-workflow 7.2.14 → 7.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. package/.claude/commands/workflow/analyze-with-file.md +7 -0
  2. package/.codex/skills/analyze-with-file/SKILL.md +1181 -1182
  3. package/.codex/skills/brainstorm/SKILL.md +723 -725
  4. package/.codex/skills/brainstorm-with-file/SKILL.md +10 -5
  5. package/.codex/skills/clean/SKILL.md +33 -26
  6. package/.codex/skills/collaborative-plan-with-file/SKILL.md +830 -831
  7. package/.codex/skills/csv-wave-pipeline/SKILL.md +906 -906
  8. package/.codex/skills/issue-discover/SKILL.md +57 -50
  9. package/.codex/skills/issue-discover/phases/01-issue-new.md +18 -11
  10. package/.codex/skills/issue-discover/phases/02-discover.md +31 -26
  11. package/.codex/skills/issue-discover/phases/03-discover-by-prompt.md +13 -11
  12. package/.codex/skills/issue-discover/phases/04-quick-execute.md +32 -27
  13. package/.codex/skills/parallel-dev-cycle/SKILL.md +402 -402
  14. package/.codex/skills/project-documentation-workflow/SKILL.md +13 -3
  15. package/.codex/skills/roadmap-with-file/SKILL.md +901 -897
  16. package/.codex/skills/session-sync/SKILL.md +222 -212
  17. package/.codex/skills/spec-add/SKILL.md +620 -613
  18. package/.codex/skills/spec-generator/SKILL.md +2 -2
  19. package/.codex/skills/spec-generator/phases/01-5-requirement-clarification.md +10 -10
  20. package/.codex/skills/spec-generator/phases/01-discovery.md +11 -18
  21. package/.codex/skills/spec-generator/phases/02-product-brief.md +5 -5
  22. package/.codex/skills/spec-generator/phases/03-requirements.md +7 -7
  23. package/.codex/skills/spec-generator/phases/04-architecture.md +4 -4
  24. package/.codex/skills/spec-generator/phases/05-epics-stories.md +5 -6
  25. package/.codex/skills/spec-generator/phases/06-readiness-check.md +10 -17
  26. package/.codex/skills/spec-generator/phases/07-issue-export.md +326 -329
  27. package/.codex/skills/spec-setup/SKILL.md +669 -657
  28. package/.codex/skills/team-arch-opt/SKILL.md +50 -50
  29. package/.codex/skills/team-arch-opt/agents/completion-handler.md +3 -3
  30. package/.codex/skills/team-brainstorm/SKILL.md +724 -725
  31. package/.codex/skills/team-coordinate/SKILL.md +51 -51
  32. package/.codex/skills/team-coordinate/agents/completion-handler.md +3 -3
  33. package/.codex/skills/team-coordinate/agents/plan-reviewer.md +4 -4
  34. package/.codex/skills/team-designer/SKILL.md +691 -691
  35. package/.codex/skills/team-designer/agents/requirement-clarifier.md +11 -12
  36. package/.codex/skills/team-executor/SKILL.md +45 -45
  37. package/.codex/skills/team-frontend/SKILL.md +45 -45
  38. package/.codex/skills/team-frontend/agents/completion-handler.md +3 -3
  39. package/.codex/skills/team-frontend/agents/qa-gate-reviewer.md +4 -4
  40. package/.codex/skills/team-frontend-debug/SKILL.md +50 -50
  41. package/.codex/skills/team-frontend-debug/agents/completion-handler.md +3 -3
  42. package/.codex/skills/team-frontend-debug/agents/conditional-skip-gate.md +4 -4
  43. package/.codex/skills/team-issue/SKILL.md +751 -740
  44. package/.codex/skills/team-iterdev/SKILL.md +825 -826
  45. package/.codex/skills/team-lifecycle-v4/SKILL.md +775 -775
  46. package/.codex/skills/team-lifecycle-v4/agents/quality-gate.md +165 -165
  47. package/.codex/skills/team-lifecycle-v4/agents/requirement-clarifier.md +163 -163
  48. package/.codex/skills/team-perf-opt/SKILL.md +50 -50
  49. package/.codex/skills/team-perf-opt/agents/completion-handler.md +3 -3
  50. package/.codex/skills/team-planex-v2/SKILL.md +652 -637
  51. package/.codex/skills/team-quality-assurance/SKILL.md +51 -52
  52. package/.codex/skills/team-review/SKILL.md +40 -40
  53. package/.codex/skills/team-roadmap-dev/SKILL.md +51 -51
  54. package/.codex/skills/team-roadmap-dev/agents/roadmap-discusser.md +8 -8
  55. package/.codex/skills/team-tech-debt/SKILL.md +50 -50
  56. package/.codex/skills/team-tech-debt/agents/plan-approver.md +5 -5
  57. package/.codex/skills/team-testing/SKILL.md +51 -52
  58. package/.codex/skills/team-uidesign/SKILL.md +40 -40
  59. package/.codex/skills/team-uidesign/agents/completion-handler.md +177 -177
  60. package/.codex/skills/team-ultra-analyze/SKILL.md +786 -787
  61. package/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md +8 -8
  62. package/.codex/skills/team-ux-improve/SKILL.md +51 -52
  63. package/.codex/skills/team-ux-improve/agents/ux-designer.md +2 -2
  64. package/.codex/skills/team-ux-improve/agents/ux-explorer.md +1 -1
  65. package/.codex/skills/unified-execute-with-file/SKILL.md +797 -796
  66. package/.codex/skills/workflow-execute/SKILL.md +1117 -1118
  67. package/.codex/skills/workflow-lite-planex/SKILL.md +1144 -1141
  68. package/.codex/skills/workflow-plan/SKILL.md +631 -636
  69. package/.codex/skills/workflow-tdd-plan/SKILL.md +753 -759
  70. package/.codex/skills/workflow-test-fix-cycle/SKILL.md +402 -392
  71. package/README.md +25 -0
  72. package/ccw/dist/commands/install.d.ts.map +1 -1
  73. package/ccw/dist/commands/install.js +12 -0
  74. package/ccw/dist/commands/install.js.map +1 -1
  75. package/package.json +1 -1
@@ -1,637 +1,652 @@
1
- ---
2
- name: team-planex-v2
3
- description: Hybrid team skill for plan-and-execute pipeline. CSV wave primary for planning and execution. Planner decomposes requirements into issues and solutions, then executor implements each via CLI tools. Supports issue IDs, text input, and plan file input.
4
- argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] [--exec=codex|gemini] \"issue IDs or --text 'description' or --plan path\""
5
- allowed-tools: spawn_agents_on_csv, spawn_agent, wait, send_input, close_agent, Read, Write, Edit, Bash, Glob, Grep, AskUserQuestion
6
- ---
7
-
8
- ## Auto Mode
9
-
10
- When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
11
-
12
- # Team PlanEx
13
-
14
- ## Usage
15
-
16
- ```bash
17
- $team-planex-v2 "ISS-20260308-120000 ISS-20260308-120001"
18
- $team-planex-v2 -c 3 "--text 'Add rate limiting to all API endpoints'"
19
- $team-planex-v2 -y "--plan .workflow/specs/roadmap.md --exec=codex"
20
- $team-planex-v2 --continue "planex-rate-limit-20260308"
21
- ```
22
-
23
- **Flags**:
24
- - `-y, --yes`: Skip all confirmations (auto mode)
25
- - `-c, --concurrency N`: Max concurrent agents within each wave (default: 3)
26
- - `--continue`: Resume existing session
27
- - `--exec=codex|gemini|qwen`: Force execution method for implementation
28
-
29
- **Output Directory**: `.workflow/.csv-wave/{session-id}/`
30
- **Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
31
-
32
- ---
33
-
34
- ## Overview
35
-
36
- Plan-and-execute pipeline for issue-based development. Planner decomposes requirements into individual issues with solution plans, then executors implement each issue independently.
37
-
38
- **Execution Model**: Hybrid -- CSV wave pipeline (primary) + individual agent spawn (secondary)
39
-
40
- ```
41
- +---------------------------------------------------------------------------+
42
- | TEAM PLANEX WORKFLOW |
43
- +---------------------------------------------------------------------------+
44
- | |
45
- | Phase 0: Pre-Wave Interactive (Input Analysis) |
46
- | +-- Parse input type (issue IDs / --text / --plan) |
47
- | +-- Determine execution method (codex/gemini/auto) |
48
- | +-- Create issues from text/plan if needed |
49
- | +-- Output: refined issue list for decomposition |
50
- | |
51
- | Phase 1: Requirement -> CSV + Classification |
52
- | +-- Planning wave: generate solutions for each issue |
53
- | +-- Execution wave: implement each issue independently |
54
- | +-- Classify tasks: csv-wave (default) | interactive |
55
- | +-- Compute dependency waves (topological sort) |
56
- | +-- Generate tasks.csv with wave + exec_mode columns |
57
- | +-- User validates task breakdown (skip if -y) |
58
- | |
59
- | Phase 2: Wave Execution Engine (Extended) |
60
- | +-- For each wave (1..N): |
61
- | | +-- Build wave CSV (filter csv-wave tasks for this wave) |
62
- | | +-- Inject previous findings into prev_context column |
63
- | | +-- spawn_agents_on_csv(wave CSV) |
64
- | | +-- Merge all results into master tasks.csv |
65
- | | +-- Check: any failed? -> skip dependents |
66
- | +-- discoveries.ndjson shared across all modes (append-only) |
67
- | |
68
- | Phase 3: Results Aggregation |
69
- | +-- Export final results.csv |
70
- | +-- Generate context.md with all findings |
71
- | +-- Display summary: completed/failed/skipped per wave |
72
- | +-- Offer: view results | retry failed | done |
73
- | |
74
- +---------------------------------------------------------------------------+
75
- ```
76
-
77
- ---
78
-
79
- ## Task Classification Rules
80
-
81
- Each task is classified by `exec_mode`:
82
-
83
- | exec_mode | Mechanism | Criteria |
84
- |-----------|-----------|----------|
85
- | `csv-wave` | `spawn_agents_on_csv` | One-shot, structured I/O, no multi-round interaction |
86
- | `interactive` | `spawn_agent`/`wait`/`send_input`/`close_agent` | Multi-round, clarification needed |
87
-
88
- **Classification Decision**:
89
-
90
- | Task Property | Classification |
91
- |---------------|---------------|
92
- | Solution planning per issue (PLAN-*) | `csv-wave` |
93
- | Code implementation per issue (EXEC-*) | `csv-wave` |
94
- | Complex multi-issue coordination (rare) | `interactive` |
95
-
96
- > In the standard PlanEx pipeline, all tasks default to `csv-wave`. Interactive mode is reserved for edge cases requiring multi-round coordination.
97
-
98
- ---
99
-
100
- ## CSV Schema
101
-
102
- ### tasks.csv (Master State)
103
-
104
- ```csv
105
- id,title,description,role,issue_ids,input_type,raw_input,exec_mode,execution_method,deps,context_from,wave,status,findings,artifact_path,error
106
- "PLAN-001","Plan issue-1","Generate solution for ISS-20260308-120000","planner","ISS-20260308-120000","issues","ISS-20260308-120000","csv-wave","","","","1","pending","","",""
107
- "PLAN-002","Plan issue-2","Generate solution for ISS-20260308-120001","planner","ISS-20260308-120001","issues","ISS-20260308-120001","csv-wave","","","","1","pending","","",""
108
- "EXEC-001","Implement issue-1","Implement solution for ISS-20260308-120000","executor","ISS-20260308-120000","","","csv-wave","gemini","PLAN-001","PLAN-001","2","pending","","",""
109
- "EXEC-002","Implement issue-2","Implement solution for ISS-20260308-120001","executor","ISS-20260308-120001","","","csv-wave","gemini","PLAN-002","PLAN-002","2","pending","","",""
110
- ```
111
-
112
- **Columns**:
113
-
114
- | Column | Phase | Description |
115
- |--------|-------|-------------|
116
- | `id` | Input | Unique task identifier (PLAN-NNN, EXEC-NNN) |
117
- | `title` | Input | Short task title |
118
- | `description` | Input | Detailed task description |
119
- | `role` | Input | Worker role: planner or executor |
120
- | `issue_ids` | Input | Semicolon-separated issue IDs this task covers |
121
- | `input_type` | Input | Input type: issues, text, or plan (planner tasks only) |
122
- | `raw_input` | Input | Raw input text (planner tasks only) |
123
- | `exec_mode` | Input | `csv-wave` or `interactive` |
124
- | `execution_method` | Input | codex, gemini, qwen, or empty (executor tasks only) |
125
- | `deps` | Input | Semicolon-separated dependency task IDs |
126
- | `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
127
- | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
128
- | `status` | Output | `pending` -> `completed` / `failed` / `skipped` |
129
- | `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
130
- | `artifact_path` | Output | Path to generated artifact (solution file, build result) |
131
- | `error` | Output | Error message if failed (empty if success) |
132
-
133
- ### Per-Wave CSV (Temporary)
134
-
135
- Each wave generates a temporary `wave-{N}.csv` with extra `prev_context` column (csv-wave tasks only).
136
-
137
- ---
138
-
139
- ## Output Artifacts
140
-
141
- | File | Purpose | Lifecycle |
142
- |------|---------|-----------|
143
- | `tasks.csv` | Master state -- all tasks with status/findings | Updated after each wave |
144
- | `wave-{N}.csv` | Per-wave input (temporary, csv-wave tasks only) | Created before wave, deleted after |
145
- | `results.csv` | Final export of all task results | Created in Phase 3 |
146
- | `discoveries.ndjson` | Shared exploration board (all agents) | Append-only, carries across waves |
147
- | `context.md` | Human-readable execution report | Created in Phase 3 |
148
- | `artifacts/solutions/{issueId}.json` | Planner solution artifacts | Created by planner agents |
149
- | `builds/{issueId}.json` | Executor build results | Created by executor agents |
150
-
151
- ---
152
-
153
- ## Session Structure
154
-
155
- ```
156
- .workflow/.csv-wave/{session-id}/
157
- +-- tasks.csv # Master state (all tasks)
158
- +-- results.csv # Final results export
159
- +-- discoveries.ndjson # Shared discovery board
160
- +-- context.md # Human-readable report
161
- +-- wave-{N}.csv # Temporary per-wave input
162
- +-- artifacts/
163
- | +-- solutions/ # Planner output
164
- | +-- {issueId}.json
165
- +-- builds/ # Executor output
166
- | +-- {issueId}.json
167
- +-- wisdom/ # Cross-task knowledge
168
- +-- learnings.md
169
- +-- decisions.md
170
- +-- conventions.md
171
- +-- issues.md
172
- ```
173
-
174
- ---
175
-
176
- ## Implementation
177
-
178
- ### Session Initialization
179
-
180
- ```javascript
181
- const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
182
-
183
- const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
184
- const continueMode = $ARGUMENTS.includes('--continue')
185
- const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
186
- const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 3
187
-
188
- const requirement = $ARGUMENTS
189
- .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
190
- .trim()
191
-
192
- // Parse execution method
193
- let executionMethod = 'gemini' // default
194
- const execMatch = requirement.match(/--exec=(\w+)/)
195
- if (execMatch) executionMethod = execMatch[1]
196
-
197
- // Detect input type
198
- const issueIdPattern = /ISS-\d{8}-\d{6}/g
199
- const textMatch = requirement.match(/--text\s+'([^']+)'/)
200
- const planMatch = requirement.match(/--plan\s+(\S+)/)
201
-
202
- let inputType = 'issues'
203
- let rawInput = requirement
204
- let issueIds = requirement.match(issueIdPattern) || []
205
-
206
- if (textMatch) {
207
- inputType = 'text'
208
- rawInput = textMatch[1]
209
- issueIds = [] // will be created by planner
210
- } else if (planMatch) {
211
- inputType = 'plan'
212
- rawInput = planMatch[1]
213
- issueIds = [] // will be parsed from plan file
214
- }
215
-
216
- // If no input detected, ask user
217
- if (issueIds.length === 0 && inputType === 'issues') {
218
- const answer = AskUserQuestion("No input detected. Provide issue IDs, or use --text 'description' or --plan <path>:")
219
- issueIds = answer.match(issueIdPattern) || []
220
- if (issueIds.length === 0 && !answer.includes('--text') && !answer.includes('--plan')) {
221
- inputType = 'text'
222
- rawInput = answer
223
- }
224
- }
225
-
226
- // Execution method selection (interactive if no flag)
227
- if (!execMatch && !AUTO_YES) {
228
- const methodChoice = AskUserQuestion({
229
- questions: [{ question: "Select execution method for implementation:",
230
- options: [
231
- { label: "Gemini", description: "gemini-2.5-pro (recommended for <= 3 tasks)" },
232
- { label: "Codex", description: "gpt-5.2 (recommended for > 3 tasks)" },
233
- { label: "Auto", description: "Auto-select based on task count" }
234
- ]
235
- }]
236
- })
237
- if (methodChoice === 'Codex') executionMethod = 'codex'
238
- else if (methodChoice === 'Auto') executionMethod = 'auto'
239
- }
240
-
241
- const slug = (issueIds[0] || rawInput).toLowerCase()
242
- .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
243
- .substring(0, 30)
244
- const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
245
- const sessionId = `planex-${slug}-${dateStr}`
246
- const sessionFolder = `.workflow/.csv-wave/${sessionId}`
247
-
248
- Bash(`mkdir -p ${sessionFolder}/{artifacts/solutions,builds,wisdom}`)
249
-
250
- Write(`${sessionFolder}/discoveries.ndjson`, `# Discovery Board - ${sessionId}\n# Format: NDJSON\n`)
251
-
252
- // Initialize wisdom files
253
- Write(`${sessionFolder}/wisdom/learnings.md`, `# Learnings\n\nAccumulated during ${sessionId}\n`)
254
- Write(`${sessionFolder}/wisdom/decisions.md`, `# Decisions\n\n`)
255
- Write(`${sessionFolder}/wisdom/conventions.md`, `# Conventions\n\n`)
256
- Write(`${sessionFolder}/wisdom/issues.md`, `# Issues\n\n`)
257
-
258
- // Store session metadata
259
- Write(`${sessionFolder}/session.json`, JSON.stringify({
260
- session_id: sessionId,
261
- pipeline_type: 'plan-execute',
262
- input_type: inputType,
263
- raw_input: rawInput,
264
- issue_ids: issueIds,
265
- execution_method: executionMethod,
266
- created_at: getUtc8ISOString()
267
- }, null, 2))
268
- ```
269
-
270
- ---
271
-
272
- ### Phase 0: Pre-Wave Interactive (Input Analysis)
273
-
274
- **Objective**: Parse and normalize input into a list of issue IDs ready for the planning wave.
275
-
276
- **Input Type Handling**:
277
-
278
- | Input Type | Processing |
279
- |------------|-----------|
280
- | `issues` (ISS-* IDs) | Use directly, verify exist via `ccw issue status` |
281
- | `text` (--text flag) | Create issues via `ccw issue create --title ... --context ...` |
282
- | `plan` (--plan flag) | Read plan file, parse phases/tasks, batch create issues |
283
-
284
- For `text` input:
285
- ```bash
286
- # Create issue from text description
287
- ccw issue create --title "<derived-title>" --context "<raw_input>"
288
- # Parse output for new issue ID
289
- ```
290
-
291
- For `plan` input:
292
- ```bash
293
- # Read plan file
294
- planContent = Read("<plan-path>")
295
- # Parse phases/sections into individual issues
296
- # Create each as a separate issue via ccw issue create
297
- ```
298
-
299
- After processing, update session.json with resolved issue_ids.
300
-
301
- **Success Criteria**:
302
- - All inputs resolved to valid issue IDs
303
- - Session metadata updated with final issue list
304
-
305
- ---
306
-
307
- ### Phase 1: Requirement -> CSV + Classification
308
-
309
- **Objective**: Generate tasks.csv with PLAN-* tasks (wave 1) and EXEC-* tasks (wave 2).
310
-
311
- **Two-Wave Structure**:
312
-
313
- Wave 1 (Planning): One PLAN-NNN task per issue, all independent (no deps), concurrent execution.
314
- Wave 2 (Execution): One EXEC-NNN task per issue, each depends on its corresponding PLAN-NNN.
315
-
316
- **Task Generation**:
317
-
318
- ```javascript
319
- const tasks = []
320
-
321
- // Wave 1: Planning tasks (one per issue)
322
- for (let i = 0; i < issueIds.length; i++) {
323
- const n = String(i + 1).padStart(3, '0')
324
- tasks.push({
325
- id: `PLAN-${n}`,
326
- title: `Plan ${issueIds[i]}`,
327
- description: `Generate implementation solution for issue ${issueIds[i]}. Analyze requirements, design solution approach, break down into implementation tasks, identify files to modify/create.`,
328
- role: 'planner',
329
- issue_ids: issueIds[i],
330
- input_type: inputType,
331
- raw_input: inputType === 'issues' ? issueIds[i] : rawInput,
332
- exec_mode: 'csv-wave',
333
- execution_method: '',
334
- deps: '',
335
- context_from: '',
336
- wave: '1',
337
- status: 'pending',
338
- findings: '', artifact_path: '', error: ''
339
- })
340
- }
341
-
342
- // Wave 2: Execution tasks (one per issue, depends on corresponding PLAN)
343
- for (let i = 0; i < issueIds.length; i++) {
344
- const n = String(i + 1).padStart(3, '0')
345
- // Resolve execution method
346
- let method = executionMethod
347
- if (method === 'auto') {
348
- method = issueIds.length <= 3 ? 'gemini' : 'codex'
349
- }
350
- tasks.push({
351
- id: `EXEC-${n}`,
352
- title: `Implement ${issueIds[i]}`,
353
- description: `Implement solution for issue ${issueIds[i]}. Load solution artifact, execute implementation via CLI, run tests, commit.`,
354
- role: 'executor',
355
- issue_ids: issueIds[i],
356
- input_type: '',
357
- raw_input: '',
358
- exec_mode: 'csv-wave',
359
- execution_method: method,
360
- deps: `PLAN-${n}`,
361
- context_from: `PLAN-${n}`,
362
- wave: '2',
363
- status: 'pending',
364
- findings: '', artifact_path: '', error: ''
365
- })
366
- }
367
-
368
- Write(`${sessionFolder}/tasks.csv`, toCsv(tasks))
369
- ```
370
-
371
- **User Validation**: Display task breakdown with wave assignment (skip if AUTO_YES).
372
-
373
- **Success Criteria**:
374
- - tasks.csv created with valid schema and wave assignments
375
- - PLAN-* tasks in wave 1, EXEC-* tasks in wave 2
376
- - Each EXEC-* depends on its corresponding PLAN-*
377
- - No circular dependencies
378
- - User approved (or AUTO_YES)
379
-
380
- ---
381
-
382
- ### Phase 2: Wave Execution Engine (Extended)
383
-
384
- **Objective**: Execute tasks wave-by-wave with context propagation between planning and execution waves.
385
-
386
- ```javascript
387
- const masterCsv = Read(`${sessionFolder}/tasks.csv`)
388
- let tasks = parseCsv(masterCsv)
389
- const maxWave = Math.max(...tasks.map(t => parseInt(t.wave)))
390
-
391
- for (let wave = 1; wave <= maxWave; wave++) {
392
- console.log(`\nWave ${wave}/${maxWave} (${wave === 1 ? 'Planning' : 'Execution'})`)
393
-
394
- // 1. Filter tasks for this wave
395
- const waveTasks = tasks.filter(t => parseInt(t.wave) === wave && t.status === 'pending')
396
-
397
- // 2. Check dependencies - skip if upstream failed
398
- for (const task of waveTasks) {
399
- const depIds = (task.deps || '').split(';').filter(Boolean)
400
- const depStatuses = depIds.map(id => tasks.find(t => t.id === id)?.status)
401
- if (depStatuses.some(s => s === 'failed' || s === 'skipped')) {
402
- task.status = 'skipped'
403
- task.error = `Dependency failed: ${depIds.filter((id, i) =>
404
- ['failed','skipped'].includes(depStatuses[i])).join(', ')}`
405
- }
406
- }
407
-
408
- const pendingTasks = waveTasks.filter(t => t.status === 'pending')
409
- if (pendingTasks.length === 0) {
410
- console.log(`Wave ${wave}: No pending tasks, skipping...`)
411
- continue
412
- }
413
-
414
- // 3. Build prev_context from completed upstream tasks
415
- for (const task of pendingTasks) {
416
- const contextIds = (task.context_from || '').split(';').filter(Boolean)
417
- const prevFindings = contextIds.map(id => {
418
- const src = tasks.find(t => t.id === id)
419
- if (!src?.findings) return ''
420
- return `## [${src.id}] ${src.title}\n${src.findings}\nArtifact: ${src.artifact_path || 'N/A'}`
421
- }).filter(Boolean).join('\n\n')
422
- task.prev_context = prevFindings
423
- }
424
-
425
- // 4. Write wave CSV
426
- Write(`${sessionFolder}/wave-${wave}.csv`, toCsv(pendingTasks))
427
-
428
- // 5. Execute wave
429
- spawn_agents_on_csv({
430
- csv_path: `${sessionFolder}/wave-${wave}.csv`,
431
- id_column: "id",
432
- instruction: Read("~ or <project>/.codex/skills/team-planex/instructions/agent-instruction.md"),
433
- max_concurrency: maxConcurrency,
434
- max_runtime_seconds: 1200,
435
- output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
436
- output_schema: {
437
- type: "object",
438
- properties: {
439
- id: { type: "string" },
440
- status: { type: "string", enum: ["completed", "failed"] },
441
- findings: { type: "string" },
442
- artifact_path: { type: "string" },
443
- error: { type: "string" }
444
- }
445
- }
446
- })
447
-
448
- // 6. Merge results into master CSV
449
- const results = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
450
- for (const r of results) {
451
- const t = tasks.find(t => t.id === r.id)
452
- if (t) Object.assign(t, r)
453
- }
454
- Write(`${sessionFolder}/tasks.csv`, toCsv(tasks))
455
-
456
- // 7. Cleanup temp files
457
- Bash(`rm -f ${sessionFolder}/wave-${wave}.csv ${sessionFolder}/wave-${wave}-results.csv`)
458
-
459
- // 8. Display wave summary
460
- const completed = results.filter(r => r.status === 'completed').length
461
- const failed = results.filter(r => r.status === 'failed').length
462
- console.log(`Wave ${wave} Complete: ${completed} completed, ${failed} failed`)
463
- }
464
- ```
465
-
466
- **Success Criteria**:
467
- - All waves executed in order
468
- - Each wave's results merged into master CSV before next wave starts
469
- - Dependent tasks skipped when predecessor failed
470
- - discoveries.ndjson accumulated across all waves
471
- - Planning wave completes before execution wave starts
472
-
473
- ---
474
-
475
- ### Phase 3: Results Aggregation
476
-
477
- **Objective**: Generate final results and human-readable report.
478
-
479
- ```javascript
480
- const tasks = parseCsv(Read(`${sessionFolder}/tasks.csv`))
481
- const completed = tasks.filter(t => t.status === 'completed')
482
- const failed = tasks.filter(t => t.status === 'failed')
483
- const skipped = tasks.filter(t => t.status === 'skipped')
484
-
485
- const planTasks = tasks.filter(t => t.role === 'planner')
486
- const execTasks = tasks.filter(t => t.role === 'executor')
487
-
488
- // Export results.csv
489
- Bash(`cp ${sessionFolder}/tasks.csv ${sessionFolder}/results.csv`)
490
-
491
- // Generate context.md
492
- let contextMd = `# PlanEx Pipeline Report\n\n`
493
- contextMd += `**Session**: ${sessionId}\n`
494
- contextMd += `**Input Type**: ${inputType}\n`
495
- contextMd += `**Execution Method**: ${executionMethod}\n`
496
- contextMd += `**Issues**: ${issueIds.join(', ')}\n\n`
497
-
498
- contextMd += `## Summary\n\n`
499
- contextMd += `| Status | Count |\n|--------|-------|\n`
500
- contextMd += `| Completed | ${completed.length} |\n`
501
- contextMd += `| Failed | ${failed.length} |\n`
502
- contextMd += `| Skipped | ${skipped.length} |\n\n`
503
-
504
- contextMd += `## Planning Wave\n\n`
505
- for (const t of planTasks) {
506
- const icon = t.status === 'completed' ? '[OK]' : t.status === 'failed' ? '[FAIL]' : '[SKIP]'
507
- contextMd += `${icon} **${t.id}**: ${t.title}\n`
508
- if (t.findings) contextMd += ` ${t.findings.substring(0, 200)}\n`
509
- if (t.artifact_path) contextMd += ` Solution: ${t.artifact_path}\n`
510
- contextMd += `\n`
511
- }
512
-
513
- contextMd += `## Execution Wave\n\n`
514
- for (const t of execTasks) {
515
- const icon = t.status === 'completed' ? '[OK]' : t.status === 'failed' ? '[FAIL]' : '[SKIP]'
516
- contextMd += `${icon} **${t.id}**: ${t.title}\n`
517
- if (t.findings) contextMd += ` ${t.findings.substring(0, 200)}\n`
518
- if (t.error) contextMd += ` Error: ${t.error}\n`
519
- contextMd += `\n`
520
- }
521
-
522
- contextMd += `## Deliverables\n\n`
523
- contextMd += `| Artifact | Path |\n|----------|------|\n`
524
- contextMd += `| Solution Plans | ${sessionFolder}/artifacts/solutions/ |\n`
525
- contextMd += `| Build Results | ${sessionFolder}/builds/ |\n`
526
- contextMd += `| Discovery Board | ${sessionFolder}/discoveries.ndjson |\n`
527
-
528
- Write(`${sessionFolder}/context.md`, contextMd)
529
-
530
- // Display summary
531
- console.log(`
532
- PlanEx Pipeline Complete
533
- Input: ${inputType} (${issueIds.length} issues)
534
- Planning: ${planTasks.filter(t => t.status === 'completed').length}/${planTasks.length} completed
535
- Execution: ${execTasks.filter(t => t.status === 'completed').length}/${execTasks.length} completed
536
- Failed: ${failed.length} | Skipped: ${skipped.length}
537
- Output: ${sessionFolder}
538
- `)
539
- ```
540
-
541
- **Success Criteria**:
542
- - results.csv exported (all tasks)
543
- - context.md generated
544
- - Summary displayed to user
545
-
546
- ---
547
-
548
- ## Shared Discovery Board Protocol
549
-
550
- Both planner and executor agents share the same discoveries.ndjson file:
551
-
552
- ```jsonl
553
- {"ts":"2026-03-08T10:00:00Z","worker":"PLAN-001","type":"solution_designed","data":{"issue_id":"ISS-20260308-120000","approach":"refactor","task_count":4,"estimated_files":6}}
554
- {"ts":"2026-03-08T10:05:00Z","worker":"PLAN-002","type":"conflict_warning","data":{"issue_ids":["ISS-20260308-120000","ISS-20260308-120001"],"overlapping_files":["src/auth/handler.ts"]}}
555
- {"ts":"2026-03-08T10:10:00Z","worker":"EXEC-001","type":"impl_result","data":{"issue_id":"ISS-20260308-120000","files_changed":3,"tests_pass":true,"commit":"abc123"}}
556
- ```
557
-
558
- **Discovery Types**:
559
-
560
- | Type | Dedup Key | Data Schema | Description |
561
- |------|-----------|-------------|-------------|
562
- | `solution_designed` | `issue_id` | `{issue_id, approach, task_count, estimated_files}` | Planner: solution plan completed |
563
- | `conflict_warning` | `issue_ids` | `{issue_ids, overlapping_files}` | Planner: file overlap detected between issues |
564
- | `pattern_found` | `pattern+location` | `{pattern, location, description}` | Any: code pattern identified |
565
- | `impl_result` | `issue_id` | `{issue_id, files_changed, tests_pass, commit}` | Executor: implementation outcome |
566
- | `test_failure` | `issue_id` | `{issue_id, test_file, error_msg}` | Executor: test failure details |
567
-
568
- ---
569
-
570
- ## Error Handling
571
-
572
- | Error | Resolution |
573
- |-------|------------|
574
- | Circular dependency | Detect in wave computation, abort with error message |
575
- | CSV agent timeout | Mark as failed in results, continue with wave |
576
- | CSV agent failed | Mark as failed, skip dependent EXEC tasks |
577
- | Planner fails to create solution | Mark PLAN task failed, skip corresponding EXEC task |
578
- | Executor fails implementation | Mark as failed, report in context.md |
579
- | All agents in wave failed | Log error, offer retry or abort |
580
- | CSV parse error | Validate CSV format before execution, show line number |
581
- | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
582
- | No input provided | Ask user for input via AskUserQuestion |
583
- | Issue creation fails (text/plan input) | Report error, suggest manual issue creation |
584
- | Continue mode: no session found | List available sessions, prompt user to select |
585
-
586
- ---
587
-
588
- ## Core Rules
589
-
590
- 1. **Start Immediately**: First action is session initialization, then input parsing
591
- 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
592
- 3. **CSV is Source of Truth**: Master tasks.csv holds all state
593
- 4. **CSV First**: Default to csv-wave for all tasks; interactive only for edge cases
594
- 5. **Context Propagation**: prev_context built from master CSV, not from memory
595
- 6. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
596
- 7. **Skip on Failure**: If PLAN-N failed, skip EXEC-N automatically
597
- 8. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
598
- 9. **Two-Wave Pipeline**: Wave 1 = Planning (PLAN-*), Wave 2 = Execution (EXEC-*)
599
- 10. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
600
-
601
-
602
- ---
603
-
604
- ## Coordinator Role Constraints (Main Agent)
605
-
606
- **CRITICAL**: The coordinator (main agent executing this skill) is responsible for **orchestration only**, NOT implementation.
607
-
608
- 15. **Coordinator Does NOT Execute Code**: The main agent MUST NOT write, modify, or implement any code directly. All implementation work is delegated to spawned team agents. The coordinator only:
609
- - Spawns agents with task assignments
610
- - Waits for agent callbacks
611
- - Merges results and coordinates workflow
612
- - Manages workflow transitions between phases
613
-
614
- 16. **Patient Waiting is Mandatory**: Agent execution takes significant time (typically 10-30 minutes per phase, sometimes longer). The coordinator MUST:
615
- - Wait patiently for `wait()` calls to complete
616
- - NOT skip workflow steps due to perceived delays
617
- - NOT assume agents have failed just because they're taking time
618
- - Trust the timeout mechanisms defined in the skill
619
-
620
- 17. **Use send_input for Clarification**: When agents need guidance or appear stuck, the coordinator MUST:
621
- - Use `send_input()` to ask questions or provide clarification
622
- - NOT skip the agent or move to next phase prematurely
623
- - Give agents opportunity to respond before escalating
624
- - Example: `send_input({ id: agent_id, message: "Please provide status update or clarify blockers" })`
625
-
626
- 18. **No Workflow Shortcuts**: The coordinator MUST NOT:
627
- - Skip phases or stages defined in the workflow
628
- - Bypass required approval or review steps
629
- - Execute dependent tasks before prerequisites complete
630
- - Assume task completion without explicit agent callback
631
- - Make up or fabricate agent results
632
-
633
- 19. **Respect Long-Running Processes**: This is a complex multi-agent workflow that requires patience:
634
- - Total execution time may range from 30-90 minutes or longer
635
- - Each phase may take 10-30 minutes depending on complexity
636
- - The coordinator must remain active and attentive throughout the entire process
637
- - Do not terminate or skip steps due to time concerns
1
+ ---
2
+ name: team-planex-v2
3
+ description: Hybrid team skill for plan-and-execute pipeline. CSV wave primary for planning and execution. Planner decomposes requirements into issues and solutions, then executor implements each via CLI tools. Supports issue IDs, text input, and plan file input.
4
+ argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] [--exec=codex|gemini|qwen] \"issue IDs or --text 'description' or --plan path\""
5
+ allowed-tools: spawn_agents_on_csv, spawn_agent, wait, send_input, close_agent, Read, Write, Edit, Bash, Glob, Grep, request_user_input
6
+ ---
7
+
8
+ ## Auto Mode
9
+
10
+ When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
11
+
12
+ # Team PlanEx
13
+
14
+ ## Usage
15
+
16
+ ```bash
17
+ $team-planex-v2 "ISS-20260308-120000 ISS-20260308-120001"
18
+ $team-planex-v2 -c 3 "--text 'Add rate limiting to all API endpoints'"
19
+ $team-planex-v2 -y "--plan .workflow/specs/roadmap.md --exec=codex"
20
+ $team-planex-v2 --continue "planex-rate-limit-20260308"
21
+ ```
22
+
23
+ **Flags**:
24
+ - `-y, --yes`: Skip all confirmations (auto mode)
25
+ - `-c, --concurrency N`: Max concurrent agents within each wave (default: 3)
26
+ - `--continue`: Resume existing session
27
+ - `--exec=codex|gemini|qwen`: Force execution method for implementation
28
+
29
+ **Output Directory**: `.workflow/.csv-wave/{session-id}/`
30
+ **Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
31
+
32
+ ---
33
+
34
+ ## Overview
35
+
36
+ Plan-and-execute pipeline for issue-based development. Planner decomposes requirements into individual issues with solution plans, then executors implement each issue independently.
37
+
38
+ **Execution Model**: Hybrid -- CSV wave pipeline (primary) + individual agent spawn (secondary)
39
+
40
+ ```
41
+ +---------------------------------------------------------------------------+
42
+ | TEAM PLANEX WORKFLOW |
43
+ +---------------------------------------------------------------------------+
44
+ | |
45
+ | Phase 0: Pre-Wave Interactive (Input Analysis) |
46
+ | +-- Parse input type (issue IDs / --text / --plan) |
47
+ | +-- Determine execution method (codex/gemini/auto) |
48
+ | +-- Create issues from text/plan if needed |
49
+ | +-- Output: refined issue list for decomposition |
50
+ | |
51
+ | Phase 1: Requirement -> CSV + Classification |
52
+ | +-- Planning wave: generate solutions for each issue |
53
+ | +-- Execution wave: implement each issue independently |
54
+ | +-- Classify tasks: csv-wave (default) | interactive |
55
+ | +-- Compute dependency waves (topological sort) |
56
+ | +-- Generate tasks.csv with wave + exec_mode columns |
57
+ | +-- User validates task breakdown (skip if -y) |
58
+ | |
59
+ | Phase 2: Wave Execution Engine (Extended) |
60
+ | +-- For each wave (1..N): |
61
+ | | +-- Build wave CSV (filter csv-wave tasks for this wave) |
62
+ | | +-- Inject previous findings into prev_context column |
63
+ | | +-- spawn_agents_on_csv(wave CSV) |
64
+ | | +-- Merge all results into master tasks.csv |
65
+ | | +-- Check: any failed? -> skip dependents |
66
+ | +-- discoveries.ndjson shared across all modes (append-only) |
67
+ | |
68
+ | Phase 3: Results Aggregation |
69
+ | +-- Export final results.csv |
70
+ | +-- Generate context.md with all findings |
71
+ | +-- Display summary: completed/failed/skipped per wave |
72
+ | +-- Offer: view results | retry failed | done |
73
+ | |
74
+ +---------------------------------------------------------------------------+
75
+ ```
76
+
77
+ ---
78
+
79
+ ## Task Classification Rules
80
+
81
+ Each task is classified by `exec_mode`:
82
+
83
+ | exec_mode | Mechanism | Criteria |
84
+ |-----------|-----------|----------|
85
+ | `csv-wave` | `spawn_agents_on_csv` | One-shot, structured I/O, no multi-round interaction |
86
+ | `interactive` | `spawn_agent`/`wait`/`send_input`/`close_agent` | Multi-round, clarification needed |
87
+
88
+ **Classification Decision**:
89
+
90
+ | Task Property | Classification |
91
+ |---------------|---------------|
92
+ | Solution planning per issue (PLAN-*) | `csv-wave` |
93
+ | Code implementation per issue (EXEC-*) | `csv-wave` |
94
+ | Complex multi-issue coordination (rare) | `interactive` |
95
+
96
+ > In the standard PlanEx pipeline, all tasks default to `csv-wave`. Interactive mode is reserved for edge cases requiring multi-round coordination.
97
+
98
+ ---
99
+
100
+ ## CSV Schema
101
+
102
+ ### tasks.csv (Master State)
103
+
104
+ ```csv
105
+ id,title,description,role,issue_ids,input_type,raw_input,exec_mode,execution_method,deps,context_from,wave,status,findings,artifact_path,error
106
+ "PLAN-001","Plan issue-1","Generate solution for ISS-20260308-120000","planner","ISS-20260308-120000","issues","ISS-20260308-120000","csv-wave","","","","1","pending","","",""
107
+ "PLAN-002","Plan issue-2","Generate solution for ISS-20260308-120001","planner","ISS-20260308-120001","issues","ISS-20260308-120001","csv-wave","","","","1","pending","","",""
108
+ "EXEC-001","Implement issue-1","Implement solution for ISS-20260308-120000","executor","ISS-20260308-120000","","","csv-wave","gemini","PLAN-001","PLAN-001","2","pending","","",""
109
+ "EXEC-002","Implement issue-2","Implement solution for ISS-20260308-120001","executor","ISS-20260308-120001","","","csv-wave","gemini","PLAN-002","PLAN-002","2","pending","","",""
110
+ ```
111
+
112
+ **Columns**:
113
+
114
+ | Column | Phase | Description |
115
+ |--------|-------|-------------|
116
+ | `id` | Input | Unique task identifier (PLAN-NNN, EXEC-NNN) |
117
+ | `title` | Input | Short task title |
118
+ | `description` | Input | Detailed task description |
119
+ | `role` | Input | Worker role: planner or executor |
120
+ | `issue_ids` | Input | Semicolon-separated issue IDs this task covers |
121
+ | `input_type` | Input | Input type: issues, text, or plan (planner tasks only) |
122
+ | `raw_input` | Input | Raw input text (planner tasks only) |
123
+ | `exec_mode` | Input | `csv-wave` or `interactive` |
124
+ | `execution_method` | Input | codex, gemini, qwen, or empty (executor tasks only) |
125
+ | `deps` | Input | Semicolon-separated dependency task IDs |
126
+ | `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
127
+ | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
128
+ | `status` | Output | `pending` -> `completed` / `failed` / `skipped` |
129
+ | `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
130
+ | `artifact_path` | Output | Path to generated artifact (solution file, build result) |
131
+ | `error` | Output | Error message if failed (empty if success) |
132
+
133
+ ### Per-Wave CSV (Temporary)
134
+
135
+ Each wave generates a temporary `wave-{N}.csv` with extra `prev_context` column (csv-wave tasks only).
136
+
137
+ ---
138
+
139
+ ## Output Artifacts
140
+
141
+ | File | Purpose | Lifecycle |
142
+ |------|---------|-----------|
143
+ | `tasks.csv` | Master state -- all tasks with status/findings | Updated after each wave |
144
+ | `wave-{N}.csv` | Per-wave input (temporary, csv-wave tasks only) | Created before wave, deleted after |
145
+ | `results.csv` | Final export of all task results | Created in Phase 3 |
146
+ | `discoveries.ndjson` | Shared exploration board (all agents) | Append-only, carries across waves |
147
+ | `context.md` | Human-readable execution report | Created in Phase 3 |
148
+ | `artifacts/solutions/{issueId}.json` | Planner solution artifacts | Created by planner agents |
149
+ | `builds/{issueId}.json` | Executor build results | Created by executor agents |
150
+
151
+ ---
152
+
153
+ ## Session Structure
154
+
155
+ ```
156
+ .workflow/.csv-wave/{session-id}/
157
+ +-- tasks.csv # Master state (all tasks)
158
+ +-- results.csv # Final results export
159
+ +-- discoveries.ndjson # Shared discovery board
160
+ +-- context.md # Human-readable report
161
+ +-- wave-{N}.csv # Temporary per-wave input
162
+ +-- artifacts/
163
+ | +-- solutions/ # Planner output
164
+ | +-- {issueId}.json
165
+ +-- builds/ # Executor output
166
+ | +-- {issueId}.json
167
+ +-- wisdom/ # Cross-task knowledge
168
+ +-- learnings.md
169
+ +-- decisions.md
170
+ +-- conventions.md
171
+ +-- issues.md
172
+ ```
173
+
174
+ ---
175
+
176
+ ## Implementation
177
+
178
+ ### Session Initialization
179
+
180
+ ```javascript
181
+ const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
182
+
183
+ const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
184
+ const continueMode = $ARGUMENTS.includes('--continue')
185
+ const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
186
+ const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 3
187
+
188
+ const requirement = $ARGUMENTS
189
+ .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
190
+ .trim()
191
+
192
+ // Parse execution method
193
+ let executionMethod = 'gemini' // default
194
+ const execMatch = requirement.match(/--exec=(\w+)/)
195
+ if (execMatch) executionMethod = execMatch[1]
196
+
197
+ // Detect input type
198
+ const issueIdPattern = /ISS-\d{8}-\d{6}/g
199
+ const textMatch = requirement.match(/--text\s+'([^']+)'/)
200
+ const planMatch = requirement.match(/--plan\s+(\S+)/)
201
+
202
+ let inputType = 'issues'
203
+ let rawInput = requirement
204
+ let issueIds = requirement.match(issueIdPattern) || []
205
+
206
+ if (textMatch) {
207
+ inputType = 'text'
208
+ rawInput = textMatch[1]
209
+ issueIds = [] // will be created by planner
210
+ } else if (planMatch) {
211
+ inputType = 'plan'
212
+ rawInput = planMatch[1]
213
+ issueIds = [] // will be parsed from plan file
214
+ }
215
+
216
+ // If no input detected, ask user
217
+ if (issueIds.length === 0 && inputType === 'issues') {
218
+ const answer = request_user_input({
219
+ questions: [{
220
+ question: "No input detected. Choose input method.",
221
+ header: "Input",
222
+ id: "input_method",
223
+ options: [
224
+ { label: "Enter IDs", description: "Provide issue IDs (e.g., ISS-20260308-120000)" },
225
+ { label: "Cancel", description: "Abort the pipeline" }
226
+ ]
227
+ }]
228
+ })
229
+ const reply = answer.answers.input_method.answers[0]; if (reply === "Cancel") return
230
+ issueIds = reply.match(issueIdPattern) || []
231
+ if (issueIds.length === 0 && !reply.includes('--text') && !reply.includes('--plan')) {
232
+ inputType = 'text'
233
+ rawInput = reply
234
+ }
235
+ }
236
+
237
+ // Execution method selection (interactive if no flag)
238
+ if (!execMatch && !AUTO_YES) {
239
+ const methodChoice = request_user_input({
240
+ questions: [{
241
+ question: "Select execution method for implementation.",
242
+ header: "Exec Method",
243
+ id: "exec_method",
244
+ options: [
245
+ { label: "Gemini (Recommended)", description: "gemini-2.5-pro (best for <= 3 tasks)" },
246
+ { label: "Codex", description: "gpt-5.2 (best for > 3 tasks)" },
247
+ { label: "Auto", description: "Auto-select based on task count" }
248
+ ]
249
+ }]
250
+ })
251
+ const chosen = methodChoice.answers.exec_method.answers[0]
252
+ if (chosen === 'Codex') executionMethod = 'codex'
253
+ else if (chosen === 'Auto') executionMethod = 'auto'
254
+ }
255
+
256
+ const slug = (issueIds[0] || rawInput).toLowerCase()
257
+ .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
258
+ .substring(0, 30)
259
+ const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
260
+ const sessionId = `planex-${slug}-${dateStr}`
261
+ const sessionFolder = `.workflow/.csv-wave/${sessionId}`
262
+
263
+ Bash(`mkdir -p ${sessionFolder}/{artifacts/solutions,builds,wisdom}`)
264
+
265
+ Write(`${sessionFolder}/discoveries.ndjson`, `# Discovery Board - ${sessionId}\n# Format: NDJSON\n`)
266
+
267
+ // Initialize wisdom files
268
+ Write(`${sessionFolder}/wisdom/learnings.md`, `# Learnings\n\nAccumulated during ${sessionId}\n`)
269
+ Write(`${sessionFolder}/wisdom/decisions.md`, `# Decisions\n\n`)
270
+ Write(`${sessionFolder}/wisdom/conventions.md`, `# Conventions\n\n`)
271
+ Write(`${sessionFolder}/wisdom/issues.md`, `# Issues\n\n`)
272
+
273
+ // Store session metadata
274
+ Write(`${sessionFolder}/session.json`, JSON.stringify({
275
+ session_id: sessionId,
276
+ pipeline_type: 'plan-execute',
277
+ input_type: inputType,
278
+ raw_input: rawInput,
279
+ issue_ids: issueIds,
280
+ execution_method: executionMethod,
281
+ created_at: getUtc8ISOString()
282
+ }, null, 2))
283
+ ```
284
+
285
+ ---
286
+
287
+ ### Phase 0: Pre-Wave Interactive (Input Analysis)
288
+
289
+ **Objective**: Parse and normalize input into a list of issue IDs ready for the planning wave.
290
+
291
+ **Input Type Handling**:
292
+
293
+ | Input Type | Processing |
294
+ |------------|-----------|
295
+ | `issues` (ISS-* IDs) | Use directly, verify exist via `ccw issue status` |
296
+ | `text` (--text flag) | Create issues via `ccw issue create --title ... --context ...` |
297
+ | `plan` (--plan flag) | Read plan file, parse phases/tasks, batch create issues |
298
+
299
+ For `text` input:
300
+ ```bash
301
+ # Create issue from text description
302
+ ccw issue create --title "<derived-title>" --context "<raw_input>"
303
+ # Parse output for new issue ID
304
+ ```
305
+
306
+ For `plan` input:
307
+ ```bash
308
+ # Read plan file
309
+ planContent = Read("<plan-path>")
310
+ # Parse phases/sections into individual issues
311
+ # Create each as a separate issue via ccw issue create
312
+ ```
313
+
314
+ After processing, update session.json with resolved issue_ids.
315
+
316
+ **Success Criteria**:
317
+ - All inputs resolved to valid issue IDs
318
+ - Session metadata updated with final issue list
319
+
320
+ ---
321
+
322
+ ### Phase 1: Requirement -> CSV + Classification
323
+
324
+ **Objective**: Generate tasks.csv with PLAN-* tasks (wave 1) and EXEC-* tasks (wave 2).
325
+
326
+ **Two-Wave Structure**:
327
+
328
+ Wave 1 (Planning): One PLAN-NNN task per issue, all independent (no deps), concurrent execution.
329
+ Wave 2 (Execution): One EXEC-NNN task per issue, each depends on its corresponding PLAN-NNN.
330
+
331
+ **Task Generation**:
332
+
333
+ ```javascript
334
+ const tasks = []
335
+
336
+ // Wave 1: Planning tasks (one per issue)
337
+ for (let i = 0; i < issueIds.length; i++) {
338
+ const n = String(i + 1).padStart(3, '0')
339
+ tasks.push({
340
+ id: `PLAN-${n}`,
341
+ title: `Plan ${issueIds[i]}`,
342
+ description: `Generate implementation solution for issue ${issueIds[i]}. Analyze requirements, design solution approach, break down into implementation tasks, identify files to modify/create.`,
343
+ role: 'planner',
344
+ issue_ids: issueIds[i],
345
+ input_type: inputType,
346
+ raw_input: inputType === 'issues' ? issueIds[i] : rawInput,
347
+ exec_mode: 'csv-wave',
348
+ execution_method: '',
349
+ deps: '',
350
+ context_from: '',
351
+ wave: '1',
352
+ status: 'pending',
353
+ findings: '', artifact_path: '', error: ''
354
+ })
355
+ }
356
+
357
+ // Wave 2: Execution tasks (one per issue, depends on corresponding PLAN)
358
+ for (let i = 0; i < issueIds.length; i++) {
359
+ const n = String(i + 1).padStart(3, '0')
360
+ // Resolve execution method
361
+ let method = executionMethod
362
+ if (method === 'auto') {
363
+ method = issueIds.length <= 3 ? 'gemini' : 'codex'
364
+ }
365
+ tasks.push({
366
+ id: `EXEC-${n}`,
367
+ title: `Implement ${issueIds[i]}`,
368
+ description: `Implement solution for issue ${issueIds[i]}. Load solution artifact, execute implementation via CLI, run tests, commit.`,
369
+ role: 'executor',
370
+ issue_ids: issueIds[i],
371
+ input_type: '',
372
+ raw_input: '',
373
+ exec_mode: 'csv-wave',
374
+ execution_method: method,
375
+ deps: `PLAN-${n}`,
376
+ context_from: `PLAN-${n}`,
377
+ wave: '2',
378
+ status: 'pending',
379
+ findings: '', artifact_path: '', error: ''
380
+ })
381
+ }
382
+
383
+ Write(`${sessionFolder}/tasks.csv`, toCsv(tasks))
384
+ ```
385
+
386
+ **User Validation**: Display task breakdown with wave assignment (skip if AUTO_YES).
387
+
388
+ **Success Criteria**:
389
+ - tasks.csv created with valid schema and wave assignments
390
+ - PLAN-* tasks in wave 1, EXEC-* tasks in wave 2
391
+ - Each EXEC-* depends on its corresponding PLAN-*
392
+ - No circular dependencies
393
+ - User approved (or AUTO_YES)
394
+
395
+ ---
396
+
397
+ ### Phase 2: Wave Execution Engine (Extended)
398
+
399
+ **Objective**: Execute tasks wave-by-wave with context propagation between planning and execution waves.
400
+
401
+ ```javascript
402
+ const masterCsv = Read(`${sessionFolder}/tasks.csv`)
403
+ let tasks = parseCsv(masterCsv)
404
+ const maxWave = Math.max(...tasks.map(t => parseInt(t.wave)))
405
+
406
+ for (let wave = 1; wave <= maxWave; wave++) {
407
+ console.log(`\nWave ${wave}/${maxWave} (${wave === 1 ? 'Planning' : 'Execution'})`)
408
+
409
+ // 1. Filter tasks for this wave
410
+ const waveTasks = tasks.filter(t => parseInt(t.wave) === wave && t.status === 'pending')
411
+
412
+ // 2. Check dependencies - skip if upstream failed
413
+ for (const task of waveTasks) {
414
+ const depIds = (task.deps || '').split(';').filter(Boolean)
415
+ const depStatuses = depIds.map(id => tasks.find(t => t.id === id)?.status)
416
+ if (depStatuses.some(s => s === 'failed' || s === 'skipped')) {
417
+ task.status = 'skipped'
418
+ task.error = `Dependency failed: ${depIds.filter((id, i) =>
419
+ ['failed','skipped'].includes(depStatuses[i])).join(', ')}`
420
+ }
421
+ }
422
+
423
+ const pendingTasks = waveTasks.filter(t => t.status === 'pending')
424
+ if (pendingTasks.length === 0) {
425
+ console.log(`Wave ${wave}: No pending tasks, skipping...`)
426
+ continue
427
+ }
428
+
429
+ // 3. Build prev_context from completed upstream tasks
430
+ for (const task of pendingTasks) {
431
+ const contextIds = (task.context_from || '').split(';').filter(Boolean)
432
+ const prevFindings = contextIds.map(id => {
433
+ const src = tasks.find(t => t.id === id)
434
+ if (!src?.findings) return ''
435
+ return `## [${src.id}] ${src.title}\n${src.findings}\nArtifact: ${src.artifact_path || 'N/A'}`
436
+ }).filter(Boolean).join('\n\n')
437
+ task.prev_context = prevFindings
438
+ }
439
+
440
+ // 4. Write wave CSV
441
+ Write(`${sessionFolder}/wave-${wave}.csv`, toCsv(pendingTasks))
442
+
443
+ // 5. Execute wave
444
+ spawn_agents_on_csv({
445
+ csv_path: `${sessionFolder}/wave-${wave}.csv`,
446
+ id_column: "id",
447
+ instruction: Read("~ or <project>/.codex/skills/team-planex/instructions/agent-instruction.md"),
448
+ max_concurrency: maxConcurrency,
449
+ max_runtime_seconds: 1200,
450
+ output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
451
+ output_schema: {
452
+ type: "object",
453
+ properties: {
454
+ id: { type: "string" },
455
+ status: { type: "string", enum: ["completed", "failed"] },
456
+ findings: { type: "string" },
457
+ artifact_path: { type: "string" },
458
+ error: { type: "string" }
459
+ }
460
+ }
461
+ })
462
+
463
+ // 6. Merge results into master CSV
464
+ const results = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
465
+ for (const r of results) {
466
+ const t = tasks.find(t => t.id === r.id)
467
+ if (t) Object.assign(t, r)
468
+ }
469
+ Write(`${sessionFolder}/tasks.csv`, toCsv(tasks))
470
+
471
+ // 7. Cleanup temp files
472
+ Bash(`rm -f ${sessionFolder}/wave-${wave}.csv ${sessionFolder}/wave-${wave}-results.csv`)
473
+
474
+ // 8. Display wave summary
475
+ const completed = results.filter(r => r.status === 'completed').length
476
+ const failed = results.filter(r => r.status === 'failed').length
477
+ console.log(`Wave ${wave} Complete: ${completed} completed, ${failed} failed`)
478
+ }
479
+ ```
480
+
481
+ **Success Criteria**:
482
+ - All waves executed in order
483
+ - Each wave's results merged into master CSV before next wave starts
484
+ - Dependent tasks skipped when predecessor failed
485
+ - discoveries.ndjson accumulated across all waves
486
+ - Planning wave completes before execution wave starts
487
+
488
+ ---
489
+
490
+ ### Phase 3: Results Aggregation
491
+
492
+ **Objective**: Generate final results and human-readable report.
493
+
494
+ ```javascript
495
+ const tasks = parseCsv(Read(`${sessionFolder}/tasks.csv`))
496
+ const completed = tasks.filter(t => t.status === 'completed')
497
+ const failed = tasks.filter(t => t.status === 'failed')
498
+ const skipped = tasks.filter(t => t.status === 'skipped')
499
+
500
+ const planTasks = tasks.filter(t => t.role === 'planner')
501
+ const execTasks = tasks.filter(t => t.role === 'executor')
502
+
503
+ // Export results.csv
504
+ Bash(`cp ${sessionFolder}/tasks.csv ${sessionFolder}/results.csv`)
505
+
506
+ // Generate context.md
507
+ let contextMd = `# PlanEx Pipeline Report\n\n`
508
+ contextMd += `**Session**: ${sessionId}\n`
509
+ contextMd += `**Input Type**: ${inputType}\n`
510
+ contextMd += `**Execution Method**: ${executionMethod}\n`
511
+ contextMd += `**Issues**: ${issueIds.join(', ')}\n\n`
512
+
513
+ contextMd += `## Summary\n\n`
514
+ contextMd += `| Status | Count |\n|--------|-------|\n`
515
+ contextMd += `| Completed | ${completed.length} |\n`
516
+ contextMd += `| Failed | ${failed.length} |\n`
517
+ contextMd += `| Skipped | ${skipped.length} |\n\n`
518
+
519
+ contextMd += `## Planning Wave\n\n`
520
+ for (const t of planTasks) {
521
+ const icon = t.status === 'completed' ? '[OK]' : t.status === 'failed' ? '[FAIL]' : '[SKIP]'
522
+ contextMd += `${icon} **${t.id}**: ${t.title}\n`
523
+ if (t.findings) contextMd += ` ${t.findings.substring(0, 200)}\n`
524
+ if (t.artifact_path) contextMd += ` Solution: ${t.artifact_path}\n`
525
+ contextMd += `\n`
526
+ }
527
+
528
+ contextMd += `## Execution Wave\n\n`
529
+ for (const t of execTasks) {
530
+ const icon = t.status === 'completed' ? '[OK]' : t.status === 'failed' ? '[FAIL]' : '[SKIP]'
531
+ contextMd += `${icon} **${t.id}**: ${t.title}\n`
532
+ if (t.findings) contextMd += ` ${t.findings.substring(0, 200)}\n`
533
+ if (t.error) contextMd += ` Error: ${t.error}\n`
534
+ contextMd += `\n`
535
+ }
536
+
537
+ contextMd += `## Deliverables\n\n`
538
+ contextMd += `| Artifact | Path |\n|----------|------|\n`
539
+ contextMd += `| Solution Plans | ${sessionFolder}/artifacts/solutions/ |\n`
540
+ contextMd += `| Build Results | ${sessionFolder}/builds/ |\n`
541
+ contextMd += `| Discovery Board | ${sessionFolder}/discoveries.ndjson |\n`
542
+
543
+ Write(`${sessionFolder}/context.md`, contextMd)
544
+
545
+ // Display summary
546
+ console.log(`
547
+ PlanEx Pipeline Complete
548
+ Input: ${inputType} (${issueIds.length} issues)
549
+ Planning: ${planTasks.filter(t => t.status === 'completed').length}/${planTasks.length} completed
550
+ Execution: ${execTasks.filter(t => t.status === 'completed').length}/${execTasks.length} completed
551
+ Failed: ${failed.length} | Skipped: ${skipped.length}
552
+ Output: ${sessionFolder}
553
+ `)
554
+ ```
555
+
556
+ **Success Criteria**:
557
+ - results.csv exported (all tasks)
558
+ - context.md generated
559
+ - Summary displayed to user
560
+
561
+ ---
562
+
563
+ ## Shared Discovery Board Protocol
564
+
565
+ Both planner and executor agents share the same discoveries.ndjson file:
566
+
567
+ ```jsonl
568
+ {"ts":"2026-03-08T10:00:00Z","worker":"PLAN-001","type":"solution_designed","data":{"issue_id":"ISS-20260308-120000","approach":"refactor","task_count":4,"estimated_files":6}}
569
+ {"ts":"2026-03-08T10:05:00Z","worker":"PLAN-002","type":"conflict_warning","data":{"issue_ids":["ISS-20260308-120000","ISS-20260308-120001"],"overlapping_files":["src/auth/handler.ts"]}}
570
+ {"ts":"2026-03-08T10:10:00Z","worker":"EXEC-001","type":"impl_result","data":{"issue_id":"ISS-20260308-120000","files_changed":3,"tests_pass":true,"commit":"abc123"}}
571
+ ```
572
+
573
+ **Discovery Types**:
574
+
575
+ | Type | Dedup Key | Data Schema | Description |
576
+ |------|-----------|-------------|-------------|
577
+ | `solution_designed` | `issue_id` | `{issue_id, approach, task_count, estimated_files}` | Planner: solution plan completed |
578
+ | `conflict_warning` | `issue_ids` | `{issue_ids, overlapping_files}` | Planner: file overlap detected between issues |
579
+ | `pattern_found` | `pattern+location` | `{pattern, location, description}` | Any: code pattern identified |
580
+ | `impl_result` | `issue_id` | `{issue_id, files_changed, tests_pass, commit}` | Executor: implementation outcome |
581
+ | `test_failure` | `issue_id` | `{issue_id, test_file, error_msg}` | Executor: test failure details |
582
+
583
+ ---
584
+
585
+ ## Error Handling
586
+
587
+ | Error | Resolution |
588
+ |-------|------------|
589
+ | Circular dependency | Detect in wave computation, abort with error message |
590
+ | CSV agent timeout | Mark as failed in results, continue with wave |
591
+ | CSV agent failed | Mark as failed, skip dependent EXEC tasks |
592
+ | Planner fails to create solution | Mark PLAN task failed, skip corresponding EXEC task |
593
+ | Executor fails implementation | Mark as failed, report in context.md |
594
+ | All agents in wave failed | Log error, offer retry or abort |
595
+ | CSV parse error | Validate CSV format before execution, show line number |
596
+ | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
597
+ | No input provided | Ask user for input via request_user_input |
598
+ | Issue creation fails (text/plan input) | Report error, suggest manual issue creation |
599
+ | Continue mode: no session found | List available sessions, prompt user to select |
600
+
601
+ ---
602
+
603
+ ## Core Rules
604
+
605
+ 1. **Start Immediately**: First action is session initialization, then input parsing
606
+ 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
607
+ 3. **CSV is Source of Truth**: Master tasks.csv holds all state
608
+ 4. **CSV First**: Default to csv-wave for all tasks; interactive only for edge cases
609
+ 5. **Context Propagation**: prev_context built from master CSV, not from memory
610
+ 6. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
611
+ 7. **Skip on Failure**: If PLAN-N failed, skip EXEC-N automatically
612
+ 8. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
613
+ 9. **Two-Wave Pipeline**: Wave 1 = Planning (PLAN-*), Wave 2 = Execution (EXEC-*)
614
+ 10. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
615
+
616
+
617
+ ---
618
+
619
+ ## Coordinator Role Constraints (Main Agent)
620
+
621
+ **CRITICAL**: The coordinator (main agent executing this skill) is responsible for **orchestration only**, NOT implementation.
622
+
623
+ 15. **Coordinator Does NOT Execute Code**: The main agent MUST NOT write, modify, or implement any code directly. All implementation work is delegated to spawned team agents. The coordinator only:
624
+ - Spawns agents with task assignments
625
+ - Waits for agent callbacks
626
+ - Merges results and coordinates workflow
627
+ - Manages workflow transitions between phases
628
+
629
+ 16. **Patient Waiting is Mandatory**: Agent execution takes significant time (typically 10-30 minutes per phase, sometimes longer). The coordinator MUST:
630
+ - Wait patiently for `wait()` calls to complete
631
+ - NOT skip workflow steps due to perceived delays
632
+ - NOT assume agents have failed just because they're taking time
633
+ - Trust the timeout mechanisms defined in the skill
634
+
635
+ 17. **Use send_input for Clarification**: When agents need guidance or appear stuck, the coordinator MUST:
636
+ - Use `send_input()` to ask questions or provide clarification
637
+ - NOT skip the agent or move to next phase prematurely
638
+ - Give agents opportunity to respond before escalating
639
+ - Example: `send_input({ id: agent_id, message: "Please provide status update or clarify blockers" })`
640
+
641
+ 18. **No Workflow Shortcuts**: The coordinator MUST NOT:
642
+ - Skip phases or stages defined in the workflow
643
+ - Bypass required approval or review steps
644
+ - Execute dependent tasks before prerequisites complete
645
+ - Assume task completion without explicit agent callback
646
+ - Make up or fabricate agent results
647
+
648
+ 19. **Respect Long-Running Processes**: This is a complex multi-agent workflow that requires patience:
649
+ - Total execution time may range from 30-90 minutes or longer
650
+ - Each phase may take 10-30 minutes depending on complexity
651
+ - The coordinator must remain active and attentive throughout the entire process
652
+ - Do not terminate or skip steps due to time concerns