claude-code-workflow 7.2.14 → 7.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. package/.claude/commands/workflow/analyze-with-file.md +7 -0
  2. package/.codex/skills/analyze-with-file/SKILL.md +1181 -1182
  3. package/.codex/skills/brainstorm/SKILL.md +723 -725
  4. package/.codex/skills/brainstorm-with-file/SKILL.md +10 -5
  5. package/.codex/skills/clean/SKILL.md +33 -26
  6. package/.codex/skills/collaborative-plan-with-file/SKILL.md +830 -831
  7. package/.codex/skills/csv-wave-pipeline/SKILL.md +906 -906
  8. package/.codex/skills/issue-discover/SKILL.md +57 -50
  9. package/.codex/skills/issue-discover/phases/01-issue-new.md +18 -11
  10. package/.codex/skills/issue-discover/phases/02-discover.md +31 -26
  11. package/.codex/skills/issue-discover/phases/03-discover-by-prompt.md +13 -11
  12. package/.codex/skills/issue-discover/phases/04-quick-execute.md +32 -27
  13. package/.codex/skills/parallel-dev-cycle/SKILL.md +402 -402
  14. package/.codex/skills/project-documentation-workflow/SKILL.md +13 -3
  15. package/.codex/skills/roadmap-with-file/SKILL.md +901 -897
  16. package/.codex/skills/session-sync/SKILL.md +222 -212
  17. package/.codex/skills/spec-add/SKILL.md +620 -613
  18. package/.codex/skills/spec-generator/SKILL.md +2 -2
  19. package/.codex/skills/spec-generator/phases/01-5-requirement-clarification.md +10 -10
  20. package/.codex/skills/spec-generator/phases/01-discovery.md +11 -18
  21. package/.codex/skills/spec-generator/phases/02-product-brief.md +5 -5
  22. package/.codex/skills/spec-generator/phases/03-requirements.md +7 -7
  23. package/.codex/skills/spec-generator/phases/04-architecture.md +4 -4
  24. package/.codex/skills/spec-generator/phases/05-epics-stories.md +5 -6
  25. package/.codex/skills/spec-generator/phases/06-readiness-check.md +10 -17
  26. package/.codex/skills/spec-generator/phases/07-issue-export.md +326 -329
  27. package/.codex/skills/spec-setup/SKILL.md +669 -657
  28. package/.codex/skills/team-arch-opt/SKILL.md +50 -50
  29. package/.codex/skills/team-arch-opt/agents/completion-handler.md +3 -3
  30. package/.codex/skills/team-brainstorm/SKILL.md +724 -725
  31. package/.codex/skills/team-coordinate/SKILL.md +51 -51
  32. package/.codex/skills/team-coordinate/agents/completion-handler.md +3 -3
  33. package/.codex/skills/team-coordinate/agents/plan-reviewer.md +4 -4
  34. package/.codex/skills/team-designer/SKILL.md +691 -691
  35. package/.codex/skills/team-designer/agents/requirement-clarifier.md +11 -12
  36. package/.codex/skills/team-executor/SKILL.md +45 -45
  37. package/.codex/skills/team-frontend/SKILL.md +45 -45
  38. package/.codex/skills/team-frontend/agents/completion-handler.md +3 -3
  39. package/.codex/skills/team-frontend/agents/qa-gate-reviewer.md +4 -4
  40. package/.codex/skills/team-frontend-debug/SKILL.md +50 -50
  41. package/.codex/skills/team-frontend-debug/agents/completion-handler.md +3 -3
  42. package/.codex/skills/team-frontend-debug/agents/conditional-skip-gate.md +4 -4
  43. package/.codex/skills/team-issue/SKILL.md +751 -740
  44. package/.codex/skills/team-iterdev/SKILL.md +825 -826
  45. package/.codex/skills/team-lifecycle-v4/SKILL.md +775 -775
  46. package/.codex/skills/team-lifecycle-v4/agents/quality-gate.md +165 -165
  47. package/.codex/skills/team-lifecycle-v4/agents/requirement-clarifier.md +163 -163
  48. package/.codex/skills/team-perf-opt/SKILL.md +50 -50
  49. package/.codex/skills/team-perf-opt/agents/completion-handler.md +3 -3
  50. package/.codex/skills/team-planex-v2/SKILL.md +652 -637
  51. package/.codex/skills/team-quality-assurance/SKILL.md +51 -52
  52. package/.codex/skills/team-review/SKILL.md +40 -40
  53. package/.codex/skills/team-roadmap-dev/SKILL.md +51 -51
  54. package/.codex/skills/team-roadmap-dev/agents/roadmap-discusser.md +8 -8
  55. package/.codex/skills/team-tech-debt/SKILL.md +50 -50
  56. package/.codex/skills/team-tech-debt/agents/plan-approver.md +5 -5
  57. package/.codex/skills/team-testing/SKILL.md +51 -52
  58. package/.codex/skills/team-uidesign/SKILL.md +40 -40
  59. package/.codex/skills/team-uidesign/agents/completion-handler.md +177 -177
  60. package/.codex/skills/team-ultra-analyze/SKILL.md +786 -787
  61. package/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md +8 -8
  62. package/.codex/skills/team-ux-improve/SKILL.md +51 -52
  63. package/.codex/skills/team-ux-improve/agents/ux-designer.md +2 -2
  64. package/.codex/skills/team-ux-improve/agents/ux-explorer.md +1 -1
  65. package/.codex/skills/unified-execute-with-file/SKILL.md +797 -796
  66. package/.codex/skills/workflow-execute/SKILL.md +1117 -1118
  67. package/.codex/skills/workflow-lite-planex/SKILL.md +1144 -1141
  68. package/.codex/skills/workflow-plan/SKILL.md +631 -636
  69. package/.codex/skills/workflow-tdd-plan/SKILL.md +753 -759
  70. package/.codex/skills/workflow-test-fix-cycle/SKILL.md +402 -392
  71. package/README.md +25 -0
  72. package/ccw/dist/commands/install.d.ts.map +1 -1
  73. package/ccw/dist/commands/install.js +12 -0
  74. package/ccw/dist/commands/install.js.map +1 -1
  75. package/package.json +1 -1
@@ -1,787 +1,786 @@
1
- ---
2
- name: team-ultra-analyze
3
- description: Deep collaborative analysis pipeline. Multi-perspective exploration, deep analysis, user-driven discussion loops, and cross-perspective synthesis. Supports Quick, Standard, and Deep pipeline modes.
4
- argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] [--mode quick|standard|deep] \"analysis topic\""
5
- allowed-tools: spawn_agents_on_csv, spawn_agent, wait, send_input, close_agent, Read, Write, Edit, Bash, Glob, Grep, AskUserQuestion
6
- ---
7
-
8
- ## Auto Mode
9
-
10
- When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
11
-
12
- # Team Ultra Analyze
13
-
14
- ## Usage
15
-
16
- ```bash
17
- $team-ultra-analyze "Analyze authentication module architecture and security"
18
- $team-ultra-analyze -c 4 --mode deep "Deep analysis of payment processing pipeline"
19
- $team-ultra-analyze -y --mode quick "Quick overview of API endpoint structure"
20
- $team-ultra-analyze --continue "uan-auth-analysis-20260308"
21
- ```
22
-
23
- **Flags**:
24
- - `-y, --yes`: Skip all confirmations (auto mode)
25
- - `-c, --concurrency N`: Max concurrent agents within each wave (default: 3)
26
- - `--mode`: Pipeline mode override (quick|standard|deep)
27
- - `--continue`: Resume existing session
28
-
29
- **Output Directory**: `.workflow/.csv-wave/{session-id}/`
30
- **Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
31
-
32
- ---
33
-
34
- ## Overview
35
-
36
- Deep collaborative analysis with multi-perspective exploration, deep analysis, user-driven discussion loops, and cross-perspective synthesis. Each perspective gets its own explorer and analyst, working in parallel. Discussion rounds allow the user to steer analysis depth and direction.
37
-
38
- **Execution Model**: Hybrid — CSV wave pipeline (primary) + individual agent spawn (secondary for discussion feedback loop)
39
-
40
- ```
41
- ┌─────────────────────────────────────────────────────────────────────────┐
42
- │ TEAM ULTRA ANALYZE WORKFLOW │
43
- ├─────────────────────────────────────────────────────────────────────────┤
44
- │ │
45
- │ Phase 0: Pre-Wave Interactive │
46
- │ ├─ Topic parsing + dimension detection │
47
- │ ├─ Pipeline mode selection (quick/standard/deep) │
48
- │ ├─ Perspective assignment │
49
- │ └─ Output: refined requirements for decomposition │
50
- │ │
51
- │ Phase 1: Requirement → CSV + Classification │
52
- │ ├─ Parse topic into exploration + analysis + discussion + synthesis │
53
- │ ├─ Assign roles: explorer, analyst, discussant, synthesizer │
54
- │ ├─ Classify tasks: csv-wave | interactive (exec_mode) │
55
- │ ├─ Compute dependency waves (topological sort → depth grouping) │
56
- │ ├─ Generate tasks.csv with wave + exec_mode columns │
57
- │ └─ User validates task breakdown (skip if -y) │
58
- │ │
59
- │ Phase 2: Wave Execution Engine (Extended) │
60
- │ ├─ For each wave (1..N): │
61
- │ │ ├─ Build wave CSV (filter csv-wave tasks for this wave) │
62
- │ │ ├─ Inject previous findings into prev_context column │
63
- │ │ ├─ spawn_agents_on_csv(wave CSV) │
64
- │ │ ├─ Execute post-wave interactive tasks (if any) │
65
- │ │ ├─ Merge all results into master tasks.csv │
66
- │ │ └─ Check: any failed? → skip dependents │
67
- │ └─ discoveries.ndjson shared across all modes (append-only) │
68
- │ │
69
- │ Phase 3: Post-Wave Interactive (Discussion Loop) │
70
- │ ├─ After discussant completes: user feedback gate │
71
- │ ├─ User chooses: continue deeper | adjust direction | done │
72
- │ ├─ Creates dynamic tasks (DISCUSS-N, ANALYZE-fix-N) as needed │
73
- │ └─ Max discussion rounds: quick=0, standard=1, deep=5 │
74
- │ │
75
- │ Phase 4: Results Aggregation │
76
- │ ├─ Export final results.csv │
77
- │ ├─ Generate context.md with all findings │
78
- │ ├─ Display summary: completed/failed/skipped per wave │
79
- │ └─ Offer: view results | export | archive │
80
- │ │
81
- └─────────────────────────────────────────────────────────────────────────┘
82
- ```
83
-
84
- ---
85
-
86
- ## Task Classification Rules
87
-
88
- Each task is classified by `exec_mode`:
89
-
90
- | exec_mode | Mechanism | Criteria |
91
- |-----------|-----------|----------|
92
- | `csv-wave` | `spawn_agents_on_csv` | One-shot, structured I/O, no multi-round interaction |
93
- | `interactive` | `spawn_agent`/`wait`/`send_input`/`close_agent` | Multi-round, user feedback, direction control |
94
-
95
- **Classification Decision**:
96
-
97
- | Task Property | Classification |
98
- |---------------|---------------|
99
- | Codebase exploration (single perspective) | `csv-wave` |
100
- | Parallel exploration (multiple perspectives) | `csv-wave` (parallel in same wave) |
101
- | Deep analysis (single perspective) | `csv-wave` |
102
- | Parallel analysis (multiple perspectives) | `csv-wave` (parallel in same wave) |
103
- | Direction-fix analysis (adjusted focus) | `csv-wave` |
104
- | Discussion processing (aggregate results) | `csv-wave` |
105
- | Final synthesis (cross-perspective integration) | `csv-wave` |
106
- | Discussion feedback gate (user interaction) | `interactive` |
107
- | Topic clarification (Phase 0) | `interactive` |
108
-
109
- ---
110
-
111
- ## CSV Schema
112
-
113
- ### tasks.csv (Master State)
114
-
115
- ```csv
116
- id,title,description,role,perspective,dimensions,discussion_round,discussion_type,deps,context_from,exec_mode,wave,status,findings,error
117
- "EXPLORE-001","Explore from technical perspective","Search codebase from technical perspective. Collect files, patterns, findings.","explorer","technical","architecture;implementation","0","","","","csv-wave","1","pending","",""
118
- "ANALYZE-001","Deep analysis from technical perspective","Analyze exploration results from technical perspective. Generate insights with confidence levels.","analyst","technical","architecture;implementation","0","","EXPLORE-001","EXPLORE-001","csv-wave","2","pending","",""
119
- "DISCUSS-001","Initial discussion round","Aggregate all analysis results. Identify convergent themes, conflicts, top discussion points.","discussant","","","1","initial","ANALYZE-001;ANALYZE-002","ANALYZE-001;ANALYZE-002","csv-wave","3","pending","",""
120
- ```
121
-
122
- **Columns**:
123
-
124
- | Column | Phase | Description |
125
- |--------|-------|-------------|
126
- | `id` | Input | Unique task identifier (string) |
127
- | `title` | Input | Short task title |
128
- | `description` | Input | Detailed task description |
129
- | `role` | Input | Worker role: explorer, analyst, discussant, synthesizer |
130
- | `perspective` | Input | Analysis perspective: technical, architectural, business, domain_expert |
131
- | `dimensions` | Input | Analysis dimensions (semicolon-separated): architecture, implementation, performance, security, concept, comparison, decision |
132
- | `discussion_round` | Input | Discussion round number (0 = N/A, 1+ = round number) |
133
- | `discussion_type` | Input | Discussion type: initial, deepen, direction-adjusted, specific-questions |
134
- | `deps` | Input | Semicolon-separated dependency task IDs |
135
- | `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
136
- | `exec_mode` | Input | `csv-wave` or `interactive` |
137
- | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
138
- | `status` | Output | `pending` → `completed` / `failed` / `skipped` |
139
- | `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
140
- | `error` | Output | Error message if failed (empty if success) |
141
-
142
- ### Per-Wave CSV (Temporary)
143
-
144
- Each wave generates a temporary `wave-{N}.csv` with extra `prev_context` column (csv-wave tasks only).
145
-
146
- ---
147
-
148
- ## Agent Registry (Interactive Agents)
149
-
150
- | Agent | Role File | Pattern | Responsibility | Position |
151
- |-------|-----------|---------|----------------|----------|
152
- | discussion-feedback | agents/discussion-feedback.md | 2.3 (wait-respond) | Collect user feedback after discussion round, create dynamic tasks | post-wave (after discussant wave) |
153
- | topic-analyzer | agents/topic-analyzer.md | 2.3 (wait-respond) | Parse topic, detect dimensions, select pipeline mode and perspectives | standalone (Phase 0) |
154
-
155
- > **COMPACT PROTECTION**: Agent files are execution documents. When context compression occurs, **you MUST immediately `Read` the corresponding agent.md** to reload.
156
-
157
- ---
158
-
159
- ## Output Artifacts
160
-
161
- | File | Purpose | Lifecycle |
162
- |------|---------|-----------|
163
- | `tasks.csv` | Master state — all tasks with status/findings | Updated after each wave |
164
- | `wave-{N}.csv` | Per-wave input (temporary, csv-wave tasks only) | Created before wave, deleted after |
165
- | `results.csv` | Final export of all task results | Created in Phase 4 |
166
- | `discoveries.ndjson` | Shared exploration board (all agents, both modes) | Append-only, carries across waves |
167
- | `context.md` | Human-readable execution report | Created in Phase 4 |
168
- | `interactive/{id}-result.json` | Results from interactive tasks | Created per interactive task |
169
-
170
- ---
171
-
172
- ## Session Structure
173
-
174
- ```
175
- .workflow/.csv-wave/{session-id}/
176
- ├── tasks.csv # Master state (all tasks, both modes)
177
- ├── results.csv # Final results export
178
- ├── discoveries.ndjson # Shared discovery board (all agents)
179
- ├── context.md # Human-readable report
180
- ├── wave-{N}.csv # Temporary per-wave input (csv-wave only)
181
- └── interactive/ # Interactive task artifacts
182
- └── {id}-result.json # Per-task results
183
- ```
184
-
185
- ---
186
-
187
- ## Implementation
188
-
189
- ### Session Initialization
190
-
191
- ```javascript
192
- const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
193
-
194
- // Parse flags
195
- const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
196
- const continueMode = $ARGUMENTS.includes('--continue')
197
- const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
198
- const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 3
199
- const modeMatch = $ARGUMENTS.match(/--mode\s+(quick|standard|deep)/)
200
- const explicitMode = modeMatch ? modeMatch[1] : null
201
-
202
- // Clean requirement text (remove flags)
203
- const topic = $ARGUMENTS
204
- .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+|--mode\s+\w+/g, '')
205
- .trim()
206
-
207
- const slug = topic.toLowerCase()
208
- .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
209
- .substring(0, 40)
210
- const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
211
- let sessionId = `uan-${slug}-${dateStr}`
212
- let sessionFolder = `.workflow/.csv-wave/${sessionId}`
213
-
214
- // Continue mode: find existing session
215
- if (continueMode) {
216
- const existing = Bash(`ls -t .workflow/.csv-wave/uan-* 2>/dev/null | head -1`).trim()
217
- if (existing) {
218
- sessionId = existing.split('/').pop()
219
- sessionFolder = existing
220
- }
221
- }
222
-
223
- Bash(`mkdir -p ${sessionFolder}/interactive`)
224
- ```
225
-
226
- ---
227
-
228
- ### Phase 0: Pre-Wave Interactive
229
-
230
- **Objective**: Parse topic, detect analysis dimensions, select pipeline mode, and assign perspectives.
231
-
232
- **Execution**:
233
-
234
- ```javascript
235
- const analyzer = spawn_agent({
236
- message: `
237
- ## TASK ASSIGNMENT
238
-
239
- ### MANDATORY FIRST STEPS (Agent Execute)
240
- 1. **Read role definition**: ~ or <project>/.codex/skills/team-ultra-analyze/agents/topic-analyzer.md (MUST read first)
241
- 2. Read: .workflow/project-tech.json (if exists)
242
-
243
- ---
244
-
245
- Goal: Analyze topic and recommend pipeline configuration
246
- Topic: ${topic}
247
- Explicit Mode: ${explicitMode || 'auto-detect'}
248
-
249
- ### Task
250
- 1. Detect analysis dimensions from topic keywords:
251
- - architecture, implementation, performance, security, concept, comparison, decision
252
- 2. Select perspectives based on dimensions:
253
- - technical, architectural, business, domain_expert
254
- 3. Determine pipeline mode (if not explicitly set):
255
- - Complexity 1-3 → quick, 4-6 → standard, 7+ → deep
256
- 4. Return structured configuration
257
- `
258
- })
259
-
260
- const analyzerResult = wait({ ids: [analyzer], timeout_ms: 120000 })
261
-
262
- if (analyzerResult.timed_out) {
263
- send_input({ id: analyzer, message: "Please finalize and output current findings." })
264
- wait({ ids: [analyzer], timeout_ms: 60000 })
265
- }
266
-
267
- close_agent({ id: analyzer })
268
-
269
- // Parse result: pipeline_mode, perspectives[], dimensions[], depth
270
- Write(`${sessionFolder}/interactive/topic-analyzer-result.json`, JSON.stringify({
271
- task_id: "topic-analysis",
272
- status: "completed",
273
- pipeline_mode: parsedMode,
274
- perspectives: parsedPerspectives,
275
- dimensions: parsedDimensions,
276
- depth: parsedDepth,
277
- timestamp: getUtc8ISOString()
278
- }))
279
- ```
280
-
281
- If not AUTO_YES, present user with configuration for confirmation:
282
-
283
- ```javascript
284
- if (!AUTO_YES) {
285
- const answer = AskUserQuestion({
286
- questions: [{
287
- question: `Topic: "${topic}"\nPipeline: ${pipeline_mode}\nPerspectives: ${perspectives.join(', ')}\nDimensions: ${dimensions.join(', ')}\n\nApprove?`,
288
- header: "Analysis Configuration",
289
- multiSelect: false,
290
- options: [
291
- { label: "Approve", description: `Use ${pipeline_mode} mode with ${perspectives.length} perspectives` },
292
- { label: "Quick", description: "1 explorer 1 analyst synthesizer (fast)" },
293
- { label: "Standard", description: "N explorers N analysts discussion synthesizer" },
294
- { label: "Deep", description: "N explorers → N analysts → discussion loop (up to 5 rounds) → synthesizer" }
295
- ]
296
- }]
297
- })
298
- }
299
- ```
300
-
301
- **Success Criteria**:
302
- - Refined requirements available for Phase 1 decomposition
303
- - Interactive agents closed, results stored
304
-
305
- ---
306
-
307
- ### Phase 1: Requirement → CSV + Classification
308
-
309
- **Objective**: Build tasks.csv from selected pipeline mode and perspectives.
310
-
311
- **Decomposition Rules**:
312
-
313
- | Pipeline | Tasks | Wave Structure |
314
- |----------|-------|---------------|
315
- | quick | EXPLORE-001 → ANALYZE-001 → SYNTH-001 | 3 waves, serial, depth=1 |
316
- | standard | EXPLORE-001..N → ANALYZE-001..N → DISCUSS-001 → SYNTH-001 | 4 wave groups, parallel explore+analyze |
317
- | deep | EXPLORE-001..N → ANALYZE-001..N → DISCUSS-001 (→ dynamic tasks) → SYNTH-001 | 3+ waves, SYNTH created after discussion loop |
318
-
319
- Where N = number of selected perspectives.
320
-
321
- **Classification Rules**:
322
-
323
- All work tasks (exploration, analysis, discussion processing, synthesis) are `csv-wave`. The discussion feedback gate (user interaction after discussant completes) is `interactive`.
324
-
325
- **Pipeline Task Definitions**:
326
-
327
- #### Quick Pipeline (3 csv-wave tasks)
328
-
329
- | Task ID | Role | Wave | Deps | Perspective | Description |
330
- |---------|------|------|------|-------------|-------------|
331
- | EXPLORE-001 | explorer | 1 | (none) | general | Explore codebase structure for analysis topic |
332
- | ANALYZE-001 | analyst | 2 | EXPLORE-001 | technical | Deep analysis from technical perspective |
333
- | SYNTH-001 | synthesizer | 3 | ANALYZE-001 | (all) | Integrate analysis into final conclusions |
334
-
335
- #### Standard Pipeline (2N+2 tasks, parallel windows)
336
-
337
- | Task ID | Role | Wave | Deps | Perspective | Description |
338
- |---------|------|------|------|-------------|-------------|
339
- | EXPLORE-001..N | explorer | 1 | (none) | per-perspective | Parallel codebase exploration, one per perspective |
340
- | ANALYZE-001..N | analyst | 2 | EXPLORE-N | per-perspective | Parallel deep analysis, one per perspective |
341
- | DISCUSS-001 | discussant | 3 | all ANALYZE-* | (all) | Aggregate analyses, identify themes and conflicts |
342
- | FEEDBACK-001 | (interactive) | 4 | DISCUSS-001 | - | User feedback: done create SYNTH, continue → more discussion |
343
- | SYNTH-001 | synthesizer | 5 | FEEDBACK-001 | (all) | Cross-perspective integration and conclusions |
344
-
345
- #### Deep Pipeline (2N+1 initial tasks + dynamic)
346
-
347
- Same as Standard, but SYNTH-001 is omitted initially. Created dynamically after the discussion loop (up to 5 rounds) completes. Additional dynamic tasks:
348
- - `DISCUSS-N` — subsequent discussion round
349
- - `ANALYZE-fix-N` — supplementary analysis with adjusted focus
350
- - `SYNTH-001` — created after final discussion round
351
-
352
- **Wave Computation**: Kahn's BFS topological sort with depth tracking (csv-wave tasks only).
353
-
354
- **User Validation**: Display task breakdown with wave + exec_mode assignment (skip if AUTO_YES).
355
-
356
- **Success Criteria**:
357
- - tasks.csv created with valid schema, wave, and exec_mode assignments
358
- - No circular dependencies
359
- - User approved (or AUTO_YES)
360
-
361
- ---
362
-
363
- ### Phase 2: Wave Execution Engine (Extended)
364
-
365
- **Objective**: Execute tasks wave-by-wave with hybrid mechanism support and cross-wave context propagation.
366
-
367
- ```javascript
368
- const failedIds = new Set()
369
- const skippedIds = new Set()
370
- let discussionRound = 0
371
- const MAX_DISCUSSION_ROUNDS = pipeline_mode === 'deep' ? 5 : pipeline_mode === 'standard' ? 1 : 0
372
-
373
- for (let wave = 1; wave <= maxWave; wave++) {
374
- console.log(`\n## Wave ${wave}/${maxWave}\n`)
375
-
376
- // 1. Read current master CSV
377
- const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
378
-
379
- // 2. Separate csv-wave and interactive tasks for this wave
380
- const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
381
- const csvTasks = waveTasks.filter(t => t.exec_mode === 'csv-wave')
382
- const interactiveTasks = waveTasks.filter(t => t.exec_mode === 'interactive')
383
-
384
- // 3. Skip tasks whose deps failed
385
- const executableCsvTasks = []
386
- for (const task of csvTasks) {
387
- const deps = task.deps.split(';').filter(Boolean)
388
- if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
389
- skippedIds.add(task.id)
390
- updateMasterCsvRow(sessionFolder, task.id, {
391
- status: 'skipped', error: 'Dependency failed or skipped'
392
- })
393
- continue
394
- }
395
- executableCsvTasks.push(task)
396
- }
397
-
398
- // 4. Build prev_context for each csv-wave task
399
- for (const task of executableCsvTasks) {
400
- const contextIds = task.context_from.split(';').filter(Boolean)
401
- const prevFindings = contextIds
402
- .map(id => {
403
- const prevRow = masterCsv.find(r => r.id === id)
404
- if (prevRow && prevRow.status === 'completed' && prevRow.findings) {
405
- return `[Task ${id}: ${prevRow.title}] ${prevRow.findings}`
406
- }
407
- return null
408
- })
409
- .filter(Boolean)
410
- .join('\n')
411
- task.prev_context = prevFindings || 'No previous context available'
412
- }
413
-
414
- // 5. Write wave CSV and execute csv-wave tasks
415
- if (executableCsvTasks.length > 0) {
416
- const waveHeader = 'id,title,description,role,perspective,dimensions,discussion_round,discussion_type,deps,context_from,exec_mode,wave,prev_context'
417
- const waveRows = executableCsvTasks.map(t =>
418
- [t.id, t.title, t.description, t.role, t.perspective, t.dimensions,
419
- t.discussion_round, t.discussion_type, t.deps, t.context_from, t.exec_mode, t.wave, t.prev_context]
420
- .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
421
- .join(',')
422
- )
423
- Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
424
-
425
- const waveResult = spawn_agents_on_csv({
426
- csv_path: `${sessionFolder}/wave-${wave}.csv`,
427
- id_column: "id",
428
- instruction: buildAnalysisInstruction(sessionFolder, wave),
429
- max_concurrency: maxConcurrency,
430
- max_runtime_seconds: 600,
431
- output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
432
- output_schema: {
433
- type: "object",
434
- properties: {
435
- id: { type: "string" },
436
- status: { type: "string", enum: ["completed", "failed"] },
437
- findings: { type: "string" },
438
- error: { type: "string" }
439
- },
440
- required: ["id", "status", "findings"]
441
- }
442
- })
443
-
444
- // Merge results into master CSV
445
- const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
446
- for (const result of waveResults) {
447
- updateMasterCsvRow(sessionFolder, result.id, {
448
- status: result.status,
449
- findings: result.findings || '',
450
- error: result.error || ''
451
- })
452
- if (result.status === 'failed') failedIds.add(result.id)
453
- }
454
-
455
- Bash(`rm -f "${sessionFolder}/wave-${wave}.csv"`)
456
- }
457
-
458
- // 6. Execute post-wave interactive tasks (Discussion Feedback)
459
- for (const task of interactiveTasks) {
460
- if (task.status !== 'pending') continue
461
- const deps = task.deps.split(';').filter(Boolean)
462
- if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
463
- skippedIds.add(task.id)
464
- continue
465
- }
466
-
467
- discussionRound++
468
-
469
- // Discussion Feedback Gate
470
- if (pipeline_mode === 'quick' || discussionRound > MAX_DISCUSSION_ROUNDS) {
471
- // No discussion or max rounds reached — proceed to synthesis
472
- if (!masterCsv.find(t => t.id === 'SYNTH-001')) {
473
- // Create SYNTH-001 dynamically
474
- const lastDiscuss = masterCsv.filter(t => t.id.startsWith('DISCUSS'))
475
- .sort((a, b) => b.id.localeCompare(a.id))[0]
476
- addTaskToMasterCsv(sessionFolder, {
477
- id: 'SYNTH-001', title: 'Final synthesis',
478
- description: 'Integrate all analysis into final conclusions',
479
- role: 'synthesizer', perspective: '', dimensions: '',
480
- discussion_round: '0', discussion_type: '',
481
- deps: lastDiscuss ? lastDiscuss.id : '', context_from: 'all',
482
- exec_mode: 'csv-wave', wave: String(wave + 1),
483
- status: 'pending', findings: '', error: ''
484
- })
485
- maxWave = wave + 1
486
- }
487
- updateMasterCsvRow(sessionFolder, task.id, {
488
- status: 'completed',
489
- findings: `Discussion round ${discussionRound}: proceeding to synthesis`
490
- })
491
- continue
492
- }
493
-
494
- // Spawn discussion feedback agent
495
- const feedbackAgent = spawn_agent({
496
- message: `
497
- ## TASK ASSIGNMENT
498
-
499
- ### MANDATORY FIRST STEPS (Agent Execute)
500
- 1. **Read role definition**: ~ or <project>/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md (MUST read first)
501
- 2. Read: ${sessionFolder}/discoveries.ndjson (shared discoveries)
502
-
503
- ---
504
-
505
- Goal: Collect user feedback on discussion round ${discussionRound}
506
- Session: ${sessionFolder}
507
- Discussion Round: ${discussionRound}/${MAX_DISCUSSION_ROUNDS}
508
- Pipeline Mode: ${pipeline_mode}
509
-
510
- ### Context
511
- The discussant has completed round ${discussionRound}. Present the user with discussion results and collect feedback on next direction.
512
- `
513
- })
514
-
515
- const feedbackResult = wait({ ids: [feedbackAgent], timeout_ms: 300000 })
516
- if (feedbackResult.timed_out) {
517
- send_input({ id: feedbackAgent, message: "Please finalize: user did not respond, default to 'Done'." })
518
- wait({ ids: [feedbackAgent], timeout_ms: 60000 })
519
- }
520
- close_agent({ id: feedbackAgent })
521
-
522
- // Parse feedback decision: "continue_deeper" | "adjust_direction" | "done"
523
- Write(`${sessionFolder}/interactive/${task.id}-result.json`, JSON.stringify({
524
- task_id: task.id, status: "completed",
525
- discussion_round: discussionRound,
526
- feedback: feedbackDecision,
527
- timestamp: getUtc8ISOString()
528
- }))
529
-
530
- // Handle feedback
531
- if (feedbackDecision === 'done') {
532
- // Create SYNTH-001 blocked by last DISCUSS task
533
- addTaskToMasterCsv(sessionFolder, {
534
- id: 'SYNTH-001', deps: task.id.replace('FEEDBACK', 'DISCUSS'),
535
- role: 'synthesizer', exec_mode: 'csv-wave', wave: String(wave + 1)
536
- })
537
- maxWave = wave + 1
538
- } else if (feedbackDecision === 'adjust_direction') {
539
- // Create ANALYZE-fix-N and DISCUSS-N+1
540
- const fixId = `ANALYZE-fix-${discussionRound}`
541
- const nextDiscussId = `DISCUSS-${String(discussionRound + 1).padStart(3, '0')}`
542
- addTaskToMasterCsv(sessionFolder, {
543
- id: fixId, role: 'analyst', exec_mode: 'csv-wave', wave: String(wave + 1)
544
- })
545
- addTaskToMasterCsv(sessionFolder, {
546
- id: nextDiscussId, role: 'discussant', deps: fixId,
547
- exec_mode: 'csv-wave', wave: String(wave + 2)
548
- })
549
- addTaskToMasterCsv(sessionFolder, {
550
- id: `FEEDBACK-${String(discussionRound + 1).padStart(3, '0')}`,
551
- exec_mode: 'interactive', deps: nextDiscussId, wave: String(wave + 3)
552
- })
553
- maxWave = wave + 3
554
- } else {
555
- // continue_deeper: Create DISCUSS-N+1
556
- const nextDiscussId = `DISCUSS-${String(discussionRound + 1).padStart(3, '0')}`
557
- addTaskToMasterCsv(sessionFolder, {
558
- id: nextDiscussId, role: 'discussant', exec_mode: 'csv-wave', wave: String(wave + 1)
559
- })
560
- addTaskToMasterCsv(sessionFolder, {
561
- id: `FEEDBACK-${String(discussionRound + 1).padStart(3, '0')}`,
562
- exec_mode: 'interactive', deps: nextDiscussId, wave: String(wave + 2)
563
- })
564
- maxWave = wave + 2
565
- }
566
-
567
- updateMasterCsvRow(sessionFolder, task.id, {
568
- status: 'completed',
569
- findings: `Discussion feedback: ${feedbackDecision}, round ${discussionRound}`
570
- })
571
- }
572
- }
573
- ```
574
-
575
- **Success Criteria**:
576
- - All waves executed in order
577
- - Both csv-wave and interactive tasks handled per wave
578
- - Each wave's results merged into master CSV before next wave starts
579
- - Dependent tasks skipped when predecessor failed
580
- - discoveries.ndjson accumulated across all waves and mechanisms
581
- - Discussion loop controlled with proper round tracking
582
- - Dynamic tasks created correctly based on user feedback
583
-
584
- ---
585
-
586
- ### Phase 3: Post-Wave Interactive
587
-
588
- **Objective**: Handle discussion loop completion and ensure synthesis is triggered.
589
-
590
- After all discussion rounds are exhausted or user chooses "done":
591
- 1. Ensure SYNTH-001 exists in master CSV
592
- 2. Ensure SYNTH-001 is unblocked (blocked by last completed discussion task)
593
- 3. Execute remaining waves (synthesis)
594
-
595
- **Success Criteria**:
596
- - Post-wave interactive processing complete
597
- - Interactive agents closed, results stored
598
-
599
- ---
600
-
601
- ### Phase 4: Results Aggregation
602
-
603
- **Objective**: Generate final results and human-readable report.
604
-
605
- ```javascript
606
- const masterCsv = Read(`${sessionFolder}/tasks.csv`)
607
- Write(`${sessionFolder}/results.csv`, masterCsv)
608
-
609
- const tasks = parseCsv(masterCsv)
610
- const completed = tasks.filter(t => t.status === 'completed')
611
- const failed = tasks.filter(t => t.status === 'failed')
612
- const skipped = tasks.filter(t => t.status === 'skipped')
613
-
614
- const contextContent = `# Ultra Analyze Report
615
-
616
- **Session**: ${sessionId}
617
- **Topic**: ${topic}
618
- **Pipeline**: ${pipeline_mode}
619
- **Perspectives**: ${perspectives.join(', ')}
620
- **Discussion Rounds**: ${discussionRound}
621
- **Completed**: ${getUtc8ISOString()}
622
-
623
- ---
624
-
625
- ## Summary
626
-
627
- | Metric | Count |
628
- |--------|-------|
629
- | Total Tasks | ${tasks.length} |
630
- | Completed | ${completed.length} |
631
- | Failed | ${failed.length} |
632
- | Skipped | ${skipped.length} |
633
- | Discussion Rounds | ${discussionRound} |
634
-
635
- ---
636
-
637
- ## Wave Execution
638
-
639
- ${waveDetails}
640
-
641
- ---
642
-
643
- ## Analysis Artifacts
644
-
645
- - Explorations: discoveries with type "exploration" in discoveries.ndjson
646
- - Analyses: discoveries with type "analysis" in discoveries.ndjson
647
- - Discussion: discoveries with type "discussion" in discoveries.ndjson
648
- - Conclusions: discoveries with type "conclusion" in discoveries.ndjson
649
-
650
- ---
651
-
652
- ## Conclusions
653
-
654
- ${synthesisFindings}
655
- `
656
-
657
- Write(`${sessionFolder}/context.md`, contextContent)
658
- ```
659
-
660
- If not AUTO_YES, offer completion options:
661
-
662
- ```javascript
663
- if (!AUTO_YES) {
664
- const answer = AskUserQuestion({
665
- questions: [{
666
- question: "Ultra-Analyze pipeline complete. What would you like to do?",
667
- header: "Completion",
668
- multiSelect: false,
669
- options: [
670
- { label: "Archive & Clean (Recommended)", description: "Archive session" },
671
- { label: "Keep Active", description: "Keep session for follow-up" },
672
- { label: "Export Results", description: "Export deliverables to specified location" }
673
- ]
674
- }]
675
- })
676
- }
677
- ```
678
-
679
- **Success Criteria**:
680
- - results.csv exported (all tasks, both modes)
681
- - context.md generated
682
- - All interactive agents closed
683
- - Summary displayed to user
684
-
685
- ---
686
-
687
- ## Shared Discovery Board Protocol
688
-
689
- All agents across all waves share `discoveries.ndjson`. This enables cross-role knowledge sharing.
690
-
691
- **Discovery Types**:
692
-
693
- | Type | Dedup Key | Data Schema | Description |
694
- |------|-----------|-------------|-------------|
695
- | `exploration` | `data.perspective+data.file` | `{perspective, file, relevance, summary, patterns[]}` | Explored file/module |
696
- | `analysis` | `data.perspective+data.insight` | `{perspective, insight, confidence, evidence, file_ref}` | Analysis insight |
697
- | `pattern` | `data.name` | `{name, file, description, type}` | Code/architecture pattern |
698
- | `discussion_point` | `data.topic` | `{topic, perspectives[], convergence, open_questions[]}` | Discussion point |
699
- | `recommendation` | `data.action` | `{action, rationale, priority, confidence}` | Recommendation |
700
- | `conclusion` | `data.point` | `{point, evidence, confidence, perspectives_supporting[]}` | Final conclusion |
701
-
702
- **Format**: NDJSON, each line is self-contained JSON:
703
-
704
- ```jsonl
705
- {"ts":"2026-03-08T10:00:00+08:00","worker":"EXPLORE-001","type":"exploration","data":{"perspective":"technical","file":"src/auth/index.ts","relevance":"high","summary":"Auth module entry point with OAuth and JWT exports","patterns":["module-pattern","strategy-pattern"]}}
706
- {"ts":"2026-03-08T10:05:00+08:00","worker":"ANALYZE-001","type":"analysis","data":{"perspective":"technical","insight":"Auth module uses strategy pattern for provider switching","confidence":"high","evidence":"src/auth/strategies/*.ts","file_ref":"src/auth/index.ts:15"}}
707
- {"ts":"2026-03-08T10:10:00+08:00","worker":"DISCUSS-001","type":"discussion_point","data":{"topic":"Authentication scalability","perspectives":["technical","architectural"],"convergence":"Both perspectives agree on stateless JWT approach","open_questions":["Token refresh strategy for long sessions"]}}
708
- ```
709
-
710
- **Protocol Rules**:
711
- 1. Read board before own exploration — skip covered areas
712
- 2. Write discoveries immediately via `echo >>` — don't batch
713
- 3. Deduplicate — check existing entries by type + dedup key
714
- 4. Append-only — never modify or delete existing lines
715
-
716
- ---
717
-
718
- ## Error Handling
719
-
720
- | Error | Resolution |
721
- |-------|------------|
722
- | Circular dependency | Detect in wave computation, abort with error message |
723
- | CSV agent timeout | Mark as failed in results, continue with wave |
724
- | CSV agent failed | Mark as failed, skip dependent tasks in later waves |
725
- | Interactive agent timeout | Urge convergence via send_input, then close if still timed out |
726
- | Interactive agent failed | Mark as failed, skip dependents |
727
- | All agents in wave failed | Log error, offer retry or abort |
728
- | CSV parse error | Validate CSV format before execution, show line number |
729
- | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
730
- | Discussion loop exceeds 5 rounds | Force synthesis, offer continuation |
731
- | Explorer finds nothing | Continue with limited context, note limitation |
732
- | CLI tool unavailable | Fallback chain: gemini → codex → direct analysis |
733
- | User timeout in discussion | Save state, default to "done", proceed to synthesis |
734
- | Continue mode: no session found | List available sessions, prompt user to select |
735
-
736
- ---
737
-
738
- ## Core Rules
739
-
740
- 1. **Start Immediately**: First action is session initialization, then Phase 0/1
741
- 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
742
- 3. **CSV is Source of Truth**: Master tasks.csv holds all state (both csv-wave and interactive)
743
- 4. **CSV First**: Default to csv-wave for tasks; only use interactive when user interaction is needed
744
- 5. **Context Propagation**: prev_context built from master CSV, not from memory
745
- 6. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson — both mechanisms share it
746
- 7. **Skip on Failure**: If a dependency failed, skip the dependent task (regardless of mechanism)
747
- 8. **Lifecycle Balance**: Every spawn_agent MUST have a matching close_agent
748
- 9. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
749
- 10. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
750
-
751
-
752
- ---
753
-
754
- ## Coordinator Role Constraints (Main Agent)
755
-
756
- **CRITICAL**: The coordinator (main agent executing this skill) is responsible for **orchestration only**, NOT implementation.
757
-
758
- 15. **Coordinator Does NOT Execute Code**: The main agent MUST NOT write, modify, or implement any code directly. All implementation work is delegated to spawned team agents. The coordinator only:
759
- - Spawns agents with task assignments
760
- - Waits for agent callbacks
761
- - Merges results and coordinates workflow
762
- - Manages workflow transitions between phases
763
-
764
- 16. **Patient Waiting is Mandatory**: Agent execution takes significant time (typically 10-30 minutes per phase, sometimes longer). The coordinator MUST:
765
- - Wait patiently for `wait()` calls to complete
766
- - NOT skip workflow steps due to perceived delays
767
- - NOT assume agents have failed just because they're taking time
768
- - Trust the timeout mechanisms defined in the skill
769
-
770
- 17. **Use send_input for Clarification**: When agents need guidance or appear stuck, the coordinator MUST:
771
- - Use `send_input()` to ask questions or provide clarification
772
- - NOT skip the agent or move to next phase prematurely
773
- - Give agents opportunity to respond before escalating
774
- - Example: `send_input({ id: agent_id, message: "Please provide status update or clarify blockers" })`
775
-
776
- 18. **No Workflow Shortcuts**: The coordinator MUST NOT:
777
- - Skip phases or stages defined in the workflow
778
- - Bypass required approval or review steps
779
- - Execute dependent tasks before prerequisites complete
780
- - Assume task completion without explicit agent callback
781
- - Make up or fabricate agent results
782
-
783
- 19. **Respect Long-Running Processes**: This is a complex multi-agent workflow that requires patience:
784
- - Total execution time may range from 30-90 minutes or longer
785
- - Each phase may take 10-30 minutes depending on complexity
786
- - The coordinator must remain active and attentive throughout the entire process
787
- - Do not terminate or skip steps due to time concerns
1
+ ---
2
+ name: team-ultra-analyze
3
+ description: Deep collaborative analysis pipeline. Multi-perspective exploration, deep analysis, user-driven discussion loops, and cross-perspective synthesis. Supports Quick, Standard, and Deep pipeline modes.
4
+ argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] [--mode quick|standard|deep] \"analysis topic\""
5
+ allowed-tools: spawn_agents_on_csv, spawn_agent, wait, send_input, close_agent, Read, Write, Edit, Bash, Glob, Grep, request_user_input
6
+ ---
7
+
8
+ ## Auto Mode
9
+
10
+ When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
11
+
12
+ # Team Ultra Analyze
13
+
14
+ ## Usage
15
+
16
+ ```bash
17
+ $team-ultra-analyze "Analyze authentication module architecture and security"
18
+ $team-ultra-analyze -c 4 --mode deep "Deep analysis of payment processing pipeline"
19
+ $team-ultra-analyze -y --mode quick "Quick overview of API endpoint structure"
20
+ $team-ultra-analyze --continue "uan-auth-analysis-20260308"
21
+ ```
22
+
23
+ **Flags**:
24
+ - `-y, --yes`: Skip all confirmations (auto mode)
25
+ - `-c, --concurrency N`: Max concurrent agents within each wave (default: 3)
26
+ - `--mode`: Pipeline mode override (quick|standard|deep)
27
+ - `--continue`: Resume existing session
28
+
29
+ **Output Directory**: `.workflow/.csv-wave/{session-id}/`
30
+ **Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
31
+
32
+ ---
33
+
34
+ ## Overview
35
+
36
+ Deep collaborative analysis with multi-perspective exploration, deep analysis, user-driven discussion loops, and cross-perspective synthesis. Each perspective gets its own explorer and analyst, working in parallel. Discussion rounds allow the user to steer analysis depth and direction.
37
+
38
+ **Execution Model**: Hybrid — CSV wave pipeline (primary) + individual agent spawn (secondary for discussion feedback loop)
39
+
40
+ ```
41
+ ┌─────────────────────────────────────────────────────────────────────────┐
42
+ │ TEAM ULTRA ANALYZE WORKFLOW │
43
+ ├─────────────────────────────────────────────────────────────────────────┤
44
+ │ │
45
+ │ Phase 0: Pre-Wave Interactive │
46
+ │ ├─ Topic parsing + dimension detection │
47
+ │ ├─ Pipeline mode selection (quick/standard/deep) │
48
+ │ ├─ Perspective assignment │
49
+ │ └─ Output: refined requirements for decomposition │
50
+ │ │
51
+ │ Phase 1: Requirement → CSV + Classification │
52
+ │ ├─ Parse topic into exploration + analysis + discussion + synthesis │
53
+ │ ├─ Assign roles: explorer, analyst, discussant, synthesizer │
54
+ │ ├─ Classify tasks: csv-wave | interactive (exec_mode) │
55
+ │ ├─ Compute dependency waves (topological sort → depth grouping) │
56
+ │ ├─ Generate tasks.csv with wave + exec_mode columns │
57
+ │ └─ User validates task breakdown (skip if -y) │
58
+ │ │
59
+ │ Phase 2: Wave Execution Engine (Extended) │
60
+ │ ├─ For each wave (1..N): │
61
+ │ │ ├─ Build wave CSV (filter csv-wave tasks for this wave) │
62
+ │ │ ├─ Inject previous findings into prev_context column │
63
+ │ │ ├─ spawn_agents_on_csv(wave CSV) │
64
+ │ │ ├─ Execute post-wave interactive tasks (if any) │
65
+ │ │ ├─ Merge all results into master tasks.csv │
66
+ │ │ └─ Check: any failed? → skip dependents │
67
+ │ └─ discoveries.ndjson shared across all modes (append-only) │
68
+ │ │
69
+ │ Phase 3: Post-Wave Interactive (Discussion Loop) │
70
+ │ ├─ After discussant completes: user feedback gate │
71
+ │ ├─ User chooses: continue deeper | adjust direction | done │
72
+ │ ├─ Creates dynamic tasks (DISCUSS-N, ANALYZE-fix-N) as needed │
73
+ │ └─ Max discussion rounds: quick=0, standard=1, deep=5 │
74
+ │ │
75
+ │ Phase 4: Results Aggregation │
76
+ │ ├─ Export final results.csv │
77
+ │ ├─ Generate context.md with all findings │
78
+ │ ├─ Display summary: completed/failed/skipped per wave │
79
+ │ └─ Offer: view results | export | archive │
80
+ │ │
81
+ └─────────────────────────────────────────────────────────────────────────┘
82
+ ```
83
+
84
+ ---
85
+
86
+ ## Task Classification Rules
87
+
88
+ Each task is classified by `exec_mode`:
89
+
90
+ | exec_mode | Mechanism | Criteria |
91
+ |-----------|-----------|----------|
92
+ | `csv-wave` | `spawn_agents_on_csv` | One-shot, structured I/O, no multi-round interaction |
93
+ | `interactive` | `spawn_agent`/`wait`/`send_input`/`close_agent` | Multi-round, user feedback, direction control |
94
+
95
+ **Classification Decision**:
96
+
97
+ | Task Property | Classification |
98
+ |---------------|---------------|
99
+ | Codebase exploration (single perspective) | `csv-wave` |
100
+ | Parallel exploration (multiple perspectives) | `csv-wave` (parallel in same wave) |
101
+ | Deep analysis (single perspective) | `csv-wave` |
102
+ | Parallel analysis (multiple perspectives) | `csv-wave` (parallel in same wave) |
103
+ | Direction-fix analysis (adjusted focus) | `csv-wave` |
104
+ | Discussion processing (aggregate results) | `csv-wave` |
105
+ | Final synthesis (cross-perspective integration) | `csv-wave` |
106
+ | Discussion feedback gate (user interaction) | `interactive` |
107
+ | Topic clarification (Phase 0) | `interactive` |
108
+
109
+ ---
110
+
111
+ ## CSV Schema
112
+
113
+ ### tasks.csv (Master State)
114
+
115
+ ```csv
116
+ id,title,description,role,perspective,dimensions,discussion_round,discussion_type,deps,context_from,exec_mode,wave,status,findings,error
117
+ "EXPLORE-001","Explore from technical perspective","Search codebase from technical perspective. Collect files, patterns, findings.","explorer","technical","architecture;implementation","0","","","","csv-wave","1","pending","",""
118
+ "ANALYZE-001","Deep analysis from technical perspective","Analyze exploration results from technical perspective. Generate insights with confidence levels.","analyst","technical","architecture;implementation","0","","EXPLORE-001","EXPLORE-001","csv-wave","2","pending","",""
119
+ "DISCUSS-001","Initial discussion round","Aggregate all analysis results. Identify convergent themes, conflicts, top discussion points.","discussant","","","1","initial","ANALYZE-001;ANALYZE-002","ANALYZE-001;ANALYZE-002","csv-wave","3","pending","",""
120
+ ```
121
+
122
+ **Columns**:
123
+
124
+ | Column | Phase | Description |
125
+ |--------|-------|-------------|
126
+ | `id` | Input | Unique task identifier (string) |
127
+ | `title` | Input | Short task title |
128
+ | `description` | Input | Detailed task description |
129
+ | `role` | Input | Worker role: explorer, analyst, discussant, synthesizer |
130
+ | `perspective` | Input | Analysis perspective: technical, architectural, business, domain_expert |
131
+ | `dimensions` | Input | Analysis dimensions (semicolon-separated): architecture, implementation, performance, security, concept, comparison, decision |
132
+ | `discussion_round` | Input | Discussion round number (0 = N/A, 1+ = round number) |
133
+ | `discussion_type` | Input | Discussion type: initial, deepen, direction-adjusted, specific-questions |
134
+ | `deps` | Input | Semicolon-separated dependency task IDs |
135
+ | `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
136
+ | `exec_mode` | Input | `csv-wave` or `interactive` |
137
+ | `wave` | Computed | Wave number (computed by topological sort, 1-based) |
138
+ | `status` | Output | `pending` → `completed` / `failed` / `skipped` |
139
+ | `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
140
+ | `error` | Output | Error message if failed (empty if success) |
141
+
142
+ ### Per-Wave CSV (Temporary)
143
+
144
+ Each wave generates a temporary `wave-{N}.csv` with extra `prev_context` column (csv-wave tasks only).
145
+
146
+ ---
147
+
148
+ ## Agent Registry (Interactive Agents)
149
+
150
+ | Agent | Role File | Pattern | Responsibility | Position |
151
+ |-------|-----------|---------|----------------|----------|
152
+ | discussion-feedback | agents/discussion-feedback.md | 2.3 (wait-respond) | Collect user feedback after discussion round, create dynamic tasks | post-wave (after discussant wave) |
153
+ | topic-analyzer | agents/topic-analyzer.md | 2.3 (wait-respond) | Parse topic, detect dimensions, select pipeline mode and perspectives | standalone (Phase 0) |
154
+
155
+ > **COMPACT PROTECTION**: Agent files are execution documents. When context compression occurs, **you MUST immediately `Read` the corresponding agent.md** to reload.
156
+
157
+ ---
158
+
159
+ ## Output Artifacts
160
+
161
+ | File | Purpose | Lifecycle |
162
+ |------|---------|-----------|
163
+ | `tasks.csv` | Master state — all tasks with status/findings | Updated after each wave |
164
+ | `wave-{N}.csv` | Per-wave input (temporary, csv-wave tasks only) | Created before wave, deleted after |
165
+ | `results.csv` | Final export of all task results | Created in Phase 4 |
166
+ | `discoveries.ndjson` | Shared exploration board (all agents, both modes) | Append-only, carries across waves |
167
+ | `context.md` | Human-readable execution report | Created in Phase 4 |
168
+ | `interactive/{id}-result.json` | Results from interactive tasks | Created per interactive task |
169
+
170
+ ---
171
+
172
+ ## Session Structure
173
+
174
+ ```
175
+ .workflow/.csv-wave/{session-id}/
176
+ ├── tasks.csv # Master state (all tasks, both modes)
177
+ ├── results.csv # Final results export
178
+ ├── discoveries.ndjson # Shared discovery board (all agents)
179
+ ├── context.md # Human-readable report
180
+ ├── wave-{N}.csv # Temporary per-wave input (csv-wave only)
181
+ └── interactive/ # Interactive task artifacts
182
+ └── {id}-result.json # Per-task results
183
+ ```
184
+
185
+ ---
186
+
187
+ ## Implementation
188
+
189
+ ### Session Initialization
190
+
191
+ ```javascript
192
+ const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
193
+
194
+ // Parse flags
195
+ const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
196
+ const continueMode = $ARGUMENTS.includes('--continue')
197
+ const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
198
+ const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 3
199
+ const modeMatch = $ARGUMENTS.match(/--mode\s+(quick|standard|deep)/)
200
+ const explicitMode = modeMatch ? modeMatch[1] : null
201
+
202
+ // Clean requirement text (remove flags)
203
+ const topic = $ARGUMENTS
204
+ .replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+|--mode\s+\w+/g, '')
205
+ .trim()
206
+
207
+ const slug = topic.toLowerCase()
208
+ .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
209
+ .substring(0, 40)
210
+ const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
211
+ let sessionId = `uan-${slug}-${dateStr}`
212
+ let sessionFolder = `.workflow/.csv-wave/${sessionId}`
213
+
214
+ // Continue mode: find existing session
215
+ if (continueMode) {
216
+ const existing = Bash(`ls -t .workflow/.csv-wave/uan-* 2>/dev/null | head -1`).trim()
217
+ if (existing) {
218
+ sessionId = existing.split('/').pop()
219
+ sessionFolder = existing
220
+ }
221
+ }
222
+
223
+ Bash(`mkdir -p ${sessionFolder}/interactive`)
224
+ ```
225
+
226
+ ---
227
+
228
+ ### Phase 0: Pre-Wave Interactive
229
+
230
+ **Objective**: Parse topic, detect analysis dimensions, select pipeline mode, and assign perspectives.
231
+
232
+ **Execution**:
233
+
234
+ ```javascript
235
+ const analyzer = spawn_agent({
236
+ message: `
237
+ ## TASK ASSIGNMENT
238
+
239
+ ### MANDATORY FIRST STEPS (Agent Execute)
240
+ 1. **Read role definition**: ~ or <project>/.codex/skills/team-ultra-analyze/agents/topic-analyzer.md (MUST read first)
241
+ 2. Read: .workflow/project-tech.json (if exists)
242
+
243
+ ---
244
+
245
+ Goal: Analyze topic and recommend pipeline configuration
246
+ Topic: ${topic}
247
+ Explicit Mode: ${explicitMode || 'auto-detect'}
248
+
249
+ ### Task
250
+ 1. Detect analysis dimensions from topic keywords:
251
+ - architecture, implementation, performance, security, concept, comparison, decision
252
+ 2. Select perspectives based on dimensions:
253
+ - technical, architectural, business, domain_expert
254
+ 3. Determine pipeline mode (if not explicitly set):
255
+ - Complexity 1-3 → quick, 4-6 → standard, 7+ → deep
256
+ 4. Return structured configuration
257
+ `
258
+ })
259
+
260
+ const analyzerResult = wait({ ids: [analyzer], timeout_ms: 120000 })
261
+
262
+ if (analyzerResult.timed_out) {
263
+ send_input({ id: analyzer, message: "Please finalize and output current findings." })
264
+ wait({ ids: [analyzer], timeout_ms: 60000 })
265
+ }
266
+
267
+ close_agent({ id: analyzer })
268
+
269
+ // Parse result: pipeline_mode, perspectives[], dimensions[], depth
270
+ Write(`${sessionFolder}/interactive/topic-analyzer-result.json`, JSON.stringify({
271
+ task_id: "topic-analysis",
272
+ status: "completed",
273
+ pipeline_mode: parsedMode,
274
+ perspectives: parsedPerspectives,
275
+ dimensions: parsedDimensions,
276
+ depth: parsedDepth,
277
+ timestamp: getUtc8ISOString()
278
+ }))
279
+ ```
280
+
281
+ If not AUTO_YES, present user with configuration for confirmation:
282
+
283
+ ```javascript
284
+ if (!AUTO_YES) {
285
+ const answer = request_user_input({
286
+ questions: [{
287
+ question: `Topic: "${topic}" — Pipeline: ${pipeline_mode}. Approve or override?`,
288
+ header: "Config",
289
+ id: "analysis_config",
290
+ options: [
291
+ { label: "Approve (Recommended)", description: `Use ${pipeline_mode} mode with ${perspectives.length} perspectives` },
292
+ { label: "Quick", description: "1 explorer -> 1 analyst -> synthesizer (fast)" },
293
+ { label: "Standard/Deep", description: "N explorers -> N analysts -> discussion -> synthesizer" }
294
+ ]
295
+ }]
296
+ })
297
+ }
298
+ ```
299
+
300
+ **Success Criteria**:
301
+ - Refined requirements available for Phase 1 decomposition
302
+ - Interactive agents closed, results stored
303
+
304
+ ---
305
+
306
+ ### Phase 1: Requirement → CSV + Classification
307
+
308
+ **Objective**: Build tasks.csv from selected pipeline mode and perspectives.
309
+
310
+ **Decomposition Rules**:
311
+
312
+ | Pipeline | Tasks | Wave Structure |
313
+ |----------|-------|---------------|
314
+ | quick | EXPLORE-001 → ANALYZE-001 → SYNTH-001 | 3 waves, serial, depth=1 |
315
+ | standard | EXPLORE-001..N → ANALYZE-001..N → DISCUSS-001 → SYNTH-001 | 4 wave groups, parallel explore+analyze |
316
+ | deep | EXPLORE-001..N → ANALYZE-001..N → DISCUSS-001 (dynamic tasks) → SYNTH-001 | 3+ waves, SYNTH created after discussion loop |
317
+
318
+ Where N = number of selected perspectives.
319
+
320
+ **Classification Rules**:
321
+
322
+ All work tasks (exploration, analysis, discussion processing, synthesis) are `csv-wave`. The discussion feedback gate (user interaction after discussant completes) is `interactive`.
323
+
324
+ **Pipeline Task Definitions**:
325
+
326
+ #### Quick Pipeline (3 csv-wave tasks)
327
+
328
+ | Task ID | Role | Wave | Deps | Perspective | Description |
329
+ |---------|------|------|------|-------------|-------------|
330
+ | EXPLORE-001 | explorer | 1 | (none) | general | Explore codebase structure for analysis topic |
331
+ | ANALYZE-001 | analyst | 2 | EXPLORE-001 | technical | Deep analysis from technical perspective |
332
+ | SYNTH-001 | synthesizer | 3 | ANALYZE-001 | (all) | Integrate analysis into final conclusions |
333
+
334
+ #### Standard Pipeline (2N+2 tasks, parallel windows)
335
+
336
+ | Task ID | Role | Wave | Deps | Perspective | Description |
337
+ |---------|------|------|------|-------------|-------------|
338
+ | EXPLORE-001..N | explorer | 1 | (none) | per-perspective | Parallel codebase exploration, one per perspective |
339
+ | ANALYZE-001..N | analyst | 2 | EXPLORE-N | per-perspective | Parallel deep analysis, one per perspective |
340
+ | DISCUSS-001 | discussant | 3 | all ANALYZE-* | (all) | Aggregate analyses, identify themes and conflicts |
341
+ | FEEDBACK-001 | (interactive) | 4 | DISCUSS-001 | - | User feedback: done → create SYNTH, continue → more discussion |
342
+ | SYNTH-001 | synthesizer | 5 | FEEDBACK-001 | (all) | Cross-perspective integration and conclusions |
343
+
344
+ #### Deep Pipeline (2N+1 initial tasks + dynamic)
345
+
346
+ Same as Standard, but SYNTH-001 is omitted initially. Created dynamically after the discussion loop (up to 5 rounds) completes. Additional dynamic tasks:
347
+ - `DISCUSS-N` — subsequent discussion round
348
+ - `ANALYZE-fix-N` — supplementary analysis with adjusted focus
349
+ - `SYNTH-001` — created after final discussion round
350
+
351
+ **Wave Computation**: Kahn's BFS topological sort with depth tracking (csv-wave tasks only).
352
+
353
+ **User Validation**: Display task breakdown with wave + exec_mode assignment (skip if AUTO_YES).
354
+
355
+ **Success Criteria**:
356
+ - tasks.csv created with valid schema, wave, and exec_mode assignments
357
+ - No circular dependencies
358
+ - User approved (or AUTO_YES)
359
+
360
+ ---
361
+
362
+ ### Phase 2: Wave Execution Engine (Extended)
363
+
364
+ **Objective**: Execute tasks wave-by-wave with hybrid mechanism support and cross-wave context propagation.
365
+
366
+ ```javascript
367
+ const failedIds = new Set()
368
+ const skippedIds = new Set()
369
+ let discussionRound = 0
370
+ const MAX_DISCUSSION_ROUNDS = pipeline_mode === 'deep' ? 5 : pipeline_mode === 'standard' ? 1 : 0
371
+
372
+ for (let wave = 1; wave <= maxWave; wave++) {
373
+ console.log(`\n## Wave ${wave}/${maxWave}\n`)
374
+
375
+ // 1. Read current master CSV
376
+ const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
377
+
378
+ // 2. Separate csv-wave and interactive tasks for this wave
379
+ const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
380
+ const csvTasks = waveTasks.filter(t => t.exec_mode === 'csv-wave')
381
+ const interactiveTasks = waveTasks.filter(t => t.exec_mode === 'interactive')
382
+
383
+ // 3. Skip tasks whose deps failed
384
+ const executableCsvTasks = []
385
+ for (const task of csvTasks) {
386
+ const deps = task.deps.split(';').filter(Boolean)
387
+ if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
388
+ skippedIds.add(task.id)
389
+ updateMasterCsvRow(sessionFolder, task.id, {
390
+ status: 'skipped', error: 'Dependency failed or skipped'
391
+ })
392
+ continue
393
+ }
394
+ executableCsvTasks.push(task)
395
+ }
396
+
397
+ // 4. Build prev_context for each csv-wave task
398
+ for (const task of executableCsvTasks) {
399
+ const contextIds = task.context_from.split(';').filter(Boolean)
400
+ const prevFindings = contextIds
401
+ .map(id => {
402
+ const prevRow = masterCsv.find(r => r.id === id)
403
+ if (prevRow && prevRow.status === 'completed' && prevRow.findings) {
404
+ return `[Task ${id}: ${prevRow.title}] ${prevRow.findings}`
405
+ }
406
+ return null
407
+ })
408
+ .filter(Boolean)
409
+ .join('\n')
410
+ task.prev_context = prevFindings || 'No previous context available'
411
+ }
412
+
413
+ // 5. Write wave CSV and execute csv-wave tasks
414
+ if (executableCsvTasks.length > 0) {
415
+ const waveHeader = 'id,title,description,role,perspective,dimensions,discussion_round,discussion_type,deps,context_from,exec_mode,wave,prev_context'
416
+ const waveRows = executableCsvTasks.map(t =>
417
+ [t.id, t.title, t.description, t.role, t.perspective, t.dimensions,
418
+ t.discussion_round, t.discussion_type, t.deps, t.context_from, t.exec_mode, t.wave, t.prev_context]
419
+ .map(cell => `"${String(cell).replace(/"/g, '""')}"`)
420
+ .join(',')
421
+ )
422
+ Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
423
+
424
+ const waveResult = spawn_agents_on_csv({
425
+ csv_path: `${sessionFolder}/wave-${wave}.csv`,
426
+ id_column: "id",
427
+ instruction: buildAnalysisInstruction(sessionFolder, wave),
428
+ max_concurrency: maxConcurrency,
429
+ max_runtime_seconds: 600,
430
+ output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
431
+ output_schema: {
432
+ type: "object",
433
+ properties: {
434
+ id: { type: "string" },
435
+ status: { type: "string", enum: ["completed", "failed"] },
436
+ findings: { type: "string" },
437
+ error: { type: "string" }
438
+ },
439
+ required: ["id", "status", "findings"]
440
+ }
441
+ })
442
+
443
+ // Merge results into master CSV
444
+ const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
445
+ for (const result of waveResults) {
446
+ updateMasterCsvRow(sessionFolder, result.id, {
447
+ status: result.status,
448
+ findings: result.findings || '',
449
+ error: result.error || ''
450
+ })
451
+ if (result.status === 'failed') failedIds.add(result.id)
452
+ }
453
+
454
+ Bash(`rm -f "${sessionFolder}/wave-${wave}.csv"`)
455
+ }
456
+
457
+ // 6. Execute post-wave interactive tasks (Discussion Feedback)
458
+ for (const task of interactiveTasks) {
459
+ if (task.status !== 'pending') continue
460
+ const deps = task.deps.split(';').filter(Boolean)
461
+ if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
462
+ skippedIds.add(task.id)
463
+ continue
464
+ }
465
+
466
+ discussionRound++
467
+
468
+ // Discussion Feedback Gate
469
+ if (pipeline_mode === 'quick' || discussionRound > MAX_DISCUSSION_ROUNDS) {
470
+ // No discussion or max rounds reached — proceed to synthesis
471
+ if (!masterCsv.find(t => t.id === 'SYNTH-001')) {
472
+ // Create SYNTH-001 dynamically
473
+ const lastDiscuss = masterCsv.filter(t => t.id.startsWith('DISCUSS'))
474
+ .sort((a, b) => b.id.localeCompare(a.id))[0]
475
+ addTaskToMasterCsv(sessionFolder, {
476
+ id: 'SYNTH-001', title: 'Final synthesis',
477
+ description: 'Integrate all analysis into final conclusions',
478
+ role: 'synthesizer', perspective: '', dimensions: '',
479
+ discussion_round: '0', discussion_type: '',
480
+ deps: lastDiscuss ? lastDiscuss.id : '', context_from: 'all',
481
+ exec_mode: 'csv-wave', wave: String(wave + 1),
482
+ status: 'pending', findings: '', error: ''
483
+ })
484
+ maxWave = wave + 1
485
+ }
486
+ updateMasterCsvRow(sessionFolder, task.id, {
487
+ status: 'completed',
488
+ findings: `Discussion round ${discussionRound}: proceeding to synthesis`
489
+ })
490
+ continue
491
+ }
492
+
493
+ // Spawn discussion feedback agent
494
+ const feedbackAgent = spawn_agent({
495
+ message: `
496
+ ## TASK ASSIGNMENT
497
+
498
+ ### MANDATORY FIRST STEPS (Agent Execute)
499
+ 1. **Read role definition**: ~ or <project>/.codex/skills/team-ultra-analyze/agents/discussion-feedback.md (MUST read first)
500
+ 2. Read: ${sessionFolder}/discoveries.ndjson (shared discoveries)
501
+
502
+ ---
503
+
504
+ Goal: Collect user feedback on discussion round ${discussionRound}
505
+ Session: ${sessionFolder}
506
+ Discussion Round: ${discussionRound}/${MAX_DISCUSSION_ROUNDS}
507
+ Pipeline Mode: ${pipeline_mode}
508
+
509
+ ### Context
510
+ The discussant has completed round ${discussionRound}. Present the user with discussion results and collect feedback on next direction.
511
+ `
512
+ })
513
+
514
+ const feedbackResult = wait({ ids: [feedbackAgent], timeout_ms: 300000 })
515
+ if (feedbackResult.timed_out) {
516
+ send_input({ id: feedbackAgent, message: "Please finalize: user did not respond, default to 'Done'." })
517
+ wait({ ids: [feedbackAgent], timeout_ms: 60000 })
518
+ }
519
+ close_agent({ id: feedbackAgent })
520
+
521
+ // Parse feedbackDecision from the feedback agent's final output: "continue_deeper" | "adjust_direction" | "done"
522
+ Write(`${sessionFolder}/interactive/${task.id}-result.json`, JSON.stringify({
523
+ task_id: task.id, status: "completed",
524
+ discussion_round: discussionRound,
525
+ feedback: feedbackDecision,
526
+ timestamp: getUtc8ISOString()
527
+ }))
528
+
529
+ // Handle feedback
530
+ if (feedbackDecision === 'done') {
531
+ // Create SYNTH-001 blocked by last DISCUSS task
532
+ addTaskToMasterCsv(sessionFolder, {
533
+ id: 'SYNTH-001', deps: task.id.replace('FEEDBACK', 'DISCUSS'),
534
+ role: 'synthesizer', exec_mode: 'csv-wave', wave: String(wave + 1)
535
+ })
536
+ maxWave = wave + 1
537
+ } else if (feedbackDecision === 'adjust_direction') {
538
+ // Create ANALYZE-fix-N and DISCUSS-N+1
539
+ const fixId = `ANALYZE-fix-${discussionRound}`
540
+ const nextDiscussId = `DISCUSS-${String(discussionRound + 1).padStart(3, '0')}`
541
+ addTaskToMasterCsv(sessionFolder, {
542
+ id: fixId, role: 'analyst', exec_mode: 'csv-wave', wave: String(wave + 1)
543
+ })
544
+ addTaskToMasterCsv(sessionFolder, {
545
+ id: nextDiscussId, role: 'discussant', deps: fixId,
546
+ exec_mode: 'csv-wave', wave: String(wave + 2)
547
+ })
548
+ addTaskToMasterCsv(sessionFolder, {
549
+ id: `FEEDBACK-${String(discussionRound + 1).padStart(3, '0')}`,
550
+ exec_mode: 'interactive', deps: nextDiscussId, wave: String(wave + 3)
551
+ })
552
+ maxWave = wave + 3
553
+ } else {
554
+ // continue_deeper: Create DISCUSS-N+1
555
+ const nextDiscussId = `DISCUSS-${String(discussionRound + 1).padStart(3, '0')}`
556
+ addTaskToMasterCsv(sessionFolder, {
557
+ id: nextDiscussId, role: 'discussant', exec_mode: 'csv-wave', wave: String(wave + 1)
558
+ })
559
+ addTaskToMasterCsv(sessionFolder, {
560
+ id: `FEEDBACK-${String(discussionRound + 1).padStart(3, '0')}`,
561
+ exec_mode: 'interactive', deps: nextDiscussId, wave: String(wave + 2)
562
+ })
563
+ maxWave = wave + 2
564
+ }
565
+
566
+ updateMasterCsvRow(sessionFolder, task.id, {
567
+ status: 'completed',
568
+ findings: `Discussion feedback: ${feedbackDecision}, round ${discussionRound}`
569
+ })
570
+ }
571
+ }
572
+ ```
573
+
574
+ **Success Criteria**:
575
+ - All waves executed in order
576
+ - Both csv-wave and interactive tasks handled per wave
577
+ - Each wave's results merged into master CSV before next wave starts
578
+ - Dependent tasks skipped when predecessor failed
579
+ - discoveries.ndjson accumulated across all waves and mechanisms
580
+ - Discussion loop controlled with proper round tracking
581
+ - Dynamic tasks created correctly based on user feedback
582
+
583
+ ---
584
+
585
+ ### Phase 3: Post-Wave Interactive
586
+
587
+ **Objective**: Handle discussion loop completion and ensure synthesis is triggered.
588
+
589
+ After all discussion rounds are exhausted or user chooses "done":
590
+ 1. Ensure SYNTH-001 exists in master CSV
591
+ 2. Ensure SYNTH-001 is unblocked (blocked by last completed discussion task)
592
+ 3. Execute remaining waves (synthesis)
593
+
594
+ **Success Criteria**:
595
+ - Post-wave interactive processing complete
596
+ - Interactive agents closed, results stored
597
+
598
+ ---
599
+
600
+ ### Phase 4: Results Aggregation
601
+
602
+ **Objective**: Generate final results and human-readable report.
603
+
604
+ ```javascript
605
+ const masterCsv = Read(`${sessionFolder}/tasks.csv`)
606
+ Write(`${sessionFolder}/results.csv`, masterCsv)
607
+
608
+ const tasks = parseCsv(masterCsv)
609
+ const completed = tasks.filter(t => t.status === 'completed')
610
+ const failed = tasks.filter(t => t.status === 'failed')
611
+ const skipped = tasks.filter(t => t.status === 'skipped')
612
+
613
+ const contextContent = `# Ultra Analyze Report
614
+
615
+ **Session**: ${sessionId}
616
+ **Topic**: ${topic}
617
+ **Pipeline**: ${pipeline_mode}
618
+ **Perspectives**: ${perspectives.join(', ')}
619
+ **Discussion Rounds**: ${discussionRound}
620
+ **Completed**: ${getUtc8ISOString()}
621
+
622
+ ---
623
+
624
+ ## Summary
625
+
626
+ | Metric | Count |
627
+ |--------|-------|
628
+ | Total Tasks | ${tasks.length} |
629
+ | Completed | ${completed.length} |
630
+ | Failed | ${failed.length} |
631
+ | Skipped | ${skipped.length} |
632
+ | Discussion Rounds | ${discussionRound} |
633
+
634
+ ---
635
+
636
+ ## Wave Execution
637
+
638
+ ${waveDetails}
639
+
640
+ ---
641
+
642
+ ## Analysis Artifacts
643
+
644
+ - Explorations: discoveries with type "exploration" in discoveries.ndjson
645
+ - Analyses: discoveries with type "analysis" in discoveries.ndjson
646
+ - Discussion: discoveries with type "discussion" in discoveries.ndjson
647
+ - Conclusions: discoveries with type "conclusion" in discoveries.ndjson
648
+
649
+ ---
650
+
651
+ ## Conclusions
652
+
653
+ ${synthesisFindings}
654
+ `
655
+
656
+ Write(`${sessionFolder}/context.md`, contextContent)
657
+ ```
658
+
659
+ If not AUTO_YES, offer completion options:
660
+
661
+ ```javascript
662
+ if (!AUTO_YES) {
663
+ const answer = request_user_input({
664
+ questions: [{
665
+ question: "Ultra-Analyze pipeline complete. Choose next action.",
666
+ header: "Done",
667
+ id: "completion",
668
+ options: [
669
+ { label: "Archive (Recommended)", description: "Archive session" },
670
+ { label: "Keep Active", description: "Keep session for follow-up" },
671
+ { label: "Export Results", description: "Export deliverables to specified location" }
672
+ ]
673
+ }]
674
+ })
675
+ }
676
+ ```
677
+
678
+ **Success Criteria**:
679
+ - results.csv exported (all tasks, both modes)
680
+ - context.md generated
681
+ - All interactive agents closed
682
+ - Summary displayed to user
683
+
684
+ ---
685
+
686
+ ## Shared Discovery Board Protocol
687
+
688
+ All agents across all waves share `discoveries.ndjson`. This enables cross-role knowledge sharing.
689
+
690
+ **Discovery Types**:
691
+
692
+ | Type | Dedup Key | Data Schema | Description |
693
+ |------|-----------|-------------|-------------|
694
+ | `exploration` | `data.perspective+data.file` | `{perspective, file, relevance, summary, patterns[]}` | Explored file/module |
695
+ | `analysis` | `data.perspective+data.insight` | `{perspective, insight, confidence, evidence, file_ref}` | Analysis insight |
696
+ | `pattern` | `data.name` | `{name, file, description, type}` | Code/architecture pattern |
697
+ | `discussion_point` | `data.topic` | `{topic, perspectives[], convergence, open_questions[]}` | Discussion point |
698
+ | `recommendation` | `data.action` | `{action, rationale, priority, confidence}` | Recommendation |
699
+ | `conclusion` | `data.point` | `{point, evidence, confidence, perspectives_supporting[]}` | Final conclusion |
700
+
701
+ **Format**: NDJSON, each line is self-contained JSON:
702
+
703
+ ```jsonl
704
+ {"ts":"2026-03-08T10:00:00+08:00","worker":"EXPLORE-001","type":"exploration","data":{"perspective":"technical","file":"src/auth/index.ts","relevance":"high","summary":"Auth module entry point with OAuth and JWT exports","patterns":["module-pattern","strategy-pattern"]}}
705
+ {"ts":"2026-03-08T10:05:00+08:00","worker":"ANALYZE-001","type":"analysis","data":{"perspective":"technical","insight":"Auth module uses strategy pattern for provider switching","confidence":"high","evidence":"src/auth/strategies/*.ts","file_ref":"src/auth/index.ts:15"}}
706
+ {"ts":"2026-03-08T10:10:00+08:00","worker":"DISCUSS-001","type":"discussion_point","data":{"topic":"Authentication scalability","perspectives":["technical","architectural"],"convergence":"Both perspectives agree on stateless JWT approach","open_questions":["Token refresh strategy for long sessions"]}}
707
+ ```
708
+
709
+ **Protocol Rules**:
710
+ 1. Read board before own exploration → skip covered areas
711
+ 2. Write discoveries immediately via `echo >>` — don't batch
712
+ 3. Deduplicate — check existing entries by type + dedup key
713
+ 4. Append-only — never modify or delete existing lines
714
+
715
+ ---
716
+
717
+ ## Error Handling
718
+
719
+ | Error | Resolution |
720
+ |-------|------------|
721
+ | Circular dependency | Detect in wave computation, abort with error message |
722
+ | CSV agent timeout | Mark as failed in results, continue with wave |
723
+ | CSV agent failed | Mark as failed, skip dependent tasks in later waves |
724
+ | Interactive agent timeout | Urge convergence via send_input, then close if still timed out |
725
+ | Interactive agent failed | Mark as failed, skip dependents |
726
+ | All agents in wave failed | Log error, offer retry or abort |
727
+ | CSV parse error | Validate CSV format before execution, show line number |
728
+ | discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
729
+ | Discussion loop exceeds 5 rounds | Force synthesis, offer continuation |
730
+ | Explorer finds nothing | Continue with limited context, note limitation |
731
+ | CLI tool unavailable | Fallback chain: gemini → codex → direct analysis |
732
+ | User timeout in discussion | Save state, default to "done", proceed to synthesis |
733
+ | Continue mode: no session found | List available sessions, prompt user to select |
734
+
735
+ ---
736
+
737
+ ## Core Rules
738
+
739
+ 1. **Start Immediately**: First action is session initialization, then Phase 0/1
740
+ 2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
741
+ 3. **CSV is Source of Truth**: Master tasks.csv holds all state (both csv-wave and interactive)
742
+ 4. **CSV First**: Default to csv-wave for tasks; only use interactive when user interaction is needed
743
+ 5. **Context Propagation**: prev_context built from master CSV, not from memory
744
+ 6. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson — both mechanisms share it
745
+ 7. **Skip on Failure**: If a dependency failed, skip the dependent task (regardless of mechanism)
746
+ 8. **Lifecycle Balance**: Every spawn_agent MUST have a matching close_agent
747
+ 9. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
748
+ 10. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
749
+
750
+
751
+ ---
752
+
753
+ ## Coordinator Role Constraints (Main Agent)
754
+
755
+ **CRITICAL**: The coordinator (main agent executing this skill) is responsible for **orchestration only**, NOT implementation.
756
+
757
+ 15. **Coordinator Does NOT Execute Code**: The main agent MUST NOT write, modify, or implement any code directly. All implementation work is delegated to spawned team agents. The coordinator only:
758
+ - Spawns agents with task assignments
759
+ - Waits for agent callbacks
760
+ - Merges results and coordinates workflow
761
+ - Manages workflow transitions between phases
762
+
763
+ 16. **Patient Waiting is Mandatory**: Agent execution takes significant time (typically 10-30 minutes per phase, sometimes longer). The coordinator MUST:
764
+ - Wait patiently for `wait()` calls to complete
765
+ - NOT skip workflow steps due to perceived delays
766
+ - NOT assume agents have failed just because they're taking time
767
+ - Trust the timeout mechanisms defined in the skill
768
+
769
+ 17. **Use send_input for Clarification**: When agents need guidance or appear stuck, the coordinator MUST:
770
+ - Use `send_input()` to ask questions or provide clarification
771
+ - NOT skip the agent or move to next phase prematurely
772
+ - Give agents opportunity to respond before escalating
773
+ - Example: `send_input({ id: agent_id, message: "Please provide status update or clarify blockers" })`
774
+
775
+ 18. **No Workflow Shortcuts**: The coordinator MUST NOT:
776
+ - Skip phases or stages defined in the workflow
777
+ - Bypass required approval or review steps
778
+ - Execute dependent tasks before prerequisites complete
779
+ - Assume task completion without explicit agent callback
780
+ - Make up or fabricate agent results
781
+
782
+ 19. **Respect Long-Running Processes**: This is a complex multi-agent workflow that requires patience:
783
+ - Total execution time may range from 30-90 minutes or longer
784
+ - Each phase may take 10-30 minutes depending on complexity
785
+ - The coordinator must remain active and attentive throughout the entire process
786
+ - Do not terminate or skip steps due to time concerns