opencode-swarm-plugin 0.44.0 → 0.44.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (215)
  1. package/bin/swarm.serve.test.ts +6 -4
  2. package/bin/swarm.ts +18 -12
  3. package/dist/compaction-prompt-scoring.js +139 -0
  4. package/dist/eval-capture.js +12811 -0
  5. package/dist/hive.d.ts.map +1 -1
  6. package/dist/hive.js +14834 -0
  7. package/dist/index.d.ts +18 -0
  8. package/dist/index.d.ts.map +1 -1
  9. package/dist/index.js +7743 -62593
  10. package/dist/plugin.js +24052 -78907
  11. package/dist/swarm-orchestrate.d.ts.map +1 -1
  12. package/dist/swarm-prompts.d.ts.map +1 -1
  13. package/dist/swarm-prompts.js +39407 -0
  14. package/dist/swarm-review.d.ts.map +1 -1
  15. package/dist/swarm-validation.d.ts +127 -0
  16. package/dist/swarm-validation.d.ts.map +1 -0
  17. package/dist/validators/index.d.ts +7 -0
  18. package/dist/validators/index.d.ts.map +1 -0
  19. package/dist/validators/schema-validator.d.ts +58 -0
  20. package/dist/validators/schema-validator.d.ts.map +1 -0
  21. package/package.json +17 -5
  22. package/.changeset/swarm-insights-data-layer.md +0 -63
  23. package/.hive/analysis/eval-failure-analysis-2025-12-25.md +0 -331
  24. package/.hive/analysis/session-data-quality-audit.md +0 -320
  25. package/.hive/eval-results.json +0 -483
  26. package/.hive/issues.jsonl +0 -138
  27. package/.hive/memories.jsonl +0 -729
  28. package/.opencode/eval-history.jsonl +0 -327
  29. package/.turbo/turbo-build.log +0 -9
  30. package/CHANGELOG.md +0 -2286
  31. package/SCORER-ANALYSIS.md +0 -598
  32. package/docs/analysis/subagent-coordination-patterns.md +0 -902
  33. package/docs/analysis-socratic-planner-pattern.md +0 -504
  34. package/docs/planning/ADR-001-monorepo-structure.md +0 -171
  35. package/docs/planning/ADR-002-package-extraction.md +0 -393
  36. package/docs/planning/ADR-003-performance-improvements.md +0 -451
  37. package/docs/planning/ADR-004-message-queue-features.md +0 -187
  38. package/docs/planning/ADR-005-devtools-observability.md +0 -202
  39. package/docs/planning/ADR-007-swarm-enhancements-worktree-review.md +0 -168
  40. package/docs/planning/ADR-008-worker-handoff-protocol.md +0 -293
  41. package/docs/planning/ADR-009-oh-my-opencode-patterns.md +0 -353
  42. package/docs/planning/ADR-010-cass-inhousing.md +0 -1215
  43. package/docs/planning/ROADMAP.md +0 -368
  44. package/docs/semantic-memory-cli-syntax.md +0 -123
  45. package/docs/swarm-mail-architecture.md +0 -1147
  46. package/docs/testing/context-recovery-test.md +0 -470
  47. package/evals/ARCHITECTURE.md +0 -1189
  48. package/evals/README.md +0 -768
  49. package/evals/compaction-prompt.eval.ts +0 -149
  50. package/evals/compaction-resumption.eval.ts +0 -289
  51. package/evals/coordinator-behavior.eval.ts +0 -307
  52. package/evals/coordinator-session.eval.ts +0 -154
  53. package/evals/evalite.config.ts.bak +0 -15
  54. package/evals/example.eval.ts +0 -31
  55. package/evals/fixtures/cass-baseline.ts +0 -217
  56. package/evals/fixtures/compaction-cases.ts +0 -350
  57. package/evals/fixtures/compaction-prompt-cases.ts +0 -311
  58. package/evals/fixtures/coordinator-sessions.ts +0 -328
  59. package/evals/fixtures/decomposition-cases.ts +0 -105
  60. package/evals/lib/compaction-loader.test.ts +0 -248
  61. package/evals/lib/compaction-loader.ts +0 -320
  62. package/evals/lib/data-loader.evalite-test.ts +0 -289
  63. package/evals/lib/data-loader.test.ts +0 -345
  64. package/evals/lib/data-loader.ts +0 -281
  65. package/evals/lib/llm.ts +0 -115
  66. package/evals/scorers/compaction-prompt-scorers.ts +0 -145
  67. package/evals/scorers/compaction-scorers.ts +0 -305
  68. package/evals/scorers/coordinator-discipline.evalite-test.ts +0 -539
  69. package/evals/scorers/coordinator-discipline.ts +0 -325
  70. package/evals/scorers/index.test.ts +0 -146
  71. package/evals/scorers/index.ts +0 -328
  72. package/evals/scorers/outcome-scorers.evalite-test.ts +0 -27
  73. package/evals/scorers/outcome-scorers.ts +0 -349
  74. package/evals/swarm-decomposition.eval.ts +0 -121
  75. package/examples/commands/swarm.md +0 -745
  76. package/examples/plugin-wrapper-template.ts +0 -2515
  77. package/examples/skills/hive-workflow/SKILL.md +0 -212
  78. package/examples/skills/skill-creator/SKILL.md +0 -223
  79. package/examples/skills/swarm-coordination/SKILL.md +0 -292
  80. package/global-skills/cli-builder/SKILL.md +0 -344
  81. package/global-skills/cli-builder/references/advanced-patterns.md +0 -244
  82. package/global-skills/learning-systems/SKILL.md +0 -644
  83. package/global-skills/skill-creator/LICENSE.txt +0 -202
  84. package/global-skills/skill-creator/SKILL.md +0 -352
  85. package/global-skills/skill-creator/references/output-patterns.md +0 -82
  86. package/global-skills/skill-creator/references/workflows.md +0 -28
  87. package/global-skills/swarm-coordination/SKILL.md +0 -995
  88. package/global-skills/swarm-coordination/references/coordinator-patterns.md +0 -235
  89. package/global-skills/swarm-coordination/references/strategies.md +0 -138
  90. package/global-skills/system-design/SKILL.md +0 -213
  91. package/global-skills/testing-patterns/SKILL.md +0 -430
  92. package/global-skills/testing-patterns/references/dependency-breaking-catalog.md +0 -586
  93. package/opencode-swarm-plugin-0.30.7.tgz +0 -0
  94. package/opencode-swarm-plugin-0.31.0.tgz +0 -0
  95. package/scripts/cleanup-test-memories.ts +0 -346
  96. package/scripts/init-skill.ts +0 -222
  97. package/scripts/migrate-unknown-sessions.ts +0 -349
  98. package/scripts/validate-skill.ts +0 -204
  99. package/src/agent-mail.ts +0 -1724
  100. package/src/anti-patterns.test.ts +0 -1167
  101. package/src/anti-patterns.ts +0 -448
  102. package/src/compaction-capture.integration.test.ts +0 -257
  103. package/src/compaction-hook.test.ts +0 -838
  104. package/src/compaction-hook.ts +0 -1204
  105. package/src/compaction-observability.integration.test.ts +0 -139
  106. package/src/compaction-observability.test.ts +0 -187
  107. package/src/compaction-observability.ts +0 -324
  108. package/src/compaction-prompt-scorers.test.ts +0 -475
  109. package/src/compaction-prompt-scoring.ts +0 -300
  110. package/src/contributor-tools.test.ts +0 -133
  111. package/src/contributor-tools.ts +0 -201
  112. package/src/dashboard.test.ts +0 -611
  113. package/src/dashboard.ts +0 -462
  114. package/src/error-enrichment.test.ts +0 -403
  115. package/src/error-enrichment.ts +0 -219
  116. package/src/eval-capture.test.ts +0 -1015
  117. package/src/eval-capture.ts +0 -929
  118. package/src/eval-gates.test.ts +0 -306
  119. package/src/eval-gates.ts +0 -218
  120. package/src/eval-history.test.ts +0 -508
  121. package/src/eval-history.ts +0 -214
  122. package/src/eval-learning.test.ts +0 -378
  123. package/src/eval-learning.ts +0 -360
  124. package/src/eval-runner.test.ts +0 -223
  125. package/src/eval-runner.ts +0 -402
  126. package/src/export-tools.test.ts +0 -476
  127. package/src/export-tools.ts +0 -257
  128. package/src/hive.integration.test.ts +0 -2241
  129. package/src/hive.ts +0 -1628
  130. package/src/index.ts +0 -940
  131. package/src/learning.integration.test.ts +0 -1815
  132. package/src/learning.ts +0 -1079
  133. package/src/logger.test.ts +0 -189
  134. package/src/logger.ts +0 -135
  135. package/src/mandate-promotion.test.ts +0 -473
  136. package/src/mandate-promotion.ts +0 -239
  137. package/src/mandate-storage.integration.test.ts +0 -601
  138. package/src/mandate-storage.test.ts +0 -578
  139. package/src/mandate-storage.ts +0 -794
  140. package/src/mandates.ts +0 -540
  141. package/src/memory-tools.test.ts +0 -195
  142. package/src/memory-tools.ts +0 -344
  143. package/src/memory.integration.test.ts +0 -334
  144. package/src/memory.test.ts +0 -158
  145. package/src/memory.ts +0 -527
  146. package/src/model-selection.test.ts +0 -188
  147. package/src/model-selection.ts +0 -68
  148. package/src/observability-tools.test.ts +0 -359
  149. package/src/observability-tools.ts +0 -871
  150. package/src/output-guardrails.test.ts +0 -438
  151. package/src/output-guardrails.ts +0 -381
  152. package/src/pattern-maturity.test.ts +0 -1160
  153. package/src/pattern-maturity.ts +0 -525
  154. package/src/planning-guardrails.test.ts +0 -491
  155. package/src/planning-guardrails.ts +0 -438
  156. package/src/plugin.ts +0 -23
  157. package/src/post-compaction-tracker.test.ts +0 -251
  158. package/src/post-compaction-tracker.ts +0 -237
  159. package/src/query-tools.test.ts +0 -636
  160. package/src/query-tools.ts +0 -324
  161. package/src/rate-limiter.integration.test.ts +0 -466
  162. package/src/rate-limiter.ts +0 -774
  163. package/src/replay-tools.test.ts +0 -496
  164. package/src/replay-tools.ts +0 -240
  165. package/src/repo-crawl.integration.test.ts +0 -441
  166. package/src/repo-crawl.ts +0 -610
  167. package/src/schemas/cell-events.test.ts +0 -347
  168. package/src/schemas/cell-events.ts +0 -807
  169. package/src/schemas/cell.ts +0 -257
  170. package/src/schemas/evaluation.ts +0 -166
  171. package/src/schemas/index.test.ts +0 -199
  172. package/src/schemas/index.ts +0 -286
  173. package/src/schemas/mandate.ts +0 -232
  174. package/src/schemas/swarm-context.ts +0 -115
  175. package/src/schemas/task.ts +0 -161
  176. package/src/schemas/worker-handoff.test.ts +0 -302
  177. package/src/schemas/worker-handoff.ts +0 -131
  178. package/src/sessions/agent-discovery.test.ts +0 -137
  179. package/src/sessions/agent-discovery.ts +0 -112
  180. package/src/sessions/index.ts +0 -15
  181. package/src/skills.integration.test.ts +0 -1192
  182. package/src/skills.test.ts +0 -643
  183. package/src/skills.ts +0 -1549
  184. package/src/storage.integration.test.ts +0 -341
  185. package/src/storage.ts +0 -884
  186. package/src/structured.integration.test.ts +0 -817
  187. package/src/structured.test.ts +0 -1046
  188. package/src/structured.ts +0 -762
  189. package/src/swarm-decompose.test.ts +0 -188
  190. package/src/swarm-decompose.ts +0 -1302
  191. package/src/swarm-deferred.integration.test.ts +0 -157
  192. package/src/swarm-deferred.test.ts +0 -38
  193. package/src/swarm-insights.test.ts +0 -214
  194. package/src/swarm-insights.ts +0 -459
  195. package/src/swarm-mail.integration.test.ts +0 -970
  196. package/src/swarm-mail.ts +0 -739
  197. package/src/swarm-orchestrate.integration.test.ts +0 -282
  198. package/src/swarm-orchestrate.test.ts +0 -548
  199. package/src/swarm-orchestrate.ts +0 -3084
  200. package/src/swarm-prompts.test.ts +0 -1270
  201. package/src/swarm-prompts.ts +0 -2077
  202. package/src/swarm-research.integration.test.ts +0 -701
  203. package/src/swarm-research.test.ts +0 -698
  204. package/src/swarm-research.ts +0 -472
  205. package/src/swarm-review.integration.test.ts +0 -285
  206. package/src/swarm-review.test.ts +0 -879
  207. package/src/swarm-review.ts +0 -709
  208. package/src/swarm-strategies.ts +0 -407
  209. package/src/swarm-worktree.test.ts +0 -501
  210. package/src/swarm-worktree.ts +0 -575
  211. package/src/swarm.integration.test.ts +0 -2377
  212. package/src/swarm.ts +0 -38
  213. package/src/tool-adapter.integration.test.ts +0 -1221
  214. package/src/tool-availability.ts +0 -461
  215. package/tsconfig.json +0 -28
package/src/compaction-hook.ts
@@ -1,1204 +0,0 @@
- /**
- * Swarm-Aware Compaction Hook
- *
- * Provides context preservation during OpenCode session compaction.
- * When context is compacted, this hook injects instructions for the summarizer
- * to preserve swarm coordination state and enable seamless resumption.
- *
- * ## Philosophy: Err on the Side of Continuation
- *
- * It's better to inject swarm context unnecessarily than to lose an active swarm.
- * The cost of a false positive (extra context) is low.
- * The cost of a false negative (lost swarm) is high - wasted work, confused agents.
- *
- * Hook signature (from @opencode-ai/plugin):
- * ```typescript
- * "experimental.session.compacting"?: (
- * input: { sessionID: string },
- * output: { context: string[] }
- * ) => Promise<void>
- * ```
- *
- * @example
- * ```typescript
- * import { SWARM_COMPACTION_CONTEXT, createCompactionHook } from "opencode-swarm-plugin";
- *
- * const hooks: Hooks = {
- * "experimental.session.compacting": createCompactionHook(),
- * };
- * ```
- */
-
- import { checkSwarmHealth } from "swarm-mail";
- import {
- CompactionPhase,
- createMetricsCollector,
- getMetricsSummary,
- recordPatternExtracted,
- recordPatternSkipped,
- recordPhaseComplete,
- recordPhaseStart,
- } from "./compaction-observability";
- import { getHiveAdapter, getHiveWorkingDirectory } from "./hive";
- import { createChildLogger } from "./logger";
-
- let _logger: any | undefined;
-
- /**
- * Get logger instance (lazy initialization for testability)
- *
- * Logs to: ~/.config/swarm-tools/logs/compaction.log
- *
- * Log structure:
- * - START: session_id, trigger
- * - GATHER: source (swarm-mail|hive), duration_ms, stats/counts
- * - DETECT: confidence, detected, reason_count, reasons
- * - INJECT: confidence, context_length, context_type (full|fallback|none)
- * - COMPLETE: duration_ms, success, detected, confidence, context_injected
- */
- function getLog() {
- if (!_logger) {
- _logger = createChildLogger("compaction");
- }
- return _logger;
- }
-
- // ============================================================================
- // Compaction Context
- // ============================================================================
-
- /**
- * Swarm-aware compaction context
- *
- * Injected during compaction to keep the swarm cooking. The coordinator should
- * wake up from compaction and immediately resume orchestration - spawning agents,
- * monitoring progress, unblocking work.
- *
- * This is NOT about preserving state for a human - it's about the swarm continuing
- * autonomously after context compression.
- *
- * Structure optimized for eval scores:
- * 1. ASCII header (visual anchor, coordinatorIdentity scorer)
- * 2. What Good Looks Like (behavioral examples, outcome-focused)
- * 3. Immediate actions (actionable tool calls, postCompactionDiscipline scorer)
- * 4. Forbidden tools (explicit list, forbiddenToolsPresent scorer)
- * 5. Mandatory behaviors (inbox, skills, review)
- * 6. Role & mandates (strong language, coordinatorIdentity scorer)
- * 7. Reference sections (supporting material)
- */
- export const SWARM_COMPACTION_CONTEXT = `
- ┌─────────────────────────────────────────────────────────────┐
- │ │
- │ 🐝 YOU ARE THE COORDINATOR 🐝 │
- │ │
- │ NOT A WORKER. NOT AN IMPLEMENTER. │
- │ YOU ORCHESTRATE. │
- │ │
- └─────────────────────────────────────────────────────────────┘
-
- Context was compacted but the swarm is still running. **YOU ARE THE COORDINATOR.**
-
- Your role is ORCHESTRATION, not implementation. The resume steps above (if present) tell you exactly what to do first.
-
- ---
-
- ## 🎯 WHAT GOOD LOOKS LIKE (Behavioral Examples)
-
- **✅ GOOD Coordinator Behavior:**
- - Spawned researcher for unfamiliar tech → got summary → stored in semantic-memory
- - Loaded \`skills_use(name="testing-patterns")\` BEFORE spawning test workers
- - Checked \`swarmmail_inbox()\` every 5-10 minutes → caught blocked worker → unblocked in 2min
- - Delegated planning to swarm/planner subagent → main context stayed clean
- - Workers reserved their OWN files → no conflicts
- - Reviewed all worker output with \`swarm_review\` → caught integration issue before merge
-
- **❌ COMMON MISTAKES (Avoid These):**
- - Called context7/pdf-brain directly → dumped 50KB into thread → context exhaustion
- - Skipped skill loading → workers reinvented patterns already in skills
- - Never checked inbox → worker stuck 25 minutes → silent failure
- - Reserved files as coordinator → workers blocked → swarm stalled
- - Closed cells when workers said "done" → skipped review → shipped broken code
-
- ---
-
- ## 🚫 FORBIDDEN TOOLS (NEVER Use These Directly)
-
- Coordinators do NOT do implementation work. These tools are **FORBIDDEN**:
-
- ### File Modification (ALWAYS spawn workers instead)
- - \`Edit\` - SPAWN A WORKER
- - \`Write\` - SPAWN A WORKER
- - \`bash\` (for file modifications) - SPAWN A WORKER
- - \`swarmmail_reserve\` - Workers reserve their own files
- - \`git commit\` - Workers commit their own changes
-
- ### External Data Fetching (SPAWN A RESEARCHER instead)
-
- **Repository fetching:**
- - \`repo-crawl_file\`, \`repo-crawl_readme\`, \`repo-crawl_search\`, \`repo-crawl_structure\`, \`repo-crawl_tree\`
- - \`repo-autopsy_*\` (all repo-autopsy tools)
-
- **Web/documentation fetching:**
- - \`webfetch\`, \`fetch_fetch\`
- - \`context7_resolve-library-id\`, \`context7_get-library-docs\`
-
- **Knowledge base:**
- - \`pdf-brain_search\`, \`pdf-brain_read\`
-
- **Instead:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
-
- ---
-
- ## 💼 YOUR ROLE (Non-Negotiable)
-
- You are the **COORDINATOR**. Your job is ORCHESTRATION, not implementation.
-
- ### What Coordinators Do:
- - ✅ Spawn workers for implementation tasks
- - ✅ Monitor worker progress via \`swarm_status\` and \`swarmmail_inbox\`
- - ✅ Review completed work with \`swarm_review\`
- - ✅ Unblock dependencies and resolve conflicts
- - ✅ Close the loop when epics complete
-
- ### What Coordinators NEVER Do:
- - ❌ **NEVER** edit or write files directly
- - ❌ **NEVER** run tests with \`bash\`
- - ❌ **NEVER** "just do it myself to save time"
- - ❌ **NEVER** reserve files (workers reserve)
- - ❌ **NEVER** fetch external data directly (spawn researchers)
-
- **If you catch yourself about to edit a file, STOP. Use \`swarm_spawn_subtask\` instead.**
-
- ### Strong Mandates:
- - **ALWAYS** spawn workers for implementation tasks
- - **ALWAYS** check status and inbox before decisions
- - **ALWAYS** review worker output before accepting
- - **NON-NEGOTIABLE:** You orchestrate. You do NOT implement.
-
-
- ## 📋 MANDATORY BEHAVIORS (Post-Compaction Checklist)
-
- ### 1. Inbox Monitoring (EVERY 5-10 MINUTES)
- \`\`\`
- swarmmail_inbox(limit=5) # Check for messages
- swarmmail_read_message(message_id=N) # Read urgent ones
- swarm_status(epic_id, project_key) # Overall progress
- \`\`\`
- **Intervention triggers:** Worker blocked >5min, file conflict, scope creep
-
- ### 2. Skill Loading (BEFORE spawning workers)
- \`\`\`
- skills_use(name="swarm-coordination") # ALWAYS for swarms
- skills_use(name="testing-patterns") # If task involves tests
- skills_use(name="system-design") # If architectural decisions
- \`\`\`
- **Include skill recommendations in shared_context for workers.**
-
- ### 3. Worker Review (AFTER EVERY worker returns)
- \`\`\`
- swarm_review(project_key, epic_id, task_id, files_touched)
- # Evaluate: Does it fulfill requirements? Enable downstream tasks? Type safe?
- swarm_review_feedback(project_key, task_id, worker_id, status, issues)
- \`\`\`
- **3-Strike Rule:** After 3 rejections → mark blocked → escalate to human.
-
- ### 4. Research Spawning (For unfamiliar tech)
- \`\`\`
- Task(subagent_type="swarm-researcher", prompt="Research <topic>...")
- \`\`\`
- **NEVER call context7, pdf-brain, webfetch directly.** Spawn a researcher.
-
- ---
-
- ## 📝 SUMMARY FORMAT (Preserve This State)
-
- When compaction occurs, extract and preserve this structure:
-
- \`\`\`
- ## 🐝 Swarm State
-
- **Epic:** CELL_ID - TITLE
- **Project:** PROJECT_PATH
- **Progress:** X/Y subtasks complete
-
- **Active:**
- - CELL_ID: TITLE [in_progress] → AGENT working on FILES
-
- **Blocked:**
- - CELL_ID: TITLE - BLOCKED: REASON
-
- **Completed:**
- - CELL_ID: TITLE ✓
-
- **Ready to Spawn:**
- - CELL_ID: TITLE (files: FILES)
- \`\`\`
-
- ### What to Extract:
- 1. **Epic & Subtasks** - IDs, titles, status, file assignments
- 2. **What's Running** - Active agents and their current work
- 3. **What's Blocked** - Blockers and what's needed to unblock
- 4. **What's Done** - Completed work and follow-ups
- 5. **What's Next** - Pending subtasks ready to spawn
-
- ---
-
- ## 📋 REFERENCE: Full Coordinator Workflow
-
- You are ALWAYS swarming. Use this workflow for any new work:
-
- ### Phase 1.5: Research (For Complex Tasks)
-
- If the task requires unfamiliar technologies, spawn a researcher FIRST:
-
- \`\`\`
- swarm_spawn_researcher(
- research_id="research-TOPIC",
- epic_id="mjkw...", # your epic ID
- tech_stack=["TECHNOLOGY"],
- project_path="PROJECT_PATH"
- )
- // Then spawn with Task(subagent_type="swarm/researcher", prompt="...")
- \`\`\`
-
- ### Phase 2: Knowledge Gathering
-
- \`\`\`
- semantic-memory_find(query="TASK_KEYWORDS", limit=5) # Past learnings
- cass_search(query="TASK_DESCRIPTION", limit=5) # Similar past tasks
- skills_list() # Available skills
- \`\`\`
-
- ### Phase 3: Decompose
-
- \`\`\`
- swarm_select_strategy(task="TASK")
- swarm_plan_prompt(task="TASK", context="KNOWLEDGE")
- swarm_validate_decomposition(response="CELLTREE_JSON")
- \`\`\`
-
- ### Phase 4: Create Cells
-
- \`hive_create_epic(epic_title="TASK", subtasks=[...])\`
-
- ### Phase 5: File Reservations
-
- > **⚠️ Coordinator NEVER reserves files.** Workers reserve their own files with \`swarmmail_reserve\`.
-
- ### Phase 6: Spawn Workers
-
- \`\`\`
- swarm_spawn_subtask(bead_id, epic_id, title, files, shared_context, project_path)
- Task(subagent_type="swarm/worker", prompt="GENERATED_PROMPT")
- \`\`\`
-
- ### Phase 7: Review Loop (MANDATORY)
-
- **AFTER EVERY Task() RETURNS:**
-
- 1. \`swarmmail_inbox()\` - Check for messages
- 2. \`swarm_review(project_key, epic_id, task_id, files_touched)\` - Generate review
- 3. Evaluate against epic goals
- 4. \`swarm_review_feedback(project_key, task_id, worker_id, status, issues)\`
-
- **If needs_changes:**
- \`\`\`
- swarm_spawn_retry(bead_id, epic_id, original_prompt, attempt, issues, diff, files, project_path)
- // Spawn NEW worker with Task() using retry prompt
- // Max 3 attempts before marking task blocked
- \`\`\`
-
- ### Phase 8: Complete
-
- \`hive_sync()\` - Sync all cells to git
-
- ---
-
- ## 📊 REFERENCE: Decomposition Strategies
-
- | Strategy | Best For | Keywords |
- | -------------- | ------------------------ | -------------------------------------- |
- | file-based | Refactoring, migrations | refactor, migrate, rename, update all |
- | feature-based | New features | add, implement, build, create, feature |
- | risk-based | Bug fixes, security | fix, bug, security, critical, urgent |
-
- ---
-
- **You are the COORDINATOR. You orchestrate. You do NOT implement. Spawn workers.**
- `;
-
- /**
- * Fallback detection prompt - tells the compactor what to look for
- *
- * Used when we can't definitively detect a swarm but want to be safe.
- * The compactor can check the conversation context for these patterns.
- */
- export const SWARM_DETECTION_FALLBACK = `## 🐝 Swarm Detection - Check Your Context
-
- **IMPORTANT:** Before summarizing, check if this session involves an active swarm.
-
- Look for ANY of these patterns in the conversation:
-
- ### Tool Calls (definite swarm sign)
- - \`swarm_decompose\`, \`swarm_spawn_subtask\`, \`swarm_status\`, \`swarm_complete\`
- - \`swarmmail_init\`, \`swarmmail_reserve\`, \`swarmmail_send\`
- - \`hive_create_epic\`, \`hive_start\`, \`hive_close\`
-
- ### IDs and Names
- - Cell IDs: \`bd-xxx\`, \`bd-xxx.N\` (subtask format)
- - Agent names: BlueLake, RedMountain, GreenValley, etc.
- - Epic references: "epic", "subtask", "parent"
-
- ### Coordination Language
- - "spawn", "worker", "coordinator"
- - "reserve", "reservation", "files"
- - "blocked", "unblock", "dependency"
- - "progress", "complete", "in_progress"
-
- ### If You Find Swarm Evidence
-
- Include this in your summary:
- 1. Epic ID and title
- 2. Project path
- 3. Subtask status (running/blocked/done/pending)
- 4. Any blockers or issues
- 5. What should happen next
-
- **Then tell the resumed session:**
- "This is an active swarm. Check swarm_status and swarmmail_inbox immediately."
- `;
-
- // ============================================================================
- // Dynamic Context Building
- // ============================================================================
-
- /**
- * Build dynamic swarm state section from detected state
- *
- * This injects SPECIFIC values instead of placeholders, making the context
- * immediately actionable on resume.
- */
- function buildDynamicSwarmState(state: SwarmState): string {
- const parts: string[] = [];
-
- // Lead with epic context
- if (state.epicId && state.epicTitle) {
- parts.push(`You are coordinating epic **${state.epicId}** - ${state.epicTitle}`);
- } else if (state.epicId) {
- parts.push(`You are coordinating epic **${state.epicId}**`);
- }
-
- parts.push(`Project: ${state.projectPath}\n`);
-
- // IMMEDIATE ACTIONS section (must come FIRST for postCompactionDiscipline scoring)
- if (state.epicId) {
- parts.push(`## 1️⃣ IMMEDIATE ACTIONS (Do These FIRST)\n`);
- parts.push(`1. \`swarm_status(epic_id="${state.epicId}", project_key="${state.projectPath}")\` - Get current swarm state`);
- parts.push(`2. \`swarmmail_inbox(limit=5)\` - Check for worker messages and blockers`);
- parts.push(`3. For completed work: Review with \`swarm_review\` → \`swarm_review_feedback\``);
- parts.push(`4. For open subtasks: Spawn workers with \`swarm_spawn_subtask\``);
- parts.push(`5. For blocked work: Investigate, unblock, or reassign\n`);
- }
-
- // Swarm state summary
- parts.push(`## 🐝 Current Swarm State\n`);
-
- if (state.epicId && state.epicTitle) {
- parts.push(`**Epic:** ${state.epicId} - ${state.epicTitle}`);
-
- const totalSubtasks = state.subtasks.closed + state.subtasks.in_progress +
- state.subtasks.open + state.subtasks.blocked;
-
- if (totalSubtasks > 0) {
- parts.push(`**Subtasks:**`);
- if (state.subtasks.closed > 0) parts.push(` - ${state.subtasks.closed} closed`);
- if (state.subtasks.in_progress > 0) parts.push(` - ${state.subtasks.in_progress} in_progress`);
- if (state.subtasks.open > 0) parts.push(` - ${state.subtasks.open} open`);
- if (state.subtasks.blocked > 0) parts.push(` - ${state.subtasks.blocked} blocked`);
- }
- }
-
- parts.push(`**Project:** ${state.projectPath}\n`);
-
- return parts.join("\n");
- }
-
- // ============================================================================
- // SDK Message Scanning
- // ============================================================================
-
- /**
- * Tool part with completed state containing input/output
- */
- interface ToolPart {
- id: string;
- sessionID: string;
- messageID: string;
- type: "tool";
- callID: string;
- tool: string;
- state: ToolState;
- }
-
- /**
- * Tool state (completed tools have input/output we need)
- */
- type ToolState =
- | {
- status: "completed";
- input: { [key: string]: unknown };
- output: string;
- title: string;
- metadata: { [key: string]: unknown };
- time: { start: number; end: number };
- }
- | {
- status: string;
- [key: string]: unknown;
- };
-
- /**
- * SDK Client type (minimal interface for scanSessionMessages)
- *
- * The actual SDK client uses a more complex Options-based API:
- * client.session.messages({ path: { id: sessionID }, query: { limit } })
- *
- * We accept `unknown` and handle the type internally to avoid
- * tight coupling to SDK internals.
- */
- export type OpencodeClient = unknown;
-
- /**
- * Scanned swarm state extracted from session messages
- */
- export interface ScannedSwarmState {
- epicId?: string;
- epicTitle?: string;
- projectPath?: string;
- agentName?: string;
- subtasks: Map<
- string,
- { title: string; status: string; worker?: string; files?: string[] }
- >;
- lastAction?: { tool: string; args: unknown; timestamp: number };
- }
-
- /**
- * Scan session messages for swarm state using SDK client
- *
- * Extracts swarm coordination state from actual tool calls:
- * - swarm_spawn_subtask → subtask tracking
- * - swarmmail_init → agent name, project path
- * - hive_create_epic → epic ID and title
- * - swarm_status → epic reference
- * - swarm_complete → subtask completion
- *
- * @param client - OpenCode SDK client (undefined if not available)
- * @param sessionID - Session to scan
- * @param limit - Max messages to fetch (default 100)
- * @returns Extracted swarm state
- */
- export async function scanSessionMessages(
- client: OpencodeClient,
- sessionID: string,
- limit: number = 100,
- ): Promise<ScannedSwarmState> {
- const state: ScannedSwarmState = {
- subtasks: new Map(),
- };
-
- if (!client) {
- return state;
- }
-
- try {
- // SDK client uses Options-based API: { path: { id }, query: { limit } }
- const sdkClient = client as {
- session: {
- messages: (opts: {
- path: { id: string };
- query?: { limit?: number };
- }) => Promise<{ data?: Array<{ info: unknown; parts: ToolPart[] }> }>;
- };
- };
-
- const response = await sdkClient.session.messages({
- path: { id: sessionID },
- query: { limit },
- });
-
- const messages = response.data || [];
-
- for (const message of messages) {
- for (const part of message.parts) {
- if (part.type !== "tool" || part.state.status !== "completed") {
- continue;
- }
-
- const { tool, state: toolState } = part;
- const { input, output, time } = toolState as Extract<
- ToolState,
- { status: "completed" }
- >;
-
- // Track last action
- state.lastAction = {
- tool,
- args: input,
- timestamp: time.end,
- };
-
- // Extract swarm state based on tool type
- switch (tool) {
- case "hive_create_epic": {
- try {
- const parsed = JSON.parse(output);
- if (parsed.epic?.id) {
- state.epicId = parsed.epic.id;
- }
- if (input.epic_title && typeof input.epic_title === "string") {
- state.epicTitle = input.epic_title;
- }
- } catch {
- // Invalid JSON, skip
- }
- break;
- }
-
- case "swarmmail_init": {
- try {
- const parsed = JSON.parse(output);
- if (parsed.agent_name) {
- state.agentName = parsed.agent_name;
- }
- if (parsed.project_key) {
- state.projectPath = parsed.project_key;
- }
- } catch {
- // Invalid JSON, skip
- }
- break;
- }
-
- case "swarm_spawn_subtask": {
- const beadId = input.bead_id as string | undefined;
- const epicId = input.epic_id as string | undefined;
- const title = input.subtask_title as string | undefined;
- const files = input.files as string[] | undefined;
-
- if (beadId && title) {
- let worker: string | undefined;
- try {
- const parsed = JSON.parse(output);
- worker = parsed.worker;
- } catch {
- // No worker in output
- }
-
- state.subtasks.set(beadId, {
- title,
- status: "spawned",
- worker,
- files,
- });
-
- if (epicId && !state.epicId) {
- state.epicId = epicId;
- }
- }
- break;
- }
-
- case "swarm_complete": {
- const beadId = input.bead_id as string | undefined;
- if (beadId && state.subtasks.has(beadId)) {
- const existing = state.subtasks.get(beadId)!;
- state.subtasks.set(beadId, {
- ...existing,
- status: "completed",
- });
- }
- break;
- }
-
- case "swarm_status": {
- const epicId = input.epic_id as string | undefined;
- if (epicId && !state.epicId) {
- state.epicId = epicId;
- }
- const projectKey = input.project_key as string | undefined;
- if (projectKey && !state.projectPath) {
- state.projectPath = projectKey;
- }
- break;
- }
- }
- }
- }
- } catch (error) {
- getLog().debug(
- {
- error: error instanceof Error ? error.message : String(error),
- },
- "SDK message scanning failed",
- );
- // SDK not available or error fetching messages - return what we have
- }
-
- return state;
- }
-
- /**
- * Build dynamic swarm state from scanned messages (more precise than hive detection)
- */
- function buildDynamicSwarmStateFromScanned(
- scanned: ScannedSwarmState,
- detected: SwarmState,
- ): string {
- const parts: string[] = [];
-
- // Prefer scanned data over detected
- const epicId = scanned.epicId || detected.epicId;
- const epicTitle = scanned.epicTitle || detected.epicTitle;
- const projectPath = scanned.projectPath || detected.projectPath;
-
- // Lead with epic context
- if (epicId && epicTitle) {
- parts.push(`You are coordinating epic **${epicId}** - ${epicTitle}`);
- } else if (epicId) {
- parts.push(`You are coordinating epic **${epicId}**`);
- }
-
- if (scanned.agentName) {
- parts.push(`Coordinator: ${scanned.agentName}`);
- }
-
- parts.push(`Project: ${projectPath}\n`);
-
- // IMMEDIATE ACTIONS section (must come FIRST for postCompactionDiscipline scoring)
- if (epicId) {
- parts.push(`## 1️⃣ IMMEDIATE ACTIONS (Do These FIRST)\n`);
- parts.push(
- `1. \`swarm_status(epic_id="${epicId}", project_key="${projectPath}")\` - Get current swarm state`,
- );
- parts.push(`2. \`swarmmail_inbox(limit=5)\` - Check for worker messages and blockers`);
- parts.push(
- `3. For completed work: Review with \`swarm_review\` → \`swarm_review_feedback\``,
- );
- parts.push(`4. For open subtasks: Spawn workers with \`swarm_spawn_subtask\``);
- parts.push(`5. For blocked work: Investigate, unblock, or reassign\n`);
- }
-
- // Swarm state summary
- parts.push(`## 🐝 Current Swarm State\n`);
-
- if (epicId) {
- parts.push(`**Epic:** ${epicId}${epicTitle ? ` - ${epicTitle}` : ""}`);
- }
-
- // Show detailed subtask info from scanned state
- if (scanned.subtasks.size > 0) {
- parts.push(`\n**Subtasks:**`);
- for (const [id, subtask] of scanned.subtasks) {
- const status = subtask.status === "completed" ? "✓" : `[${subtask.status}]`;
- const worker = subtask.worker ? ` → ${subtask.worker}` : "";
- const files = subtask.files?.length ? ` (${subtask.files.join(", ")})` : "";
- parts.push(` - ${id}: ${subtask.title} ${status}${worker}${files}`);
- }
- } else if (detected.subtasks) {
- // Fall back to counts from hive detection
- const total =
- detected.subtasks.closed +
- detected.subtasks.in_progress +
- detected.subtasks.open +
- detected.subtasks.blocked;
-
- if (total > 0) {
- parts.push(`\n**Subtasks:**`);
- if (detected.subtasks.closed > 0)
- parts.push(` - ${detected.subtasks.closed} closed`);
- if (detected.subtasks.in_progress > 0)
- parts.push(` - ${detected.subtasks.in_progress} in_progress`);
- if (detected.subtasks.open > 0)
- parts.push(` - ${detected.subtasks.open} open`);
- if (detected.subtasks.blocked > 0)
- parts.push(` - ${detected.subtasks.blocked} blocked`);
- }
- }
-
- parts.push(`\n**Project:** ${projectPath}`);
-
- // Show last action if available
- if (scanned.lastAction) {
- parts.push(`**Last Action:** \`${scanned.lastAction.tool}\``);
- }
-
- return parts.join("\n");
- }
-
- // ============================================================================
- // Swarm Detection
- // ============================================================================
-
- /**
- * Detection result with confidence level
- */
- interface SwarmDetection {
- detected: boolean;
- confidence: "high" | "medium" | "low" | "none";
- reasons: string[];
- /** Specific swarm state data for context injection */
- state?: SwarmState;
- }
-
- /**
- * Specific swarm state captured during detection
- */
- interface SwarmState {
- epicId?: string;
- epicTitle?: string;
- projectPath: string;
- subtasks: {
- closed: number;
- in_progress: number;
- open: number;
- blocked: number;
- };
- }
-
- /**
- * Check for swarm sign - evidence a swarm passed through
- *
- * Uses multiple signals with different confidence levels:
- * - HIGH: Active reservations, in_progress cells
- * - MEDIUM: Open subtasks, unclosed epics, recent activity
- * - LOW: Any cells exist, swarm-mail initialized
- *
- * Philosophy: Err on the side of continuation.
- */
- async function detectSwarm(): Promise<SwarmDetection> {
- const reasons: string[] = [];
- let highConfidence = false;
- let mediumConfidence = false;
- let lowConfidence = false;
- let state: SwarmState | undefined;
-
- try {
- const projectKey = getHiveWorkingDirectory();
-
- // Initialize state with project path
- state = {
- projectPath: projectKey,
- subtasks: {
- closed: 0,
- in_progress: 0,
- open: 0,
- blocked: 0,
- },
- };
-
- // Check 1: Active reservations in swarm-mail (HIGH confidence)
- const swarmMailStart = Date.now();
- try {
- const health = await checkSwarmHealth(projectKey);
- const duration = Date.now() - swarmMailStart;
-
- getLog().debug(
- {
- source: "swarm-mail",
- duration_ms: duration,
- healthy: health.healthy,
- stats: health.stats,
- },
- "checked swarm-mail health",
- );
-
- if (health.healthy && health.stats) {
- if (health.stats.reservations > 0) {
- highConfidence = true;
- reasons.push(`${health.stats.reservations} active file reservations`);
- }
- if (health.stats.agents > 0) {
- mediumConfidence = true;
- reasons.push(`${health.stats.agents} registered agents`);
- }
- if (health.stats.messages > 0) {
- lowConfidence = true;
- reasons.push(`${health.stats.messages} swarm messages`);
- }
- }
- } catch (error) {
- getLog().debug(
- {
- source: "swarm-mail",
- duration_ms: Date.now() - swarmMailStart,
- error: error instanceof Error ? error.message : String(error),
- },
- "swarm-mail check failed",
- );
- // Swarm-mail not available, continue with other checks
- }
-
- // Check 2: Hive cells (various confidence levels)
- const hiveStart = Date.now();
- try {
- const adapter = await getHiveAdapter(projectKey);
- const cells = await adapter.queryCells(projectKey, {});
- const duration = Date.now() - hiveStart;
-
- if (Array.isArray(cells) && cells.length > 0) {
- // HIGH: Any in_progress cells
- const inProgress = cells.filter((c) => c.status === "in_progress");
- if (inProgress.length > 0) {
- highConfidence = true;
- reasons.push(`${inProgress.length} cells in_progress`);
- }
-
- // MEDIUM: Open subtasks (cells with parent_id)
- const subtasks = cells.filter(
- (c) => c.status === "open" && c.parent_id,
- );
- if (subtasks.length > 0) {
- mediumConfidence = true;
- reasons.push(`${subtasks.length} open subtasks`);
- }
-
- // MEDIUM: Unclosed epics
- const openEpics = cells.filter(
- (c) => c.type === "epic" && c.status !== "closed",
- );
- if (openEpics.length > 0) {
- mediumConfidence = true;
- reasons.push(`${openEpics.length} unclosed epics`);
-
- // Capture in_progress epic data for state
- const inProgressEpic = openEpics.find((c) => c.status === "in_progress");
- if (inProgressEpic && state) {
- state.epicId = inProgressEpic.id;
- state.epicTitle = inProgressEpic.title;
-
- // Count subtasks for this epic
- const epicSubtasks = cells.filter((c) => c.parent_id === inProgressEpic.id);
- state.subtasks.closed = epicSubtasks.filter((c) => c.status === "closed").length;
- state.subtasks.in_progress = epicSubtasks.filter((c) => c.status === "in_progress").length;
- state.subtasks.open = epicSubtasks.filter((c) => c.status === "open").length;
- state.subtasks.blocked = epicSubtasks.filter((c) => c.status === "blocked").length;
-
- getLog().debug(
- {
- epic_id: state.epicId,
- epic_title: state.epicTitle,
- subtasks_closed: state.subtasks.closed,
- subtasks_in_progress: state.subtasks.in_progress,
- subtasks_open: state.subtasks.open,
- subtasks_blocked: state.subtasks.blocked,
- },
- "captured epic state for context",
- );
- }
- }
-
- // MEDIUM: Recently updated cells (last hour)
- const oneHourAgo = Date.now() - 60 * 60 * 1000;
- const recentCells = cells.filter((c) => c.updated_at > oneHourAgo);
- if (recentCells.length > 0) {
- mediumConfidence = true;
- reasons.push(`${recentCells.length} cells updated in last hour`);
- }
-
- // LOW: Any cells exist at all
- if (cells.length > 0) {
- lowConfidence = true;
- reasons.push(`${cells.length} total cells in hive`);
- }
-
- getLog().debug(
- {
- source: "hive",
- duration_ms: duration,
- total_cells: cells.length,
- in_progress: inProgress.length,
- open_subtasks: subtasks.length,
- open_epics: openEpics.length,
- recent_updates: recentCells.length,
- },
- "checked hive cells",
- );
- } else {
- getLog().debug(
- { source: "hive", duration_ms: duration, total_cells: 0 },
- "hive empty",
- );
- }
- } catch (error) {
- getLog().debug(
- {
- source: "hive",
- duration_ms: Date.now() - hiveStart,
- error: error instanceof Error ? error.message : String(error),
- },
- "hive check failed",
- );
- // Hive not available, continue
- }
- } catch (error) {
- // Project detection failed, use fallback
- lowConfidence = true;
- reasons.push("Could not detect project, using fallback");
- getLog().debug(
- {
- error: error instanceof Error ? error.message : String(error),
- },
- "project detection failed",
- );
- }
-
- // Determine overall confidence
- let confidence: "high" | "medium" | "low" | "none";
- if (highConfidence) {
- confidence = "high";
- } else if (mediumConfidence) {
- confidence = "medium";
- } else if (lowConfidence) {
- confidence = "low";
- } else {
- confidence = "none";
- }
-
- const result = {
- detected: confidence !== "none",
- confidence,
- reasons,
- state,
- };
-
- getLog().debug(
- {
- detected: result.detected,
- confidence: result.confidence,
- reason_count: result.reasons.length,
- reasons: result.reasons,
- has_state: !!result.state,
- },
- "swarm detection complete",
- );
-
- return result;
- }
-
- // ============================================================================
- // Hook Registration
- // ============================================================================
-
- /**
- * Create the compaction hook for use in plugin registration
- *
- * Injects swarm context based on detection confidence:
- * - HIGH/MEDIUM: Full swarm context (definitely/probably a swarm)
- * - LOW: Fallback detection prompt (let compactor check context)
- * - NONE: No injection (probably not a swarm)
- *
- * Philosophy: Err on the side of continuation. A false positive costs
- * a bit of context space. A false negative loses the swarm.
- *
- * @param client - Optional OpenCode SDK client for scanning session messages.
- * When provided, extracts PRECISE swarm state from actual tool calls.
- * When undefined, falls back to hive/swarm-mail heuristic detection.
- *
- * @example
- * ```typescript
- * import { createCompactionHook } from "opencode-swarm-plugin";
- *
- * export const SwarmPlugin: Plugin = async (input) => ({
- * tool: { ... },
- * "experimental.session.compacting": createCompactionHook(input.client),
- * });
- * ```
- */
- export function createCompactionHook(client?: OpencodeClient) {
- return async (
- input: { sessionID: string },
- output: { context: string[] },
- ): Promise<void> => {
- const startTime = Date.now();
-
- // Create metrics collector
- const metrics = createMetricsCollector({
- session_id: input.sessionID,
- has_sdk_client: !!client,
- });
-
- getLog().info(
- {
- session_id: input.sessionID,
- trigger: "session_compaction",
- has_sdk_client: !!client,
- },
- "compaction started",
- );
-
- recordPhaseStart(metrics, CompactionPhase.START);
-
- try {
- recordPhaseComplete(metrics, CompactionPhase.START);
-
- // Scan session messages for precise swarm state (if client available)
- recordPhaseStart(metrics, CompactionPhase.GATHER_SWARM_MAIL);
- const scannedState = await scanSessionMessages(client, input.sessionID);
- recordPhaseComplete(metrics, CompactionPhase.GATHER_SWARM_MAIL);
-
- // Also run heuristic detection from hive/swarm-mail
- recordPhaseStart(metrics, CompactionPhase.DETECT);
- const detection = await detectSwarm();
-
- // Boost confidence if we found swarm evidence in session messages
- let effectiveConfidence = detection.confidence;
- if (scannedState.epicId || scannedState.subtasks.size > 0) {
- // Session messages show swarm activity - this is HIGH confidence
- if (effectiveConfidence === "none" || effectiveConfidence === "low") {
- effectiveConfidence = "medium";
- detection.reasons.push("swarm tool calls found in session");
- recordPatternExtracted(metrics, "swarm_tool_calls", "Found swarm tool calls in session");
- }
- if (scannedState.subtasks.size > 0) {
- effectiveConfidence = "high";
- detection.reasons.push(`${scannedState.subtasks.size} subtasks spawned`);
- recordPatternExtracted(metrics, "subtasks", `${scannedState.subtasks.size} subtasks spawned`);
- }
- }
-
- recordPhaseComplete(metrics, CompactionPhase.DETECT, {
- confidence: effectiveConfidence,
- detected: detection.detected || scannedState.epicId !== undefined,
- });
-
- recordPhaseStart(metrics, CompactionPhase.INJECT);
- if (
- effectiveConfidence === "high" ||
- effectiveConfidence === "medium"
- ) {
- // Definite or probable swarm - inject full context
- const header = `[Swarm detected: ${detection.reasons.join(", ")}]\n\n`;
-
- // Build dynamic state section - prefer scanned state (ground truth) over detected
- let dynamicState = "";
- if (scannedState.epicId || scannedState.subtasks.size > 0) {
- // Use scanned state (more precise)
- dynamicState =
- buildDynamicSwarmStateFromScanned(
- scannedState,
- detection.state || {
- projectPath: scannedState.projectPath || process.cwd(),
- subtasks: { closed: 0, in_progress: 0, open: 0, blocked: 0 },
- },
- ) + "\n\n";
- } else if (detection.state && detection.state.epicId) {
- // Fall back to hive-detected state
- dynamicState = buildDynamicSwarmState(detection.state) + "\n\n";
- }
-
- const contextContent = header + dynamicState + SWARM_COMPACTION_CONTEXT;
- output.context.push(contextContent);
-
- recordPhaseComplete(metrics, CompactionPhase.INJECT, {
- context_length: contextContent.length,
- context_type: "full",
- });
-
- getLog().info(
- {
- confidence: effectiveConfidence,
- context_length: contextContent.length,
- context_type: "full",
- reasons: detection.reasons,
- has_dynamic_state: !!dynamicState,
- epic_id: scannedState.epicId || detection.state?.epicId,
- scanned_subtasks: scannedState.subtasks.size,
- scanned_agent: scannedState.agentName,
- },
- "injected swarm context",
- );
- } else if (effectiveConfidence === "low") {
- // Possible swarm - inject fallback detection prompt
- const header = `[Possible swarm: ${detection.reasons.join(", ")}]\n\n`;
- const contextContent = header + SWARM_DETECTION_FALLBACK;
- output.context.push(contextContent);
-
- recordPhaseComplete(metrics, CompactionPhase.INJECT, {
- context_length: contextContent.length,
- context_type: "fallback",
- });
-
- getLog().info(
- {
- confidence: effectiveConfidence,
- context_length: contextContent.length,
- context_type: "fallback",
- reasons: detection.reasons,
- },
- "injected swarm context",
- );
- } else {
- recordPhaseComplete(metrics, CompactionPhase.INJECT, {
- context_type: "none",
- });
-
- getLog().debug(
- {
- confidence: effectiveConfidence,
- context_type: "none",
- },
- "no swarm detected, skipping injection",
- );
- }
- // confidence === "none" - no injection, probably not a swarm
-
- recordPhaseStart(metrics, CompactionPhase.COMPLETE);
- const duration = Date.now() - startTime;
- const summary = getMetricsSummary(metrics);
-
- getLog().info(
- {
- duration_ms: duration,
- success: true,
- detected: detection.detected || scannedState.epicId !== undefined,
- confidence: effectiveConfidence,
- context_injected: output.context.length > 0,
- // Add metrics summary
- metrics: {
- phases: Object.keys(summary.phases).map(phase => ({
- name: phase,
- duration_ms: summary.phases[phase].duration_ms,
- success: summary.phases[phase].success,
- })),
- patterns_extracted: summary.patterns_extracted,
- patterns_skipped: summary.patterns_skipped,
- extraction_success_rate: summary.extraction_success_rate,
- },
- },
- "compaction complete",
- );
-
- recordPhaseComplete(metrics, CompactionPhase.COMPLETE);
- } catch (error) {
- const duration = Date.now() - startTime;
-
- recordPhaseComplete(metrics, CompactionPhase.COMPLETE, {
- success: false,
- error: error instanceof Error ? error.message : String(error),
- });
-
- getLog().error(
- {
- duration_ms: duration,
- success: false,
- error: error instanceof Error ? error.message : String(error),
- stack: error instanceof Error ? error.stack : undefined,
- },
- "compaction failed",
- );
- // Don't throw - compaction hook failures shouldn't break the session
- }
- };
- }
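For reference, the removed compaction-hook.ts documented its own registration pattern in its JSDoc. A minimal sketch of that wiring, assuming `Plugin` is the plugin type exported by @opencode-ai/plugin and that `createCompactionHook` was still re-exported from opencode-swarm-plugin at this version:

```typescript
// Sketch based on the removed file's own @example blocks; names of the
// plugin input shape beyond `client` are assumptions, not confirmed API.
import type { Plugin } from "@opencode-ai/plugin";
import { createCompactionHook } from "opencode-swarm-plugin";

export const SwarmPlugin: Plugin = async (input) => ({
  // Other tools/hooks would be registered alongside this entry.
  // Passing the SDK client lets the hook scan session messages for precise
  // swarm state; omitting it falls back to hive/swarm-mail heuristics.
  "experimental.session.compacting": createCompactionHook(input.client),
});
```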