opencode-swarm-plugin 0.56.1 → 0.57.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. package/README.md +21 -0
  2. package/claude-plugin/agents/background-worker.md +1 -0
  3. package/claude-plugin/agents/coordinator.md +1 -0
  4. package/claude-plugin/agents/worker.md +1 -0
  5. package/claude-plugin/bin/swarm-mcp-server.ts +47 -8
  6. package/claude-plugin/dist/agent-mail.d.ts +480 -0
  7. package/claude-plugin/dist/agent-mail.d.ts.map +1 -0
  8. package/claude-plugin/dist/anti-patterns.d.ts +257 -0
  9. package/claude-plugin/dist/anti-patterns.d.ts.map +1 -0
  10. package/claude-plugin/dist/bin/swarm.js +373128 -0
  11. package/claude-plugin/dist/cass-tools.d.ts +74 -0
  12. package/claude-plugin/dist/cass-tools.d.ts.map +1 -0
  13. package/claude-plugin/dist/claude-plugin/claude-plugin-assets.d.ts +10 -0
  14. package/claude-plugin/dist/claude-plugin/claude-plugin-assets.d.ts.map +1 -0
  15. package/claude-plugin/dist/compaction-hook.d.ts +178 -0
  16. package/claude-plugin/dist/compaction-hook.d.ts.map +1 -0
  17. package/claude-plugin/dist/compaction-observability.d.ts +173 -0
  18. package/claude-plugin/dist/compaction-observability.d.ts.map +1 -0
  19. package/claude-plugin/dist/compaction-prompt-scoring.d.ts +125 -0
  20. package/claude-plugin/dist/compaction-prompt-scoring.d.ts.map +1 -0
  21. package/claude-plugin/dist/compaction-prompt-scoring.js +139 -0
  22. package/claude-plugin/dist/contributor-tools.d.ts +42 -0
  23. package/claude-plugin/dist/contributor-tools.d.ts.map +1 -0
  24. package/claude-plugin/dist/coordinator-guard.d.ts +79 -0
  25. package/claude-plugin/dist/coordinator-guard.d.ts.map +1 -0
  26. package/claude-plugin/dist/dashboard.d.ts +82 -0
  27. package/claude-plugin/dist/dashboard.d.ts.map +1 -0
  28. package/claude-plugin/dist/decision-trace-integration.d.ts +204 -0
  29. package/claude-plugin/dist/decision-trace-integration.d.ts.map +1 -0
  30. package/claude-plugin/dist/error-enrichment.d.ts +49 -0
  31. package/claude-plugin/dist/error-enrichment.d.ts.map +1 -0
  32. package/claude-plugin/dist/eval-capture.d.ts +494 -0
  33. package/claude-plugin/dist/eval-capture.d.ts.map +1 -0
  34. package/claude-plugin/dist/eval-capture.js +12844 -0
  35. package/claude-plugin/dist/eval-gates.d.ts +84 -0
  36. package/claude-plugin/dist/eval-gates.d.ts.map +1 -0
  37. package/claude-plugin/dist/eval-history.d.ts +117 -0
  38. package/claude-plugin/dist/eval-history.d.ts.map +1 -0
  39. package/claude-plugin/dist/eval-learning.d.ts +216 -0
  40. package/claude-plugin/dist/eval-learning.d.ts.map +1 -0
  41. package/claude-plugin/dist/eval-runner.d.ts +134 -0
  42. package/claude-plugin/dist/eval-runner.d.ts.map +1 -0
  43. package/claude-plugin/dist/examples/plugin-wrapper-template.ts +3341 -0
  44. package/claude-plugin/dist/export-tools.d.ts +76 -0
  45. package/claude-plugin/dist/export-tools.d.ts.map +1 -0
  46. package/claude-plugin/dist/hive.d.ts +949 -0
  47. package/claude-plugin/dist/hive.d.ts.map +1 -0
  48. package/claude-plugin/dist/hive.js +15009 -0
  49. package/claude-plugin/dist/hivemind-tools.d.ts +479 -0
  50. package/claude-plugin/dist/hivemind-tools.d.ts.map +1 -0
  51. package/claude-plugin/dist/hooks/atomic-write.d.ts +21 -0
  52. package/claude-plugin/dist/hooks/atomic-write.d.ts.map +1 -0
  53. package/claude-plugin/dist/hooks/constants.d.ts +28 -0
  54. package/claude-plugin/dist/hooks/constants.d.ts.map +1 -0
  55. package/claude-plugin/dist/hooks/index.d.ts +16 -0
  56. package/claude-plugin/dist/hooks/index.d.ts.map +1 -0
  57. package/claude-plugin/dist/hooks/session-start.d.ts +30 -0
  58. package/claude-plugin/dist/hooks/session-start.d.ts.map +1 -0
  59. package/claude-plugin/dist/hooks/tool-complete.d.ts +54 -0
  60. package/claude-plugin/dist/hooks/tool-complete.d.ts.map +1 -0
  61. package/claude-plugin/dist/index.d.ts +2017 -0
  62. package/claude-plugin/dist/index.d.ts.map +1 -0
  63. package/claude-plugin/dist/index.js +73453 -0
  64. package/claude-plugin/dist/learning.d.ts +700 -0
  65. package/claude-plugin/dist/learning.d.ts.map +1 -0
  66. package/claude-plugin/dist/logger.d.ts +38 -0
  67. package/claude-plugin/dist/logger.d.ts.map +1 -0
  68. package/claude-plugin/dist/mandate-promotion.d.ts +93 -0
  69. package/claude-plugin/dist/mandate-promotion.d.ts.map +1 -0
  70. package/claude-plugin/dist/mandate-storage.d.ts +209 -0
  71. package/claude-plugin/dist/mandate-storage.d.ts.map +1 -0
  72. package/claude-plugin/dist/mandates.d.ts +230 -0
  73. package/claude-plugin/dist/mandates.d.ts.map +1 -0
  74. package/claude-plugin/dist/memory-tools.d.ts +281 -0
  75. package/claude-plugin/dist/memory-tools.d.ts.map +1 -0
  76. package/claude-plugin/dist/memory.d.ts +164 -0
  77. package/claude-plugin/dist/memory.d.ts.map +1 -0
  78. package/claude-plugin/dist/model-selection.d.ts +37 -0
  79. package/claude-plugin/dist/model-selection.d.ts.map +1 -0
  80. package/claude-plugin/dist/observability-health.d.ts +87 -0
  81. package/claude-plugin/dist/observability-health.d.ts.map +1 -0
  82. package/claude-plugin/dist/observability-tools.d.ts +184 -0
  83. package/claude-plugin/dist/observability-tools.d.ts.map +1 -0
  84. package/claude-plugin/dist/output-guardrails.d.ts +125 -0
  85. package/claude-plugin/dist/output-guardrails.d.ts.map +1 -0
  86. package/claude-plugin/dist/pattern-maturity.d.ts +246 -0
  87. package/claude-plugin/dist/pattern-maturity.d.ts.map +1 -0
  88. package/claude-plugin/dist/planning-guardrails.d.ts +183 -0
  89. package/claude-plugin/dist/planning-guardrails.d.ts.map +1 -0
  90. package/claude-plugin/dist/plugin.d.ts +22 -0
  91. package/claude-plugin/dist/plugin.d.ts.map +1 -0
  92. package/claude-plugin/dist/plugin.js +72295 -0
  93. package/claude-plugin/dist/post-compaction-tracker.d.ts +133 -0
  94. package/claude-plugin/dist/post-compaction-tracker.d.ts.map +1 -0
  95. package/claude-plugin/dist/query-tools.d.ts +90 -0
  96. package/claude-plugin/dist/query-tools.d.ts.map +1 -0
  97. package/claude-plugin/dist/rate-limiter.d.ts +218 -0
  98. package/claude-plugin/dist/rate-limiter.d.ts.map +1 -0
  99. package/claude-plugin/dist/regression-detection.d.ts +58 -0
  100. package/claude-plugin/dist/regression-detection.d.ts.map +1 -0
  101. package/claude-plugin/dist/replay-tools.d.ts +28 -0
  102. package/claude-plugin/dist/replay-tools.d.ts.map +1 -0
  103. package/claude-plugin/dist/repo-crawl.d.ts +146 -0
  104. package/claude-plugin/dist/repo-crawl.d.ts.map +1 -0
  105. package/claude-plugin/dist/schemas/cell-events.d.ts +1352 -0
  106. package/claude-plugin/dist/schemas/cell-events.d.ts.map +1 -0
  107. package/claude-plugin/dist/schemas/cell.d.ts +413 -0
  108. package/claude-plugin/dist/schemas/cell.d.ts.map +1 -0
  109. package/claude-plugin/dist/schemas/evaluation.d.ts +161 -0
  110. package/claude-plugin/dist/schemas/evaluation.d.ts.map +1 -0
  111. package/claude-plugin/dist/schemas/index.d.ts +46 -0
  112. package/claude-plugin/dist/schemas/index.d.ts.map +1 -0
  113. package/claude-plugin/dist/schemas/mandate.d.ts +336 -0
  114. package/claude-plugin/dist/schemas/mandate.d.ts.map +1 -0
  115. package/claude-plugin/dist/schemas/swarm-context.d.ts +131 -0
  116. package/claude-plugin/dist/schemas/swarm-context.d.ts.map +1 -0
  117. package/claude-plugin/dist/schemas/task.d.ts +189 -0
  118. package/claude-plugin/dist/schemas/task.d.ts.map +1 -0
  119. package/claude-plugin/dist/schemas/worker-handoff.d.ts +78 -0
  120. package/claude-plugin/dist/schemas/worker-handoff.d.ts.map +1 -0
  121. package/claude-plugin/dist/sessions/agent-discovery.d.ts +59 -0
  122. package/claude-plugin/dist/sessions/agent-discovery.d.ts.map +1 -0
  123. package/claude-plugin/dist/sessions/index.d.ts +10 -0
  124. package/claude-plugin/dist/sessions/index.d.ts.map +1 -0
  125. package/claude-plugin/dist/skills.d.ts +490 -0
  126. package/claude-plugin/dist/skills.d.ts.map +1 -0
  127. package/claude-plugin/dist/storage.d.ts +260 -0
  128. package/claude-plugin/dist/storage.d.ts.map +1 -0
  129. package/claude-plugin/dist/structured.d.ts +206 -0
  130. package/claude-plugin/dist/structured.d.ts.map +1 -0
  131. package/claude-plugin/dist/swarm-adversarial-review.d.ts +104 -0
  132. package/claude-plugin/dist/swarm-adversarial-review.d.ts.map +1 -0
  133. package/claude-plugin/dist/swarm-decompose.d.ts +297 -0
  134. package/claude-plugin/dist/swarm-decompose.d.ts.map +1 -0
  135. package/claude-plugin/dist/swarm-insights.d.ts +390 -0
  136. package/claude-plugin/dist/swarm-insights.d.ts.map +1 -0
  137. package/claude-plugin/dist/swarm-mail.d.ts +274 -0
  138. package/claude-plugin/dist/swarm-mail.d.ts.map +1 -0
  139. package/claude-plugin/dist/swarm-orchestrate.d.ts +924 -0
  140. package/claude-plugin/dist/swarm-orchestrate.d.ts.map +1 -0
  141. package/claude-plugin/dist/swarm-prompts.d.ts +467 -0
  142. package/claude-plugin/dist/swarm-prompts.d.ts.map +1 -0
  143. package/claude-plugin/dist/swarm-prompts.js +45283 -0
  144. package/claude-plugin/dist/swarm-research.d.ts +125 -0
  145. package/claude-plugin/dist/swarm-research.d.ts.map +1 -0
  146. package/claude-plugin/dist/swarm-review.d.ts +214 -0
  147. package/claude-plugin/dist/swarm-review.d.ts.map +1 -0
  148. package/claude-plugin/dist/swarm-signature.d.ts +106 -0
  149. package/claude-plugin/dist/swarm-signature.d.ts.map +1 -0
  150. package/claude-plugin/dist/swarm-strategies.d.ts +113 -0
  151. package/claude-plugin/dist/swarm-strategies.d.ts.map +1 -0
  152. package/claude-plugin/dist/swarm-validation.d.ts +127 -0
  153. package/claude-plugin/dist/swarm-validation.d.ts.map +1 -0
  154. package/claude-plugin/dist/swarm-worktree.d.ts +185 -0
  155. package/claude-plugin/dist/swarm-worktree.d.ts.map +1 -0
  156. package/claude-plugin/dist/swarm.d.ts +590 -0
  157. package/claude-plugin/dist/swarm.d.ts.map +1 -0
  158. package/claude-plugin/dist/tool-availability.d.ts +91 -0
  159. package/claude-plugin/dist/tool-availability.d.ts.map +1 -0
  160. package/claude-plugin/dist/utils/tree-renderer.d.ts +61 -0
  161. package/claude-plugin/dist/utils/tree-renderer.d.ts.map +1 -0
  162. package/claude-plugin/dist/validators/index.d.ts +7 -0
  163. package/claude-plugin/dist/validators/index.d.ts.map +1 -0
  164. package/claude-plugin/dist/validators/schema-validator.d.ts +58 -0
  165. package/claude-plugin/dist/validators/schema-validator.d.ts.map +1 -0
  166. package/claude-plugin/skills/always-on-guidance/SKILL.md +44 -0
  167. package/dist/agent-mail.d.ts +4 -4
  168. package/dist/agent-mail.d.ts.map +1 -1
  169. package/dist/bin/swarm.js +467 -12
  170. package/dist/claude-plugin/claude-plugin-assets.d.ts +10 -0
  171. package/dist/claude-plugin/claude-plugin-assets.d.ts.map +1 -0
  172. package/dist/compaction-hook.d.ts +1 -1
  173. package/dist/compaction-hook.d.ts.map +1 -1
  174. package/dist/index.js +370 -260
  175. package/dist/plugin.js +369 -259
  176. package/dist/skills.d.ts +15 -0
  177. package/dist/skills.d.ts.map +1 -1
  178. package/dist/swarm-mail.d.ts.map +1 -1
  179. package/dist/swarm-prompts.d.ts +3 -1
  180. package/dist/swarm-prompts.d.ts.map +1 -1
  181. package/dist/swarm-prompts.js +79 -2
  182. package/package.json +2 -1
@@ -0,0 +1,3341 @@
1
+ /**
2
+ * ╔═══════════════════════════════════════════════════════════════════════════╗
3
+ * ║ ║
4
+ * ║ 🐝 OPENCODE SWARM PLUGIN WRAPPER 🐝 ║
5
+ * ║ ║
6
+ * ║ This file lives at: ~/.config/opencode/plugin/swarm.ts ║
7
+ * ║ Generated by: swarm setup ║
8
+ * ║ ║
9
+ * ╠═══════════════════════════════════════════════════════════════════════════╣
10
+ * ║ ║
11
+ * ║ ⚠️ CRITICAL: THIS FILE MUST BE 100% SELF-CONTAINED ⚠️ ║
12
+ * ║ ║
13
+ * ║ ❌ NEVER import from "opencode-swarm-plugin" npm package ║
14
+ * ║ ❌ NEVER import from any package with transitive deps (evalite, etc) ║
15
+ * ║ ❌ NEVER add dependencies that aren't provided by OpenCode ║
16
+ * ║ ║
17
+ * ║ ✅ ONLY import from: @opencode-ai/plugin, @opencode-ai/sdk, node:* ║
18
+ * ║ ✅ Shell out to `swarm` CLI for all tool execution ║
19
+ * ║ ✅ Inline any logic that would otherwise require imports ║
20
+ * ║ ║
21
+ * ║ WHY? The npm package has dependencies (evalite, etc) that aren't ║
22
+ * ║ available in OpenCode's plugin context. Importing causes: ║
23
+ * ║ "Cannot find module 'evalite/runner'" → trace trap → OpenCode crash ║
24
+ * ║ ║
25
+ * ║ PATTERN: Plugin wrapper is DUMB. CLI is SMART. ║
26
+ * ║ - Wrapper: thin shell, no logic, just bridges to CLI ║
27
+ * ║ - CLI: all the smarts, all the deps, runs in its own context ║
28
+ * ║ ║
29
+ * ╚═══════════════════════════════════════════════════════════════════════════╝
30
+ *
31
+ * Environment variables passed to CLI:
32
+ * - OPENCODE_SESSION_ID: Session state persistence
33
+ * - OPENCODE_MESSAGE_ID: Message context
34
+ * - OPENCODE_AGENT: Agent context
35
+ * - SWARM_PROJECT_DIR: Project directory (critical for database path)
36
+ */
37
import type { Plugin, PluginInput, Hooks } from "@opencode-ai/plugin";
import type { ToolPart } from "@opencode-ai/sdk";
import { tool } from "@opencode-ai/plugin";
import { spawn } from "child_process";
import {
  appendFileSync,
  existsSync,
  mkdirSync,
  readdirSync,
  statSync,
  unlinkSync,
} from "node:fs";
import { homedir } from "node:os";
import { join } from "node:path";
44
+
45
+ // =============================================================================
46
+ // Swarm Signature Detection (INLINED - do not import from opencode-swarm-plugin)
47
+ // =============================================================================
48
+
49
+ /**
50
+ * Subtask lifecycle status derived from events
51
+ */
52
+ type SubtaskStatus = "created" | "spawned" | "in_progress" | "completed" | "closed";
53
+
54
+ /**
55
+ * Subtask state projected from events
56
+ */
57
+ interface SubtaskState {
58
+ id: string;
59
+ title: string;
60
+ status: SubtaskStatus;
61
+ files: string[];
62
+ worker?: string;
63
+ spawnedAt?: number;
64
+ completedAt?: number;
65
+ }
66
+
67
+ /**
68
+ * Epic state projected from events
69
+ */
70
+ interface EpicState {
71
+ id: string;
72
+ title: string;
73
+ status: "open" | "in_progress" | "closed";
74
+ createdAt: number;
75
+ }
76
+
77
+ /**
78
+ * Complete swarm state projected from session events
79
+ */
80
+ interface SwarmProjection {
81
+ isSwarm: boolean;
82
+ epic?: EpicState;
83
+ subtasks: Map<string, SubtaskState>;
84
+ projectPath?: string;
85
+ coordinatorName?: string;
86
+ lastEventAt?: number;
87
+ counts: {
88
+ total: number;
89
+ created: number;
90
+ spawned: number;
91
+ inProgress: number;
92
+ completed: number;
93
+ closed: number;
94
+ };
95
+ }
96
+
97
+ /**
98
+ * Tool call event extracted from session messages
99
+ */
100
+ interface ToolCallEvent {
101
+ tool: string;
102
+ input: Record<string, unknown>;
103
+ output: string;
104
+ timestamp: number;
105
+ }
106
+
107
+ /** Parse epic ID from hive_create_epic output */
108
+ function parseEpicId(output: string): string | undefined {
109
+ try {
110
+ const parsed = JSON.parse(output);
111
+ return parsed.epic?.id || parsed.id;
112
+ } catch {
113
+ return undefined;
114
+ }
115
+ }
116
+
117
+ /** Parse subtask IDs from hive_create_epic output */
118
+ function parseSubtaskIds(output: string): string[] {
119
+ try {
120
+ const parsed = JSON.parse(output);
121
+ const subtasks = parsed.subtasks || parsed.epic?.subtasks || [];
122
+ return subtasks
123
+ .map((s: unknown) => {
124
+ if (typeof s === "object" && s !== null && "id" in s) {
125
+ return (s as { id: string }).id;
126
+ }
127
+ return undefined;
128
+ })
129
+ .filter((id: unknown): id is string => typeof id === "string");
130
+ } catch {
131
+ return [];
132
+ }
133
+ }
134
+
135
+ /**
136
+ * Project swarm state from session tool call events
137
+ */
138
+ function projectSwarmState(events: ToolCallEvent[]): SwarmProjection {
139
+ const state: SwarmProjection = {
140
+ isSwarm: false,
141
+ subtasks: new Map(),
142
+ counts: { total: 0, created: 0, spawned: 0, inProgress: 0, completed: 0, closed: 0 },
143
+ };
144
+
145
+ let hasEpic = false;
146
+ let hasSpawn = false;
147
+
148
+ for (const event of events) {
149
+ state.lastEventAt = event.timestamp;
150
+
151
+ switch (event.tool) {
152
+ case "hive_create_epic": {
153
+ const epicId = parseEpicId(event.output);
154
+ const epicTitle = typeof event.input.epic_title === "string" ? event.input.epic_title : undefined;
155
+
156
+ if (epicId) {
157
+ state.epic = { id: epicId, title: epicTitle || "Unknown Epic", status: "open", createdAt: event.timestamp };
158
+ hasEpic = true;
159
+
160
+ const subtasks = event.input.subtasks;
161
+ if (Array.isArray(subtasks)) {
162
+ for (const subtask of subtasks) {
163
+ if (typeof subtask === "object" && subtask !== null) {
164
+ state.counts.created++;
165
+ state.counts.total++;
166
+ }
167
+ }
168
+ }
169
+
170
+ const subtaskIds = parseSubtaskIds(event.output);
171
+ for (const id of subtaskIds) {
172
+ if (!state.subtasks.has(id)) {
173
+ state.subtasks.set(id, { id, title: "Unknown", status: "created", files: [] });
174
+ state.counts.total++;
175
+ state.counts.created++;
176
+ }
177
+ }
178
+ }
179
+ break;
180
+ }
181
+
182
+ case "swarm_spawn_subtask": {
183
+ const beadId = typeof event.input.bead_id === "string" ? event.input.bead_id : undefined;
184
+ const title = typeof event.input.subtask_title === "string" ? event.input.subtask_title : "Unknown";
185
+ const files = Array.isArray(event.input.files) ? (event.input.files as string[]) : [];
186
+
187
+ if (beadId) {
188
+ hasSpawn = true;
189
+ const existing = state.subtasks.get(beadId);
190
+ if (existing) {
191
+ if (existing.status === "created") { state.counts.created--; state.counts.spawned++; }
192
+ existing.status = "spawned";
193
+ existing.title = title;
194
+ existing.files = files;
195
+ existing.spawnedAt = event.timestamp;
196
+ } else {
197
+ state.subtasks.set(beadId, { id: beadId, title, status: "spawned", files, spawnedAt: event.timestamp });
198
+ state.counts.total++;
199
+ state.counts.spawned++;
200
+ }
201
+
202
+ const epicId = typeof event.input.epic_id === "string" ? event.input.epic_id : undefined;
203
+ if (epicId && !state.epic) {
204
+ state.epic = { id: epicId, title: "Unknown Epic", status: "in_progress", createdAt: event.timestamp };
205
+ }
206
+ }
207
+ break;
208
+ }
209
+
210
+ case "hive_start": {
211
+ const id = typeof event.input.id === "string" ? event.input.id : undefined;
212
+ if (id) {
213
+ const subtask = state.subtasks.get(id);
214
+ if (subtask && subtask.status !== "completed" && subtask.status !== "closed") {
215
+ if (subtask.status === "created") state.counts.created--;
216
+ else if (subtask.status === "spawned") state.counts.spawned--;
217
+ subtask.status = "in_progress";
218
+ state.counts.inProgress++;
219
+ }
220
+ if (state.epic && state.epic.id === id) state.epic.status = "in_progress";
221
+ }
222
+ break;
223
+ }
224
+
225
+ case "swarm_complete": {
226
+ const beadId = typeof event.input.bead_id === "string" ? event.input.bead_id : undefined;
227
+ if (beadId) {
228
+ const subtask = state.subtasks.get(beadId);
229
+ if (subtask && subtask.status !== "closed") {
230
+ if (subtask.status === "created") state.counts.created--;
231
+ else if (subtask.status === "spawned") state.counts.spawned--;
232
+ else if (subtask.status === "in_progress") state.counts.inProgress--;
233
+ subtask.status = "completed";
234
+ subtask.completedAt = event.timestamp;
235
+ state.counts.completed++;
236
+ }
237
+ }
238
+ break;
239
+ }
240
+
241
+ case "hive_close": {
242
+ const id = typeof event.input.id === "string" ? event.input.id : undefined;
243
+ if (id) {
244
+ const subtask = state.subtasks.get(id);
245
+ if (subtask) {
246
+ if (subtask.status === "created") state.counts.created--;
247
+ else if (subtask.status === "spawned") state.counts.spawned--;
248
+ else if (subtask.status === "in_progress") state.counts.inProgress--;
249
+ else if (subtask.status === "completed") state.counts.completed--;
250
+ subtask.status = "closed";
251
+ state.counts.closed++;
252
+ }
253
+ if (state.epic && state.epic.id === id) state.epic.status = "closed";
254
+ }
255
+ break;
256
+ }
257
+
258
+ case "swarmmail_init": {
259
+ try {
260
+ const parsed = JSON.parse(event.output);
261
+ if (parsed.agent_name) state.coordinatorName = parsed.agent_name;
262
+ if (parsed.project_key) state.projectPath = parsed.project_key;
263
+ } catch { /* skip */ }
264
+ break;
265
+ }
266
+ }
267
+ }
268
+
269
+ state.isSwarm = hasEpic && hasSpawn;
270
+ return state;
271
+ }
272
+
273
+ /** Quick check for swarm signature without full projection */
274
+ function hasSwarmSignature(events: ToolCallEvent[]): boolean {
275
+ let hasEpic = false;
276
+ let hasSpawn = false;
277
+ for (const event of events) {
278
+ if (event.tool === "hive_create_epic") hasEpic = true;
279
+ else if (event.tool === "swarm_spawn_subtask") hasSpawn = true;
280
+ if (hasEpic && hasSpawn) return true;
281
+ }
282
+ return false;
283
+ }
284
+
285
+ /** Check if swarm is still active (has pending work) */
286
+ function isSwarmActive(projection: SwarmProjection): boolean {
287
+ if (!projection.isSwarm) return false;
288
+ return projection.counts.created > 0 || projection.counts.spawned > 0 ||
289
+ projection.counts.inProgress > 0 || projection.counts.completed > 0;
290
+ }
291
+
292
+ /** Get human-readable swarm status summary */
293
+ function getSwarmSummary(projection: SwarmProjection): string {
294
+ if (!projection.isSwarm) return "No swarm detected";
295
+ const { counts, epic } = projection;
296
+ const parts: string[] = [];
297
+ if (epic) parts.push(`Epic: ${epic.id} - ${epic.title} [${epic.status}]`);
298
+ parts.push(`Subtasks: ${counts.total} total (${counts.spawned} spawned, ${counts.inProgress} in_progress, ${counts.completed} completed, ${counts.closed} closed)`);
299
+ parts.push(isSwarmActive(projection) ? "Status: ACTIVE - has pending work" : "Status: COMPLETE - all work closed");
300
+ return parts.join("\n");
301
+ }
302
+
303
+ // =============================================================================
304
+ // Constants
305
+ // =============================================================================
306
+
307
+ const SWARM_CLI = "swarm";
308
+
309
+ // =============================================================================
310
+ // File-based Logging (writes to ~/.config/swarm-tools/logs/)
311
+ // =============================================================================
312
+
313
+ const LOG_DIR = join(homedir(), ".config", "swarm-tools", "logs");
314
+ const COMPACTION_LOG = join(LOG_DIR, "compaction.log");
315
+
316
+ /**
317
+ * Ensure log directory exists
318
+ */
319
+ function ensureLogDir(): void {
320
+ if (!existsSync(LOG_DIR)) {
321
+ mkdirSync(LOG_DIR, { recursive: true });
322
+ }
323
+ }
324
+
325
+ /**
326
+ * Log a compaction event to file (JSON lines format, compatible with `swarm log`)
327
+ *
328
+ * @param level - Log level (info, debug, warn, error)
329
+ * @param msg - Log message
330
+ * @param data - Additional structured data
331
+ */
332
+ function logCompaction(
333
+ level: "info" | "debug" | "warn" | "error",
334
+ msg: string,
335
+ data?: Record<string, unknown>,
336
+ ): void {
337
+ try {
338
+ ensureLogDir();
339
+ const entry = JSON.stringify({
340
+ time: new Date().toISOString(),
341
+ level,
342
+ msg,
343
+ ...data,
344
+ });
345
+ appendFileSync(COMPACTION_LOG, entry + "\n");
346
+ } catch {
347
+ // Silently fail - logging should never break the plugin
348
+ }
349
+ }
350
+
351
+ /**
352
+ * Get date-stamped log file path
353
+ * Format: ~/.config/swarm-tools/logs/{type}-YYYY-MM-DD.log
354
+ */
355
+ function getDateStampedLogPath(type: "tools" | "swarmmail" | "errors"): string {
356
+ const today = new Date().toISOString().split("T")[0]; // YYYY-MM-DD
357
+ return join(LOG_DIR, `${type}-${today}.log`);
358
+ }
359
+
360
+ /**
361
+ * Rotate old log files (delete files older than 7 days)
362
+ *
363
+ * Runs silently - never breaks the plugin if rotation fails.
364
+ */
365
+ function rotateLogFiles(): void {
366
+ try {
367
+ ensureLogDir();
368
+ const { readdirSync, unlinkSync, statSync } = require("node:fs");
369
+ const files = readdirSync(LOG_DIR);
370
+ const now = Date.now();
371
+ const sevenDaysMs = 7 * 24 * 60 * 60 * 1000;
372
+
373
+ for (const file of files) {
374
+ // Only rotate date-stamped files (tools-*, swarmmail-*, errors-*)
375
+ if (!/^(tools|swarmmail|errors)-\d{4}-\d{2}-\d{2}\.log$/.test(file)) {
376
+ continue;
377
+ }
378
+
379
+ const filePath = join(LOG_DIR, file);
380
+ const stats = statSync(filePath);
381
+ const age = now - stats.mtimeMs;
382
+
383
+ if (age > sevenDaysMs) {
384
+ unlinkSync(filePath);
385
+ }
386
+ }
387
+ } catch {
388
+ // Silently fail - rotation failures shouldn't break the plugin
389
+ }
390
+ }
391
+
392
+ /**
393
+ * Log a tool invocation to date-stamped file
394
+ *
395
+ * @param toolName - Tool name (e.g., "hive_create", "swarm_status")
396
+ * @param args - Tool arguments
397
+ * @param result - Tool result (optional, for successful calls)
398
+ * @param error - Error message (optional, for failed calls)
399
+ */
400
+ function logTool(
401
+ toolName: string,
402
+ args: Record<string, unknown>,
403
+ result?: string,
404
+ error?: string,
405
+ ): void {
406
+ try {
407
+ ensureLogDir();
408
+ rotateLogFiles(); // Rotate on every log call (cheap operation)
409
+
410
+ const logPath = getDateStampedLogPath("tools");
411
+ const entry = JSON.stringify({
412
+ time: new Date().toISOString(),
413
+ level: error ? "error" : "info",
414
+ msg: `tool_call: ${toolName}`,
415
+ tool: toolName,
416
+ args,
417
+ ...(result && { result }),
418
+ ...(error && { error }),
419
+ });
420
+
421
+ appendFileSync(logPath, entry + "\n");
422
+ } catch {
423
+ // Silently fail - logging should never break the plugin
424
+ }
425
+ }
426
+
427
+ /**
428
+ * Log a Swarm Mail event to date-stamped file
429
+ *
430
+ * @param event - Event type (e.g., "message_sent", "inbox_fetched")
431
+ * @param data - Event data
432
+ */
433
+ function logSwarmMail(
434
+ event: string,
435
+ data: Record<string, unknown>,
436
+ ): void {
437
+ try {
438
+ ensureLogDir();
439
+ rotateLogFiles();
440
+
441
+ const logPath = getDateStampedLogPath("swarmmail");
442
+ const entry = JSON.stringify({
443
+ time: new Date().toISOString(),
444
+ level: "info",
445
+ msg: event,
446
+ ...data,
447
+ });
448
+
449
+ appendFileSync(logPath, entry + "\n");
450
+ } catch {
451
+ // Silently fail
452
+ }
453
+ }
454
+
455
+ /**
456
+ * Log an error to date-stamped file
457
+ *
458
+ * @param error - Error message
459
+ * @param data - Additional error context
460
+ */
461
+ function logError(
462
+ error: string,
463
+ data?: Record<string, unknown>,
464
+ ): void {
465
+ try {
466
+ ensureLogDir();
467
+ rotateLogFiles();
468
+
469
+ const logPath = getDateStampedLogPath("errors");
470
+ const entry = JSON.stringify({
471
+ time: new Date().toISOString(),
472
+ level: "error",
473
+ msg: error,
474
+ ...data,
475
+ });
476
+
477
+ appendFileSync(logPath, entry + "\n");
478
+ } catch {
479
+ // Silently fail
480
+ }
481
+ }
482
+
483
+ /**
484
+ * Capture compaction event for evals (INLINED - do not import from opencode-swarm-plugin)
485
+ *
486
+ * Writes COMPACTION events directly to session JSONL file.
487
+ * This is inlined to avoid import issues - plugin wrapper must be 100% self-contained.
488
+ *
489
+ * Matches the structure of captureCompactionEvent from eval-capture.ts but writes
490
+ * ONLY to JSONL (not libSQL) to avoid swarm-mail dependency.
491
+ *
492
+ * @param sessionID - Session ID
493
+ * @param epicID - Epic ID (or "unknown" if not detected)
494
+ * @param compactionType - Event type (detection_complete, prompt_generated, context_injected, resumption_started, tool_call_tracked)
495
+ * @param payload - Event-specific data (full prompts, detection results, etc.)
496
+ */
497
+ async function captureCompaction(
498
+ sessionID: string,
499
+ epicID: string,
500
+ compactionType: "detection_complete" | "prompt_generated" | "context_injected" | "resumption_started" | "tool_call_tracked",
501
+ payload: any,
502
+ ): Promise<void> {
503
+ try {
504
+ // Build the CoordinatorEvent object matching eval-capture.ts schema
505
+ const event = {
506
+ session_id: sessionID,
507
+ epic_id: epicID,
508
+ timestamp: new Date().toISOString(),
509
+ event_type: "COMPACTION",
510
+ compaction_type: compactionType,
511
+ payload: payload,
512
+ };
513
+
514
+ // Session directory: ~/.config/swarm-tools/sessions/
515
+ const sessionDir = process.env.SWARM_SESSIONS_DIR ||
516
+ join(homedir(), ".config", "swarm-tools", "sessions");
517
+
518
+ // Ensure directory exists
519
+ if (!existsSync(sessionDir)) {
520
+ mkdirSync(sessionDir, { recursive: true });
521
+ }
522
+
523
+ // Write to JSONL (append mode)
524
+ const sessionPath = join(sessionDir, `${sessionID}.jsonl`);
525
+ const line = `${JSON.stringify(event)}\n`;
526
+ appendFileSync(sessionPath, line, "utf-8");
527
+
528
+ logCompaction("debug", "compaction_event_captured", {
529
+ session_id: sessionID,
530
+ epic_id: epicID,
531
+ compaction_type: compactionType,
532
+ session_path: sessionPath,
533
+ });
534
+ } catch (err) {
535
+ // Non-fatal - capture failures shouldn't break compaction
536
+ logCompaction("warn", "compaction_capture_failed", {
537
+ session_id: sessionID,
538
+ epic_id: epicID,
539
+ compaction_type: compactionType,
540
+ error: err instanceof Error ? err.message : String(err),
541
+ });
542
+ }
543
+ }
544
+
545
+ // Module-level project directory - set during plugin initialization
546
+ // This is CRITICAL: without it, the CLI uses process.cwd() which may be wrong
547
+ let projectDirectory: string = process.cwd();
548
+
549
+ // Module-level SDK client - set during plugin initialization
550
+ // Used for scanning session messages during compaction
551
+ let sdkClient: any = null;
552
+
553
+ // =============================================================================
554
+ // CLI Execution Helper
555
+ // =============================================================================
556
+
557
+ /**
558
+ * Execute a swarm tool via CLI
559
+ *
560
+ * Spawns `swarm tool <name> --json '<args>'` and returns the result.
561
+ * Passes session context via environment variables.
562
+ *
563
+ * IMPORTANT: Runs in projectDirectory (set by OpenCode) not process.cwd()
564
+ */
565
+ async function execTool(
566
+ name: string,
567
+ args: Record<string, unknown>,
568
+ ctx: { sessionID: string; messageID: string; agent: string },
569
+ ): Promise<string> {
570
+ return new Promise((resolve, reject) => {
571
+ const hasArgs = Object.keys(args).length > 0;
572
+ const cliArgs = hasArgs
573
+ ? ["tool", name, "--json", JSON.stringify(args)]
574
+ : ["tool", name];
575
+
576
+ const proc = spawn(SWARM_CLI, cliArgs, {
577
+ cwd: projectDirectory, // Run in project directory, not plugin directory
578
+ stdio: ["ignore", "pipe", "pipe"],
579
+ env: {
580
+ ...process.env,
581
+ OPENCODE_SESSION_ID: ctx.sessionID,
582
+ OPENCODE_MESSAGE_ID: ctx.messageID,
583
+ OPENCODE_AGENT: ctx.agent,
584
+ SWARM_PROJECT_DIR: projectDirectory, // Also pass as env var
585
+ },
586
+ });
587
+
588
+ let stdout = "";
589
+ let stderr = "";
590
+
591
+ proc.stdout.on("data", (data) => {
592
+ stdout += data;
593
+ });
594
+ proc.stderr.on("data", (data) => {
595
+ stderr += data;
596
+ });
597
+
598
+ proc.on("close", (code) => {
599
+ if (code === 0) {
600
+ // Success - return the JSON output
601
+ try {
602
+ const result = JSON.parse(stdout);
603
+ if (result.success && result.data !== undefined) {
604
+ // Log successful tool call
605
+ logTool(name, args, typeof result.data === "string" ? result.data : JSON.stringify(result.data));
606
+
607
+ // Log Swarm Mail events separately
608
+ if (name.startsWith("swarmmail_")) {
609
+ logSwarmMail(`tool_${name}`, { args, result: result.data });
610
+ }
611
+
612
+ // Unwrap the data for cleaner tool output
613
+ resolve(
614
+ typeof result.data === "string"
615
+ ? result.data
616
+ : JSON.stringify(result.data, null, 2),
617
+ );
618
+ } else if (!result.success && result.error) {
619
+ // Tool returned an error in JSON format
620
+ // Handle both string errors and object errors with .message
621
+ const errorMsg = typeof result.error === "string"
622
+ ? result.error
623
+ : (result.error.message || "Tool execution failed");
624
+
625
+ // Log failed tool call
626
+ logTool(name, args, undefined, errorMsg);
627
+ logError(`Tool ${name} failed`, { args, error: errorMsg });
628
+
629
+ reject(new Error(errorMsg));
630
+ } else {
631
+ // Log successful (non-standard response)
632
+ logTool(name, args, stdout);
633
+ resolve(stdout);
634
+ }
635
+ } catch {
636
+ // Log successful (unparseable response)
637
+ logTool(name, args, stdout);
638
+ resolve(stdout);
639
+ }
640
+ } else if (code === 2) {
641
+ const errorMsg = `Unknown tool: ${name}`;
642
+ logError(errorMsg, { args });
643
+ reject(new Error(errorMsg));
644
+ } else if (code === 3) {
645
+ const errorMsg = `Invalid JSON args: ${stderr}`;
646
+ logError(errorMsg, { tool: name, args });
647
+ reject(new Error(errorMsg));
648
+ } else {
649
+ // Tool returned error
650
+ try {
651
+ const result = JSON.parse(stdout);
652
+ if (!result.success && result.error) {
653
+ // Handle both string errors and object errors with .message
654
+ const errorMsg = typeof result.error === "string"
655
+ ? result.error
656
+ : (result.error.message || `Tool failed with code ${code}`);
657
+
658
+ logTool(name, args, undefined, errorMsg);
659
+ logError(`Tool ${name} failed with code ${code}`, { args, error: errorMsg });
660
+
661
+ reject(new Error(errorMsg));
662
+ } else {
663
+ const errorMsg = stderr || stdout || `Tool failed with code ${code}`;
664
+ logTool(name, args, undefined, errorMsg);
665
+ logError(`Tool ${name} failed with code ${code}`, { args, stderr, stdout });
666
+
667
+ reject(
668
+ new Error(errorMsg),
669
+ );
670
+ }
671
+ } catch {
672
+ const errorMsg = stderr || stdout || `Tool failed with code ${code}`;
673
+ logTool(name, args, undefined, errorMsg);
674
+ logError(`Tool ${name} failed with code ${code}`, { args, stderr, stdout });
675
+
676
+ reject(
677
+ new Error(errorMsg),
678
+ );
679
+ }
680
+ }
681
+ });
682
+
683
+ proc.on("error", (err) => {
684
+ if ((err as NodeJS.ErrnoException).code === "ENOENT") {
685
+ reject(
686
+ new Error(
687
+ `swarm CLI not found. Install with: npm install -g opencode-swarm-plugin`,
688
+ ),
689
+ );
690
+ } else {
691
+ reject(err);
692
+ }
693
+ });
694
+ });
695
+ }
696
+
697
// =============================================================================
// Beads Tools
// =============================================================================
// Each tool below is a declarative wrapper: the schema is validated by the
// host, and execution delegates to the swarm CLI through execTool.

const hive_create = tool({
  description: "Create a new bead with type-safe validation",
  args: {
    title: tool.schema.string().describe("Bead title"),
    type: tool.schema
      .enum(["bug", "feature", "task", "epic", "chore"])
      .optional()
      .describe("Issue type (default: task)"),
    priority: tool.schema
      .number()
      .min(0)
      .max(3)
      .optional()
      .describe("Priority 0-3 (default: 2)"),
    description: tool.schema.string().optional().describe("Bead description"),
    parent_id: tool.schema
      .string()
      .optional()
      .describe("Parent bead ID for epic children"),
  },
  execute: (args, ctx) => execTool("hive_create", args, ctx),
});

const hive_create_epic = tool({
  description: "Create epic with subtasks in one atomic operation",
  args: {
    epic_title: tool.schema.string().describe("Epic title"),
    epic_description: tool.schema
      .string()
      .optional()
      .describe("Epic description"),
    subtasks: tool.schema
      .array(
        tool.schema.object({
          title: tool.schema.string(),
          priority: tool.schema.number().min(0).max(3).optional(),
          files: tool.schema.array(tool.schema.string()).optional(),
        }),
      )
      .describe("Subtasks to create under the epic"),
  },
  execute: (args, ctx) => execTool("hive_create_epic", args, ctx),
});

const hive_query = tool({
  description: "Query beads with filters (replaces bd list, bd ready, bd wip)",
  args: {
    status: tool.schema
      .enum(["open", "in_progress", "blocked", "closed"])
      .optional()
      .describe("Filter by status"),
    type: tool.schema
      .enum(["bug", "feature", "task", "epic", "chore"])
      .optional()
      .describe("Filter by type"),
    ready: tool.schema
      .boolean()
      .optional()
      .describe("Only show unblocked beads"),
    limit: tool.schema
      .number()
      .optional()
      .describe("Max results (default: 20)"),
  },
  execute: (args, ctx) => execTool("hive_query", args, ctx),
});

const hive_update = tool({
  description: "Update bead status/description",
  args: {
    id: tool.schema.string().describe("Cell ID"),
    status: tool.schema
      .enum(["open", "in_progress", "blocked", "closed"])
      .optional()
      .describe("New status"),
    description: tool.schema.string().optional().describe("New description"),
    priority: tool.schema
      .number()
      .min(0)
      .max(3)
      .optional()
      .describe("New priority"),
  },
  execute: (args, ctx) => execTool("hive_update", args, ctx),
});

const hive_close = tool({
  description: "Close a bead with reason",
  args: {
    id: tool.schema.string().describe("Cell ID"),
    reason: tool.schema.string().describe("Completion reason"),
  },
  execute: (args, ctx) => execTool("hive_close", args, ctx),
});

const hive_start = tool({
  description: "Mark a bead as in-progress",
  args: {
    id: tool.schema.string().describe("Cell ID"),
  },
  execute: (args, ctx) => execTool("hive_start", args, ctx),
});

const hive_ready = tool({
  description: "Get the next ready bead (unblocked, highest priority)",
  args: {},
  execute: (args, ctx) => execTool("hive_ready", args, ctx),
});

const hive_sync = tool({
  description: "Sync beads to git and push (MANDATORY at session end)",
  args: {
    auto_pull: tool.schema.boolean().optional().describe("Pull before sync"),
  },
  execute: (args, ctx) => execTool("hive_sync", args, ctx),
});

const hive_cells = tool({
  description: `Query cells from the hive database with flexible filtering.

USE THIS TOOL TO:
- List all open cells: hive_cells()
- Find cells by status: hive_cells({ status: "in_progress" })
- Find cells by type: hive_cells({ type: "bug" })
- Get a specific cell by partial ID: hive_cells({ id: "mjkmd" })
- Get the next ready (unblocked) cell: hive_cells({ ready: true })
- Combine filters: hive_cells({ status: "open", type: "task" })

RETURNS: Array of cells with id, title, status, priority, type, parent_id, created_at, updated_at

PREFER THIS OVER hive_query when you need to:
- See what work is available
- Check status of multiple cells
- Find cells matching criteria
- Look up a cell by partial ID`,
  args: {
    id: tool.schema.string().optional().describe("Partial or full cell ID to look up"),
    status: tool.schema.enum(["open", "in_progress", "blocked", "closed"]).optional().describe("Filter by status"),
    type: tool.schema.enum(["task", "bug", "feature", "epic", "chore"]).optional().describe("Filter by type"),
    ready: tool.schema.boolean().optional().describe("If true, return only the next unblocked cell"),
    limit: tool.schema.number().optional().describe("Max cells to return (default 20)"),
  },
  execute: (args, ctx) => execTool("hive_cells", args, ctx),
});

const beads_link_thread = tool({
  description: "Add metadata linking bead to Agent Mail thread",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    thread_id: tool.schema.string().describe("Agent Mail thread ID"),
  },
  execute: (args, ctx) => execTool("beads_link_thread", args, ctx),
});
854
+
855
// =============================================================================
// Session Handoff Tools (Chainlink-inspired)
// =============================================================================
// Session start/end persist handoff notes so context survives across sessions.
// Both delegate to the swarm CLI through execTool.

const hive_session_start = tool({
  description: `Start a new work session with optional handoff notes from previous session.

Chainlink-inspired session management for context preservation across sessions.
Returns previous session's handoff notes if available.

Credit: Chainlink session handoff pattern from https://github.com/dollspace-gay/chainlink`,
  args: {
    active_cell_id: tool.schema
      .string()
      .optional()
      .describe("ID of cell being worked on"),
  },
  execute: (args, ctx) => execTool("hive_session_start", args, ctx),
});

const hive_session_end = tool({
  description: `End current session with handoff notes for next session.

Save context for the next agent/session to pick up where you left off.
Include: what was done, what's next, any blockers or gotchas.

Credit: Chainlink session handoff pattern from https://github.com/dollspace-gay/chainlink`,
  args: {
    handoff_notes: tool.schema
      .string()
      .optional()
      .describe("Notes for next session (e.g., 'Completed X. Next: do Y. Watch out for Z.')"),
  },
  execute: (args, ctx) => execTool("hive_session_end", args, ctx),
});
890
+
891
// =============================================================================
// Swarm Mail Tools (Embedded)
// =============================================================================
// Inter-agent messaging and file-reservation tools; all delegate to execTool.

const swarmmail_init = tool({
  description: "Initialize Swarm Mail session (REQUIRED FIRST)",
  args: {
    project_path: tool.schema.string().describe("Absolute path to the project"),
    agent_name: tool.schema.string().optional().describe("Custom agent name"),
    task_description: tool.schema
      .string()
      .optional()
      .describe("Task description"),
  },
  execute: (args, ctx) => execTool("swarmmail_init", args, ctx),
});

const swarmmail_send = tool({
  description: "Send message to other agents via Swarm Mail",
  args: {
    to: tool.schema
      .array(tool.schema.string())
      .describe("Recipient agent names"),
    subject: tool.schema.string().describe("Message subject"),
    body: tool.schema.string().describe("Message body"),
    thread_id: tool.schema
      .string()
      .optional()
      .describe("Thread ID for grouping"),
    importance: tool.schema
      .enum(["low", "normal", "high", "urgent"])
      .optional()
      .describe("Message importance"),
    ack_required: tool.schema
      .boolean()
      .optional()
      .describe("Require acknowledgment"),
  },
  execute: (args, ctx) => execTool("swarmmail_send", args, ctx),
});

const swarmmail_inbox = tool({
  description: "Fetch inbox (CONTEXT-SAFE: bodies excluded, max 5 messages)",
  args: {
    limit: tool.schema
      .number()
      .max(5)
      .optional()
      .describe("Max messages (max 5)"),
    urgent_only: tool.schema
      .boolean()
      .optional()
      .describe("Only urgent messages"),
  },
  execute: (args, ctx) => execTool("swarmmail_inbox", args, ctx),
});

const swarmmail_read_message = tool({
  description: "Fetch ONE message body by ID",
  args: {
    message_id: tool.schema.number().describe("Message ID"),
  },
  execute: (args, ctx) => execTool("swarmmail_read_message", args, ctx),
});

const swarmmail_reserve = tool({
  description: "Reserve file paths for exclusive editing",
  args: {
    paths: tool.schema
      .array(tool.schema.string())
      .describe("File paths/patterns"),
    ttl_seconds: tool.schema.number().optional().describe("Reservation TTL"),
    exclusive: tool.schema.boolean().optional().describe("Exclusive lock"),
    reason: tool.schema.string().optional().describe("Reservation reason"),
  },
  execute: (args, ctx) => execTool("swarmmail_reserve", args, ctx),
});

const swarmmail_release = tool({
  description: "Release file reservations",
  args: {
    paths: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Paths to release"),
    reservation_ids: tool.schema
      .array(tool.schema.number())
      .optional()
      .describe("Reservation IDs"),
  },
  execute: (args, ctx) => execTool("swarmmail_release", args, ctx),
});

const swarmmail_release_all = tool({
  description: "Release all file reservations in the project (coordinator override)",
  args: {},
  execute: (args, ctx) => execTool("swarmmail_release_all", args, ctx),
});

const swarmmail_release_agent = tool({
  description: "Release all file reservations for a specific agent (coordinator override)",
  args: {
    agent_name: tool.schema.string().describe("Target agent name"),
  },
  execute: (args, ctx) => execTool("swarmmail_release_agent", args, ctx),
});

const swarmmail_ack = tool({
  description: "Acknowledge a message",
  args: {
    message_id: tool.schema.number().describe("Message ID"),
  },
  execute: (args, ctx) => execTool("swarmmail_ack", args, ctx),
});

const swarmmail_health = tool({
  description: "Check Swarm Mail database health",
  args: {},
  execute: (args, ctx) => execTool("swarmmail_health", args, ctx),
});
1011
+
1012
// =============================================================================
// Structured Tools
// =============================================================================
// Helpers for extracting and validating structured (JSON) agent output.
// All delegate to the swarm CLI through execTool.

const structured_extract_json = tool({
  description: "Extract JSON from markdown/text response",
  args: {
    text: tool.schema.string().describe("Text containing JSON"),
  },
  execute: (args, ctx) => execTool("structured_extract_json", args, ctx),
});

const structured_validate = tool({
  description: "Validate agent response against a schema",
  args: {
    response: tool.schema.string().describe("Agent response to validate"),
    schema_name: tool.schema
      .enum(["evaluation", "task_decomposition", "cell_tree"])
      .describe("Schema to validate against"),
    max_retries: tool.schema
      .number()
      .min(1)
      .max(5)
      .optional()
      .describe("Max retries"),
  },
  execute: (args, ctx) => execTool("structured_validate", args, ctx),
});

const structured_parse_evaluation = tool({
  description: "Parse and validate evaluation response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_evaluation", args, ctx),
});

const structured_parse_decomposition = tool({
  description: "Parse and validate task decomposition response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_decomposition", args, ctx),
});

const structured_parse_cell_tree = tool({
  description: "Parse and validate bead tree response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_cell_tree", args, ctx),
});
1064
+
1065
// =============================================================================
// Swarm Tools
// =============================================================================
// Orchestration tools for swarm lifecycle: init, strategy selection, task
// decomposition, subtask spawning/completion, and progress reporting.
// All delegate to the swarm CLI through execTool.

const swarm_init = tool({
  description: "Initialize swarm session and check tool availability",
  args: {
    project_path: tool.schema.string().optional().describe("Project path"),
    isolation: tool.schema
      .enum(["worktree", "reservation"])
      .optional()
      .describe(
        "Isolation mode: 'worktree' for git worktree isolation, 'reservation' for file reservations (default)",
      ),
  },
  execute: (args, ctx) => execTool("swarm_init", args, ctx),
});

const swarm_select_strategy = tool({
  description: "Analyze task and recommend decomposition strategy",
  args: {
    task: tool.schema.string().min(1).describe("Task to analyze"),
    codebase_context: tool.schema
      .string()
      .optional()
      .describe("Codebase context"),
  },
  execute: (args, ctx) => execTool("swarm_select_strategy", args, ctx),
});

const swarm_plan_prompt = tool({
  description: "Generate strategy-specific decomposition prompt",
  args: {
    task: tool.schema.string().min(1).describe("Task to decompose"),
    strategy: tool.schema
      .enum(["file-based", "feature-based", "risk-based", "auto"])
      .optional()
      .describe("Decomposition strategy"),
    max_subtasks: tool.schema
      .number()
      .int()
      .min(2)
      .max(10)
      .optional()
      .describe("Max subtasks"),
    context: tool.schema.string().optional().describe("Additional context"),
    query_cass: tool.schema
      .boolean()
      .optional()
      .describe("Query CASS for similar tasks"),
    cass_limit: tool.schema
      .number()
      .int()
      .min(1)
      .max(10)
      .optional()
      .describe("CASS limit"),
  },
  execute: (args, ctx) => execTool("swarm_plan_prompt", args, ctx),
});

const swarm_decompose = tool({
  description: "Generate decomposition prompt for breaking task into subtasks",
  args: {
    task: tool.schema.string().min(1).describe("Task to decompose"),
    max_subtasks: tool.schema
      .number()
      .int()
      .min(2)
      .max(10)
      .optional()
      .describe("Max subtasks"),
    context: tool.schema.string().optional().describe("Additional context"),
    query_cass: tool.schema.boolean().optional().describe("Query CASS"),
    cass_limit: tool.schema
      .number()
      .int()
      .min(1)
      .max(10)
      .optional()
      .describe("CASS limit"),
  },
  execute: (args, ctx) => execTool("swarm_decompose", args, ctx),
});

const swarm_validate_decomposition = tool({
  description: "Validate a decomposition response against CellTreeSchema",
  args: {
    response: tool.schema.string().describe("Decomposition response"),
  },
  execute: (args, ctx) => execTool("swarm_validate_decomposition", args, ctx),
});

const swarm_status = tool({
  description: "Get status of a swarm by epic ID",
  args: {
    epic_id: tool.schema.string().describe("Epic bead ID"),
    project_key: tool.schema.string().describe("Project key"),
  },
  execute: (args, ctx) => execTool("swarm_status", args, ctx),
});

const swarm_progress = tool({
  description: "Report progress on a subtask to coordinator",
  args: {
    project_key: tool.schema.string().describe("Project key"),
    agent_name: tool.schema.string().describe("Agent name"),
    bead_id: tool.schema.string().describe("Cell ID"),
    status: tool.schema
      .enum(["in_progress", "blocked", "completed", "failed"])
      .describe("Status"),
    message: tool.schema.string().optional().describe("Progress message"),
    progress_percent: tool.schema
      .number()
      .min(0)
      .max(100)
      .optional()
      .describe("Progress %"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified"),
  },
  execute: (args, ctx) => execTool("swarm_progress", args, ctx),
});

const swarm_complete = tool({
  description:
    "Mark subtask complete with Verification Gate. Runs typecheck and tests before allowing completion.",
  args: {
    project_key: tool.schema.string().describe("Project key"),
    agent_name: tool.schema.string().describe("Agent name"),
    bead_id: tool.schema.string().describe("Cell ID"),
    summary: tool.schema.string().describe("Completion summary"),
    evaluation: tool.schema.string().optional().describe("Self-evaluation JSON"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified - will be verified"),
    skip_verification: tool.schema
      .boolean()
      .optional()
      .describe("Skip ALL verification (typecheck, tests)"),
    skip_review: tool.schema
      .boolean()
      .optional()
      .describe("Skip review gate check"),
  },
  execute: (args, ctx) => execTool("swarm_complete", args, ctx),
});

const swarm_record_outcome = tool({
  description: "Record subtask outcome for implicit feedback scoring",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    duration_ms: tool.schema.number().int().min(0).describe("Duration in ms"),
    error_count: tool.schema
      .number()
      .int()
      .min(0)
      .optional()
      .describe("Error count"),
    retry_count: tool.schema
      .number()
      .int()
      .min(0)
      .optional()
      .describe("Retry count"),
    success: tool.schema.boolean().describe("Whether task succeeded"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified"),
    criteria: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Evaluation criteria"),
    strategy: tool.schema
      .enum(["file-based", "feature-based", "risk-based"])
      .optional()
      .describe("Strategy used"),
  },
  execute: (args, ctx) => execTool("swarm_record_outcome", args, ctx),
});

const swarm_subtask_prompt = tool({
  description: "Generate the prompt for a spawned subtask agent",
  args: {
    agent_name: tool.schema.string().describe("Agent name"),
    bead_id: tool.schema.string().describe("Cell ID"),
    epic_id: tool.schema.string().describe("Epic ID"),
    subtask_title: tool.schema.string().describe("Subtask title"),
    subtask_description: tool.schema
      .string()
      .optional()
      .describe("Description"),
    files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
    shared_context: tool.schema.string().optional().describe("Shared context"),
  },
  execute: (args, ctx) => execTool("swarm_subtask_prompt", args, ctx),
});

const swarm_spawn_subtask = tool({
  description: "Prepare a subtask for spawning with Task tool",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    epic_id: tool.schema.string().describe("Epic ID"),
    subtask_title: tool.schema.string().describe("Subtask title"),
    subtask_description: tool.schema
      .string()
      .optional()
      .describe("Description"),
    files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
    shared_context: tool.schema.string().optional().describe("Shared context"),
  },
  execute: (args, ctx) => execTool("swarm_spawn_subtask", args, ctx),
});

const swarm_complete_subtask = tool({
  description: "Handle subtask completion after Task agent returns",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    task_result: tool.schema.string().describe("Task result JSON"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified"),
  },
  execute: (args, ctx) => execTool("swarm_complete_subtask", args, ctx),
});

const swarm_evaluation_prompt = tool({
  description: "Generate self-evaluation prompt for a completed subtask",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    subtask_title: tool.schema.string().describe("Subtask title"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .describe("Files modified"),
  },
  execute: (args, ctx) => execTool("swarm_evaluation_prompt", args, ctx),
});

const swarm_broadcast = tool({
  description:
    "Broadcast context update to all agents working on the same epic",
  args: {
    project_path: tool.schema.string().describe("Project path"),
    agent_name: tool.schema.string().describe("Agent name"),
    epic_id: tool.schema.string().describe("Epic ID"),
    message: tool.schema.string().describe("Context update message"),
    importance: tool.schema
      .enum(["info", "warning", "blocker"])
      .optional()
      .describe("Priority level (default: info)"),
    files_affected: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files this context relates to"),
  },
  execute: (args, ctx) => execTool("swarm_broadcast", args, ctx),
});
1327
+
1328
// =============================================================================
// Worktree Isolation Tools
// =============================================================================
// Git-worktree lifecycle tools (create/merge/cleanup/list) so each worker can
// operate on an isolated checkout. All delegate to execTool.

const swarm_worktree_create = tool({
  description:
    "Create a git worktree for isolated task execution. Worker operates in worktree, not main branch.",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
    task_id: tool.schema.string().describe("Task/bead ID (e.g., bd-abc123.1)"),
    start_commit: tool.schema
      .string()
      .describe("Commit SHA to create worktree at (swarm start point)"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_create", args, ctx),
});

const swarm_worktree_merge = tool({
  description:
    "Cherry-pick commits from worktree back to main branch. Call after worker completes.",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
    task_id: tool.schema.string().describe("Task/bead ID"),
    start_commit: tool.schema
      .string()
      .optional()
      .describe("Original start commit (to find new commits)"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_merge", args, ctx),
});

const swarm_worktree_cleanup = tool({
  description:
    "Remove a worktree after completion or abort. Idempotent - safe to call multiple times.",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
    task_id: tool.schema.string().optional().describe("Task/bead ID to clean up"),
    cleanup_all: tool.schema
      .boolean()
      .optional()
      .describe("Remove all worktrees for this project"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_cleanup", args, ctx),
});

const swarm_worktree_list = tool({
  description: "List all active worktrees for a project",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_list", args, ctx),
});
1380
+
1381
// =============================================================================
// Structured Review Tools
// =============================================================================
// Review-gate tools: generate a review prompt for a completed subtask, and
// deliver structured feedback to the worker. Both delegate to execTool.

const swarm_review = tool({
  description:
    "Generate a review prompt for a completed subtask. Includes epic context, dependencies, and diff.",
  args: {
    project_key: tool.schema.string().describe("Project path"),
    epic_id: tool.schema.string().describe("Epic bead ID"),
    task_id: tool.schema.string().describe("Subtask bead ID to review"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified (will get diff for these)"),
  },
  execute: (args, ctx) => execTool("swarm_review", args, ctx),
});

const swarm_review_feedback = tool({
  description:
    "Send review feedback to a worker. Tracks attempts (max 3). Fails task after 3 rejections.",
  args: {
    project_key: tool.schema.string().describe("Project path"),
    task_id: tool.schema.string().describe("Subtask bead ID"),
    worker_id: tool.schema.string().describe("Worker agent name"),
    status: tool.schema
      .enum(["approved", "needs_changes"])
      .describe("Review status"),
    summary: tool.schema.string().optional().describe("Review summary"),
    issues: tool.schema
      .string()
      .optional()
      .describe("JSON array of ReviewIssue objects (for needs_changes)"),
  },
  execute: (args, ctx) => execTool("swarm_review_feedback", args, ctx),
});
1418
+
1419
// =============================================================================
// Adversarial Review Tools (VDD/Chainlink-inspired)
// =============================================================================
// Hostile fresh-context review of a diff; delegates to execTool.

const swarm_adversarial_review = tool({
  description: `VDD-style adversarial code review using hostile, fresh-context agent.

Spawns Sarcasmotron - a hyper-critical reviewer with zero tolerance for slop.
Fresh context per review prevents "relationship drift" (becoming lenient over time).

Returns structured critique with verdict:
- APPROVED: Code is solid
- NEEDS_CHANGES: Real issues found
- HALLUCINATING: Adversary invented issues (code is excellent!)

Credit: VDD methodology from https://github.com/Vomikron/VDD
Credit: Chainlink patterns from https://github.com/dollspace-gay/chainlink`,
  args: {
    diff: tool.schema.string().describe("Git diff of changes to review"),
    test_output: tool.schema.string().optional().describe("Test output (optional)"),
  },
  execute: (args, ctx) => execTool("swarm_adversarial_review", args, ctx),
});
1442
+
1443
// =============================================================================
// Skills Tools
// =============================================================================
// CRUD tools for skills (SKILL.md documents) across global/project/bundled
// sources. All delegate to the swarm CLI through execTool.

const skills_list = tool({
  description:
    "List all available skills from global, project, and bundled sources",
  args: {
    source: tool.schema
      .enum(["all", "global", "project", "bundled"])
      .optional()
      .describe("Filter by source (default: all)"),
  },
  execute: (args, ctx) => execTool("skills_list", args, ctx),
});

const skills_read = tool({
  description: "Read a skill's full content including SKILL.md and references",
  args: {
    name: tool.schema.string().describe("Skill name"),
  },
  execute: (args, ctx) => execTool("skills_read", args, ctx),
});

const skills_use = tool({
  description:
    "Get skill content formatted for injection into agent context. Use this when you need to apply a skill's knowledge to the current task.",
  args: {
    name: tool.schema.string().describe("Skill name"),
    context: tool.schema
      .string()
      .optional()
      .describe("Optional context about how the skill will be used"),
  },
  execute: (args, ctx) => execTool("skills_use", args, ctx),
});

const skills_create = tool({
  description: "Create a new skill with SKILL.md template",
  args: {
    name: tool.schema.string().describe("Skill name (kebab-case)"),
    description: tool.schema.string().describe("Brief skill description"),
    scope: tool.schema
      .enum(["global", "project"])
      .optional()
      .describe("Where to create (default: project)"),
    tags: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Skill tags for discovery"),
  },
  execute: (args, ctx) => execTool("skills_create", args, ctx),
});

const skills_update = tool({
  description: "Update an existing skill's SKILL.md content",
  args: {
    name: tool.schema.string().describe("Skill name"),
    content: tool.schema.string().describe("New SKILL.md content"),
  },
  execute: (args, ctx) => execTool("skills_update", args, ctx),
});

const skills_delete = tool({
  description: "Delete a skill (project skills only)",
  args: {
    name: tool.schema.string().describe("Skill name"),
  },
  execute: (args, ctx) => execTool("skills_delete", args, ctx),
});
1513
+
1514
+ const skills_init = tool({
1515
+ description: "Initialize skills directory in current project",
1516
+ args: {
1517
+ path: tool.schema
1518
+ .string()
1519
+ .optional()
1520
+ .describe("Custom path (default: .opencode/skills)"),
1521
+ },
1522
+ execute: (args, ctx) => execTool("skills_init", args, ctx),
1523
+ });
1524
+
1525
+ const skills_add_script = tool({
1526
+ description: "Add an executable script to a skill",
1527
+ args: {
1528
+ skill_name: tool.schema.string().describe("Skill name"),
1529
+ script_name: tool.schema.string().describe("Script filename"),
1530
+ content: tool.schema.string().describe("Script content"),
1531
+ executable: tool.schema
1532
+ .boolean()
1533
+ .optional()
1534
+ .describe("Make executable (default: true)"),
1535
+ },
1536
+ execute: (args, ctx) => execTool("skills_add_script", args, ctx),
1537
+ });
1538
+
1539
+ const skills_execute = tool({
1540
+ description: "Execute a skill's script",
1541
+ args: {
1542
+ skill_name: tool.schema.string().describe("Skill name"),
1543
+ script_name: tool.schema.string().describe("Script to execute"),
1544
+ args: tool.schema
1545
+ .array(tool.schema.string())
1546
+ .optional()
1547
+ .describe("Script arguments"),
1548
+ },
1549
+ execute: (args, ctx) => execTool("skills_execute", args, ctx),
1550
+ });
1551
+
1552
+ // =============================================================================
1553
+ // Swarm Insights Tools
1554
+ // =============================================================================
1555
+
1556
+ const swarm_get_strategy_insights = tool({
1557
+ description: "Get strategy success rates for decomposition planning. Use this when planning task decomposition to see which strategies (file-based, feature-based, risk-based) have historically succeeded or failed. Returns success rates and recommendations based on past swarm outcomes.",
1558
+ args: {
1559
+ task: tool.schema.string().describe("Task description to analyze for strategy recommendation"),
1560
+ },
1561
+ execute: (args, ctx) => execTool("swarm_get_strategy_insights", args, ctx),
1562
+ });
1563
+
1564
+ const swarm_get_file_insights = tool({
1565
+ description: "Get file-specific gotchas for worker context. Use this when assigning files to workers to warn them about historical failure patterns. Queries past outcomes and semantic memory for file-specific learnings (edge cases, common bugs, performance traps).",
1566
+ args: {
1567
+ files: tool.schema.array(tool.schema.string()).describe("File paths to get insights for"),
1568
+ },
1569
+ execute: (args, ctx) => execTool("swarm_get_file_insights", args, ctx),
1570
+ });
1571
+
1572
+ const swarm_get_pattern_insights = tool({
1573
+ description: "Get common failure patterns across swarms. Use this during planning or when debugging stuck swarms to see recurring anti-patterns (type errors, timeouts, conflicts, test failures). Returns top 5 most frequent failure patterns with recommendations.",
1574
+ args: {},
1575
+ execute: (args, ctx) => execTool("swarm_get_pattern_insights", args, ctx),
1576
+ });
1577
+
1578
+ // =============================================================================
1579
+ // CASS Tools (Cross-Agent Session Search)
1580
+ // =============================================================================
1581
+
1582
+ const cass_search = tool({
1583
+ description: "Search across all AI coding agent histories (Claude, Codex, Cursor, Gemini, Aider, ChatGPT, Cline, OpenCode, Amp, Pi-Agent). Query BEFORE solving problems from scratch - another agent may have already solved it. Returns matching sessions ranked by relevance.",
1584
+ args: {
1585
+ query: tool.schema.string().describe("Search query (e.g., 'authentication error Next.js')"),
1586
+ agent: tool.schema
1587
+ .string()
1588
+ .optional()
1589
+ .describe("Filter by agent name (e.g., 'claude', 'cursor')"),
1590
+ days: tool.schema
1591
+ .number()
1592
+ .optional()
1593
+ .describe("Only search sessions from last N days"),
1594
+ limit: tool.schema
1595
+ .number()
1596
+ .optional()
1597
+ .describe("Max results to return (default: 5)"),
1598
+ fields: tool.schema
1599
+ .string()
1600
+ .optional()
1601
+ .describe("Field selection: 'minimal' for compact output (path, line, agent only)"),
1602
+ },
1603
+ execute: (args, ctx) => execTool("cass_search", args, ctx),
1604
+ });
1605
+
1606
+ const cass_view = tool({
1607
+ description: "View a specific conversation/session from search results. Use source_path from cass_search output.",
1608
+ args: {
1609
+ path: tool.schema
1610
+ .string()
1611
+ .describe("Path to session file (from cass_search results)"),
1612
+ line: tool.schema
1613
+ .number()
1614
+ .optional()
1615
+ .describe("Jump to specific line number"),
1616
+ },
1617
+ execute: (args, ctx) => execTool("cass_view", args, ctx),
1618
+ });
1619
+
1620
+ const cass_expand = tool({
1621
+ description: "Expand context around a specific line in a session. Shows messages before/after.",
1622
+ args: {
1623
+ path: tool.schema
1624
+ .string()
1625
+ .describe("Path to session file"),
1626
+ line: tool.schema
1627
+ .number()
1628
+ .describe("Line number to expand around"),
1629
+ context: tool.schema
1630
+ .number()
1631
+ .optional()
1632
+ .describe("Number of lines before/after to show (default: 5)"),
1633
+ },
1634
+ execute: (args, ctx) => execTool("cass_expand", args, ctx),
1635
+ });
1636
+
1637
+ const cass_health = tool({
1638
+ description: "Check if cass index is healthy. Exit 0 = ready, Exit 1 = needs indexing. Run this before searching.",
1639
+ args: {},
1640
+ execute: (args, ctx) => execTool("cass_health", args, ctx),
1641
+ });
1642
+
1643
+ const cass_index = tool({
1644
+ description: "Build or rebuild the search index. Run this if health check fails or to pick up new sessions.",
1645
+ args: {
1646
+ full: tool.schema
1647
+ .boolean()
1648
+ .optional()
1649
+ .describe("Force full rebuild (default: incremental)"),
1650
+ },
1651
+ execute: (args, ctx) => execTool("cass_index", args, ctx),
1652
+ });
1653
+
1654
+ const cass_stats = tool({
1655
+ description: "Show index statistics - how many sessions, messages, agents indexed.",
1656
+ args: {},
1657
+ execute: (args, ctx) => execTool("cass_stats", args, ctx),
1658
+ });
1659
+
1660
+ // =============================================================================
1661
+ // Hivemind Tools (Unified Memory - Sessions + Learnings)
1662
+ // =============================================================================
1663
+
1664
+ const hivemind_store = tool({
1665
+ description: "Store a memory (learnings, decisions, patterns) with metadata and tags. Include WHY, not just WHAT.",
1666
+ args: {
1667
+ information: tool.schema.string().describe("The learning, decision, or pattern to store (include context and reasoning)"),
1668
+ tags: tool.schema.string().optional().describe("Comma-separated tags for categorization (e.g., 'auth,oauth,tokens')"),
1669
+ },
1670
+ execute: (args, ctx) => execTool("hivemind_store", args, ctx),
1671
+ });
1672
+
1673
+ const hivemind_find = tool({
1674
+ description: "Search all memories (learnings + sessions) by semantic similarity. Use BEFORE implementing to check if any agent solved it before.",
1675
+ args: {
1676
+ query: tool.schema.string().describe("Search query (e.g., 'token refresh race condition')"),
1677
+ limit: tool.schema.number().optional().describe("Max results to return (default: 5)"),
1678
+ collection: tool.schema.string().optional().describe("Filter by collection: 'default' (learnings), 'claude', 'cursor', etc., or omit for all"),
1679
+ },
1680
+ execute: (args, ctx) => execTool("hivemind_find", args, ctx),
1681
+ });
1682
+
1683
+ const hivemind_get = tool({
1684
+ description: "Get specific memory by ID",
1685
+ args: {
1686
+ id: tool.schema.string().describe("Memory ID (e.g., 'mem_xyz123')"),
1687
+ },
1688
+ execute: (args, ctx) => execTool("hivemind_get", args, ctx),
1689
+ });
1690
+
1691
+ const hivemind_remove = tool({
1692
+ description: "Delete outdated/incorrect memory",
1693
+ args: {
1694
+ id: tool.schema.string().describe("Memory ID to remove"),
1695
+ },
1696
+ execute: (args, ctx) => execTool("hivemind_remove", args, ctx),
1697
+ });
1698
+
1699
+ const hivemind_validate = tool({
1700
+ description: "Confirm memory is still accurate (resets 90-day decay timer)",
1701
+ args: {
1702
+ id: tool.schema.string().describe("Memory ID to validate"),
1703
+ },
1704
+ execute: (args, ctx) => execTool("hivemind_validate", args, ctx),
1705
+ });
1706
+
1707
+ const hivemind_stats = tool({
1708
+ description: "Memory statistics and health check (documents, chunks, embeddings)",
1709
+ args: {},
1710
+ execute: (args, ctx) => execTool("hivemind_stats", args, ctx),
1711
+ });
1712
+
1713
+ const hivemind_index = tool({
1714
+ description: "Index AI session directories (automatically indexes ~/.config/opencode/sessions, ~/.cursor-tutor, etc.)",
1715
+ args: {},
1716
+ execute: (args, ctx) => execTool("hivemind_index", args, ctx),
1717
+ });
1718
+
1719
+ const hivemind_sync = tool({
1720
+ description: "Sync learnings to .hive/memories.jsonl for git-backed team sharing",
1721
+ args: {},
1722
+ execute: (args, ctx) => execTool("hivemind_sync", args, ctx),
1723
+ });
1724
+
1725
+ // =============================================================================
1726
+ // Plugin Export
1727
+ // =============================================================================
1728
+
1729
+ // =============================================================================
1730
+ // Compaction Hook - Swarm Recovery Context
1731
+ // =============================================================================
1732
+
1733
+ /**
1734
+ * Detection result with confidence level
1735
+ */
1736
+ interface SwarmDetection {
1737
+ detected: boolean;
1738
+ confidence: "high" | "medium" | "low" | "none";
1739
+ reasons: string[];
1740
+ }
1741
+
1742
+ /**
1743
+ * Structured state snapshot for LLM-powered compaction
1744
+ *
1745
+ * This is passed to the lite model to generate a continuation prompt
1746
+ * with concrete data instead of just instructions.
1747
+ */
1748
+ interface SwarmStateSnapshot {
1749
+ sessionID: string;
1750
+ detection: {
1751
+ confidence: "high" | "medium" | "low" | "none";
1752
+ reasons: string[];
1753
+ };
1754
+ epic?: {
1755
+ id: string;
1756
+ title: string;
1757
+ status: string;
1758
+ subtasks: Array<{
1759
+ id: string;
1760
+ title: string;
1761
+ status: "open" | "in_progress" | "blocked" | "closed";
1762
+ files: string[];
1763
+ assignedTo?: string;
1764
+ }>;
1765
+ };
1766
+ messages: Array<{
1767
+ from: string;
1768
+ to: string[];
1769
+ subject: string;
1770
+ body: string;
1771
+ timestamp: number;
1772
+ importance?: string;
1773
+ }>;
1774
+ reservations: Array<{
1775
+ agent: string;
1776
+ paths: string[];
1777
+ exclusive: boolean;
1778
+ expiresAt: number;
1779
+ }>;
1780
+ }
1781
+
1782
+ /**
1783
+ * Query actual swarm state using spawn (like detectSwarm does)
1784
+ *
1785
+ * Returns structured snapshot of current state for LLM compaction.
1786
+ * Shells out to swarm CLI to get real data.
1787
+ */
1788
+ async function querySwarmState(sessionID: string): Promise<SwarmStateSnapshot> {
1789
+ const startTime = Date.now();
1790
+
1791
+ logCompaction("debug", "query_swarm_state_start", {
1792
+ session_id: sessionID,
1793
+ project_directory: projectDirectory,
1794
+ });
1795
+
1796
+ try {
1797
+ // Query cells via swarm CLI
1798
+ const cliStart = Date.now();
1799
+ const cellsResult = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
1800
+ (resolve) => {
1801
+ const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
1802
+ cwd: projectDirectory,
1803
+ stdio: ["ignore", "pipe", "pipe"],
1804
+ });
1805
+ let stdout = "";
1806
+ let stderr = "";
1807
+ proc.stdout.on("data", (d) => {
1808
+ stdout += d;
1809
+ });
1810
+ proc.stderr.on("data", (d) => {
1811
+ stderr += d;
1812
+ });
1813
+ proc.on("close", (exitCode) =>
1814
+ resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
1815
+ );
1816
+ },
1817
+ );
1818
+ const cliDuration = Date.now() - cliStart;
1819
+
1820
+ logCompaction("debug", "query_swarm_state_cli_complete", {
1821
+ session_id: sessionID,
1822
+ duration_ms: cliDuration,
1823
+ exit_code: cellsResult.exitCode,
1824
+ stdout_length: cellsResult.stdout.length,
1825
+ stderr_length: cellsResult.stderr.length,
1826
+ });
1827
+
1828
+ let cells: any[] = [];
1829
+ if (cellsResult.exitCode === 0) {
1830
+ try {
1831
+ const parsed = JSON.parse(cellsResult.stdout);
1832
+ // Handle wrapped response: { success: true, data: [...] }
1833
+ cells = Array.isArray(parsed) ? parsed : (parsed?.data ?? []);
1834
+ } catch (parseErr) {
1835
+ logCompaction("error", "query_swarm_state_parse_failed", {
1836
+ session_id: sessionID,
1837
+ error: parseErr instanceof Error ? parseErr.message : String(parseErr),
1838
+ stdout_preview: cellsResult.stdout.substring(0, 500),
1839
+ });
1840
+ }
1841
+ }
1842
+
1843
+ logCompaction("debug", "query_swarm_state_cells_parsed", {
1844
+ session_id: sessionID,
1845
+ cell_count: cells.length,
1846
+ cells: cells.map((c: any) => ({
1847
+ id: c.id,
1848
+ title: c.title,
1849
+ type: c.type,
1850
+ status: c.status,
1851
+ parent_id: c.parent_id,
1852
+ })),
1853
+ });
1854
+
1855
+ // Find active epic (first unclosed epic with subtasks)
1856
+ const openEpics = cells.filter(
1857
+ (c: { type?: string; status: string }) =>
1858
+ c.type === "epic" && c.status !== "closed",
1859
+ );
1860
+ const epic = openEpics[0];
1861
+
1862
+ logCompaction("debug", "query_swarm_state_epics", {
1863
+ session_id: sessionID,
1864
+ open_epic_count: openEpics.length,
1865
+ selected_epic: epic ? { id: epic.id, title: epic.title, status: epic.status } : null,
1866
+ });
1867
+
1868
+ // Get subtasks if we have an epic
1869
+ const subtasks =
1870
+ epic && epic.id
1871
+ ? cells.filter(
1872
+ (c: { parent_id?: string }) => c.parent_id === epic.id,
1873
+ )
1874
+ : [];
1875
+
1876
+ logCompaction("debug", "query_swarm_state_subtasks", {
1877
+ session_id: sessionID,
1878
+ subtask_count: subtasks.length,
1879
+ subtasks: subtasks.map((s: any) => ({
1880
+ id: s.id,
1881
+ title: s.title,
1882
+ status: s.status,
1883
+ files: s.files,
1884
+ })),
1885
+ });
1886
+
1887
+ // TODO: Query swarm mail for messages and reservations
1888
+ // For MVP, use empty arrays - the fallback chain handles this
1889
+ const messages: SwarmStateSnapshot["messages"] = [];
1890
+ const reservations: SwarmStateSnapshot["reservations"] = [];
1891
+
1892
+ // Run detection for confidence (already logged internally)
1893
+ const detection = await detectSwarm();
1894
+
1895
+ const snapshot: SwarmStateSnapshot = {
1896
+ sessionID,
1897
+ detection: {
1898
+ confidence: detection.confidence,
1899
+ reasons: detection.reasons,
1900
+ },
1901
+ epic: epic
1902
+ ? {
1903
+ id: epic.id,
1904
+ title: epic.title,
1905
+ status: epic.status,
1906
+ subtasks: subtasks.map((s: {
1907
+ id: string;
1908
+ title: string;
1909
+ status: string;
1910
+ files?: string[];
1911
+ }) => ({
1912
+ id: s.id,
1913
+ title: s.title,
1914
+ status: s.status as "open" | "in_progress" | "blocked" | "closed",
1915
+ files: s.files || [],
1916
+ })),
1917
+ }
1918
+ : undefined,
1919
+ messages,
1920
+ reservations,
1921
+ };
1922
+
1923
+ const totalDuration = Date.now() - startTime;
1924
+ logCompaction("debug", "query_swarm_state_complete", {
1925
+ session_id: sessionID,
1926
+ duration_ms: totalDuration,
1927
+ has_epic: !!snapshot.epic,
1928
+ epic_id: snapshot.epic?.id,
1929
+ subtask_count: snapshot.epic?.subtasks?.length ?? 0,
1930
+ message_count: snapshot.messages.length,
1931
+ reservation_count: snapshot.reservations.length,
1932
+ });
1933
+
1934
+ return snapshot;
1935
+ } catch (err) {
1936
+ logCompaction("error", "query_swarm_state_exception", {
1937
+ session_id: sessionID,
1938
+ error: err instanceof Error ? err.message : String(err),
1939
+ stack: err instanceof Error ? err.stack : undefined,
1940
+ duration_ms: Date.now() - startTime,
1941
+ });
1942
+
1943
+ // If query fails, return minimal snapshot
1944
+ const detection = await detectSwarm();
1945
+ return {
1946
+ sessionID,
1947
+ detection: {
1948
+ confidence: detection.confidence,
1949
+ reasons: detection.reasons,
1950
+ },
1951
+ messages: [],
1952
+ reservations: [],
1953
+ };
1954
+ }
1955
+ }
1956
+
1957
+ /**
1958
+ * Generate compaction prompt using LLM
1959
+ *
1960
+ * Shells out to `opencode run -m <liteModel>` with structured state.
1961
+ * Returns markdown continuation prompt or null on failure.
1962
+ *
1963
+ * Timeout: 30 seconds
1964
+ */
1965
+ async function generateCompactionPrompt(
1966
+ snapshot: SwarmStateSnapshot,
1967
+ ): Promise<string | null> {
1968
+ const startTime = Date.now();
1969
+ const liteModel = process.env.OPENCODE_LITE_MODEL || "__SWARM_LITE_MODEL__";
1970
+
1971
+ logCompaction("debug", "generate_compaction_prompt_start", {
1972
+ session_id: snapshot.sessionID,
1973
+ lite_model: liteModel,
1974
+ has_epic: !!snapshot.epic,
1975
+ epic_id: snapshot.epic?.id,
1976
+ subtask_count: snapshot.epic?.subtasks?.length ?? 0,
1977
+ snapshot_size: JSON.stringify(snapshot).length,
1978
+ });
1979
+
1980
+ try {
1981
+ const promptText = `You are generating a continuation prompt for a compacted swarm coordination session.
1982
+
1983
+ Analyze this swarm state and generate a structured markdown prompt that will be given to the resumed session:
1984
+
1985
+ ${JSON.stringify(snapshot, null, 2)}
1986
+
1987
+ Generate a prompt following this structure:
1988
+
1989
+ ┌─────────────────────────────────────────────────────────────┐
1990
+ │ │
1991
+ │ 🐝 YOU ARE THE COORDINATOR 🐝 │
1992
+ │ │
1993
+ │ NOT A WORKER. NOT AN IMPLEMENTER. │
1994
+ │ YOU ORCHESTRATE. │
1995
+ │ │
1996
+ └─────────────────────────────────────────────────────────────┘
1997
+
1998
+ # 🐝 Swarm Continuation - [Epic Title or "Unknown"]
1999
+
2000
+ **NON-NEGOTIABLE: YOU ARE THE COORDINATOR.** You resumed after context compaction.
2001
+
2002
+ ## Epic State
2003
+
2004
+ **ID:** [epic ID or "Unknown"]
2005
+ **Title:** [epic title or "No active epic"]
2006
+ **Status:** [X/Y subtasks complete]
2007
+ **Project:** ${projectDirectory}
2008
+
2009
+ ## Subtask Status
2010
+
2011
+ ### ✅ Completed (N)
2012
+ [List completed subtasks with IDs]
2013
+
2014
+ ### 🚧 In Progress (N)
2015
+ [List in-progress subtasks with IDs, files, agents if known]
2016
+
2017
+ ### 🚫 Blocked (N)
2018
+ [List blocked subtasks]
2019
+
2020
+ ### ⏳ Pending (N)
2021
+ [List pending subtasks]
2022
+
2023
+ ## Next Actions (IMMEDIATE)
2024
+
2025
+ [List 3-5 concrete actions with actual commands, using real IDs from the state]
2026
+
2027
+ ## 🎯 COORDINATOR MANDATES (NON-NEGOTIABLE)
2028
+
2029
+ **YOU ARE THE COORDINATOR. NOT A WORKER.**
2030
+
2031
+ ### ⛔ FORBIDDEN - NEVER do these:
2032
+ - ❌ NEVER use \`edit\`, \`write\`, or \`bash\` for implementation - SPAWN A WORKER
2033
+ - ❌ NEVER fetch directly with \`repo-crawl_*\`, \`repo-autopsy_*\`, \`webfetch\`, \`fetch_fetch\` - SPAWN A RESEARCHER
2034
+ - ❌ NEVER use \`context7_*\` or \`pdf-brain_*\` directly - SPAWN A RESEARCHER
2035
+ - ❌ NEVER reserve files - Workers reserve files
2036
+
2037
+ ### ✅ ALWAYS do these:
2038
+ - ✅ ALWAYS check \`swarm_status\` and \`swarmmail_inbox\` first
2039
+ - ✅ ALWAYS use \`swarm_spawn_subtask\` for implementation work
2040
+ - ✅ ALWAYS use \`swarm_spawn_researcher\` for external data fetching
2041
+ - ✅ ALWAYS review worker output with \`swarm_review\` → \`swarm_review_feedback\`
2042
+ - ✅ ALWAYS monitor actively - Check messages every ~10 minutes
2043
+ - ✅ ALWAYS unblock aggressively - Resolve dependencies immediately
2044
+
2045
+ **If you need external data:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
2046
+
2047
+ **3-strike rule enforced:** Workers get 3 review attempts. After 3 rejections, escalate to human.
2048
+
2049
+ Keep the prompt concise but actionable. Use actual data from the snapshot, not placeholders. Include the ASCII header and ALL coordinator mandates.`;
2050
+
2051
+ logCompaction("debug", "generate_compaction_prompt_calling_llm", {
2052
+ session_id: snapshot.sessionID,
2053
+ prompt_length: promptText.length,
2054
+ model: liteModel,
2055
+ command: `opencode run -m ${liteModel} -- <prompt>`,
2056
+ });
2057
+
2058
+ const llmStart = Date.now();
2059
+ const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
2060
+ (resolve, reject) => {
2061
+ const proc = spawn("opencode", ["run", "-m", liteModel, "--", promptText], {
2062
+ cwd: projectDirectory,
2063
+ stdio: ["ignore", "pipe", "pipe"],
2064
+ timeout: 30000, // 30 second timeout
2065
+ });
2066
+
2067
+ let stdout = "";
2068
+ let stderr = "";
2069
+
2070
+ proc.stdout.on("data", (d) => {
2071
+ stdout += d;
2072
+ });
2073
+ proc.stderr.on("data", (d) => {
2074
+ stderr += d;
2075
+ });
2076
+
2077
+ proc.on("close", (exitCode) => {
2078
+ resolve({ exitCode: exitCode ?? 1, stdout, stderr });
2079
+ });
2080
+
2081
+ proc.on("error", (err) => {
2082
+ reject(err);
2083
+ });
2084
+
2085
+ // Timeout handling
2086
+ setTimeout(() => {
2087
+ proc.kill("SIGTERM");
2088
+ reject(new Error("LLM compaction timeout (30s)"));
2089
+ }, 30000);
2090
+ },
2091
+ );
2092
+ const llmDuration = Date.now() - llmStart;
2093
+
2094
+ logCompaction("debug", "generate_compaction_prompt_llm_complete", {
2095
+ session_id: snapshot.sessionID,
2096
+ duration_ms: llmDuration,
2097
+ exit_code: result.exitCode,
2098
+ stdout_length: result.stdout.length,
2099
+ stderr_length: result.stderr.length,
2100
+ stderr_preview: result.stderr.substring(0, 500),
2101
+ stdout_preview: result.stdout.substring(0, 500),
2102
+ });
2103
+
2104
+ if (result.exitCode !== 0) {
2105
+ logCompaction("error", "generate_compaction_prompt_llm_failed", {
2106
+ session_id: snapshot.sessionID,
2107
+ exit_code: result.exitCode,
2108
+ stderr: result.stderr,
2109
+ stdout: result.stdout,
2110
+ duration_ms: llmDuration,
2111
+ });
2112
+ return null;
2113
+ }
2114
+
2115
+ // Extract the prompt from stdout (LLM may wrap in markdown)
2116
+ const prompt = result.stdout.trim();
2117
+
2118
+ const totalDuration = Date.now() - startTime;
2119
+ logCompaction("debug", "generate_compaction_prompt_success", {
2120
+ session_id: snapshot.sessionID,
2121
+ total_duration_ms: totalDuration,
2122
+ llm_duration_ms: llmDuration,
2123
+ prompt_length: prompt.length,
2124
+ prompt_preview: prompt.substring(0, 500),
2125
+ prompt_has_content: prompt.length > 0,
2126
+ });
2127
+
2128
+ return prompt.length > 0 ? prompt : null;
2129
+ } catch (err) {
2130
+ const totalDuration = Date.now() - startTime;
2131
+ logCompaction("error", "generate_compaction_prompt_exception", {
2132
+ session_id: snapshot.sessionID,
2133
+ error: err instanceof Error ? err.message : String(err),
2134
+ stack: err instanceof Error ? err.stack : undefined,
2135
+ duration_ms: totalDuration,
2136
+ });
2137
+ return null;
2138
+ }
2139
+ }
2140
+
2141
+ /**
2142
+ * Session message scan result
2143
+ */
2144
+ interface SessionScanResult {
2145
+ messageCount: number;
2146
+ toolCalls: Array<{
2147
+ toolName: string;
2148
+ args: Record<string, unknown>;
2149
+ output?: string;
2150
+ timestamp?: number;
2151
+ }>;
2152
+ swarmDetected: boolean;
2153
+ reasons: string[];
2154
+ /** Projected swarm state from event fold - ground truth from session events */
2155
+ projection?: SwarmProjection;
2156
+ }
2157
+
2158
+ /**
2159
+ * Scan session messages for swarm tool calls
2160
+ *
2161
+ * Uses SDK client to fetch messages and look for swarm activity.
2162
+ * This can detect swarm work even if no cells exist yet.
2163
+ */
2164
+ async function scanSessionMessages(sessionID: string): Promise<SessionScanResult> {
2165
+ const startTime = Date.now();
2166
+ const result: SessionScanResult = {
2167
+ messageCount: 0,
2168
+ toolCalls: [],
2169
+ swarmDetected: false,
2170
+ reasons: [],
2171
+ };
2172
+
2173
+ logCompaction("debug", "session_scan_start", {
2174
+ session_id: sessionID,
2175
+ has_sdk_client: !!sdkClient,
2176
+ });
2177
+
2178
+ if (!sdkClient) {
2179
+ logCompaction("warn", "session_scan_no_sdk_client", {
2180
+ session_id: sessionID,
2181
+ });
2182
+ return result;
2183
+ }
2184
+
2185
+ try {
2186
+ // Fetch session messages
2187
+ const messagesStart = Date.now();
2188
+ const rawResponse = await sdkClient.session.messages({ path: { id: sessionID } });
2189
+ const messagesDuration = Date.now() - messagesStart;
2190
+
2191
+ // Log the RAW response to understand its shape
2192
+ logCompaction("debug", "session_scan_raw_response", {
2193
+ session_id: sessionID,
2194
+ response_type: typeof rawResponse,
2195
+ is_array: Array.isArray(rawResponse),
2196
+ is_null: rawResponse === null,
2197
+ is_undefined: rawResponse === undefined,
2198
+ keys: rawResponse && typeof rawResponse === 'object' ? Object.keys(rawResponse) : [],
2199
+ raw_preview: JSON.stringify(rawResponse)?.slice(0, 500),
2200
+ });
2201
+
2202
+ // The response might be wrapped - check common patterns
2203
+ const messages = Array.isArray(rawResponse)
2204
+ ? rawResponse
2205
+ : rawResponse?.data
2206
+ ? rawResponse.data
2207
+ : rawResponse?.messages
2208
+ ? rawResponse.messages
2209
+ : rawResponse?.items
2210
+ ? rawResponse.items
2211
+ : [];
2212
+
2213
+ result.messageCount = messages?.length ?? 0;
2214
+
2215
+ logCompaction("debug", "session_scan_messages_fetched", {
2216
+ session_id: sessionID,
2217
+ duration_ms: messagesDuration,
2218
+ message_count: result.messageCount,
2219
+ extraction_method: Array.isArray(rawResponse) ? 'direct_array' : rawResponse?.data ? 'data_field' : rawResponse?.messages ? 'messages_field' : rawResponse?.items ? 'items_field' : 'fallback_empty',
2220
+ });
2221
+
2222
+ if (!Array.isArray(messages) || messages.length === 0) {
2223
+ logCompaction("debug", "session_scan_no_messages", {
2224
+ session_id: sessionID,
2225
+ });
2226
+ return result;
2227
+ }
2228
+
2229
+ // Swarm-related tool patterns
2230
+ const swarmTools = [
2231
+ // High confidence - active swarm coordination
2232
+ "hive_create_epic",
2233
+ "swarm_decompose",
2234
+ "swarm_spawn_subtask",
2235
+ "swarm_complete",
2236
+ "swarmmail_init",
2237
+ "swarmmail_reserve",
2238
+ // Medium confidence - swarm activity
2239
+ "hive_start",
2240
+ "hive_close",
2241
+ "swarm_status",
2242
+ "swarm_progress",
2243
+ "swarmmail_send",
2244
+ // Low confidence - possible swarm
2245
+ "hive_create",
2246
+ "hive_query",
2247
+ ];
2248
+
2249
+ const highConfidenceTools = new Set([
2250
+ "hive_create_epic",
2251
+ "swarm_decompose",
2252
+ "swarm_spawn_subtask",
2253
+ "swarmmail_init",
2254
+ "swarmmail_reserve",
2255
+ ]);
2256
+
2257
+ // Scan messages for tool calls
2258
+ let swarmToolCount = 0;
2259
+ let highConfidenceCount = 0;
2260
+
2261
+ // Debug: collect part types to understand message structure
2262
+ const partTypeCounts: Record<string, number> = {};
2263
+ let messagesWithParts = 0;
2264
+ let messagesWithoutParts = 0;
2265
+ let samplePartTypes: string[] = [];
2266
+
2267
+ for (const message of messages) {
2268
+ if (!message.parts || !Array.isArray(message.parts)) {
2269
+ messagesWithoutParts++;
2270
+ continue;
2271
+ }
2272
+ messagesWithParts++;
2273
+
2274
+ for (const part of message.parts) {
2275
+ const partType = part.type || "unknown";
2276
+ partTypeCounts[partType] = (partTypeCounts[partType] || 0) + 1;
2277
+
2278
+ // Collect first 10 unique part types for debugging
2279
+ if (samplePartTypes.length < 10 && !samplePartTypes.includes(partType)) {
2280
+ samplePartTypes.push(partType);
2281
+ }
2282
+
2283
+ // Check if this is a tool call part
2284
+ // OpenCode SDK: ToolPart has type="tool", tool=<string name>, state={...}
2285
+ if (part.type === "tool") {
2286
+ const toolPart = part as ToolPart;
2287
+ const toolName = toolPart.tool; // tool name is a string directly
2288
+
2289
+ if (toolName && swarmTools.includes(toolName)) {
2290
+ swarmToolCount++;
2291
+
2292
+ if (highConfidenceTools.has(toolName)) {
2293
+ highConfidenceCount++;
2294
+ }
2295
+
2296
+ // Extract args/output/timestamp from state if available
2297
+ const state = toolPart.state;
2298
+ const args = state && "input" in state ? state.input : {};
2299
+ const output = state && "output" in state ? state.output : undefined;
2300
+ const timestamp = state && "time" in state && state.time && typeof state.time === "object" && "end" in state.time
2301
+ ? (state.time as { end: number }).end
2302
+ : Date.now();
2303
+
2304
+ result.toolCalls.push({
2305
+ toolName,
2306
+ args,
2307
+ output,
2308
+ timestamp,
2309
+ });
2310
+
2311
+ logCompaction("debug", "session_scan_tool_found", {
2312
+ session_id: sessionID,
2313
+ tool_name: toolName,
2314
+ is_high_confidence: highConfidenceTools.has(toolName),
2315
+ });
2316
+ }
2317
+ }
2318
+ }
2319
+ }
2320
+
2321
+ // =======================================================================
2322
+ // PROJECT SWARM STATE FROM EVENTS (deterministic, no heuristics)
2323
+ // =======================================================================
2324
+ // Convert tool calls to ToolCallEvent format for projection
2325
+ const events: ToolCallEvent[] = result.toolCalls.map(tc => ({
2326
+ tool: tc.toolName,
2327
+ input: tc.args as Record<string, unknown>,
2328
+ output: tc.output || "{}",
2329
+ timestamp: tc.timestamp || Date.now(),
2330
+ }));
2331
+
2332
+ // Project swarm state from events - this is the ground truth
2333
+ const projection = projectSwarmState(events);
2334
+ result.projection = projection;
2335
+
2336
+ // Use projection for swarm detection (deterministic)
2337
+ if (projection.isSwarm) {
2338
+ result.swarmDetected = true;
2339
+ result.reasons.push(`Swarm signature detected: epic ${projection.epic?.id || "unknown"} with ${projection.counts.total} subtasks`);
2340
+
2341
+ if (isSwarmActive(projection)) {
2342
+ result.reasons.push(`Swarm ACTIVE: ${projection.counts.spawned} spawned, ${projection.counts.inProgress} in_progress, ${projection.counts.completed} completed (not closed)`);
2343
+ } else {
2344
+ result.reasons.push(`Swarm COMPLETE: all ${projection.counts.closed} subtasks closed`);
2345
+ }
2346
+ } else if (highConfidenceCount > 0) {
2347
+ // Fallback to heuristic detection if no signature but high-confidence tools found
2348
+ result.swarmDetected = true;
2349
+ result.reasons.push(`${highConfidenceCount} high-confidence swarm tools (${Array.from(new Set(result.toolCalls.filter(tc => highConfidenceTools.has(tc.toolName)).map(tc => tc.toolName))).join(", ")})`);
2350
+ } else if (swarmToolCount > 0) {
2351
+ result.swarmDetected = true;
2352
+ result.reasons.push(`${swarmToolCount} swarm-related tools used`);
2353
+ }
2354
+
2355
+ const totalDuration = Date.now() - startTime;
2356
+
2357
+ // Debug: log part type distribution to understand message structure
2358
+ logCompaction("debug", "session_scan_part_types", {
2359
+ session_id: sessionID,
2360
+ messages_with_parts: messagesWithParts,
2361
+ messages_without_parts: messagesWithoutParts,
2362
+ part_type_counts: partTypeCounts,
2363
+ sample_part_types: samplePartTypes,
2364
+ });
2365
+
2366
+ logCompaction("info", "session_scan_complete", {
2367
+ session_id: sessionID,
2368
+ duration_ms: totalDuration,
2369
+ message_count: result.messageCount,
2370
+ tool_call_count: result.toolCalls.length,
2371
+ swarm_tool_count: swarmToolCount,
2372
+ high_confidence_count: highConfidenceCount,
2373
+ swarm_detected: result.swarmDetected,
2374
+ reasons: result.reasons,
2375
+ unique_tools: Array.from(new Set(result.toolCalls.map(tc => tc.toolName))),
2376
+ // Add projection summary
2377
+ projection_summary: projection.isSwarm ? {
2378
+ epic_id: projection.epic?.id,
2379
+ epic_title: projection.epic?.title,
2380
+ epic_status: projection.epic?.status,
2381
+ is_active: isSwarmActive(projection),
2382
+ counts: projection.counts,
2383
+ } : null,
2384
+ });
2385
+
2386
+ return result;
2387
+ } catch (err) {
2388
+ const totalDuration = Date.now() - startTime;
2389
+ logCompaction("error", "session_scan_exception", {
2390
+ session_id: sessionID,
2391
+ error: err instanceof Error ? err.message : String(err),
2392
+ stack: err instanceof Error ? err.stack : undefined,
2393
+ duration_ms: totalDuration,
2394
+ });
2395
+ return result;
2396
+ }
2397
+ }
2398
+
2399
+ /**
2400
+ * Check for swarm sign - evidence a swarm passed through
2401
+ *
2402
+ * Uses multiple signals with different confidence levels:
2403
+ * - HIGH: in_progress cells (active work)
2404
+ * - MEDIUM: Open subtasks, unclosed epics, recently updated cells
2405
+ * - LOW: Any cells exist
2406
+ *
2407
+ * Philosophy: Err on the side of continuation.
2408
+ * False positive = extra context (low cost)
2409
+ * False negative = lost swarm (high cost)
2410
+ */
2411
+ async function detectSwarm(): Promise<SwarmDetection> {
2412
+ const startTime = Date.now();
2413
+ const reasons: string[] = [];
2414
+ let highConfidence = false;
2415
+ let mediumConfidence = false;
2416
+ let lowConfidence = false;
2417
+
2418
+ logCompaction("debug", "detect_swarm_start", {
2419
+ project_directory: projectDirectory,
2420
+ cwd: process.cwd(),
2421
+ });
2422
+
2423
+ try {
2424
+ const cliStart = Date.now();
2425
+ const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
2426
+ (resolve) => {
2427
+ // Use swarm tool to query beads
2428
+ const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
2429
+ cwd: projectDirectory,
2430
+ stdio: ["ignore", "pipe", "pipe"],
2431
+ });
2432
+ let stdout = "";
2433
+ let stderr = "";
2434
+ proc.stdout.on("data", (d) => {
2435
+ stdout += d;
2436
+ });
2437
+ proc.stderr.on("data", (d) => {
2438
+ stderr += d;
2439
+ });
2440
+ proc.on("close", (exitCode) =>
2441
+ resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
2442
+ );
2443
+ },
2444
+ );
2445
+ const cliDuration = Date.now() - cliStart;
2446
+
2447
+ logCompaction("debug", "detect_swarm_cli_complete", {
2448
+ duration_ms: cliDuration,
2449
+ exit_code: result.exitCode,
2450
+ stdout_length: result.stdout.length,
2451
+ stderr_length: result.stderr.length,
2452
+ stderr_preview: result.stderr.substring(0, 200),
2453
+ });
2454
+
2455
+ if (result.exitCode !== 0) {
2456
+ logCompaction("warn", "detect_swarm_cli_failed", {
2457
+ exit_code: result.exitCode,
2458
+ stderr: result.stderr,
2459
+ });
2460
+ return { detected: false, confidence: "none", reasons: ["hive_query failed"] };
2461
+ }
2462
+
2463
+ let cells: any[];
2464
+ try {
2465
+ cells = JSON.parse(result.stdout);
2466
+ } catch (parseErr) {
2467
+ logCompaction("error", "detect_swarm_parse_failed", {
2468
+ error: parseErr instanceof Error ? parseErr.message : String(parseErr),
2469
+ stdout_preview: result.stdout.substring(0, 500),
2470
+ });
2471
+ return { detected: false, confidence: "none", reasons: ["hive_query parse failed"] };
2472
+ }
2473
+
2474
+ if (!Array.isArray(cells) || cells.length === 0) {
2475
+ logCompaction("debug", "detect_swarm_no_cells", {
2476
+ is_array: Array.isArray(cells),
2477
+ length: cells?.length ?? 0,
2478
+ });
2479
+ return { detected: false, confidence: "none", reasons: ["no cells found"] };
2480
+ }
2481
+
2482
+ // Log ALL cells for debugging
2483
+ logCompaction("debug", "detect_swarm_cells_found", {
2484
+ total_cells: cells.length,
2485
+ cells: cells.map((c: any) => ({
2486
+ id: c.id,
2487
+ title: c.title,
2488
+ type: c.type,
2489
+ status: c.status,
2490
+ parent_id: c.parent_id,
2491
+ updated_at: c.updated_at,
2492
+ created_at: c.created_at,
2493
+ })),
2494
+ });
2495
+
2496
+ // HIGH: Any in_progress cells
2497
+ const inProgress = cells.filter(
2498
+ (c: { status: string }) => c.status === "in_progress"
2499
+ );
2500
+ if (inProgress.length > 0) {
2501
+ highConfidence = true;
2502
+ reasons.push(`${inProgress.length} cells in_progress`);
2503
+ logCompaction("debug", "detect_swarm_in_progress", {
2504
+ count: inProgress.length,
2505
+ cells: inProgress.map((c: any) => ({ id: c.id, title: c.title })),
2506
+ });
2507
+ }
2508
+
2509
+ // MEDIUM: Open subtasks (cells with parent_id)
2510
+ const subtasks = cells.filter(
2511
+ (c: { status: string; parent_id?: string }) =>
2512
+ c.status === "open" && c.parent_id
2513
+ );
2514
+ if (subtasks.length > 0) {
2515
+ mediumConfidence = true;
2516
+ reasons.push(`${subtasks.length} open subtasks`);
2517
+ logCompaction("debug", "detect_swarm_open_subtasks", {
2518
+ count: subtasks.length,
2519
+ cells: subtasks.map((c: any) => ({ id: c.id, title: c.title, parent_id: c.parent_id })),
2520
+ });
2521
+ }
2522
+
2523
+ // MEDIUM: Unclosed epics
2524
+ const openEpics = cells.filter(
2525
+ (c: { status: string; type?: string }) =>
2526
+ c.type === "epic" && c.status !== "closed"
2527
+ );
2528
+ if (openEpics.length > 0) {
2529
+ mediumConfidence = true;
2530
+ reasons.push(`${openEpics.length} unclosed epics`);
2531
+ logCompaction("debug", "detect_swarm_open_epics", {
2532
+ count: openEpics.length,
2533
+ cells: openEpics.map((c: any) => ({ id: c.id, title: c.title, status: c.status })),
2534
+ });
2535
+ }
2536
+
2537
+ // MEDIUM: Recently updated cells (last hour)
2538
+ const oneHourAgo = Date.now() - 60 * 60 * 1000;
2539
+ const recentCells = cells.filter(
2540
+ (c: { updated_at?: number }) => c.updated_at && c.updated_at > oneHourAgo
2541
+ );
2542
+ if (recentCells.length > 0) {
2543
+ mediumConfidence = true;
2544
+ reasons.push(`${recentCells.length} cells updated in last hour`);
2545
+ logCompaction("debug", "detect_swarm_recent_cells", {
2546
+ count: recentCells.length,
2547
+ one_hour_ago: oneHourAgo,
2548
+ cells: recentCells.map((c: any) => ({
2549
+ id: c.id,
2550
+ title: c.title,
2551
+ updated_at: c.updated_at,
2552
+ age_minutes: Math.round((Date.now() - c.updated_at) / 60000),
2553
+ })),
2554
+ });
2555
+ }
2556
+
2557
+ // LOW: Any cells exist at all
2558
+ if (cells.length > 0) {
2559
+ lowConfidence = true;
2560
+ reasons.push(`${cells.length} total cells in hive`);
2561
+ }
2562
+ } catch (err) {
2563
+ // Detection failed, use fallback
2564
+ lowConfidence = true;
2565
+ reasons.push("Detection error, using fallback");
2566
+ logCompaction("error", "detect_swarm_exception", {
2567
+ error: err instanceof Error ? err.message : String(err),
2568
+ stack: err instanceof Error ? err.stack : undefined,
2569
+ });
2570
+ }
2571
+
2572
+ // Determine overall confidence
2573
+ let confidence: "high" | "medium" | "low" | "none";
2574
+ if (highConfidence) {
2575
+ confidence = "high";
2576
+ } else if (mediumConfidence) {
2577
+ confidence = "medium";
2578
+ } else if (lowConfidence) {
2579
+ confidence = "low";
2580
+ } else {
2581
+ confidence = "none";
2582
+ }
2583
+
2584
+ const totalDuration = Date.now() - startTime;
2585
+ logCompaction("debug", "detect_swarm_complete", {
2586
+ duration_ms: totalDuration,
2587
+ confidence,
2588
+ detected: confidence !== "none",
2589
+ reason_count: reasons.length,
2590
+ reasons,
2591
+ high_confidence: highConfidence,
2592
+ medium_confidence: mediumConfidence,
2593
+ low_confidence: lowConfidence,
2594
+ });
2595
+
2596
+ return {
2597
+ detected: confidence !== "none",
2598
+ confidence,
2599
+ reasons,
2600
+ };
2601
+ }
2602
+
2603
/**
 * Swarm-aware compaction context (static prompt).
 *
 * Injected during compaction to keep the swarm cooking. The coordinator should
 * wake up from compaction and immediately resume orchestration - spawning agents,
 * monitoring progress, unblocking work.
 *
 * NOTE: this is a runtime prompt string consumed by the model after resume;
 * it is the static fallback used when no LLM-generated prompt is available
 * (a dynamic state section may be prepended by buildDynamicStateFromSnapshot).
 */
const SWARM_COMPACTION_CONTEXT = `## 🐝 SWARM ACTIVE - Keep Cooking

You are the **COORDINATOR** of an active swarm. Context was compacted but the swarm is still running.

**YOUR JOB:** Keep orchestrating. Spawn agents. Monitor progress. Unblock work. Ship it.

### Preserve in Summary

Extract from session context:

1. **Epic & Subtasks** - IDs, titles, status, file assignments
2. **What's Running** - Which agents are active, what they're working on
3. **What's Blocked** - Blockers and what's needed to unblock
4. **What's Done** - Completed work and any follow-ups needed
5. **What's Next** - Pending subtasks ready to spawn

### Summary Format

\`\`\`
## 🐝 Swarm State

**Epic:** <bd-xxx> - <title>
**Project:** <path>
**Progress:** X/Y subtasks complete

**Active:**
- <bd-xxx>: <title> [in_progress] → <agent> working on <files>

**Blocked:**
- <bd-xxx>: <title> - BLOCKED: <reason>

**Completed:**
- <bd-xxx>: <title> ✓

**Ready to Spawn:**
- <bd-xxx>: <title> (files: <...>)
\`\`\`

### On Resume - IMMEDIATELY

1. \`swarm_status(epic_id="<epic>", project_key="<path>")\` - Get current state
2. \`swarmmail_inbox(limit=5)\` - Check for agent messages
3. \`swarm_review(project_key, epic_id, task_id, files_touched)\` - Review any completed work
4. \`swarm_review_feedback(project_key, task_id, worker_id, status, issues)\` - Approve or request changes
5. **Spawn ready subtasks** - Don't wait, fire them off
6. **Unblock blocked work** - Resolve dependencies, reassign if needed
7. **Collect completed work** - Close done subtasks, verify quality

### Keep the Swarm Cooking

- **Spawn aggressively** - If a subtask is ready and unblocked, spawn an agent
- **Monitor actively** - Check status, read messages, respond to blockers
- **Close the loop** - When all subtasks done, verify and close the epic
- **Don't stop** - The swarm runs until the epic is closed

**You are not waiting for instructions. You are the coordinator. Coordinate.**
`;
2667
+
2668
+ /**
2669
+ * Build dynamic swarm state section from snapshot
2670
+ *
2671
+ * This creates a concrete state summary with actual IDs and status
2672
+ * to prepend to the static compaction context.
2673
+ */
2674
+ function buildDynamicStateFromSnapshot(snapshot: SwarmStateSnapshot): string {
2675
+ if (!snapshot.epic) {
2676
+ return "";
2677
+ }
2678
+
2679
+ const parts: string[] = [];
2680
+
2681
+ // Header with epic info
2682
+ parts.push(`## 🐝 Current Swarm State\n`);
2683
+ parts.push(`**Epic:** ${snapshot.epic.id} - ${snapshot.epic.title}`);
2684
+ parts.push(`**Status:** ${snapshot.epic.status}`);
2685
+ parts.push(`**Project:** ${projectDirectory}\n`);
2686
+
2687
+ // Subtask breakdown
2688
+ const subtasks = snapshot.epic.subtasks || [];
2689
+ const completed = subtasks.filter(s => s.status === "closed");
2690
+ const inProgress = subtasks.filter(s => s.status === "in_progress");
2691
+ const blocked = subtasks.filter(s => s.status === "blocked");
2692
+ const pending = subtasks.filter(s => s.status === "open");
2693
+
2694
+ parts.push(`**Progress:** ${completed.length}/${subtasks.length} subtasks complete\n`);
2695
+
2696
+ // Immediate actions with real IDs
2697
+ parts.push(`## 1️⃣ IMMEDIATE ACTIONS (Do These FIRST)\n`);
2698
+ parts.push(`1. \`swarm_status(epic_id="${snapshot.epic.id}", project_key="${projectDirectory}")\` - Get current state`);
2699
+ parts.push(`2. \`swarmmail_inbox(limit=5)\` - Check for worker messages`);
2700
+
2701
+ if (inProgress.length > 0) {
2702
+ parts.push(`3. Review in-progress work when workers complete`);
2703
+ }
2704
+ if (pending.length > 0) {
2705
+ const next = pending[0];
2706
+ parts.push(`4. Spawn next subtask: \`swarm_spawn_subtask(bead_id="${next.id}", ...)\``);
2707
+ }
2708
+ if (blocked.length > 0) {
2709
+ parts.push(`5. Unblock: ${blocked.map(s => s.id).join(", ")}`);
2710
+ }
2711
+ parts.push("");
2712
+
2713
+ // Detailed subtask status
2714
+ if (inProgress.length > 0) {
2715
+ parts.push(`### 🚧 In Progress (${inProgress.length})`);
2716
+ for (const s of inProgress) {
2717
+ const files = s.files?.length ? ` (${s.files.slice(0, 3).join(", ")}${s.files.length > 3 ? "..." : ""})` : "";
2718
+ parts.push(`- ${s.id}: ${s.title}${files}`);
2719
+ }
2720
+ parts.push("");
2721
+ }
2722
+
2723
+ if (blocked.length > 0) {
2724
+ parts.push(`### 🚫 Blocked (${blocked.length})`);
2725
+ for (const s of blocked) {
2726
+ parts.push(`- ${s.id}: ${s.title}`);
2727
+ }
2728
+ parts.push("");
2729
+ }
2730
+
2731
+ if (pending.length > 0) {
2732
+ parts.push(`### ⏳ Ready to Spawn (${pending.length})`);
2733
+ for (const s of pending.slice(0, 5)) { // Show first 5
2734
+ const files = s.files?.length ? ` (${s.files.slice(0, 2).join(", ")}${s.files.length > 2 ? "..." : ""})` : "";
2735
+ parts.push(`- ${s.id}: ${s.title}${files}`);
2736
+ }
2737
+ if (pending.length > 5) {
2738
+ parts.push(`- ... and ${pending.length - 5} more`);
2739
+ }
2740
+ parts.push("");
2741
+ }
2742
+
2743
+ if (completed.length > 0) {
2744
+ parts.push(`### ✅ Completed (${completed.length})`);
2745
+ for (const s of completed.slice(-3)) { // Show last 3
2746
+ parts.push(`- ${s.id}: ${s.title} ✓`);
2747
+ }
2748
+ if (completed.length > 3) {
2749
+ parts.push(`- ... and ${completed.length - 3} more`);
2750
+ }
2751
+ parts.push("");
2752
+ }
2753
+
2754
+ parts.push("---\n");
2755
+
2756
+ return parts.join("\n");
2757
+ }
2758
+
2759
/**
 * Fallback detection prompt - tells the compactor what to look for.
 *
 * Used when we can't definitively detect a swarm but want to be safe.
 * The compactor can check the conversation context for these patterns.
 *
 * NOTE: this is a runtime prompt string shown to the summarizing model;
 * it asks the model itself to scan for swarm evidence instead of relying
 * on the plugin's own detection.
 */
const SWARM_DETECTION_FALLBACK = `## 🐝 Swarm Detection - Check Your Context

**IMPORTANT:** Before summarizing, check if this session involves an active swarm.

Look for ANY of these patterns in the conversation:

### Tool Calls (definite swarm sign)
- \`swarm_decompose\`, \`swarm_spawn_subtask\`, \`swarm_status\`, \`swarm_complete\`
- \`swarmmail_init\`, \`swarmmail_reserve\`, \`swarmmail_send\`
- \`hive_create_epic\`, \`hive_start\`, \`hive_close\`

### IDs and Names
- Cell IDs: \`bd-xxx\`, \`bd-xxx.N\` (subtask format)
- Agent names: BlueLake, RedMountain, GreenValley, etc.
- Epic references: "epic", "subtask", "parent"

### Coordination Language
- "spawn", "worker", "coordinator"
- "reserve", "reservation", "files"
- "blocked", "unblock", "dependency"
- "progress", "complete", "in_progress"

### If You Find Swarm Evidence

Include this in your summary:
1. Epic ID and title
2. Project path
3. Subtask status (running/blocked/done/pending)
4. Any blockers or issues
5. What should happen next

**Then tell the resumed session:**
"This is an active swarm. Check swarm_status and swarmmail_inbox immediately."
`;
2799
+
2800
// Extended hooks type to include experimental compaction hook with new prompt API

/**
 * Shape of the mutable output object handed to the compaction hook.
 * The hook injects continuation context by either pushing onto `context`
 * or (when the host supports it) setting `prompt`.
 */
type CompactionOutput = {
  context: string[];
  prompt?: string; // NEW API from OpenCode PR #5907
};

/**
 * Plugin hooks plus the experimental compaction hook, which receives the
 * session being compacted and the mutable CompactionOutput to fill in.
 */
type ExtendedHooks = Hooks & {
  "experimental.session.compacting"?: (
    input: { sessionID: string },
    output: CompactionOutput,
  ) => Promise<void>;
};
2812
+
2813
+ // NOTE: Only default export - named exports cause double registration!
2814
+ // OpenCode's plugin loader calls ALL exports as functions.
2815
+ const SwarmPlugin: Plugin = async (
2816
+ input: PluginInput,
2817
+ ): Promise<ExtendedHooks> => {
2818
+ // CRITICAL: Set project directory from OpenCode input
2819
+ // Without this, CLI uses wrong database path
2820
+ projectDirectory = input.directory;
2821
+
2822
+ // Store SDK client for session message scanning during compaction
2823
+ sdkClient = input.client;
2824
+
2825
+ return {
2826
+ tool: {
2827
+ // Beads
2828
+ hive_create,
2829
+ hive_create_epic,
2830
+ hive_query,
2831
+ hive_update,
2832
+ hive_close,
2833
+ hive_start,
2834
+ hive_ready,
2835
+ hive_cells,
2836
+ hive_sync,
2837
+ beads_link_thread,
2838
+ // Session Handoff (Chainlink)
2839
+ hive_session_start,
2840
+ hive_session_end,
2841
+ // Swarm Mail (Embedded)
2842
+ swarmmail_init,
2843
+ swarmmail_send,
2844
+ swarmmail_inbox,
2845
+ swarmmail_read_message,
2846
+ swarmmail_reserve,
2847
+ swarmmail_release,
2848
+ swarmmail_release_all,
2849
+ swarmmail_release_agent,
2850
+ swarmmail_ack,
2851
+ swarmmail_health,
2852
+ // Structured
2853
+ structured_extract_json,
2854
+ structured_validate,
2855
+ structured_parse_evaluation,
2856
+ structured_parse_decomposition,
2857
+ structured_parse_cell_tree,
2858
+ // Swarm
2859
+ swarm_init,
2860
+ swarm_select_strategy,
2861
+ swarm_plan_prompt,
2862
+ swarm_decompose,
2863
+ swarm_validate_decomposition,
2864
+ swarm_status,
2865
+ swarm_progress,
2866
+ swarm_complete,
2867
+ swarm_record_outcome,
2868
+ swarm_subtask_prompt,
2869
+ swarm_spawn_subtask,
2870
+ swarm_complete_subtask,
2871
+ swarm_evaluation_prompt,
2872
+ swarm_broadcast,
2873
+ // Worktree Isolation
2874
+ swarm_worktree_create,
2875
+ swarm_worktree_merge,
2876
+ swarm_worktree_cleanup,
2877
+ swarm_worktree_list,
2878
+ // Structured Review
2879
+ swarm_review,
2880
+ swarm_review_feedback,
2881
+ // Adversarial Review (VDD/Chainlink)
2882
+ swarm_adversarial_review,
2883
+ // Skills
2884
+ skills_list,
2885
+ skills_read,
2886
+ skills_use,
2887
+ skills_create,
2888
+ skills_update,
2889
+ skills_delete,
2890
+ skills_init,
2891
+ skills_add_script,
2892
+ skills_execute,
2893
+ // Swarm Insights
2894
+ swarm_get_strategy_insights,
2895
+ swarm_get_file_insights,
2896
+ swarm_get_pattern_insights,
2897
+ // CASS (Cross-Agent Session Search)
2898
+ cass_search,
2899
+ cass_view,
2900
+ cass_expand,
2901
+ cass_health,
2902
+ cass_index,
2903
+ cass_stats,
2904
+ // Hivemind (Unified Memory - Sessions + Learnings)
2905
+ hivemind_store,
2906
+ hivemind_find,
2907
+ hivemind_get,
2908
+ hivemind_remove,
2909
+ hivemind_validate,
2910
+ hivemind_stats,
2911
+ hivemind_index,
2912
+ hivemind_sync,
2913
+ },
2914
+
2915
+ // Swarm-aware compaction hook with LLM-powered continuation prompts
2916
+ // Three-level fallback chain: LLM → static context → detection fallback → none
2917
+ "experimental.session.compacting": async (
2918
+ input: { sessionID: string },
2919
+ output: CompactionOutput,
2920
+ ) => {
2921
+ const startTime = Date.now();
2922
+
2923
+ // =======================================================================
2924
+ // LOG: Compaction hook invoked - capture EVERYTHING we receive
2925
+ // =======================================================================
2926
+ logCompaction("info", "compaction_hook_invoked", {
2927
+ session_id: input.sessionID,
2928
+ project_directory: projectDirectory,
2929
+ input_keys: Object.keys(input),
2930
+ input_full: JSON.parse(JSON.stringify(input)), // Deep clone for logging
2931
+ output_keys: Object.keys(output),
2932
+ output_context_count: output.context?.length ?? 0,
2933
+ output_has_prompt_field: "prompt" in output,
2934
+ output_initial_state: {
2935
+ context: output.context,
2936
+ prompt: (output as any).prompt,
2937
+ },
2938
+ env: {
2939
+ OPENCODE_SESSION_ID: process.env.OPENCODE_SESSION_ID,
2940
+ OPENCODE_MESSAGE_ID: process.env.OPENCODE_MESSAGE_ID,
2941
+ OPENCODE_AGENT: process.env.OPENCODE_AGENT,
2942
+ OPENCODE_LITE_MODEL: process.env.OPENCODE_LITE_MODEL,
2943
+ SWARM_PROJECT_DIR: process.env.SWARM_PROJECT_DIR,
2944
+ },
2945
+ cwd: process.cwd(),
2946
+ timestamp: new Date().toISOString(),
2947
+ });
2948
+
2949
+ // =======================================================================
2950
+ // STEP 1: Scan session messages for swarm tool calls
2951
+ // =======================================================================
2952
+ const sessionScanStart = Date.now();
2953
+ const sessionScan = await scanSessionMessages(input.sessionID);
2954
+ const sessionScanDuration = Date.now() - sessionScanStart;
2955
+
2956
+ logCompaction("info", "session_scan_results", {
2957
+ session_id: input.sessionID,
2958
+ duration_ms: sessionScanDuration,
2959
+ message_count: sessionScan.messageCount,
2960
+ tool_call_count: sessionScan.toolCalls.length,
2961
+ swarm_detected_from_messages: sessionScan.swarmDetected,
2962
+ reasons: sessionScan.reasons,
2963
+ });
2964
+
2965
+ // =======================================================================
2966
+ // STEP 2: Detect swarm state from hive cells
2967
+ // =======================================================================
2968
+ const detectionStart = Date.now();
2969
+ const detection = await detectSwarm();
2970
+ const detectionDuration = Date.now() - detectionStart;
2971
+
2972
+ logCompaction("info", "swarm_detection_complete", {
2973
+ session_id: input.sessionID,
2974
+ duration_ms: detectionDuration,
2975
+ detected: detection.detected,
2976
+ confidence: detection.confidence,
2977
+ reasons: detection.reasons,
2978
+ reason_count: detection.reasons.length,
2979
+ });
2980
+
2981
+ // =======================================================================
2982
+ // STEP 3: Merge session scan with hive detection for final confidence
2983
+ // =======================================================================
2984
+ // If session messages show high-confidence swarm tools, boost confidence
2985
+ if (sessionScan.swarmDetected && sessionScan.reasons.some(r => r.includes("high-confidence"))) {
2986
+ if (detection.confidence === "none" || detection.confidence === "low") {
2987
+ detection.confidence = "high";
2988
+ detection.detected = true;
2989
+ detection.reasons.push(...sessionScan.reasons);
2990
+
2991
+ logCompaction("info", "confidence_boost_from_session_scan", {
2992
+ session_id: input.sessionID,
2993
+ original_confidence: detection.confidence,
2994
+ boosted_to: "high",
2995
+ session_reasons: sessionScan.reasons,
2996
+ });
2997
+ }
2998
+ } else if (sessionScan.swarmDetected) {
2999
+ // Medium boost for any swarm tools found
3000
+ if (detection.confidence === "none") {
3001
+ detection.confidence = "medium";
3002
+ detection.detected = true;
3003
+ detection.reasons.push(...sessionScan.reasons);
3004
+
3005
+ logCompaction("info", "confidence_boost_from_session_scan", {
3006
+ session_id: input.sessionID,
3007
+ original_confidence: "none",
3008
+ boosted_to: "medium",
3009
+ session_reasons: sessionScan.reasons,
3010
+ });
3011
+ } else if (detection.confidence === "low") {
3012
+ detection.confidence = "medium";
3013
+ detection.reasons.push(...sessionScan.reasons);
3014
+
3015
+ logCompaction("info", "confidence_boost_from_session_scan", {
3016
+ session_id: input.sessionID,
3017
+ original_confidence: "low",
3018
+ boosted_to: "medium",
3019
+ session_reasons: sessionScan.reasons,
3020
+ });
3021
+ }
3022
+ }
3023
+
3024
+ logCompaction("info", "final_swarm_detection", {
3025
+ session_id: input.sessionID,
3026
+ confidence: detection.confidence,
3027
+ detected: detection.detected,
3028
+ combined_reasons: detection.reasons,
3029
+ message_scan_contributed: sessionScan.swarmDetected,
3030
+ });
3031
+
3032
+ if (detection.confidence === "high" || detection.confidence === "medium") {
3033
+ // Definite or probable swarm - try LLM-powered compaction
3034
+ logCompaction("info", "swarm_detected_attempting_llm", {
3035
+ session_id: input.sessionID,
3036
+ confidence: detection.confidence,
3037
+ reasons: detection.reasons,
3038
+ has_projection: !!sessionScan.projection?.isSwarm,
3039
+ });
3040
+
3041
+ // Hoist snapshot and queryDuration outside try block so they're available in fallback path
3042
+ let snapshot: SwarmStateSnapshot | undefined;
3043
+ let queryDuration = 0; // 0 if using projection, actual duration if using hive query
3044
+
3045
+ try {
3046
+ // =======================================================================
3047
+ // PREFER PROJECTION (ground truth from events) OVER HIVE QUERY
3048
+ // =======================================================================
3049
+ // The projection is derived from session events - it's the source of truth.
3050
+ // Hive query may show all cells closed even if swarm was active.
3051
+
3052
+ if (sessionScan.projection?.isSwarm) {
3053
+ // Use projection as primary source - convert to snapshot format
3054
+ const proj = sessionScan.projection;
3055
+ snapshot = {
3056
+ sessionID: input.sessionID,
3057
+ detection: {
3058
+ confidence: isSwarmActive(proj) ? "high" : "medium",
3059
+ reasons: sessionScan.reasons,
3060
+ },
3061
+ epic: proj.epic ? {
3062
+ id: proj.epic.id,
3063
+ title: proj.epic.title,
3064
+ status: proj.epic.status,
3065
+ subtasks: Array.from(proj.subtasks.values()).map(s => ({
3066
+ id: s.id,
3067
+ title: s.title,
3068
+ status: s.status as "open" | "in_progress" | "blocked" | "closed",
3069
+ files: s.files,
3070
+ })),
3071
+ } : undefined,
3072
+ messages: [],
3073
+ reservations: [],
3074
+ };
3075
+
3076
+ logCompaction("info", "using_projection_as_snapshot", {
3077
+ session_id: input.sessionID,
3078
+ epic_id: proj.epic?.id,
3079
+ epic_title: proj.epic?.title,
3080
+ subtask_count: proj.subtasks.size,
3081
+ is_active: isSwarmActive(proj),
3082
+ counts: proj.counts,
3083
+ });
3084
+ } else {
3085
+ // Fallback to hive query (may be stale)
3086
+ const queryStart = Date.now();
3087
+ snapshot = await querySwarmState(input.sessionID);
3088
+ queryDuration = Date.now() - queryStart;
3089
+
3090
+ logCompaction("info", "fallback_to_hive_query", {
3091
+ session_id: input.sessionID,
3092
+ duration_ms: queryDuration,
3093
+ reason: "no projection available or not a swarm",
3094
+ });
3095
+ }
3096
+
3097
+ logCompaction("info", "swarm_state_resolved", {
3098
+ session_id: input.sessionID,
3099
+ source: sessionScan.projection?.isSwarm ? "projection" : "hive_query",
3100
+ has_epic: !!snapshot.epic,
3101
+ epic_id: snapshot.epic?.id,
3102
+ epic_title: snapshot.epic?.title,
3103
+ epic_status: snapshot.epic?.status,
3104
+ subtask_count: snapshot.epic?.subtasks?.length ?? 0,
3105
+ subtasks: snapshot.epic?.subtasks?.map(s => ({
3106
+ id: s.id,
3107
+ title: s.title,
3108
+ status: s.status,
3109
+ file_count: s.files?.length ?? 0,
3110
+ })),
3111
+ message_count: snapshot.messages?.length ?? 0,
3112
+ reservation_count: snapshot.reservations?.length ?? 0,
3113
+ detection_confidence: snapshot.detection.confidence,
3114
+ detection_reasons: snapshot.detection.reasons,
3115
+ });
3116
+
3117
+ // =======================================================================
3118
+ // CAPTURE POINT 1: Detection complete - record confidence and reasons
3119
+ // =======================================================================
3120
+ await captureCompaction(
3121
+ input.sessionID,
3122
+ snapshot.epic?.id || "unknown",
3123
+ "detection_complete",
3124
+ {
3125
+ confidence: snapshot.detection.confidence,
3126
+ detected: detection.detected,
3127
+ reasons: snapshot.detection.reasons,
3128
+ session_scan_contributed: sessionScan.swarmDetected,
3129
+ session_scan_reasons: sessionScan.reasons,
3130
+ epic_id: snapshot.epic?.id,
3131
+ epic_title: snapshot.epic?.title,
3132
+ subtask_count: snapshot.epic?.subtasks?.length ?? 0,
3133
+ },
3134
+ );
3135
+
3136
+ // Level 2: Generate prompt with LLM
3137
+ const llmStart = Date.now();
3138
+ const llmPrompt = await generateCompactionPrompt(snapshot);
3139
+ const llmDuration = Date.now() - llmStart;
3140
+
3141
+ logCompaction("info", "llm_generation_complete", {
3142
+ session_id: input.sessionID,
3143
+ duration_ms: llmDuration,
3144
+ success: !!llmPrompt,
3145
+ prompt_length: llmPrompt?.length ?? 0,
3146
+ prompt_preview: llmPrompt?.substring(0, 500),
3147
+ });
3148
+
3149
+ // =======================================================================
3150
+ // CAPTURE POINT 2: Prompt generated - record FULL prompt content
3151
+ // =======================================================================
3152
+ if (llmPrompt) {
3153
+ await captureCompaction(
3154
+ input.sessionID,
3155
+ snapshot.epic?.id || "unknown",
3156
+ "prompt_generated",
3157
+ {
3158
+ prompt_length: llmPrompt.length,
3159
+ full_prompt: llmPrompt, // FULL content, not truncated
3160
+ context_type: "llm_generated",
3161
+ duration_ms: llmDuration,
3162
+ },
3163
+ );
3164
+ }
3165
+
3166
+ if (llmPrompt) {
3167
+ // SUCCESS: Use LLM-generated prompt
3168
+ const header = `[Swarm compaction: LLM-generated, ${detection.reasons.join(", ")}]\n\n`;
3169
+ const fullContent = header + llmPrompt;
3170
+
3171
+ // Progressive enhancement: use new API if available
3172
+ if ("prompt" in output) {
3173
+ output.prompt = fullContent;
3174
+ logCompaction("info", "context_injected_via_prompt_api", {
3175
+ session_id: input.sessionID,
3176
+ content_length: fullContent.length,
3177
+ method: "output.prompt",
3178
+ });
3179
+ } else {
3180
+ output.context.push(fullContent);
3181
+ logCompaction("info", "context_injected_via_context_array", {
3182
+ session_id: input.sessionID,
3183
+ content_length: fullContent.length,
3184
+ method: "output.context.push",
3185
+ context_count_after: output.context.length,
3186
+ });
3187
+ }
3188
+
3189
+ // =======================================================================
3190
+ // CAPTURE POINT 3a: Context injected (LLM path) - record FULL content
3191
+ // =======================================================================
3192
+ await captureCompaction(
3193
+ input.sessionID,
3194
+ snapshot.epic?.id || "unknown",
3195
+ "context_injected",
3196
+ {
3197
+ full_content: fullContent, // FULL content, not truncated
3198
+ content_length: fullContent.length,
3199
+ injection_method: "prompt" in output ? "output.prompt" : "output.context.push",
3200
+ context_type: "llm_generated",
3201
+ },
3202
+ );
3203
+
3204
+ const totalDuration = Date.now() - startTime;
3205
+ logCompaction("info", "compaction_complete_llm_success", {
3206
+ session_id: input.sessionID,
3207
+ total_duration_ms: totalDuration,
3208
+ detection_duration_ms: detectionDuration,
3209
+ query_duration_ms: queryDuration,
3210
+ llm_duration_ms: llmDuration,
3211
+ confidence: detection.confidence,
3212
+ context_type: "llm_generated",
3213
+ content_length: fullContent.length,
3214
+ });
3215
+ return;
3216
+ }
3217
+
3218
+ // LLM failed, fall through to static prompt
3219
+ logCompaction("warn", "llm_generation_returned_null", {
3220
+ session_id: input.sessionID,
3221
+ llm_duration_ms: llmDuration,
3222
+ falling_back_to: "static_prompt",
3223
+ });
3224
+ } catch (err) {
3225
+ // LLM failed, fall through to static prompt
3226
+ logCompaction("error", "llm_generation_failed", {
3227
+ session_id: input.sessionID,
3228
+ error: err instanceof Error ? err.message : String(err),
3229
+ error_stack: err instanceof Error ? err.stack : undefined,
3230
+ falling_back_to: "static_prompt",
3231
+ });
3232
+ }
3233
+
3234
+ // Guard: Don't double-inject if LLM prompt was already set
3235
+ // This can happen if the error occurred after setting output.prompt but before return
3236
+ if ("prompt" in output && output.prompt) {
3237
+ logCompaction("info", "skipping_static_fallback_prompt_already_set", {
3238
+ session_id: input.sessionID,
3239
+ prompt_length: output.prompt.length,
3240
+ });
3241
+ return;
3242
+ }
3243
+
3244
+ // Level 3: Fall back to static context WITH dynamic state from snapshot
3245
+ const header = `[Swarm detected: ${detection.reasons.join(", ")}]\n\n`;
3246
+
3247
+ // Build dynamic state section if we have snapshot data
3248
+ const dynamicState = snapshot ? buildDynamicStateFromSnapshot(snapshot) : "";
3249
+ const staticContent = header + dynamicState + SWARM_COMPACTION_CONTEXT;
3250
+ output.context.push(staticContent);
3251
+
3252
+ // =======================================================================
3253
+ // CAPTURE POINT 3b: Context injected (static fallback) - record FULL content
3254
+ // =======================================================================
3255
+ await captureCompaction(
3256
+ input.sessionID,
3257
+ snapshot?.epic?.id || "unknown",
3258
+ "context_injected",
3259
+ {
3260
+ full_content: staticContent,
3261
+ content_length: staticContent.length,
3262
+ injection_method: "output.context.push",
3263
+ context_type: "static_with_dynamic_state",
3264
+ has_dynamic_state: !!dynamicState,
3265
+ epic_id: snapshot?.epic?.id,
3266
+ subtask_count: snapshot?.epic?.subtasks?.length ?? 0,
3267
+ },
3268
+ );
3269
+
3270
+ const totalDuration = Date.now() - startTime;
3271
+ logCompaction("info", "compaction_complete_static_fallback", {
3272
+ session_id: input.sessionID,
3273
+ total_duration_ms: totalDuration,
3274
+ confidence: detection.confidence,
3275
+ context_type: dynamicState ? "static_with_dynamic_state" : "static_swarm_context",
3276
+ content_length: staticContent.length,
3277
+ context_count_after: output.context.length,
3278
+ has_dynamic_state: !!dynamicState,
3279
+ epic_id: snapshot?.epic?.id,
3280
+ subtask_count: snapshot?.epic?.subtasks?.length ?? 0,
3281
+ });
3282
+ } else if (detection.confidence === "low") {
3283
+ // Level 4: Possible swarm - inject fallback detection prompt
3284
+ const header = `[Possible swarm: ${detection.reasons.join(", ")}]\n\n`;
3285
+ const fallbackContent = header + SWARM_DETECTION_FALLBACK;
3286
+ output.context.push(fallbackContent);
3287
+
3288
+ // =======================================================================
3289
+ // CAPTURE POINT 3c: Context injected (detection fallback) - record FULL content
3290
+ // =======================================================================
3291
+ await captureCompaction(
3292
+ input.sessionID,
3293
+ "unknown", // No snapshot for low confidence
3294
+ "context_injected",
3295
+ {
3296
+ full_content: fallbackContent,
3297
+ content_length: fallbackContent.length,
3298
+ injection_method: "output.context.push",
3299
+ context_type: "detection_fallback",
3300
+ },
3301
+ );
3302
+
3303
+ const totalDuration = Date.now() - startTime;
3304
+ logCompaction("info", "compaction_complete_detection_fallback", {
3305
+ session_id: input.sessionID,
3306
+ total_duration_ms: totalDuration,
3307
+ confidence: detection.confidence,
3308
+ context_type: "detection_fallback",
3309
+ content_length: fallbackContent.length,
3310
+ context_count_after: output.context.length,
3311
+ reasons: detection.reasons,
3312
+ });
3313
+ } else {
3314
+ // Level 5: confidence === "none" - no injection, probably not a swarm
3315
+ const totalDuration = Date.now() - startTime;
3316
+ logCompaction("info", "compaction_complete_no_swarm", {
3317
+ session_id: input.sessionID,
3318
+ total_duration_ms: totalDuration,
3319
+ confidence: detection.confidence,
3320
+ context_type: "none",
3321
+ reasons: detection.reasons,
3322
+ context_count_unchanged: output.context.length,
3323
+ });
3324
+ }
3325
+
3326
+ // =======================================================================
3327
+ // LOG: Final output state
3328
+ // =======================================================================
3329
+ logCompaction("debug", "compaction_hook_complete_final_state", {
3330
+ session_id: input.sessionID,
3331
+ output_context_count: output.context?.length ?? 0,
3332
+ output_context_lengths: output.context?.map(c => c.length) ?? [],
3333
+ output_has_prompt: !!(output as any).prompt,
3334
+ output_prompt_length: (output as any).prompt?.length ?? 0,
3335
+ total_duration_ms: Date.now() - startTime,
3336
+ });
3337
+ },
3338
+ };
3339
+ };
3340
+
3341
+ export default SwarmPlugin;