opencode-swarm-plugin 0.43.0 → 0.44.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. package/bin/cass.characterization.test.ts +422 -0
  2. package/bin/swarm.serve.test.ts +6 -4
  3. package/bin/swarm.test.ts +68 -0
  4. package/bin/swarm.ts +81 -8
  5. package/dist/compaction-prompt-scoring.js +139 -0
  6. package/dist/contributor-tools.d.ts +42 -0
  7. package/dist/contributor-tools.d.ts.map +1 -0
  8. package/dist/eval-capture.js +12811 -0
  9. package/dist/hive.d.ts.map +1 -1
  10. package/dist/index.d.ts +12 -0
  11. package/dist/index.d.ts.map +1 -1
  12. package/dist/index.js +7728 -62590
  13. package/dist/plugin.js +23833 -78695
  14. package/dist/sessions/agent-discovery.d.ts +59 -0
  15. package/dist/sessions/agent-discovery.d.ts.map +1 -0
  16. package/dist/sessions/index.d.ts +10 -0
  17. package/dist/sessions/index.d.ts.map +1 -0
  18. package/dist/swarm-orchestrate.d.ts.map +1 -1
  19. package/dist/swarm-prompts.d.ts.map +1 -1
  20. package/dist/swarm-review.d.ts.map +1 -1
  21. package/package.json +17 -5
  22. package/.changeset/swarm-insights-data-layer.md +0 -63
  23. package/.hive/analysis/eval-failure-analysis-2025-12-25.md +0 -331
  24. package/.hive/analysis/session-data-quality-audit.md +0 -320
  25. package/.hive/eval-results.json +0 -483
  26. package/.hive/issues.jsonl +0 -138
  27. package/.hive/memories.jsonl +0 -729
  28. package/.opencode/eval-history.jsonl +0 -327
  29. package/.turbo/turbo-build.log +0 -9
  30. package/CHANGELOG.md +0 -2255
  31. package/SCORER-ANALYSIS.md +0 -598
  32. package/docs/analysis/subagent-coordination-patterns.md +0 -902
  33. package/docs/analysis-socratic-planner-pattern.md +0 -504
  34. package/docs/planning/ADR-001-monorepo-structure.md +0 -171
  35. package/docs/planning/ADR-002-package-extraction.md +0 -393
  36. package/docs/planning/ADR-003-performance-improvements.md +0 -451
  37. package/docs/planning/ADR-004-message-queue-features.md +0 -187
  38. package/docs/planning/ADR-005-devtools-observability.md +0 -202
  39. package/docs/planning/ADR-007-swarm-enhancements-worktree-review.md +0 -168
  40. package/docs/planning/ADR-008-worker-handoff-protocol.md +0 -293
  41. package/docs/planning/ADR-009-oh-my-opencode-patterns.md +0 -353
  42. package/docs/planning/ROADMAP.md +0 -368
  43. package/docs/semantic-memory-cli-syntax.md +0 -123
  44. package/docs/swarm-mail-architecture.md +0 -1147
  45. package/docs/testing/context-recovery-test.md +0 -470
  46. package/evals/ARCHITECTURE.md +0 -1189
  47. package/evals/README.md +0 -768
  48. package/evals/compaction-prompt.eval.ts +0 -149
  49. package/evals/compaction-resumption.eval.ts +0 -289
  50. package/evals/coordinator-behavior.eval.ts +0 -307
  51. package/evals/coordinator-session.eval.ts +0 -154
  52. package/evals/evalite.config.ts.bak +0 -15
  53. package/evals/example.eval.ts +0 -31
  54. package/evals/fixtures/compaction-cases.ts +0 -350
  55. package/evals/fixtures/compaction-prompt-cases.ts +0 -311
  56. package/evals/fixtures/coordinator-sessions.ts +0 -328
  57. package/evals/fixtures/decomposition-cases.ts +0 -105
  58. package/evals/lib/compaction-loader.test.ts +0 -248
  59. package/evals/lib/compaction-loader.ts +0 -320
  60. package/evals/lib/data-loader.evalite-test.ts +0 -289
  61. package/evals/lib/data-loader.test.ts +0 -345
  62. package/evals/lib/data-loader.ts +0 -281
  63. package/evals/lib/llm.ts +0 -115
  64. package/evals/scorers/compaction-prompt-scorers.ts +0 -145
  65. package/evals/scorers/compaction-scorers.ts +0 -305
  66. package/evals/scorers/coordinator-discipline.evalite-test.ts +0 -539
  67. package/evals/scorers/coordinator-discipline.ts +0 -325
  68. package/evals/scorers/index.test.ts +0 -146
  69. package/evals/scorers/index.ts +0 -328
  70. package/evals/scorers/outcome-scorers.evalite-test.ts +0 -27
  71. package/evals/scorers/outcome-scorers.ts +0 -349
  72. package/evals/swarm-decomposition.eval.ts +0 -121
  73. package/examples/commands/swarm.md +0 -745
  74. package/examples/plugin-wrapper-template.ts +0 -2426
  75. package/examples/skills/hive-workflow/SKILL.md +0 -212
  76. package/examples/skills/skill-creator/SKILL.md +0 -223
  77. package/examples/skills/swarm-coordination/SKILL.md +0 -292
  78. package/global-skills/cli-builder/SKILL.md +0 -344
  79. package/global-skills/cli-builder/references/advanced-patterns.md +0 -244
  80. package/global-skills/learning-systems/SKILL.md +0 -644
  81. package/global-skills/skill-creator/LICENSE.txt +0 -202
  82. package/global-skills/skill-creator/SKILL.md +0 -352
  83. package/global-skills/skill-creator/references/output-patterns.md +0 -82
  84. package/global-skills/skill-creator/references/workflows.md +0 -28
  85. package/global-skills/swarm-coordination/SKILL.md +0 -995
  86. package/global-skills/swarm-coordination/references/coordinator-patterns.md +0 -235
  87. package/global-skills/swarm-coordination/references/strategies.md +0 -138
  88. package/global-skills/system-design/SKILL.md +0 -213
  89. package/global-skills/testing-patterns/SKILL.md +0 -430
  90. package/global-skills/testing-patterns/references/dependency-breaking-catalog.md +0 -586
  91. package/opencode-swarm-plugin-0.30.7.tgz +0 -0
  92. package/opencode-swarm-plugin-0.31.0.tgz +0 -0
  93. package/scripts/cleanup-test-memories.ts +0 -346
  94. package/scripts/init-skill.ts +0 -222
  95. package/scripts/migrate-unknown-sessions.ts +0 -349
  96. package/scripts/validate-skill.ts +0 -204
  97. package/src/agent-mail.ts +0 -1724
  98. package/src/anti-patterns.test.ts +0 -1167
  99. package/src/anti-patterns.ts +0 -448
  100. package/src/compaction-capture.integration.test.ts +0 -257
  101. package/src/compaction-hook.test.ts +0 -838
  102. package/src/compaction-hook.ts +0 -1204
  103. package/src/compaction-observability.integration.test.ts +0 -139
  104. package/src/compaction-observability.test.ts +0 -187
  105. package/src/compaction-observability.ts +0 -324
  106. package/src/compaction-prompt-scorers.test.ts +0 -475
  107. package/src/compaction-prompt-scoring.ts +0 -300
  108. package/src/dashboard.test.ts +0 -611
  109. package/src/dashboard.ts +0 -462
  110. package/src/error-enrichment.test.ts +0 -403
  111. package/src/error-enrichment.ts +0 -219
  112. package/src/eval-capture.test.ts +0 -1015
  113. package/src/eval-capture.ts +0 -929
  114. package/src/eval-gates.test.ts +0 -306
  115. package/src/eval-gates.ts +0 -218
  116. package/src/eval-history.test.ts +0 -508
  117. package/src/eval-history.ts +0 -214
  118. package/src/eval-learning.test.ts +0 -378
  119. package/src/eval-learning.ts +0 -360
  120. package/src/eval-runner.test.ts +0 -223
  121. package/src/eval-runner.ts +0 -402
  122. package/src/export-tools.test.ts +0 -476
  123. package/src/export-tools.ts +0 -257
  124. package/src/hive.integration.test.ts +0 -2241
  125. package/src/hive.ts +0 -1628
  126. package/src/index.ts +0 -935
  127. package/src/learning.integration.test.ts +0 -1815
  128. package/src/learning.ts +0 -1079
  129. package/src/logger.test.ts +0 -189
  130. package/src/logger.ts +0 -135
  131. package/src/mandate-promotion.test.ts +0 -473
  132. package/src/mandate-promotion.ts +0 -239
  133. package/src/mandate-storage.integration.test.ts +0 -601
  134. package/src/mandate-storage.test.ts +0 -578
  135. package/src/mandate-storage.ts +0 -794
  136. package/src/mandates.ts +0 -540
  137. package/src/memory-tools.test.ts +0 -195
  138. package/src/memory-tools.ts +0 -344
  139. package/src/memory.integration.test.ts +0 -334
  140. package/src/memory.test.ts +0 -158
  141. package/src/memory.ts +0 -527
  142. package/src/model-selection.test.ts +0 -188
  143. package/src/model-selection.ts +0 -68
  144. package/src/observability-tools.test.ts +0 -359
  145. package/src/observability-tools.ts +0 -871
  146. package/src/output-guardrails.test.ts +0 -438
  147. package/src/output-guardrails.ts +0 -381
  148. package/src/pattern-maturity.test.ts +0 -1160
  149. package/src/pattern-maturity.ts +0 -525
  150. package/src/planning-guardrails.test.ts +0 -491
  151. package/src/planning-guardrails.ts +0 -438
  152. package/src/plugin.ts +0 -23
  153. package/src/post-compaction-tracker.test.ts +0 -251
  154. package/src/post-compaction-tracker.ts +0 -237
  155. package/src/query-tools.test.ts +0 -636
  156. package/src/query-tools.ts +0 -324
  157. package/src/rate-limiter.integration.test.ts +0 -466
  158. package/src/rate-limiter.ts +0 -774
  159. package/src/replay-tools.test.ts +0 -496
  160. package/src/replay-tools.ts +0 -240
  161. package/src/repo-crawl.integration.test.ts +0 -441
  162. package/src/repo-crawl.ts +0 -610
  163. package/src/schemas/cell-events.test.ts +0 -347
  164. package/src/schemas/cell-events.ts +0 -807
  165. package/src/schemas/cell.ts +0 -257
  166. package/src/schemas/evaluation.ts +0 -166
  167. package/src/schemas/index.test.ts +0 -199
  168. package/src/schemas/index.ts +0 -286
  169. package/src/schemas/mandate.ts +0 -232
  170. package/src/schemas/swarm-context.ts +0 -115
  171. package/src/schemas/task.ts +0 -161
  172. package/src/schemas/worker-handoff.test.ts +0 -302
  173. package/src/schemas/worker-handoff.ts +0 -131
  174. package/src/skills.integration.test.ts +0 -1192
  175. package/src/skills.test.ts +0 -643
  176. package/src/skills.ts +0 -1549
  177. package/src/storage.integration.test.ts +0 -341
  178. package/src/storage.ts +0 -884
  179. package/src/structured.integration.test.ts +0 -817
  180. package/src/structured.test.ts +0 -1046
  181. package/src/structured.ts +0 -762
  182. package/src/swarm-decompose.test.ts +0 -188
  183. package/src/swarm-decompose.ts +0 -1302
  184. package/src/swarm-deferred.integration.test.ts +0 -157
  185. package/src/swarm-deferred.test.ts +0 -38
  186. package/src/swarm-insights.test.ts +0 -214
  187. package/src/swarm-insights.ts +0 -459
  188. package/src/swarm-mail.integration.test.ts +0 -970
  189. package/src/swarm-mail.ts +0 -739
  190. package/src/swarm-orchestrate.integration.test.ts +0 -282
  191. package/src/swarm-orchestrate.test.ts +0 -548
  192. package/src/swarm-orchestrate.ts +0 -3084
  193. package/src/swarm-prompts.test.ts +0 -1270
  194. package/src/swarm-prompts.ts +0 -2077
  195. package/src/swarm-research.integration.test.ts +0 -701
  196. package/src/swarm-research.test.ts +0 -698
  197. package/src/swarm-research.ts +0 -472
  198. package/src/swarm-review.integration.test.ts +0 -285
  199. package/src/swarm-review.test.ts +0 -879
  200. package/src/swarm-review.ts +0 -709
  201. package/src/swarm-strategies.ts +0 -407
  202. package/src/swarm-worktree.test.ts +0 -501
  203. package/src/swarm-worktree.ts +0 -575
  204. package/src/swarm.integration.test.ts +0 -2377
  205. package/src/swarm.ts +0 -38
  206. package/src/tool-adapter.integration.test.ts +0 -1221
  207. package/src/tool-availability.ts +0 -461
  208. package/tsconfig.json +0 -28
@@ -1,2426 +0,0 @@
1
- /**
2
- * OpenCode Swarm Plugin Wrapper
3
- *
4
- * This is a thin wrapper that shells out to the `swarm` CLI for all tool execution.
5
- * Generated by: swarm setup
6
- *
7
- * The plugin only depends on @opencode-ai/plugin (provided by OpenCode).
8
- * All tool logic lives in the npm package - this just bridges to it.
9
- *
10
- * Environment variables:
11
- * - OPENCODE_SESSION_ID: Passed to CLI for session state persistence
12
- * - OPENCODE_MESSAGE_ID: Passed to CLI for context
13
- * - OPENCODE_AGENT: Passed to CLI for context
14
- * - SWARM_PROJECT_DIR: Project directory (critical for database path)
15
- */
16
- import type { Plugin, PluginInput, Hooks } from "@opencode-ai/plugin";
17
- import type { ToolPart } from "@opencode-ai/sdk";
18
- import { tool } from "@opencode-ai/plugin";
19
- import { spawn } from "child_process";
20
- import { appendFileSync, mkdirSync, existsSync } from "node:fs";
21
- import { join } from "node:path";
22
- import { homedir } from "node:os";
23
-
24
// Name of the swarm CLI binary; resolved from PATH by spawn() in execTool.
const SWARM_CLI = "swarm";

// =============================================================================
// File-based Logging (writes to ~/.config/swarm-tools/logs/)
// =============================================================================

// Log directory under the user's home config; created lazily by ensureLogDir().
const LOG_DIR = join(homedir(), ".config", "swarm-tools", "logs");
// JSON-lines file that logCompaction appends to (per its doc, the format is
// compatible with `swarm log`).
const COMPACTION_LOG = join(LOG_DIR, "compaction.log");
32
-
33
- /**
34
- * Ensure log directory exists
35
- */
36
- function ensureLogDir(): void {
37
- if (!existsSync(LOG_DIR)) {
38
- mkdirSync(LOG_DIR, { recursive: true });
39
- }
40
- }
41
-
42
- /**
43
- * Log a compaction event to file (JSON lines format, compatible with `swarm log`)
44
- *
45
- * @param level - Log level (info, debug, warn, error)
46
- * @param msg - Log message
47
- * @param data - Additional structured data
48
- */
49
- function logCompaction(
50
- level: "info" | "debug" | "warn" | "error",
51
- msg: string,
52
- data?: Record<string, unknown>,
53
- ): void {
54
- try {
55
- ensureLogDir();
56
- const entry = JSON.stringify({
57
- time: new Date().toISOString(),
58
- level,
59
- msg,
60
- ...data,
61
- });
62
- appendFileSync(COMPACTION_LOG, entry + "\n");
63
- } catch {
64
- // Silently fail - logging should never break the plugin
65
- }
66
- }
67
-
68
- /**
69
- * Capture compaction event for evals (non-fatal dynamic import)
70
- *
71
- * Uses dynamic import to avoid circular dependencies and keep the plugin wrapper
72
- * self-contained. Captures COMPACTION events to session JSONL for eval analysis.
73
- *
74
- * @param sessionID - Session ID
75
- * @param epicID - Epic ID (or "unknown" if not detected)
76
- * @param compactionType - Event type (detection_complete, prompt_generated, context_injected)
77
- * @param payload - Event-specific data (full prompts, detection results, etc.)
78
- */
79
- async function captureCompaction(
80
- sessionID: string,
81
- epicID: string,
82
- compactionType: "detection_complete" | "prompt_generated" | "context_injected",
83
- payload: any,
84
- ): Promise<void> {
85
- try {
86
- // Dynamic import to avoid circular deps (plugin wrapper → src → plugin wrapper)
87
- const { captureCompactionEvent } = await import("../src/eval-capture");
88
- captureCompactionEvent({
89
- session_id: sessionID,
90
- epic_id: epicID,
91
- compaction_type: compactionType,
92
- payload,
93
- });
94
- } catch (err) {
95
- // Non-fatal - capture failures shouldn't break compaction
96
- logCompaction("warn", "compaction_capture_failed", {
97
- session_id: sessionID,
98
- compaction_type: compactionType,
99
- error: err instanceof Error ? err.message : String(err),
100
- });
101
- }
102
- }
103
-
104
// Module-level project directory - set during plugin initialization
// This is CRITICAL: without it, the CLI uses process.cwd() which may be wrong
let projectDirectory: string = process.cwd();

// Module-level SDK client - set during plugin initialization
// Used for scanning session messages during compaction
// NOTE(review): typed `any` because the SDK client type is not imported here;
// presumably an OpenCode SDK client instance — confirm before tightening.
let sdkClient: any = null;
111
-
112
// =============================================================================
// CLI Execution Helper
// =============================================================================

/**
 * Execute a swarm tool via CLI
 *
 * Spawns `swarm tool <name> --json '<args>'` and returns the result.
 * Passes session context via environment variables.
 *
 * IMPORTANT: Runs in projectDirectory (set by OpenCode) not process.cwd()
 *
 * Result protocol (as implemented below):
 * - exit 0 with `{success: true, data}`  -> resolves with unwrapped data
 * - exit 0 with `{success: false, error}` -> rejects with that error
 * - exit 2 -> unknown tool; exit 3 -> invalid JSON args
 * - any other exit code -> best-effort parse of stdout for a structured
 *   error, falling back to raw stderr/stdout text
 */
async function execTool(
  name: string,
  args: Record<string, unknown>,
  ctx: { sessionID: string; messageID: string; agent: string },
): Promise<string> {
  return new Promise((resolve, reject) => {
    // Omit --json entirely when there are no args.
    const hasArgs = Object.keys(args).length > 0;
    const cliArgs = hasArgs
      ? ["tool", name, "--json", JSON.stringify(args)]
      : ["tool", name];

    const proc = spawn(SWARM_CLI, cliArgs, {
      cwd: projectDirectory, // Run in project directory, not plugin directory
      stdio: ["ignore", "pipe", "pipe"],
      env: {
        ...process.env,
        OPENCODE_SESSION_ID: ctx.sessionID,
        OPENCODE_MESSAGE_ID: ctx.messageID,
        OPENCODE_AGENT: ctx.agent,
        SWARM_PROJECT_DIR: projectDirectory, // Also pass as env var
      },
    });

    // Accumulate both streams; Buffers are coerced to strings by `+=`.
    let stdout = "";
    let stderr = "";

    proc.stdout.on("data", (data) => {
      stdout += data;
    });
    proc.stderr.on("data", (data) => {
      stderr += data;
    });

    proc.on("close", (code) => {
      if (code === 0) {
        // Success - return the JSON output
        try {
          const result = JSON.parse(stdout);
          if (result.success && result.data !== undefined) {
            // Unwrap the data for cleaner tool output
            resolve(
              typeof result.data === "string"
                ? result.data
                : JSON.stringify(result.data, null, 2),
            );
          } else if (!result.success && result.error) {
            // Tool returned an error in JSON format
            // Handle both string errors and object errors with .message
            const errorMsg = typeof result.error === "string"
              ? result.error
              : (result.error.message || "Tool execution failed");
            reject(new Error(errorMsg));
          } else {
            // JSON but not in the expected envelope: pass through verbatim.
            resolve(stdout);
          }
        } catch {
          // Non-JSON output on success is returned as-is (deliberate fallback).
          resolve(stdout);
        }
      } else if (code === 2) {
        // CLI convention: exit 2 means the tool name was not recognized.
        reject(new Error(`Unknown tool: ${name}`));
      } else if (code === 3) {
        // CLI convention: exit 3 means the --json payload failed to parse.
        reject(new Error(`Invalid JSON args: ${stderr}`));
      } else {
        // Tool returned error
        try {
          const result = JSON.parse(stdout);
          if (!result.success && result.error) {
            // Handle both string errors and object errors with .message
            const errorMsg = typeof result.error === "string"
              ? result.error
              : (result.error.message || `Tool failed with code ${code}`);
            reject(new Error(errorMsg));
          } else {
            reject(
              new Error(stderr || stdout || `Tool failed with code ${code}`),
            );
          }
        } catch {
          // stdout was not JSON; fall back to whatever text is available.
          reject(
            new Error(stderr || stdout || `Tool failed with code ${code}`),
          );
        }
      }
    });

    proc.on("error", (err) => {
      // ENOENT from spawn means the binary itself was not found on PATH.
      if ((err as NodeJS.ErrnoException).code === "ENOENT") {
        reject(
          new Error(
            `swarm CLI not found. Install with: npm install -g opencode-swarm-plugin`,
          ),
        );
      } else {
        reject(err);
      }
    });
  });
}
222
-
223
// =============================================================================
// Beads Tools
// =============================================================================
// Each declaration below is a thin bridge: the schema is exposed to OpenCode
// here, and execution shells out to the matching `swarm tool <name>` command
// via execTool. Descriptions are shown to the model verbatim.

// Create a single bead (issue record).
const hive_create = tool({
  description: "Create a new bead with type-safe validation",
  args: {
    title: tool.schema.string().describe("Bead title"),
    type: tool.schema
      .enum(["bug", "feature", "task", "epic", "chore"])
      .optional()
      .describe("Issue type (default: task)"),
    priority: tool.schema
      .number()
      .min(0)
      .max(3)
      .optional()
      .describe("Priority 0-3 (default: 2)"),
    description: tool.schema.string().optional().describe("Bead description"),
    parent_id: tool.schema
      .string()
      .optional()
      .describe("Parent bead ID for epic children"),
  },
  execute: (args, ctx) => execTool("hive_create", args, ctx),
});

// Create an epic plus its subtasks in one atomic CLI call.
const hive_create_epic = tool({
  description: "Create epic with subtasks in one atomic operation",
  args: {
    epic_title: tool.schema.string().describe("Epic title"),
    epic_description: tool.schema
      .string()
      .optional()
      .describe("Epic description"),
    subtasks: tool.schema
      .array(
        tool.schema.object({
          title: tool.schema.string(),
          priority: tool.schema.number().min(0).max(3).optional(),
          files: tool.schema.array(tool.schema.string()).optional(),
        }),
      )
      .describe("Subtasks to create under the epic"),
  },
  execute: (args, ctx) => execTool("hive_create_epic", args, ctx),
});

// Query beads with optional status/type/ready/limit filters.
const hive_query = tool({
  description: "Query beads with filters (replaces bd list, bd ready, bd wip)",
  args: {
    status: tool.schema
      .enum(["open", "in_progress", "blocked", "closed"])
      .optional()
      .describe("Filter by status"),
    type: tool.schema
      .enum(["bug", "feature", "task", "epic", "chore"])
      .optional()
      .describe("Filter by type"),
    ready: tool.schema
      .boolean()
      .optional()
      .describe("Only show unblocked beads"),
    limit: tool.schema
      .number()
      .optional()
      .describe("Max results (default: 20)"),
  },
  execute: (args, ctx) => execTool("hive_query", args, ctx),
});

// Update mutable fields (status/description/priority) of an existing bead.
const hive_update = tool({
  description: "Update bead status/description",
  args: {
    id: tool.schema.string().describe("Cell ID"),
    status: tool.schema
      .enum(["open", "in_progress", "blocked", "closed"])
      .optional()
      .describe("New status"),
    description: tool.schema.string().optional().describe("New description"),
    priority: tool.schema
      .number()
      .min(0)
      .max(3)
      .optional()
      .describe("New priority"),
  },
  execute: (args, ctx) => execTool("hive_update", args, ctx),
});

// Close a bead, recording the reason.
const hive_close = tool({
  description: "Close a bead with reason",
  args: {
    id: tool.schema.string().describe("Cell ID"),
    reason: tool.schema.string().describe("Completion reason"),
  },
  execute: (args, ctx) => execTool("hive_close", args, ctx),
});

// Transition a bead to in-progress.
const hive_start = tool({
  description: "Mark a bead as in-progress",
  args: {
    id: tool.schema.string().describe("Cell ID"),
  },
  execute: (args, ctx) => execTool("hive_start", args, ctx),
});

// Fetch the next unblocked, highest-priority bead (no arguments).
const hive_ready = tool({
  description: "Get the next ready bead (unblocked, highest priority)",
  args: {},
  execute: (args, ctx) => execTool("hive_ready", args, ctx),
});

// Persist beads to git and push; optionally pull first.
const hive_sync = tool({
  description: "Sync beads to git and push (MANDATORY at session end)",
  args: {
    auto_pull: tool.schema.boolean().optional().describe("Pull before sync"),
  },
  execute: (args, ctx) => execTool("hive_sync", args, ctx),
});

// Flexible cell query. The long template-literal description is model-facing
// guidance and must be preserved verbatim.
const hive_cells = tool({
  description: `Query cells from the hive database with flexible filtering.

USE THIS TOOL TO:
- List all open cells: hive_cells()
- Find cells by status: hive_cells({ status: "in_progress" })
- Find cells by type: hive_cells({ type: "bug" })
- Get a specific cell by partial ID: hive_cells({ id: "mjkmd" })
- Get the next ready (unblocked) cell: hive_cells({ ready: true })
- Combine filters: hive_cells({ status: "open", type: "task" })

RETURNS: Array of cells with id, title, status, priority, type, parent_id, created_at, updated_at

PREFER THIS OVER hive_query when you need to:
- See what work is available
- Check status of multiple cells
- Find cells matching criteria
- Look up a cell by partial ID`,
  args: {
    id: tool.schema.string().optional().describe("Partial or full cell ID to look up"),
    status: tool.schema.enum(["open", "in_progress", "blocked", "closed"]).optional().describe("Filter by status"),
    type: tool.schema.enum(["task", "bug", "feature", "epic", "chore"]).optional().describe("Filter by type"),
    ready: tool.schema.boolean().optional().describe("If true, return only the next unblocked cell"),
    limit: tool.schema.number().optional().describe("Max cells to return (default 20)"),
  },
  execute: (args, ctx) => execTool("hive_cells", args, ctx),
});

// Attach an Agent Mail thread reference to a bead's metadata.
const beads_link_thread = tool({
  description: "Add metadata linking bead to Agent Mail thread",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    thread_id: tool.schema.string().describe("Agent Mail thread ID"),
  },
  execute: (args, ctx) => execTool("beads_link_thread", args, ctx),
});
380
-
381
// =============================================================================
// Swarm Mail Tools (Embedded)
// =============================================================================
// Inter-agent messaging and file-reservation bridge; execution is delegated
// to the `swarm` CLI via execTool.

// Start a Swarm Mail session; must run before the other swarmmail_* tools.
const swarmmail_init = tool({
  description: "Initialize Swarm Mail session (REQUIRED FIRST)",
  args: {
    project_path: tool.schema.string().describe("Absolute path to the project"),
    agent_name: tool.schema.string().optional().describe("Custom agent name"),
    task_description: tool.schema
      .string()
      .optional()
      .describe("Task description"),
  },
  execute: (args, ctx) => execTool("swarmmail_init", args, ctx),
});

// Send a message to one or more named agents.
const swarmmail_send = tool({
  description: "Send message to other agents via Swarm Mail",
  args: {
    to: tool.schema
      .array(tool.schema.string())
      .describe("Recipient agent names"),
    subject: tool.schema.string().describe("Message subject"),
    body: tool.schema.string().describe("Message body"),
    thread_id: tool.schema
      .string()
      .optional()
      .describe("Thread ID for grouping"),
    importance: tool.schema
      .enum(["low", "normal", "high", "urgent"])
      .optional()
      .describe("Message importance"),
    ack_required: tool.schema
      .boolean()
      .optional()
      .describe("Require acknowledgment"),
  },
  execute: (args, ctx) => execTool("swarmmail_send", args, ctx),
});

// List inbox headers only; bodies are fetched individually to save context.
const swarmmail_inbox = tool({
  description: "Fetch inbox (CONTEXT-SAFE: bodies excluded, max 5 messages)",
  args: {
    limit: tool.schema
      .number()
      .max(5)
      .optional()
      .describe("Max messages (max 5)"),
    urgent_only: tool.schema
      .boolean()
      .optional()
      .describe("Only urgent messages"),
  },
  execute: (args, ctx) => execTool("swarmmail_inbox", args, ctx),
});

// Fetch a single message body by numeric ID.
const swarmmail_read_message = tool({
  description: "Fetch ONE message body by ID",
  args: {
    message_id: tool.schema.number().describe("Message ID"),
  },
  execute: (args, ctx) => execTool("swarmmail_read_message", args, ctx),
});

// Reserve file paths so other agents do not edit them concurrently.
const swarmmail_reserve = tool({
  description: "Reserve file paths for exclusive editing",
  args: {
    paths: tool.schema
      .array(tool.schema.string())
      .describe("File paths/patterns"),
    ttl_seconds: tool.schema.number().optional().describe("Reservation TTL"),
    exclusive: tool.schema.boolean().optional().describe("Exclusive lock"),
    reason: tool.schema.string().optional().describe("Reservation reason"),
  },
  execute: (args, ctx) => execTool("swarmmail_reserve", args, ctx),
});

// Release reservations, addressed by path or by reservation ID.
const swarmmail_release = tool({
  description: "Release file reservations",
  args: {
    paths: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Paths to release"),
    reservation_ids: tool.schema
      .array(tool.schema.number())
      .optional()
      .describe("Reservation IDs"),
  },
  execute: (args, ctx) => execTool("swarmmail_release", args, ctx),
});

// Acknowledge a message that requested an ack.
const swarmmail_ack = tool({
  description: "Acknowledge a message",
  args: {
    message_id: tool.schema.number().describe("Message ID"),
  },
  execute: (args, ctx) => execTool("swarmmail_ack", args, ctx),
});

// Health probe for the Swarm Mail database (no arguments).
const swarmmail_health = tool({
  description: "Check Swarm Mail database health",
  args: {},
  execute: (args, ctx) => execTool("swarmmail_health", args, ctx),
});
487
-
488
// =============================================================================
// Structured Tools
// =============================================================================
// Helpers for extracting and validating structured (JSON/schema) output from
// agent responses; execution is delegated to the CLI.

// Pull a JSON payload out of markdown or free text.
const structured_extract_json = tool({
  description: "Extract JSON from markdown/text response",
  args: {
    text: tool.schema.string().describe("Text containing JSON"),
  },
  execute: (args, ctx) => execTool("structured_extract_json", args, ctx),
});

// Validate a response against a named schema, with bounded retries.
const structured_validate = tool({
  description: "Validate agent response against a schema",
  args: {
    response: tool.schema.string().describe("Agent response to validate"),
    schema_name: tool.schema
      .enum(["evaluation", "task_decomposition", "cell_tree"])
      .describe("Schema to validate against"),
    max_retries: tool.schema
      .number()
      .min(1)
      .max(5)
      .optional()
      .describe("Max retries"),
  },
  execute: (args, ctx) => execTool("structured_validate", args, ctx),
});

// Parse and validate an evaluation response.
const structured_parse_evaluation = tool({
  description: "Parse and validate evaluation response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_evaluation", args, ctx),
});

// Parse and validate a task-decomposition response.
const structured_parse_decomposition = tool({
  description: "Parse and validate task decomposition response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_decomposition", args, ctx),
});

// Parse and validate a bead-tree response.
const structured_parse_cell_tree = tool({
  description: "Parse and validate bead tree response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_cell_tree", args, ctx),
});
540
-
541
// =============================================================================
// Swarm Tools
// =============================================================================
// Orchestration bridge: session init, strategy selection, decomposition
// prompt generation, validation, and status reporting — all via the CLI.

// Initialize a swarm session and probe tool availability.
const swarm_init = tool({
  description: "Initialize swarm session and check tool availability",
  args: {
    project_path: tool.schema.string().optional().describe("Project path"),
    isolation: tool.schema
      .enum(["worktree", "reservation"])
      .optional()
      .describe(
        "Isolation mode: 'worktree' for git worktree isolation, 'reservation' for file reservations (default)",
      ),
  },
  execute: (args, ctx) => execTool("swarm_init", args, ctx),
});

// Recommend a decomposition strategy for a task.
const swarm_select_strategy = tool({
  description: "Analyze task and recommend decomposition strategy",
  args: {
    task: tool.schema.string().min(1).describe("Task to analyze"),
    codebase_context: tool.schema
      .string()
      .optional()
      .describe("Codebase context"),
  },
  execute: (args, ctx) => execTool("swarm_select_strategy", args, ctx),
});

// Build a strategy-specific decomposition prompt (optionally CASS-informed).
const swarm_plan_prompt = tool({
  description: "Generate strategy-specific decomposition prompt",
  args: {
    task: tool.schema.string().min(1).describe("Task to decompose"),
    strategy: tool.schema
      .enum(["file-based", "feature-based", "risk-based", "auto"])
      .optional()
      .describe("Decomposition strategy"),
    max_subtasks: tool.schema
      .number()
      .int()
      .min(2)
      .max(10)
      .optional()
      .describe("Max subtasks"),
    context: tool.schema.string().optional().describe("Additional context"),
    query_cass: tool.schema
      .boolean()
      .optional()
      .describe("Query CASS for similar tasks"),
    cass_limit: tool.schema
      .number()
      .int()
      .min(1)
      .max(10)
      .optional()
      .describe("CASS limit"),
  },
  execute: (args, ctx) => execTool("swarm_plan_prompt", args, ctx),
});

// Build a generic decomposition prompt for breaking a task into subtasks.
const swarm_decompose = tool({
  description: "Generate decomposition prompt for breaking task into subtasks",
  args: {
    task: tool.schema.string().min(1).describe("Task to decompose"),
    max_subtasks: tool.schema
      .number()
      .int()
      .min(2)
      .max(10)
      .optional()
      .describe("Max subtasks"),
    context: tool.schema.string().optional().describe("Additional context"),
    query_cass: tool.schema.boolean().optional().describe("Query CASS"),
    cass_limit: tool.schema
      .number()
      .int()
      .min(1)
      .max(10)
      .optional()
      .describe("CASS limit"),
  },
  execute: (args, ctx) => execTool("swarm_decompose", args, ctx),
});

// Validate a decomposition response against the cell-tree schema.
const swarm_validate_decomposition = tool({
  description: "Validate a decomposition response against CellTreeSchema",
  args: {
    response: tool.schema.string().describe("Decomposition response"),
  },
  execute: (args, ctx) => execTool("swarm_validate_decomposition", args, ctx),
});

// Report overall swarm status for an epic.
const swarm_status = tool({
  description: "Get status of a swarm by epic ID",
  args: {
    epic_id: tool.schema.string().describe("Epic bead ID"),
    project_key: tool.schema.string().describe("Project key"),
  },
  execute: (args, ctx) => execTool("swarm_status", args, ctx),
});
642
-
643
- const swarm_progress = tool({
644
- description: "Report progress on a subtask to coordinator",
645
- args: {
646
- project_key: tool.schema.string().describe("Project key"),
647
- agent_name: tool.schema.string().describe("Agent name"),
648
- bead_id: tool.schema.string().describe("Cell ID"),
649
- status: tool.schema
650
- .enum(["in_progress", "blocked", "completed", "failed"])
651
- .describe("Status"),
652
- message: tool.schema.string().optional().describe("Progress message"),
653
- progress_percent: tool.schema
654
- .number()
655
- .min(0)
656
- .max(100)
657
- .optional()
658
- .describe("Progress %"),
659
- files_touched: tool.schema
660
- .array(tool.schema.string())
661
- .optional()
662
- .describe("Files modified"),
663
- },
664
- execute: (args, ctx) => execTool("swarm_progress", args, ctx),
665
- });
666
-
667
- const swarm_complete = tool({
668
- description:
669
- "Mark subtask complete with Verification Gate. Runs UBS scan, typecheck, and tests before allowing completion.",
670
- args: {
671
- project_key: tool.schema.string().describe("Project key"),
672
- agent_name: tool.schema.string().describe("Agent name"),
673
- bead_id: tool.schema.string().describe("Cell ID"),
674
- summary: tool.schema.string().describe("Completion summary"),
675
- evaluation: tool.schema.string().optional().describe("Self-evaluation JSON"),
676
- files_touched: tool.schema
677
- .array(tool.schema.string())
678
- .optional()
679
- .describe("Files modified - will be verified"),
680
- skip_ubs_scan: tool.schema.boolean().optional().describe("Skip UBS scan"),
681
- skip_verification: tool.schema
682
- .boolean()
683
- .optional()
684
- .describe("Skip ALL verification (UBS, typecheck, tests)"),
685
- skip_review: tool.schema
686
- .boolean()
687
- .optional()
688
- .describe("Skip review gate check"),
689
- },
690
- execute: (args, ctx) => execTool("swarm_complete", args, ctx),
691
- });
692
-
693
- const swarm_record_outcome = tool({
694
- description: "Record subtask outcome for implicit feedback scoring",
695
- args: {
696
- bead_id: tool.schema.string().describe("Cell ID"),
697
- duration_ms: tool.schema.number().int().min(0).describe("Duration in ms"),
698
- error_count: tool.schema
699
- .number()
700
- .int()
701
- .min(0)
702
- .optional()
703
- .describe("Error count"),
704
- retry_count: tool.schema
705
- .number()
706
- .int()
707
- .min(0)
708
- .optional()
709
- .describe("Retry count"),
710
- success: tool.schema.boolean().describe("Whether task succeeded"),
711
- files_touched: tool.schema
712
- .array(tool.schema.string())
713
- .optional()
714
- .describe("Files modified"),
715
- criteria: tool.schema
716
- .array(tool.schema.string())
717
- .optional()
718
- .describe("Evaluation criteria"),
719
- strategy: tool.schema
720
- .enum(["file-based", "feature-based", "risk-based"])
721
- .optional()
722
- .describe("Strategy used"),
723
- },
724
- execute: (args, ctx) => execTool("swarm_record_outcome", args, ctx),
725
- });
726
-
727
- const swarm_subtask_prompt = tool({
728
- description: "Generate the prompt for a spawned subtask agent",
729
- args: {
730
- agent_name: tool.schema.string().describe("Agent name"),
731
- bead_id: tool.schema.string().describe("Cell ID"),
732
- epic_id: tool.schema.string().describe("Epic ID"),
733
- subtask_title: tool.schema.string().describe("Subtask title"),
734
- subtask_description: tool.schema
735
- .string()
736
- .optional()
737
- .describe("Description"),
738
- files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
739
- shared_context: tool.schema.string().optional().describe("Shared context"),
740
- },
741
- execute: (args, ctx) => execTool("swarm_subtask_prompt", args, ctx),
742
- });
743
-
744
- const swarm_spawn_subtask = tool({
745
- description: "Prepare a subtask for spawning with Task tool",
746
- args: {
747
- bead_id: tool.schema.string().describe("Cell ID"),
748
- epic_id: tool.schema.string().describe("Epic ID"),
749
- subtask_title: tool.schema.string().describe("Subtask title"),
750
- subtask_description: tool.schema
751
- .string()
752
- .optional()
753
- .describe("Description"),
754
- files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
755
- shared_context: tool.schema.string().optional().describe("Shared context"),
756
- },
757
- execute: (args, ctx) => execTool("swarm_spawn_subtask", args, ctx),
758
- });
759
-
760
- const swarm_complete_subtask = tool({
761
- description: "Handle subtask completion after Task agent returns",
762
- args: {
763
- bead_id: tool.schema.string().describe("Cell ID"),
764
- task_result: tool.schema.string().describe("Task result JSON"),
765
- files_touched: tool.schema
766
- .array(tool.schema.string())
767
- .optional()
768
- .describe("Files modified"),
769
- },
770
- execute: (args, ctx) => execTool("swarm_complete_subtask", args, ctx),
771
- });
772
-
773
- const swarm_evaluation_prompt = tool({
774
- description: "Generate self-evaluation prompt for a completed subtask",
775
- args: {
776
- bead_id: tool.schema.string().describe("Cell ID"),
777
- subtask_title: tool.schema.string().describe("Subtask title"),
778
- files_touched: tool.schema
779
- .array(tool.schema.string())
780
- .describe("Files modified"),
781
- },
782
- execute: (args, ctx) => execTool("swarm_evaluation_prompt", args, ctx),
783
- });
784
-
785
- const swarm_broadcast = tool({
786
- description:
787
- "Broadcast context update to all agents working on the same epic",
788
- args: {
789
- project_path: tool.schema.string().describe("Project path"),
790
- agent_name: tool.schema.string().describe("Agent name"),
791
- epic_id: tool.schema.string().describe("Epic ID"),
792
- message: tool.schema.string().describe("Context update message"),
793
- importance: tool.schema
794
- .enum(["info", "warning", "blocker"])
795
- .optional()
796
- .describe("Priority level (default: info)"),
797
- files_affected: tool.schema
798
- .array(tool.schema.string())
799
- .optional()
800
- .describe("Files this context relates to"),
801
- },
802
- execute: (args, ctx) => execTool("swarm_broadcast", args, ctx),
803
- });
804
-
805
- // =============================================================================
806
- // Worktree Isolation Tools
807
- // =============================================================================
808
-
809
- const swarm_worktree_create = tool({
810
- description:
811
- "Create a git worktree for isolated task execution. Worker operates in worktree, not main branch.",
812
- args: {
813
- project_path: tool.schema.string().describe("Absolute path to project root"),
814
- task_id: tool.schema.string().describe("Task/bead ID (e.g., bd-abc123.1)"),
815
- start_commit: tool.schema
816
- .string()
817
- .describe("Commit SHA to create worktree at (swarm start point)"),
818
- },
819
- execute: (args, ctx) => execTool("swarm_worktree_create", args, ctx),
820
- });
821
-
822
- const swarm_worktree_merge = tool({
823
- description:
824
- "Cherry-pick commits from worktree back to main branch. Call after worker completes.",
825
- args: {
826
- project_path: tool.schema.string().describe("Absolute path to project root"),
827
- task_id: tool.schema.string().describe("Task/bead ID"),
828
- start_commit: tool.schema
829
- .string()
830
- .optional()
831
- .describe("Original start commit (to find new commits)"),
832
- },
833
- execute: (args, ctx) => execTool("swarm_worktree_merge", args, ctx),
834
- });
835
-
836
- const swarm_worktree_cleanup = tool({
837
- description:
838
- "Remove a worktree after completion or abort. Idempotent - safe to call multiple times.",
839
- args: {
840
- project_path: tool.schema.string().describe("Absolute path to project root"),
841
- task_id: tool.schema.string().optional().describe("Task/bead ID to clean up"),
842
- cleanup_all: tool.schema
843
- .boolean()
844
- .optional()
845
- .describe("Remove all worktrees for this project"),
846
- },
847
- execute: (args, ctx) => execTool("swarm_worktree_cleanup", args, ctx),
848
- });
849
-
850
- const swarm_worktree_list = tool({
851
- description: "List all active worktrees for a project",
852
- args: {
853
- project_path: tool.schema.string().describe("Absolute path to project root"),
854
- },
855
- execute: (args, ctx) => execTool("swarm_worktree_list", args, ctx),
856
- });
857
-
858
- // =============================================================================
859
- // Structured Review Tools
860
- // =============================================================================
861
-
862
- const swarm_review = tool({
863
- description:
864
- "Generate a review prompt for a completed subtask. Includes epic context, dependencies, and diff.",
865
- args: {
866
- project_key: tool.schema.string().describe("Project path"),
867
- epic_id: tool.schema.string().describe("Epic bead ID"),
868
- task_id: tool.schema.string().describe("Subtask bead ID to review"),
869
- files_touched: tool.schema
870
- .array(tool.schema.string())
871
- .optional()
872
- .describe("Files modified (will get diff for these)"),
873
- },
874
- execute: (args, ctx) => execTool("swarm_review", args, ctx),
875
- });
876
-
877
- const swarm_review_feedback = tool({
878
- description:
879
- "Send review feedback to a worker. Tracks attempts (max 3). Fails task after 3 rejections.",
880
- args: {
881
- project_key: tool.schema.string().describe("Project path"),
882
- task_id: tool.schema.string().describe("Subtask bead ID"),
883
- worker_id: tool.schema.string().describe("Worker agent name"),
884
- status: tool.schema
885
- .enum(["approved", "needs_changes"])
886
- .describe("Review status"),
887
- summary: tool.schema.string().optional().describe("Review summary"),
888
- issues: tool.schema
889
- .string()
890
- .optional()
891
- .describe("JSON array of ReviewIssue objects (for needs_changes)"),
892
- },
893
- execute: (args, ctx) => execTool("swarm_review_feedback", args, ctx),
894
- });
895
-
896
- // =============================================================================
897
- // Skills Tools
898
- // =============================================================================
899
-
900
- const skills_list = tool({
901
- description:
902
- "List all available skills from global, project, and bundled sources",
903
- args: {
904
- source: tool.schema
905
- .enum(["all", "global", "project", "bundled"])
906
- .optional()
907
- .describe("Filter by source (default: all)"),
908
- },
909
- execute: (args, ctx) => execTool("skills_list", args, ctx),
910
- });
911
-
912
- const skills_read = tool({
913
- description: "Read a skill's full content including SKILL.md and references",
914
- args: {
915
- name: tool.schema.string().describe("Skill name"),
916
- },
917
- execute: (args, ctx) => execTool("skills_read", args, ctx),
918
- });
919
-
920
- const skills_use = tool({
921
- description:
922
- "Get skill content formatted for injection into agent context. Use this when you need to apply a skill's knowledge to the current task.",
923
- args: {
924
- name: tool.schema.string().describe("Skill name"),
925
- context: tool.schema
926
- .string()
927
- .optional()
928
- .describe("Optional context about how the skill will be used"),
929
- },
930
- execute: (args, ctx) => execTool("skills_use", args, ctx),
931
- });
932
-
933
- const skills_create = tool({
934
- description: "Create a new skill with SKILL.md template",
935
- args: {
936
- name: tool.schema.string().describe("Skill name (kebab-case)"),
937
- description: tool.schema.string().describe("Brief skill description"),
938
- scope: tool.schema
939
- .enum(["global", "project"])
940
- .optional()
941
- .describe("Where to create (default: project)"),
942
- tags: tool.schema
943
- .array(tool.schema.string())
944
- .optional()
945
- .describe("Skill tags for discovery"),
946
- },
947
- execute: (args, ctx) => execTool("skills_create", args, ctx),
948
- });
949
-
950
- const skills_update = tool({
951
- description: "Update an existing skill's SKILL.md content",
952
- args: {
953
- name: tool.schema.string().describe("Skill name"),
954
- content: tool.schema.string().describe("New SKILL.md content"),
955
- },
956
- execute: (args, ctx) => execTool("skills_update", args, ctx),
957
- });
958
-
959
- const skills_delete = tool({
960
- description: "Delete a skill (project skills only)",
961
- args: {
962
- name: tool.schema.string().describe("Skill name"),
963
- },
964
- execute: (args, ctx) => execTool("skills_delete", args, ctx),
965
- });
966
-
967
- const skills_init = tool({
968
- description: "Initialize skills directory in current project",
969
- args: {
970
- path: tool.schema
971
- .string()
972
- .optional()
973
- .describe("Custom path (default: .opencode/skills)"),
974
- },
975
- execute: (args, ctx) => execTool("skills_init", args, ctx),
976
- });
977
-
978
- const skills_add_script = tool({
979
- description: "Add an executable script to a skill",
980
- args: {
981
- skill_name: tool.schema.string().describe("Skill name"),
982
- script_name: tool.schema.string().describe("Script filename"),
983
- content: tool.schema.string().describe("Script content"),
984
- executable: tool.schema
985
- .boolean()
986
- .optional()
987
- .describe("Make executable (default: true)"),
988
- },
989
- execute: (args, ctx) => execTool("skills_add_script", args, ctx),
990
- });
991
-
992
- const skills_execute = tool({
993
- description: "Execute a skill's script",
994
- args: {
995
- skill_name: tool.schema.string().describe("Skill name"),
996
- script_name: tool.schema.string().describe("Script to execute"),
997
- args: tool.schema
998
- .array(tool.schema.string())
999
- .optional()
1000
- .describe("Script arguments"),
1001
- },
1002
- execute: (args, ctx) => execTool("skills_execute", args, ctx),
1003
- });
1004
-
1005
- // =============================================================================
1006
- // Swarm Insights Tools
1007
- // =============================================================================
1008
-
1009
- const swarm_get_strategy_insights = tool({
1010
- description: "Get strategy success rates for decomposition planning. Use this when planning task decomposition to see which strategies (file-based, feature-based, risk-based) have historically succeeded or failed. Returns success rates and recommendations based on past swarm outcomes.",
1011
- args: {
1012
- task: tool.schema.string().describe("Task description to analyze for strategy recommendation"),
1013
- },
1014
- execute: (args, ctx) => execTool("swarm_get_strategy_insights", args, ctx),
1015
- });
1016
-
1017
- const swarm_get_file_insights = tool({
1018
- description: "Get file-specific gotchas for worker context. Use this when assigning files to workers to warn them about historical failure patterns. Queries past outcomes and semantic memory for file-specific learnings (edge cases, common bugs, performance traps).",
1019
- args: {
1020
- files: tool.schema.array(tool.schema.string()).describe("File paths to get insights for"),
1021
- },
1022
- execute: (args, ctx) => execTool("swarm_get_file_insights", args, ctx),
1023
- });
1024
-
1025
- const swarm_get_pattern_insights = tool({
1026
- description: "Get common failure patterns across swarms. Use this during planning or when debugging stuck swarms to see recurring anti-patterns (type errors, timeouts, conflicts, test failures). Returns top 5 most frequent failure patterns with recommendations.",
1027
- args: {},
1028
- execute: (args, ctx) => execTool("swarm_get_pattern_insights", args, ctx),
1029
- });
1030
-
1031
- // =============================================================================
1032
- // Plugin Export
1033
- // =============================================================================
1034
-
1035
- // =============================================================================
1036
- // Compaction Hook - Swarm Recovery Context
1037
- // =============================================================================
1038
-
1039
- /**
1040
- * Detection result with confidence level
1041
- */
1042
- interface SwarmDetection {
1043
- detected: boolean;
1044
- confidence: "high" | "medium" | "low" | "none";
1045
- reasons: string[];
1046
- }
1047
-
1048
- /**
1049
- * Structured state snapshot for LLM-powered compaction
1050
- *
1051
- * This is passed to the lite model to generate a continuation prompt
1052
- * with concrete data instead of just instructions.
1053
- */
1054
- interface SwarmStateSnapshot {
1055
- sessionID: string;
1056
- detection: {
1057
- confidence: "high" | "medium" | "low" | "none";
1058
- reasons: string[];
1059
- };
1060
- epic?: {
1061
- id: string;
1062
- title: string;
1063
- status: string;
1064
- subtasks: Array<{
1065
- id: string;
1066
- title: string;
1067
- status: "open" | "in_progress" | "blocked" | "closed";
1068
- files: string[];
1069
- assignedTo?: string;
1070
- }>;
1071
- };
1072
- messages: Array<{
1073
- from: string;
1074
- to: string[];
1075
- subject: string;
1076
- body: string;
1077
- timestamp: number;
1078
- importance?: string;
1079
- }>;
1080
- reservations: Array<{
1081
- agent: string;
1082
- paths: string[];
1083
- exclusive: boolean;
1084
- expiresAt: number;
1085
- }>;
1086
- }
1087
-
1088
- /**
1089
- * Query actual swarm state using spawn (like detectSwarm does)
1090
- *
1091
- * Returns structured snapshot of current state for LLM compaction.
1092
- * Shells out to swarm CLI to get real data.
1093
- */
1094
- async function querySwarmState(sessionID: string): Promise<SwarmStateSnapshot> {
1095
- const startTime = Date.now();
1096
-
1097
- logCompaction("debug", "query_swarm_state_start", {
1098
- session_id: sessionID,
1099
- project_directory: projectDirectory,
1100
- });
1101
-
1102
- try {
1103
- // Query cells via swarm CLI
1104
- const cliStart = Date.now();
1105
- const cellsResult = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
1106
- (resolve) => {
1107
- const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
1108
- cwd: projectDirectory,
1109
- stdio: ["ignore", "pipe", "pipe"],
1110
- });
1111
- let stdout = "";
1112
- let stderr = "";
1113
- proc.stdout.on("data", (d) => {
1114
- stdout += d;
1115
- });
1116
- proc.stderr.on("data", (d) => {
1117
- stderr += d;
1118
- });
1119
- proc.on("close", (exitCode) =>
1120
- resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
1121
- );
1122
- },
1123
- );
1124
- const cliDuration = Date.now() - cliStart;
1125
-
1126
- logCompaction("debug", "query_swarm_state_cli_complete", {
1127
- session_id: sessionID,
1128
- duration_ms: cliDuration,
1129
- exit_code: cellsResult.exitCode,
1130
- stdout_length: cellsResult.stdout.length,
1131
- stderr_length: cellsResult.stderr.length,
1132
- });
1133
-
1134
- let cells: any[] = [];
1135
- if (cellsResult.exitCode === 0) {
1136
- try {
1137
- const parsed = JSON.parse(cellsResult.stdout);
1138
- // Handle wrapped response: { success: true, data: [...] }
1139
- cells = Array.isArray(parsed) ? parsed : (parsed?.data ?? []);
1140
- } catch (parseErr) {
1141
- logCompaction("error", "query_swarm_state_parse_failed", {
1142
- session_id: sessionID,
1143
- error: parseErr instanceof Error ? parseErr.message : String(parseErr),
1144
- stdout_preview: cellsResult.stdout.substring(0, 500),
1145
- });
1146
- }
1147
- }
1148
-
1149
- logCompaction("debug", "query_swarm_state_cells_parsed", {
1150
- session_id: sessionID,
1151
- cell_count: cells.length,
1152
- cells: cells.map((c: any) => ({
1153
- id: c.id,
1154
- title: c.title,
1155
- type: c.type,
1156
- status: c.status,
1157
- parent_id: c.parent_id,
1158
- })),
1159
- });
1160
-
1161
- // Find active epic (first unclosed epic with subtasks)
1162
- const openEpics = cells.filter(
1163
- (c: { type?: string; status: string }) =>
1164
- c.type === "epic" && c.status !== "closed",
1165
- );
1166
- const epic = openEpics[0];
1167
-
1168
- logCompaction("debug", "query_swarm_state_epics", {
1169
- session_id: sessionID,
1170
- open_epic_count: openEpics.length,
1171
- selected_epic: epic ? { id: epic.id, title: epic.title, status: epic.status } : null,
1172
- });
1173
-
1174
- // Get subtasks if we have an epic
1175
- const subtasks =
1176
- epic && epic.id
1177
- ? cells.filter(
1178
- (c: { parent_id?: string }) => c.parent_id === epic.id,
1179
- )
1180
- : [];
1181
-
1182
- logCompaction("debug", "query_swarm_state_subtasks", {
1183
- session_id: sessionID,
1184
- subtask_count: subtasks.length,
1185
- subtasks: subtasks.map((s: any) => ({
1186
- id: s.id,
1187
- title: s.title,
1188
- status: s.status,
1189
- files: s.files,
1190
- })),
1191
- });
1192
-
1193
- // TODO: Query swarm mail for messages and reservations
1194
- // For MVP, use empty arrays - the fallback chain handles this
1195
- const messages: SwarmStateSnapshot["messages"] = [];
1196
- const reservations: SwarmStateSnapshot["reservations"] = [];
1197
-
1198
- // Run detection for confidence (already logged internally)
1199
- const detection = await detectSwarm();
1200
-
1201
- const snapshot: SwarmStateSnapshot = {
1202
- sessionID,
1203
- detection: {
1204
- confidence: detection.confidence,
1205
- reasons: detection.reasons,
1206
- },
1207
- epic: epic
1208
- ? {
1209
- id: epic.id,
1210
- title: epic.title,
1211
- status: epic.status,
1212
- subtasks: subtasks.map((s: {
1213
- id: string;
1214
- title: string;
1215
- status: string;
1216
- files?: string[];
1217
- }) => ({
1218
- id: s.id,
1219
- title: s.title,
1220
- status: s.status as "open" | "in_progress" | "blocked" | "closed",
1221
- files: s.files || [],
1222
- })),
1223
- }
1224
- : undefined,
1225
- messages,
1226
- reservations,
1227
- };
1228
-
1229
- const totalDuration = Date.now() - startTime;
1230
- logCompaction("debug", "query_swarm_state_complete", {
1231
- session_id: sessionID,
1232
- duration_ms: totalDuration,
1233
- has_epic: !!snapshot.epic,
1234
- epic_id: snapshot.epic?.id,
1235
- subtask_count: snapshot.epic?.subtasks?.length ?? 0,
1236
- message_count: snapshot.messages.length,
1237
- reservation_count: snapshot.reservations.length,
1238
- });
1239
-
1240
- return snapshot;
1241
- } catch (err) {
1242
- logCompaction("error", "query_swarm_state_exception", {
1243
- session_id: sessionID,
1244
- error: err instanceof Error ? err.message : String(err),
1245
- stack: err instanceof Error ? err.stack : undefined,
1246
- duration_ms: Date.now() - startTime,
1247
- });
1248
-
1249
- // If query fails, return minimal snapshot
1250
- const detection = await detectSwarm();
1251
- return {
1252
- sessionID,
1253
- detection: {
1254
- confidence: detection.confidence,
1255
- reasons: detection.reasons,
1256
- },
1257
- messages: [],
1258
- reservations: [],
1259
- };
1260
- }
1261
- }
1262
-
1263
- /**
1264
- * Generate compaction prompt using LLM
1265
- *
1266
- * Shells out to `opencode run -m <liteModel>` with structured state.
1267
- * Returns markdown continuation prompt or null on failure.
1268
- *
1269
- * Timeout: 30 seconds
1270
- */
1271
- async function generateCompactionPrompt(
1272
- snapshot: SwarmStateSnapshot,
1273
- ): Promise<string | null> {
1274
- const startTime = Date.now();
1275
- const liteModel = process.env.OPENCODE_LITE_MODEL || "__SWARM_LITE_MODEL__";
1276
-
1277
- logCompaction("debug", "generate_compaction_prompt_start", {
1278
- session_id: snapshot.sessionID,
1279
- lite_model: liteModel,
1280
- has_epic: !!snapshot.epic,
1281
- epic_id: snapshot.epic?.id,
1282
- subtask_count: snapshot.epic?.subtasks?.length ?? 0,
1283
- snapshot_size: JSON.stringify(snapshot).length,
1284
- });
1285
-
1286
- try {
1287
- const promptText = `You are generating a continuation prompt for a compacted swarm coordination session.
1288
-
1289
- Analyze this swarm state and generate a structured markdown prompt that will be given to the resumed session:
1290
-
1291
- ${JSON.stringify(snapshot, null, 2)}
1292
-
1293
- Generate a prompt following this structure:
1294
-
1295
- ┌─────────────────────────────────────────────────────────────┐
1296
- │ │
1297
- │ 🐝 YOU ARE THE COORDINATOR 🐝 │
1298
- │ │
1299
- │ NOT A WORKER. NOT AN IMPLEMENTER. │
1300
- │ YOU ORCHESTRATE. │
1301
- │ │
1302
- └─────────────────────────────────────────────────────────────┘
1303
-
1304
- # 🐝 Swarm Continuation - [Epic Title or "Unknown"]
1305
-
1306
- **NON-NEGOTIABLE: YOU ARE THE COORDINATOR.** You resumed after context compaction.
1307
-
1308
- ## Epic State
1309
-
1310
- **ID:** [epic ID or "Unknown"]
1311
- **Title:** [epic title or "No active epic"]
1312
- **Status:** [X/Y subtasks complete]
1313
- **Project:** ${projectDirectory}
1314
-
1315
- ## Subtask Status
1316
-
1317
- ### ✅ Completed (N)
1318
- [List completed subtasks with IDs]
1319
-
1320
- ### 🚧 In Progress (N)
1321
- [List in-progress subtasks with IDs, files, agents if known]
1322
-
1323
- ### 🚫 Blocked (N)
1324
- [List blocked subtasks]
1325
-
1326
- ### ⏳ Pending (N)
1327
- [List pending subtasks]
1328
-
1329
- ## Next Actions (IMMEDIATE)
1330
-
1331
- [List 3-5 concrete actions with actual commands, using real IDs from the state]
1332
-
1333
- ## 🎯 COORDINATOR MANDATES (NON-NEGOTIABLE)
1334
-
1335
- **YOU ARE THE COORDINATOR. NOT A WORKER.**
1336
-
1337
- ### ⛔ FORBIDDEN - NEVER do these:
1338
- - ❌ NEVER use \`edit\`, \`write\`, or \`bash\` for implementation - SPAWN A WORKER
1339
- - ❌ NEVER fetch directly with \`repo-crawl_*\`, \`repo-autopsy_*\`, \`webfetch\`, \`fetch_fetch\` - SPAWN A RESEARCHER
1340
- - ❌ NEVER use \`context7_*\` or \`pdf-brain_*\` directly - SPAWN A RESEARCHER
1341
- - ❌ NEVER reserve files - Workers reserve files
1342
-
1343
- ### ✅ ALWAYS do these:
1344
- - ✅ ALWAYS check \`swarm_status\` and \`swarmmail_inbox\` first
1345
- - ✅ ALWAYS use \`swarm_spawn_subtask\` for implementation work
1346
- - ✅ ALWAYS use \`swarm_spawn_researcher\` for external data fetching
1347
- - ✅ ALWAYS review worker output with \`swarm_review\` → \`swarm_review_feedback\`
1348
- - ✅ ALWAYS monitor actively - Check messages every ~10 minutes
1349
- - ✅ ALWAYS unblock aggressively - Resolve dependencies immediately
1350
-
1351
- **If you need external data:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
1352
-
1353
- **3-strike rule enforced:** Workers get 3 review attempts. After 3 rejections, escalate to human.
1354
-
1355
- Keep the prompt concise but actionable. Use actual data from the snapshot, not placeholders. Include the ASCII header and ALL coordinator mandates.`;
1356
-
1357
- logCompaction("debug", "generate_compaction_prompt_calling_llm", {
1358
- session_id: snapshot.sessionID,
1359
- prompt_length: promptText.length,
1360
- model: liteModel,
1361
- command: `opencode run -m ${liteModel} -- <prompt>`,
1362
- });
1363
-
1364
- const llmStart = Date.now();
1365
- const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
1366
- (resolve, reject) => {
1367
- const proc = spawn("opencode", ["run", "-m", liteModel, "--", promptText], {
1368
- cwd: projectDirectory,
1369
- stdio: ["ignore", "pipe", "pipe"],
1370
- timeout: 30000, // 30 second timeout
1371
- });
1372
-
1373
- let stdout = "";
1374
- let stderr = "";
1375
-
1376
- proc.stdout.on("data", (d) => {
1377
- stdout += d;
1378
- });
1379
- proc.stderr.on("data", (d) => {
1380
- stderr += d;
1381
- });
1382
-
1383
- proc.on("close", (exitCode) => {
1384
- resolve({ exitCode: exitCode ?? 1, stdout, stderr });
1385
- });
1386
-
1387
- proc.on("error", (err) => {
1388
- reject(err);
1389
- });
1390
-
1391
- // Timeout handling
1392
- setTimeout(() => {
1393
- proc.kill("SIGTERM");
1394
- reject(new Error("LLM compaction timeout (30s)"));
1395
- }, 30000);
1396
- },
1397
- );
1398
- const llmDuration = Date.now() - llmStart;
1399
-
1400
- logCompaction("debug", "generate_compaction_prompt_llm_complete", {
1401
- session_id: snapshot.sessionID,
1402
- duration_ms: llmDuration,
1403
- exit_code: result.exitCode,
1404
- stdout_length: result.stdout.length,
1405
- stderr_length: result.stderr.length,
1406
- stderr_preview: result.stderr.substring(0, 500),
1407
- stdout_preview: result.stdout.substring(0, 500),
1408
- });
1409
-
1410
- if (result.exitCode !== 0) {
1411
- logCompaction("error", "generate_compaction_prompt_llm_failed", {
1412
- session_id: snapshot.sessionID,
1413
- exit_code: result.exitCode,
1414
- stderr: result.stderr,
1415
- stdout: result.stdout,
1416
- duration_ms: llmDuration,
1417
- });
1418
- return null;
1419
- }
1420
-
1421
- // Extract the prompt from stdout (LLM may wrap in markdown)
1422
- const prompt = result.stdout.trim();
1423
-
1424
- const totalDuration = Date.now() - startTime;
1425
- logCompaction("debug", "generate_compaction_prompt_success", {
1426
- session_id: snapshot.sessionID,
1427
- total_duration_ms: totalDuration,
1428
- llm_duration_ms: llmDuration,
1429
- prompt_length: prompt.length,
1430
- prompt_preview: prompt.substring(0, 500),
1431
- prompt_has_content: prompt.length > 0,
1432
- });
1433
-
1434
- return prompt.length > 0 ? prompt : null;
1435
- } catch (err) {
1436
- const totalDuration = Date.now() - startTime;
1437
- logCompaction("error", "generate_compaction_prompt_exception", {
1438
- session_id: snapshot.sessionID,
1439
- error: err instanceof Error ? err.message : String(err),
1440
- stack: err instanceof Error ? err.stack : undefined,
1441
- duration_ms: totalDuration,
1442
- });
1443
- return null;
1444
- }
1445
- }
1446
-
1447
- /**
1448
- * Session message scan result
1449
- */
1450
- interface SessionScanResult {
1451
- messageCount: number;
1452
- toolCalls: Array<{
1453
- toolName: string;
1454
- args: Record<string, unknown>;
1455
- output?: string;
1456
- }>;
1457
- swarmDetected: boolean;
1458
- reasons: string[];
1459
- }
1460
-
1461
- /**
1462
- * Scan session messages for swarm tool calls
1463
- *
1464
- * Uses SDK client to fetch messages and look for swarm activity.
1465
- * This can detect swarm work even if no cells exist yet.
1466
- */
1467
- async function scanSessionMessages(sessionID: string): Promise<SessionScanResult> {
1468
- const startTime = Date.now();
1469
- const result: SessionScanResult = {
1470
- messageCount: 0,
1471
- toolCalls: [],
1472
- swarmDetected: false,
1473
- reasons: [],
1474
- };
1475
-
1476
- logCompaction("debug", "session_scan_start", {
1477
- session_id: sessionID,
1478
- has_sdk_client: !!sdkClient,
1479
- });
1480
-
1481
- if (!sdkClient) {
1482
- logCompaction("warn", "session_scan_no_sdk_client", {
1483
- session_id: sessionID,
1484
- });
1485
- return result;
1486
- }
1487
-
1488
- try {
1489
- // Fetch session messages
1490
- const messagesStart = Date.now();
1491
- const rawResponse = await sdkClient.session.messages({ path: { id: sessionID } });
1492
- const messagesDuration = Date.now() - messagesStart;
1493
-
1494
- // Log the RAW response to understand its shape
1495
- logCompaction("debug", "session_scan_raw_response", {
1496
- session_id: sessionID,
1497
- response_type: typeof rawResponse,
1498
- is_array: Array.isArray(rawResponse),
1499
- is_null: rawResponse === null,
1500
- is_undefined: rawResponse === undefined,
1501
- keys: rawResponse && typeof rawResponse === 'object' ? Object.keys(rawResponse) : [],
1502
- raw_preview: JSON.stringify(rawResponse)?.slice(0, 500),
1503
- });
1504
-
1505
- // The response might be wrapped - check common patterns
1506
- const messages = Array.isArray(rawResponse)
1507
- ? rawResponse
1508
- : rawResponse?.data
1509
- ? rawResponse.data
1510
- : rawResponse?.messages
1511
- ? rawResponse.messages
1512
- : rawResponse?.items
1513
- ? rawResponse.items
1514
- : [];
1515
-
1516
- result.messageCount = messages?.length ?? 0;
1517
-
1518
- logCompaction("debug", "session_scan_messages_fetched", {
1519
- session_id: sessionID,
1520
- duration_ms: messagesDuration,
1521
- message_count: result.messageCount,
1522
- extraction_method: Array.isArray(rawResponse) ? 'direct_array' : rawResponse?.data ? 'data_field' : rawResponse?.messages ? 'messages_field' : rawResponse?.items ? 'items_field' : 'fallback_empty',
1523
- });
1524
-
1525
- if (!Array.isArray(messages) || messages.length === 0) {
1526
- logCompaction("debug", "session_scan_no_messages", {
1527
- session_id: sessionID,
1528
- });
1529
- return result;
1530
- }
1531
-
1532
- // Swarm-related tool patterns
1533
- const swarmTools = [
1534
- // High confidence - active swarm coordination
1535
- "hive_create_epic",
1536
- "swarm_decompose",
1537
- "swarm_spawn_subtask",
1538
- "swarm_complete",
1539
- "swarmmail_init",
1540
- "swarmmail_reserve",
1541
- // Medium confidence - swarm activity
1542
- "hive_start",
1543
- "hive_close",
1544
- "swarm_status",
1545
- "swarm_progress",
1546
- "swarmmail_send",
1547
- // Low confidence - possible swarm
1548
- "hive_create",
1549
- "hive_query",
1550
- ];
1551
-
1552
- const highConfidenceTools = new Set([
1553
- "hive_create_epic",
1554
- "swarm_decompose",
1555
- "swarm_spawn_subtask",
1556
- "swarmmail_init",
1557
- "swarmmail_reserve",
1558
- ]);
1559
-
1560
- // Scan messages for tool calls
1561
- let swarmToolCount = 0;
1562
- let highConfidenceCount = 0;
1563
-
1564
- // Debug: collect part types to understand message structure
1565
- const partTypeCounts: Record<string, number> = {};
1566
- let messagesWithParts = 0;
1567
- let messagesWithoutParts = 0;
1568
- let samplePartTypes: string[] = [];
1569
-
1570
- for (const message of messages) {
1571
- if (!message.parts || !Array.isArray(message.parts)) {
1572
- messagesWithoutParts++;
1573
- continue;
1574
- }
1575
- messagesWithParts++;
1576
-
1577
- for (const part of message.parts) {
1578
- const partType = part.type || "unknown";
1579
- partTypeCounts[partType] = (partTypeCounts[partType] || 0) + 1;
1580
-
1581
- // Collect first 10 unique part types for debugging
1582
- if (samplePartTypes.length < 10 && !samplePartTypes.includes(partType)) {
1583
- samplePartTypes.push(partType);
1584
- }
1585
-
1586
- // Check if this is a tool call part
1587
- // OpenCode SDK: ToolPart has type="tool", tool=<string name>, state={...}
1588
- if (part.type === "tool") {
1589
- const toolPart = part as ToolPart;
1590
- const toolName = toolPart.tool; // tool name is a string directly
1591
-
1592
- if (toolName && swarmTools.includes(toolName)) {
1593
- swarmToolCount++;
1594
-
1595
- if (highConfidenceTools.has(toolName)) {
1596
- highConfidenceCount++;
1597
- }
1598
-
1599
- // Extract args/output from state if available
1600
- const state = toolPart.state;
1601
- const args = state && "input" in state ? state.input : {};
1602
- const output = state && "output" in state ? state.output : undefined;
1603
-
1604
- result.toolCalls.push({
1605
- toolName,
1606
- args,
1607
- output,
1608
- });
1609
-
1610
- logCompaction("debug", "session_scan_tool_found", {
1611
- session_id: sessionID,
1612
- tool_name: toolName,
1613
- is_high_confidence: highConfidenceTools.has(toolName),
1614
- });
1615
- }
1616
- }
1617
- }
1618
- }
1619
-
1620
- // Determine if swarm detected based on tool calls
1621
- if (highConfidenceCount > 0) {
1622
- result.swarmDetected = true;
1623
- result.reasons.push(`${highConfidenceCount} high-confidence swarm tools (${Array.from(new Set(result.toolCalls.filter(tc => highConfidenceTools.has(tc.toolName)).map(tc => tc.toolName))).join(", ")})`);
1624
- }
1625
-
1626
- if (swarmToolCount > 0 && !result.swarmDetected) {
1627
- result.swarmDetected = true;
1628
- result.reasons.push(`${swarmToolCount} swarm-related tools used`);
1629
- }
1630
-
1631
- const totalDuration = Date.now() - startTime;
1632
-
1633
- // Debug: log part type distribution to understand message structure
1634
- logCompaction("debug", "session_scan_part_types", {
1635
- session_id: sessionID,
1636
- messages_with_parts: messagesWithParts,
1637
- messages_without_parts: messagesWithoutParts,
1638
- part_type_counts: partTypeCounts,
1639
- sample_part_types: samplePartTypes,
1640
- });
1641
-
1642
- logCompaction("info", "session_scan_complete", {
1643
- session_id: sessionID,
1644
- duration_ms: totalDuration,
1645
- message_count: result.messageCount,
1646
- tool_call_count: result.toolCalls.length,
1647
- swarm_tool_count: swarmToolCount,
1648
- high_confidence_count: highConfidenceCount,
1649
- swarm_detected: result.swarmDetected,
1650
- reasons: result.reasons,
1651
- unique_tools: Array.from(new Set(result.toolCalls.map(tc => tc.toolName))),
1652
- });
1653
-
1654
- return result;
1655
- } catch (err) {
1656
- const totalDuration = Date.now() - startTime;
1657
- logCompaction("error", "session_scan_exception", {
1658
- session_id: sessionID,
1659
- error: err instanceof Error ? err.message : String(err),
1660
- stack: err instanceof Error ? err.stack : undefined,
1661
- duration_ms: totalDuration,
1662
- });
1663
- return result;
1664
- }
1665
- }
1666
-
1667
- /**
1668
- * Check for swarm sign - evidence a swarm passed through
1669
- *
1670
- * Uses multiple signals with different confidence levels:
1671
- * - HIGH: in_progress cells (active work)
1672
- * - MEDIUM: Open subtasks, unclosed epics, recently updated cells
1673
- * - LOW: Any cells exist
1674
- *
1675
- * Philosophy: Err on the side of continuation.
1676
- * False positive = extra context (low cost)
1677
- * False negative = lost swarm (high cost)
1678
- */
1679
- async function detectSwarm(): Promise<SwarmDetection> {
1680
- const startTime = Date.now();
1681
- const reasons: string[] = [];
1682
- let highConfidence = false;
1683
- let mediumConfidence = false;
1684
- let lowConfidence = false;
1685
-
1686
- logCompaction("debug", "detect_swarm_start", {
1687
- project_directory: projectDirectory,
1688
- cwd: process.cwd(),
1689
- });
1690
-
1691
- try {
1692
- const cliStart = Date.now();
1693
- const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
1694
- (resolve) => {
1695
- // Use swarm tool to query beads
1696
- const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
1697
- cwd: projectDirectory,
1698
- stdio: ["ignore", "pipe", "pipe"],
1699
- });
1700
- let stdout = "";
1701
- let stderr = "";
1702
- proc.stdout.on("data", (d) => {
1703
- stdout += d;
1704
- });
1705
- proc.stderr.on("data", (d) => {
1706
- stderr += d;
1707
- });
1708
- proc.on("close", (exitCode) =>
1709
- resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
1710
- );
1711
- },
1712
- );
1713
- const cliDuration = Date.now() - cliStart;
1714
-
1715
- logCompaction("debug", "detect_swarm_cli_complete", {
1716
- duration_ms: cliDuration,
1717
- exit_code: result.exitCode,
1718
- stdout_length: result.stdout.length,
1719
- stderr_length: result.stderr.length,
1720
- stderr_preview: result.stderr.substring(0, 200),
1721
- });
1722
-
1723
- if (result.exitCode !== 0) {
1724
- logCompaction("warn", "detect_swarm_cli_failed", {
1725
- exit_code: result.exitCode,
1726
- stderr: result.stderr,
1727
- });
1728
- return { detected: false, confidence: "none", reasons: ["hive_query failed"] };
1729
- }
1730
-
1731
- let cells: any[];
1732
- try {
1733
- cells = JSON.parse(result.stdout);
1734
- } catch (parseErr) {
1735
- logCompaction("error", "detect_swarm_parse_failed", {
1736
- error: parseErr instanceof Error ? parseErr.message : String(parseErr),
1737
- stdout_preview: result.stdout.substring(0, 500),
1738
- });
1739
- return { detected: false, confidence: "none", reasons: ["hive_query parse failed"] };
1740
- }
1741
-
1742
- if (!Array.isArray(cells) || cells.length === 0) {
1743
- logCompaction("debug", "detect_swarm_no_cells", {
1744
- is_array: Array.isArray(cells),
1745
- length: cells?.length ?? 0,
1746
- });
1747
- return { detected: false, confidence: "none", reasons: ["no cells found"] };
1748
- }
1749
-
1750
- // Log ALL cells for debugging
1751
- logCompaction("debug", "detect_swarm_cells_found", {
1752
- total_cells: cells.length,
1753
- cells: cells.map((c: any) => ({
1754
- id: c.id,
1755
- title: c.title,
1756
- type: c.type,
1757
- status: c.status,
1758
- parent_id: c.parent_id,
1759
- updated_at: c.updated_at,
1760
- created_at: c.created_at,
1761
- })),
1762
- });
1763
-
1764
- // HIGH: Any in_progress cells
1765
- const inProgress = cells.filter(
1766
- (c: { status: string }) => c.status === "in_progress"
1767
- );
1768
- if (inProgress.length > 0) {
1769
- highConfidence = true;
1770
- reasons.push(`${inProgress.length} cells in_progress`);
1771
- logCompaction("debug", "detect_swarm_in_progress", {
1772
- count: inProgress.length,
1773
- cells: inProgress.map((c: any) => ({ id: c.id, title: c.title })),
1774
- });
1775
- }
1776
-
1777
- // MEDIUM: Open subtasks (cells with parent_id)
1778
- const subtasks = cells.filter(
1779
- (c: { status: string; parent_id?: string }) =>
1780
- c.status === "open" && c.parent_id
1781
- );
1782
- if (subtasks.length > 0) {
1783
- mediumConfidence = true;
1784
- reasons.push(`${subtasks.length} open subtasks`);
1785
- logCompaction("debug", "detect_swarm_open_subtasks", {
1786
- count: subtasks.length,
1787
- cells: subtasks.map((c: any) => ({ id: c.id, title: c.title, parent_id: c.parent_id })),
1788
- });
1789
- }
1790
-
1791
- // MEDIUM: Unclosed epics
1792
- const openEpics = cells.filter(
1793
- (c: { status: string; type?: string }) =>
1794
- c.type === "epic" && c.status !== "closed"
1795
- );
1796
- if (openEpics.length > 0) {
1797
- mediumConfidence = true;
1798
- reasons.push(`${openEpics.length} unclosed epics`);
1799
- logCompaction("debug", "detect_swarm_open_epics", {
1800
- count: openEpics.length,
1801
- cells: openEpics.map((c: any) => ({ id: c.id, title: c.title, status: c.status })),
1802
- });
1803
- }
1804
-
1805
- // MEDIUM: Recently updated cells (last hour)
1806
- const oneHourAgo = Date.now() - 60 * 60 * 1000;
1807
- const recentCells = cells.filter(
1808
- (c: { updated_at?: number }) => c.updated_at && c.updated_at > oneHourAgo
1809
- );
1810
- if (recentCells.length > 0) {
1811
- mediumConfidence = true;
1812
- reasons.push(`${recentCells.length} cells updated in last hour`);
1813
- logCompaction("debug", "detect_swarm_recent_cells", {
1814
- count: recentCells.length,
1815
- one_hour_ago: oneHourAgo,
1816
- cells: recentCells.map((c: any) => ({
1817
- id: c.id,
1818
- title: c.title,
1819
- updated_at: c.updated_at,
1820
- age_minutes: Math.round((Date.now() - c.updated_at) / 60000),
1821
- })),
1822
- });
1823
- }
1824
-
1825
- // LOW: Any cells exist at all
1826
- if (cells.length > 0) {
1827
- lowConfidence = true;
1828
- reasons.push(`${cells.length} total cells in hive`);
1829
- }
1830
- } catch (err) {
1831
- // Detection failed, use fallback
1832
- lowConfidence = true;
1833
- reasons.push("Detection error, using fallback");
1834
- logCompaction("error", "detect_swarm_exception", {
1835
- error: err instanceof Error ? err.message : String(err),
1836
- stack: err instanceof Error ? err.stack : undefined,
1837
- });
1838
- }
1839
-
1840
- // Determine overall confidence
1841
- let confidence: "high" | "medium" | "low" | "none";
1842
- if (highConfidence) {
1843
- confidence = "high";
1844
- } else if (mediumConfidence) {
1845
- confidence = "medium";
1846
- } else if (lowConfidence) {
1847
- confidence = "low";
1848
- } else {
1849
- confidence = "none";
1850
- }
1851
-
1852
- const totalDuration = Date.now() - startTime;
1853
- logCompaction("debug", "detect_swarm_complete", {
1854
- duration_ms: totalDuration,
1855
- confidence,
1856
- detected: confidence !== "none",
1857
- reason_count: reasons.length,
1858
- reasons,
1859
- high_confidence: highConfidence,
1860
- medium_confidence: mediumConfidence,
1861
- low_confidence: lowConfidence,
1862
- });
1863
-
1864
- return {
1865
- detected: confidence !== "none",
1866
- confidence,
1867
- reasons,
1868
- };
1869
- }
1870
-
1871
- /**
1872
- * Swarm-aware compaction context
1873
- *
1874
- * Injected during compaction to keep the swarm cooking. The coordinator should
1875
- * wake up from compaction and immediately resume orchestration - spawning agents,
1876
- * monitoring progress, unblocking work.
1877
- */
1878
- const SWARM_COMPACTION_CONTEXT = `## 🐝 SWARM ACTIVE - Keep Cooking
1879
-
1880
- You are the **COORDINATOR** of an active swarm. Context was compacted but the swarm is still running.
1881
-
1882
- **YOUR JOB:** Keep orchestrating. Spawn agents. Monitor progress. Unblock work. Ship it.
1883
-
1884
- ### Preserve in Summary
1885
-
1886
- Extract from session context:
1887
-
1888
- 1. **Epic & Subtasks** - IDs, titles, status, file assignments
1889
- 2. **What's Running** - Which agents are active, what they're working on
1890
- 3. **What's Blocked** - Blockers and what's needed to unblock
1891
- 4. **What's Done** - Completed work and any follow-ups needed
1892
- 5. **What's Next** - Pending subtasks ready to spawn
1893
-
1894
- ### Summary Format
1895
-
1896
- \`\`\`
1897
- ## 🐝 Swarm State
1898
-
1899
- **Epic:** <bd-xxx> - <title>
1900
- **Project:** <path>
1901
- **Progress:** X/Y subtasks complete
1902
-
1903
- **Active:**
1904
- - <bd-xxx>: <title> [in_progress] → <agent> working on <files>
1905
-
1906
- **Blocked:**
1907
- - <bd-xxx>: <title> - BLOCKED: <reason>
1908
-
1909
- **Completed:**
1910
- - <bd-xxx>: <title> ✓
1911
-
1912
- **Ready to Spawn:**
1913
- - <bd-xxx>: <title> (files: <...>)
1914
- \`\`\`
1915
-
1916
- ### On Resume - IMMEDIATELY
1917
-
1918
- 1. \`swarm_status(epic_id="<epic>", project_key="<path>")\` - Get current state
1919
- 2. \`swarmmail_inbox(limit=5)\` - Check for agent messages
1920
- 3. \`swarm_review(project_key, epic_id, task_id, files_touched)\` - Review any completed work
1921
- 4. \`swarm_review_feedback(project_key, task_id, worker_id, status, issues)\` - Approve or request changes
1922
- 5. **Spawn ready subtasks** - Don't wait, fire them off
1923
- 6. **Unblock blocked work** - Resolve dependencies, reassign if needed
1924
- 7. **Collect completed work** - Close done subtasks, verify quality
1925
-
1926
- ### Keep the Swarm Cooking
1927
-
1928
- - **Spawn aggressively** - If a subtask is ready and unblocked, spawn an agent
1929
- - **Monitor actively** - Check status, read messages, respond to blockers
1930
- - **Close the loop** - When all subtasks done, verify and close the epic
1931
- - **Don't stop** - The swarm runs until the epic is closed
1932
-
1933
- **You are not waiting for instructions. You are the coordinator. Coordinate.**
1934
- `;
1935
-
1936
- /**
1937
- * Fallback detection prompt - tells the compactor what to look for
1938
- *
1939
- * Used when we can't definitively detect a swarm but want to be safe.
1940
- * The compactor can check the conversation context for these patterns.
1941
- */
1942
- const SWARM_DETECTION_FALLBACK = `## 🐝 Swarm Detection - Check Your Context
1943
-
1944
- **IMPORTANT:** Before summarizing, check if this session involves an active swarm.
1945
-
1946
- Look for ANY of these patterns in the conversation:
1947
-
1948
- ### Tool Calls (definite swarm sign)
1949
- - \`swarm_decompose\`, \`swarm_spawn_subtask\`, \`swarm_status\`, \`swarm_complete\`
1950
- - \`swarmmail_init\`, \`swarmmail_reserve\`, \`swarmmail_send\`
1951
- - \`hive_create_epic\`, \`hive_start\`, \`hive_close\`
1952
-
1953
- ### IDs and Names
1954
- - Cell IDs: \`bd-xxx\`, \`bd-xxx.N\` (subtask format)
1955
- - Agent names: BlueLake, RedMountain, GreenValley, etc.
1956
- - Epic references: "epic", "subtask", "parent"
1957
-
1958
- ### Coordination Language
1959
- - "spawn", "worker", "coordinator"
1960
- - "reserve", "reservation", "files"
1961
- - "blocked", "unblock", "dependency"
1962
- - "progress", "complete", "in_progress"
1963
-
1964
- ### If You Find Swarm Evidence
1965
-
1966
- Include this in your summary:
1967
- 1. Epic ID and title
1968
- 2. Project path
1969
- 3. Subtask status (running/blocked/done/pending)
1970
- 4. Any blockers or issues
1971
- 5. What should happen next
1972
-
1973
- **Then tell the resumed session:**
1974
- "This is an active swarm. Check swarm_status and swarmmail_inbox immediately."
1975
- `;
1976
-
1977
- // Extended hooks type to include experimental compaction hook with new prompt API
1978
- type CompactionOutput = {
1979
- context: string[];
1980
- prompt?: string; // NEW API from OpenCode PR #5907
1981
- };
1982
-
1983
- type ExtendedHooks = Hooks & {
1984
- "experimental.session.compacting"?: (
1985
- input: { sessionID: string },
1986
- output: CompactionOutput,
1987
- ) => Promise<void>;
1988
- };
1989
-
1990
- // NOTE: Only default export - named exports cause double registration!
1991
- // OpenCode's plugin loader calls ALL exports as functions.
1992
- const SwarmPlugin: Plugin = async (
1993
- input: PluginInput,
1994
- ): Promise<ExtendedHooks> => {
1995
- // CRITICAL: Set project directory from OpenCode input
1996
- // Without this, CLI uses wrong database path
1997
- projectDirectory = input.directory;
1998
-
1999
- // Store SDK client for session message scanning during compaction
2000
- sdkClient = input.client;
2001
-
2002
- return {
2003
- tool: {
2004
- // Beads
2005
- hive_create,
2006
- hive_create_epic,
2007
- hive_query,
2008
- hive_update,
2009
- hive_close,
2010
- hive_start,
2011
- hive_ready,
2012
- hive_cells,
2013
- hive_sync,
2014
- beads_link_thread,
2015
- // Swarm Mail (Embedded)
2016
- swarmmail_init,
2017
- swarmmail_send,
2018
- swarmmail_inbox,
2019
- swarmmail_read_message,
2020
- swarmmail_reserve,
2021
- swarmmail_release,
2022
- swarmmail_ack,
2023
- swarmmail_health,
2024
- // Structured
2025
- structured_extract_json,
2026
- structured_validate,
2027
- structured_parse_evaluation,
2028
- structured_parse_decomposition,
2029
- structured_parse_cell_tree,
2030
- // Swarm
2031
- swarm_init,
2032
- swarm_select_strategy,
2033
- swarm_plan_prompt,
2034
- swarm_decompose,
2035
- swarm_validate_decomposition,
2036
- swarm_status,
2037
- swarm_progress,
2038
- swarm_complete,
2039
- swarm_record_outcome,
2040
- swarm_subtask_prompt,
2041
- swarm_spawn_subtask,
2042
- swarm_complete_subtask,
2043
- swarm_evaluation_prompt,
2044
- swarm_broadcast,
2045
- // Worktree Isolation
2046
- swarm_worktree_create,
2047
- swarm_worktree_merge,
2048
- swarm_worktree_cleanup,
2049
- swarm_worktree_list,
2050
- // Structured Review
2051
- swarm_review,
2052
- swarm_review_feedback,
2053
- // Skills
2054
- skills_list,
2055
- skills_read,
2056
- skills_use,
2057
- skills_create,
2058
- skills_update,
2059
- skills_delete,
2060
- skills_init,
2061
- skills_add_script,
2062
- skills_execute,
2063
- // Swarm Insights
2064
- swarm_get_strategy_insights,
2065
- swarm_get_file_insights,
2066
- swarm_get_pattern_insights,
2067
- },
2068
-
2069
- // Swarm-aware compaction hook with LLM-powered continuation prompts
2070
- // Three-level fallback chain: LLM → static context → detection fallback → none
2071
- "experimental.session.compacting": async (
2072
- input: { sessionID: string },
2073
- output: CompactionOutput,
2074
- ) => {
2075
- const startTime = Date.now();
2076
-
2077
- // =======================================================================
2078
- // LOG: Compaction hook invoked - capture EVERYTHING we receive
2079
- // =======================================================================
2080
- logCompaction("info", "compaction_hook_invoked", {
2081
- session_id: input.sessionID,
2082
- project_directory: projectDirectory,
2083
- input_keys: Object.keys(input),
2084
- input_full: JSON.parse(JSON.stringify(input)), // Deep clone for logging
2085
- output_keys: Object.keys(output),
2086
- output_context_count: output.context?.length ?? 0,
2087
- output_has_prompt_field: "prompt" in output,
2088
- output_initial_state: {
2089
- context: output.context,
2090
- prompt: (output as any).prompt,
2091
- },
2092
- env: {
2093
- OPENCODE_SESSION_ID: process.env.OPENCODE_SESSION_ID,
2094
- OPENCODE_MESSAGE_ID: process.env.OPENCODE_MESSAGE_ID,
2095
- OPENCODE_AGENT: process.env.OPENCODE_AGENT,
2096
- OPENCODE_LITE_MODEL: process.env.OPENCODE_LITE_MODEL,
2097
- SWARM_PROJECT_DIR: process.env.SWARM_PROJECT_DIR,
2098
- },
2099
- cwd: process.cwd(),
2100
- timestamp: new Date().toISOString(),
2101
- });
2102
-
2103
- // =======================================================================
2104
- // STEP 1: Scan session messages for swarm tool calls
2105
- // =======================================================================
2106
- const sessionScanStart = Date.now();
2107
- const sessionScan = await scanSessionMessages(input.sessionID);
2108
- const sessionScanDuration = Date.now() - sessionScanStart;
2109
-
2110
- logCompaction("info", "session_scan_results", {
2111
- session_id: input.sessionID,
2112
- duration_ms: sessionScanDuration,
2113
- message_count: sessionScan.messageCount,
2114
- tool_call_count: sessionScan.toolCalls.length,
2115
- swarm_detected_from_messages: sessionScan.swarmDetected,
2116
- reasons: sessionScan.reasons,
2117
- });
2118
-
2119
- // =======================================================================
2120
- // STEP 2: Detect swarm state from hive cells
2121
- // =======================================================================
2122
- const detectionStart = Date.now();
2123
- const detection = await detectSwarm();
2124
- const detectionDuration = Date.now() - detectionStart;
2125
-
2126
- logCompaction("info", "swarm_detection_complete", {
2127
- session_id: input.sessionID,
2128
- duration_ms: detectionDuration,
2129
- detected: detection.detected,
2130
- confidence: detection.confidence,
2131
- reasons: detection.reasons,
2132
- reason_count: detection.reasons.length,
2133
- });
2134
-
2135
- // =======================================================================
2136
- // STEP 3: Merge session scan with hive detection for final confidence
2137
- // =======================================================================
2138
- // If session messages show high-confidence swarm tools, boost confidence
2139
- if (sessionScan.swarmDetected && sessionScan.reasons.some(r => r.includes("high-confidence"))) {
2140
- if (detection.confidence === "none" || detection.confidence === "low") {
2141
- detection.confidence = "high";
2142
- detection.detected = true;
2143
- detection.reasons.push(...sessionScan.reasons);
2144
-
2145
- logCompaction("info", "confidence_boost_from_session_scan", {
2146
- session_id: input.sessionID,
2147
- original_confidence: detection.confidence,
2148
- boosted_to: "high",
2149
- session_reasons: sessionScan.reasons,
2150
- });
2151
- }
2152
- } else if (sessionScan.swarmDetected) {
2153
- // Medium boost for any swarm tools found
2154
- if (detection.confidence === "none") {
2155
- detection.confidence = "medium";
2156
- detection.detected = true;
2157
- detection.reasons.push(...sessionScan.reasons);
2158
-
2159
- logCompaction("info", "confidence_boost_from_session_scan", {
2160
- session_id: input.sessionID,
2161
- original_confidence: "none",
2162
- boosted_to: "medium",
2163
- session_reasons: sessionScan.reasons,
2164
- });
2165
- } else if (detection.confidence === "low") {
2166
- detection.confidence = "medium";
2167
- detection.reasons.push(...sessionScan.reasons);
2168
-
2169
- logCompaction("info", "confidence_boost_from_session_scan", {
2170
- session_id: input.sessionID,
2171
- original_confidence: "low",
2172
- boosted_to: "medium",
2173
- session_reasons: sessionScan.reasons,
2174
- });
2175
- }
2176
- }
2177
-
2178
- logCompaction("info", "final_swarm_detection", {
2179
- session_id: input.sessionID,
2180
- confidence: detection.confidence,
2181
- detected: detection.detected,
2182
- combined_reasons: detection.reasons,
2183
- message_scan_contributed: sessionScan.swarmDetected,
2184
- });
2185
-
2186
- if (detection.confidence === "high" || detection.confidence === "medium") {
2187
- // Definite or probable swarm - try LLM-powered compaction
2188
- logCompaction("info", "swarm_detected_attempting_llm", {
2189
- session_id: input.sessionID,
2190
- confidence: detection.confidence,
2191
- reasons: detection.reasons,
2192
- });
2193
-
2194
- try {
2195
- // Level 1: Query actual state
2196
- const queryStart = Date.now();
2197
- const snapshot = await querySwarmState(input.sessionID);
2198
- const queryDuration = Date.now() - queryStart;
2199
-
2200
- logCompaction("info", "swarm_state_queried", {
2201
- session_id: input.sessionID,
2202
- duration_ms: queryDuration,
2203
- has_epic: !!snapshot.epic,
2204
- epic_id: snapshot.epic?.id,
2205
- epic_title: snapshot.epic?.title,
2206
- epic_status: snapshot.epic?.status,
2207
- subtask_count: snapshot.epic?.subtasks?.length ?? 0,
2208
- subtasks: snapshot.epic?.subtasks?.map(s => ({
2209
- id: s.id,
2210
- title: s.title,
2211
- status: s.status,
2212
- file_count: s.files?.length ?? 0,
2213
- })),
2214
- message_count: snapshot.messages?.length ?? 0,
2215
- reservation_count: snapshot.reservations?.length ?? 0,
2216
- detection_confidence: snapshot.detection.confidence,
2217
- detection_reasons: snapshot.detection.reasons,
2218
- full_snapshot: snapshot, // Log the entire snapshot
2219
- });
2220
-
2221
- // =======================================================================
2222
- // CAPTURE POINT 1: Detection complete - record confidence and reasons
2223
- // =======================================================================
2224
- await captureCompaction(
2225
- input.sessionID,
2226
- snapshot.epic?.id || "unknown",
2227
- "detection_complete",
2228
- {
2229
- confidence: snapshot.detection.confidence,
2230
- detected: detection.detected,
2231
- reasons: snapshot.detection.reasons,
2232
- session_scan_contributed: sessionScan.swarmDetected,
2233
- session_scan_reasons: sessionScan.reasons,
2234
- epic_id: snapshot.epic?.id,
2235
- epic_title: snapshot.epic?.title,
2236
- subtask_count: snapshot.epic?.subtasks?.length ?? 0,
2237
- },
2238
- );
2239
-
2240
- // Level 2: Generate prompt with LLM
2241
- const llmStart = Date.now();
2242
- const llmPrompt = await generateCompactionPrompt(snapshot);
2243
- const llmDuration = Date.now() - llmStart;
2244
-
2245
- logCompaction("info", "llm_generation_complete", {
2246
- session_id: input.sessionID,
2247
- duration_ms: llmDuration,
2248
- success: !!llmPrompt,
2249
- prompt_length: llmPrompt?.length ?? 0,
2250
- prompt_preview: llmPrompt?.substring(0, 500),
2251
- });
2252
-
2253
- // =======================================================================
2254
- // CAPTURE POINT 2: Prompt generated - record FULL prompt content
2255
- // =======================================================================
2256
- if (llmPrompt) {
2257
- await captureCompaction(
2258
- input.sessionID,
2259
- snapshot.epic?.id || "unknown",
2260
- "prompt_generated",
2261
- {
2262
- prompt_length: llmPrompt.length,
2263
- full_prompt: llmPrompt, // FULL content, not truncated
2264
- context_type: "llm_generated",
2265
- duration_ms: llmDuration,
2266
- },
2267
- );
2268
- }
2269
-
2270
- if (llmPrompt) {
2271
- // SUCCESS: Use LLM-generated prompt
2272
- const header = `[Swarm compaction: LLM-generated, ${detection.reasons.join(", ")}]\n\n`;
2273
- const fullContent = header + llmPrompt;
2274
-
2275
- // Progressive enhancement: use new API if available
2276
- if ("prompt" in output) {
2277
- output.prompt = fullContent;
2278
- logCompaction("info", "context_injected_via_prompt_api", {
2279
- session_id: input.sessionID,
2280
- content_length: fullContent.length,
2281
- method: "output.prompt",
2282
- });
2283
- } else {
2284
- output.context.push(fullContent);
2285
- logCompaction("info", "context_injected_via_context_array", {
2286
- session_id: input.sessionID,
2287
- content_length: fullContent.length,
2288
- method: "output.context.push",
2289
- context_count_after: output.context.length,
2290
- });
2291
- }
2292
-
2293
- // =======================================================================
2294
- // CAPTURE POINT 3a: Context injected (LLM path) - record FULL content
2295
- // =======================================================================
2296
- await captureCompaction(
2297
- input.sessionID,
2298
- snapshot.epic?.id || "unknown",
2299
- "context_injected",
2300
- {
2301
- full_content: fullContent, // FULL content, not truncated
2302
- content_length: fullContent.length,
2303
- injection_method: "prompt" in output ? "output.prompt" : "output.context.push",
2304
- context_type: "llm_generated",
2305
- },
2306
- );
2307
-
2308
- const totalDuration = Date.now() - startTime;
2309
- logCompaction("info", "compaction_complete_llm_success", {
2310
- session_id: input.sessionID,
2311
- total_duration_ms: totalDuration,
2312
- detection_duration_ms: detectionDuration,
2313
- query_duration_ms: queryDuration,
2314
- llm_duration_ms: llmDuration,
2315
- confidence: detection.confidence,
2316
- context_type: "llm_generated",
2317
- content_length: fullContent.length,
2318
- });
2319
- return;
2320
- }
2321
-
2322
- // LLM failed, fall through to static prompt
2323
- logCompaction("warn", "llm_generation_returned_null", {
2324
- session_id: input.sessionID,
2325
- llm_duration_ms: llmDuration,
2326
- falling_back_to: "static_prompt",
2327
- });
2328
- } catch (err) {
2329
- // LLM failed, fall through to static prompt
2330
- logCompaction("error", "llm_generation_failed", {
2331
- session_id: input.sessionID,
2332
- error: err instanceof Error ? err.message : String(err),
2333
- error_stack: err instanceof Error ? err.stack : undefined,
2334
- falling_back_to: "static_prompt",
2335
- });
2336
- }
2337
-
2338
- // Level 3: Fall back to static context
2339
- const header = `[Swarm detected: ${detection.reasons.join(", ")}]\n\n`;
2340
- const staticContent = header + SWARM_COMPACTION_CONTEXT;
2341
- output.context.push(staticContent);
2342
-
2343
- // =======================================================================
2344
- // CAPTURE POINT 3b: Context injected (static fallback) - record FULL content
2345
- // =======================================================================
2346
- await captureCompaction(
2347
- input.sessionID,
2348
- "unknown", // No snapshot available in this path
2349
- "context_injected",
2350
- {
2351
- full_content: staticContent,
2352
- content_length: staticContent.length,
2353
- injection_method: "output.context.push",
2354
- context_type: "static_swarm_context",
2355
- },
2356
- );
2357
-
2358
- const totalDuration = Date.now() - startTime;
2359
- logCompaction("info", "compaction_complete_static_fallback", {
2360
- session_id: input.sessionID,
2361
- total_duration_ms: totalDuration,
2362
- confidence: detection.confidence,
2363
- context_type: "static_swarm_context",
2364
- content_length: staticContent.length,
2365
- context_count_after: output.context.length,
2366
- });
2367
- } else if (detection.confidence === "low") {
2368
- // Level 4: Possible swarm - inject fallback detection prompt
2369
- const header = `[Possible swarm: ${detection.reasons.join(", ")}]\n\n`;
2370
- const fallbackContent = header + SWARM_DETECTION_FALLBACK;
2371
- output.context.push(fallbackContent);
2372
-
2373
- // =======================================================================
2374
- // CAPTURE POINT 3c: Context injected (detection fallback) - record FULL content
2375
- // =======================================================================
2376
- await captureCompaction(
2377
- input.sessionID,
2378
- "unknown", // No snapshot for low confidence
2379
- "context_injected",
2380
- {
2381
- full_content: fallbackContent,
2382
- content_length: fallbackContent.length,
2383
- injection_method: "output.context.push",
2384
- context_type: "detection_fallback",
2385
- },
2386
- );
2387
-
2388
- const totalDuration = Date.now() - startTime;
2389
- logCompaction("info", "compaction_complete_detection_fallback", {
2390
- session_id: input.sessionID,
2391
- total_duration_ms: totalDuration,
2392
- confidence: detection.confidence,
2393
- context_type: "detection_fallback",
2394
- content_length: fallbackContent.length,
2395
- context_count_after: output.context.length,
2396
- reasons: detection.reasons,
2397
- });
2398
- } else {
2399
- // Level 5: confidence === "none" - no injection, probably not a swarm
2400
- const totalDuration = Date.now() - startTime;
2401
- logCompaction("info", "compaction_complete_no_swarm", {
2402
- session_id: input.sessionID,
2403
- total_duration_ms: totalDuration,
2404
- confidence: detection.confidence,
2405
- context_type: "none",
2406
- reasons: detection.reasons,
2407
- context_count_unchanged: output.context.length,
2408
- });
2409
- }
2410
-
2411
- // =======================================================================
2412
- // LOG: Final output state
2413
- // =======================================================================
2414
- logCompaction("debug", "compaction_hook_complete_final_state", {
2415
- session_id: input.sessionID,
2416
- output_context_count: output.context?.length ?? 0,
2417
- output_context_lengths: output.context?.map(c => c.length) ?? [],
2418
- output_has_prompt: !!(output as any).prompt,
2419
- output_prompt_length: (output as any).prompt?.length ?? 0,
2420
- total_duration_ms: Date.now() - startTime,
2421
- });
2422
- },
2423
- };
2424
- };
2425
-
2426
- export default SwarmPlugin;