opencode-swarm-plugin 0.44.2 → 0.45.1

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (43)
  1. package/README.md +277 -54
  2. package/bin/swarm.ts +156 -29
  3. package/dist/bin/swarm.js +125212 -0
  4. package/dist/decision-trace-integration.d.ts +204 -0
  5. package/dist/decision-trace-integration.d.ts.map +1 -0
  6. package/dist/hive.d.ts.map +1 -1
  7. package/dist/hive.js +9 -9
  8. package/dist/index.d.ts +32 -2
  9. package/dist/index.d.ts.map +1 -1
  10. package/dist/index.js +535 -27
  11. package/dist/plugin.js +295 -27
  12. package/dist/query-tools.d.ts +20 -12
  13. package/dist/query-tools.d.ts.map +1 -1
  14. package/dist/swarm-decompose.d.ts +4 -4
  15. package/dist/swarm-decompose.d.ts.map +1 -1
  16. package/dist/swarm-prompts.d.ts.map +1 -1
  17. package/dist/swarm-prompts.js +220 -22
  18. package/dist/swarm-review.d.ts.map +1 -1
  19. package/dist/swarm-signature.d.ts +106 -0
  20. package/dist/swarm-signature.d.ts.map +1 -0
  21. package/dist/swarm-strategies.d.ts +16 -3
  22. package/dist/swarm-strategies.d.ts.map +1 -1
  23. package/dist/swarm.d.ts +4 -2
  24. package/dist/swarm.d.ts.map +1 -1
  25. package/examples/commands/swarm.md +745 -0
  26. package/examples/plugin-wrapper-template.ts +2892 -0
  27. package/examples/skills/hive-workflow/SKILL.md +212 -0
  28. package/examples/skills/skill-creator/SKILL.md +223 -0
  29. package/examples/skills/swarm-coordination/SKILL.md +292 -0
  30. package/global-skills/cli-builder/SKILL.md +344 -0
  31. package/global-skills/cli-builder/references/advanced-patterns.md +244 -0
  32. package/global-skills/learning-systems/SKILL.md +644 -0
  33. package/global-skills/skill-creator/LICENSE.txt +202 -0
  34. package/global-skills/skill-creator/SKILL.md +352 -0
  35. package/global-skills/skill-creator/references/output-patterns.md +82 -0
  36. package/global-skills/skill-creator/references/workflows.md +28 -0
  37. package/global-skills/swarm-coordination/SKILL.md +995 -0
  38. package/global-skills/swarm-coordination/references/coordinator-patterns.md +235 -0
  39. package/global-skills/swarm-coordination/references/strategies.md +138 -0
  40. package/global-skills/system-design/SKILL.md +213 -0
  41. package/global-skills/testing-patterns/SKILL.md +430 -0
  42. package/global-skills/testing-patterns/references/dependency-breaking-catalog.md +586 -0
  43. package/package.json +6 -3
package/examples/plugin-wrapper-template.ts (new file)
@@ -0,0 +1,2892 @@
/**
 * ╔═══════════════════════════════════════════════════════════════════════════╗
 * ║                                                                           ║
 * ║                   🐝 OPENCODE SWARM PLUGIN WRAPPER 🐝                     ║
 * ║                                                                           ║
 * ║  This file lives at: ~/.config/opencode/plugin/swarm.ts                  ║
 * ║  Generated by: swarm setup                                                ║
 * ║                                                                           ║
 * ╠═══════════════════════════════════════════════════════════════════════════╣
 * ║                                                                           ║
 * ║  ⚠️  CRITICAL: THIS FILE MUST BE 100% SELF-CONTAINED  ⚠️                  ║
 * ║                                                                           ║
 * ║  ❌ NEVER import from "opencode-swarm-plugin" npm package                 ║
 * ║  ❌ NEVER import from any package with transitive deps (evalite, etc)     ║
 * ║  ❌ NEVER add dependencies that aren't provided by OpenCode               ║
 * ║                                                                           ║
 * ║  ✅ ONLY import from: @opencode-ai/plugin, @opencode-ai/sdk, node:*       ║
 * ║  ✅ Shell out to `swarm` CLI for all tool execution                       ║
 * ║  ✅ Inline any logic that would otherwise require imports                 ║
 * ║                                                                           ║
 * ║  WHY? The npm package has dependencies (evalite, etc) that aren't         ║
 * ║  available in OpenCode's plugin context. Importing causes:                ║
 * ║  "Cannot find module 'evalite/runner'" → trace trap → OpenCode crash      ║
 * ║                                                                           ║
 * ║  PATTERN: Plugin wrapper is DUMB. CLI is SMART.                           ║
 * ║  - Wrapper: thin shell, no logic, just bridges to CLI                     ║
 * ║  - CLI: all the smarts, all the deps, runs in its own context             ║
 * ║                                                                           ║
 * ╚═══════════════════════════════════════════════════════════════════════════╝
 *
 * Environment variables passed to CLI:
 * - OPENCODE_SESSION_ID: Session state persistence
 * - OPENCODE_MESSAGE_ID: Message context
 * - OPENCODE_AGENT: Agent context
 * - SWARM_PROJECT_DIR: Project directory (critical for database path)
 */
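
// Illustrative only, and an assumption about the CLI side (which is not part
// of this file): a CLI entrypoint consuming the variables above might read
//
//   const sessionID = process.env.OPENCODE_SESSION_ID ?? "unknown-session";
//   const agent = process.env.OPENCODE_AGENT ?? "coordinator";
//   // SWARM_PROJECT_DIR should win over cwd so the database path stays stable:
//   const projectDir = process.env.SWARM_PROJECT_DIR ?? process.cwd();
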
import type { Plugin, PluginInput, Hooks } from "@opencode-ai/plugin";
import type { ToolPart } from "@opencode-ai/sdk";
import { tool } from "@opencode-ai/plugin";
import { spawn } from "node:child_process";
import { appendFileSync, mkdirSync, existsSync } from "node:fs";
import { join } from "node:path";
import { homedir } from "node:os";

// =============================================================================
// Swarm Signature Detection (INLINED - do not import from opencode-swarm-plugin)
// =============================================================================

/**
 * Subtask lifecycle status derived from events
 */
type SubtaskStatus = "created" | "spawned" | "in_progress" | "completed" | "closed";

/**
 * Subtask state projected from events
 */
interface SubtaskState {
  id: string;
  title: string;
  status: SubtaskStatus;
  files: string[];
  worker?: string;
  spawnedAt?: number;
  completedAt?: number;
}

/**
 * Epic state projected from events
 */
interface EpicState {
  id: string;
  title: string;
  status: "open" | "in_progress" | "closed";
  createdAt: number;
}

/**
 * Complete swarm state projected from session events
 */
interface SwarmProjection {
  isSwarm: boolean;
  epic?: EpicState;
  subtasks: Map<string, SubtaskState>;
  projectPath?: string;
  coordinatorName?: string;
  lastEventAt?: number;
  counts: {
    total: number;
    created: number;
    spawned: number;
    inProgress: number;
    completed: number;
    closed: number;
  };
}

/**
 * Tool call event extracted from session messages
 */
interface ToolCallEvent {
  tool: string;
  input: Record<string, unknown>;
  output: string;
  timestamp: number;
}

/** Parse epic ID from hive_create_epic output */
function parseEpicId(output: string): string | undefined {
  try {
    const parsed = JSON.parse(output);
    return parsed.epic?.id || parsed.id;
  } catch {
    return undefined;
  }
}

/** Parse subtask IDs from hive_create_epic output */
function parseSubtaskIds(output: string): string[] {
  try {
    const parsed = JSON.parse(output);
    const subtasks = parsed.subtasks || parsed.epic?.subtasks || [];
    return subtasks
      .map((s: unknown) => {
        if (typeof s === "object" && s !== null && "id" in s) {
          return (s as { id: string }).id;
        }
        return undefined;
      })
      .filter((id: unknown): id is string => typeof id === "string");
  } catch {
    return [];
  }
}
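
// Illustrative only: an assumed hive_create_epic output shape that the two
// parsers above accept (IDs hypothetical):
//
//   {"epic": {"id": "bd-abc123", "subtasks": [{"id": "bd-abc123.1"}]}}
//
// parseEpicId returns "bd-abc123"; parseSubtaskIds returns ["bd-abc123.1"].
// Both degrade gracefully (undefined / []) on any other payload.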

/**
 * Project swarm state from session tool call events
 */
function projectSwarmState(events: ToolCallEvent[]): SwarmProjection {
  const state: SwarmProjection = {
    isSwarm: false,
    subtasks: new Map(),
    counts: { total: 0, created: 0, spawned: 0, inProgress: 0, completed: 0, closed: 0 },
  };

  let hasEpic = false;
  let hasSpawn = false;

  for (const event of events) {
    state.lastEventAt = event.timestamp;

    switch (event.tool) {
      case "hive_create_epic": {
        const epicId = parseEpicId(event.output);
        const epicTitle = typeof event.input.epic_title === "string" ? event.input.epic_title : undefined;

        if (epicId) {
          state.epic = { id: epicId, title: epicTitle || "Unknown Epic", status: "open", createdAt: event.timestamp };
          hasEpic = true;

          const subtasks = event.input.subtasks;
          if (Array.isArray(subtasks)) {
            for (const subtask of subtasks) {
              if (typeof subtask === "object" && subtask !== null) {
                state.counts.created++;
                state.counts.total++;
              }
            }
          }

          const subtaskIds = parseSubtaskIds(event.output);
          for (const id of subtaskIds) {
            if (!state.subtasks.has(id)) {
              state.subtasks.set(id, { id, title: "Unknown", status: "created", files: [] });
              state.counts.total++;
              state.counts.created++;
            }
          }
        }
        break;
      }

      case "swarm_spawn_subtask": {
        const beadId = typeof event.input.bead_id === "string" ? event.input.bead_id : undefined;
        const title = typeof event.input.subtask_title === "string" ? event.input.subtask_title : "Unknown";
        const files = Array.isArray(event.input.files) ? (event.input.files as string[]) : [];

        if (beadId) {
          hasSpawn = true;
          const existing = state.subtasks.get(beadId);
          if (existing) {
            if (existing.status === "created") { state.counts.created--; state.counts.spawned++; }
            existing.status = "spawned";
            existing.title = title;
            existing.files = files;
            existing.spawnedAt = event.timestamp;
          } else {
            state.subtasks.set(beadId, { id: beadId, title, status: "spawned", files, spawnedAt: event.timestamp });
            state.counts.total++;
            state.counts.spawned++;
          }

          const epicId = typeof event.input.epic_id === "string" ? event.input.epic_id : undefined;
          if (epicId && !state.epic) {
            state.epic = { id: epicId, title: "Unknown Epic", status: "in_progress", createdAt: event.timestamp };
          }
        }
        break;
      }

      case "hive_start": {
        const id = typeof event.input.id === "string" ? event.input.id : undefined;
        if (id) {
          const subtask = state.subtasks.get(id);
          if (subtask && subtask.status !== "completed" && subtask.status !== "closed") {
            if (subtask.status === "created") state.counts.created--;
            else if (subtask.status === "spawned") state.counts.spawned--;
            subtask.status = "in_progress";
            state.counts.inProgress++;
          }
          if (state.epic && state.epic.id === id) state.epic.status = "in_progress";
        }
        break;
      }

      case "swarm_complete": {
        const beadId = typeof event.input.bead_id === "string" ? event.input.bead_id : undefined;
        if (beadId) {
          const subtask = state.subtasks.get(beadId);
          if (subtask && subtask.status !== "closed") {
            if (subtask.status === "created") state.counts.created--;
            else if (subtask.status === "spawned") state.counts.spawned--;
            else if (subtask.status === "in_progress") state.counts.inProgress--;
            subtask.status = "completed";
            subtask.completedAt = event.timestamp;
            state.counts.completed++;
          }
        }
        break;
      }

      case "hive_close": {
        const id = typeof event.input.id === "string" ? event.input.id : undefined;
        if (id) {
          const subtask = state.subtasks.get(id);
          if (subtask) {
            if (subtask.status === "created") state.counts.created--;
            else if (subtask.status === "spawned") state.counts.spawned--;
            else if (subtask.status === "in_progress") state.counts.inProgress--;
            else if (subtask.status === "completed") state.counts.completed--;
            subtask.status = "closed";
            state.counts.closed++;
          }
          if (state.epic && state.epic.id === id) state.epic.status = "closed";
        }
        break;
      }

      case "swarmmail_init": {
        try {
          const parsed = JSON.parse(event.output);
          if (parsed.agent_name) state.coordinatorName = parsed.agent_name;
          if (parsed.project_key) state.projectPath = parsed.project_key;
        } catch { /* skip */ }
        break;
      }
    }
  }

  state.isSwarm = hasEpic && hasSpawn;
  return state;
}
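
// Illustrative only: two events are enough to flip the swarm signature on
// (IDs and titles hypothetical):
//
//   const projection = projectSwarmState([
//     { tool: "hive_create_epic", input: { epic_title: "Add auth" },
//       output: '{"epic":{"id":"bd-abc123"}}', timestamp: 1 },
//     { tool: "swarm_spawn_subtask",
//       input: { bead_id: "bd-abc123.1", subtask_title: "Login form" },
//       output: "", timestamp: 2 },
//   ]);
//   // projection.isSwarm === true; counts: { total: 1, spawned: 1, ... }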

/** Quick check for swarm signature without full projection */
function hasSwarmSignature(events: ToolCallEvent[]): boolean {
  let hasEpic = false;
  let hasSpawn = false;
  for (const event of events) {
    if (event.tool === "hive_create_epic") hasEpic = true;
    else if (event.tool === "swarm_spawn_subtask") hasSpawn = true;
    if (hasEpic && hasSpawn) return true;
  }
  return false;
}

/** Check if swarm is still active (has pending work) */
function isSwarmActive(projection: SwarmProjection): boolean {
  if (!projection.isSwarm) return false;
  return projection.counts.created > 0 || projection.counts.spawned > 0 ||
    projection.counts.inProgress > 0 || projection.counts.completed > 0;
}

/** Get human-readable swarm status summary */
function getSwarmSummary(projection: SwarmProjection): string {
  if (!projection.isSwarm) return "No swarm detected";
  const { counts, epic } = projection;
  const parts: string[] = [];
  if (epic) parts.push(`Epic: ${epic.id} - ${epic.title} [${epic.status}]`);
  parts.push(`Subtasks: ${counts.total} total (${counts.spawned} spawned, ${counts.inProgress} in_progress, ${counts.completed} completed, ${counts.closed} closed)`);
  parts.push(isSwarmActive(projection) ? "Status: ACTIVE - has pending work" : "Status: COMPLETE - all work closed");
  return parts.join("\n");
}
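
// Illustrative only: for the hypothetical projection sketched above,
// getSwarmSummary(projection) yields roughly:
//
//   Epic: bd-abc123 - Add auth [open]
//   Subtasks: 1 total (1 spawned, 0 in_progress, 0 completed, 0 closed)
//   Status: ACTIVE - has pending work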

// =============================================================================
// Constants
// =============================================================================

const SWARM_CLI = "swarm";

// =============================================================================
// File-based Logging (writes to ~/.config/swarm-tools/logs/)
// =============================================================================

const LOG_DIR = join(homedir(), ".config", "swarm-tools", "logs");
const COMPACTION_LOG = join(LOG_DIR, "compaction.log");

/**
 * Ensure log directory exists
 */
function ensureLogDir(): void {
  if (!existsSync(LOG_DIR)) {
    mkdirSync(LOG_DIR, { recursive: true });
  }
}

/**
 * Log a compaction event to file (JSON lines format, compatible with `swarm log`)
 *
 * @param level - Log level (info, debug, warn, error)
 * @param msg - Log message
 * @param data - Additional structured data
 */
function logCompaction(
  level: "info" | "debug" | "warn" | "error",
  msg: string,
  data?: Record<string, unknown>,
): void {
  try {
    ensureLogDir();
    const entry = JSON.stringify({
      time: new Date().toISOString(),
      level,
      msg,
      ...data,
    });
    appendFileSync(COMPACTION_LOG, entry + "\n");
  } catch {
    // Silently fail - logging should never break the plugin
  }
}
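
// Illustrative only: one resulting JSON-lines entry in compaction.log
// (values hypothetical):
//
//   {"time":"2025-01-01T00:00:00.000Z","level":"warn","msg":"compaction_capture_failed","session_id":"ses_123"}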

/**
 * Capture compaction event for evals via CLI
 *
 * Shells out to `swarm capture` command to avoid import issues.
 * The CLI handles all the logic - plugin wrapper stays dumb.
 *
 * @param sessionID - Session ID
 * @param epicID - Epic ID (or "unknown" if not detected)
 * @param compactionType - Event type (detection_complete, prompt_generated, context_injected)
 * @param payload - Event-specific data (full prompts, detection results, etc.)
 */
async function captureCompaction(
  sessionID: string,
  epicID: string,
  compactionType: "detection_complete" | "prompt_generated" | "context_injected",
  payload: any,
): Promise<void> {
  try {
    // Shell out to CLI - no imports needed, version always matches
    const args = [
      "capture",
      "--session", sessionID,
      "--epic", epicID,
      "--type", compactionType,
      "--payload", JSON.stringify(payload),
    ];

    const proc = spawn(SWARM_CLI, args, {
      env: { ...process.env, SWARM_PROJECT_DIR: projectDirectory },
      stdio: ["ignore", "ignore", "ignore"], // Fire and forget
    });

    // Don't wait - capture is non-blocking
    proc.unref();
  } catch (err) {
    // Non-fatal - capture failures shouldn't break compaction
    logCompaction("warn", "compaction_capture_failed", {
      session_id: sessionID,
      compaction_type: compactionType,
      error: err instanceof Error ? err.message : String(err),
    });
  }
}
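
// Illustrative only: the spawn above is equivalent to running (IDs and
// payload hypothetical):
//
//   swarm capture --session ses_123 --epic bd-abc123 \
//     --type prompt_generated --payload '{"prompt":"..."}'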

// Module-level project directory - set during plugin initialization
// This is CRITICAL: without it, the CLI uses process.cwd() which may be wrong
let projectDirectory: string = process.cwd();

// Module-level SDK client - set during plugin initialization
// Used for scanning session messages during compaction
let sdkClient: any = null;

// =============================================================================
// CLI Execution Helper
// =============================================================================

/**
 * Execute a swarm tool via CLI
 *
 * Spawns `swarm tool <name> --json '<args>'` and returns the result.
 * Passes session context via environment variables.
 *
 * IMPORTANT: Runs in projectDirectory (set by OpenCode), not process.cwd()
 */
async function execTool(
  name: string,
  args: Record<string, unknown>,
  ctx: { sessionID: string; messageID: string; agent: string },
): Promise<string> {
  return new Promise((resolve, reject) => {
    const hasArgs = Object.keys(args).length > 0;
    const cliArgs = hasArgs
      ? ["tool", name, "--json", JSON.stringify(args)]
      : ["tool", name];

    const proc = spawn(SWARM_CLI, cliArgs, {
      cwd: projectDirectory, // Run in project directory, not plugin directory
      stdio: ["ignore", "pipe", "pipe"],
      env: {
        ...process.env,
        OPENCODE_SESSION_ID: ctx.sessionID,
        OPENCODE_MESSAGE_ID: ctx.messageID,
        OPENCODE_AGENT: ctx.agent,
        SWARM_PROJECT_DIR: projectDirectory, // Also pass as env var
      },
    });

    let stdout = "";
    let stderr = "";

    proc.stdout.on("data", (data) => {
      stdout += data;
    });
    proc.stderr.on("data", (data) => {
      stderr += data;
    });

    proc.on("close", (code) => {
      if (code === 0) {
        // Success - return the JSON output
        try {
          const result = JSON.parse(stdout);
          if (result.success && result.data !== undefined) {
            // Unwrap the data for cleaner tool output
            resolve(
              typeof result.data === "string"
                ? result.data
                : JSON.stringify(result.data, null, 2),
            );
          } else if (!result.success && result.error) {
            // Tool returned an error in JSON format
            // Handle both string errors and object errors with .message
            const errorMsg = typeof result.error === "string"
              ? result.error
              : (result.error.message || "Tool execution failed");
            reject(new Error(errorMsg));
          } else {
            resolve(stdout);
          }
        } catch {
          resolve(stdout);
        }
      } else if (code === 2) {
        reject(new Error(`Unknown tool: ${name}`));
      } else if (code === 3) {
        reject(new Error(`Invalid JSON args: ${stderr}`));
      } else {
        // Tool returned error
        try {
          const result = JSON.parse(stdout);
          if (!result.success && result.error) {
            // Handle both string errors and object errors with .message
            const errorMsg = typeof result.error === "string"
              ? result.error
              : (result.error.message || `Tool failed with code ${code}`);
            reject(new Error(errorMsg));
          } else {
            reject(
              new Error(stderr || stdout || `Tool failed with code ${code}`),
            );
          }
        } catch {
          reject(
            new Error(stderr || stdout || `Tool failed with code ${code}`),
          );
        }
      }
    });

    proc.on("error", (err) => {
      if ((err as NodeJS.ErrnoException).code === "ENOENT") {
        reject(
          new Error(
            `swarm CLI not found. Install with: npm install -g opencode-swarm-plugin`,
          ),
        );
      } else {
        reject(err);
      }
    });
  });
}
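
// Illustrative only, reading the contract off the handler above: exit 0 with
// {"success":true,"data":...} resolves with the unwrapped data; exit 0 with
// {"success":false,"error":...} rejects; exit 2 means unknown tool; exit 3
// means malformed --json args. A hypothetical success envelope:
//
//   {"success": true, "data": {"id": "bd-abc123.2", "title": "Next task"}}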

// =============================================================================
// Beads Tools
// =============================================================================

const hive_create = tool({
  description: "Create a new bead with type-safe validation",
  args: {
    title: tool.schema.string().describe("Bead title"),
    type: tool.schema
      .enum(["bug", "feature", "task", "epic", "chore"])
      .optional()
      .describe("Issue type (default: task)"),
    priority: tool.schema
      .number()
      .min(0)
      .max(3)
      .optional()
      .describe("Priority 0-3 (default: 2)"),
    description: tool.schema.string().optional().describe("Bead description"),
    parent_id: tool.schema
      .string()
      .optional()
      .describe("Parent bead ID for epic children"),
  },
  execute: (args, ctx) => execTool("hive_create", args, ctx),
});
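
// Illustrative only: every tool in this file follows the same bridge pattern,
// so a call like hive_create({ title: "Fix login bug", type: "bug" }) becomes
// (argument values hypothetical):
//
//   swarm tool hive_create --json '{"title":"Fix login bug","type":"bug"}'
//
// with OPENCODE_SESSION_ID, OPENCODE_MESSAGE_ID, OPENCODE_AGENT, and
// SWARM_PROJECT_DIR set in the child environment by execTool.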

const hive_create_epic = tool({
  description: "Create epic with subtasks in one atomic operation",
  args: {
    epic_title: tool.schema.string().describe("Epic title"),
    epic_description: tool.schema
      .string()
      .optional()
      .describe("Epic description"),
    subtasks: tool.schema
      .array(
        tool.schema.object({
          title: tool.schema.string(),
          priority: tool.schema.number().min(0).max(3).optional(),
          files: tool.schema.array(tool.schema.string()).optional(),
        }),
      )
      .describe("Subtasks to create under the epic"),
  },
  execute: (args, ctx) => execTool("hive_create_epic", args, ctx),
});

const hive_query = tool({
  description: "Query beads with filters (replaces bd list, bd ready, bd wip)",
  args: {
    status: tool.schema
      .enum(["open", "in_progress", "blocked", "closed"])
      .optional()
      .describe("Filter by status"),
    type: tool.schema
      .enum(["bug", "feature", "task", "epic", "chore"])
      .optional()
      .describe("Filter by type"),
    ready: tool.schema
      .boolean()
      .optional()
      .describe("Only show unblocked beads"),
    limit: tool.schema
      .number()
      .optional()
      .describe("Max results (default: 20)"),
  },
  execute: (args, ctx) => execTool("hive_query", args, ctx),
});

const hive_update = tool({
  description: "Update bead status/description",
  args: {
    id: tool.schema.string().describe("Cell ID"),
    status: tool.schema
      .enum(["open", "in_progress", "blocked", "closed"])
      .optional()
      .describe("New status"),
    description: tool.schema.string().optional().describe("New description"),
    priority: tool.schema
      .number()
      .min(0)
      .max(3)
      .optional()
      .describe("New priority"),
  },
  execute: (args, ctx) => execTool("hive_update", args, ctx),
});

const hive_close = tool({
  description: "Close a bead with reason",
  args: {
    id: tool.schema.string().describe("Cell ID"),
    reason: tool.schema.string().describe("Completion reason"),
  },
  execute: (args, ctx) => execTool("hive_close", args, ctx),
});

const hive_start = tool({
  description: "Mark a bead as in-progress",
  args: {
    id: tool.schema.string().describe("Cell ID"),
  },
  execute: (args, ctx) => execTool("hive_start", args, ctx),
});

const hive_ready = tool({
  description: "Get the next ready bead (unblocked, highest priority)",
  args: {},
  execute: (args, ctx) => execTool("hive_ready", args, ctx),
});

const hive_sync = tool({
  description: "Sync beads to git and push (MANDATORY at session end)",
  args: {
    auto_pull: tool.schema.boolean().optional().describe("Pull before sync"),
  },
  execute: (args, ctx) => execTool("hive_sync", args, ctx),
});

const hive_cells = tool({
  description: `Query cells from the hive database with flexible filtering.

USE THIS TOOL TO:
- List all open cells: hive_cells()
- Find cells by status: hive_cells({ status: "in_progress" })
- Find cells by type: hive_cells({ type: "bug" })
- Get a specific cell by partial ID: hive_cells({ id: "mjkmd" })
- Get the next ready (unblocked) cell: hive_cells({ ready: true })
- Combine filters: hive_cells({ status: "open", type: "task" })

RETURNS: Array of cells with id, title, status, priority, type, parent_id, created_at, updated_at

PREFER THIS OVER hive_query when you need to:
- See what work is available
- Check status of multiple cells
- Find cells matching criteria
- Look up a cell by partial ID`,
  args: {
    id: tool.schema.string().optional().describe("Partial or full cell ID to look up"),
    status: tool.schema.enum(["open", "in_progress", "blocked", "closed"]).optional().describe("Filter by status"),
    type: tool.schema.enum(["task", "bug", "feature", "epic", "chore"]).optional().describe("Filter by type"),
    ready: tool.schema.boolean().optional().describe("If true, return only the next unblocked cell"),
    limit: tool.schema.number().optional().describe("Max cells to return (default 20)"),
  },
  execute: (args, ctx) => execTool("hive_cells", args, ctx),
});

const beads_link_thread = tool({
  description: "Add metadata linking bead to Agent Mail thread",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    thread_id: tool.schema.string().describe("Agent Mail thread ID"),
  },
  execute: (args, ctx) => execTool("beads_link_thread", args, ctx),
});

// =============================================================================
// Swarm Mail Tools (Embedded)
// =============================================================================

const swarmmail_init = tool({
  description: "Initialize Swarm Mail session (REQUIRED FIRST)",
  args: {
    project_path: tool.schema.string().describe("Absolute path to the project"),
    agent_name: tool.schema.string().optional().describe("Custom agent name"),
    task_description: tool.schema
      .string()
      .optional()
      .describe("Task description"),
  },
  execute: (args, ctx) => execTool("swarmmail_init", args, ctx),
});

const swarmmail_send = tool({
  description: "Send message to other agents via Swarm Mail",
  args: {
    to: tool.schema
      .array(tool.schema.string())
      .describe("Recipient agent names"),
    subject: tool.schema.string().describe("Message subject"),
    body: tool.schema.string().describe("Message body"),
    thread_id: tool.schema
      .string()
      .optional()
      .describe("Thread ID for grouping"),
    importance: tool.schema
      .enum(["low", "normal", "high", "urgent"])
      .optional()
      .describe("Message importance"),
    ack_required: tool.schema
      .boolean()
      .optional()
      .describe("Require acknowledgment"),
  },
  execute: (args, ctx) => execTool("swarmmail_send", args, ctx),
});

const swarmmail_inbox = tool({
  description: "Fetch inbox (CONTEXT-SAFE: bodies excluded, max 5 messages)",
  args: {
    limit: tool.schema
      .number()
      .max(5)
      .optional()
      .describe("Max messages (max 5)"),
    urgent_only: tool.schema
      .boolean()
      .optional()
      .describe("Only urgent messages"),
  },
  execute: (args, ctx) => execTool("swarmmail_inbox", args, ctx),
});

const swarmmail_read_message = tool({
  description: "Fetch ONE message body by ID",
  args: {
    message_id: tool.schema.number().describe("Message ID"),
  },
  execute: (args, ctx) => execTool("swarmmail_read_message", args, ctx),
});

const swarmmail_reserve = tool({
  description: "Reserve file paths for exclusive editing",
  args: {
    paths: tool.schema
      .array(tool.schema.string())
      .describe("File paths/patterns"),
    ttl_seconds: tool.schema.number().optional().describe("Reservation TTL"),
    exclusive: tool.schema.boolean().optional().describe("Exclusive lock"),
    reason: tool.schema.string().optional().describe("Reservation reason"),
  },
  execute: (args, ctx) => execTool("swarmmail_reserve", args, ctx),
});

const swarmmail_release = tool({
  description: "Release file reservations",
  args: {
    paths: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Paths to release"),
    reservation_ids: tool.schema
      .array(tool.schema.number())
      .optional()
      .describe("Reservation IDs"),
  },
  execute: (args, ctx) => execTool("swarmmail_release", args, ctx),
});

const swarmmail_ack = tool({
  description: "Acknowledge a message",
  args: {
    message_id: tool.schema.number().describe("Message ID"),
  },
  execute: (args, ctx) => execTool("swarmmail_ack", args, ctx),
});

const swarmmail_health = tool({
  description: "Check Swarm Mail database health",
  args: {},
  execute: (args, ctx) => execTool("swarmmail_health", args, ctx),
});

// =============================================================================
// Structured Tools
// =============================================================================

const structured_extract_json = tool({
  description: "Extract JSON from markdown/text response",
  args: {
    text: tool.schema.string().describe("Text containing JSON"),
  },
  execute: (args, ctx) => execTool("structured_extract_json", args, ctx),
});

const structured_validate = tool({
  description: "Validate agent response against a schema",
  args: {
    response: tool.schema.string().describe("Agent response to validate"),
    schema_name: tool.schema
      .enum(["evaluation", "task_decomposition", "cell_tree"])
      .describe("Schema to validate against"),
    max_retries: tool.schema
      .number()
      .min(1)
      .max(5)
      .optional()
      .describe("Max retries"),
  },
  execute: (args, ctx) => execTool("structured_validate", args, ctx),
});

const structured_parse_evaluation = tool({
  description: "Parse and validate evaluation response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_evaluation", args, ctx),
});

const structured_parse_decomposition = tool({
  description: "Parse and validate task decomposition response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_decomposition", args, ctx),
});

const structured_parse_cell_tree = tool({
  description: "Parse and validate bead tree response",
  args: {
    response: tool.schema.string().describe("Agent response"),
  },
  execute: (args, ctx) => execTool("structured_parse_cell_tree", args, ctx),
});

// =============================================================================
// Swarm Tools
// =============================================================================

const swarm_init = tool({
  description: "Initialize swarm session and check tool availability",
  args: {
    project_path: tool.schema.string().optional().describe("Project path"),
    isolation: tool.schema
      .enum(["worktree", "reservation"])
      .optional()
      .describe(
        "Isolation mode: 'worktree' for git worktree isolation, 'reservation' for file reservations (default)",
      ),
  },
  execute: (args, ctx) => execTool("swarm_init", args, ctx),
});

const swarm_select_strategy = tool({
  description: "Analyze task and recommend decomposition strategy",
  args: {
    task: tool.schema.string().min(1).describe("Task to analyze"),
    codebase_context: tool.schema
      .string()
      .optional()
      .describe("Codebase context"),
  },
  execute: (args, ctx) => execTool("swarm_select_strategy", args, ctx),
});

const swarm_plan_prompt = tool({
  description: "Generate strategy-specific decomposition prompt",
  args: {
    task: tool.schema.string().min(1).describe("Task to decompose"),
    strategy: tool.schema
      .enum(["file-based", "feature-based", "risk-based", "auto"])
      .optional()
      .describe("Decomposition strategy"),
    max_subtasks: tool.schema
      .number()
      .int()
      .min(2)
      .max(10)
      .optional()
      .describe("Max subtasks"),
    context: tool.schema.string().optional().describe("Additional context"),
    query_cass: tool.schema
      .boolean()
      .optional()
      .describe("Query CASS for similar tasks"),
    cass_limit: tool.schema
      .number()
      .int()
      .min(1)
      .max(10)
      .optional()
      .describe("CASS limit"),
  },
  execute: (args, ctx) => execTool("swarm_plan_prompt", args, ctx),
});

const swarm_decompose = tool({
  description: "Generate decomposition prompt for breaking task into subtasks",
  args: {
    task: tool.schema.string().min(1).describe("Task to decompose"),
    max_subtasks: tool.schema
      .number()
      .int()
      .min(2)
      .max(10)
      .optional()
      .describe("Max subtasks"),
    context: tool.schema.string().optional().describe("Additional context"),
    query_cass: tool.schema.boolean().optional().describe("Query CASS"),
    cass_limit: tool.schema
      .number()
      .int()
      .min(1)
      .max(10)
      .optional()
      .describe("CASS limit"),
  },
  execute: (args, ctx) => execTool("swarm_decompose", args, ctx),
});

const swarm_validate_decomposition = tool({
  description: "Validate a decomposition response against CellTreeSchema",
  args: {
    response: tool.schema.string().describe("Decomposition response"),
  },
  execute: (args, ctx) => execTool("swarm_validate_decomposition", args, ctx),
});

const swarm_status = tool({
  description: "Get status of a swarm by epic ID",
  args: {
    epic_id: tool.schema.string().describe("Epic bead ID"),
    project_key: tool.schema.string().describe("Project key"),
  },
  execute: (args, ctx) => execTool("swarm_status", args, ctx),
});

const swarm_progress = tool({
  description: "Report progress on a subtask to coordinator",
  args: {
    project_key: tool.schema.string().describe("Project key"),
    agent_name: tool.schema.string().describe("Agent name"),
    bead_id: tool.schema.string().describe("Cell ID"),
    status: tool.schema
      .enum(["in_progress", "blocked", "completed", "failed"])
      .describe("Status"),
    message: tool.schema.string().optional().describe("Progress message"),
    progress_percent: tool.schema
      .number()
      .min(0)
      .max(100)
      .optional()
      .describe("Progress %"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified"),
  },
  execute: (args, ctx) => execTool("swarm_progress", args, ctx),
});

const swarm_complete = tool({
  description:
    "Mark subtask complete with Verification Gate. Runs UBS scan, typecheck, and tests before allowing completion.",
  args: {
    project_key: tool.schema.string().describe("Project key"),
    agent_name: tool.schema.string().describe("Agent name"),
    bead_id: tool.schema.string().describe("Cell ID"),
    summary: tool.schema.string().describe("Completion summary"),
    evaluation: tool.schema.string().optional().describe("Self-evaluation JSON"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified - will be verified"),
    skip_ubs_scan: tool.schema.boolean().optional().describe("Skip UBS scan"),
    skip_verification: tool.schema
      .boolean()
      .optional()
      .describe("Skip ALL verification (UBS, typecheck, tests)"),
    skip_review: tool.schema
      .boolean()
      .optional()
      .describe("Skip review gate check"),
  },
  execute: (args, ctx) => execTool("swarm_complete", args, ctx),
});

const swarm_record_outcome = tool({
  description: "Record subtask outcome for implicit feedback scoring",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    duration_ms: tool.schema.number().int().min(0).describe("Duration in ms"),
    error_count: tool.schema
      .number()
      .int()
      .min(0)
      .optional()
      .describe("Error count"),
    retry_count: tool.schema
      .number()
      .int()
      .min(0)
      .optional()
      .describe("Retry count"),
    success: tool.schema.boolean().describe("Whether task succeeded"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified"),
    criteria: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Evaluation criteria"),
    strategy: tool.schema
      .enum(["file-based", "feature-based", "risk-based"])
      .optional()
      .describe("Strategy used"),
  },
  execute: (args, ctx) => execTool("swarm_record_outcome", args, ctx),
});

const swarm_subtask_prompt = tool({
  description: "Generate the prompt for a spawned subtask agent",
  args: {
    agent_name: tool.schema.string().describe("Agent name"),
    bead_id: tool.schema.string().describe("Cell ID"),
    epic_id: tool.schema.string().describe("Epic ID"),
    subtask_title: tool.schema.string().describe("Subtask title"),
    subtask_description: tool.schema
      .string()
      .optional()
      .describe("Description"),
    files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
    shared_context: tool.schema.string().optional().describe("Shared context"),
  },
  execute: (args, ctx) => execTool("swarm_subtask_prompt", args, ctx),
});

const swarm_spawn_subtask = tool({
  description: "Prepare a subtask for spawning with Task tool",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    epic_id: tool.schema.string().describe("Epic ID"),
    subtask_title: tool.schema.string().describe("Subtask title"),
    subtask_description: tool.schema
      .string()
      .optional()
      .describe("Description"),
    files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
    shared_context: tool.schema.string().optional().describe("Shared context"),
  },
  execute: (args, ctx) => execTool("swarm_spawn_subtask", args, ctx),
});

const swarm_complete_subtask = tool({
  description: "Handle subtask completion after Task agent returns",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    task_result: tool.schema.string().describe("Task result JSON"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified"),
  },
  execute: (args, ctx) => execTool("swarm_complete_subtask", args, ctx),
});

const swarm_evaluation_prompt = tool({
  description: "Generate self-evaluation prompt for a completed subtask",
  args: {
    bead_id: tool.schema.string().describe("Cell ID"),
    subtask_title: tool.schema.string().describe("Subtask title"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .describe("Files modified"),
  },
  execute: (args, ctx) => execTool("swarm_evaluation_prompt", args, ctx),
});

const swarm_broadcast = tool({
  description:
    "Broadcast context update to all agents working on the same epic",
  args: {
    project_path: tool.schema.string().describe("Project path"),
    agent_name: tool.schema.string().describe("Agent name"),
    epic_id: tool.schema.string().describe("Epic ID"),
    message: tool.schema.string().describe("Context update message"),
    importance: tool.schema
      .enum(["info", "warning", "blocker"])
      .optional()
      .describe("Priority level (default: info)"),
    files_affected: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files this context relates to"),
  },
  execute: (args, ctx) => execTool("swarm_broadcast", args, ctx),
});

// =============================================================================
// Worktree Isolation Tools
// =============================================================================

const swarm_worktree_create = tool({
  description:
    "Create a git worktree for isolated task execution. Worker operates in worktree, not main branch.",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
    task_id: tool.schema.string().describe("Task/bead ID (e.g., bd-abc123.1)"),
    start_commit: tool.schema
      .string()
      .describe("Commit SHA to create worktree at (swarm start point)"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_create", args, ctx),
});

const swarm_worktree_merge = tool({
  description:
    "Cherry-pick commits from worktree back to main branch. Call after worker completes.",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
    task_id: tool.schema.string().describe("Task/bead ID"),
    start_commit: tool.schema
      .string()
      .optional()
      .describe("Original start commit (to find new commits)"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_merge", args, ctx),
});

const swarm_worktree_cleanup = tool({
  description:
    "Remove a worktree after completion or abort. Idempotent - safe to call multiple times.",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
    task_id: tool.schema.string().optional().describe("Task/bead ID to clean up"),
    cleanup_all: tool.schema
      .boolean()
      .optional()
      .describe("Remove all worktrees for this project"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_cleanup", args, ctx),
});

const swarm_worktree_list = tool({
  description: "List all active worktrees for a project",
  args: {
    project_path: tool.schema.string().describe("Absolute path to project root"),
  },
  execute: (args, ctx) => execTool("swarm_worktree_list", args, ctx),
});

// =============================================================================
// Structured Review Tools
// =============================================================================

const swarm_review = tool({
  description:
    "Generate a review prompt for a completed subtask. Includes epic context, dependencies, and diff.",
  args: {
    project_key: tool.schema.string().describe("Project path"),
    epic_id: tool.schema.string().describe("Epic bead ID"),
    task_id: tool.schema.string().describe("Subtask bead ID to review"),
    files_touched: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Files modified (will get diff for these)"),
  },
  execute: (args, ctx) => execTool("swarm_review", args, ctx),
});

const swarm_review_feedback = tool({
  description:
    "Send review feedback to a worker. Tracks attempts (max 3). Fails task after 3 rejections.",
  args: {
    project_key: tool.schema.string().describe("Project path"),
    task_id: tool.schema.string().describe("Subtask bead ID"),
    worker_id: tool.schema.string().describe("Worker agent name"),
    status: tool.schema
      .enum(["approved", "needs_changes"])
      .describe("Review status"),
    summary: tool.schema.string().optional().describe("Review summary"),
    issues: tool.schema
      .string()
      .optional()
      .describe("JSON array of ReviewIssue objects (for needs_changes)"),
  },
  execute: (args, ctx) => execTool("swarm_review_feedback", args, ctx),
});

// =============================================================================
// Skills Tools
// =============================================================================

const skills_list = tool({
  description:
    "List all available skills from global, project, and bundled sources",
  args: {
    source: tool.schema
      .enum(["all", "global", "project", "bundled"])
      .optional()
      .describe("Filter by source (default: all)"),
  },
  execute: (args, ctx) => execTool("skills_list", args, ctx),
});

const skills_read = tool({
  description: "Read a skill's full content including SKILL.md and references",
  args: {
    name: tool.schema.string().describe("Skill name"),
  },
  execute: (args, ctx) => execTool("skills_read", args, ctx),
});

const skills_use = tool({
  description:
    "Get skill content formatted for injection into agent context. Use this when you need to apply a skill's knowledge to the current task.",
  args: {
    name: tool.schema.string().describe("Skill name"),
    context: tool.schema
      .string()
      .optional()
      .describe("Optional context about how the skill will be used"),
  },
  execute: (args, ctx) => execTool("skills_use", args, ctx),
});

const skills_create = tool({
  description: "Create a new skill with SKILL.md template",
  args: {
    name: tool.schema.string().describe("Skill name (kebab-case)"),
    description: tool.schema.string().describe("Brief skill description"),
    scope: tool.schema
      .enum(["global", "project"])
      .optional()
      .describe("Where to create (default: project)"),
    tags: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Skill tags for discovery"),
  },
  execute: (args, ctx) => execTool("skills_create", args, ctx),
});

const skills_update = tool({
  description: "Update an existing skill's SKILL.md content",
  args: {
    name: tool.schema.string().describe("Skill name"),
    content: tool.schema.string().describe("New SKILL.md content"),
  },
  execute: (args, ctx) => execTool("skills_update", args, ctx),
});

const skills_delete = tool({
  description: "Delete a skill (project skills only)",
  args: {
    name: tool.schema.string().describe("Skill name"),
  },
  execute: (args, ctx) => execTool("skills_delete", args, ctx),
});

const skills_init = tool({
  description: "Initialize skills directory in current project",
  args: {
    path: tool.schema
      .string()
      .optional()
      .describe("Custom path (default: .opencode/skills)"),
  },
  execute: (args, ctx) => execTool("skills_init", args, ctx),
});

const skills_add_script = tool({
  description: "Add an executable script to a skill",
  args: {
    skill_name: tool.schema.string().describe("Skill name"),
    script_name: tool.schema.string().describe("Script filename"),
    content: tool.schema.string().describe("Script content"),
    executable: tool.schema
      .boolean()
      .optional()
      .describe("Make executable (default: true)"),
  },
  execute: (args, ctx) => execTool("skills_add_script", args, ctx),
});

const skills_execute = tool({
  description: "Execute a skill's script",
  args: {
    skill_name: tool.schema.string().describe("Skill name"),
    script_name: tool.schema.string().describe("Script to execute"),
    args: tool.schema
      .array(tool.schema.string())
      .optional()
      .describe("Script arguments"),
  },
  execute: (args, ctx) => execTool("skills_execute", args, ctx),
});

// =============================================================================
// Swarm Insights Tools
// =============================================================================

const swarm_get_strategy_insights = tool({
  description: "Get strategy success rates for decomposition planning. Use this when planning task decomposition to see which strategies (file-based, feature-based, risk-based) have historically succeeded or failed. Returns success rates and recommendations based on past swarm outcomes.",
  args: {
    task: tool.schema.string().describe("Task description to analyze for strategy recommendation"),
  },
  execute: (args, ctx) => execTool("swarm_get_strategy_insights", args, ctx),
});

const swarm_get_file_insights = tool({
  description: "Get file-specific gotchas for worker context. Use this when assigning files to workers to warn them about historical failure patterns. Queries past outcomes and semantic memory for file-specific learnings (edge cases, common bugs, performance traps).",
  args: {
    files: tool.schema.array(tool.schema.string()).describe("File paths to get insights for"),
  },
  execute: (args, ctx) => execTool("swarm_get_file_insights", args, ctx),
});

const swarm_get_pattern_insights = tool({
  description: "Get common failure patterns across swarms. Use this during planning or when debugging stuck swarms to see recurring anti-patterns (type errors, timeouts, conflicts, test failures). Returns top 5 most frequent failure patterns with recommendations.",
  args: {},
  execute: (args, ctx) => execTool("swarm_get_pattern_insights", args, ctx),
});

// =============================================================================
// CASS Tools (Cross-Agent Session Search)
// =============================================================================

const cass_search = tool({
  description: "Search across all AI coding agent histories (Claude, Codex, Cursor, Gemini, Aider, ChatGPT, Cline, OpenCode, Amp, Pi-Agent). Query BEFORE solving problems from scratch - another agent may have already solved it. Returns matching sessions ranked by relevance.",
  args: {
    query: tool.schema.string().describe("Search query (e.g., 'authentication error Next.js')"),
    agent: tool.schema
      .string()
      .optional()
      .describe("Filter by agent name (e.g., 'claude', 'cursor')"),
    days: tool.schema
      .number()
      .optional()
      .describe("Only search sessions from last N days"),
    limit: tool.schema
      .number()
      .optional()
      .describe("Max results to return (default: 5)"),
    fields: tool.schema
      .string()
      .optional()
      .describe("Field selection: 'minimal' for compact output (path, line, agent only)"),
  },
  execute: (args, ctx) => execTool("cass_search", args, ctx),
});

const cass_view = tool({
  description: "View a specific conversation/session from search results. Use source_path from cass_search output.",
  args: {
    path: tool.schema
      .string()
      .describe("Path to session file (from cass_search results)"),
    line: tool.schema
      .number()
      .optional()
      .describe("Jump to specific line number"),
  },
  execute: (args, ctx) => execTool("cass_view", args, ctx),
});

const cass_expand = tool({
  description: "Expand context around a specific line in a session. Shows messages before/after.",
  args: {
    path: tool.schema
      .string()
      .describe("Path to session file"),
    line: tool.schema
      .number()
      .describe("Line number to expand around"),
    context: tool.schema
      .number()
      .optional()
      .describe("Number of lines before/after to show (default: 5)"),
  },
  execute: (args, ctx) => execTool("cass_expand", args, ctx),
});

const cass_health = tool({
  description: "Check if cass index is healthy. Exit 0 = ready, Exit 1 = needs indexing. Run this before searching.",
  args: {},
  execute: (args, ctx) => execTool("cass_health", args, ctx),
});

const cass_index = tool({
  description: "Build or rebuild the search index. Run this if health check fails or to pick up new sessions.",
  args: {
    full: tool.schema
      .boolean()
      .optional()
      .describe("Force full rebuild (default: incremental)"),
  },
  execute: (args, ctx) => execTool("cass_index", args, ctx),
});

const cass_stats = tool({
  description: "Show index statistics - how many sessions, messages, agents indexed.",
  args: {},
  execute: (args, ctx) => execTool("cass_stats", args, ctx),
});
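
// Illustrative only: the intended CASS flow, pieced together from the tool
// descriptions above (query text hypothetical):
//
//   cass_health()                            // exit 0 = ready, 1 = needs indexing
//   cass_index({ full: false })              // only if the health check failed
//   cass_search({ query: "auth error Next.js" })
//   cass_view({ path: "<source_path from a search hit>" })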

// =============================================================================
// Plugin Export
// =============================================================================

// =============================================================================
// Compaction Hook - Swarm Recovery Context
// =============================================================================

/**
 * Detection result with confidence level
 */
interface SwarmDetection {
  detected: boolean;
  confidence: "high" | "medium" | "low" | "none";
  reasons: string[];
}

/**
 * Structured state snapshot for LLM-powered compaction
 *
 * This is passed to the lite model to generate a continuation prompt
 * with concrete data instead of just instructions.
 */
interface SwarmStateSnapshot {
  sessionID: string;
  detection: {
    confidence: "high" | "medium" | "low" | "none";
    reasons: string[];
  };
  epic?: {
    id: string;
    title: string;
    status: string;
    subtasks: Array<{
      id: string;
      title: string;
      status: "open" | "in_progress" | "blocked" | "closed";
      files: string[];
      assignedTo?: string;
    }>;
  };
  messages: Array<{
    from: string;
    to: string[];
    subject: string;
    body: string;
    timestamp: number;
    importance?: string;
  }>;
  reservations: Array<{
    agent: string;
    paths: string[];
    exclusive: boolean;
    expiresAt: number;
  }>;
}
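
// Illustrative only: a minimal value of this shape (all data hypothetical):
//
//   const snapshot: SwarmStateSnapshot = {
//     sessionID: "ses_123",
//     detection: { confidence: "high", reasons: ["open epic found"] },
//     epic: {
//       id: "bd-abc123", title: "Add auth", status: "in_progress",
//       subtasks: [{ id: "bd-abc123.1", title: "Login form",
//                    status: "in_progress", files: ["src/login.ts"] }],
//     },
//     messages: [],
//     reservations: [],
//   };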
+
+ /**
+  * Query actual swarm state using spawn (like detectSwarm does)
+  *
+  * Returns structured snapshot of current state for LLM compaction.
+  * Shells out to swarm CLI to get real data.
+  */
+ async function querySwarmState(sessionID: string): Promise<SwarmStateSnapshot> {
+   const startTime = Date.now();
+
+   logCompaction("debug", "query_swarm_state_start", {
+     session_id: sessionID,
+     project_directory: projectDirectory,
+   });
+
+   try {
+     // Query cells via swarm CLI
+     const cliStart = Date.now();
+     const cellsResult = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
+       (resolve) => {
+         const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
+           cwd: projectDirectory,
+           stdio: ["ignore", "pipe", "pipe"],
+         });
+         let stdout = "";
+         let stderr = "";
+         proc.stdout.on("data", (d) => {
+           stdout += d;
+         });
+         proc.stderr.on("data", (d) => {
+           stderr += d;
+         });
+         proc.on("close", (exitCode) =>
+           resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
+         );
+       },
+     );
+     const cliDuration = Date.now() - cliStart;
+
+     logCompaction("debug", "query_swarm_state_cli_complete", {
+       session_id: sessionID,
+       duration_ms: cliDuration,
+       exit_code: cellsResult.exitCode,
+       stdout_length: cellsResult.stdout.length,
+       stderr_length: cellsResult.stderr.length,
+     });
+
+     let cells: any[] = [];
+     if (cellsResult.exitCode === 0) {
+       try {
+         const parsed = JSON.parse(cellsResult.stdout);
+         // Handle wrapped response: { success: true, data: [...] }
+         cells = Array.isArray(parsed) ? parsed : (parsed?.data ?? []);
+       } catch (parseErr) {
+         logCompaction("error", "query_swarm_state_parse_failed", {
+           session_id: sessionID,
+           error: parseErr instanceof Error ? parseErr.message : String(parseErr),
+           stdout_preview: cellsResult.stdout.substring(0, 500),
+         });
+       }
+     }
+
+     logCompaction("debug", "query_swarm_state_cells_parsed", {
+       session_id: sessionID,
+       cell_count: cells.length,
+       cells: cells.map((c: any) => ({
+         id: c.id,
+         title: c.title,
+         type: c.type,
+         status: c.status,
+         parent_id: c.parent_id,
+       })),
+     });
+
+     // Find active epic (first unclosed epic with subtasks)
+     const openEpics = cells.filter(
+       (c: { type?: string; status: string }) =>
+         c.type === "epic" && c.status !== "closed",
+     );
+     const epic = openEpics[0];
+
+     logCompaction("debug", "query_swarm_state_epics", {
+       session_id: sessionID,
+       open_epic_count: openEpics.length,
+       selected_epic: epic ? { id: epic.id, title: epic.title, status: epic.status } : null,
+     });
+
+     // Get subtasks if we have an epic
+     const subtasks =
+       epic && epic.id
+         ? cells.filter(
+             (c: { parent_id?: string }) => c.parent_id === epic.id,
+           )
+         : [];
+
+     logCompaction("debug", "query_swarm_state_subtasks", {
+       session_id: sessionID,
+       subtask_count: subtasks.length,
+       subtasks: subtasks.map((s: any) => ({
+         id: s.id,
+         title: s.title,
+         status: s.status,
+         files: s.files,
+       })),
+     });
+
+     // TODO: Query swarm mail for messages and reservations
+     // For MVP, use empty arrays - the fallback chain handles this
+     const messages: SwarmStateSnapshot["messages"] = [];
+     const reservations: SwarmStateSnapshot["reservations"] = [];
+
+     // Run detection for confidence (already logged internally)
+     const detection = await detectSwarm();
+
+     const snapshot: SwarmStateSnapshot = {
+       sessionID,
+       detection: {
+         confidence: detection.confidence,
+         reasons: detection.reasons,
+       },
+       epic: epic
+         ? {
+             id: epic.id,
+             title: epic.title,
+             status: epic.status,
+             subtasks: subtasks.map((s: {
+               id: string;
+               title: string;
+               status: string;
+               files?: string[];
+             }) => ({
+               id: s.id,
+               title: s.title,
+               status: s.status as "open" | "in_progress" | "blocked" | "closed",
+               files: s.files || [],
+             })),
+           }
+         : undefined,
+       messages,
+       reservations,
+     };
+
+     const totalDuration = Date.now() - startTime;
+     logCompaction("debug", "query_swarm_state_complete", {
+       session_id: sessionID,
+       duration_ms: totalDuration,
+       has_epic: !!snapshot.epic,
+       epic_id: snapshot.epic?.id,
+       subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+       message_count: snapshot.messages.length,
+       reservation_count: snapshot.reservations.length,
+     });
+
+     return snapshot;
+   } catch (err) {
+     logCompaction("error", "query_swarm_state_exception", {
+       session_id: sessionID,
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+       duration_ms: Date.now() - startTime,
+     });
+
+     // If query fails, return minimal snapshot
+     const detection = await detectSwarm();
+     return {
+       sessionID,
+       detection: {
+         confidence: detection.confidence,
+         reasons: detection.reasons,
+       },
+       messages: [],
+       reservations: [],
+     };
+   }
+ }
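+
+ // The CLI can emit either a bare array or a wrapped envelope. A small
+ // illustrative helper (a sketch, not part of the swarm CLI contract) that
+ // mirrors the unwrap logic inlined above:
+ function unwrapCliArray(raw: string): any[] {
+   try {
+     const parsed = JSON.parse(raw);
+     // Bare form: [...]; wrapped form: { success: true, data: [...] }
+     return Array.isArray(parsed) ? parsed : Array.isArray(parsed?.data) ? parsed.data : [];
+   } catch {
+     return []; // unparseable output is treated as "no cells"
+   }
+ }
+ void unwrapCliArray; // illustrative only; the functions above inline this logic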
+
+ /**
+  * Generate compaction prompt using LLM
+  *
+  * Shells out to `opencode run -m <liteModel>` with structured state.
+  * Returns markdown continuation prompt or null on failure.
+  *
+  * Timeout: 30 seconds
+  */
+ async function generateCompactionPrompt(
+   snapshot: SwarmStateSnapshot,
+ ): Promise<string | null> {
+   const startTime = Date.now();
+   const liteModel = process.env.OPENCODE_LITE_MODEL || "__SWARM_LITE_MODEL__";
+
+   logCompaction("debug", "generate_compaction_prompt_start", {
+     session_id: snapshot.sessionID,
+     lite_model: liteModel,
+     has_epic: !!snapshot.epic,
+     epic_id: snapshot.epic?.id,
+     subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+     snapshot_size: JSON.stringify(snapshot).length,
+   });
+
+   try {
+     const promptText = `You are generating a continuation prompt for a compacted swarm coordination session.
+
+ Analyze this swarm state and generate a structured markdown prompt that will be given to the resumed session:
+
+ ${JSON.stringify(snapshot, null, 2)}
+
+ Generate a prompt following this structure:
+
+ ┌─────────────────────────────────────────────────────────────┐
+ │                                                             │
+ │               🐝 YOU ARE THE COORDINATOR 🐝                 │
+ │                                                             │
+ │               NOT A WORKER. NOT AN IMPLEMENTER.             │
+ │               YOU ORCHESTRATE.                              │
+ │                                                             │
+ └─────────────────────────────────────────────────────────────┘
+
+ # 🐝 Swarm Continuation - [Epic Title or "Unknown"]
+
+ **NON-NEGOTIABLE: YOU ARE THE COORDINATOR.** You resumed after context compaction.
+
+ ## Epic State
+
+ **ID:** [epic ID or "Unknown"]
+ **Title:** [epic title or "No active epic"]
+ **Status:** [X/Y subtasks complete]
+ **Project:** ${projectDirectory}
+
+ ## Subtask Status
+
+ ### ✅ Completed (N)
+ [List completed subtasks with IDs]
+
+ ### 🚧 In Progress (N)
+ [List in-progress subtasks with IDs, files, agents if known]
+
+ ### 🚫 Blocked (N)
+ [List blocked subtasks]
+
+ ### ⏳ Pending (N)
+ [List pending subtasks]
+
+ ## Next Actions (IMMEDIATE)
+
+ [List 3-5 concrete actions with actual commands, using real IDs from the state]
+
+ ## 🎯 COORDINATOR MANDATES (NON-NEGOTIABLE)
+
+ **YOU ARE THE COORDINATOR. NOT A WORKER.**
+
+ ### ⛔ FORBIDDEN - NEVER do these:
+ - ❌ NEVER use \`edit\`, \`write\`, or \`bash\` for implementation - SPAWN A WORKER
+ - ❌ NEVER fetch directly with \`repo-crawl_*\`, \`repo-autopsy_*\`, \`webfetch\`, \`fetch_fetch\` - SPAWN A RESEARCHER
+ - ❌ NEVER use \`context7_*\` or \`pdf-brain_*\` directly - SPAWN A RESEARCHER
+ - ❌ NEVER reserve files - Workers reserve files
+
+ ### ✅ ALWAYS do these:
+ - ✅ ALWAYS check \`swarm_status\` and \`swarmmail_inbox\` first
+ - ✅ ALWAYS use \`swarm_spawn_subtask\` for implementation work
+ - ✅ ALWAYS use \`swarm_spawn_researcher\` for external data fetching
+ - ✅ ALWAYS review worker output with \`swarm_review\` → \`swarm_review_feedback\`
+ - ✅ ALWAYS monitor actively - Check messages every ~10 minutes
+ - ✅ ALWAYS unblock aggressively - Resolve dependencies immediately
+
+ **If you need external data:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
+
+ **3-strike rule enforced:** Workers get 3 review attempts. After 3 rejections, escalate to human.
+
+ Keep the prompt concise but actionable. Use actual data from the snapshot, not placeholders. Include the ASCII header and ALL coordinator mandates.`;
+
+     logCompaction("debug", "generate_compaction_prompt_calling_llm", {
+       session_id: snapshot.sessionID,
+       prompt_length: promptText.length,
+       model: liteModel,
+       command: `opencode run -m ${liteModel} -- <prompt>`,
+     });
+
+     const llmStart = Date.now();
+     const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
+       (resolve, reject) => {
+         const proc = spawn("opencode", ["run", "-m", liteModel, "--", promptText], {
+           cwd: projectDirectory,
+           stdio: ["ignore", "pipe", "pipe"],
+           timeout: 30000, // 30 second timeout
+         });
+
+         let stdout = "";
+         let stderr = "";
+
+         proc.stdout.on("data", (d) => {
+           stdout += d;
+         });
+         proc.stderr.on("data", (d) => {
+           stderr += d;
+         });
+
+         // Manual timeout guard; cleared on close/error so a finished process
+         // is not killed and the settled promise is not re-rejected.
+         const timer = setTimeout(() => {
+           proc.kill("SIGTERM");
+           reject(new Error("LLM compaction timeout (30s)"));
+         }, 30000);
+
+         proc.on("close", (exitCode) => {
+           clearTimeout(timer);
+           resolve({ exitCode: exitCode ?? 1, stdout, stderr });
+         });
+
+         proc.on("error", (err) => {
+           clearTimeout(timer);
+           reject(err);
+         });
+       },
+     );
+     const llmDuration = Date.now() - llmStart;
+
+     logCompaction("debug", "generate_compaction_prompt_llm_complete", {
+       session_id: snapshot.sessionID,
+       duration_ms: llmDuration,
+       exit_code: result.exitCode,
+       stdout_length: result.stdout.length,
+       stderr_length: result.stderr.length,
+       stderr_preview: result.stderr.substring(0, 500),
+       stdout_preview: result.stdout.substring(0, 500),
+     });
+
+     if (result.exitCode !== 0) {
+       logCompaction("error", "generate_compaction_prompt_llm_failed", {
+         session_id: snapshot.sessionID,
+         exit_code: result.exitCode,
+         stderr: result.stderr,
+         stdout: result.stdout,
+         duration_ms: llmDuration,
+       });
+       return null;
+     }
+
+     // Extract the prompt from stdout (LLM may wrap in markdown)
+     const prompt = result.stdout.trim();
+
+     const totalDuration = Date.now() - startTime;
+     logCompaction("debug", "generate_compaction_prompt_success", {
+       session_id: snapshot.sessionID,
+       total_duration_ms: totalDuration,
+       llm_duration_ms: llmDuration,
+       prompt_length: prompt.length,
+       prompt_preview: prompt.substring(0, 500),
+       prompt_has_content: prompt.length > 0,
+     });
+
+     return prompt.length > 0 ? prompt : null;
+   } catch (err) {
+     const totalDuration = Date.now() - startTime;
+     logCompaction("error", "generate_compaction_prompt_exception", {
+       session_id: snapshot.sessionID,
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+       duration_ms: totalDuration,
+     });
+     return null;
+   }
+ }
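+
+ // Illustrative usage of the two helpers above (a sketch of the fallback
+ // chain; defined but never called - the function name is hypothetical):
+ async function exampleCompactionFlow(sessionID: string): Promise<string> {
+   const snapshot = await querySwarmState(sessionID);
+   // Level 2: try the lite-model continuation prompt first...
+   const llmPrompt = await generateCompactionPrompt(snapshot);
+   if (llmPrompt) return llmPrompt;
+   // ...then fall back to the static coordinator context defined below.
+   return SWARM_COMPACTION_CONTEXT;
+ }
+ void exampleCompactionFlow;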
+
+ /**
+  * Session message scan result
+  */
+ interface SessionScanResult {
+   messageCount: number;
+   toolCalls: Array<{
+     toolName: string;
+     args: Record<string, unknown>;
+     output?: string;
+     timestamp?: number;
+   }>;
+   swarmDetected: boolean;
+   reasons: string[];
+   /** Projected swarm state from event fold - ground truth from session events */
+   projection?: SwarmProjection;
+ }
+
+ /**
+  * Scan session messages for swarm tool calls
+  *
+  * Uses SDK client to fetch messages and look for swarm activity.
+  * This can detect swarm work even if no cells exist yet.
+  */
+ async function scanSessionMessages(sessionID: string): Promise<SessionScanResult> {
+   const startTime = Date.now();
+   const result: SessionScanResult = {
+     messageCount: 0,
+     toolCalls: [],
+     swarmDetected: false,
+     reasons: [],
+   };
+
+   logCompaction("debug", "session_scan_start", {
+     session_id: sessionID,
+     has_sdk_client: !!sdkClient,
+   });
+
+   if (!sdkClient) {
+     logCompaction("warn", "session_scan_no_sdk_client", {
+       session_id: sessionID,
+     });
+     return result;
+   }
+
+   try {
+     // Fetch session messages
+     const messagesStart = Date.now();
+     const rawResponse = await sdkClient.session.messages({ path: { id: sessionID } });
+     const messagesDuration = Date.now() - messagesStart;
+
+     // Log the RAW response to understand its shape
+     logCompaction("debug", "session_scan_raw_response", {
+       session_id: sessionID,
+       response_type: typeof rawResponse,
+       is_array: Array.isArray(rawResponse),
+       is_null: rawResponse === null,
+       is_undefined: rawResponse === undefined,
+       keys: rawResponse && typeof rawResponse === "object" ? Object.keys(rawResponse) : [],
+       raw_preview: JSON.stringify(rawResponse)?.slice(0, 500),
+     });
+
+     // The response might be wrapped - check common patterns
+     const messages = Array.isArray(rawResponse)
+       ? rawResponse
+       : rawResponse?.data
+         ? rawResponse.data
+         : rawResponse?.messages
+           ? rawResponse.messages
+           : rawResponse?.items
+             ? rawResponse.items
+             : [];
+
+     result.messageCount = messages?.length ?? 0;
+
+     logCompaction("debug", "session_scan_messages_fetched", {
+       session_id: sessionID,
+       duration_ms: messagesDuration,
+       message_count: result.messageCount,
+       extraction_method: Array.isArray(rawResponse) ? "direct_array" : rawResponse?.data ? "data_field" : rawResponse?.messages ? "messages_field" : rawResponse?.items ? "items_field" : "fallback_empty",
+     });
+
+     if (!Array.isArray(messages) || messages.length === 0) {
+       logCompaction("debug", "session_scan_no_messages", {
+         session_id: sessionID,
+       });
+       return result;
+     }
+
+     // Swarm-related tool patterns
+     const swarmTools = [
+       // High confidence - active swarm coordination
+       "hive_create_epic",
+       "swarm_decompose",
+       "swarm_spawn_subtask",
+       "swarm_complete",
+       "swarmmail_init",
+       "swarmmail_reserve",
+       // Medium confidence - swarm activity
+       "hive_start",
+       "hive_close",
+       "swarm_status",
+       "swarm_progress",
+       "swarmmail_send",
+       // Low confidence - possible swarm
+       "hive_create",
+       "hive_query",
+     ];
+
+     const highConfidenceTools = new Set([
+       "hive_create_epic",
+       "swarm_decompose",
+       "swarm_spawn_subtask",
+       "swarmmail_init",
+       "swarmmail_reserve",
+     ]);
+
+     // Scan messages for tool calls
+     let swarmToolCount = 0;
+     let highConfidenceCount = 0;
+
+     // Debug: collect part types to understand message structure
+     const partTypeCounts: Record<string, number> = {};
+     let messagesWithParts = 0;
+     let messagesWithoutParts = 0;
+     const samplePartTypes: string[] = [];
+
+     for (const message of messages) {
+       if (!message.parts || !Array.isArray(message.parts)) {
+         messagesWithoutParts++;
+         continue;
+       }
+       messagesWithParts++;
+
+       for (const part of message.parts) {
+         const partType = part.type || "unknown";
+         partTypeCounts[partType] = (partTypeCounts[partType] || 0) + 1;
+
+         // Collect first 10 unique part types for debugging
+         if (samplePartTypes.length < 10 && !samplePartTypes.includes(partType)) {
+           samplePartTypes.push(partType);
+         }
+
+         // Check if this is a tool call part
+         // OpenCode SDK: ToolPart has type="tool", tool=<string name>, state={...}
+         if (part.type === "tool") {
+           const toolPart = part as ToolPart;
+           const toolName = toolPart.tool; // tool name is a string directly
+
+           if (toolName && swarmTools.includes(toolName)) {
+             swarmToolCount++;
+
+             if (highConfidenceTools.has(toolName)) {
+               highConfidenceCount++;
+             }
+
+             // Extract args/output/timestamp from state if available
+             const state = toolPart.state;
+             const args = state && "input" in state ? state.input : {};
+             const output = state && "output" in state ? state.output : undefined;
+             const timestamp = state && "time" in state && state.time && typeof state.time === "object" && "end" in state.time
+               ? (state.time as { end: number }).end
+               : Date.now();
+
+             result.toolCalls.push({
+               toolName,
+               args,
+               output,
+               timestamp,
+             });
+
+             logCompaction("debug", "session_scan_tool_found", {
+               session_id: sessionID,
+               tool_name: toolName,
+               is_high_confidence: highConfidenceTools.has(toolName),
+             });
+           }
+         }
+       }
+     }
+
+     // =======================================================================
+     // PROJECT SWARM STATE FROM EVENTS (deterministic, no heuristics)
+     // =======================================================================
+     // Convert tool calls to ToolCallEvent format for projection
+     const events: ToolCallEvent[] = result.toolCalls.map(tc => ({
+       tool: tc.toolName,
+       input: tc.args as Record<string, unknown>,
+       output: tc.output || "{}",
+       timestamp: tc.timestamp || Date.now(),
+     }));
+
+     // Project swarm state from events - this is the ground truth
+     const projection = projectSwarmState(events);
+     result.projection = projection;
+
+     // Use projection for swarm detection (deterministic)
+     if (projection.isSwarm) {
+       result.swarmDetected = true;
+       result.reasons.push(`Swarm signature detected: epic ${projection.epic?.id || "unknown"} with ${projection.counts.total} subtasks`);
+
+       if (isSwarmActive(projection)) {
+         result.reasons.push(`Swarm ACTIVE: ${projection.counts.spawned} spawned, ${projection.counts.inProgress} in_progress, ${projection.counts.completed} completed (not closed)`);
+       } else {
+         result.reasons.push(`Swarm COMPLETE: all ${projection.counts.closed} subtasks closed`);
+       }
+     } else if (highConfidenceCount > 0) {
+       // Fallback to heuristic detection if no signature but high-confidence tools found
+       result.swarmDetected = true;
+       result.reasons.push(`${highConfidenceCount} high-confidence swarm tools (${Array.from(new Set(result.toolCalls.filter(tc => highConfidenceTools.has(tc.toolName)).map(tc => tc.toolName))).join(", ")})`);
+     } else if (swarmToolCount > 0) {
+       result.swarmDetected = true;
+       result.reasons.push(`${swarmToolCount} swarm-related tools used`);
+     }
+
+     const totalDuration = Date.now() - startTime;
+
+     // Debug: log part type distribution to understand message structure
+     logCompaction("debug", "session_scan_part_types", {
+       session_id: sessionID,
+       messages_with_parts: messagesWithParts,
+       messages_without_parts: messagesWithoutParts,
+       part_type_counts: partTypeCounts,
+       sample_part_types: samplePartTypes,
+     });
+
+     logCompaction("info", "session_scan_complete", {
+       session_id: sessionID,
+       duration_ms: totalDuration,
+       message_count: result.messageCount,
+       tool_call_count: result.toolCalls.length,
+       swarm_tool_count: swarmToolCount,
+       high_confidence_count: highConfidenceCount,
+       swarm_detected: result.swarmDetected,
+       reasons: result.reasons,
+       unique_tools: Array.from(new Set(result.toolCalls.map(tc => tc.toolName))),
+       // Add projection summary
+       projection_summary: projection.isSwarm ? {
+         epic_id: projection.epic?.id,
+         epic_title: projection.epic?.title,
+         epic_status: projection.epic?.status,
+         is_active: isSwarmActive(projection),
+         counts: projection.counts,
+       } : null,
+     });
+
+     return result;
+   } catch (err) {
+     const totalDuration = Date.now() - startTime;
+     logCompaction("error", "session_scan_exception", {
+       session_id: sessionID,
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+       duration_ms: totalDuration,
+     });
+     return result;
+   }
+ }
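+
+ // Shape assumed for tool parts in SDK messages, per the ToolPart handling
+ // above (illustrative values; the real ToolPart type comes from the SDK):
+ const EXAMPLE_TOOL_PART = {
+   type: "tool",
+   tool: "swarm_spawn_subtask",
+   state: {
+     input: { task_id: "bd-42.2" }, // hypothetical args
+     output: "{}",
+     time: { end: 1700000000000 },
+   },
+ };
+ void EXAMPLE_TOOL_PART;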
+
+ /**
+  * Check for swarm sign - evidence a swarm passed through
+  *
+  * Uses multiple signals with different confidence levels:
+  * - HIGH: in_progress cells (active work)
+  * - MEDIUM: Open subtasks, unclosed epics, recently updated cells
+  * - LOW: Any cells exist
+  *
+  * Philosophy: Err on the side of continuation.
+  * False positive = extra context (low cost)
+  * False negative = lost swarm (high cost)
+  */
+ async function detectSwarm(): Promise<SwarmDetection> {
+   const startTime = Date.now();
+   const reasons: string[] = [];
+   let highConfidence = false;
+   let mediumConfidence = false;
+   let lowConfidence = false;
+
+   logCompaction("debug", "detect_swarm_start", {
+     project_directory: projectDirectory,
+     cwd: process.cwd(),
+   });
+
+   try {
+     const cliStart = Date.now();
+     const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
+       (resolve) => {
+         // Use swarm tool to query cells
+         const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
+           cwd: projectDirectory,
+           stdio: ["ignore", "pipe", "pipe"],
+         });
+         let stdout = "";
+         let stderr = "";
+         proc.stdout.on("data", (d) => {
+           stdout += d;
+         });
+         proc.stderr.on("data", (d) => {
+           stderr += d;
+         });
+         proc.on("close", (exitCode) =>
+           resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
+         );
+       },
+     );
+     const cliDuration = Date.now() - cliStart;
+
+     logCompaction("debug", "detect_swarm_cli_complete", {
+       duration_ms: cliDuration,
+       exit_code: result.exitCode,
+       stdout_length: result.stdout.length,
+       stderr_length: result.stderr.length,
+       stderr_preview: result.stderr.substring(0, 200),
+     });
+
+     if (result.exitCode !== 0) {
+       logCompaction("warn", "detect_swarm_cli_failed", {
+         exit_code: result.exitCode,
+         stderr: result.stderr,
+       });
+       return { detected: false, confidence: "none", reasons: ["hive_query failed"] };
+     }
+
+     let cells: any[];
+     try {
+       const parsed = JSON.parse(result.stdout);
+       // Handle wrapped response ({ success: true, data: [...] }) the same way
+       // querySwarmState does, so both paths agree on the cell list
+       cells = Array.isArray(parsed) ? parsed : (parsed?.data ?? []);
+     } catch (parseErr) {
+       logCompaction("error", "detect_swarm_parse_failed", {
+         error: parseErr instanceof Error ? parseErr.message : String(parseErr),
+         stdout_preview: result.stdout.substring(0, 500),
+       });
+       return { detected: false, confidence: "none", reasons: ["hive_query parse failed"] };
+     }
+
+     if (!Array.isArray(cells) || cells.length === 0) {
+       logCompaction("debug", "detect_swarm_no_cells", {
+         is_array: Array.isArray(cells),
+         length: cells?.length ?? 0,
+       });
+       return { detected: false, confidence: "none", reasons: ["no cells found"] };
+     }
+
+     // Log ALL cells for debugging
+     logCompaction("debug", "detect_swarm_cells_found", {
+       total_cells: cells.length,
+       cells: cells.map((c: any) => ({
+         id: c.id,
+         title: c.title,
+         type: c.type,
+         status: c.status,
+         parent_id: c.parent_id,
+         updated_at: c.updated_at,
+         created_at: c.created_at,
+       })),
+     });
+
+     // HIGH: Any in_progress cells
+     const inProgress = cells.filter(
+       (c: { status: string }) => c.status === "in_progress"
+     );
+     if (inProgress.length > 0) {
+       highConfidence = true;
+       reasons.push(`${inProgress.length} cells in_progress`);
+       logCompaction("debug", "detect_swarm_in_progress", {
+         count: inProgress.length,
+         cells: inProgress.map((c: any) => ({ id: c.id, title: c.title })),
+       });
+     }
+
+     // MEDIUM: Open subtasks (cells with parent_id)
+     const subtasks = cells.filter(
+       (c: { status: string; parent_id?: string }) =>
+         c.status === "open" && c.parent_id
+     );
+     if (subtasks.length > 0) {
+       mediumConfidence = true;
+       reasons.push(`${subtasks.length} open subtasks`);
+       logCompaction("debug", "detect_swarm_open_subtasks", {
+         count: subtasks.length,
+         cells: subtasks.map((c: any) => ({ id: c.id, title: c.title, parent_id: c.parent_id })),
+       });
+     }
+
+     // MEDIUM: Unclosed epics
+     const openEpics = cells.filter(
+       (c: { status: string; type?: string }) =>
+         c.type === "epic" && c.status !== "closed"
+     );
+     if (openEpics.length > 0) {
+       mediumConfidence = true;
+       reasons.push(`${openEpics.length} unclosed epics`);
+       logCompaction("debug", "detect_swarm_open_epics", {
+         count: openEpics.length,
+         cells: openEpics.map((c: any) => ({ id: c.id, title: c.title, status: c.status })),
+       });
+     }
+
+     // MEDIUM: Recently updated cells (last hour)
+     const oneHourAgo = Date.now() - 60 * 60 * 1000;
+     const recentCells = cells.filter(
+       (c: { updated_at?: number }) => c.updated_at && c.updated_at > oneHourAgo
+     );
+     if (recentCells.length > 0) {
+       mediumConfidence = true;
+       reasons.push(`${recentCells.length} cells updated in last hour`);
+       logCompaction("debug", "detect_swarm_recent_cells", {
+         count: recentCells.length,
+         one_hour_ago: oneHourAgo,
+         cells: recentCells.map((c: any) => ({
+           id: c.id,
+           title: c.title,
+           updated_at: c.updated_at,
+           age_minutes: Math.round((Date.now() - c.updated_at) / 60000),
+         })),
+       });
+     }
+
+     // LOW: Any cells exist at all
+     if (cells.length > 0) {
+       lowConfidence = true;
+       reasons.push(`${cells.length} total cells in hive`);
+     }
+   } catch (err) {
+     // Detection failed, use fallback
+     lowConfidence = true;
+     reasons.push("Detection error, using fallback");
+     logCompaction("error", "detect_swarm_exception", {
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+     });
+   }
+
+   // Determine overall confidence
+   let confidence: "high" | "medium" | "low" | "none";
+   if (highConfidence) {
+     confidence = "high";
+   } else if (mediumConfidence) {
+     confidence = "medium";
+   } else if (lowConfidence) {
+     confidence = "low";
+   } else {
+     confidence = "none";
+   }
+
+   const totalDuration = Date.now() - startTime;
+   logCompaction("debug", "detect_swarm_complete", {
+     duration_ms: totalDuration,
+     confidence,
+     detected: confidence !== "none",
+     reason_count: reasons.length,
+     reasons,
+     high_confidence: highConfidence,
+     medium_confidence: mediumConfidence,
+     low_confidence: lowConfidence,
+   });
+
+   return {
+     detected: confidence !== "none",
+     confidence,
+     reasons,
+   };
+ }
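+
+ // The confidence ladder in miniature (hypothetical signal counts, shown
+ // only to make the HIGH/MEDIUM/LOW mapping above concrete):
+ const EXAMPLE_DETECTIONS: SwarmDetection[] = [
+   { detected: true, confidence: "high", reasons: ["2 cells in_progress"] },
+   { detected: true, confidence: "medium", reasons: ["3 open subtasks", "1 unclosed epics"] },
+   { detected: true, confidence: "low", reasons: ["5 total cells in hive"] },
+   { detected: false, confidence: "none", reasons: ["no cells found"] },
+ ];
+ void EXAMPLE_DETECTIONS;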
+
+ /**
+  * Swarm-aware compaction context
+  *
+  * Injected during compaction to keep the swarm cooking. The coordinator should
+  * wake up from compaction and immediately resume orchestration - spawning agents,
+  * monitoring progress, unblocking work.
+  */
+ const SWARM_COMPACTION_CONTEXT = `## 🐝 SWARM ACTIVE - Keep Cooking
+
+ You are the **COORDINATOR** of an active swarm. Context was compacted but the swarm is still running.
+
+ **YOUR JOB:** Keep orchestrating. Spawn agents. Monitor progress. Unblock work. Ship it.
+
+ ### Preserve in Summary
+
+ Extract from session context:
+
+ 1. **Epic & Subtasks** - IDs, titles, status, file assignments
+ 2. **What's Running** - Which agents are active, what they're working on
+ 3. **What's Blocked** - Blockers and what's needed to unblock
+ 4. **What's Done** - Completed work and any follow-ups needed
+ 5. **What's Next** - Pending subtasks ready to spawn
+
+ ### Summary Format
+
+ \`\`\`
+ ## 🐝 Swarm State
+
+ **Epic:** <bd-xxx> - <title>
+ **Project:** <path>
+ **Progress:** X/Y subtasks complete
+
+ **Active:**
+ - <bd-xxx>: <title> [in_progress] → <agent> working on <files>
+
+ **Blocked:**
+ - <bd-xxx>: <title> - BLOCKED: <reason>
+
+ **Completed:**
+ - <bd-xxx>: <title> ✓
+
+ **Ready to Spawn:**
+ - <bd-xxx>: <title> (files: <...>)
+ \`\`\`
+
+ ### On Resume - IMMEDIATELY
+
+ 1. \`swarm_status(epic_id="<epic>", project_key="<path>")\` - Get current state
+ 2. \`swarmmail_inbox(limit=5)\` - Check for agent messages
+ 3. \`swarm_review(project_key, epic_id, task_id, files_touched)\` - Review any completed work
+ 4. \`swarm_review_feedback(project_key, task_id, worker_id, status, issues)\` - Approve or request changes
+ 5. **Spawn ready subtasks** - Don't wait, fire them off
+ 6. **Unblock blocked work** - Resolve dependencies, reassign if needed
+ 7. **Collect completed work** - Close done subtasks, verify quality
+
+ ### Keep the Swarm Cooking
+
+ - **Spawn aggressively** - If a subtask is ready and unblocked, spawn an agent
+ - **Monitor actively** - Check status, read messages, respond to blockers
+ - **Close the loop** - When all subtasks done, verify and close the epic
+ - **Don't stop** - The swarm runs until the epic is closed
+
+ **You are not waiting for instructions. You are the coordinator. Coordinate.**
+ `;
+
+ /**
+  * Fallback detection prompt - tells the compactor what to look for
+  *
+  * Used when we can't definitively detect a swarm but want to be safe.
+  * The compactor can check the conversation context for these patterns.
+  */
+ const SWARM_DETECTION_FALLBACK = `## 🐝 Swarm Detection - Check Your Context
+
+ **IMPORTANT:** Before summarizing, check if this session involves an active swarm.
+
+ Look for ANY of these patterns in the conversation:
+
+ ### Tool Calls (definite swarm sign)
+ - \`swarm_decompose\`, \`swarm_spawn_subtask\`, \`swarm_status\`, \`swarm_complete\`
+ - \`swarmmail_init\`, \`swarmmail_reserve\`, \`swarmmail_send\`
+ - \`hive_create_epic\`, \`hive_start\`, \`hive_close\`
+
+ ### IDs and Names
+ - Cell IDs: \`bd-xxx\`, \`bd-xxx.N\` (subtask format)
+ - Agent names: BlueLake, RedMountain, GreenValley, etc.
+ - Epic references: "epic", "subtask", "parent"
+
+ ### Coordination Language
+ - "spawn", "worker", "coordinator"
+ - "reserve", "reservation", "files"
+ - "blocked", "unblock", "dependency"
+ - "progress", "complete", "in_progress"
+
+ ### If You Find Swarm Evidence
+
+ Include this in your summary:
+ 1. Epic ID and title
+ 2. Project path
+ 3. Subtask status (running/blocked/done/pending)
+ 4. Any blockers or issues
+ 5. What should happen next
+
+ **Then tell the resumed session:**
+ "This is an active swarm. Check swarm_status and swarmmail_inbox immediately."
+ `;
+
+ // Extended hooks type to include experimental compaction hook with new prompt API
+ type CompactionOutput = {
+   context: string[];
+   prompt?: string; // NEW API from OpenCode PR #5907
+ };
+
+ type ExtendedHooks = Hooks & {
+   "experimental.session.compacting"?: (
+     input: { sessionID: string },
+     output: CompactionOutput,
+   ) => Promise<void>;
+ };
2400
+
2401
+ // NOTE: Only default export - named exports cause double registration!
2402
+ // OpenCode's plugin loader calls ALL exports as functions.
2403
+ const SwarmPlugin: Plugin = async (
2404
+ input: PluginInput,
2405
+ ): Promise<ExtendedHooks> => {
2406
+ // CRITICAL: Set project directory from OpenCode input
2407
+ // Without this, CLI uses wrong database path
2408
+ projectDirectory = input.directory;
2409
+
2410
+ // Store SDK client for session message scanning during compaction
2411
+ sdkClient = input.client;
2412
+
2413
+ return {
2414
+ tool: {
2415
+ // Beads
2416
+ hive_create,
2417
+ hive_create_epic,
2418
+ hive_query,
2419
+ hive_update,
2420
+ hive_close,
2421
+ hive_start,
2422
+ hive_ready,
2423
+ hive_cells,
2424
+ hive_sync,
2425
+ beads_link_thread,
2426
+ // Swarm Mail (Embedded)
2427
+ swarmmail_init,
2428
+ swarmmail_send,
2429
+ swarmmail_inbox,
2430
+ swarmmail_read_message,
2431
+ swarmmail_reserve,
2432
+ swarmmail_release,
2433
+ swarmmail_ack,
2434
+ swarmmail_health,
2435
+ // Structured
2436
+ structured_extract_json,
2437
+ structured_validate,
2438
+ structured_parse_evaluation,
2439
+ structured_parse_decomposition,
2440
+ structured_parse_cell_tree,
2441
+ // Swarm
2442
+ swarm_init,
2443
+ swarm_select_strategy,
2444
+ swarm_plan_prompt,
2445
+ swarm_decompose,
2446
+ swarm_validate_decomposition,
2447
+ swarm_status,
2448
+ swarm_progress,
2449
+ swarm_complete,
2450
+ swarm_record_outcome,
2451
+ swarm_subtask_prompt,
2452
+ swarm_spawn_subtask,
2453
+ swarm_complete_subtask,
2454
+ swarm_evaluation_prompt,
2455
+ swarm_broadcast,
2456
+ // Worktree Isolation
2457
+ swarm_worktree_create,
2458
+ swarm_worktree_merge,
2459
+ swarm_worktree_cleanup,
2460
+ swarm_worktree_list,
2461
+ // Structured Review
2462
+ swarm_review,
2463
+ swarm_review_feedback,
2464
+ // Skills
2465
+ skills_list,
2466
+ skills_read,
2467
+ skills_use,
2468
+ skills_create,
2469
+ skills_update,
2470
+ skills_delete,
2471
+ skills_init,
2472
+ skills_add_script,
2473
+ skills_execute,
2474
+ // Swarm Insights
2475
+ swarm_get_strategy_insights,
2476
+ swarm_get_file_insights,
2477
+ swarm_get_pattern_insights,
2478
+ // CASS (Cross-Agent Session Search)
2479
+ cass_search,
2480
+ cass_view,
2481
+ cass_expand,
2482
+ cass_health,
2483
+ cass_index,
2484
+ cass_stats,
2485
+ },
+
+     // Swarm-aware compaction hook with LLM-powered continuation prompts
+     // Three-level fallback chain: LLM → static context → detection fallback → none
+     "experimental.session.compacting": async (
+       input: { sessionID: string },
+       output: CompactionOutput,
+     ) => {
+       const startTime = Date.now();
+
+       // =======================================================================
+       // LOG: Compaction hook invoked - capture EVERYTHING we receive
+       // =======================================================================
+       logCompaction("info", "compaction_hook_invoked", {
+         session_id: input.sessionID,
+         project_directory: projectDirectory,
+         input_keys: Object.keys(input),
+         input_full: JSON.parse(JSON.stringify(input)), // Deep clone for logging
+         output_keys: Object.keys(output),
+         output_context_count: output.context?.length ?? 0,
+         output_has_prompt_field: "prompt" in output,
+         output_initial_state: {
+           context: output.context,
+           prompt: (output as any).prompt,
+         },
+         env: {
+           OPENCODE_SESSION_ID: process.env.OPENCODE_SESSION_ID,
+           OPENCODE_MESSAGE_ID: process.env.OPENCODE_MESSAGE_ID,
+           OPENCODE_AGENT: process.env.OPENCODE_AGENT,
+           OPENCODE_LITE_MODEL: process.env.OPENCODE_LITE_MODEL,
+           SWARM_PROJECT_DIR: process.env.SWARM_PROJECT_DIR,
+         },
+         cwd: process.cwd(),
+         timestamp: new Date().toISOString(),
+       });
+
+       // =======================================================================
+       // STEP 1: Scan session messages for swarm tool calls
+       // =======================================================================
+       const sessionScanStart = Date.now();
+       const sessionScan = await scanSessionMessages(input.sessionID);
+       const sessionScanDuration = Date.now() - sessionScanStart;
+
+       logCompaction("info", "session_scan_results", {
+         session_id: input.sessionID,
+         duration_ms: sessionScanDuration,
+         message_count: sessionScan.messageCount,
+         tool_call_count: sessionScan.toolCalls.length,
+         swarm_detected_from_messages: sessionScan.swarmDetected,
+         reasons: sessionScan.reasons,
+       });
+
+       // =======================================================================
+       // STEP 2: Detect swarm state from hive cells
+       // =======================================================================
+       const detectionStart = Date.now();
+       const detection = await detectSwarm();
+       const detectionDuration = Date.now() - detectionStart;
+
+       logCompaction("info", "swarm_detection_complete", {
+         session_id: input.sessionID,
+         duration_ms: detectionDuration,
+         detected: detection.detected,
+         confidence: detection.confidence,
+         reasons: detection.reasons,
+         reason_count: detection.reasons.length,
+       });
+
+       // =======================================================================
+       // STEP 3: Merge session scan with hive detection for final confidence
+       // =======================================================================
+       // If session messages show high-confidence swarm tools, boost confidence
+       if (sessionScan.swarmDetected && sessionScan.reasons.some(r => r.includes("high-confidence"))) {
+         if (detection.confidence === "none" || detection.confidence === "low") {
+           // Capture the pre-boost value so the log records the true original
+           const originalConfidence = detection.confidence;
+           detection.confidence = "high";
+           detection.detected = true;
+           detection.reasons.push(...sessionScan.reasons);
+
+           logCompaction("info", "confidence_boost_from_session_scan", {
+             session_id: input.sessionID,
+             original_confidence: originalConfidence,
+             boosted_to: "high",
+             session_reasons: sessionScan.reasons,
+           });
+         }
+       } else if (sessionScan.swarmDetected) {
+         // Medium boost for any swarm tools found
+         if (detection.confidence === "none") {
+           detection.confidence = "medium";
+           detection.detected = true;
+           detection.reasons.push(...sessionScan.reasons);
+
+           logCompaction("info", "confidence_boost_from_session_scan", {
+             session_id: input.sessionID,
+             original_confidence: "none",
+             boosted_to: "medium",
+             session_reasons: sessionScan.reasons,
+           });
+         } else if (detection.confidence === "low") {
+           detection.confidence = "medium";
+           detection.reasons.push(...sessionScan.reasons);
+
+           logCompaction("info", "confidence_boost_from_session_scan", {
+             session_id: input.sessionID,
+             original_confidence: "low",
+             boosted_to: "medium",
+             session_reasons: sessionScan.reasons,
+           });
+         }
+       }
+
+       logCompaction("info", "final_swarm_detection", {
+         session_id: input.sessionID,
+         confidence: detection.confidence,
+         detected: detection.detected,
+         combined_reasons: detection.reasons,
+         message_scan_contributed: sessionScan.swarmDetected,
+       });
+
+       if (detection.confidence === "high" || detection.confidence === "medium") {
+         // Definite or probable swarm - try LLM-powered compaction
+         logCompaction("info", "swarm_detected_attempting_llm", {
+           session_id: input.sessionID,
+           confidence: detection.confidence,
+           reasons: detection.reasons,
+           has_projection: !!sessionScan.projection?.isSwarm,
+         });
+
+         try {
+           // =======================================================================
+           // PREFER PROJECTION (ground truth from events) OVER HIVE QUERY
+           // =======================================================================
+           // The projection is derived from session events - it's the source of truth.
+           // Hive query may show all cells closed even if swarm was active.
+
+           let snapshot: SwarmStateSnapshot;
+           // Hoisted so the success log below can report it even when the
+           // projection path (which skips the hive query) was taken.
+           let queryDuration = 0;
+
+           if (sessionScan.projection?.isSwarm) {
+             // Use projection as primary source - convert to snapshot format
+             const proj = sessionScan.projection;
+             snapshot = {
+               sessionID: input.sessionID,
+               detection: {
+                 confidence: isSwarmActive(proj) ? "high" : "medium",
+                 reasons: sessionScan.reasons,
+               },
+               epic: proj.epic ? {
+                 id: proj.epic.id,
+                 title: proj.epic.title,
+                 status: proj.epic.status,
+                 subtasks: Array.from(proj.subtasks.values()).map(s => ({
+                   id: s.id,
+                   title: s.title,
+                   status: s.status as "open" | "in_progress" | "blocked" | "closed",
+                   files: s.files,
+                 })),
+               } : undefined,
+               messages: [],
+               reservations: [],
+             };
+
+             logCompaction("info", "using_projection_as_snapshot", {
+               session_id: input.sessionID,
+               epic_id: proj.epic?.id,
+               epic_title: proj.epic?.title,
+               subtask_count: proj.subtasks.size,
+               is_active: isSwarmActive(proj),
+               counts: proj.counts,
+             });
+           } else {
+             // Fallback to hive query (may be stale)
+             const queryStart = Date.now();
+             snapshot = await querySwarmState(input.sessionID);
+             queryDuration = Date.now() - queryStart;
+
+             logCompaction("info", "fallback_to_hive_query", {
+               session_id: input.sessionID,
+               duration_ms: queryDuration,
+               reason: "no projection available or not a swarm",
+             });
+           }
+
+           logCompaction("info", "swarm_state_resolved", {
+             session_id: input.sessionID,
+             source: sessionScan.projection?.isSwarm ? "projection" : "hive_query",
+             has_epic: !!snapshot.epic,
+             epic_id: snapshot.epic?.id,
+             epic_title: snapshot.epic?.title,
+             epic_status: snapshot.epic?.status,
+             subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+             subtasks: snapshot.epic?.subtasks?.map(s => ({
+               id: s.id,
+               title: s.title,
+               status: s.status,
+               file_count: s.files?.length ?? 0,
+             })),
+             message_count: snapshot.messages?.length ?? 0,
+             reservation_count: snapshot.reservations?.length ?? 0,
+             detection_confidence: snapshot.detection.confidence,
+             detection_reasons: snapshot.detection.reasons,
+           });
+
+           // =======================================================================
+           // CAPTURE POINT 1: Detection complete - record confidence and reasons
+           // =======================================================================
+           await captureCompaction(
+             input.sessionID,
+             snapshot.epic?.id || "unknown",
+             "detection_complete",
+             {
+               confidence: snapshot.detection.confidence,
+               detected: detection.detected,
+               reasons: snapshot.detection.reasons,
+               session_scan_contributed: sessionScan.swarmDetected,
+               session_scan_reasons: sessionScan.reasons,
+               epic_id: snapshot.epic?.id,
+               epic_title: snapshot.epic?.title,
+               subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+             },
+           );
+
+           // Level 2: Generate prompt with LLM
+           const llmStart = Date.now();
+           const llmPrompt = await generateCompactionPrompt(snapshot);
+           const llmDuration = Date.now() - llmStart;
+
+           logCompaction("info", "llm_generation_complete", {
+             session_id: input.sessionID,
+             duration_ms: llmDuration,
+             success: !!llmPrompt,
+             prompt_length: llmPrompt?.length ?? 0,
+             prompt_preview: llmPrompt?.substring(0, 500),
+           });
+
+           // =======================================================================
+           // CAPTURE POINT 2: Prompt generated - record FULL prompt content
+           // =======================================================================
+           if (llmPrompt) {
+             await captureCompaction(
+               input.sessionID,
+               snapshot.epic?.id || "unknown",
+               "prompt_generated",
+               {
+                 prompt_length: llmPrompt.length,
+                 full_prompt: llmPrompt, // FULL content, not truncated
+                 context_type: "llm_generated",
+                 duration_ms: llmDuration,
+               },
+             );
+           }
+
+           if (llmPrompt) {
+             // SUCCESS: Use LLM-generated prompt
+             const header = `[Swarm compaction: LLM-generated, ${detection.reasons.join(", ")}]\n\n`;
+             const fullContent = header + llmPrompt;
+
+             // Progressive enhancement: use new API if available
+             if ("prompt" in output) {
+               output.prompt = fullContent;
+               logCompaction("info", "context_injected_via_prompt_api", {
+                 session_id: input.sessionID,
+                 content_length: fullContent.length,
+                 method: "output.prompt",
+               });
+             } else {
+               output.context.push(fullContent);
+               logCompaction("info", "context_injected_via_context_array", {
+                 session_id: input.sessionID,
+                 content_length: fullContent.length,
+                 method: "output.context.push",
+                 context_count_after: output.context.length,
+               });
+             }
+
+             // =======================================================================
+             // CAPTURE POINT 3a: Context injected (LLM path) - record FULL content
+             // =======================================================================
+             await captureCompaction(
+               input.sessionID,
+               snapshot.epic?.id || "unknown",
+               "context_injected",
+               {
+                 full_content: fullContent, // FULL content, not truncated
+                 content_length: fullContent.length,
+                 injection_method: "prompt" in output ? "output.prompt" : "output.context.push",
+                 context_type: "llm_generated",
+               },
+             );
+
+             const totalDuration = Date.now() - startTime;
+             logCompaction("info", "compaction_complete_llm_success", {
+               session_id: input.sessionID,
+               total_duration_ms: totalDuration,
+               detection_duration_ms: detectionDuration,
+               query_duration_ms: queryDuration,
+               llm_duration_ms: llmDuration,
+               confidence: detection.confidence,
+               context_type: "llm_generated",
+               content_length: fullContent.length,
+             });
+             return;
+           }
+
+           // LLM failed, fall through to static prompt
+           logCompaction("warn", "llm_generation_returned_null", {
+             session_id: input.sessionID,
+             llm_duration_ms: llmDuration,
+             falling_back_to: "static_prompt",
+           });
+         } catch (err) {
+           // LLM failed, fall through to static prompt
+           logCompaction("error", "llm_generation_failed", {
+             session_id: input.sessionID,
+             error: err instanceof Error ? err.message : String(err),
+             error_stack: err instanceof Error ? err.stack : undefined,
+             falling_back_to: "static_prompt",
+           });
+         }
+
+         // Level 3: Fall back to static context
+         const header = `[Swarm detected: ${detection.reasons.join(", ")}]\n\n`;
+         const staticContent = header + SWARM_COMPACTION_CONTEXT;
+         output.context.push(staticContent);
+
+         // =======================================================================
+         // CAPTURE POINT 3b: Context injected (static fallback) - record FULL content
+         // =======================================================================
+         await captureCompaction(
+           input.sessionID,
+           "unknown", // No snapshot available in this path
+           "context_injected",
+           {
+             full_content: staticContent,
+             content_length: staticContent.length,
+             injection_method: "output.context.push",
+             context_type: "static_swarm_context",
+           },
+         );
+
+         const totalDuration = Date.now() - startTime;
+         logCompaction("info", "compaction_complete_static_fallback", {
+           session_id: input.sessionID,
+           total_duration_ms: totalDuration,
+           confidence: detection.confidence,
+           context_type: "static_swarm_context",
+           content_length: staticContent.length,
+           context_count_after: output.context.length,
+         });
+       } else if (detection.confidence === "low") {
+         // Level 4: Possible swarm - inject fallback detection prompt
+         const header = `[Possible swarm: ${detection.reasons.join(", ")}]\n\n`;
+         const fallbackContent = header + SWARM_DETECTION_FALLBACK;
+         output.context.push(fallbackContent);
+
+         // =======================================================================
+         // CAPTURE POINT 3c: Context injected (detection fallback) - record FULL content
+         // =======================================================================
+         await captureCompaction(
+           input.sessionID,
+           "unknown", // No snapshot for low confidence
+           "context_injected",
+           {
+             full_content: fallbackContent,
+             content_length: fallbackContent.length,
+             injection_method: "output.context.push",
+             context_type: "detection_fallback",
+           },
+         );
+
+         const totalDuration = Date.now() - startTime;
+         logCompaction("info", "compaction_complete_detection_fallback", {
+           session_id: input.sessionID,
+           total_duration_ms: totalDuration,
+           confidence: detection.confidence,
+           context_type: "detection_fallback",
+           content_length: fallbackContent.length,
+           context_count_after: output.context.length,
+           reasons: detection.reasons,
+         });
+       } else {
+         // Level 5: confidence === "none" - no injection, probably not a swarm
+         const totalDuration = Date.now() - startTime;
+         logCompaction("info", "compaction_complete_no_swarm", {
+           session_id: input.sessionID,
+           total_duration_ms: totalDuration,
+           confidence: detection.confidence,
+           context_type: "none",
+           reasons: detection.reasons,
+           context_count_unchanged: output.context.length,
+         });
+       }
+
+       // =======================================================================
+       // LOG: Final output state
+       // =======================================================================
+       logCompaction("debug", "compaction_hook_complete_final_state", {
+         session_id: input.sessionID,
+         output_context_count: output.context?.length ?? 0,
+         output_context_lengths: output.context?.map(c => c.length) ?? [],
+         output_has_prompt: !!(output as any).prompt,
+         output_prompt_length: (output as any).prompt?.length ?? 0,
+         total_duration_ms: Date.now() - startTime,
+       });
+     },
+   };
+ };
+
+ export default SwarmPlugin;