opencode-swarm-plugin 0.44.1 → 0.45.0

This diff shows the contents of the two published package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (48)
  1. package/README.md +277 -54
  2. package/bin/swarm.ts +3 -3
  3. package/dist/decision-trace-integration.d.ts +204 -0
  4. package/dist/decision-trace-integration.d.ts.map +1 -0
  5. package/dist/hive.d.ts.map +1 -1
  6. package/dist/hive.js +14834 -0
  7. package/dist/index.d.ts +50 -2
  8. package/dist/index.d.ts.map +1 -1
  9. package/dist/index.js +640 -27
  10. package/dist/plugin.js +395 -27
  11. package/dist/query-tools.d.ts +20 -12
  12. package/dist/query-tools.d.ts.map +1 -1
  13. package/dist/swarm-decompose.d.ts +4 -4
  14. package/dist/swarm-decompose.d.ts.map +1 -1
  15. package/dist/swarm-prompts.d.ts.map +1 -1
  16. package/dist/swarm-prompts.js +39605 -0
  17. package/dist/swarm-review.d.ts.map +1 -1
  18. package/dist/swarm-signature.d.ts +106 -0
  19. package/dist/swarm-signature.d.ts.map +1 -0
  20. package/dist/swarm-strategies.d.ts +16 -3
  21. package/dist/swarm-strategies.d.ts.map +1 -1
  22. package/dist/swarm-validation.d.ts +127 -0
  23. package/dist/swarm-validation.d.ts.map +1 -0
  24. package/dist/swarm.d.ts +4 -2
  25. package/dist/swarm.d.ts.map +1 -1
  26. package/dist/validators/index.d.ts +7 -0
  27. package/dist/validators/index.d.ts.map +1 -0
  28. package/dist/validators/schema-validator.d.ts +58 -0
  29. package/dist/validators/schema-validator.d.ts.map +1 -0
  30. package/examples/commands/swarm.md +745 -0
  31. package/examples/plugin-wrapper-template.ts +2611 -0
  32. package/examples/skills/hive-workflow/SKILL.md +212 -0
  33. package/examples/skills/skill-creator/SKILL.md +223 -0
  34. package/examples/skills/swarm-coordination/SKILL.md +292 -0
  35. package/global-skills/cli-builder/SKILL.md +344 -0
  36. package/global-skills/cli-builder/references/advanced-patterns.md +244 -0
  37. package/global-skills/learning-systems/SKILL.md +644 -0
  38. package/global-skills/skill-creator/LICENSE.txt +202 -0
  39. package/global-skills/skill-creator/SKILL.md +352 -0
  40. package/global-skills/skill-creator/references/output-patterns.md +82 -0
  41. package/global-skills/skill-creator/references/workflows.md +28 -0
  42. package/global-skills/swarm-coordination/SKILL.md +995 -0
  43. package/global-skills/swarm-coordination/references/coordinator-patterns.md +235 -0
  44. package/global-skills/swarm-coordination/references/strategies.md +138 -0
  45. package/global-skills/system-design/SKILL.md +213 -0
  46. package/global-skills/testing-patterns/SKILL.md +430 -0
  47. package/global-skills/testing-patterns/references/dependency-breaking-catalog.md +586 -0
  48. package/package.json +6 -3
package/examples/plugin-wrapper-template.ts
@@ -0,0 +1,2611 @@
+ /**
+  * OpenCode Swarm Plugin Wrapper
+  *
+  * This is a thin wrapper that shells out to the `swarm` CLI for all tool execution.
+  * Generated by: swarm setup
+  *
+  * The plugin only depends on @opencode-ai/plugin (provided by OpenCode).
+  * All tool logic lives in the npm package - this just bridges to it.
+  *
+  * Environment variables:
+  * - OPENCODE_SESSION_ID: Passed to CLI for session state persistence
+  * - OPENCODE_MESSAGE_ID: Passed to CLI for context
+  * - OPENCODE_AGENT: Passed to CLI for context
+  * - SWARM_PROJECT_DIR: Project directory (critical for database path)
+  */
+ import type { Plugin, PluginInput, Hooks } from "@opencode-ai/plugin";
+ import type { ToolPart } from "@opencode-ai/sdk";
+ import { tool } from "@opencode-ai/plugin";
+ import { spawn } from "node:child_process";
+ import { appendFileSync, mkdirSync, existsSync } from "node:fs";
+ import { join } from "node:path";
+ import { homedir } from "node:os";
+
+ // Import swarm signature projection for deterministic swarm detection
+ import {
+   projectSwarmState,
+   hasSwarmSignature,
+   isSwarmActive,
+   getSwarmSummary,
+   type SwarmProjection,
+   type ToolCallEvent,
+ } from "opencode-swarm-plugin";
+
+ const SWARM_CLI = "swarm";
+
+ // =============================================================================
+ // File-based Logging (writes to ~/.config/swarm-tools/logs/)
+ // =============================================================================
+
+ const LOG_DIR = join(homedir(), ".config", "swarm-tools", "logs");
+ const COMPACTION_LOG = join(LOG_DIR, "compaction.log");
+
+ /**
+  * Ensure log directory exists
+  */
+ function ensureLogDir(): void {
+   if (!existsSync(LOG_DIR)) {
+     mkdirSync(LOG_DIR, { recursive: true });
+   }
+ }
+
+ /**
+  * Log a compaction event to file (JSON lines format, compatible with `swarm log`)
+  *
+  * @param level - Log level (info, debug, warn, error)
+  * @param msg - Log message
+  * @param data - Additional structured data
+  */
+ function logCompaction(
+   level: "info" | "debug" | "warn" | "error",
+   msg: string,
+   data?: Record<string, unknown>,
+ ): void {
+   try {
+     ensureLogDir();
+     const entry = JSON.stringify({
+       time: new Date().toISOString(),
+       level,
+       msg,
+       ...data,
+     });
+     appendFileSync(COMPACTION_LOG, entry + "\n");
+   } catch {
+     // Silently fail - logging should never break the plugin
+   }
+ }
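+
+ // Example (editor's sketch): a call like
+ //   logCompaction("info", "compaction_started", { session_id: "ses_123" });
+ // appends one JSON line of the form
+ //   {"time":"2025-01-01T00:00:00.000Z","level":"info","msg":"compaction_started","session_id":"ses_123"}
+ // to ~/.config/swarm-tools/logs/compaction.log, where `swarm log` can read it.
+ // The session ID and timestamp are placeholders.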
+
+ /**
+  * Capture compaction event for evals (non-fatal dynamic import)
+  *
+  * Uses dynamic import to avoid circular dependencies and keep the plugin wrapper
+  * self-contained. Captures COMPACTION events to session JSONL for eval analysis.
+  *
+  * @param sessionID - Session ID
+  * @param epicID - Epic ID (or "unknown" if not detected)
+  * @param compactionType - Event type (detection_complete, prompt_generated, context_injected)
+  * @param payload - Event-specific data (full prompts, detection results, etc.)
+  */
+ async function captureCompaction(
+   sessionID: string,
+   epicID: string,
+   compactionType: "detection_complete" | "prompt_generated" | "context_injected",
+   payload: any,
+ ): Promise<void> {
+   try {
+     // Dynamic import to avoid circular deps (plugin wrapper → src → plugin wrapper)
+     const { captureCompactionEvent } = await import("../src/eval-capture");
+     captureCompactionEvent({
+       session_id: sessionID,
+       epic_id: epicID,
+       compaction_type: compactionType,
+       payload,
+     });
+   } catch (err) {
+     // Non-fatal - capture failures shouldn't break compaction
+     logCompaction("warn", "compaction_capture_failed", {
+       session_id: sessionID,
+       compaction_type: compactionType,
+       error: err instanceof Error ? err.message : String(err),
+     });
+   }
+ }
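+
+ // Example (editor's sketch; both IDs are placeholders): recording that a
+ // continuation prompt was generated for an epic.
+ //   await captureCompaction("ses_123", "bd-epic42", "prompt_generated", {
+ //     prompt_length: 2048,
+ //   });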
+
+ // Module-level project directory - set during plugin initialization
+ // This is CRITICAL: without it, the CLI uses process.cwd() which may be wrong
+ let projectDirectory: string = process.cwd();
+
+ // Module-level SDK client - set during plugin initialization
+ // Used for scanning session messages during compaction
+ let sdkClient: any = null;
+
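+ // Editor's sketch (assumption - the real wiring lives in the plugin factory
+ // further down this file): both module-level values are expected to be set
+ // once during plugin initialization, roughly:
+ //
+ //   export const SwarmPlugin: Plugin = async (input: PluginInput) => {
+ //     projectDirectory = input.directory ?? process.cwd(); // field name assumed
+ //     sdkClient = input.client;                            // field name assumed
+ //     return {} as Hooks;
+ //   };
+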
+ // =============================================================================
+ // CLI Execution Helper
+ // =============================================================================
+
+ /**
+  * Execute a swarm tool via CLI
+  *
+  * Spawns `swarm tool <name> --json '<args>'` and returns the result.
+  * Passes session context via environment variables.
+  *
+  * IMPORTANT: Runs in projectDirectory (set by OpenCode), not process.cwd()
+  */
+ async function execTool(
+   name: string,
+   args: Record<string, unknown>,
+   ctx: { sessionID: string; messageID: string; agent: string },
+ ): Promise<string> {
+   return new Promise((resolve, reject) => {
+     const hasArgs = Object.keys(args).length > 0;
+     const cliArgs = hasArgs
+       ? ["tool", name, "--json", JSON.stringify(args)]
+       : ["tool", name];
+
+     const proc = spawn(SWARM_CLI, cliArgs, {
+       cwd: projectDirectory, // Run in project directory, not plugin directory
+       stdio: ["ignore", "pipe", "pipe"],
+       env: {
+         ...process.env,
+         OPENCODE_SESSION_ID: ctx.sessionID,
+         OPENCODE_MESSAGE_ID: ctx.messageID,
+         OPENCODE_AGENT: ctx.agent,
+         SWARM_PROJECT_DIR: projectDirectory, // Also pass as env var
+       },
+     });
+
+     let stdout = "";
+     let stderr = "";
+
+     proc.stdout.on("data", (data) => {
+       stdout += data;
+     });
+     proc.stderr.on("data", (data) => {
+       stderr += data;
+     });
+
+     proc.on("close", (code) => {
+       if (code === 0) {
+         // Success - return the JSON output
+         try {
+           const result = JSON.parse(stdout);
+           if (result.success && result.data !== undefined) {
+             // Unwrap the data for cleaner tool output
+             resolve(
+               typeof result.data === "string"
+                 ? result.data
+                 : JSON.stringify(result.data, null, 2),
+             );
+           } else if (!result.success && result.error) {
+             // Tool returned an error in JSON format
+             // Handle both string errors and object errors with .message
+             const errorMsg = typeof result.error === "string"
+               ? result.error
+               : (result.error.message || "Tool execution failed");
+             reject(new Error(errorMsg));
+           } else {
+             resolve(stdout);
+           }
+         } catch {
+           resolve(stdout);
+         }
+       } else if (code === 2) {
+         reject(new Error(`Unknown tool: ${name}`));
+       } else if (code === 3) {
+         reject(new Error(`Invalid JSON args: ${stderr}`));
+       } else {
+         // Tool returned an error
+         try {
+           const result = JSON.parse(stdout);
+           if (!result.success && result.error) {
+             // Handle both string errors and object errors with .message
+             const errorMsg = typeof result.error === "string"
+               ? result.error
+               : (result.error.message || `Tool failed with code ${code}`);
+             reject(new Error(errorMsg));
+           } else {
+             reject(
+               new Error(stderr || stdout || `Tool failed with code ${code}`),
+             );
+           }
+         } catch {
+           reject(
+             new Error(stderr || stdout || `Tool failed with code ${code}`),
+           );
+         }
+       }
+     });
+
+     proc.on("error", (err) => {
+       if ((err as NodeJS.ErrnoException).code === "ENOENT") {
+         reject(
+           new Error(
+             `swarm CLI not found. Install with: npm install -g opencode-swarm-plugin`,
+           ),
+         );
+       } else {
+         reject(err);
+       }
+     });
+   });
+ }
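+
+ // Example (editor's sketch): how the tool wrappers below use this bridge.
+ // Equivalent to running `swarm tool hive_query --json '{"status":"open","limit":5}'`
+ // inside the project directory. All ctx values are placeholders.
+ //   const output = await execTool(
+ //     "hive_query",
+ //     { status: "open", limit: 5 },
+ //     { sessionID: "ses_123", messageID: "msg_456", agent: "coordinator" },
+ //   );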
+
+ // =============================================================================
+ // Beads Tools
+ // =============================================================================
+
+ const hive_create = tool({
+   description: "Create a new bead with type-safe validation",
+   args: {
+     title: tool.schema.string().describe("Bead title"),
+     type: tool.schema
+       .enum(["bug", "feature", "task", "epic", "chore"])
+       .optional()
+       .describe("Issue type (default: task)"),
+     priority: tool.schema
+       .number()
+       .min(0)
+       .max(3)
+       .optional()
+       .describe("Priority 0-3 (default: 2)"),
+     description: tool.schema.string().optional().describe("Bead description"),
+     parent_id: tool.schema
+       .string()
+       .optional()
+       .describe("Parent bead ID for epic children"),
+   },
+   execute: (args, ctx) => execTool("hive_create", args, ctx),
+ });
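+
+ // Every tool below follows the same shape as hive_create: args declared with
+ // the schema builders on `tool.schema`, and an `execute` that forwards the
+ // validated args to the CLI through execTool. A sketch of a direct call,
+ // assuming `tool()` returns the definition object as written (ctx values are
+ // placeholders):
+ //   await hive_create.execute(
+ //     { title: "Fix login bug", type: "bug", priority: 1 },
+ //     { sessionID: "ses_123", messageID: "msg_456", agent: "coordinator" },
+ //   );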
+
+ const hive_create_epic = tool({
+   description: "Create epic with subtasks in one atomic operation",
+   args: {
+     epic_title: tool.schema.string().describe("Epic title"),
+     epic_description: tool.schema
+       .string()
+       .optional()
+       .describe("Epic description"),
+     subtasks: tool.schema
+       .array(
+         tool.schema.object({
+           title: tool.schema.string(),
+           priority: tool.schema.number().min(0).max(3).optional(),
+           files: tool.schema.array(tool.schema.string()).optional(),
+         }),
+       )
+       .describe("Subtasks to create under the epic"),
+   },
+   execute: (args, ctx) => execTool("hive_create_epic", args, ctx),
+ });
+
+ const hive_query = tool({
+   description: "Query beads with filters (replaces bd list, bd ready, bd wip)",
+   args: {
+     status: tool.schema
+       .enum(["open", "in_progress", "blocked", "closed"])
+       .optional()
+       .describe("Filter by status"),
+     type: tool.schema
+       .enum(["bug", "feature", "task", "epic", "chore"])
+       .optional()
+       .describe("Filter by type"),
+     ready: tool.schema
+       .boolean()
+       .optional()
+       .describe("Only show unblocked beads"),
+     limit: tool.schema
+       .number()
+       .optional()
+       .describe("Max results (default: 20)"),
+   },
+   execute: (args, ctx) => execTool("hive_query", args, ctx),
+ });
+
+ const hive_update = tool({
+   description: "Update bead status, description, or priority",
+   args: {
+     id: tool.schema.string().describe("Cell ID"),
+     status: tool.schema
+       .enum(["open", "in_progress", "blocked", "closed"])
+       .optional()
+       .describe("New status"),
+     description: tool.schema.string().optional().describe("New description"),
+     priority: tool.schema
+       .number()
+       .min(0)
+       .max(3)
+       .optional()
+       .describe("New priority"),
+   },
+   execute: (args, ctx) => execTool("hive_update", args, ctx),
+ });
+
+ const hive_close = tool({
+   description: "Close a bead with reason",
+   args: {
+     id: tool.schema.string().describe("Cell ID"),
+     reason: tool.schema.string().describe("Completion reason"),
+   },
+   execute: (args, ctx) => execTool("hive_close", args, ctx),
+ });
+
+ const hive_start = tool({
+   description: "Mark a bead as in-progress",
+   args: {
+     id: tool.schema.string().describe("Cell ID"),
+   },
+   execute: (args, ctx) => execTool("hive_start", args, ctx),
+ });
+
+ const hive_ready = tool({
+   description: "Get the next ready bead (unblocked, highest priority)",
+   args: {},
+   execute: (args, ctx) => execTool("hive_ready", args, ctx),
+ });
+
+ const hive_sync = tool({
+   description: "Sync beads to git and push (MANDATORY at session end)",
+   args: {
+     auto_pull: tool.schema.boolean().optional().describe("Pull before sync"),
+   },
+   execute: (args, ctx) => execTool("hive_sync", args, ctx),
+ });
+
+ const hive_cells = tool({
+   description: `Query cells from the hive database with flexible filtering.
+
+ USE THIS TOOL TO:
+ - List all open cells: hive_cells()
+ - Find cells by status: hive_cells({ status: "in_progress" })
+ - Find cells by type: hive_cells({ type: "bug" })
+ - Get a specific cell by partial ID: hive_cells({ id: "mjkmd" })
+ - Get the next ready (unblocked) cell: hive_cells({ ready: true })
+ - Combine filters: hive_cells({ status: "open", type: "task" })
+
+ RETURNS: Array of cells with id, title, status, priority, type, parent_id, created_at, updated_at
+
+ PREFER THIS OVER hive_query when you need to:
+ - See what work is available
+ - Check status of multiple cells
+ - Find cells matching criteria
+ - Look up a cell by partial ID`,
+   args: {
+     id: tool.schema.string().optional().describe("Partial or full cell ID to look up"),
+     status: tool.schema.enum(["open", "in_progress", "blocked", "closed"]).optional().describe("Filter by status"),
+     type: tool.schema.enum(["task", "bug", "feature", "epic", "chore"]).optional().describe("Filter by type"),
+     ready: tool.schema.boolean().optional().describe("If true, return only the next unblocked cell"),
+     limit: tool.schema.number().optional().describe("Max cells to return (default 20)"),
+   },
+   execute: (args, ctx) => execTool("hive_cells", args, ctx),
+ });
+
+ const beads_link_thread = tool({
+   description: "Add metadata linking bead to Agent Mail thread",
+   args: {
+     bead_id: tool.schema.string().describe("Cell ID"),
+     thread_id: tool.schema.string().describe("Agent Mail thread ID"),
+   },
+   execute: (args, ctx) => execTool("beads_link_thread", args, ctx),
+ });
+
+ // =============================================================================
+ // Swarm Mail Tools (Embedded)
+ // =============================================================================
+
+ const swarmmail_init = tool({
+   description: "Initialize Swarm Mail session (REQUIRED FIRST)",
+   args: {
+     project_path: tool.schema.string().describe("Absolute path to the project"),
+     agent_name: tool.schema.string().optional().describe("Custom agent name"),
+     task_description: tool.schema
+       .string()
+       .optional()
+       .describe("Task description"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_init", args, ctx),
+ });
+
+ const swarmmail_send = tool({
+   description: "Send message to other agents via Swarm Mail",
+   args: {
+     to: tool.schema
+       .array(tool.schema.string())
+       .describe("Recipient agent names"),
+     subject: tool.schema.string().describe("Message subject"),
+     body: tool.schema.string().describe("Message body"),
+     thread_id: tool.schema
+       .string()
+       .optional()
+       .describe("Thread ID for grouping"),
+     importance: tool.schema
+       .enum(["low", "normal", "high", "urgent"])
+       .optional()
+       .describe("Message importance"),
+     ack_required: tool.schema
+       .boolean()
+       .optional()
+       .describe("Require acknowledgment"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_send", args, ctx),
+ });
+
+ const swarmmail_inbox = tool({
+   description: "Fetch inbox (CONTEXT-SAFE: bodies excluded, max 5 messages)",
+   args: {
+     limit: tool.schema
+       .number()
+       .max(5)
+       .optional()
+       .describe("Max messages (max 5)"),
+     urgent_only: tool.schema
+       .boolean()
+       .optional()
+       .describe("Only urgent messages"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_inbox", args, ctx),
+ });
+
+ const swarmmail_read_message = tool({
+   description: "Fetch ONE message body by ID",
+   args: {
+     message_id: tool.schema.number().describe("Message ID"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_read_message", args, ctx),
+ });
+
+ const swarmmail_reserve = tool({
+   description: "Reserve file paths for exclusive editing",
+   args: {
+     paths: tool.schema
+       .array(tool.schema.string())
+       .describe("File paths/patterns"),
+     ttl_seconds: tool.schema.number().optional().describe("Reservation TTL"),
+     exclusive: tool.schema.boolean().optional().describe("Exclusive lock"),
+     reason: tool.schema.string().optional().describe("Reservation reason"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_reserve", args, ctx),
+ });
+
+ const swarmmail_release = tool({
+   description: "Release file reservations",
+   args: {
+     paths: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Paths to release"),
+     reservation_ids: tool.schema
+       .array(tool.schema.number())
+       .optional()
+       .describe("Reservation IDs"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_release", args, ctx),
+ });
+
+ const swarmmail_ack = tool({
+   description: "Acknowledge a message",
+   args: {
+     message_id: tool.schema.number().describe("Message ID"),
+   },
+   execute: (args, ctx) => execTool("swarmmail_ack", args, ctx),
+ });
+
+ const swarmmail_health = tool({
+   description: "Check Swarm Mail database health",
+   args: {},
+   execute: (args, ctx) => execTool("swarmmail_health", args, ctx),
+ });
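+
+ // Typical worker sequence with these tools, as implied by their descriptions
+ // (editor's sketch; names and paths are placeholders):
+ //   1. swarmmail_init    - register this agent for the project
+ //   2. swarmmail_reserve - lock the files about to be edited
+ //   3. swarmmail_send    - post progress or questions to other agents
+ //   4. swarmmail_inbox, then swarmmail_read_message - poll headers, read one body
+ //   5. swarmmail_ack / swarmmail_release - acknowledge and free reservations when done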
+
+ // =============================================================================
+ // Structured Tools
+ // =============================================================================
+
+ const structured_extract_json = tool({
+   description: "Extract JSON from markdown/text response",
+   args: {
+     text: tool.schema.string().describe("Text containing JSON"),
+   },
+   execute: (args, ctx) => execTool("structured_extract_json", args, ctx),
+ });
+
+ const structured_validate = tool({
+   description: "Validate agent response against a schema",
+   args: {
+     response: tool.schema.string().describe("Agent response to validate"),
+     schema_name: tool.schema
+       .enum(["evaluation", "task_decomposition", "cell_tree"])
+       .describe("Schema to validate against"),
+     max_retries: tool.schema
+       .number()
+       .min(1)
+       .max(5)
+       .optional()
+       .describe("Max retries"),
+   },
+   execute: (args, ctx) => execTool("structured_validate", args, ctx),
+ });
+
+ const structured_parse_evaluation = tool({
+   description: "Parse and validate evaluation response",
+   args: {
+     response: tool.schema.string().describe("Agent response"),
+   },
+   execute: (args, ctx) => execTool("structured_parse_evaluation", args, ctx),
+ });
+
+ const structured_parse_decomposition = tool({
+   description: "Parse and validate task decomposition response",
+   args: {
+     response: tool.schema.string().describe("Agent response"),
+   },
+   execute: (args, ctx) => execTool("structured_parse_decomposition", args, ctx),
+ });
+
+ const structured_parse_cell_tree = tool({
+   description: "Parse and validate bead tree response",
+   args: {
+     response: tool.schema.string().describe("Agent response"),
+   },
+   execute: (args, ctx) => execTool("structured_parse_cell_tree", args, ctx),
+ });
+
+ // =============================================================================
+ // Swarm Tools
+ // =============================================================================
+
+ const swarm_init = tool({
+   description: "Initialize swarm session and check tool availability",
+   args: {
+     project_path: tool.schema.string().optional().describe("Project path"),
+     isolation: tool.schema
+       .enum(["worktree", "reservation"])
+       .optional()
+       .describe(
+         "Isolation mode: 'worktree' for git worktree isolation, 'reservation' for file reservations (default)",
+       ),
+   },
+   execute: (args, ctx) => execTool("swarm_init", args, ctx),
+ });
+
+ const swarm_select_strategy = tool({
+   description: "Analyze task and recommend decomposition strategy",
+   args: {
+     task: tool.schema.string().min(1).describe("Task to analyze"),
+     codebase_context: tool.schema
+       .string()
+       .optional()
+       .describe("Codebase context"),
+   },
+   execute: (args, ctx) => execTool("swarm_select_strategy", args, ctx),
+ });
+
+ const swarm_plan_prompt = tool({
+   description: "Generate strategy-specific decomposition prompt",
+   args: {
+     task: tool.schema.string().min(1).describe("Task to decompose"),
+     strategy: tool.schema
+       .enum(["file-based", "feature-based", "risk-based", "auto"])
+       .optional()
+       .describe("Decomposition strategy"),
+     max_subtasks: tool.schema
+       .number()
+       .int()
+       .min(2)
+       .max(10)
+       .optional()
+       .describe("Max subtasks"),
+     context: tool.schema.string().optional().describe("Additional context"),
+     query_cass: tool.schema
+       .boolean()
+       .optional()
+       .describe("Query CASS for similar tasks"),
+     cass_limit: tool.schema
+       .number()
+       .int()
+       .min(1)
+       .max(10)
+       .optional()
+       .describe("CASS limit"),
+   },
+   execute: (args, ctx) => execTool("swarm_plan_prompt", args, ctx),
+ });
+
+ const swarm_decompose = tool({
+   description: "Generate decomposition prompt for breaking task into subtasks",
+   args: {
+     task: tool.schema.string().min(1).describe("Task to decompose"),
+     max_subtasks: tool.schema
+       .number()
+       .int()
+       .min(2)
+       .max(10)
+       .optional()
+       .describe("Max subtasks"),
+     context: tool.schema.string().optional().describe("Additional context"),
+     query_cass: tool.schema.boolean().optional().describe("Query CASS"),
+     cass_limit: tool.schema
+       .number()
+       .int()
+       .min(1)
+       .max(10)
+       .optional()
+       .describe("CASS limit"),
+   },
+   execute: (args, ctx) => execTool("swarm_decompose", args, ctx),
+ });
+
+ const swarm_validate_decomposition = tool({
+   description: "Validate a decomposition response against CellTreeSchema",
+   args: {
+     response: tool.schema.string().describe("Decomposition response"),
+   },
+   execute: (args, ctx) => execTool("swarm_validate_decomposition", args, ctx),
+ });
+
+ const swarm_status = tool({
+   description: "Get status of a swarm by epic ID",
+   args: {
+     epic_id: tool.schema.string().describe("Epic bead ID"),
+     project_key: tool.schema.string().describe("Project key"),
+   },
+   execute: (args, ctx) => execTool("swarm_status", args, ctx),
+ });
+
+ const swarm_progress = tool({
+   description: "Report progress on a subtask to coordinator",
+   args: {
+     project_key: tool.schema.string().describe("Project key"),
+     agent_name: tool.schema.string().describe("Agent name"),
+     bead_id: tool.schema.string().describe("Cell ID"),
+     status: tool.schema
+       .enum(["in_progress", "blocked", "completed", "failed"])
+       .describe("Status"),
+     message: tool.schema.string().optional().describe("Progress message"),
+     progress_percent: tool.schema
+       .number()
+       .min(0)
+       .max(100)
+       .optional()
+       .describe("Progress %"),
+     files_touched: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Files modified"),
+   },
+   execute: (args, ctx) => execTool("swarm_progress", args, ctx),
+ });
+
+ const swarm_complete = tool({
+   description:
+     "Mark subtask complete with Verification Gate. Runs UBS scan, typecheck, and tests before allowing completion.",
+   args: {
+     project_key: tool.schema.string().describe("Project key"),
+     agent_name: tool.schema.string().describe("Agent name"),
+     bead_id: tool.schema.string().describe("Cell ID"),
+     summary: tool.schema.string().describe("Completion summary"),
+     evaluation: tool.schema.string().optional().describe("Self-evaluation JSON"),
+     files_touched: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Files modified - will be verified"),
+     skip_ubs_scan: tool.schema.boolean().optional().describe("Skip UBS scan"),
+     skip_verification: tool.schema
+       .boolean()
+       .optional()
+       .describe("Skip ALL verification (UBS, typecheck, tests)"),
+     skip_review: tool.schema
+       .boolean()
+       .optional()
+       .describe("Skip review gate check"),
+   },
+   execute: (args, ctx) => execTool("swarm_complete", args, ctx),
+ });
+
+ const swarm_record_outcome = tool({
+   description: "Record subtask outcome for implicit feedback scoring",
+   args: {
+     bead_id: tool.schema.string().describe("Cell ID"),
+     duration_ms: tool.schema.number().int().min(0).describe("Duration in ms"),
+     error_count: tool.schema
+       .number()
+       .int()
+       .min(0)
+       .optional()
+       .describe("Error count"),
+     retry_count: tool.schema
+       .number()
+       .int()
+       .min(0)
+       .optional()
+       .describe("Retry count"),
+     success: tool.schema.boolean().describe("Whether task succeeded"),
+     files_touched: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Files modified"),
+     criteria: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Evaluation criteria"),
+     strategy: tool.schema
+       .enum(["file-based", "feature-based", "risk-based"])
+       .optional()
+       .describe("Strategy used"),
+   },
+   execute: (args, ctx) => execTool("swarm_record_outcome", args, ctx),
+ });
+
+ const swarm_subtask_prompt = tool({
+   description: "Generate the prompt for a spawned subtask agent",
+   args: {
+     agent_name: tool.schema.string().describe("Agent name"),
+     bead_id: tool.schema.string().describe("Cell ID"),
+     epic_id: tool.schema.string().describe("Epic ID"),
+     subtask_title: tool.schema.string().describe("Subtask title"),
+     subtask_description: tool.schema
+       .string()
+       .optional()
+       .describe("Description"),
+     files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
+     shared_context: tool.schema.string().optional().describe("Shared context"),
+   },
+   execute: (args, ctx) => execTool("swarm_subtask_prompt", args, ctx),
+ });
+
+ const swarm_spawn_subtask = tool({
+   description: "Prepare a subtask for spawning with Task tool",
+   args: {
+     bead_id: tool.schema.string().describe("Cell ID"),
+     epic_id: tool.schema.string().describe("Epic ID"),
+     subtask_title: tool.schema.string().describe("Subtask title"),
+     subtask_description: tool.schema
+       .string()
+       .optional()
+       .describe("Description"),
+     files: tool.schema.array(tool.schema.string()).describe("Files to work on"),
+     shared_context: tool.schema.string().optional().describe("Shared context"),
+   },
+   execute: (args, ctx) => execTool("swarm_spawn_subtask", args, ctx),
+ });
+
+ const swarm_complete_subtask = tool({
+   description: "Handle subtask completion after Task agent returns",
+   args: {
+     bead_id: tool.schema.string().describe("Cell ID"),
+     task_result: tool.schema.string().describe("Task result JSON"),
+     files_touched: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Files modified"),
+   },
+   execute: (args, ctx) => execTool("swarm_complete_subtask", args, ctx),
+ });
+
+ const swarm_evaluation_prompt = tool({
+   description: "Generate self-evaluation prompt for a completed subtask",
+   args: {
+     bead_id: tool.schema.string().describe("Cell ID"),
+     subtask_title: tool.schema.string().describe("Subtask title"),
+     files_touched: tool.schema
+       .array(tool.schema.string())
+       .describe("Files modified"),
+   },
+   execute: (args, ctx) => execTool("swarm_evaluation_prompt", args, ctx),
+ });
+
+ const swarm_broadcast = tool({
+   description:
+     "Broadcast context update to all agents working on the same epic",
+   args: {
+     project_path: tool.schema.string().describe("Project path"),
+     agent_name: tool.schema.string().describe("Agent name"),
+     epic_id: tool.schema.string().describe("Epic ID"),
+     message: tool.schema.string().describe("Context update message"),
+     importance: tool.schema
+       .enum(["info", "warning", "blocker"])
+       .optional()
+       .describe("Priority level (default: info)"),
+     files_affected: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Files this context relates to"),
+   },
+   execute: (args, ctx) => execTool("swarm_broadcast", args, ctx),
+ });
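+
+ // Coordinator flow implied by this tool set (editor's sketch): swarm_init,
+ // then swarm_select_strategy / swarm_plan_prompt / swarm_decompose, validate
+ // with swarm_validate_decomposition, spawn workers with swarm_spawn_subtask,
+ // track them via swarm_status and swarm_progress, finish each subtask with
+ // swarm_complete_subtask and swarm_complete (the verification gate), and
+ // record results with swarm_record_outcome. swarm_broadcast pushes shared
+ // context to every agent on the epic at any point.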
+
+ // =============================================================================
+ // Worktree Isolation Tools
+ // =============================================================================
+
+ const swarm_worktree_create = tool({
+   description:
+     "Create a git worktree for isolated task execution. Worker operates in worktree, not main branch.",
+   args: {
+     project_path: tool.schema.string().describe("Absolute path to project root"),
+     task_id: tool.schema.string().describe("Task/bead ID (e.g., bd-abc123.1)"),
+     start_commit: tool.schema
+       .string()
+       .describe("Commit SHA to create worktree at (swarm start point)"),
+   },
+   execute: (args, ctx) => execTool("swarm_worktree_create", args, ctx),
+ });
+
+ const swarm_worktree_merge = tool({
+   description:
+     "Cherry-pick commits from worktree back to main branch. Call after worker completes.",
+   args: {
+     project_path: tool.schema.string().describe("Absolute path to project root"),
+     task_id: tool.schema.string().describe("Task/bead ID"),
+     start_commit: tool.schema
+       .string()
+       .optional()
+       .describe("Original start commit (to find new commits)"),
+   },
+   execute: (args, ctx) => execTool("swarm_worktree_merge", args, ctx),
+ });
+
+ const swarm_worktree_cleanup = tool({
+   description:
+     "Remove a worktree after completion or abort. Idempotent - safe to call multiple times.",
+   args: {
+     project_path: tool.schema.string().describe("Absolute path to project root"),
+     task_id: tool.schema.string().optional().describe("Task/bead ID to clean up"),
+     cleanup_all: tool.schema
+       .boolean()
+       .optional()
+       .describe("Remove all worktrees for this project"),
+   },
+   execute: (args, ctx) => execTool("swarm_worktree_cleanup", args, ctx),
+ });
+
+ const swarm_worktree_list = tool({
+   description: "List all active worktrees for a project",
+   args: {
+     project_path: tool.schema.string().describe("Absolute path to project root"),
+   },
+   execute: (args, ctx) => execTool("swarm_worktree_list", args, ctx),
+ });
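+
+ // Worktree lifecycle (editor's sketch; the SHA, IDs, and ctx are placeholders):
+ //   await swarm_worktree_create.execute(
+ //     { project_path: "/abs/project", task_id: "bd-abc123.1", start_commit: "abc1234" },
+ //     ctx,
+ //   );
+ //   // ... worker commits inside the worktree ...
+ //   await swarm_worktree_merge.execute(
+ //     { project_path: "/abs/project", task_id: "bd-abc123.1" },
+ //     ctx,
+ //   );
+ //   await swarm_worktree_cleanup.execute(
+ //     { project_path: "/abs/project", task_id: "bd-abc123.1" },
+ //     ctx,
+ //   );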
+
+ // =============================================================================
+ // Structured Review Tools
+ // =============================================================================
+
+ const swarm_review = tool({
+   description:
+     "Generate a review prompt for a completed subtask. Includes epic context, dependencies, and diff.",
+   args: {
+     project_key: tool.schema.string().describe("Project path"),
+     epic_id: tool.schema.string().describe("Epic bead ID"),
+     task_id: tool.schema.string().describe("Subtask bead ID to review"),
+     files_touched: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Files modified (will get diff for these)"),
+   },
+   execute: (args, ctx) => execTool("swarm_review", args, ctx),
+ });
+
+ const swarm_review_feedback = tool({
+   description:
+     "Send review feedback to a worker. Tracks attempts (max 3). Fails task after 3 rejections.",
+   args: {
+     project_key: tool.schema.string().describe("Project path"),
+     task_id: tool.schema.string().describe("Subtask bead ID"),
+     worker_id: tool.schema.string().describe("Worker agent name"),
+     status: tool.schema
+       .enum(["approved", "needs_changes"])
+       .describe("Review status"),
+     summary: tool.schema.string().optional().describe("Review summary"),
+     issues: tool.schema
+       .string()
+       .optional()
+       .describe("JSON array of ReviewIssue objects (for needs_changes)"),
+   },
+   execute: (args, ctx) => execTool("swarm_review_feedback", args, ctx),
+ });
+
+ // =============================================================================
+ // Skills Tools
+ // =============================================================================
+
+ const skills_list = tool({
+   description:
+     "List all available skills from global, project, and bundled sources",
+   args: {
+     source: tool.schema
+       .enum(["all", "global", "project", "bundled"])
+       .optional()
+       .describe("Filter by source (default: all)"),
+   },
+   execute: (args, ctx) => execTool("skills_list", args, ctx),
+ });
+
+ const skills_read = tool({
+   description: "Read a skill's full content including SKILL.md and references",
+   args: {
+     name: tool.schema.string().describe("Skill name"),
+   },
+   execute: (args, ctx) => execTool("skills_read", args, ctx),
+ });
+
+ const skills_use = tool({
+   description:
+     "Get skill content formatted for injection into agent context. Use this when you need to apply a skill's knowledge to the current task.",
+   args: {
+     name: tool.schema.string().describe("Skill name"),
+     context: tool.schema
+       .string()
+       .optional()
+       .describe("Optional context about how the skill will be used"),
+   },
+   execute: (args, ctx) => execTool("skills_use", args, ctx),
+ });
+
+ const skills_create = tool({
+   description: "Create a new skill with SKILL.md template",
+   args: {
+     name: tool.schema.string().describe("Skill name (kebab-case)"),
+     description: tool.schema.string().describe("Brief skill description"),
+     scope: tool.schema
+       .enum(["global", "project"])
+       .optional()
+       .describe("Where to create (default: project)"),
+     tags: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Skill tags for discovery"),
+   },
+   execute: (args, ctx) => execTool("skills_create", args, ctx),
+ });
+
+ const skills_update = tool({
+   description: "Update an existing skill's SKILL.md content",
+   args: {
+     name: tool.schema.string().describe("Skill name"),
+     content: tool.schema.string().describe("New SKILL.md content"),
+   },
+   execute: (args, ctx) => execTool("skills_update", args, ctx),
+ });
+
+ const skills_delete = tool({
+   description: "Delete a skill (project skills only)",
+   args: {
+     name: tool.schema.string().describe("Skill name"),
+   },
+   execute: (args, ctx) => execTool("skills_delete", args, ctx),
+ });
+
+ const skills_init = tool({
+   description: "Initialize skills directory in current project",
+   args: {
+     path: tool.schema
+       .string()
+       .optional()
+       .describe("Custom path (default: .opencode/skills)"),
+   },
+   execute: (args, ctx) => execTool("skills_init", args, ctx),
+ });
+
+ const skills_add_script = tool({
+   description: "Add an executable script to a skill",
+   args: {
+     skill_name: tool.schema.string().describe("Skill name"),
+     script_name: tool.schema.string().describe("Script filename"),
+     content: tool.schema.string().describe("Script content"),
+     executable: tool.schema
+       .boolean()
+       .optional()
+       .describe("Make executable (default: true)"),
+   },
+   execute: (args, ctx) => execTool("skills_add_script", args, ctx),
+ });
+
+ const skills_execute = tool({
+   description: "Execute a skill's script",
+   args: {
+     skill_name: tool.schema.string().describe("Skill name"),
+     script_name: tool.schema.string().describe("Script to execute"),
+     args: tool.schema
+       .array(tool.schema.string())
+       .optional()
+       .describe("Script arguments"),
+   },
+   execute: (args, ctx) => execTool("skills_execute", args, ctx),
+ });
+
+ // =============================================================================
+ // Swarm Insights Tools
+ // =============================================================================
+
+ const swarm_get_strategy_insights = tool({
+   description: "Get strategy success rates for decomposition planning. Use this when planning task decomposition to see which strategies (file-based, feature-based, risk-based) have historically succeeded or failed. Returns success rates and recommendations based on past swarm outcomes.",
+   args: {
+     task: tool.schema.string().describe("Task description to analyze for strategy recommendation"),
+   },
+   execute: (args, ctx) => execTool("swarm_get_strategy_insights", args, ctx),
+ });
+
+ const swarm_get_file_insights = tool({
+   description: "Get file-specific gotchas for worker context. Use this when assigning files to workers to warn them about historical failure patterns. Queries past outcomes and semantic memory for file-specific learnings (edge cases, common bugs, performance traps).",
+   args: {
+     files: tool.schema.array(tool.schema.string()).describe("File paths to get insights for"),
+   },
+   execute: (args, ctx) => execTool("swarm_get_file_insights", args, ctx),
+ });
+
+ const swarm_get_pattern_insights = tool({
+   description: "Get common failure patterns across swarms. Use this during planning or when debugging stuck swarms to see recurring anti-patterns (type errors, timeouts, conflicts, test failures). Returns top 5 most frequent failure patterns with recommendations.",
+   args: {},
+   execute: (args, ctx) => execTool("swarm_get_pattern_insights", args, ctx),
+ });
+
+ // =============================================================================
+ // CASS Tools (Cross-Agent Session Search)
+ // =============================================================================
+
+ const cass_search = tool({
+   description: "Search across all AI coding agent histories (Claude, Codex, Cursor, Gemini, Aider, ChatGPT, Cline, OpenCode, Amp, Pi-Agent). Query BEFORE solving problems from scratch - another agent may have already solved it. Returns matching sessions ranked by relevance.",
+   args: {
+     query: tool.schema.string().describe("Search query (e.g., 'authentication error Next.js')"),
+     agent: tool.schema
+       .string()
+       .optional()
+       .describe("Filter by agent name (e.g., 'claude', 'cursor')"),
+     days: tool.schema
+       .number()
+       .optional()
+       .describe("Only search sessions from last N days"),
+     limit: tool.schema
+       .number()
+       .optional()
+       .describe("Max results to return (default: 5)"),
+     fields: tool.schema
+       .string()
+       .optional()
+       .describe("Field selection: 'minimal' for compact output (path, line, agent only)"),
+   },
+   execute: (args, ctx) => execTool("cass_search", args, ctx),
+ });
+
+ const cass_view = tool({
+   description: "View a specific conversation/session from search results. Use source_path from cass_search output.",
+   args: {
+     path: tool.schema
+       .string()
+       .describe("Path to session file (from cass_search results)"),
+     line: tool.schema
+       .number()
+       .optional()
+       .describe("Jump to specific line number"),
+   },
+   execute: (args, ctx) => execTool("cass_view", args, ctx),
+ });
+
+ const cass_expand = tool({
+   description: "Expand context around a specific line in a session. Shows messages before/after.",
+   args: {
+     path: tool.schema
+       .string()
+       .describe("Path to session file"),
+     line: tool.schema
+       .number()
+       .describe("Line number to expand around"),
+     context: tool.schema
+       .number()
+       .optional()
+       .describe("Number of lines before/after to show (default: 5)"),
+   },
+   execute: (args, ctx) => execTool("cass_expand", args, ctx),
+ });
+
+ const cass_health = tool({
+   description: "Check if cass index is healthy. Exit 0 = ready, Exit 1 = needs indexing. Run this before searching.",
+   args: {},
+   execute: (args, ctx) => execTool("cass_health", args, ctx),
+ });
+
+ const cass_index = tool({
+   description: "Build or rebuild the search index. Run this if health check fails or to pick up new sessions.",
+   args: {
+     full: tool.schema
+       .boolean()
+       .optional()
+       .describe("Force full rebuild (default: incremental)"),
+   },
+   execute: (args, ctx) => execTool("cass_index", args, ctx),
+ });
+
+ const cass_stats = tool({
+   description: "Show index statistics - how many sessions, messages, agents indexed.",
+   args: {},
+   execute: (args, ctx) => execTool("cass_stats", args, ctx),
+ });
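+
+ // CASS usage order implied by the descriptions above (editor's sketch):
+ // run cass_health first; if it reports an unhealthy index, run cass_index to
+ // (re)build; then cass_search, and cass_view / cass_expand on a source_path
+ // from the results. cass_stats is a quick check on what the index covers.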
+
+ // =============================================================================
+ // Plugin Export
+ // =============================================================================
+
+ // =============================================================================
+ // Compaction Hook - Swarm Recovery Context
+ // =============================================================================
+
+ /**
+  * Detection result with confidence level
+  */
+ interface SwarmDetection {
+   detected: boolean;
+   confidence: "high" | "medium" | "low" | "none";
+   reasons: string[];
+ }
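+
+ // Example value (editor's sketch; the reason strings are illustrative):
+ //   {
+ //     detected: true,
+ //     confidence: "high",
+ //     reasons: ["open epic with in-progress subtasks", "recent swarm tool calls"],
+ //   }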
+
+ /**
+  * Structured state snapshot for LLM-powered compaction
+  *
+  * This is passed to the lite model to generate a continuation prompt
+  * with concrete data instead of just instructions.
+  */
+ interface SwarmStateSnapshot {
+   sessionID: string;
+   detection: {
+     confidence: "high" | "medium" | "low" | "none";
+     reasons: string[];
+   };
+   epic?: {
+     id: string;
+     title: string;
+     status: string;
+     subtasks: Array<{
+       id: string;
+       title: string;
+       status: "open" | "in_progress" | "blocked" | "closed";
+       files: string[];
+       assignedTo?: string;
+     }>;
+   };
+   messages: Array<{
+     from: string;
+     to: string[];
+     subject: string;
+     body: string;
+     timestamp: number;
+     importance?: string;
+   }>;
+   reservations: Array<{
+     agent: string;
+     paths: string[];
+     exclusive: boolean;
+     expiresAt: number;
+   }>;
+ }
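+
+ // Example snapshot (editor's sketch; every ID and value is a placeholder):
+ //   {
+ //     sessionID: "ses_123",
+ //     detection: { confidence: "high", reasons: ["open epic"] },
+ //     epic: {
+ //       id: "bd-abc123",
+ //       title: "Add auth flow",
+ //       status: "in_progress",
+ //       subtasks: [
+ //         { id: "bd-abc123.1", title: "Login form", status: "open",
+ //           files: ["src/login.ts"] },
+ //       ],
+ //     },
+ //     messages: [],
+ //     reservations: [],
+ //   }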
+
+ /**
+  * Query actual swarm state using spawn (like detectSwarm does)
+  *
+  * Returns structured snapshot of current state for LLM compaction.
+  * Shells out to swarm CLI to get real data.
+  */
+ async function querySwarmState(sessionID: string): Promise<SwarmStateSnapshot> {
+   const startTime = Date.now();
+
+   logCompaction("debug", "query_swarm_state_start", {
+     session_id: sessionID,
+     project_directory: projectDirectory,
+   });
+
+   try {
+     // Query cells via swarm CLI
+     const cliStart = Date.now();
+     const cellsResult = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
+       (resolve) => {
+         const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
+           cwd: projectDirectory,
+           stdio: ["ignore", "pipe", "pipe"],
+         });
+         let stdout = "";
+         let stderr = "";
+         proc.stdout.on("data", (d) => {
+           stdout += d;
+         });
+         proc.stderr.on("data", (d) => {
+           stderr += d;
+         });
+         proc.on("close", (exitCode) =>
+           resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
+         );
+       },
+     );
+     const cliDuration = Date.now() - cliStart;
+
+     logCompaction("debug", "query_swarm_state_cli_complete", {
+       session_id: sessionID,
+       duration_ms: cliDuration,
+       exit_code: cellsResult.exitCode,
+       stdout_length: cellsResult.stdout.length,
+       stderr_length: cellsResult.stderr.length,
+     });
+
+     let cells: any[] = [];
+     if (cellsResult.exitCode === 0) {
+       try {
+         const parsed = JSON.parse(cellsResult.stdout);
+         // Handle wrapped response: { success: true, data: [...] }
+         cells = Array.isArray(parsed) ? parsed : (parsed?.data ?? []);
+       } catch (parseErr) {
+         logCompaction("error", "query_swarm_state_parse_failed", {
+           session_id: sessionID,
+           error: parseErr instanceof Error ? parseErr.message : String(parseErr),
+           stdout_preview: cellsResult.stdout.substring(0, 500),
+         });
+       }
+     }
+
+     logCompaction("debug", "query_swarm_state_cells_parsed", {
+       session_id: sessionID,
+       cell_count: cells.length,
+       cells: cells.map((c: any) => ({
+         id: c.id,
+         title: c.title,
+         type: c.type,
+         status: c.status,
+         parent_id: c.parent_id,
+       })),
+     });
+
+     // Find active epic (first unclosed epic with subtasks)
+     const openEpics = cells.filter(
+       (c: { type?: string; status: string }) =>
+         c.type === "epic" && c.status !== "closed",
+     );
+     const epic = openEpics[0];
+
+     logCompaction("debug", "query_swarm_state_epics", {
+       session_id: sessionID,
+       open_epic_count: openEpics.length,
+       selected_epic: epic ? { id: epic.id, title: epic.title, status: epic.status } : null,
+     });
+
+     // Get subtasks if we have an epic
+     const subtasks =
+       epic && epic.id
+         ? cells.filter(
+             (c: { parent_id?: string }) => c.parent_id === epic.id,
+           )
+         : [];
+
+     logCompaction("debug", "query_swarm_state_subtasks", {
+       session_id: sessionID,
+       subtask_count: subtasks.length,
+       subtasks: subtasks.map((s: any) => ({
+         id: s.id,
+         title: s.title,
+         status: s.status,
+         files: s.files,
+       })),
+     });
+
+     // TODO: Query swarm mail for messages and reservations
+     // For MVP, use empty arrays - the fallback chain handles this
+     const messages: SwarmStateSnapshot["messages"] = [];
+     const reservations: SwarmStateSnapshot["reservations"] = [];
+
+     // Run detection for confidence (already logged internally)
+     const detection = await detectSwarm();
+
+     const snapshot: SwarmStateSnapshot = {
+       sessionID,
+       detection: {
+         confidence: detection.confidence,
+         reasons: detection.reasons,
+       },
+       epic: epic
+         ? {
+             id: epic.id,
+             title: epic.title,
+             status: epic.status,
+             subtasks: subtasks.map((s: {
+               id: string;
+               title: string;
+               status: string;
+               files?: string[];
+             }) => ({
+               id: s.id,
+               title: s.title,
+               status: s.status as "open" | "in_progress" | "blocked" | "closed",
+               files: s.files || [],
+             })),
+           }
+         : undefined,
+       messages,
+       reservations,
+     };
+
+     const totalDuration = Date.now() - startTime;
+     logCompaction("debug", "query_swarm_state_complete", {
+       session_id: sessionID,
+       duration_ms: totalDuration,
+       has_epic: !!snapshot.epic,
+       epic_id: snapshot.epic?.id,
+       subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+       message_count: snapshot.messages.length,
+       reservation_count: snapshot.reservations.length,
+     });
+
+     return snapshot;
+   } catch (err) {
+     logCompaction("error", "query_swarm_state_exception", {
+       session_id: sessionID,
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+       duration_ms: Date.now() - startTime,
+     });
+
+     // If query fails, return minimal snapshot
+     const detection = await detectSwarm();
+     return {
+       sessionID,
+       detection: {
+         confidence: detection.confidence,
+         reasons: detection.reasons,
+       },
+       messages: [],
+       reservations: [],
+     };
+   }
+ }
+
+ /**
+  * Generate compaction prompt using LLM
+  *
+  * Shells out to `opencode run -m <liteModel>` with structured state.
+  * Returns markdown continuation prompt or null on failure.
+  *
+  * Timeout: 30 seconds
+  */
+ async function generateCompactionPrompt(
+   snapshot: SwarmStateSnapshot,
+ ): Promise<string | null> {
+   const startTime = Date.now();
+   const liteModel = process.env.OPENCODE_LITE_MODEL || "__SWARM_LITE_MODEL__";
+
+   logCompaction("debug", "generate_compaction_prompt_start", {
+     session_id: snapshot.sessionID,
+     lite_model: liteModel,
+     has_epic: !!snapshot.epic,
+     epic_id: snapshot.epic?.id,
+     subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+     snapshot_size: JSON.stringify(snapshot).length,
+   });
+
+   try {
+     const promptText = `You are generating a continuation prompt for a compacted swarm coordination session.
+
+ Analyze this swarm state and generate a structured markdown prompt that will be given to the resumed session:
+
+ ${JSON.stringify(snapshot, null, 2)}
+
+ Generate a prompt following this structure:
+
+ ┌─────────────────────────────────────────────────────────────┐
+ │                                                             │
+ │                🐝 YOU ARE THE COORDINATOR 🐝                │
+ │                                                             │
+ │              NOT A WORKER. NOT AN IMPLEMENTER.              │
+ │                      YOU ORCHESTRATE.                       │
+ │                                                             │
+ └─────────────────────────────────────────────────────────────┘
+
+ # 🐝 Swarm Continuation - [Epic Title or "Unknown"]
+
+ **NON-NEGOTIABLE: YOU ARE THE COORDINATOR.** You resumed after context compaction.
+
+ ## Epic State
+
+ **ID:** [epic ID or "Unknown"]
+ **Title:** [epic title or "No active epic"]
+ **Status:** [X/Y subtasks complete]
+ **Project:** ${projectDirectory}
+
+ ## Subtask Status
+
+ ### ✅ Completed (N)
+ [List completed subtasks with IDs]
+
+ ### 🚧 In Progress (N)
+ [List in-progress subtasks with IDs, files, agents if known]
+
+ ### 🚫 Blocked (N)
+ [List blocked subtasks]
+
+ ### ⏳ Pending (N)
+ [List pending subtasks]
+
+ ## Next Actions (IMMEDIATE)
+
+ [List 3-5 concrete actions with actual commands, using real IDs from the state]
+
+ ## 🎯 COORDINATOR MANDATES (NON-NEGOTIABLE)
+
+ **YOU ARE THE COORDINATOR. NOT A WORKER.**
+
+ ### ⛔ FORBIDDEN - NEVER do these:
+ - ❌ NEVER use \`edit\`, \`write\`, or \`bash\` for implementation - SPAWN A WORKER
+ - ❌ NEVER fetch directly with \`repo-crawl_*\`, \`repo-autopsy_*\`, \`webfetch\`, \`fetch_fetch\` - SPAWN A RESEARCHER
+ - ❌ NEVER use \`context7_*\` or \`pdf-brain_*\` directly - SPAWN A RESEARCHER
+ - ❌ NEVER reserve files - Workers reserve files
+
+ ### ✅ ALWAYS do these:
+ - ✅ ALWAYS check \`swarm_status\` and \`swarmmail_inbox\` first
+ - ✅ ALWAYS use \`swarm_spawn_subtask\` for implementation work
+ - ✅ ALWAYS use \`swarm_spawn_researcher\` for external data fetching
+ - ✅ ALWAYS review worker output with \`swarm_review\` → \`swarm_review_feedback\`
+ - ✅ ALWAYS monitor actively - Check messages every ~10 minutes
+ - ✅ ALWAYS unblock aggressively - Resolve dependencies immediately
+
+ **If you need external data:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
+
+ **3-strike rule enforced:** Workers get 3 review attempts. After 3 rejections, escalate to human.
+
+ Keep the prompt concise but actionable. Use actual data from the snapshot, not placeholders. Include the ASCII header and ALL coordinator mandates.`;
+
+     logCompaction("debug", "generate_compaction_prompt_calling_llm", {
+       session_id: snapshot.sessionID,
+       prompt_length: promptText.length,
+       model: liteModel,
+       command: `opencode run -m ${liteModel} -- <prompt>`,
+     });
+
+     const llmStart = Date.now();
+     const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
+       (resolve, reject) => {
+         const proc = spawn("opencode", ["run", "-m", liteModel, "--", promptText], {
+           cwd: projectDirectory,
+           stdio: ["ignore", "pipe", "pipe"],
+           timeout: 30000, // 30 second timeout
+         });
+
+         let stdout = "";
+         let stderr = "";
+
+         proc.stdout.on("data", (d) => {
+           stdout += d;
+         });
+         proc.stderr.on("data", (d) => {
+           stderr += d;
+         });
+
+         proc.on("close", (exitCode) => {
+           clearTimeout(timer);
+           resolve({ exitCode: exitCode ?? 1, stdout, stderr });
+         });
+
+         proc.on("error", (err) => {
+           clearTimeout(timer);
+           reject(err);
+         });
+
+         // Timeout handling - cleared on close/error so a finished process
+         // is not killed and the settled promise is not rejected again
+         const timer = setTimeout(() => {
+           proc.kill("SIGTERM");
+           reject(new Error("LLM compaction timeout (30s)"));
+         }, 30000);
+       },
+     );
+     const llmDuration = Date.now() - llmStart;
+
+     logCompaction("debug", "generate_compaction_prompt_llm_complete", {
+       session_id: snapshot.sessionID,
+       duration_ms: llmDuration,
+       exit_code: result.exitCode,
+       stdout_length: result.stdout.length,
+       stderr_length: result.stderr.length,
+       stderr_preview: result.stderr.substring(0, 500),
+       stdout_preview: result.stdout.substring(0, 500),
+     });
+
+     if (result.exitCode !== 0) {
+       logCompaction("error", "generate_compaction_prompt_llm_failed", {
+         session_id: snapshot.sessionID,
+         exit_code: result.exitCode,
+         stderr: result.stderr,
+         stdout: result.stdout,
+         duration_ms: llmDuration,
+       });
+       return null;
+     }
+
+     // Extract the prompt from stdout (LLM may wrap in markdown)
+     const prompt = result.stdout.trim();
+
+     const totalDuration = Date.now() - startTime;
+     logCompaction("debug", "generate_compaction_prompt_success", {
+       session_id: snapshot.sessionID,
+       total_duration_ms: totalDuration,
+       llm_duration_ms: llmDuration,
+       prompt_length: prompt.length,
+       prompt_preview: prompt.substring(0, 500),
+       prompt_has_content: prompt.length > 0,
+     });
+
+     return prompt.length > 0 ? prompt : null;
+   } catch (err) {
+     const totalDuration = Date.now() - startTime;
+     logCompaction("error", "generate_compaction_prompt_exception", {
+       session_id: snapshot.sessionID,
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+       duration_ms: totalDuration,
+     });
+     return null;
+   }
+ }
1538
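+
+ // Usage sketch (illustrative only, not part of the generated wrapper): calling
+ // generateCompactionPrompt in isolation. The snapshot fields mirror how
+ // SwarmStateSnapshot is constructed in the compaction hook below; the IDs and
+ // titles here are invented for the example.
+ async function exampleGenerateCompactionPrompt(): Promise<void> {
+   const exampleSnapshot: SwarmStateSnapshot = {
+     sessionID: "ses_example",
+     detection: { confidence: "high", reasons: ["2 cells in_progress"] },
+     epic: {
+       id: "bd-001",
+       title: "Example epic",
+       status: "in_progress",
+       subtasks: [
+         { id: "bd-001.1", title: "Example subtask", status: "in_progress", files: ["src/a.ts"] },
+       ],
+     },
+     messages: [],
+     reservations: [],
+   };
+   // Returns the continuation prompt, or null so the caller can fall back to
+   // the static SWARM_COMPACTION_CONTEXT defined further down.
+   const prompt = await generateCompactionPrompt(exampleSnapshot);
+   console.log(prompt ?? "(LLM generation failed - use the static fallback)");
+ }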
+
+ /**
+  * Session message scan result
+  */
+ interface SessionScanResult {
+   messageCount: number;
+   toolCalls: Array<{
+     toolName: string;
+     args: Record<string, unknown>;
+     output?: string;
+     timestamp?: number;
+   }>;
+   swarmDetected: boolean;
+   reasons: string[];
+   /** Projected swarm state from event fold - ground truth from session events */
+   projection?: SwarmProjection;
+ }
+
+ /**
+  * Scan session messages for swarm tool calls
+  *
+  * Uses the SDK client to fetch messages and look for swarm activity.
+  * This can detect swarm work even if no cells exist yet.
+  */
+ async function scanSessionMessages(sessionID: string): Promise<SessionScanResult> {
+   const startTime = Date.now();
+   const result: SessionScanResult = {
+     messageCount: 0,
+     toolCalls: [],
+     swarmDetected: false,
+     reasons: [],
+   };
+
+   logCompaction("debug", "session_scan_start", {
+     session_id: sessionID,
+     has_sdk_client: !!sdkClient,
+   });
+
+   if (!sdkClient) {
+     logCompaction("warn", "session_scan_no_sdk_client", {
+       session_id: sessionID,
+     });
+     return result;
+   }
+
+   try {
+     // Fetch session messages
+     const messagesStart = Date.now();
+     const rawResponse = await sdkClient.session.messages({ path: { id: sessionID } });
+     const messagesDuration = Date.now() - messagesStart;
+
+     // Log the RAW response to understand its shape
+     logCompaction("debug", "session_scan_raw_response", {
+       session_id: sessionID,
+       response_type: typeof rawResponse,
+       is_array: Array.isArray(rawResponse),
+       is_null: rawResponse === null,
+       is_undefined: rawResponse === undefined,
+       keys: rawResponse && typeof rawResponse === "object" ? Object.keys(rawResponse) : [],
+       raw_preview: JSON.stringify(rawResponse)?.slice(0, 500),
+     });
+
+     // The response might be wrapped - check common envelope patterns
+     const messages = Array.isArray(rawResponse)
+       ? rawResponse
+       : rawResponse?.data
+         ? rawResponse.data
+         : rawResponse?.messages
+           ? rawResponse.messages
+           : rawResponse?.items
+             ? rawResponse.items
+             : [];
+
+     result.messageCount = messages?.length ?? 0;
+
+     logCompaction("debug", "session_scan_messages_fetched", {
+       session_id: sessionID,
+       duration_ms: messagesDuration,
+       message_count: result.messageCount,
+       extraction_method: Array.isArray(rawResponse)
+         ? "direct_array"
+         : rawResponse?.data
+           ? "data_field"
+           : rawResponse?.messages
+             ? "messages_field"
+             : rawResponse?.items
+               ? "items_field"
+               : "fallback_empty",
+     });
+
+     if (!Array.isArray(messages) || messages.length === 0) {
+       logCompaction("debug", "session_scan_no_messages", {
+         session_id: sessionID,
+       });
+       return result;
+     }
+
+     // Swarm-related tool patterns
+     const swarmTools = [
+       // High confidence - active swarm coordination
+       "hive_create_epic",
+       "swarm_decompose",
+       "swarm_spawn_subtask",
+       "swarm_complete",
+       "swarmmail_init",
+       "swarmmail_reserve",
+       // Medium confidence - swarm activity
+       "hive_start",
+       "hive_close",
+       "swarm_status",
+       "swarm_progress",
+       "swarmmail_send",
+       // Low confidence - possible swarm
+       "hive_create",
+       "hive_query",
+     ];
+
+     const highConfidenceTools = new Set([
+       "hive_create_epic",
+       "swarm_decompose",
+       "swarm_spawn_subtask",
+       "swarmmail_init",
+       "swarmmail_reserve",
+     ]);
+
+     // Scan messages for tool calls
+     let swarmToolCount = 0;
+     let highConfidenceCount = 0;
+
+     // Debug: collect part types to understand message structure
+     const partTypeCounts: Record<string, number> = {};
+     let messagesWithParts = 0;
+     let messagesWithoutParts = 0;
+     const samplePartTypes: string[] = [];
+
+     for (const message of messages) {
+       if (!message.parts || !Array.isArray(message.parts)) {
+         messagesWithoutParts++;
+         continue;
+       }
+       messagesWithParts++;
+
+       for (const part of message.parts) {
+         const partType = part.type || "unknown";
+         partTypeCounts[partType] = (partTypeCounts[partType] || 0) + 1;
+
+         // Collect the first 10 unique part types for debugging
+         if (samplePartTypes.length < 10 && !samplePartTypes.includes(partType)) {
+           samplePartTypes.push(partType);
+         }
+
+         // Check if this is a tool call part
+         // OpenCode SDK: ToolPart has type="tool", tool=<string name>, state={...}
+         if (part.type === "tool") {
+           const toolPart = part as ToolPart;
+           const toolName = toolPart.tool; // tool name is a string directly
+
+           if (toolName && swarmTools.includes(toolName)) {
+             swarmToolCount++;
+
+             if (highConfidenceTools.has(toolName)) {
+               highConfidenceCount++;
+             }
+
+             // Extract args/output/timestamp from state if available
+             const state = toolPart.state;
+             const args = state && "input" in state ? state.input : {};
+             const output = state && "output" in state ? state.output : undefined;
+             const timestamp =
+               state && "time" in state && state.time && typeof state.time === "object" && "end" in state.time
+                 ? (state.time as { end: number }).end
+                 : Date.now();
+
+             result.toolCalls.push({
+               toolName,
+               args,
+               output,
+               timestamp,
+             });
+
+             logCompaction("debug", "session_scan_tool_found", {
+               session_id: sessionID,
+               tool_name: toolName,
+               is_high_confidence: highConfidenceTools.has(toolName),
+             });
+           }
+         }
+       }
+     }
+
+     // =======================================================================
+     // PROJECT SWARM STATE FROM EVENTS (deterministic, no heuristics)
+     // =======================================================================
+     // Convert tool calls to ToolCallEvent format for projection
+     const events: ToolCallEvent[] = result.toolCalls.map((tc) => ({
+       tool: tc.toolName,
+       input: tc.args as Record<string, unknown>,
+       output: tc.output || "{}",
+       timestamp: tc.timestamp || Date.now(),
+     }));
+
+     // Project swarm state from events - this is the ground truth
+     const projection = projectSwarmState(events);
+     result.projection = projection;
+
+     // Use the projection for swarm detection (deterministic)
+     if (projection.isSwarm) {
+       result.swarmDetected = true;
+       result.reasons.push(
+         `Swarm signature detected: epic ${projection.epic?.id || "unknown"} with ${projection.counts.total} subtasks`,
+       );
+
+       if (isSwarmActive(projection)) {
+         result.reasons.push(
+           `Swarm ACTIVE: ${projection.counts.spawned} spawned, ${projection.counts.inProgress} in_progress, ${projection.counts.completed} completed (not closed)`,
+         );
+       } else {
+         result.reasons.push(`Swarm COMPLETE: all ${projection.counts.closed} subtasks closed`);
+       }
+     } else if (highConfidenceCount > 0) {
+       // Fall back to heuristic detection if there is no signature but high-confidence tools were found
+       result.swarmDetected = true;
+       result.reasons.push(
+         `${highConfidenceCount} high-confidence swarm tools (${Array.from(
+           new Set(result.toolCalls.filter((tc) => highConfidenceTools.has(tc.toolName)).map((tc) => tc.toolName)),
+         ).join(", ")})`,
+       );
+     } else if (swarmToolCount > 0) {
+       result.swarmDetected = true;
+       result.reasons.push(`${swarmToolCount} swarm-related tools used`);
+     }
+
+     const totalDuration = Date.now() - startTime;
+
+     // Debug: log the part type distribution to understand message structure
+     logCompaction("debug", "session_scan_part_types", {
+       session_id: sessionID,
+       messages_with_parts: messagesWithParts,
+       messages_without_parts: messagesWithoutParts,
+       part_type_counts: partTypeCounts,
+       sample_part_types: samplePartTypes,
+     });
+
+     logCompaction("info", "session_scan_complete", {
+       session_id: sessionID,
+       duration_ms: totalDuration,
+       message_count: result.messageCount,
+       tool_call_count: result.toolCalls.length,
+       swarm_tool_count: swarmToolCount,
+       high_confidence_count: highConfidenceCount,
+       swarm_detected: result.swarmDetected,
+       reasons: result.reasons,
+       unique_tools: Array.from(new Set(result.toolCalls.map((tc) => tc.toolName))),
+       // Add projection summary
+       projection_summary: projection.isSwarm
+         ? {
+             epic_id: projection.epic?.id,
+             epic_title: projection.epic?.title,
+             epic_status: projection.epic?.status,
+             is_active: isSwarmActive(projection),
+             counts: projection.counts,
+           }
+         : null,
+     });
+
+     return result;
+   } catch (err) {
+     const totalDuration = Date.now() - startTime;
+     logCompaction("error", "session_scan_exception", {
+       session_id: sessionID,
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+       duration_ms: totalDuration,
+     });
+     return result;
+   }
+ }
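+
+ // Sketch (hypothetical - NOT the real projectSwarmState): the essence of the
+ // deterministic event fold the scan relies on. The real projection tracks
+ // per-subtask status; this condensed version only counts spawn/complete pairs.
+ // Tool names match the swarmTools list above.
+ function sketchSwarmFold(events: ToolCallEvent[]): { epicSeen: boolean; active: boolean } {
+   let epicSeen = false;
+   let spawned = 0;
+   let completed = 0;
+   for (const e of events) {
+     if (e.tool === "hive_create_epic") epicSeen = true; // an epic was created in this session
+     if (e.tool === "swarm_spawn_subtask") spawned++; // work was handed to a worker
+     if (e.tool === "swarm_complete_subtask") completed++; // a worker reported done
+   }
+   // Active = an epic exists and some spawned work has not completed yet
+   return { epicSeen, active: epicSeen && completed < spawned };
+ }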
+
+ /**
+  * Check for swarm sign - evidence a swarm passed through
+  *
+  * Uses multiple signals with different confidence levels:
+  * - HIGH: in_progress cells (active work)
+  * - MEDIUM: open subtasks, unclosed epics, recently updated cells
+  * - LOW: any cells exist
+  *
+  * Philosophy: err on the side of continuation.
+  * False positive = extra context (low cost)
+  * False negative = lost swarm (high cost)
+  */
+ async function detectSwarm(): Promise<SwarmDetection> {
+   const startTime = Date.now();
+   const reasons: string[] = [];
+   let highConfidence = false;
+   let mediumConfidence = false;
+   let lowConfidence = false;
+
+   logCompaction("debug", "detect_swarm_start", {
+     project_directory: projectDirectory,
+     cwd: process.cwd(),
+   });
+
+   try {
+     const cliStart = Date.now();
+     const result = await new Promise<{ exitCode: number; stdout: string; stderr: string }>(
+       (resolve) => {
+         // Use the swarm CLI to query hive cells
+         const proc = spawn(SWARM_CLI, ["tool", "hive_query"], {
+           cwd: projectDirectory,
+           stdio: ["ignore", "pipe", "pipe"],
+         });
+         let stdout = "";
+         let stderr = "";
+         proc.stdout.on("data", (d) => {
+           stdout += d;
+         });
+         proc.stderr.on("data", (d) => {
+           stderr += d;
+         });
+         proc.on("close", (exitCode) =>
+           resolve({ exitCode: exitCode ?? 1, stdout, stderr }),
+         );
+       },
+     );
+     const cliDuration = Date.now() - cliStart;
+
+     logCompaction("debug", "detect_swarm_cli_complete", {
+       duration_ms: cliDuration,
+       exit_code: result.exitCode,
+       stdout_length: result.stdout.length,
+       stderr_length: result.stderr.length,
+       stderr_preview: result.stderr.substring(0, 200),
+     });
+
+     if (result.exitCode !== 0) {
+       logCompaction("warn", "detect_swarm_cli_failed", {
+         exit_code: result.exitCode,
+         stderr: result.stderr,
+       });
+       return { detected: false, confidence: "none", reasons: ["hive_query failed"] };
+     }
+
+     let cells: any[];
+     try {
+       cells = JSON.parse(result.stdout);
+     } catch (parseErr) {
+       logCompaction("error", "detect_swarm_parse_failed", {
+         error: parseErr instanceof Error ? parseErr.message : String(parseErr),
+         stdout_preview: result.stdout.substring(0, 500),
+       });
+       return { detected: false, confidence: "none", reasons: ["hive_query parse failed"] };
+     }
+
+     if (!Array.isArray(cells) || cells.length === 0) {
+       logCompaction("debug", "detect_swarm_no_cells", {
+         is_array: Array.isArray(cells),
+         length: cells?.length ?? 0,
+       });
+       return { detected: false, confidence: "none", reasons: ["no cells found"] };
+     }
+
+     // Log ALL cells for debugging
+     logCompaction("debug", "detect_swarm_cells_found", {
+       total_cells: cells.length,
+       cells: cells.map((c: any) => ({
+         id: c.id,
+         title: c.title,
+         type: c.type,
+         status: c.status,
+         parent_id: c.parent_id,
+         updated_at: c.updated_at,
+         created_at: c.created_at,
+       })),
+     });
+
+     // HIGH: any in_progress cells
+     const inProgress = cells.filter(
+       (c: { status: string }) => c.status === "in_progress",
+     );
+     if (inProgress.length > 0) {
+       highConfidence = true;
+       reasons.push(`${inProgress.length} cells in_progress`);
+       logCompaction("debug", "detect_swarm_in_progress", {
+         count: inProgress.length,
+         cells: inProgress.map((c: any) => ({ id: c.id, title: c.title })),
+       });
+     }
+
+     // MEDIUM: open subtasks (cells with parent_id)
+     const subtasks = cells.filter(
+       (c: { status: string; parent_id?: string }) =>
+         c.status === "open" && c.parent_id,
+     );
+     if (subtasks.length > 0) {
+       mediumConfidence = true;
+       reasons.push(`${subtasks.length} open subtasks`);
+       logCompaction("debug", "detect_swarm_open_subtasks", {
+         count: subtasks.length,
+         cells: subtasks.map((c: any) => ({ id: c.id, title: c.title, parent_id: c.parent_id })),
+       });
+     }
+
+     // MEDIUM: unclosed epics
+     const openEpics = cells.filter(
+       (c: { status: string; type?: string }) =>
+         c.type === "epic" && c.status !== "closed",
+     );
+     if (openEpics.length > 0) {
+       mediumConfidence = true;
+       reasons.push(`${openEpics.length} unclosed epics`);
+       logCompaction("debug", "detect_swarm_open_epics", {
+         count: openEpics.length,
+         cells: openEpics.map((c: any) => ({ id: c.id, title: c.title, status: c.status })),
+       });
+     }
+
+     // MEDIUM: recently updated cells (last hour)
+     const oneHourAgo = Date.now() - 60 * 60 * 1000;
+     const recentCells = cells.filter(
+       (c: { updated_at?: number }) => c.updated_at && c.updated_at > oneHourAgo,
+     );
+     if (recentCells.length > 0) {
+       mediumConfidence = true;
+       reasons.push(`${recentCells.length} cells updated in last hour`);
+       logCompaction("debug", "detect_swarm_recent_cells", {
+         count: recentCells.length,
+         one_hour_ago: oneHourAgo,
+         cells: recentCells.map((c: any) => ({
+           id: c.id,
+           title: c.title,
+           updated_at: c.updated_at,
+           age_minutes: Math.round((Date.now() - c.updated_at) / 60000),
+         })),
+       });
+     }
+
+     // LOW: any cells exist at all
+     if (cells.length > 0) {
+       lowConfidence = true;
+       reasons.push(`${cells.length} total cells in hive`);
+     }
+   } catch (err) {
+     // Detection failed - report low confidence rather than "none"
+     lowConfidence = true;
+     reasons.push("Detection error, using fallback");
+     logCompaction("error", "detect_swarm_exception", {
+       error: err instanceof Error ? err.message : String(err),
+       stack: err instanceof Error ? err.stack : undefined,
+     });
+   }
+
+   // Determine overall confidence
+   let confidence: "high" | "medium" | "low" | "none";
+   if (highConfidence) {
+     confidence = "high";
+   } else if (mediumConfidence) {
+     confidence = "medium";
+   } else if (lowConfidence) {
+     confidence = "low";
+   } else {
+     confidence = "none";
+   }
+
+   const totalDuration = Date.now() - startTime;
+   logCompaction("debug", "detect_swarm_complete", {
+     duration_ms: totalDuration,
+     confidence,
+     detected: confidence !== "none",
+     reason_count: reasons.length,
+     reasons,
+     high_confidence: highConfidence,
+     medium_confidence: mediumConfidence,
+     low_confidence: lowConfidence,
+   });
+
+   return {
+     detected: confidence !== "none",
+     confidence,
+     reasons,
+   };
+ }
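+
+ // Sketch: how detectSwarm's confidence ladder maps to the injection behavior
+ // of the compaction hook below. The names here are illustrative only; the
+ // hook inlines this branching rather than calling a helper.
+ type InjectionLevel = "llm_or_static_context" | "detection_fallback" | "none";
+ function injectionLevelFor(confidence: "high" | "medium" | "low" | "none"): InjectionLevel {
+   if (confidence === "high" || confidence === "medium") return "llm_or_static_context";
+   if (confidence === "low") return "detection_fallback";
+   return "none";
+ }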
+
+ /**
+  * Swarm-aware compaction context
+  *
+  * Injected during compaction to keep the swarm cooking. The coordinator should
+  * wake up from compaction and immediately resume orchestration - spawning agents,
+  * monitoring progress, unblocking work.
+  */
+ const SWARM_COMPACTION_CONTEXT = `## 🐝 SWARM ACTIVE - Keep Cooking
+
+ You are the **COORDINATOR** of an active swarm. Context was compacted but the swarm is still running.
+
+ **YOUR JOB:** Keep orchestrating. Spawn agents. Monitor progress. Unblock work. Ship it.
+
+ ### Preserve in Summary
+
+ Extract from session context:
+
+ 1. **Epic & Subtasks** - IDs, titles, status, file assignments
+ 2. **What's Running** - Which agents are active, what they're working on
+ 3. **What's Blocked** - Blockers and what's needed to unblock
+ 4. **What's Done** - Completed work and any follow-ups needed
+ 5. **What's Next** - Pending subtasks ready to spawn
+
+ ### Summary Format
+
+ \`\`\`
+ ## 🐝 Swarm State
+
+ **Epic:** <bd-xxx> - <title>
+ **Project:** <path>
+ **Progress:** X/Y subtasks complete
+
+ **Active:**
+ - <bd-xxx>: <title> [in_progress] → <agent> working on <files>
+
+ **Blocked:**
+ - <bd-xxx>: <title> - BLOCKED: <reason>
+
+ **Completed:**
+ - <bd-xxx>: <title> ✓
+
+ **Ready to Spawn:**
+ - <bd-xxx>: <title> (files: <...>)
+ \`\`\`
+
+ ### On Resume - IMMEDIATELY
+
+ 1. \`swarm_status(epic_id="<epic>", project_key="<path>")\` - Get current state
+ 2. \`swarmmail_inbox(limit=5)\` - Check for agent messages
+ 3. \`swarm_review(project_key, epic_id, task_id, files_touched)\` - Review any completed work
+ 4. \`swarm_review_feedback(project_key, task_id, worker_id, status, issues)\` - Approve or request changes
+ 5. **Spawn ready subtasks** - Don't wait, fire them off
+ 6. **Unblock blocked work** - Resolve dependencies, reassign if needed
+ 7. **Collect completed work** - Close done subtasks, verify quality
+
+ ### Keep the Swarm Cooking
+
+ - **Spawn aggressively** - If a subtask is ready and unblocked, spawn an agent
+ - **Monitor actively** - Check status, read messages, respond to blockers
+ - **Close the loop** - When all subtasks done, verify and close the epic
+ - **Don't stop** - The swarm runs until the epic is closed
+
+ **You are not waiting for instructions. You are the coordinator. Coordinate.**
+ `;
+
+ /**
+  * Fallback detection prompt - tells the compactor what to look for
+  *
+  * Used when we can't definitively detect a swarm but want to be safe.
+  * The compactor can check the conversation context for these patterns.
+  */
+ const SWARM_DETECTION_FALLBACK = `## 🐝 Swarm Detection - Check Your Context
+
+ **IMPORTANT:** Before summarizing, check if this session involves an active swarm.
+
+ Look for ANY of these patterns in the conversation:
+
+ ### Tool Calls (definite swarm sign)
+ - \`swarm_decompose\`, \`swarm_spawn_subtask\`, \`swarm_status\`, \`swarm_complete\`
+ - \`swarmmail_init\`, \`swarmmail_reserve\`, \`swarmmail_send\`
+ - \`hive_create_epic\`, \`hive_start\`, \`hive_close\`
+
+ ### IDs and Names
+ - Cell IDs: \`bd-xxx\`, \`bd-xxx.N\` (subtask format)
+ - Agent names: BlueLake, RedMountain, GreenValley, etc.
+ - Epic references: "epic", "subtask", "parent"
+
+ ### Coordination Language
+ - "spawn", "worker", "coordinator"
+ - "reserve", "reservation", "files"
+ - "blocked", "unblock", "dependency"
+ - "progress", "complete", "in_progress"
+
+ ### If You Find Swarm Evidence
+
+ Include this in your summary:
+ 1. Epic ID and title
+ 2. Project path
+ 3. Subtask status (running/blocked/done/pending)
+ 4. Any blockers or issues
+ 5. What should happen next
+
+ **Then tell the resumed session:**
+ "This is an active swarm. Check swarm_status and swarmmail_inbox immediately."
+ `;
+
+ // Extended hooks type to include the experimental compaction hook with the new prompt API
+ type CompactionOutput = {
+   context: string[];
+   prompt?: string; // NEW API from OpenCode PR #5907
+ };
+
+ type ExtendedHooks = Hooks & {
+   "experimental.session.compacting"?: (
+     input: { sessionID: string },
+     output: CompactionOutput,
+   ) => Promise<void>;
+ };
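+
+ // Sketch: progressive enhancement against the two compaction APIs. On hosts
+ // that expose the new prompt field (OpenCode PR #5907), the content replaces
+ // the summarizer prompt wholesale; older hosts only accept extra context
+ // entries. The hook below inlines this same check rather than calling a helper.
+ function injectCompactionContent(output: CompactionOutput, content: string): void {
+   if ("prompt" in output) {
+     output.prompt = content; // new API: take over the compaction prompt
+   } else {
+     output.context.push(content); // old API: append to the context array
+   }
+ }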
+
+ // NOTE: Only default export - named exports cause double registration!
+ // OpenCode's plugin loader calls ALL exports as functions.
+ const SwarmPlugin: Plugin = async (
+   input: PluginInput,
+ ): Promise<ExtendedHooks> => {
+   // CRITICAL: Set the project directory from OpenCode input.
+   // Without this, the CLI uses the wrong database path.
+   projectDirectory = input.directory;
+
+   // Store the SDK client for session message scanning during compaction
+   sdkClient = input.client;
+
+   return {
+     tool: {
+       // Beads
+       hive_create,
+       hive_create_epic,
+       hive_query,
+       hive_update,
+       hive_close,
+       hive_start,
+       hive_ready,
+       hive_cells,
+       hive_sync,
+       beads_link_thread,
+       // Swarm Mail (Embedded)
+       swarmmail_init,
+       swarmmail_send,
+       swarmmail_inbox,
+       swarmmail_read_message,
+       swarmmail_reserve,
+       swarmmail_release,
+       swarmmail_ack,
+       swarmmail_health,
+       // Structured
+       structured_extract_json,
+       structured_validate,
+       structured_parse_evaluation,
+       structured_parse_decomposition,
+       structured_parse_cell_tree,
+       // Swarm
+       swarm_init,
+       swarm_select_strategy,
+       swarm_plan_prompt,
+       swarm_decompose,
+       swarm_validate_decomposition,
+       swarm_status,
+       swarm_progress,
+       swarm_complete,
+       swarm_record_outcome,
+       swarm_subtask_prompt,
+       swarm_spawn_subtask,
+       swarm_complete_subtask,
+       swarm_evaluation_prompt,
+       swarm_broadcast,
+       // Worktree Isolation
+       swarm_worktree_create,
+       swarm_worktree_merge,
+       swarm_worktree_cleanup,
+       swarm_worktree_list,
+       // Structured Review
+       swarm_review,
+       swarm_review_feedback,
+       // Skills
+       skills_list,
+       skills_read,
+       skills_use,
+       skills_create,
+       skills_update,
+       skills_delete,
+       skills_init,
+       skills_add_script,
+       skills_execute,
+       // Swarm Insights
+       swarm_get_strategy_insights,
+       swarm_get_file_insights,
+       swarm_get_pattern_insights,
+       // CASS (Cross-Agent Session Search)
+       cass_search,
+       cass_view,
+       cass_expand,
+       cass_health,
+       cass_index,
+       cass_stats,
+     },
+
+     // Swarm-aware compaction hook with LLM-powered continuation prompts.
+     // Fallback chain: LLM prompt → static context → detection fallback → none.
+     "experimental.session.compacting": async (
+       input: { sessionID: string },
+       output: CompactionOutput,
+     ) => {
+       const startTime = Date.now();
+
+       // =======================================================================
+       // LOG: Compaction hook invoked - capture EVERYTHING we receive
+       // =======================================================================
+       logCompaction("info", "compaction_hook_invoked", {
+         session_id: input.sessionID,
+         project_directory: projectDirectory,
+         input_keys: Object.keys(input),
+         input_full: JSON.parse(JSON.stringify(input)), // Deep clone for logging
+         output_keys: Object.keys(output),
+         output_context_count: output.context?.length ?? 0,
+         output_has_prompt_field: "prompt" in output,
+         output_initial_state: {
+           context: output.context,
+           prompt: (output as any).prompt,
+         },
+         env: {
+           OPENCODE_SESSION_ID: process.env.OPENCODE_SESSION_ID,
+           OPENCODE_MESSAGE_ID: process.env.OPENCODE_MESSAGE_ID,
+           OPENCODE_AGENT: process.env.OPENCODE_AGENT,
+           OPENCODE_LITE_MODEL: process.env.OPENCODE_LITE_MODEL,
+           SWARM_PROJECT_DIR: process.env.SWARM_PROJECT_DIR,
+         },
+         cwd: process.cwd(),
+         timestamp: new Date().toISOString(),
+       });
+
+       // =======================================================================
+       // STEP 1: Scan session messages for swarm tool calls
+       // =======================================================================
+       const sessionScanStart = Date.now();
+       const sessionScan = await scanSessionMessages(input.sessionID);
+       const sessionScanDuration = Date.now() - sessionScanStart;
+
+       logCompaction("info", "session_scan_results", {
+         session_id: input.sessionID,
+         duration_ms: sessionScanDuration,
+         message_count: sessionScan.messageCount,
+         tool_call_count: sessionScan.toolCalls.length,
+         swarm_detected_from_messages: sessionScan.swarmDetected,
+         reasons: sessionScan.reasons,
+       });
+
+       // =======================================================================
+       // STEP 2: Detect swarm state from hive cells
+       // =======================================================================
+       const detectionStart = Date.now();
+       const detection = await detectSwarm();
+       const detectionDuration = Date.now() - detectionStart;
+
+       logCompaction("info", "swarm_detection_complete", {
+         session_id: input.sessionID,
+         duration_ms: detectionDuration,
+         detected: detection.detected,
+         confidence: detection.confidence,
+         reasons: detection.reasons,
+         reason_count: detection.reasons.length,
+       });
+
+       // =======================================================================
+       // STEP 3: Merge session scan with hive detection for final confidence
+       // =======================================================================
+       // If session messages show high-confidence swarm tools, boost confidence
+       if (sessionScan.swarmDetected && sessionScan.reasons.some((r) => r.includes("high-confidence"))) {
+         if (detection.confidence === "none" || detection.confidence === "low") {
+           const originalConfidence = detection.confidence; // capture BEFORE mutating
+           detection.confidence = "high";
+           detection.detected = true;
+           detection.reasons.push(...sessionScan.reasons);
+
+           logCompaction("info", "confidence_boost_from_session_scan", {
+             session_id: input.sessionID,
+             original_confidence: originalConfidence,
+             boosted_to: "high",
+             session_reasons: sessionScan.reasons,
+           });
+         }
+       } else if (sessionScan.swarmDetected) {
+         // Medium boost for any swarm tools found
+         if (detection.confidence === "none") {
+           detection.confidence = "medium";
+           detection.detected = true;
+           detection.reasons.push(...sessionScan.reasons);
+
+           logCompaction("info", "confidence_boost_from_session_scan", {
+             session_id: input.sessionID,
+             original_confidence: "none",
+             boosted_to: "medium",
+             session_reasons: sessionScan.reasons,
+           });
+         } else if (detection.confidence === "low") {
+           detection.confidence = "medium";
+           detection.reasons.push(...sessionScan.reasons);
+
+           logCompaction("info", "confidence_boost_from_session_scan", {
+             session_id: input.sessionID,
+             original_confidence: "low",
+             boosted_to: "medium",
+             session_reasons: sessionScan.reasons,
+           });
+         }
+       }
+
+       logCompaction("info", "final_swarm_detection", {
+         session_id: input.sessionID,
+         confidence: detection.confidence,
+         detected: detection.detected,
+         combined_reasons: detection.reasons,
+         message_scan_contributed: sessionScan.swarmDetected,
+       });
+
+       if (detection.confidence === "high" || detection.confidence === "medium") {
+         // Definite or probable swarm - try LLM-powered compaction
+         logCompaction("info", "swarm_detected_attempting_llm", {
+           session_id: input.sessionID,
+           confidence: detection.confidence,
+           reasons: detection.reasons,
+           has_projection: !!sessionScan.projection?.isSwarm,
+         });
+
+         try {
+           // ===================================================================
+           // PREFER PROJECTION (ground truth from events) OVER HIVE QUERY
+           // ===================================================================
+           // The projection is derived from session events - it's the source of
+           // truth. The hive query may show all cells closed even if the swarm
+           // was active.
+
+           let snapshot: SwarmStateSnapshot;
+           let queryDuration = 0; // set only when the hive-query fallback runs
+
+           if (sessionScan.projection?.isSwarm) {
+             // Use the projection as the primary source - convert to snapshot format
+             const proj = sessionScan.projection;
+             snapshot = {
+               sessionID: input.sessionID,
+               detection: {
+                 confidence: isSwarmActive(proj) ? "high" : "medium",
+                 reasons: sessionScan.reasons,
+               },
+               epic: proj.epic
+                 ? {
+                     id: proj.epic.id,
+                     title: proj.epic.title,
+                     status: proj.epic.status,
+                     subtasks: Array.from(proj.subtasks.values()).map((s) => ({
+                       id: s.id,
+                       title: s.title,
+                       status: s.status as "open" | "in_progress" | "blocked" | "closed",
+                       files: s.files,
+                     })),
+                   }
+                 : undefined,
+               messages: [],
+               reservations: [],
+             };
+
+             logCompaction("info", "using_projection_as_snapshot", {
+               session_id: input.sessionID,
+               epic_id: proj.epic?.id,
+               epic_title: proj.epic?.title,
+               subtask_count: proj.subtasks.size,
+               is_active: isSwarmActive(proj),
+               counts: proj.counts,
+             });
+           } else {
+             // Fall back to the hive query (may be stale)
+             const queryStart = Date.now();
+             snapshot = await querySwarmState(input.sessionID);
+             queryDuration = Date.now() - queryStart;
+
+             logCompaction("info", "fallback_to_hive_query", {
+               session_id: input.sessionID,
+               duration_ms: queryDuration,
+               reason: "no projection available or not a swarm",
+             });
+           }
+
+           logCompaction("info", "swarm_state_resolved", {
+             session_id: input.sessionID,
+             source: sessionScan.projection?.isSwarm ? "projection" : "hive_query",
+             has_epic: !!snapshot.epic,
+             epic_id: snapshot.epic?.id,
+             epic_title: snapshot.epic?.title,
+             epic_status: snapshot.epic?.status,
+             subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+             subtasks: snapshot.epic?.subtasks?.map((s) => ({
+               id: s.id,
+               title: s.title,
+               status: s.status,
+               file_count: s.files?.length ?? 0,
+             })),
+             message_count: snapshot.messages?.length ?? 0,
+             reservation_count: snapshot.reservations?.length ?? 0,
+             detection_confidence: snapshot.detection.confidence,
+             detection_reasons: snapshot.detection.reasons,
+           });
+
+           // ===================================================================
+           // CAPTURE POINT 1: Detection complete - record confidence and reasons
+           // ===================================================================
+           await captureCompaction(
+             input.sessionID,
+             snapshot.epic?.id || "unknown",
+             "detection_complete",
+             {
+               confidence: snapshot.detection.confidence,
+               detected: detection.detected,
+               reasons: snapshot.detection.reasons,
+               session_scan_contributed: sessionScan.swarmDetected,
+               session_scan_reasons: sessionScan.reasons,
+               epic_id: snapshot.epic?.id,
+               epic_title: snapshot.epic?.title,
+               subtask_count: snapshot.epic?.subtasks?.length ?? 0,
+             },
+           );
+
+           // Level 2: Generate the continuation prompt with the LLM
+           const llmStart = Date.now();
+           const llmPrompt = await generateCompactionPrompt(snapshot);
+           const llmDuration = Date.now() - llmStart;
+
+           logCompaction("info", "llm_generation_complete", {
+             session_id: input.sessionID,
+             duration_ms: llmDuration,
+             success: !!llmPrompt,
+             prompt_length: llmPrompt?.length ?? 0,
+             prompt_preview: llmPrompt?.substring(0, 500),
+           });
+
+           if (llmPrompt) {
+             // =================================================================
+             // CAPTURE POINT 2: Prompt generated - record FULL prompt content
+             // =================================================================
+             await captureCompaction(
+               input.sessionID,
+               snapshot.epic?.id || "unknown",
+               "prompt_generated",
+               {
+                 prompt_length: llmPrompt.length,
+                 full_prompt: llmPrompt, // FULL content, not truncated
+                 context_type: "llm_generated",
+                 duration_ms: llmDuration,
+               },
+             );
+
+             // SUCCESS: Use the LLM-generated prompt
+             const header = `[Swarm compaction: LLM-generated, ${detection.reasons.join(", ")}]\n\n`;
+             const fullContent = header + llmPrompt;
+
+             // Progressive enhancement: use the new API if available
+             if ("prompt" in output) {
+               output.prompt = fullContent;
+               logCompaction("info", "context_injected_via_prompt_api", {
+                 session_id: input.sessionID,
+                 content_length: fullContent.length,
+                 method: "output.prompt",
+               });
+             } else {
+               output.context.push(fullContent);
+               logCompaction("info", "context_injected_via_context_array", {
+                 session_id: input.sessionID,
+                 content_length: fullContent.length,
+                 method: "output.context.push",
+                 context_count_after: output.context.length,
+               });
+             }
+
+             // =================================================================
+             // CAPTURE POINT 3a: Context injected (LLM path) - record FULL content
+             // =================================================================
+             await captureCompaction(
+               input.sessionID,
+               snapshot.epic?.id || "unknown",
+               "context_injected",
+               {
+                 full_content: fullContent, // FULL content, not truncated
+                 content_length: fullContent.length,
+                 injection_method: "prompt" in output ? "output.prompt" : "output.context.push",
+                 context_type: "llm_generated",
+               },
+             );
+
+             const totalDuration = Date.now() - startTime;
+             logCompaction("info", "compaction_complete_llm_success", {
+               session_id: input.sessionID,
+               total_duration_ms: totalDuration,
+               detection_duration_ms: detectionDuration,
+               query_duration_ms: queryDuration,
+               llm_duration_ms: llmDuration,
+               confidence: detection.confidence,
+               context_type: "llm_generated",
+               content_length: fullContent.length,
+             });
+             return;
+           }
+
+           // LLM returned nothing - fall through to the static prompt
+           logCompaction("warn", "llm_generation_returned_null", {
+             session_id: input.sessionID,
+             llm_duration_ms: llmDuration,
+             falling_back_to: "static_prompt",
+           });
+         } catch (err) {
+           // LLM failed - fall through to the static prompt
+           logCompaction("error", "llm_generation_failed", {
+             session_id: input.sessionID,
+             error: err instanceof Error ? err.message : String(err),
+             error_stack: err instanceof Error ? err.stack : undefined,
+             falling_back_to: "static_prompt",
+           });
+         }
+
+         // Level 3: Fall back to static context
+         const header = `[Swarm detected: ${detection.reasons.join(", ")}]\n\n`;
+         const staticContent = header + SWARM_COMPACTION_CONTEXT;
+         output.context.push(staticContent);
+
+         // =====================================================================
+         // CAPTURE POINT 3b: Context injected (static fallback) - record FULL content
+         // =====================================================================
+         await captureCompaction(
+           input.sessionID,
+           "unknown", // No snapshot available in this path
+           "context_injected",
+           {
+             full_content: staticContent,
+             content_length: staticContent.length,
+             injection_method: "output.context.push",
+             context_type: "static_swarm_context",
+           },
+         );
+
+         const totalDuration = Date.now() - startTime;
+         logCompaction("info", "compaction_complete_static_fallback", {
+           session_id: input.sessionID,
+           total_duration_ms: totalDuration,
+           confidence: detection.confidence,
+           context_type: "static_swarm_context",
+           content_length: staticContent.length,
+           context_count_after: output.context.length,
+         });
+       } else if (detection.confidence === "low") {
+         // Level 4: Possible swarm - inject the fallback detection prompt
+         const header = `[Possible swarm: ${detection.reasons.join(", ")}]\n\n`;
+         const fallbackContent = header + SWARM_DETECTION_FALLBACK;
+         output.context.push(fallbackContent);
+
+         // =====================================================================
+         // CAPTURE POINT 3c: Context injected (detection fallback) - record FULL content
+         // =====================================================================
+         await captureCompaction(
+           input.sessionID,
+           "unknown", // No snapshot for low confidence
+           "context_injected",
+           {
+             full_content: fallbackContent,
+             content_length: fallbackContent.length,
+             injection_method: "output.context.push",
+             context_type: "detection_fallback",
+           },
+         );
+
+         const totalDuration = Date.now() - startTime;
+         logCompaction("info", "compaction_complete_detection_fallback", {
+           session_id: input.sessionID,
+           total_duration_ms: totalDuration,
+           confidence: detection.confidence,
+           context_type: "detection_fallback",
+           content_length: fallbackContent.length,
+           context_count_after: output.context.length,
+           reasons: detection.reasons,
+         });
+       } else {
+         // Level 5: confidence === "none" - no injection, probably not a swarm
+         const totalDuration = Date.now() - startTime;
+         logCompaction("info", "compaction_complete_no_swarm", {
+           session_id: input.sessionID,
+           total_duration_ms: totalDuration,
+           confidence: detection.confidence,
+           context_type: "none",
+           reasons: detection.reasons,
+           context_count_unchanged: output.context.length,
+         });
+       }
+
+       // =======================================================================
+       // LOG: Final output state
+       // =======================================================================
+       logCompaction("debug", "compaction_hook_complete_final_state", {
+         session_id: input.sessionID,
+         output_context_count: output.context?.length ?? 0,
+         output_context_lengths: output.context?.map((c) => c.length) ?? [],
+         output_has_prompt: !!(output as any).prompt,
+         output_prompt_length: (output as any).prompt?.length ?? 0,
+         total_duration_ms: Date.now() - startTime,
+       });
+     },
+   };
+ };
+
+ export default SwarmPlugin;
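+
+ // Installation note (assumption - verify against your OpenCode version):
+ // OpenCode discovers plugins from the project's .opencode/plugin/ directory,
+ // which is where `swarm setup` writes this wrapper file.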