opencode-swarm-plugin 0.31.7 → 0.33.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/.turbo/turbo-build.log +4 -4
  2. package/.turbo/turbo-test.log +324 -316
  3. package/CHANGELOG.md +394 -0
  4. package/README.md +129 -181
  5. package/bin/swarm.test.ts +31 -0
  6. package/bin/swarm.ts +635 -140
  7. package/dist/compaction-hook.d.ts +1 -1
  8. package/dist/compaction-hook.d.ts.map +1 -1
  9. package/dist/hive.d.ts.map +1 -1
  10. package/dist/index.d.ts +17 -2
  11. package/dist/index.d.ts.map +1 -1
  12. package/dist/index.js +653 -139
  13. package/dist/memory-tools.d.ts.map +1 -1
  14. package/dist/memory.d.ts +5 -4
  15. package/dist/memory.d.ts.map +1 -1
  16. package/dist/observability-tools.d.ts +116 -0
  17. package/dist/observability-tools.d.ts.map +1 -0
  18. package/dist/plugin.js +648 -136
  19. package/dist/skills.d.ts.map +1 -1
  20. package/dist/swarm-orchestrate.d.ts +29 -5
  21. package/dist/swarm-orchestrate.d.ts.map +1 -1
  22. package/dist/swarm-prompts.d.ts +66 -0
  23. package/dist/swarm-prompts.d.ts.map +1 -1
  24. package/dist/swarm.d.ts +17 -2
  25. package/dist/swarm.d.ts.map +1 -1
  26. package/evals/lib/{data-loader.test.ts → data-loader.evalite-test.ts} +7 -6
  27. package/evals/lib/data-loader.ts +1 -1
  28. package/evals/scorers/{outcome-scorers.test.ts → outcome-scorers.evalite-test.ts} +1 -1
  29. package/examples/plugin-wrapper-template.ts +316 -12
  30. package/global-skills/swarm-coordination/SKILL.md +118 -8
  31. package/package.json +3 -2
  32. package/src/compaction-hook.ts +5 -3
  33. package/src/hive.integration.test.ts +83 -1
  34. package/src/hive.ts +37 -12
  35. package/src/index.ts +25 -1
  36. package/src/mandate-storage.integration.test.ts +601 -0
  37. package/src/memory-tools.ts +6 -4
  38. package/src/memory.integration.test.ts +117 -49
  39. package/src/memory.test.ts +41 -217
  40. package/src/memory.ts +12 -8
  41. package/src/observability-tools.test.ts +346 -0
  42. package/src/observability-tools.ts +594 -0
  43. package/src/repo-crawl.integration.test.ts +441 -0
  44. package/src/skills.integration.test.ts +1192 -0
  45. package/src/skills.test.ts +42 -1
  46. package/src/skills.ts +8 -4
  47. package/src/structured.integration.test.ts +817 -0
  48. package/src/swarm-deferred.integration.test.ts +157 -0
  49. package/src/swarm-deferred.test.ts +38 -0
  50. package/src/swarm-mail.integration.test.ts +15 -19
  51. package/src/swarm-orchestrate.integration.test.ts +282 -0
  52. package/src/swarm-orchestrate.test.ts +123 -0
  53. package/src/swarm-orchestrate.ts +279 -201
  54. package/src/swarm-prompts.test.ts +481 -0
  55. package/src/swarm-prompts.ts +297 -0
  56. package/src/swarm-research.integration.test.ts +544 -0
  57. package/src/swarm-research.test.ts +698 -0
  58. package/src/swarm-research.ts +472 -0
  59. package/src/swarm-review.integration.test.ts +290 -0
  60. package/src/swarm.integration.test.ts +23 -20
  61. package/src/swarm.ts +6 -3
  62. package/src/tool-adapter.integration.test.ts +1221 -0
package/dist/plugin.js CHANGED
@@ -26558,7 +26558,7 @@ var init_skills = __esm(() => {
  "skills"
  ];
  skills_list = tool({
- description: `List all available skills in the project.
+ description: `[DEPRECATED] List all available skills in the project.

  Skills are specialized instructions that help with specific domains or tasks.
  Use this tool to discover what skills are available, then use skills_use to
@@ -26569,6 +26569,7 @@ Returns skill names, descriptions, and whether they have executable scripts.`,
  tag: tool.schema.string().optional().describe("Optional tag to filter skills by")
  },
  async execute(args) {
+ console.warn("[DEPRECATED] skills_list is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
  const skills = await discoverSkills();
  let refs = Array.from(skills.values());
  if (args.tag) {
@@ -26591,7 +26592,7 @@ ${formatted}`;
  }
  });
  skills_use = tool({
- description: `Activate a skill by loading its full instructions.
+ description: `[DEPRECATED] Activate a skill by loading its full instructions.

  After calling this tool, follow the skill's instructions for the current task.
  Skills provide domain-specific guidance and best practices.
@@ -26602,6 +26603,7 @@ If the skill has scripts, you can run them with skills_execute.`,
  include_scripts: tool.schema.boolean().optional().describe("Also list available scripts (default: true)")
  },
  async execute(args) {
+ console.warn("[DEPRECATED] skills_use is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
  const skill = await getSkill(args.name);
  if (!skill) {
  const available = await listSkills();
@@ -26634,7 +26636,7 @@ Run scripts with skills_execute tool.`;
  }
  });
  skills_execute = tool({
- description: `Execute a script from a skill's scripts/ directory.
+ description: `[DEPRECATED] Execute a script from a skill's scripts/ directory.

  Some skills include helper scripts for common operations.
  Use skills_use first to see available scripts, then execute them here.
@@ -26646,6 +26648,7 @@ Scripts run in the skill's directory with the project directory as an argument.`
  args: tool.schema.array(tool.schema.string()).optional().describe("Additional arguments to pass to the script")
  },
  async execute(args, ctx) {
+ console.warn("[DEPRECATED] skills_execute is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
  const skill = await getSkill(args.skill);
  if (!skill) {
  return `Skill '${args.skill}' not found.`;
@@ -26691,7 +26694,7 @@ ${output}`;
  }
  });
  skills_read = tool({
- description: `Read a resource file from a skill's directory.
+ description: `[DEPRECATED] Read a resource file from a skill's directory.

  Skills may include additional files like:
  - examples.md - Example usage
@@ -26704,6 +26707,7 @@ Use this to access supplementary skill resources.`,
  file: tool.schema.string().describe("Relative path to the file within the skill directory")
  },
  async execute(args) {
+ console.warn("[DEPRECATED] skills_read is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
  const skill = await getSkill(args.skill);
  if (!skill) {
  return `Skill '${args.skill}' not found.`;
@@ -27084,7 +27088,7 @@ import {
  FlushManager,
  importFromJSONL,
  syncMemories,
- getSwarmMail,
+ getSwarmMailLibSQL,
  resolvePartialId
  } from "swarm-mail";
  import { existsSync, readFileSync } from "node:fs";
@@ -27734,7 +27738,7 @@ async function getHiveAdapter(projectKey) {
  if (adapterCache.has(projectKey)) {
  return adapterCache.get(projectKey);
  }
- const swarmMail = await getSwarmMail(projectKey);
+ const swarmMail = await getSwarmMailLibSQL(projectKey);
  const db = await swarmMail.getDatabase();
  const adapter = createHiveAdapter(db, projectKey);
  await adapter.runMigrations();
@@ -28101,7 +28105,7 @@ var hive_sync = tool({
  outputPath: `${projectKey}/.hive/issues.jsonl`
  });
  const flushResult = await withTimeout(flushManager.flush(), TIMEOUT_MS, "flush hive");
- const swarmMail = await getSwarmMail(projectKey);
+ const swarmMail = await getSwarmMailLibSQL(projectKey);
  const db = await swarmMail.getDatabase();
  const hivePath = join(projectKey, ".hive");
  let memoriesSynced = 0;
@@ -28134,9 +28138,27 @@ var hive_sync = tool({
  const remoteCheckResult2 = await runGitCommand(["remote"]);
  const hasRemote2 = remoteCheckResult2.stdout.trim() !== "";
  if (hasRemote2) {
- const pullResult = await withTimeout(runGitCommand(["pull", "--rebase"]), TIMEOUT_MS, "git pull --rebase");
- if (pullResult.exitCode !== 0) {
- throw new HiveError(`Failed to pull: ${pullResult.stderr}`, "git pull --rebase", pullResult.exitCode);
+ const statusResult = await runGitCommand(["status", "--porcelain"]);
+ const hasUnstagedChanges = statusResult.stdout.trim() !== "";
+ let didStash = false;
+ if (hasUnstagedChanges) {
+ const stashResult = await runGitCommand(["stash", "push", "-u", "-m", "hive_sync: auto-stash before pull"]);
+ if (stashResult.exitCode === 0) {
+ didStash = true;
+ }
+ }
+ try {
+ const pullResult = await withTimeout(runGitCommand(["pull", "--rebase"]), TIMEOUT_MS, "git pull --rebase");
+ if (pullResult.exitCode !== 0) {
+ throw new HiveError(`Failed to pull: ${pullResult.stderr}`, "git pull --rebase", pullResult.exitCode);
+ }
+ } finally {
+ if (didStash) {
+ const popResult = await runGitCommand(["stash", "pop"]);
+ if (popResult.exitCode !== 0) {
+ console.warn(`[hive_sync] Warning: stash pop failed. Your changes are in 'git stash list'. Error: ${popResult.stderr}`);
+ }
+ }
  }
  }
  }
@@ -32535,7 +32557,8 @@ import {
  sendSwarmMessage as sendSwarmMessage3,
  getAgent,
  createEvent as createEvent2,
- appendEvent as appendEvent2
+ appendEvent as appendEvent2,
+ getSwarmMailLibSQL as getSwarmMailLibSQL2
  } from "swarm-mail";
  init_skills();

@@ -33339,60 +33362,6 @@ ${progress.blockers.map((b) => `- ${b}`).join(`

  `);
  }
- async function runUbsScan(files) {
- if (files.length === 0) {
- return null;
- }
- const ubsAvailable = await isToolAvailable("ubs");
- if (!ubsAvailable) {
- warnMissingTool("ubs");
- return null;
- }
- try {
- const result = await Bun.$`ubs scan ${files.join(" ")} --json`.quiet().nothrow();
- const output = result.stdout.toString();
- if (!output.trim()) {
- return {
- exitCode: result.exitCode,
- bugs: [],
- summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
- };
- }
- try {
- const parsed = JSON.parse(output);
- if (typeof parsed !== "object" || parsed === null) {
- throw new Error("UBS output is not an object");
- }
- if (!Array.isArray(parsed.bugs)) {
- console.warn("[swarm] UBS output missing bugs array, using empty");
- }
- if (typeof parsed.summary !== "object" || parsed.summary === null) {
- console.warn("[swarm] UBS output missing summary object, using empty");
- }
- return {
- exitCode: result.exitCode,
- bugs: Array.isArray(parsed.bugs) ? parsed.bugs : [],
- summary: parsed.summary || {
- total: 0,
- critical: 0,
- high: 0,
- medium: 0,
- low: 0
- }
- };
- } catch (error45) {
- console.error(`[swarm] CRITICAL: UBS scan failed to parse JSON output because output is malformed:`, error45);
- console.error(`[swarm] Raw output: ${output}. Try: Run 'ubs doctor' to check installation, verify UBS version with 'ubs --version' (need v1.0.0+), or check if UBS supports --json flag.`);
- return {
- exitCode: result.exitCode,
- bugs: [],
- summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
- };
- }
- } catch {
- return null;
- }
- }
  async function runTypecheckVerification() {
  const step = {
  name: "typecheck",
@@ -33477,34 +33446,9 @@ async function runTestVerification(filesTouched) {
  }
  return step;
  }
- async function runVerificationGate(filesTouched, skipUbs = false) {
+ async function runVerificationGate(filesTouched, _skipUbs = false) {
  const steps = [];
  const blockers = [];
- if (!skipUbs && filesTouched.length > 0) {
- const ubsResult = await runUbsScan(filesTouched);
- if (ubsResult) {
- const ubsStep = {
- name: "ubs_scan",
- command: `ubs scan ${filesTouched.join(" ")}`,
- passed: ubsResult.summary.critical === 0,
- exitCode: ubsResult.exitCode
- };
- if (!ubsStep.passed) {
- ubsStep.error = `Found ${ubsResult.summary.critical} critical bugs`;
- blockers.push(`UBS found ${ubsResult.summary.critical} critical bug(s). Try: Run 'ubs scan ${filesTouched.join(" ")}' to see details, fix critical bugs in reported files, or use skip_ubs_scan=true to bypass (not recommended).`);
- }
- steps.push(ubsStep);
- } else {
- steps.push({
- name: "ubs_scan",
- command: "ubs scan",
- passed: true,
- exitCode: 0,
- skipped: true,
- skipReason: "UBS not available"
- });
- }
- }
  const typecheckStep = await runTypecheckVerification();
  steps.push(typecheckStep);
  if (!typecheckStep.passed && !typecheckStep.skipped) {
@@ -33827,16 +33771,15 @@ ${args.files_affected.map((f) => `- \`${f}\``).join(`
  }
  });
  var swarm_complete = tool({
- description: "Mark subtask complete with Verification Gate. Runs UBS scan, typecheck, and tests before allowing completion.",
+ description: "Mark subtask complete with Verification Gate. Runs typecheck and tests before allowing completion.",
  args: {
  project_key: tool.schema.string().describe("Project path"),
  agent_name: tool.schema.string().describe("Your Agent Mail name"),
  bead_id: tool.schema.string().describe("Subtask bead ID"),
  summary: tool.schema.string().describe("Brief summary of work done"),
  evaluation: tool.schema.string().optional().describe("Self-evaluation JSON (Evaluation schema)"),
- files_touched: tool.schema.array(tool.schema.string()).optional().describe("Files modified - will be verified (UBS, typecheck, tests)"),
- skip_ubs_scan: tool.schema.boolean().optional().describe("Skip UBS bug scan (default: false)"),
- skip_verification: tool.schema.boolean().optional().describe("Skip ALL verification (UBS, typecheck, tests). Use sparingly! (default: false)"),
+ files_touched: tool.schema.array(tool.schema.string()).optional().describe("Files modified - will be verified (typecheck, tests)"),
+ skip_verification: tool.schema.boolean().optional().describe("Skip ALL verification (typecheck, tests). Use sparingly! (default: false)"),
  planned_files: tool.schema.array(tool.schema.string()).optional().describe("Files that were originally planned to be modified"),
  start_time: tool.schema.number().optional().describe("Task start timestamp (Unix ms) for duration calculation"),
  error_count: tool.schema.number().optional().describe("Number of errors encountered during task"),
@@ -33919,7 +33862,7 @@ Continuing with completion, but this should be fixed for future subtasks.`;
  }
  let verificationResult = null;
  if (!args.skip_verification && args.files_touched?.length) {
- verificationResult = await runVerificationGate(args.files_touched, args.skip_ubs_scan ?? false);
+ verificationResult = await runVerificationGate(args.files_touched, false);
  if (!verificationResult.passed) {
  return JSON.stringify({
  success: false,
@@ -33941,21 +33884,6 @@ Continuing with completion, but this should be fixed for future subtasks.`;
  }, null, 2);
  }
  }
- let ubsResult = null;
- if (!args.skip_verification && !verificationResult && args.files_touched?.length && !args.skip_ubs_scan) {
- ubsResult = await runUbsScan(args.files_touched);
- if (ubsResult && ubsResult.summary.critical > 0) {
- return JSON.stringify({
- success: false,
- error: `UBS found ${ubsResult.summary.critical} critical bug(s) that must be fixed before completing`,
- ubs_scan: {
- critical_count: ubsResult.summary.critical,
- bugs: ubsResult.bugs.filter((b) => b.severity === "critical")
- },
- hint: `Fix these critical bugs: ${ubsResult.bugs.filter((b) => b.severity === "critical").map((b) => `${b.file}:${b.line} - ${b.message}`).slice(0, 3).join("; ")}. Try: Run 'ubs scan ${args.files_touched?.join(" ") || "."} --json' for full report, fix reported issues, or use skip_ubs_scan=true to bypass (not recommended).`
- }, null, 2);
- }
- }
  let contractValidation = null;
  let contractWarning;
  if (args.files_touched && args.files_touched.length > 0) {
@@ -34025,6 +33953,23 @@ This will be recorded as a negative learning signal.`;
  }
  }, null, 2);
  }
+ let deferredResolved = false;
+ let deferredError;
+ try {
+ const swarmMail = await getSwarmMailLibSQL2(args.project_key);
+ const db = await swarmMail.getDatabase();
+ const deferredUrl = `deferred:${args.bead_id}`;
+ const checkResult = await db.query(`SELECT url, resolved FROM deferred WHERE url = ? AND resolved = 0`, [deferredUrl]);
+ if (checkResult.rows.length > 0) {
+ await db.query(`UPDATE deferred SET resolved = 1, value = ? WHERE url = ? AND resolved = 0`, [JSON.stringify({ completed: true, summary: args.summary }), deferredUrl]);
+ deferredResolved = true;
+ } else {
+ console.info(`[swarm_complete] No deferred found for ${args.bead_id} - task may not be part of active swarm`);
+ }
+ } catch (error45) {
+ deferredError = error45 instanceof Error ? error45.message : String(error45);
+ console.warn(`[swarm_complete] Failed to resolve deferred (non-fatal): ${deferredError}`);
+ }
  let syncSuccess = false;
  let syncError;
  try {
@@ -34129,6 +34074,8 @@ This will be recorded as a negative learning signal.`;
  sync_error: syncError,
  message_sent: messageSent,
  message_error: messageError,
+ deferred_resolved: deferredResolved,
+ deferred_error: deferredError,
  agent_registration: {
  verified: agentRegistered,
  warning: registrationWarning || undefined
@@ -34143,15 +34090,6 @@ This will be recorded as a negative learning signal.`;
  skipReason: s.skipReason
  }))
  } : args.skip_verification ? { skipped: true, reason: "skip_verification=true" } : { skipped: true, reason: "no files_touched provided" },
- ubs_scan: ubsResult ? {
- ran: true,
- bugs_found: ubsResult.summary.total,
- summary: ubsResult.summary,
- warnings: ubsResult.bugs.filter((b) => b.severity !== "critical")
- } : verificationResult ? { ran: true, included_in_verification_gate: true } : {
- ran: false,
- reason: args.skip_ubs_scan ? "skipped" : "no files or ubs unavailable"
- },
  learning_prompt: `## Reflection

  Did you learn anything reusable during this subtask? Consider:
@@ -34189,9 +34127,7 @@ Files touched: ${args.files_touched?.join(", ") || "none recorded"}`,
  const errorStack = error45 instanceof Error ? error45.stack : undefined;
  let failedStep = "unknown";
  if (errorMessage.includes("verification")) {
- failedStep = "Verification Gate (UBS/typecheck/tests)";
- } else if (errorMessage.includes("UBS") || errorMessage.includes("ubs")) {
- failedStep = "UBS scan";
+ failedStep = "Verification Gate (typecheck/tests)";
  } else if (errorMessage.includes("evaluation")) {
  failedStep = "Self-evaluation parsing";
  } else if (errorMessage.includes("bead") || errorMessage.includes("close")) {
@@ -34223,7 +34159,6 @@ ${errorStack.slice(0, 1000)}
  `### Context`,
  `- **Summary**: ${args.summary}`,
  `- **Files touched**: ${args.files_touched?.length ? args.files_touched.join(", ") : "none"}`,
- `- **Skip UBS**: ${args.skip_ubs_scan ?? false}`,
  `- **Skip verification**: ${args.skip_verification ?? false}`,
  "",
  `### Recovery Actions`,
@@ -34261,7 +34196,6 @@ ${errorStack.slice(0, 1000)}
  context: {
  summary: args.summary,
  files_touched: args.files_touched || [],
- skip_ubs_scan: args.skip_ubs_scan ?? false,
  skip_verification: args.skip_verification ?? false
  },
  recovery: {
@@ -34273,7 +34207,6 @@ ${errorStack.slice(0, 1000)}
  ],
  common_fixes: {
  "Verification Gate": "Use skip_verification=true to bypass (not recommended)",
- "UBS scan": "Use skip_ubs_scan=true to bypass",
  "Cell close": "Check cell status with hive_query(), may need hive_update() first",
  "Self-evaluation": "Check evaluation JSON format matches EvaluationSchema"
  }
@@ -34579,8 +34512,9 @@ var swarm_recover = tool({
  },
  async execute(args) {
  try {
- const { getDatabase } = await import("swarm-mail");
- const db = await getDatabase(args.project_key);
+ const { getSwarmMailLibSQL: getSwarmMailLibSQL3 } = await import("swarm-mail");
+ const swarmMail = await getSwarmMailLibSQL3(args.project_key);
+ const db = await swarmMail.getDatabase();
  const result = await db.query(`SELECT * FROM swarm_contexts
  WHERE epic_id = $1
  ORDER BY updated_at DESC
@@ -35214,6 +35148,167 @@ Other cell operations:
  **Memory is the swarm's collective intelligence. Query it. Feed it.**

  Begin now.`;
+ var RESEARCHER_PROMPT = `You are a swarm researcher gathering documentation for: **{research_id}**
+
+ ## [IDENTITY]
+ Agent: (assigned at spawn)
+ Research Task: {research_id}
+ Epic: {epic_id}
+
+ ## [MISSION]
+ Gather comprehensive documentation for the specified technologies to inform task decomposition.
+
+ **COORDINATOR PROVIDED THESE TECHNOLOGIES TO RESEARCH:**
+ {tech_stack}
+
+ You do NOT discover what to research - the coordinator already decided that.
+ You DO discover what TOOLS are available to fetch documentation.
+
+ ## [OUTPUT MODE]
+ {check_upgrades}
+
+ ## [WORKFLOW]
+
+ ### Step 1: Initialize (MANDATORY FIRST)
+ ```
+ swarmmail_init(project_path="{project_path}", task_description="{research_id}: Documentation research")
+ ```
+
+ ### Step 2: Discover Available Documentation Tools
+ Check what's available for fetching docs:
+ - **next-devtools**: `nextjs_docs` for Next.js documentation
+ - **context7**: Library documentation lookup (`use context7` in prompts)
+ - **fetch**: General web fetching for official docs sites
+ - **pdf-brain**: Internal knowledge base search
+
+ **Don't assume** - check which tools exist in your environment.
+
+ ### Step 3: Read Installed Versions
+ For each technology in the tech stack:
+ 1. Check package.json (or equivalent) for installed version
+ 2. Record exact version numbers
+ 3. Note any version constraints (^, ~, etc.)
+
+ ### Step 4: Fetch Documentation
+ For EACH technology in the list:
+ - Use the most appropriate tool (Next.js → nextjs_docs, libraries → context7, others → fetch)
+ - Fetch documentation for the INSTALLED version (not latest, unless --check-upgrades)
+ - Focus on: API changes, breaking changes, migration guides, best practices
+ - Extract key patterns, gotchas, and compatibility notes
+
+ **If --check-upgrades mode:**
+ - ALSO fetch docs for the LATEST version
+ - Compare installed vs latest
+ - Note breaking changes, new features, migration complexity
+
+ ### Step 5: Store Detailed Findings
+ For EACH technology, store in semantic-memory:
+ ```
+ semantic-memory_store(
+ information="<technology-name> <version>: <key patterns, gotchas, API changes, compatibility notes>",
+ tags="research, <tech-name>, documentation, {epic_id}"
+ )
+ ```
+
+ **Why store individually?** Future agents can search by technology name.
+
+ ### Step 6: Broadcast Summary
+ Send condensed findings to coordinator:
+ ```
+ swarmmail_send(
+ to=["coordinator"],
+ subject="Research Complete: {research_id}",
+ body="<brief summary - see semantic-memory for details>",
+ thread_id="{epic_id}"
+ )
+ ```
+
+ ### Step 7: Return Structured Output
+ Output JSON with:
+ ```json
+ {
+ "technologies": [
+ {
+ "name": "string",
+ "installed_version": "string",
+ "latest_version": "string | null", // Only if --check-upgrades
+ "key_patterns": ["string"],
+ "gotchas": ["string"],
+ "breaking_changes": ["string"], // Only if --check-upgrades
+ "memory_id": "string" // ID of semantic-memory entry
+ }
+ ],
+ "summary": "string" // Condensed summary for shared_context
+ }
+ ```
+
+ ## [CRITICAL REQUIREMENTS]
+
+ **NON-NEGOTIABLE:**
+ 1. Step 1 (swarmmail_init) MUST be first
+ 2. Research ONLY the technologies the coordinator specified
+ 3. Fetch docs for INSTALLED versions (unless --check-upgrades)
+ 4. Store detailed findings in semantic-memory (one per technology)
+ 5. Return condensed summary for coordinator (full details in memory)
+ 6. Use appropriate doc tools (nextjs_docs for Next.js, context7 for libraries, etc.)
+
+ **Output goes TWO places:**
+ - **semantic-memory**: Detailed findings (searchable by future agents)
+ - **Return JSON**: Condensed summary (for coordinator's shared_context)
+
+ Begin research now.`;
+ var COORDINATOR_POST_WORKER_CHECKLIST = `
+ ## ⚠️ MANDATORY: Post-Worker Review (DO THIS IMMEDIATELY)
+
+ **A worker just returned. Before doing ANYTHING else, complete this checklist:**
+
+ ### Step 1: Check Swarm Mail
+ ```
+ swarmmail_inbox()
+ swarmmail_read_message(message_id=N) // Read any messages from the worker
+ ```
+
+ ### Step 2: Review the Work
+ ```
+ swarm_review(
+ project_key="{project_key}",
+ epic_id="{epic_id}",
+ task_id="{task_id}",
+ files_touched=[{files_touched}]
+ )
+ ```
+
+ This generates a review prompt with:
+ - Epic context (what we're trying to achieve)
+ - Subtask requirements
+ - Git diff of changes
+ - Dependency status
+
+ ### Step 3: Evaluate Against Criteria
+ - Does the work fulfill the subtask requirements?
+ - Does it serve the overall epic goal?
+ - Does it enable downstream tasks?
+ - Type safety, no obvious bugs?
+
+ ### Step 4: Send Feedback
+ ```
+ swarm_review_feedback(
+ project_key="{project_key}",
+ task_id="{task_id}",
+ worker_id="{worker_id}",
+ status="approved", // or "needs_changes"
+ summary="<brief summary>",
+ issues="[]" // or "[{file, line, issue, suggestion}]"
+ )
+ ```
+
+ ### Step 5: ONLY THEN Continue
+ - If approved: Close the cell, spawn next worker
+ - If needs_changes: Worker gets feedback, retries (max 3 attempts)
+ - If 3 failures: Mark blocked, escalate to human
+
+ **⚠️ DO NOT spawn the next worker until review is complete.**
+ `;
  var EVALUATION_PROMPT = `Evaluate the work completed for this subtask.

  ## Subtask
@@ -35250,6 +35345,12 @@ For each criterion, assess passed/failed and provide brief feedback:

  If any criterion fails, the overall evaluation fails and retry_suggestion
  should describe what needs to be fixed.`;
+ function formatResearcherPrompt(params) {
+ const techList = params.tech_stack.map((t) => `- ${t}`).join(`
+ `);
+ const upgradesMode = params.check_upgrades ? "**UPGRADE COMPARISON MODE**: Fetch docs for BOTH installed AND latest versions. Compare and note breaking changes." : "**DEFAULT MODE**: Fetch docs for INSTALLED versions only (from lockfiles).";
+ return RESEARCHER_PROMPT.replace(/{research_id}/g, params.research_id).replace(/{epic_id}/g, params.epic_id).replace("{tech_stack}", techList).replace("{project_path}", params.project_path).replace("{check_upgrades}", upgradesMode);
+ }
  function formatSubtaskPromptV2(params) {
  const fileList = params.files.length > 0 ? params.files.map((f) => `- \`${f}\``).join(`
  `) : "(no specific files - use judgment)";
@@ -35385,6 +35486,8 @@ var swarm_spawn_subtask = tool({
  liteModel: "anthropic/claude-haiku-4-5"
  };
  const selectedModel = selectWorkerModel2(subtask, config2);
+ const filesJoined = args.files.map((f) => `"${f}"`).join(", ");
+ const postCompletionInstructions = COORDINATOR_POST_WORKER_CHECKLIST.replace(/{project_key}/g, args.project_path || "$PWD").replace(/{epic_id}/g, args.epic_id).replace(/{task_id}/g, args.bead_id).replace(/{files_touched}/g, filesJoined).replace(/{worker_id}/g, "worker");
  return JSON.stringify({
  prompt,
  bead_id: args.bead_id,
@@ -35392,7 +35495,50 @@ var swarm_spawn_subtask = tool({
  files: args.files,
  project_path: args.project_path,
  recovery_context: args.recovery_context,
- recommended_model: selectedModel
+ recommended_model: selectedModel,
+ post_completion_instructions: postCompletionInstructions
+ }, null, 2);
+ }
+ });
+ var swarm_spawn_researcher = tool({
+ description: "Prepare a research task for spawning. Returns prompt for gathering technology documentation. Researcher fetches docs and stores findings in semantic-memory.",
+ args: {
+ research_id: tool.schema.string().describe("Unique ID for this research task"),
+ epic_id: tool.schema.string().describe("Parent epic ID"),
+ tech_stack: tool.schema.array(tool.schema.string()).describe("Explicit list of technologies to research (from coordinator)"),
+ project_path: tool.schema.string().describe("Absolute project path for swarmmail_init"),
+ check_upgrades: tool.schema.boolean().optional().describe("If true, compare installed vs latest versions (default: false)")
+ },
+ async execute(args) {
+ const prompt = formatResearcherPrompt({
+ research_id: args.research_id,
+ epic_id: args.epic_id,
+ tech_stack: args.tech_stack,
+ project_path: args.project_path,
+ check_upgrades: args.check_upgrades ?? false
+ });
+ return JSON.stringify({
+ prompt,
+ research_id: args.research_id,
+ epic_id: args.epic_id,
+ tech_stack: args.tech_stack,
+ project_path: args.project_path,
+ check_upgrades: args.check_upgrades ?? false,
+ subagent_type: "swarm/researcher",
+ expected_output: {
+ technologies: [
+ {
+ name: "string",
+ installed_version: "string",
+ latest_version: "string | null",
+ key_patterns: ["string"],
+ gotchas: ["string"],
+ breaking_changes: ["string"],
+ memory_id: "string"
+ }
+ ],
+ summary: "string"
+ }
  }, null, 2);
  }
  });
@@ -35507,6 +35653,7 @@ ${args.context}` : `## Additional Context
  var promptTools = {
  swarm_subtask_prompt,
  swarm_spawn_subtask,
+ swarm_spawn_researcher,
  swarm_evaluation_prompt,
  swarm_plan_prompt
  };
@@ -36559,7 +36706,7 @@ var mandateTools = {

  // src/memory-tools.ts
  init_dist();
- import { getSwarmMail as getSwarmMail2 } from "swarm-mail";
+ import { getSwarmMailLibSQL as getSwarmMailLibSQL3 } from "swarm-mail";

  // ../../node_modules/.bun/effect@3.19.12/node_modules/effect/dist/esm/Function.js
  var isFunction = (input) => typeof input === "function";
@@ -49980,7 +50127,8 @@ import {
  makeOllamaLive,
  Ollama,
  legacyDatabaseExists,
- migrateLegacyMemories
+ migrateLegacyMemories,
+ toSwarmDb
  } from "swarm-mail";
  var migrationChecked = false;
  async function maybeAutoMigrate(db) {
@@ -50014,7 +50162,8 @@ async function createMemoryAdapter(db) {
  migrationChecked = true;
  await maybeAutoMigrate(db);
  }
- const store = createMemoryStore(db);
+ const drizzleDb = toSwarmDb(db);
+ const store = createMemoryStore(drizzleDb);
  const config2 = getDefaultConfig();
  const ollamaLayer = makeOllamaLive(config2);
  const generateId = () => {
@@ -50159,9 +50308,9 @@ async function getMemoryAdapter(projectPath) {
  if (cachedAdapter && cachedProjectPath === path2) {
  return cachedAdapter;
  }
- const swarmMail = await getSwarmMail2(path2);
- const db = await swarmMail.getDatabase();
- cachedAdapter = await createMemoryAdapter(db);
+ const swarmMail = await getSwarmMailLibSQL3(path2);
+ const dbAdapter = await swarmMail.getDatabase();
+ cachedAdapter = await createMemoryAdapter(dbAdapter);
  cachedProjectPath = path2;
  return cachedAdapter;
  }
@@ -50268,6 +50417,368 @@ var memoryTools = {
  "semantic-memory_check": semantic_memory_check
  };

+ // src/observability-tools.ts
+ init_dist();
+ import {
+ agentActivity,
+ checkpointFrequency,
+ failedDecompositions,
+ getSwarmMailLibSQL as getSwarmMailLibSQL4,
+ humanFeedback,
+ lockContention,
+ messageLatency,
+ recoverySuccess,
+ scopeViolations,
+ strategySuccessRates,
+ taskDuration
+ } from "swarm-mail";
+ function parseSince(since) {
+ const now = Date.now();
+ const match11 = since.match(/^(\d+)([dhm])$/);
+ if (!match11) {
+ throw new Error(`Invalid since format: ${since}. Use "7d", "24h", or "1h"`);
+ }
+ const [, value, unit] = match11;
+ const num = Number.parseInt(value, 10);
+ switch (unit) {
+ case "d":
+ return now - num * 24 * 60 * 60 * 1000;
+ case "h":
+ return now - num * 60 * 60 * 1000;
+ case "m":
+ return now - num * 60 * 1000;
+ default:
+ throw new Error(`Unknown unit: ${unit}`);
+ }
+ }
+ async function executeQuery(swarmMail, query) {
+ const db = await swarmMail.getDatabase();
+ const result = await db.query(query.sql, Object.values(query.parameters || {}));
+ return result.rows;
+ }
+ function formatSummary(queryType, results) {
+ if (results.length === 0) {
+ return `No ${queryType} data found.`;
+ }
+ const count = results.length;
+ const preview = results.slice(0, 3);
+ return `${queryType}: ${count} result(s). Top 3: ${JSON.stringify(preview, null, 2).slice(0, 400)}`;
+ }
+ function capResults(results) {
+ return results.slice(0, 50);
+ }
+ var swarm_analytics = tool({
+ description: "Query pre-built analytics for swarm coordination. Returns structured data about failed decompositions, strategy success rates, lock contention, agent activity, message latency, scope violations, task duration, checkpoint frequency, recovery success, and human feedback.",
+ args: {
+ query: tool.schema.enum([
+ "failed-decompositions",
+ "strategy-success-rates",
+ "lock-contention",
+ "agent-activity",
+ "message-latency",
+ "scope-violations",
+ "task-duration",
+ "checkpoint-frequency",
+ "recovery-success",
+ "human-feedback"
+ ]).describe("Type of analytics query to run"),
+ since: tool.schema.string().optional().describe("Time filter: '7d', '24h', '1h' (optional)"),
+ format: tool.schema.enum(["json", "summary"]).optional().describe("Output format: 'json' (default) or 'summary' (context-efficient)")
+ },
+ async execute(args2) {
+ try {
+ const projectPath = process.cwd();
+ const db = await getSwarmMailLibSQL4(projectPath);
+ const filters = {
+ project_key: projectPath
+ };
+ if (args2.since) {
+ filters.since = parseSince(args2.since);
+ }
+ let query;
+ switch (args2.query) {
+ case "failed-decompositions":
+ query = failedDecompositions(filters);
+ break;
+ case "strategy-success-rates":
+ query = strategySuccessRates(filters);
+ break;
+ case "lock-contention":
+ query = lockContention(filters);
+ break;
+ case "agent-activity":
+ query = agentActivity(filters);
+ break;
+ case "message-latency":
+ query = messageLatency(filters);
+ break;
+ case "scope-violations":
+ query = scopeViolations.buildQuery ? scopeViolations.buildQuery(filters) : scopeViolations;
+ break;
+ case "task-duration":
+ query = taskDuration.buildQuery ? taskDuration.buildQuery(filters) : taskDuration;
+ break;
+ case "checkpoint-frequency":
+ query = checkpointFrequency.buildQuery ? checkpointFrequency.buildQuery(filters) : checkpointFrequency;
+ break;
+ case "recovery-success":
+ query = recoverySuccess.buildQuery ? recoverySuccess.buildQuery(filters) : recoverySuccess;
+ break;
+ case "human-feedback":
+ query = humanFeedback.buildQuery ? humanFeedback.buildQuery(filters) : humanFeedback;
+ break;
+ default:
+ return JSON.stringify({
+ error: `Unknown query type: ${args2.query}`
+ });
+ }
+ const results = await executeQuery(db, query);
+ if (args2.format === "summary") {
+ return formatSummary(args2.query, results);
+ }
+ return JSON.stringify({
+ query: args2.query,
+ filters,
+ count: results.length,
+ results
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var swarm_query = tool({
+ description: "Execute raw SQL queries against the swarm event store. Context-safe: results capped at 50 rows. Useful for custom analytics and debugging.",
+ args: {
+ sql: tool.schema.string().describe("SQL query to execute (SELECT only for safety)"),
+ format: tool.schema.enum(["json", "table"]).optional().describe("Output format: 'json' (default) or 'table' (visual)")
+ },
+ async execute(args2) {
+ try {
+ const projectPath = process.cwd();
+ const swarmMail = await getSwarmMailLibSQL4(projectPath);
+ const db = await swarmMail.getDatabase();
+ if (!args2.sql.trim().toLowerCase().startsWith("select")) {
+ return JSON.stringify({
+ error: "Only SELECT queries are allowed for safety"
+ });
+ }
+ const result = await db.query(args2.sql, []);
+ const rows = result.rows;
+ const cappedRows = capResults(rows);
+ if (args2.format === "table") {
+ if (cappedRows.length === 0) {
+ return "No results";
+ }
+ const headers = Object.keys(cappedRows[0]);
+ const headerRow = headers.join(" | ");
+ const separator = headers.map(() => "---").join(" | ");
+ const dataRows = cappedRows.map((row) => headers.map((h) => row[h]).join(" | "));
+ return [headerRow, separator, ...dataRows].join(`
+ `);
+ }
+ return JSON.stringify({
+ count: cappedRows.length,
+ total: rows.length,
+ capped: rows.length > 50,
+ results: cappedRows
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var swarm_diagnose = tool({
+ description: "Auto-diagnose issues for a specific epic or task. Returns structured diagnosis with blockers, conflicts, slow tasks, errors, and timeline.",
+ args: {
+ epic_id: tool.schema.string().optional().describe("Epic ID to diagnose"),
+ bead_id: tool.schema.string().optional().describe("Task ID to diagnose"),
+ include: tool.schema.array(tool.schema.enum([
+ "blockers",
+ "conflicts",
+ "slow_tasks",
+ "errors",
+ "timeline"
+ ])).optional().describe("What to include in diagnosis (default: all)")
+ },
+ async execute(args2) {
+ try {
+ const projectPath = process.cwd();
+ const swarmMail = await getSwarmMailLibSQL4(projectPath);
+ const db = await swarmMail.getDatabase();
+ const diagnosis = [];
+ const include = args2.include || [
+ "blockers",
+ "conflicts",
+ "slow_tasks",
+ "errors",
+ "timeline"
+ ];
+ if (include.includes("blockers")) {
+ const blockerQuery = `
+ SELECT json_extract(data, '$.agent_name') as agent,
+ json_extract(data, '$.bead_id') as bead_id,
+ timestamp
+ FROM events
+ WHERE type = 'task_blocked'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ ${args2.bead_id ? "AND json_extract(data, '$.bead_id') = ?" : ""}
+ ORDER BY timestamp DESC
+ LIMIT 10
+ `;
+ const params = [];
+ if (args2.epic_id)
+ params.push(args2.epic_id);
+ if (args2.bead_id)
+ params.push(args2.bead_id);
+ const blockers = await db.query(blockerQuery, params);
+ if (blockers.rows.length > 0) {
+ diagnosis.push({
+ type: "blockers",
+ message: `Found ${blockers.rows.length} blocked task(s)`,
+ severity: "high"
+ });
+ }
+ }
+ if (include.includes("errors")) {
+ const errorQuery = `
+ SELECT type, json_extract(data, '$.error_count') as error_count
+ FROM events
+ WHERE type = 'subtask_outcome'
+ AND json_extract(data, '$.success') = 'false'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ ${args2.bead_id ? "AND json_extract(data, '$.bead_id') = ?" : ""}
+ LIMIT 10
+ `;
+ const params = [];
+ if (args2.epic_id)
+ params.push(args2.epic_id);
+ if (args2.bead_id)
+ params.push(args2.bead_id);
+ const errors3 = await db.query(errorQuery, params);
+ if (errors3.rows.length > 0) {
+ diagnosis.push({
+ type: "errors",
+ message: `Found ${errors3.rows.length} failed task(s)`,
+ severity: "high"
+ });
+ }
+ }
+ let timeline = [];
+ if (include.includes("timeline")) {
+ const timelineQuery = `
+ SELECT timestamp, type, json_extract(data, '$.agent_name') as agent
+ FROM events
+ ${args2.epic_id ? "WHERE json_extract(data, '$.epic_id') = ?" : ""}
+ ${args2.bead_id ? (args2.epic_id ? "AND" : "WHERE") + " json_extract(data, '$.bead_id') = ?" : ""}
+ ORDER BY timestamp DESC
+ LIMIT 20
+ `;
+ const params = [];
+ if (args2.epic_id)
+ params.push(args2.epic_id);
+ if (args2.bead_id)
+ params.push(args2.bead_id);
+ const events = await db.query(timelineQuery, params);
+ timeline = events.rows;
+ }
+ return JSON.stringify({
+ epic_id: args2.epic_id,
+ bead_id: args2.bead_id,
+ diagnosis,
+ timeline: include.includes("timeline") ? timeline : undefined
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var swarm_insights = tool({
+ description: "Generate learning insights from swarm coordination metrics. Analyzes success rates, duration, conflicts, and retries to provide actionable recommendations.",
+ args: {
+ scope: tool.schema.enum(["epic", "project", "recent"]).describe("Scope of analysis: 'epic', 'project', or 'recent'"),
+ epic_id: tool.schema.string().optional().describe("Epic ID (required if scope='epic')"),
+ metrics: tool.schema.array(tool.schema.enum([
+ "success_rate",
+ "avg_duration",
+ "conflict_rate",
+ "retry_rate"
+ ])).describe("Metrics to analyze")
+ },
+ async execute(args2) {
+ try {
+ if (args2.scope === "epic" && !args2.epic_id) {
+ return JSON.stringify({
+ error: "epic_id is required when scope='epic'"
+ });
+ }
+ const projectPath = process.cwd();
+ const swarmMail = await getSwarmMailLibSQL4(projectPath);
+ const db = await swarmMail.getDatabase();
+ const insights = [];
+ if (args2.metrics.includes("success_rate")) {
+ const query = `
+ SELECT
+ SUM(CASE WHEN json_extract(data, '$.success') = 'true' THEN 1 ELSE 0 END) as successes,
+ COUNT(*) as total
+ FROM events
+ WHERE type = 'subtask_outcome'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ `;
+ const result = await db.query(query, args2.epic_id ? [args2.epic_id] : []);
+ const row = result.rows[0];
+ if (row && row.total > 0) {
+ const rate = row.successes / row.total * 100;
+ insights.push({
+ metric: "success_rate",
+ value: `${rate.toFixed(1)}%`,
+ insight: rate < 50 ? "Low success rate - review decomposition strategy" : rate < 80 ? "Moderate success rate - monitor for patterns" : "Good success rate - maintain current approach"
+ });
+ }
+ }
+ if (args2.metrics.includes("avg_duration")) {
+ const query = `
+ SELECT AVG(CAST(json_extract(data, '$.duration_ms') AS REAL)) as avg_duration
+ FROM events
+ WHERE type = 'subtask_outcome'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ `;
+ const result = await db.query(query, args2.epic_id ? [args2.epic_id] : []);
+ const row = result.rows[0];
+ if (row?.avg_duration) {
+ const avgMinutes = (row.avg_duration / 60000).toFixed(1);
+ insights.push({
+ metric: "avg_duration",
+ value: `${avgMinutes} min`,
+ insight: row.avg_duration > 600000 ? "Tasks taking >10min - consider smaller decomposition" : "Task duration is reasonable"
+ });
+ }
+ }
+ return JSON.stringify({
+ scope: args2.scope,
+ epic_id: args2.epic_id,
+ insights
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var observabilityTools = {
+ swarm_analytics,
+ swarm_query,
+ swarm_diagnose,
+ swarm_insights
+ };
+
  // src/output-guardrails.ts
  var DEFAULT_GUARDRAIL_CONFIG = {
  defaultMaxChars: 32000,
@@ -50690,7 +51201,8 @@ var SwarmPlugin = async (input) => {
  ...repoCrawlTools,
  ...skillsTools,
  ...mandateTools,
- ...memoryTools
+ ...memoryTools,
+ ...observabilityTools
  },
  event: async ({ event }) => {
  if (event.type === "session.idle") {