opencode-swarm-plugin 0.31.7 → 0.33.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +4 -4
- package/.turbo/turbo-test.log +324 -316
- package/CHANGELOG.md +394 -0
- package/README.md +129 -181
- package/bin/swarm.test.ts +31 -0
- package/bin/swarm.ts +635 -140
- package/dist/compaction-hook.d.ts +1 -1
- package/dist/compaction-hook.d.ts.map +1 -1
- package/dist/hive.d.ts.map +1 -1
- package/dist/index.d.ts +17 -2
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +653 -139
- package/dist/memory-tools.d.ts.map +1 -1
- package/dist/memory.d.ts +5 -4
- package/dist/memory.d.ts.map +1 -1
- package/dist/observability-tools.d.ts +116 -0
- package/dist/observability-tools.d.ts.map +1 -0
- package/dist/plugin.js +648 -136
- package/dist/skills.d.ts.map +1 -1
- package/dist/swarm-orchestrate.d.ts +29 -5
- package/dist/swarm-orchestrate.d.ts.map +1 -1
- package/dist/swarm-prompts.d.ts +66 -0
- package/dist/swarm-prompts.d.ts.map +1 -1
- package/dist/swarm.d.ts +17 -2
- package/dist/swarm.d.ts.map +1 -1
- package/evals/lib/{data-loader.test.ts → data-loader.evalite-test.ts} +7 -6
- package/evals/lib/data-loader.ts +1 -1
- package/evals/scorers/{outcome-scorers.test.ts → outcome-scorers.evalite-test.ts} +1 -1
- package/examples/plugin-wrapper-template.ts +316 -12
- package/global-skills/swarm-coordination/SKILL.md +118 -8
- package/package.json +3 -2
- package/src/compaction-hook.ts +5 -3
- package/src/hive.integration.test.ts +83 -1
- package/src/hive.ts +37 -12
- package/src/index.ts +25 -1
- package/src/mandate-storage.integration.test.ts +601 -0
- package/src/memory-tools.ts +6 -4
- package/src/memory.integration.test.ts +117 -49
- package/src/memory.test.ts +41 -217
- package/src/memory.ts +12 -8
- package/src/observability-tools.test.ts +346 -0
- package/src/observability-tools.ts +594 -0
- package/src/repo-crawl.integration.test.ts +441 -0
- package/src/skills.integration.test.ts +1192 -0
- package/src/skills.test.ts +42 -1
- package/src/skills.ts +8 -4
- package/src/structured.integration.test.ts +817 -0
- package/src/swarm-deferred.integration.test.ts +157 -0
- package/src/swarm-deferred.test.ts +38 -0
- package/src/swarm-mail.integration.test.ts +15 -19
- package/src/swarm-orchestrate.integration.test.ts +282 -0
- package/src/swarm-orchestrate.test.ts +123 -0
- package/src/swarm-orchestrate.ts +279 -201
- package/src/swarm-prompts.test.ts +481 -0
- package/src/swarm-prompts.ts +297 -0
- package/src/swarm-research.integration.test.ts +544 -0
- package/src/swarm-research.test.ts +698 -0
- package/src/swarm-research.ts +472 -0
- package/src/swarm-review.integration.test.ts +290 -0
- package/src/swarm.integration.test.ts +23 -20
- package/src/swarm.ts +6 -3
- package/src/tool-adapter.integration.test.ts +1221 -0
package/dist/index.js
CHANGED
@@ -26558,7 +26558,7 @@ var init_skills = __esm(() => {
 "skills"
 ];
 skills_list = tool({
- description: `List all available skills in the project.
+ description: `[DEPRECATED] List all available skills in the project.

 Skills are specialized instructions that help with specific domains or tasks.
 Use this tool to discover what skills are available, then use skills_use to
@@ -26569,6 +26569,7 @@ Returns skill names, descriptions, and whether they have executable scripts.`,
 tag: tool.schema.string().optional().describe("Optional tag to filter skills by")
 },
 async execute(args) {
+ console.warn("[DEPRECATED] skills_list is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
 const skills = await discoverSkills();
 let refs = Array.from(skills.values());
 if (args.tag) {
@@ -26591,7 +26592,7 @@ ${formatted}`;
 }
 });
 skills_use = tool({
- description: `Activate a skill by loading its full instructions.
+ description: `[DEPRECATED] Activate a skill by loading its full instructions.

 After calling this tool, follow the skill's instructions for the current task.
 Skills provide domain-specific guidance and best practices.
@@ -26602,6 +26603,7 @@ If the skill has scripts, you can run them with skills_execute.`,
 include_scripts: tool.schema.boolean().optional().describe("Also list available scripts (default: true)")
 },
 async execute(args) {
+ console.warn("[DEPRECATED] skills_use is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
 const skill = await getSkill(args.name);
 if (!skill) {
 const available = await listSkills();
@@ -26634,7 +26636,7 @@ Run scripts with skills_execute tool.`;
 }
 });
 skills_execute = tool({
- description: `Execute a script from a skill's scripts/ directory.
+ description: `[DEPRECATED] Execute a script from a skill's scripts/ directory.

 Some skills include helper scripts for common operations.
 Use skills_use first to see available scripts, then execute them here.
@@ -26646,6 +26648,7 @@ Scripts run in the skill's directory with the project directory as an argument.`
 args: tool.schema.array(tool.schema.string()).optional().describe("Additional arguments to pass to the script")
 },
 async execute(args, ctx) {
+ console.warn("[DEPRECATED] skills_execute is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
 const skill = await getSkill(args.skill);
 if (!skill) {
 return `Skill '${args.skill}' not found.`;
@@ -26691,7 +26694,7 @@ ${output}`;
 }
 });
 skills_read = tool({
- description: `Read a resource file from a skill's directory.
+ description: `[DEPRECATED] Read a resource file from a skill's directory.

 Skills may include additional files like:
 - examples.md - Example usage
@@ -26704,6 +26707,7 @@ Use this to access supplementary skill resources.`,
 file: tool.schema.string().describe("Relative path to the file within the skill directory")
 },
 async execute(args) {
+ console.warn("[DEPRECATED] skills_read is deprecated. OpenCode now provides native skills support. This tool will be removed in a future version.");
 const skill = await getSkill(args.skill);
 if (!skill) {
 return `Skill '${args.skill}' not found.`;
@@ -27084,7 +27088,7 @@ import {
 FlushManager,
 importFromJSONL,
 syncMemories,
-
+ getSwarmMailLibSQL,
 resolvePartialId
 } from "swarm-mail";
 import { existsSync, readFileSync } from "node:fs";
@@ -27977,7 +27981,7 @@ async function getHiveAdapter(projectKey) {
 if (adapterCache.has(projectKey)) {
 return adapterCache.get(projectKey);
 }
- const swarmMail = await
+ const swarmMail = await getSwarmMailLibSQL(projectKey);
 const db = await swarmMail.getDatabase();
 const adapter = createHiveAdapter(db, projectKey);
 await adapter.runMigrations();
@@ -28345,7 +28349,7 @@ var hive_sync = tool({
 outputPath: `${projectKey}/.hive/issues.jsonl`
 });
 const flushResult = await withTimeout(flushManager.flush(), TIMEOUT_MS, "flush hive");
- const swarmMail = await
+ const swarmMail = await getSwarmMailLibSQL(projectKey);
 const db = await swarmMail.getDatabase();
 const hivePath = join(projectKey, ".hive");
 let memoriesSynced = 0;
@@ -28378,9 +28382,27 @@ var hive_sync = tool({
 const remoteCheckResult2 = await runGitCommand(["remote"]);
 const hasRemote2 = remoteCheckResult2.stdout.trim() !== "";
 if (hasRemote2) {
- const
-
-
+ const statusResult = await runGitCommand(["status", "--porcelain"]);
+ const hasUnstagedChanges = statusResult.stdout.trim() !== "";
+ let didStash = false;
+ if (hasUnstagedChanges) {
+ const stashResult = await runGitCommand(["stash", "push", "-u", "-m", "hive_sync: auto-stash before pull"]);
+ if (stashResult.exitCode === 0) {
+ didStash = true;
+ }
+ }
+ try {
+ const pullResult = await withTimeout(runGitCommand(["pull", "--rebase"]), TIMEOUT_MS, "git pull --rebase");
+ if (pullResult.exitCode !== 0) {
+ throw new HiveError(`Failed to pull: ${pullResult.stderr}`, "git pull --rebase", pullResult.exitCode);
+ }
+ } finally {
+ if (didStash) {
+ const popResult = await runGitCommand(["stash", "pop"]);
+ if (popResult.exitCode !== 0) {
+ console.warn(`[hive_sync] Warning: stash pop failed. Your changes are in 'git stash list'. Error: ${popResult.stderr}`);
+ }
+ }
 }
 }
 }
@@ -32936,7 +32958,8 @@ import {
 sendSwarmMessage as sendSwarmMessage3,
 getAgent,
 createEvent as createEvent2,
- appendEvent as appendEvent2
+ appendEvent as appendEvent2,
+ getSwarmMailLibSQL as getSwarmMailLibSQL2
 } from "swarm-mail";
 init_skills();

@@ -33740,60 +33763,6 @@ ${progress.blockers.map((b) => `- ${b}`).join(`

 `);
 }
- async function runUbsScan(files) {
- if (files.length === 0) {
- return null;
- }
- const ubsAvailable = await isToolAvailable("ubs");
- if (!ubsAvailable) {
- warnMissingTool("ubs");
- return null;
- }
- try {
- const result = await Bun.$`ubs scan ${files.join(" ")} --json`.quiet().nothrow();
- const output = result.stdout.toString();
- if (!output.trim()) {
- return {
- exitCode: result.exitCode,
- bugs: [],
- summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
- };
- }
- try {
- const parsed = JSON.parse(output);
- if (typeof parsed !== "object" || parsed === null) {
- throw new Error("UBS output is not an object");
- }
- if (!Array.isArray(parsed.bugs)) {
- console.warn("[swarm] UBS output missing bugs array, using empty");
- }
- if (typeof parsed.summary !== "object" || parsed.summary === null) {
- console.warn("[swarm] UBS output missing summary object, using empty");
- }
- return {
- exitCode: result.exitCode,
- bugs: Array.isArray(parsed.bugs) ? parsed.bugs : [],
- summary: parsed.summary || {
- total: 0,
- critical: 0,
- high: 0,
- medium: 0,
- low: 0
- }
- };
- } catch (error45) {
- console.error(`[swarm] CRITICAL: UBS scan failed to parse JSON output because output is malformed:`, error45);
- console.error(`[swarm] Raw output: ${output}. Try: Run 'ubs doctor' to check installation, verify UBS version with 'ubs --version' (need v1.0.0+), or check if UBS supports --json flag.`);
- return {
- exitCode: result.exitCode,
- bugs: [],
- summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
- };
- }
- } catch {
- return null;
- }
- }
 async function runTypecheckVerification() {
 const step = {
 name: "typecheck",
@@ -33878,34 +33847,9 @@ async function runTestVerification(filesTouched) {
 }
 return step;
 }
- async function runVerificationGate(filesTouched,
+ async function runVerificationGate(filesTouched, _skipUbs = false) {
 const steps = [];
 const blockers = [];
- if (!skipUbs && filesTouched.length > 0) {
- const ubsResult = await runUbsScan(filesTouched);
- if (ubsResult) {
- const ubsStep = {
- name: "ubs_scan",
- command: `ubs scan ${filesTouched.join(" ")}`,
- passed: ubsResult.summary.critical === 0,
- exitCode: ubsResult.exitCode
- };
- if (!ubsStep.passed) {
- ubsStep.error = `Found ${ubsResult.summary.critical} critical bugs`;
- blockers.push(`UBS found ${ubsResult.summary.critical} critical bug(s). Try: Run 'ubs scan ${filesTouched.join(" ")}' to see details, fix critical bugs in reported files, or use skip_ubs_scan=true to bypass (not recommended).`);
- }
- steps.push(ubsStep);
- } else {
- steps.push({
- name: "ubs_scan",
- command: "ubs scan",
- passed: true,
- exitCode: 0,
- skipped: true,
- skipReason: "UBS not available"
- });
- }
- }
 const typecheckStep = await runTypecheckVerification();
 steps.push(typecheckStep);
 if (!typecheckStep.passed && !typecheckStep.skipped) {
@@ -34228,16 +34172,15 @@ ${args.files_affected.map((f) => `- \`${f}\``).join(`
 }
 });
 var swarm_complete = tool({
- description: "Mark subtask complete with Verification Gate. Runs
+ description: "Mark subtask complete with Verification Gate. Runs typecheck and tests before allowing completion.",
 args: {
 project_key: tool.schema.string().describe("Project path"),
 agent_name: tool.schema.string().describe("Your Agent Mail name"),
 bead_id: tool.schema.string().describe("Subtask bead ID"),
 summary: tool.schema.string().describe("Brief summary of work done"),
 evaluation: tool.schema.string().optional().describe("Self-evaluation JSON (Evaluation schema)"),
- files_touched: tool.schema.array(tool.schema.string()).optional().describe("Files modified - will be verified (
-
- skip_verification: tool.schema.boolean().optional().describe("Skip ALL verification (UBS, typecheck, tests). Use sparingly! (default: false)"),
+ files_touched: tool.schema.array(tool.schema.string()).optional().describe("Files modified - will be verified (typecheck, tests)"),
+ skip_verification: tool.schema.boolean().optional().describe("Skip ALL verification (typecheck, tests). Use sparingly! (default: false)"),
 planned_files: tool.schema.array(tool.schema.string()).optional().describe("Files that were originally planned to be modified"),
 start_time: tool.schema.number().optional().describe("Task start timestamp (Unix ms) for duration calculation"),
 error_count: tool.schema.number().optional().describe("Number of errors encountered during task"),
@@ -34320,7 +34263,7 @@ Continuing with completion, but this should be fixed for future subtasks.`;
 }
 let verificationResult = null;
 if (!args.skip_verification && args.files_touched?.length) {
- verificationResult = await runVerificationGate(args.files_touched,
+ verificationResult = await runVerificationGate(args.files_touched, false);
 if (!verificationResult.passed) {
 return JSON.stringify({
 success: false,
@@ -34342,21 +34285,6 @@ Continuing with completion, but this should be fixed for future subtasks.`;
 }, null, 2);
 }
 }
- let ubsResult = null;
- if (!args.skip_verification && !verificationResult && args.files_touched?.length && !args.skip_ubs_scan) {
- ubsResult = await runUbsScan(args.files_touched);
- if (ubsResult && ubsResult.summary.critical > 0) {
- return JSON.stringify({
- success: false,
- error: `UBS found ${ubsResult.summary.critical} critical bug(s) that must be fixed before completing`,
- ubs_scan: {
- critical_count: ubsResult.summary.critical,
- bugs: ubsResult.bugs.filter((b) => b.severity === "critical")
- },
- hint: `Fix these critical bugs: ${ubsResult.bugs.filter((b) => b.severity === "critical").map((b) => `${b.file}:${b.line} - ${b.message}`).slice(0, 3).join("; ")}. Try: Run 'ubs scan ${args.files_touched?.join(" ") || "."} --json' for full report, fix reported issues, or use skip_ubs_scan=true to bypass (not recommended).`
- }, null, 2);
- }
- }
 let contractValidation = null;
 let contractWarning;
 if (args.files_touched && args.files_touched.length > 0) {
@@ -34426,6 +34354,23 @@ This will be recorded as a negative learning signal.`;
 }
 }, null, 2);
 }
+ let deferredResolved = false;
+ let deferredError;
+ try {
+ const swarmMail = await getSwarmMailLibSQL2(args.project_key);
+ const db = await swarmMail.getDatabase();
+ const deferredUrl = `deferred:${args.bead_id}`;
+ const checkResult = await db.query(`SELECT url, resolved FROM deferred WHERE url = ? AND resolved = 0`, [deferredUrl]);
+ if (checkResult.rows.length > 0) {
+ await db.query(`UPDATE deferred SET resolved = 1, value = ? WHERE url = ? AND resolved = 0`, [JSON.stringify({ completed: true, summary: args.summary }), deferredUrl]);
+ deferredResolved = true;
+ } else {
+ console.info(`[swarm_complete] No deferred found for ${args.bead_id} - task may not be part of active swarm`);
+ }
+ } catch (error45) {
+ deferredError = error45 instanceof Error ? error45.message : String(error45);
+ console.warn(`[swarm_complete] Failed to resolve deferred (non-fatal): ${deferredError}`);
+ }
 let syncSuccess = false;
 let syncError;
 try {
@@ -34530,6 +34475,8 @@ This will be recorded as a negative learning signal.`;
 sync_error: syncError,
 message_sent: messageSent,
 message_error: messageError,
+ deferred_resolved: deferredResolved,
+ deferred_error: deferredError,
 agent_registration: {
 verified: agentRegistered,
 warning: registrationWarning || undefined
@@ -34544,15 +34491,6 @@ This will be recorded as a negative learning signal.`;
 skipReason: s.skipReason
 }))
 } : args.skip_verification ? { skipped: true, reason: "skip_verification=true" } : { skipped: true, reason: "no files_touched provided" },
- ubs_scan: ubsResult ? {
- ran: true,
- bugs_found: ubsResult.summary.total,
- summary: ubsResult.summary,
- warnings: ubsResult.bugs.filter((b) => b.severity !== "critical")
- } : verificationResult ? { ran: true, included_in_verification_gate: true } : {
- ran: false,
- reason: args.skip_ubs_scan ? "skipped" : "no files or ubs unavailable"
- },
 learning_prompt: `## Reflection

 Did you learn anything reusable during this subtask? Consider:
@@ -34590,9 +34528,7 @@ Files touched: ${args.files_touched?.join(", ") || "none recorded"}`,
 const errorStack = error45 instanceof Error ? error45.stack : undefined;
 let failedStep = "unknown";
 if (errorMessage.includes("verification")) {
- failedStep = "Verification Gate (
- } else if (errorMessage.includes("UBS") || errorMessage.includes("ubs")) {
- failedStep = "UBS scan";
+ failedStep = "Verification Gate (typecheck/tests)";
 } else if (errorMessage.includes("evaluation")) {
 failedStep = "Self-evaluation parsing";
 } else if (errorMessage.includes("bead") || errorMessage.includes("close")) {
@@ -34624,7 +34560,6 @@ ${errorStack.slice(0, 1000)}
 `### Context`,
 `- **Summary**: ${args.summary}`,
 `- **Files touched**: ${args.files_touched?.length ? args.files_touched.join(", ") : "none"}`,
- `- **Skip UBS**: ${args.skip_ubs_scan ?? false}`,
 `- **Skip verification**: ${args.skip_verification ?? false}`,
 "",
 `### Recovery Actions`,
@@ -34662,7 +34597,6 @@ ${errorStack.slice(0, 1000)}
 context: {
 summary: args.summary,
 files_touched: args.files_touched || [],
- skip_ubs_scan: args.skip_ubs_scan ?? false,
 skip_verification: args.skip_verification ?? false
 },
 recovery: {
@@ -34674,7 +34608,6 @@ ${errorStack.slice(0, 1000)}
 ],
 common_fixes: {
 "Verification Gate": "Use skip_verification=true to bypass (not recommended)",
- "UBS scan": "Use skip_ubs_scan=true to bypass",
 "Cell close": "Check cell status with hive_query(), may need hive_update() first",
 "Self-evaluation": "Check evaluation JSON format matches EvaluationSchema"
 }
@@ -34980,8 +34913,9 @@ var swarm_recover = tool({
 },
 async execute(args) {
 try {
- const {
- const
+ const { getSwarmMailLibSQL: getSwarmMailLibSQL3 } = await import("swarm-mail");
+ const swarmMail = await getSwarmMailLibSQL3(args.project_key);
+ const db = await swarmMail.getDatabase();
 const result = await db.query(`SELECT * FROM swarm_contexts
 WHERE epic_id = $1
 ORDER BY updated_at DESC
@@ -35615,6 +35549,167 @@ Other cell operations:
 **Memory is the swarm's collective intelligence. Query it. Feed it.**

 Begin now.`;
+ var RESEARCHER_PROMPT = `You are a swarm researcher gathering documentation for: **{research_id}**
+
+ ## [IDENTITY]
+ Agent: (assigned at spawn)
+ Research Task: {research_id}
+ Epic: {epic_id}
+
+ ## [MISSION]
+ Gather comprehensive documentation for the specified technologies to inform task decomposition.
+
+ **COORDINATOR PROVIDED THESE TECHNOLOGIES TO RESEARCH:**
+ {tech_stack}
+
+ You do NOT discover what to research - the coordinator already decided that.
+ You DO discover what TOOLS are available to fetch documentation.
+
+ ## [OUTPUT MODE]
+ {check_upgrades}
+
+ ## [WORKFLOW]
+
+ ### Step 1: Initialize (MANDATORY FIRST)
+ \`\`\`
+ swarmmail_init(project_path="{project_path}", task_description="{research_id}: Documentation research")
+ \`\`\`
+
+ ### Step 2: Discover Available Documentation Tools
+ Check what's available for fetching docs:
+ - **next-devtools**: \`nextjs_docs\` for Next.js documentation
+ - **context7**: Library documentation lookup (\`use context7\` in prompts)
+ - **fetch**: General web fetching for official docs sites
+ - **pdf-brain**: Internal knowledge base search
+
+ **Don't assume** - check which tools exist in your environment.
+
+ ### Step 3: Read Installed Versions
+ For each technology in the tech stack:
+ 1. Check package.json (or equivalent) for installed version
+ 2. Record exact version numbers
+ 3. Note any version constraints (^, ~, etc.)
+
+ ### Step 4: Fetch Documentation
+ For EACH technology in the list:
+ - Use the most appropriate tool (Next.js → nextjs_docs, libraries → context7, others → fetch)
+ - Fetch documentation for the INSTALLED version (not latest, unless --check-upgrades)
+ - Focus on: API changes, breaking changes, migration guides, best practices
+ - Extract key patterns, gotchas, and compatibility notes
+
+ **If --check-upgrades mode:**
+ - ALSO fetch docs for the LATEST version
+ - Compare installed vs latest
+ - Note breaking changes, new features, migration complexity
+
+ ### Step 5: Store Detailed Findings
+ For EACH technology, store in semantic-memory:
+ \`\`\`
+ semantic-memory_store(
+ information="<technology-name> <version>: <key patterns, gotchas, API changes, compatibility notes>",
+ tags="research, <tech-name>, documentation, {epic_id}"
+ )
+ \`\`\`
+
+ **Why store individually?** Future agents can search by technology name.
+
+ ### Step 6: Broadcast Summary
+ Send condensed findings to coordinator:
+ \`\`\`
+ swarmmail_send(
+ to=["coordinator"],
+ subject="Research Complete: {research_id}",
+ body="<brief summary - see semantic-memory for details>",
+ thread_id="{epic_id}"
+ )
+ \`\`\`
+
+ ### Step 7: Return Structured Output
+ Output JSON with:
+ \`\`\`json
+ {
+ "technologies": [
+ {
+ "name": "string",
+ "installed_version": "string",
+ "latest_version": "string | null", // Only if --check-upgrades
+ "key_patterns": ["string"],
+ "gotchas": ["string"],
+ "breaking_changes": ["string"], // Only if --check-upgrades
+ "memory_id": "string" // ID of semantic-memory entry
+ }
+ ],
+ "summary": "string" // Condensed summary for shared_context
+ }
+ \`\`\`
+
+ ## [CRITICAL REQUIREMENTS]
+
+ **NON-NEGOTIABLE:**
+ 1. Step 1 (swarmmail_init) MUST be first
+ 2. Research ONLY the technologies the coordinator specified
+ 3. Fetch docs for INSTALLED versions (unless --check-upgrades)
+ 4. Store detailed findings in semantic-memory (one per technology)
+ 5. Return condensed summary for coordinator (full details in memory)
+ 6. Use appropriate doc tools (nextjs_docs for Next.js, context7 for libraries, etc.)
+
+ **Output goes TWO places:**
+ - **semantic-memory**: Detailed findings (searchable by future agents)
+ - **Return JSON**: Condensed summary (for coordinator's shared_context)
+
+ Begin research now.`;
+ var COORDINATOR_POST_WORKER_CHECKLIST = `
+ ## ⚠️ MANDATORY: Post-Worker Review (DO THIS IMMEDIATELY)
+
+ **A worker just returned. Before doing ANYTHING else, complete this checklist:**
+
+ ### Step 1: Check Swarm Mail
+ \`\`\`
+ swarmmail_inbox()
+ swarmmail_read_message(message_id=N) // Read any messages from the worker
+ \`\`\`
+
+ ### Step 2: Review the Work
+ \`\`\`
+ swarm_review(
+ project_key="{project_key}",
+ epic_id="{epic_id}",
+ task_id="{task_id}",
+ files_touched=[{files_touched}]
+ )
+ \`\`\`
+
+ This generates a review prompt with:
+ - Epic context (what we're trying to achieve)
+ - Subtask requirements
+ - Git diff of changes
+ - Dependency status
+
+ ### Step 3: Evaluate Against Criteria
+ - Does the work fulfill the subtask requirements?
+ - Does it serve the overall epic goal?
+ - Does it enable downstream tasks?
+ - Type safety, no obvious bugs?
+
+ ### Step 4: Send Feedback
+ \`\`\`
+ swarm_review_feedback(
+ project_key="{project_key}",
+ task_id="{task_id}",
+ worker_id="{worker_id}",
+ status="approved", // or "needs_changes"
+ summary="<brief summary>",
+ issues="[]" // or "[{file, line, issue, suggestion}]"
+ )
+ \`\`\`
+
+ ### Step 5: ONLY THEN Continue
+ - If approved: Close the cell, spawn next worker
+ - If needs_changes: Worker gets feedback, retries (max 3 attempts)
+ - If 3 failures: Mark blocked, escalate to human
+
+ **⚠️ DO NOT spawn the next worker until review is complete.**
+ `;
 var EVALUATION_PROMPT = `Evaluate the work completed for this subtask.

 ## Subtask
@@ -35651,6 +35746,12 @@ For each criterion, assess passed/failed and provide brief feedback:

 If any criterion fails, the overall evaluation fails and retry_suggestion
 should describe what needs to be fixed.`;
+ function formatResearcherPrompt(params) {
+ const techList = params.tech_stack.map((t) => `- ${t}`).join(`
+ `);
+ const upgradesMode = params.check_upgrades ? "**UPGRADE COMPARISON MODE**: Fetch docs for BOTH installed AND latest versions. Compare and note breaking changes." : "**DEFAULT MODE**: Fetch docs for INSTALLED versions only (from lockfiles).";
+ return RESEARCHER_PROMPT.replace(/{research_id}/g, params.research_id).replace(/{epic_id}/g, params.epic_id).replace("{tech_stack}", techList).replace("{project_path}", params.project_path).replace("{check_upgrades}", upgradesMode);
+ }
 function formatSubtaskPromptV2(params) {
 const fileList = params.files.length > 0 ? params.files.map((f) => `- \`${f}\``).join(`
 `) : "(no specific files - use judgment)";
@@ -35786,6 +35887,8 @@ var swarm_spawn_subtask = tool({
 liteModel: "anthropic/claude-haiku-4-5"
 };
 const selectedModel = selectWorkerModel2(subtask, config2);
+ const filesJoined = args.files.map((f) => `"${f}"`).join(", ");
+ const postCompletionInstructions = COORDINATOR_POST_WORKER_CHECKLIST.replace(/{project_key}/g, args.project_path || "$PWD").replace(/{epic_id}/g, args.epic_id).replace(/{task_id}/g, args.bead_id).replace(/{files_touched}/g, filesJoined).replace(/{worker_id}/g, "worker");
 return JSON.stringify({
 prompt,
 bead_id: args.bead_id,
@@ -35793,7 +35896,50 @@
 files: args.files,
 project_path: args.project_path,
 recovery_context: args.recovery_context,
- recommended_model: selectedModel
+ recommended_model: selectedModel,
+ post_completion_instructions: postCompletionInstructions
+ }, null, 2);
+ }
+ });
+ var swarm_spawn_researcher = tool({
+ description: "Prepare a research task for spawning. Returns prompt for gathering technology documentation. Researcher fetches docs and stores findings in semantic-memory.",
+ args: {
+ research_id: tool.schema.string().describe("Unique ID for this research task"),
+ epic_id: tool.schema.string().describe("Parent epic ID"),
+ tech_stack: tool.schema.array(tool.schema.string()).describe("Explicit list of technologies to research (from coordinator)"),
+ project_path: tool.schema.string().describe("Absolute project path for swarmmail_init"),
+ check_upgrades: tool.schema.boolean().optional().describe("If true, compare installed vs latest versions (default: false)")
+ },
+ async execute(args) {
+ const prompt = formatResearcherPrompt({
+ research_id: args.research_id,
+ epic_id: args.epic_id,
+ tech_stack: args.tech_stack,
+ project_path: args.project_path,
+ check_upgrades: args.check_upgrades ?? false
+ });
+ return JSON.stringify({
+ prompt,
+ research_id: args.research_id,
+ epic_id: args.epic_id,
+ tech_stack: args.tech_stack,
+ project_path: args.project_path,
+ check_upgrades: args.check_upgrades ?? false,
+ subagent_type: "swarm/researcher",
+ expected_output: {
+ technologies: [
+ {
+ name: "string",
+ installed_version: "string",
+ latest_version: "string | null",
+ key_patterns: ["string"],
+ gotchas: ["string"],
+ breaking_changes: ["string"],
+ memory_id: "string"
+ }
+ ],
+ summary: "string"
+ }
 }, null, 2);
 }
 });
@@ -35908,6 +36054,7 @@ ${args.context}` : `## Additional Context
 var promptTools = {
 swarm_subtask_prompt,
 swarm_spawn_subtask,
+ swarm_spawn_researcher,
 swarm_evaluation_prompt,
 swarm_plan_prompt
 };
@@ -37006,7 +37153,7 @@ var mandateTools = {

 // src/memory-tools.ts
 init_dist();
- import {
+ import { getSwarmMailLibSQL as getSwarmMailLibSQL3 } from "swarm-mail";

 // ../../node_modules/.bun/effect@3.19.12/node_modules/effect/dist/esm/Function.js
 var isFunction = (input) => typeof input === "function";
@@ -50427,7 +50574,8 @@ import {
 makeOllamaLive,
 Ollama,
 legacyDatabaseExists,
- migrateLegacyMemories
+ migrateLegacyMemories,
+ toSwarmDb
 } from "swarm-mail";
 var migrationChecked = false;
 async function maybeAutoMigrate(db) {
@@ -50461,7 +50609,8 @@ async function createMemoryAdapter(db) {
 migrationChecked = true;
 await maybeAutoMigrate(db);
 }
- const
+ const drizzleDb = toSwarmDb(db);
+ const store = createMemoryStore(drizzleDb);
 const config2 = getDefaultConfig();
 const ollamaLayer = makeOllamaLive(config2);
 const generateId = () => {
@@ -50606,9 +50755,9 @@ async function getMemoryAdapter(projectPath) {
 if (cachedAdapter && cachedProjectPath === path2) {
 return cachedAdapter;
 }
- const swarmMail = await
- const
- cachedAdapter = await createMemoryAdapter(
+ const swarmMail = await getSwarmMailLibSQL3(path2);
+ const dbAdapter = await swarmMail.getDatabase();
+ cachedAdapter = await createMemoryAdapter(dbAdapter);
 cachedProjectPath = path2;
 return cachedAdapter;
 }
@@ -50719,6 +50868,368 @@ var memoryTools = {
 "semantic-memory_check": semantic_memory_check
 };

+ // src/observability-tools.ts
+ init_dist();
+ import {
+ agentActivity,
+ checkpointFrequency,
+ failedDecompositions,
+ getSwarmMailLibSQL as getSwarmMailLibSQL4,
+ humanFeedback,
+ lockContention,
+ messageLatency,
+ recoverySuccess,
+ scopeViolations,
+ strategySuccessRates,
+ taskDuration
+ } from "swarm-mail";
+ function parseSince(since) {
+ const now = Date.now();
+ const match11 = since.match(/^(\d+)([dhm])$/);
+ if (!match11) {
+ throw new Error(`Invalid since format: ${since}. Use "7d", "24h", or "1h"`);
+ }
+ const [, value, unit] = match11;
+ const num = Number.parseInt(value, 10);
+ switch (unit) {
+ case "d":
+ return now - num * 24 * 60 * 60 * 1000;
+ case "h":
+ return now - num * 60 * 60 * 1000;
+ case "m":
+ return now - num * 60 * 1000;
+ default:
+ throw new Error(`Unknown unit: ${unit}`);
+ }
+ }
+ async function executeQuery(swarmMail, query) {
+ const db = await swarmMail.getDatabase();
+ const result = await db.query(query.sql, Object.values(query.parameters || {}));
+ return result.rows;
+ }
+ function formatSummary(queryType, results) {
+ if (results.length === 0) {
+ return `No ${queryType} data found.`;
+ }
+ const count = results.length;
+ const preview = results.slice(0, 3);
+ return `${queryType}: ${count} result(s). Top 3: ${JSON.stringify(preview, null, 2).slice(0, 400)}`;
+ }
+ function capResults(results) {
+ return results.slice(0, 50);
+ }
+ var swarm_analytics = tool({
+ description: "Query pre-built analytics for swarm coordination. Returns structured data about failed decompositions, strategy success rates, lock contention, agent activity, message latency, scope violations, task duration, checkpoint frequency, recovery success, and human feedback.",
+ args: {
+ query: tool.schema.enum([
+ "failed-decompositions",
+ "strategy-success-rates",
+ "lock-contention",
+ "agent-activity",
+ "message-latency",
+ "scope-violations",
+ "task-duration",
+ "checkpoint-frequency",
+ "recovery-success",
+ "human-feedback"
+ ]).describe("Type of analytics query to run"),
+ since: tool.schema.string().optional().describe("Time filter: '7d', '24h', '1h' (optional)"),
+ format: tool.schema.enum(["json", "summary"]).optional().describe("Output format: 'json' (default) or 'summary' (context-efficient)")
+ },
+ async execute(args2) {
+ try {
+ const projectPath = process.cwd();
+ const db = await getSwarmMailLibSQL4(projectPath);
+ const filters = {
+ project_key: projectPath
+ };
+ if (args2.since) {
+ filters.since = parseSince(args2.since);
+ }
+ let query;
+ switch (args2.query) {
+ case "failed-decompositions":
+ query = failedDecompositions(filters);
+ break;
+ case "strategy-success-rates":
+ query = strategySuccessRates(filters);
+ break;
+ case "lock-contention":
+ query = lockContention(filters);
+ break;
+ case "agent-activity":
+ query = agentActivity(filters);
+ break;
+ case "message-latency":
+ query = messageLatency(filters);
+ break;
+ case "scope-violations":
+ query = scopeViolations.buildQuery ? scopeViolations.buildQuery(filters) : scopeViolations;
+ break;
+ case "task-duration":
+ query = taskDuration.buildQuery ? taskDuration.buildQuery(filters) : taskDuration;
+ break;
+ case "checkpoint-frequency":
+ query = checkpointFrequency.buildQuery ? checkpointFrequency.buildQuery(filters) : checkpointFrequency;
+ break;
+ case "recovery-success":
+ query = recoverySuccess.buildQuery ? recoverySuccess.buildQuery(filters) : recoverySuccess;
+ break;
+ case "human-feedback":
+ query = humanFeedback.buildQuery ? humanFeedback.buildQuery(filters) : humanFeedback;
+ break;
+ default:
+ return JSON.stringify({
+ error: `Unknown query type: ${args2.query}`
+ });
+ }
+ const results = await executeQuery(db, query);
+ if (args2.format === "summary") {
+ return formatSummary(args2.query, results);
+ }
+ return JSON.stringify({
+ query: args2.query,
+ filters,
+ count: results.length,
+ results
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var swarm_query = tool({
+ description: "Execute raw SQL queries against the swarm event store. Context-safe: results capped at 50 rows. Useful for custom analytics and debugging.",
+ args: {
+ sql: tool.schema.string().describe("SQL query to execute (SELECT only for safety)"),
+ format: tool.schema.enum(["json", "table"]).optional().describe("Output format: 'json' (default) or 'table' (visual)")
+ },
+ async execute(args2) {
+ try {
+ const projectPath = process.cwd();
+ const swarmMail = await getSwarmMailLibSQL4(projectPath);
+ const db = await swarmMail.getDatabase();
+ if (!args2.sql.trim().toLowerCase().startsWith("select")) {
+ return JSON.stringify({
+ error: "Only SELECT queries are allowed for safety"
+ });
+ }
+ const result = await db.query(args2.sql, []);
+ const rows = result.rows;
+ const cappedRows = capResults(rows);
+ if (args2.format === "table") {
+ if (cappedRows.length === 0) {
+ return "No results";
+ }
+ const headers = Object.keys(cappedRows[0]);
+ const headerRow = headers.join(" | ");
+ const separator = headers.map(() => "---").join(" | ");
+ const dataRows = cappedRows.map((row) => headers.map((h) => row[h]).join(" | "));
+ return [headerRow, separator, ...dataRows].join(`
+ `);
+ }
+ return JSON.stringify({
+ count: cappedRows.length,
+ total: rows.length,
+ capped: rows.length > 50,
+ results: cappedRows
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var swarm_diagnose = tool({
+ description: "Auto-diagnose issues for a specific epic or task. Returns structured diagnosis with blockers, conflicts, slow tasks, errors, and timeline.",
+ args: {
+ epic_id: tool.schema.string().optional().describe("Epic ID to diagnose"),
+ bead_id: tool.schema.string().optional().describe("Task ID to diagnose"),
+ include: tool.schema.array(tool.schema.enum([
+ "blockers",
+ "conflicts",
+ "slow_tasks",
+ "errors",
+ "timeline"
+ ])).optional().describe("What to include in diagnosis (default: all)")
+ },
+ async execute(args2) {
+ try {
+ const projectPath = process.cwd();
+ const swarmMail = await getSwarmMailLibSQL4(projectPath);
+ const db = await swarmMail.getDatabase();
+ const diagnosis = [];
+ const include = args2.include || [
+ "blockers",
+ "conflicts",
+ "slow_tasks",
+ "errors",
+ "timeline"
+ ];
+ if (include.includes("blockers")) {
+ const blockerQuery = `
+ SELECT json_extract(data, '$.agent_name') as agent,
+ json_extract(data, '$.bead_id') as bead_id,
+ timestamp
+ FROM events
+ WHERE type = 'task_blocked'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ ${args2.bead_id ? "AND json_extract(data, '$.bead_id') = ?" : ""}
+ ORDER BY timestamp DESC
+ LIMIT 10
+ `;
+ const params = [];
+ if (args2.epic_id)
+ params.push(args2.epic_id);
+ if (args2.bead_id)
+ params.push(args2.bead_id);
+ const blockers = await db.query(blockerQuery, params);
+ if (blockers.rows.length > 0) {
+ diagnosis.push({
+ type: "blockers",
+ message: `Found ${blockers.rows.length} blocked task(s)`,
+ severity: "high"
+ });
+ }
+ }
+ if (include.includes("errors")) {
+ const errorQuery = `
+ SELECT type, json_extract(data, '$.error_count') as error_count
+ FROM events
+ WHERE type = 'subtask_outcome'
+ AND json_extract(data, '$.success') = 'false'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ ${args2.bead_id ? "AND json_extract(data, '$.bead_id') = ?" : ""}
+ LIMIT 10
+ `;
+ const params = [];
+ if (args2.epic_id)
+ params.push(args2.epic_id);
+ if (args2.bead_id)
+ params.push(args2.bead_id);
+ const errors3 = await db.query(errorQuery, params);
+ if (errors3.rows.length > 0) {
+ diagnosis.push({
+ type: "errors",
+ message: `Found ${errors3.rows.length} failed task(s)`,
+ severity: "high"
+ });
+ }
+ }
+ let timeline = [];
+ if (include.includes("timeline")) {
+ const timelineQuery = `
+ SELECT timestamp, type, json_extract(data, '$.agent_name') as agent
+ FROM events
+ ${args2.epic_id ? "WHERE json_extract(data, '$.epic_id') = ?" : ""}
+ ${args2.bead_id ? (args2.epic_id ? "AND" : "WHERE") + " json_extract(data, '$.bead_id') = ?" : ""}
+ ORDER BY timestamp DESC
+ LIMIT 20
+ `;
+ const params = [];
+ if (args2.epic_id)
+ params.push(args2.epic_id);
+ if (args2.bead_id)
+ params.push(args2.bead_id);
+ const events = await db.query(timelineQuery, params);
+ timeline = events.rows;
+ }
+ return JSON.stringify({
+ epic_id: args2.epic_id,
+ bead_id: args2.bead_id,
+ diagnosis,
+ timeline: include.includes("timeline") ? timeline : undefined
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var swarm_insights = tool({
+ description: "Generate learning insights from swarm coordination metrics. Analyzes success rates, duration, conflicts, and retries to provide actionable recommendations.",
+ args: {
+ scope: tool.schema.enum(["epic", "project", "recent"]).describe("Scope of analysis: 'epic', 'project', or 'recent'"),
+ epic_id: tool.schema.string().optional().describe("Epic ID (required if scope='epic')"),
+ metrics: tool.schema.array(tool.schema.enum([
+ "success_rate",
+ "avg_duration",
+ "conflict_rate",
+ "retry_rate"
+ ])).describe("Metrics to analyze")
+ },
+ async execute(args2) {
+ try {
+ if (args2.scope === "epic" && !args2.epic_id) {
+ return JSON.stringify({
+ error: "epic_id is required when scope='epic'"
+ });
+ }
+ const projectPath = process.cwd();
+ const swarmMail = await getSwarmMailLibSQL4(projectPath);
+ const db = await swarmMail.getDatabase();
+ const insights = [];
+ if (args2.metrics.includes("success_rate")) {
+ const query = `
+ SELECT
+ SUM(CASE WHEN json_extract(data, '$.success') = 'true' THEN 1 ELSE 0 END) as successes,
+ COUNT(*) as total
+ FROM events
+ WHERE type = 'subtask_outcome'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ `;
+ const result = await db.query(query, args2.epic_id ? [args2.epic_id] : []);
+ const row = result.rows[0];
+ if (row && row.total > 0) {
+ const rate = row.successes / row.total * 100;
+ insights.push({
+ metric: "success_rate",
+ value: `${rate.toFixed(1)}%`,
+ insight: rate < 50 ? "Low success rate - review decomposition strategy" : rate < 80 ? "Moderate success rate - monitor for patterns" : "Good success rate - maintain current approach"
+ });
+ }
+ }
+ if (args2.metrics.includes("avg_duration")) {
+ const query = `
+ SELECT AVG(CAST(json_extract(data, '$.duration_ms') AS REAL)) as avg_duration
+ FROM events
+ WHERE type = 'subtask_outcome'
+ ${args2.epic_id ? "AND json_extract(data, '$.epic_id') = ?" : ""}
+ `;
+ const result = await db.query(query, args2.epic_id ? [args2.epic_id] : []);
+ const row = result.rows[0];
+ if (row?.avg_duration) {
+ const avgMinutes = (row.avg_duration / 60000).toFixed(1);
+ insights.push({
+ metric: "avg_duration",
+ value: `${avgMinutes} min`,
+ insight: row.avg_duration > 600000 ? "Tasks taking >10min - consider smaller decomposition" : "Task duration is reasonable"
+ });
+ }
+ }
+ return JSON.stringify({
+ scope: args2.scope,
+ epic_id: args2.epic_id,
+ insights
+ }, null, 2);
+ } catch (error45) {
+ return JSON.stringify({
+ error: error45 instanceof Error ? error45.message : String(error45)
+ });
+ }
+ }
+ });
+ var observabilityTools = {
+ swarm_analytics,
+ swarm_query,
+ swarm_diagnose,
+ swarm_insights
+ };
+
 // src/output-guardrails.ts
 var DEFAULT_GUARDRAIL_CONFIG = {
 defaultMaxChars: 32000,
@@ -51472,9 +51983,11 @@ Extract from session context:

 1. `swarm_status(epic_id="<epic>", project_key="<path>")` - Get current state
 2. `swarmmail_inbox(limit=5)` - Check for agent messages
- 3.
- 4.
- 5. **
+ 3. `swarm_review(project_key, epic_id, task_id, files_touched)` - Review any completed work
+ 4. `swarm_review_feedback(project_key, task_id, worker_id, status, issues)` - Approve or request changes
+ 5. **Spawn ready subtasks** - Don't wait, fire them off
+ 6. **Unblock blocked work** - Resolve dependencies, reassign if needed
+ 7. **Collect completed work** - Close done subtasks, verify quality

 ### Keep the Swarm Cooking

@@ -51658,7 +52171,8 @@ var SwarmPlugin = async (input) => {
 ...repoCrawlTools,
 ...skillsTools,
 ...mandateTools,
- ...memoryTools
+ ...memoryTools,
+ ...observabilityTools
 },
 event: async ({ event }) => {
 if (event.type === "session.idle") {