opencode-swarm-plugin 0.30.5 → 0.30.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/plugin.js CHANGED
@@ -27051,12 +27051,39 @@ echo "Project directory: $1"
  };
  });
 
+ // src/model-selection.ts
+ var exports_model_selection = {};
+ __export(exports_model_selection, {
+ selectWorkerModel: () => selectWorkerModel
+ });
+ function selectWorkerModel(subtask, config2) {
+ if (subtask.model) {
+ return subtask.model;
+ }
+ const files = subtask.files || [];
+ if (files.length > 0) {
+ const allDocs = files.every((f) => {
+ const lower = f.toLowerCase();
+ return lower.endsWith(".md") || lower.endsWith(".mdx");
+ });
+ const allTests = files.every((f) => {
+ const lower = f.toLowerCase();
+ return lower.includes(".test.") || lower.includes(".spec.");
+ });
+ if (allDocs || allTests) {
+ return config2.liteModel || config2.primaryModel || "anthropic/claude-haiku-4-5";
+ }
+ }
+ return config2.primaryModel || "anthropic/claude-haiku-4-5";
+ }
+
  // src/hive.ts
  init_dist();
  import {
  createHiveAdapter,
  FlushManager,
  importFromJSONL,
+ syncMemories,
  getSwarmMail
  } from "swarm-mail";
  import { existsSync, readFileSync } from "node:fs";
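The new selectWorkerModel helper routes a subtask to the lighter model only when every reserved file is documentation (.md/.mdx) or a test file (containing .test. or .spec.), and an explicit subtask.model always wins. A minimal behavioral sketch, assuming the module export shown above and the default model names hard-coded in swarm_spawn_subtask later in this diff (the override value in the last call is purely illustrative):

```typescript
import { selectWorkerModel } from "./model-selection";

const config = {
  primaryModel: "anthropic/claude-sonnet-4-5",
  liteModel: "anthropic/claude-haiku-4-5",
};
// Base fields only satisfy the subtask shape; the helper reads just model and files.
const base = { title: "Example", description: "", estimated_effort: "small", risks: [] };

selectWorkerModel({ ...base, files: ["README.md", "docs/setup.mdx"] }, config);
// => "anthropic/claude-haiku-4-5"  (docs-only)

selectWorkerModel({ ...base, files: ["src/hive.test.ts", "src/memory.spec.ts"] }, config);
// => "anthropic/claude-haiku-4-5"  (tests-only)

selectWorkerModel({ ...base, files: ["src/hive.ts", "README.md"] }, config);
// => "anthropic/claude-sonnet-4-5" (any source file falls back to the primary model)

selectWorkerModel({ ...base, files: ["src/hive.ts"], model: "provider/explicit-model" }, config);
// => "provider/explicit-model"     (per-subtask override short-circuits the heuristic)
```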
@@ -27238,7 +27265,8 @@ var DecomposedSubtaskSchema = exports_external.object({
  description: exports_external.string(),
  files: exports_external.array(exports_external.string()),
  estimated_effort: EffortLevelSchema,
- risks: exports_external.array(exports_external.string()).optional().default([])
+ risks: exports_external.array(exports_external.string()).optional().default([]),
+ model: exports_external.string().optional()
  });
  var SubtaskDependencySchema = exports_external.object({
  from: exports_external.number().int().min(0),
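With this schema change a decomposition can pin a model per subtask, while omission defers to automatic selection. A hypothetical object the updated DecomposedSubtaskSchema would accept (values are illustrative; fields outside this hunk, such as title, are assumed from elsewhere in the diff):

```typescript
const subtask = {
  title: "Add characterization tests for hive_sync",
  description: "Cover the new memory-sync branch",
  files: ["src/hive.test.ts"],
  estimated_effort: "small",
  risks: [],                           // optional, defaults to []
  model: "anthropic/claude-haiku-4-5", // new optional field; omit to auto-select
};
```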
@@ -28003,8 +28031,18 @@ var hive_sync = tool({
  outputPath: `${projectKey}/.hive/issues.jsonl`
  });
  const flushResult = await withTimeout(flushManager.flush(), TIMEOUT_MS, "flush hive");
- if (flushResult.cellsExported === 0) {
- return "No cells to sync";
+ const swarmMail = await getSwarmMail(projectKey);
+ const db = await swarmMail.getDatabase();
+ const hivePath = join(projectKey, ".hive");
+ let memoriesSynced = 0;
+ try {
+ const memoryResult = await syncMemories(db, hivePath);
+ memoriesSynced = memoryResult.exported;
+ } catch (err) {
+ console.warn("[hive_sync] Memory sync warning:", err);
+ }
+ if (flushResult.cellsExported === 0 && memoriesSynced === 0) {
+ return "No cells or memories to sync";
  }
  const hiveStatusResult = await runGitCommand([
  "status",
@@ -34830,18 +34868,36 @@ swarmmail_init(project_path="{project_path}", task_description="{bead_id}: {subt
 
  **If you skip this step, your work will not be tracked and swarm_complete will fail.**
 
- ### Step 2: Query Past Learnings (BEFORE starting work)
+ ### Step 2: \uD83E\uDDE0 Query Past Learnings (MANDATORY - BEFORE starting work)
+
+ **⚠️ CRITICAL: ALWAYS query semantic memory BEFORE writing ANY code.**
+
  \`\`\`
- semantic-memory_find(query="<keywords from your task>", limit=5)
+ semantic-memory_find(query="<keywords from your task>", limit=5, expand=true)
  \`\`\`
 
- **Check if past agents solved similar problems.** Search for:
- - Error messages if debugging
- - Domain concepts (e.g., "authentication", "caching")
- - Technology stack (e.g., "Next.js", "React")
- - Patterns (e.g., "event sourcing", "validation")
+ **Why this is MANDATORY:**
+ - Past agents may have already solved your exact problem
+ - Avoids repeating mistakes that wasted 30+ minutes before
+ - Discovers project-specific patterns and gotchas
+ - Finds known workarounds for tool/library quirks
+
+ **Search Query Examples by Task Type:**
 
- **Past learnings save time and prevent repeating mistakes.**
+ - **Bug fix**: Use exact error message or "<symptom> <component>"
+ - **New feature**: Search "<domain concept> implementation pattern"
+ - **Refactor**: Query "<pattern name> migration approach"
+ - **Integration**: Look for "<library name> gotchas configuration"
+ - **Testing**: Find "testing <component type> characterization tests"
+ - **Performance**: Search "<technology> performance optimization"
+
+ **BEFORE you start coding:**
+ 1. Run semantic-memory_find with keywords from your task
+ 2. Read the results with expand=true for full content
+ 3. Check if any memory solves your problem or warns of pitfalls
+ 4. Adjust your approach based on past learnings
+
+ **If you skip this step, you WILL waste time solving already-solved problems.**
 
  ### Step 3: Load Relevant Skills (if available)
  \`\`\`
@@ -34927,21 +34983,44 @@ swarm_checkpoint(
 
  **Checkpoints preserve context so you can recover if things go wrong.**
 
- ### Step 8: Store Learnings (if you discovered something)
+ ### Step 8: \uD83D\uDCBE STORE YOUR LEARNINGS (if you discovered something)
+
+ **If you learned it the hard way, STORE IT so the next agent doesn't have to.**
+
  \`\`\`
  semantic-memory_store(
  information="<what you learned, WHY it matters, how to apply it>",
- metadata="<tags: domain, tech-stack, pattern-type>"
+ tags="<domain, tech-stack, pattern-type>"
  )
  \`\`\`
 
- **Store:**
- - Tricky bugs you solved (root cause + solution)
- - Project-specific patterns or domain rules
- - Tool/library gotchas and workarounds
- - Failed approaches (anti-patterns to avoid)
+ **MANDATORY Storage Triggers - Store when you:**
+ - \uD83D\uDC1B **Solved a tricky bug** (>15min debugging) - include root cause + solution
+ - \uD83D\uDCA1 **Discovered a project-specific pattern** - domain rules, business logic quirks
+ - ⚠️ **Found a tool/library gotcha** - API quirks, version-specific bugs, workarounds
+ - \uD83D\uDEAB **Tried an approach that failed** - anti-patterns to avoid, why it didn't work
+ - \uD83C\uDFD7️ **Made an architectural decision** - reasoning, alternatives considered, tradeoffs
 
- **Don't store generic knowledge.** Store the WHY, not just the WHAT.
+ **What Makes a GOOD Memory:**
+
+ ✅ **GOOD** (actionable, explains WHY):
+ \`\`\`
+ "OAuth refresh tokens need 5min buffer before expiry to avoid race conditions.
+ Without buffer, token refresh can fail mid-request if expiry happens between
+ check and use. Implemented with: if (expiresAt - Date.now() < 300000) refresh()"
+ \`\`\`
+
+ ❌ **BAD** (generic, no context):
+ \`\`\`
+ "Fixed the auth bug by adding a null check"
+ \`\`\`
+
+ **What NOT to Store:**
+ - Generic knowledge that's in official documentation
+ - Implementation details that change frequently
+ - Vague descriptions without context ("fixed the thing")
+
+ **The WHY matters more than the WHAT.** Future agents need context to apply your learning.
 
  ### Step 9: Complete (REQUIRED - releases reservations)
  \`\`\`
@@ -35032,17 +35111,20 @@ Other cell operations:
 
  **NON-NEGOTIABLE:**
  1. Step 1 (swarmmail_init) MUST be first - do it before anything else
- 2. Step 2 (semantic-memory_find) MUST happen before starting work
+ 2. \uD83E\uDDE0 Step 2 (semantic-memory_find) MUST happen BEFORE starting work - query first, code second
  3. Step 4 (swarmmail_reserve) - YOU reserve files, not coordinator
  4. Step 6 (swarm_progress) - Report at milestones, don't work silently
- 5. Step 9 (swarm_complete) - Use this to close, NOT hive_close
+ 5. \uD83D\uDCBE Step 8 (semantic-memory_store) - If you learned something hard, STORE IT
+ 6. Step 9 (swarm_complete) - Use this to close, NOT hive_close
 
  **If you skip these steps:**
  - Your work won't be tracked (swarm_complete will fail)
- - You'll waste time repeating solved problems (no semantic memory query)
+ - \uD83D\uDD04 You'll waste time repeating already-solved problems (no semantic memory query)
  - Edit conflicts with other agents (no file reservation)
  - Lost work if you crash (no checkpoints)
- - Future agents repeat your mistakes (no learnings stored)
+ - \uD83D\uDD04 Future agents repeat YOUR mistakes (no learnings stored)
+
+ **Memory is the swarm's collective intelligence. Query it. Feed it.**
 
  Begin now.`;
  var EVALUATION_PROMPT = `Evaluate the work completed for this subtask.
@@ -35175,7 +35257,7 @@ var swarm_subtask_prompt = tool({
  }
  });
  var swarm_spawn_subtask = tool({
- description: "Prepare a subtask for spawning. Returns prompt with Agent Mail/hive tracking instructions. IMPORTANT: Pass project_path for swarmmail_init.",
+ description: "Prepare a subtask for spawning. Returns prompt with Agent Mail/hive tracking instructions. IMPORTANT: Pass project_path for swarmmail_init. Automatically selects appropriate model based on file types.",
  args: {
  bead_id: tool.schema.string().describe("Subtask bead ID"),
  epic_id: tool.schema.string().describe("Parent epic bead ID"),
@@ -35188,7 +35270,8 @@ var swarm_spawn_subtask = tool({
  shared_context: tool.schema.string().optional(),
  skills_to_load: tool.schema.array(tool.schema.string()).optional(),
  coordinator_notes: tool.schema.string().optional()
- }).optional().describe("Recovery context from checkpoint compaction")
+ }).optional().describe("Recovery context from checkpoint compaction"),
+ model: tool.schema.string().optional().describe("Optional explicit model override (auto-selected if not provided)")
  },
  async execute(args) {
  const prompt = formatSubtaskPromptV2({
@@ -35201,13 +35284,28 @@ var swarm_spawn_subtask = tool({
  project_path: args.project_path,
  recovery_context: args.recovery_context
  });
+ const { selectWorkerModel: selectWorkerModel2 } = await Promise.resolve().then(() => exports_model_selection);
+ const subtask = {
+ title: args.subtask_title,
+ description: args.subtask_description || "",
+ files: args.files,
+ estimated_effort: "medium",
+ risks: [],
+ model: args.model
+ };
+ const config2 = {
+ primaryModel: "anthropic/claude-sonnet-4-5",
+ liteModel: "anthropic/claude-haiku-4-5"
+ };
+ const selectedModel = selectWorkerModel2(subtask, config2);
  return JSON.stringify({
  prompt,
  bead_id: args.bead_id,
  epic_id: args.epic_id,
  files: args.files,
  project_path: args.project_path,
- recovery_context: args.recovery_context
+ recovery_context: args.recovery_context,
+ recommended_model: selectedModel
  }, null, 2);
  }
  });
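The tool output now surfaces the heuristic's choice as recommended_model, so the coordinator can spawn the worker on the right model without recomputing it. A sketch of the parsed result for a docs-only subtask with no explicit override (IDs and paths are illustrative; JSON.stringify drops recovery_context when it is undefined):

```typescript
const result = {
  prompt: "...",                                   // full SUBTASK_PROMPT_V2 instantiation
  bead_id: "bead-123",
  epic_id: "epic-45",
  files: ["docs/guide.md"],
  project_path: "/path/to/project",
  recommended_model: "anthropic/claude-haiku-4-5", // from selectWorkerModel
};
```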
@@ -49863,12 +49961,18 @@ async function createMemoryAdapter(db) {
  if (tags.length > 0) {
  metadata.tags = tags;
  }
+ const clampConfidence = (c) => {
+ if (c === undefined)
+ return 0.7;
+ return Math.max(0, Math.min(1, c));
+ };
  const memory = {
  id,
  content: args2.information,
  metadata,
  collection,
- createdAt: new Date
+ createdAt: new Date,
+ confidence: clampConfidence(args2.confidence)
  };
  const program = exports_Effect.gen(function* () {
  const ollama = yield* Ollama;
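clampConfidence keeps malformed inputs from distorting decay: a missing value falls back to 0.7 and anything else is clamped into [0, 1]. A few illustrative cases of the same logic:

```typescript
const clampConfidence = (c?: number): number =>
  c === undefined ? 0.7 : Math.max(0, Math.min(1, c));

clampConfidence(undefined); // 0.7 (default)
clampConfidence(1.5);       // 1   (clamped down)
clampConfidence(-0.2);      // 0   (clamped up)
clampConfidence(0.9);       // 0.9 (unchanged)
```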
@@ -49975,12 +50079,13 @@ async function getMemoryAdapter(projectPath) {
  return cachedAdapter;
  }
  var semantic_memory_store = tool({
- description: "Store a memory with semantic embedding. Memories are searchable by semantic similarity and can be organized into collections.",
+ description: "Store a memory with semantic embedding. Memories are searchable by semantic similarity and can be organized into collections. Confidence affects decay rate: high confidence (1.0) = 135 day half-life, low confidence (0.0) = 45 day half-life.",
  args: {
  information: tool.schema.string().describe("The information to store (required)"),
  collection: tool.schema.string().optional().describe("Collection name (defaults to 'default')"),
  tags: tool.schema.string().optional().describe("Comma-separated tags (e.g., 'auth,tokens,oauth')"),
- metadata: tool.schema.string().optional().describe("JSON string with additional metadata")
+ metadata: tool.schema.string().optional().describe("JSON string with additional metadata"),
+ confidence: tool.schema.number().optional().describe("Confidence level (0.0-1.0) affecting decay rate. Higher = slower decay. Default 0.7")
  },
  async execute(args2, ctx) {
  const adapter = await getMemoryAdapter();
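The description only fixes the endpoints (confidence 0.0 maps to a 45 day half-life, 1.0 to 135 days); the curve between them is implemented in swarm-mail and does not appear in this diff. A minimal sketch, assuming simple linear interpolation between those endpoints:

```typescript
// Assumption: linear interpolation between the documented endpoints.
// The real decay logic lives in swarm-mail, not in this plugin.
function halfLifeDays(confidence: number): number {
  const c = Math.max(0, Math.min(1, confidence)); // same clamp as the adapter
  return 45 + c * (135 - 45);
}

halfLifeDays(0);   // 45  days
halfLifeDays(0.7); // 108 days (the default confidence)
halfLifeDays(1);   // 135 days
```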
@@ -44,6 +44,7 @@ export declare const DecomposedSubtaskSchema: z.ZodObject<{
  large: "large";
  }>;
  risks: z.ZodDefault<z.ZodOptional<z.ZodArray<z.ZodString>>>;
+ model: z.ZodOptional<z.ZodString>;
  }, z.core.$strip>;
  export type DecomposedSubtask = z.infer<typeof DecomposedSubtaskSchema>;
  /**
@@ -78,6 +79,7 @@ export declare const TaskDecompositionSchema: z.ZodObject<{
  large: "large";
  }>;
  risks: z.ZodDefault<z.ZodOptional<z.ZodArray<z.ZodString>>>;
+ model: z.ZodOptional<z.ZodString>;
  }, z.core.$strip>>;
  dependencies: z.ZodDefault<z.ZodOptional<z.ZodArray<z.ZodObject<{
  from: z.ZodNumber;
@@ -1 +1 @@
- {"version":3,"file":"task.d.ts","sourceRoot":"","sources":["../../src/schemas/task.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB;;;;;;;;GAQG;AACH,eAAO,MAAM,iBAAiB;;;;;EAK5B,CAAC;AACH,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC;AAE5D;;GAEG;AACH,eAAO,MAAM,oBAAoB;;;;EAI/B,CAAC;AACH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAElE;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;iBAOlC,CAAC;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;iBAMlC,CAAC;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE;;;;GAIG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;;;;;;;;;;;;;;iBAWlC,CAAC;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE;;GAEG;AACH,eAAO,MAAM,mBAAmB;;;;iBAI9B,CAAC;AACH,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE;;GAEG;AACH,eAAO,MAAM,kBAAkB;;;;;;;;;;;;iBAe7B,CAAC;AACH,MAAM,MAAM,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CAAC;AAE9D;;GAEG;AACH,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;iBAMjC,CAAC;AACH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE;;GAEG;AACH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;iBAe7B,CAAC;AACJ,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE;;GAEG;AACH,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;iBAS5B,CAAC;AACH,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC"}
+ {"version":3,"file":"task.d.ts","sourceRoot":"","sources":["../../src/schemas/task.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB;;;;;;;;GAQG;AACH,eAAO,MAAM,iBAAiB;;;;;EAK5B,CAAC;AACH,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC;AAE5D;;GAEG;AACH,eAAO,MAAM,oBAAoB;;;;EAI/B,CAAC;AACH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAElE;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;iBAYlC,CAAC;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;iBAMlC,CAAC;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE;;;;GAIG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;;;;;;;;;;;;;;;iBAWlC,CAAC;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE;;GAEG;AACH,eAAO,MAAM,mBAAmB;;;;iBAI9B,CAAC;AACH,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE;;GAEG;AACH,eAAO,MAAM,kBAAkB;;;;;;;;;;;;iBAe7B,CAAC;AACH,MAAM,MAAM,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CAAC;AAE9D;;GAEG;AACH,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;iBAMjC,CAAC;AACH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE;;GAEG;AACH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;iBAe7B,CAAC;AACJ,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE;;GAEG;AACH,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;iBAS5B,CAAC;AACH,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC"}
@@ -37,7 +37,7 @@ export declare const SUBTASK_PROMPT = "You are a swarm agent working on a subtas
  *
  * Supports {error_context} placeholder for retry prompts.
  */
- export declare const SUBTASK_PROMPT_V2 = "You are a swarm agent working on: **{subtask_title}**\n\n## [IDENTITY]\nAgent: (assigned at spawn)\nCell: {bead_id}\nEpic: {epic_id}\n\n## [TASK]\n{subtask_description}\n\n## [FILES]\nReserved (exclusive):\n{file_list}\n\nOnly modify these files. Need others? Message the coordinator.\n\n## [CONTEXT]\n{shared_context}\n\n{compressed_context}\n\n{error_context}\n\n## [MANDATORY SURVIVAL CHECKLIST]\n\n**CRITICAL: Follow this checklist IN ORDER. Each step builds on the previous.**\n\n### Step 1: Initialize Coordination (REQUIRED - DO THIS FIRST)\n```\nswarmmail_init(project_path=\"{project_path}\", task_description=\"{bead_id}: {subtask_title}\")\n```\n\n**This registers you with the coordination system and enables:**\n- File reservation tracking\n- Inter-agent communication\n- Progress monitoring\n- Conflict detection\n\n**If you skip this step, your work will not be tracked and swarm_complete will fail.**\n\n### Step 2: Query Past Learnings (BEFORE starting work)\n```\nsemantic-memory_find(query=\"<keywords from your task>\", limit=5)\n```\n\n**Check if past agents solved similar problems.** Search for:\n- Error messages if debugging\n- Domain concepts (e.g., \"authentication\", \"caching\")\n- Technology stack (e.g., \"Next.js\", \"React\")\n- Patterns (e.g., \"event sourcing\", \"validation\")\n\n**Past learnings save time and prevent repeating mistakes.**\n\n### Step 3: Load Relevant Skills (if available)\n```\nskills_list() # See what skills exist\nskills_use(name=\"<relevant-skill>\", context=\"<your task>\") # Load skill\n```\n\n**Common skill triggers:**\n- Writing tests? \u2192 `skills_use(name=\"testing-patterns\")`\n- Breaking dependencies? \u2192 `skills_use(name=\"testing-patterns\")`\n- Multi-agent coordination? \u2192 `skills_use(name=\"swarm-coordination\")`\n- Building a CLI? \u2192 `skills_use(name=\"cli-builder\")`\n\n### Step 4: Reserve Your Files (YOU reserve, not coordinator)\n```\nswarmmail_reserve(\n paths=[{file_list}],\n reason=\"{bead_id}: {subtask_title}\",\n exclusive=true\n)\n```\n\n**Workers reserve their own files.** This prevents edit conflicts with other agents.\n\n### Step 5: Do the Work (TDD MANDATORY)\n\n**Follow RED \u2192 GREEN \u2192 REFACTOR. No exceptions.**\n\n1. **RED**: Write a failing test that describes the expected behavior\n - Test MUST fail before you write implementation\n - If test passes immediately, your test is wrong\n \n2. **GREEN**: Write minimal code to make the test pass\n - Don't over-engineer - just make it green\n - Hardcode if needed, refactor later\n \n3. 
**REFACTOR**: Clean up while tests stay green\n - Run tests after every change\n - If tests break, undo and try again\n\n```bash\n# Run tests continuously\nbun test <your-test-file> --watch\n```\n\n**Why TDD?**\n- Catches bugs before they exist\n- Documents expected behavior\n- Enables fearless refactoring\n- Proves your code works\n\n### Step 6: Report Progress at Milestones\n```\nswarm_progress(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\",\n status=\"in_progress\",\n progress_percent=25, # or 50, 75\n message=\"<what you just completed>\"\n)\n```\n\n**Report at 25%, 50%, 75% completion.** This:\n- Triggers auto-checkpoint (saves context)\n- Keeps coordinator informed\n- Prevents silent failures\n\n### Step 7: Manual Checkpoint BEFORE Risky Operations\n```\nswarm_checkpoint(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\"\n)\n```\n\n**Call BEFORE:**\n- Large refactors\n- File deletions\n- Breaking API changes\n- Anything that might fail catastrophically\n\n**Checkpoints preserve context so you can recover if things go wrong.**\n\n### Step 8: Store Learnings (if you discovered something)\n```\nsemantic-memory_store(\n information=\"<what you learned, WHY it matters, how to apply it>\",\n metadata=\"<tags: domain, tech-stack, pattern-type>\"\n)\n```\n\n**Store:**\n- Tricky bugs you solved (root cause + solution)\n- Project-specific patterns or domain rules\n- Tool/library gotchas and workarounds\n- Failed approaches (anti-patterns to avoid)\n\n**Don't store generic knowledge.** Store the WHY, not just the WHAT.\n\n### Step 9: Complete (REQUIRED - releases reservations)\n```\nswarm_complete(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\",\n summary=\"<what you accomplished>\",\n files_touched=[\"list\", \"of\", \"files\"]\n)\n```\n\n**This automatically:**\n- Runs UBS bug scan\n- Releases file reservations\n- Records learning signals\n- Notifies coordinator\n\n**DO NOT manually close the cell with hive_close.** Use swarm_complete.\n\n## [SWARM MAIL COMMUNICATION]\n\n### Check Inbox Regularly\n```\nswarmmail_inbox() # Check for coordinator messages\nswarmmail_read_message(message_id=N) # Read specific message\n```\n\n### When Blocked\n```\nswarmmail_send(\n to=[\"coordinator\"],\n subject=\"BLOCKED: {bead_id}\",\n body=\"<blocker description, what you need>\",\n importance=\"high\",\n thread_id=\"{epic_id}\"\n)\nhive_update(id=\"{bead_id}\", status=\"blocked\")\n```\n\n### Report Issues to Other Agents\n```\nswarmmail_send(\n to=[\"OtherAgent\", \"coordinator\"],\n subject=\"Issue in {bead_id}\",\n body=\"<describe problem, don't fix their code>\",\n thread_id=\"{epic_id}\"\n)\n```\n\n### Manual Release (if needed)\n```\nswarmmail_release() # Manually release reservations\n```\n\n**Note:** `swarm_complete` automatically releases reservations. Only use manual release if aborting work.\n\n## [OTHER TOOLS]\n### Hive - You Have Autonomy to File Issues\nYou can create new cells against this epic when you discover:\n- **Bugs**: Found a bug while working? File it.\n- **Tech debt**: Spotted something that needs cleanup? File it.\n- **Follow-up work**: Task needs more work than scoped? File a follow-up.\n- **Dependencies**: Need something from another agent? 
File and link it.\n\n```\nhive_create(\n title=\"<descriptive title>\",\n type=\"bug\", # or \"task\", \"chore\"\n priority=2,\n parent_id=\"{epic_id}\", # Links to this epic\n description=\"Found while working on {bead_id}: <details>\"\n)\n```\n\n**Don't silently ignore issues.** File them so they get tracked and addressed.\n\nOther cell operations:\n- hive_update(id, status) - Mark blocked if stuck\n- hive_query(status=\"open\") - See what else needs work\n\n### Skills\n- skills_list() - Discover available skills\n- skills_use(name) - Activate skill for specialized guidance\n- skills_create(name) - Create new skill (if you found a reusable pattern)\n\n## [CRITICAL REQUIREMENTS]\n\n**NON-NEGOTIABLE:**\n1. Step 1 (swarmmail_init) MUST be first - do it before anything else\n2. Step 2 (semantic-memory_find) MUST happen before starting work\n3. Step 4 (swarmmail_reserve) - YOU reserve files, not coordinator\n4. Step 6 (swarm_progress) - Report at milestones, don't work silently\n5. Step 9 (swarm_complete) - Use this to close, NOT hive_close\n\n**If you skip these steps:**\n- Your work won't be tracked (swarm_complete will fail)\n- You'll waste time repeating solved problems (no semantic memory query)\n- Edit conflicts with other agents (no file reservation)\n- Lost work if you crash (no checkpoints)\n- Future agents repeat your mistakes (no learnings stored)\n\nBegin now.";
+ export declare const SUBTASK_PROMPT_V2 = "You are a swarm agent working on: **{subtask_title}**\n\n## [IDENTITY]\nAgent: (assigned at spawn)\nCell: {bead_id}\nEpic: {epic_id}\n\n## [TASK]\n{subtask_description}\n\n## [FILES]\nReserved (exclusive):\n{file_list}\n\nOnly modify these files. Need others? Message the coordinator.\n\n## [CONTEXT]\n{shared_context}\n\n{compressed_context}\n\n{error_context}\n\n## [MANDATORY SURVIVAL CHECKLIST]\n\n**CRITICAL: Follow this checklist IN ORDER. Each step builds on the previous.**\n\n### Step 1: Initialize Coordination (REQUIRED - DO THIS FIRST)\n```\nswarmmail_init(project_path=\"{project_path}\", task_description=\"{bead_id}: {subtask_title}\")\n```\n\n**This registers you with the coordination system and enables:**\n- File reservation tracking\n- Inter-agent communication\n- Progress monitoring\n- Conflict detection\n\n**If you skip this step, your work will not be tracked and swarm_complete will fail.**\n\n### Step 2: \uD83E\uDDE0 Query Past Learnings (MANDATORY - BEFORE starting work)\n\n**\u26A0\uFE0F CRITICAL: ALWAYS query semantic memory BEFORE writing ANY code.**\n\n```\nsemantic-memory_find(query=\"<keywords from your task>\", limit=5, expand=true)\n```\n\n**Why this is MANDATORY:**\n- Past agents may have already solved your exact problem\n- Avoids repeating mistakes that wasted 30+ minutes before\n- Discovers project-specific patterns and gotchas\n- Finds known workarounds for tool/library quirks\n\n**Search Query Examples by Task Type:**\n\n- **Bug fix**: Use exact error message or \"<symptom> <component>\"\n- **New feature**: Search \"<domain concept> implementation pattern\"\n- **Refactor**: Query \"<pattern name> migration approach\"\n- **Integration**: Look for \"<library name> gotchas configuration\"\n- **Testing**: Find \"testing <component type> characterization tests\"\n- **Performance**: Search \"<technology> performance optimization\"\n\n**BEFORE you start coding:**\n1. Run semantic-memory_find with keywords from your task\n2. Read the results with expand=true for full content\n3. Check if any memory solves your problem or warns of pitfalls\n4. Adjust your approach based on past learnings\n\n**If you skip this step, you WILL waste time solving already-solved problems.**\n\n### Step 3: Load Relevant Skills (if available)\n```\nskills_list() # See what skills exist\nskills_use(name=\"<relevant-skill>\", context=\"<your task>\") # Load skill\n```\n\n**Common skill triggers:**\n- Writing tests? \u2192 `skills_use(name=\"testing-patterns\")`\n- Breaking dependencies? \u2192 `skills_use(name=\"testing-patterns\")`\n- Multi-agent coordination? \u2192 `skills_use(name=\"swarm-coordination\")`\n- Building a CLI? \u2192 `skills_use(name=\"cli-builder\")`\n\n### Step 4: Reserve Your Files (YOU reserve, not coordinator)\n```\nswarmmail_reserve(\n paths=[{file_list}],\n reason=\"{bead_id}: {subtask_title}\",\n exclusive=true\n)\n```\n\n**Workers reserve their own files.** This prevents edit conflicts with other agents.\n\n### Step 5: Do the Work (TDD MANDATORY)\n\n**Follow RED \u2192 GREEN \u2192 REFACTOR. No exceptions.**\n\n1. **RED**: Write a failing test that describes the expected behavior\n - Test MUST fail before you write implementation\n - If test passes immediately, your test is wrong\n \n2. **GREEN**: Write minimal code to make the test pass\n - Don't over-engineer - just make it green\n - Hardcode if needed, refactor later\n \n3. 
**REFACTOR**: Clean up while tests stay green\n - Run tests after every change\n - If tests break, undo and try again\n\n```bash\n# Run tests continuously\nbun test <your-test-file> --watch\n```\n\n**Why TDD?**\n- Catches bugs before they exist\n- Documents expected behavior\n- Enables fearless refactoring\n- Proves your code works\n\n### Step 6: Report Progress at Milestones\n```\nswarm_progress(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\",\n status=\"in_progress\",\n progress_percent=25, # or 50, 75\n message=\"<what you just completed>\"\n)\n```\n\n**Report at 25%, 50%, 75% completion.** This:\n- Triggers auto-checkpoint (saves context)\n- Keeps coordinator informed\n- Prevents silent failures\n\n### Step 7: Manual Checkpoint BEFORE Risky Operations\n```\nswarm_checkpoint(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\"\n)\n```\n\n**Call BEFORE:**\n- Large refactors\n- File deletions\n- Breaking API changes\n- Anything that might fail catastrophically\n\n**Checkpoints preserve context so you can recover if things go wrong.**\n\n### Step 8: \uD83D\uDCBE STORE YOUR LEARNINGS (if you discovered something)\n\n**If you learned it the hard way, STORE IT so the next agent doesn't have to.**\n\n```\nsemantic-memory_store(\n information=\"<what you learned, WHY it matters, how to apply it>\",\n tags=\"<domain, tech-stack, pattern-type>\"\n)\n```\n\n**MANDATORY Storage Triggers - Store when you:**\n- \uD83D\uDC1B **Solved a tricky bug** (>15min debugging) - include root cause + solution\n- \uD83D\uDCA1 **Discovered a project-specific pattern** - domain rules, business logic quirks\n- \u26A0\uFE0F **Found a tool/library gotcha** - API quirks, version-specific bugs, workarounds\n- \uD83D\uDEAB **Tried an approach that failed** - anti-patterns to avoid, why it didn't work\n- \uD83C\uDFD7\uFE0F **Made an architectural decision** - reasoning, alternatives considered, tradeoffs\n\n**What Makes a GOOD Memory:**\n\n\u2705 **GOOD** (actionable, explains WHY):\n```\n\"OAuth refresh tokens need 5min buffer before expiry to avoid race conditions.\nWithout buffer, token refresh can fail mid-request if expiry happens between\ncheck and use. 
Implemented with: if (expiresAt - Date.now() < 300000) refresh()\"\n```\n\n\u274C **BAD** (generic, no context):\n```\n\"Fixed the auth bug by adding a null check\"\n```\n\n**What NOT to Store:**\n- Generic knowledge that's in official documentation\n- Implementation details that change frequently\n- Vague descriptions without context (\"fixed the thing\")\n\n**The WHY matters more than the WHAT.** Future agents need context to apply your learning.\n\n### Step 9: Complete (REQUIRED - releases reservations)\n```\nswarm_complete(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\",\n summary=\"<what you accomplished>\",\n files_touched=[\"list\", \"of\", \"files\"]\n)\n```\n\n**This automatically:**\n- Runs UBS bug scan\n- Releases file reservations\n- Records learning signals\n- Notifies coordinator\n\n**DO NOT manually close the cell with hive_close.** Use swarm_complete.\n\n## [SWARM MAIL COMMUNICATION]\n\n### Check Inbox Regularly\n```\nswarmmail_inbox() # Check for coordinator messages\nswarmmail_read_message(message_id=N) # Read specific message\n```\n\n### When Blocked\n```\nswarmmail_send(\n to=[\"coordinator\"],\n subject=\"BLOCKED: {bead_id}\",\n body=\"<blocker description, what you need>\",\n importance=\"high\",\n thread_id=\"{epic_id}\"\n)\nhive_update(id=\"{bead_id}\", status=\"blocked\")\n```\n\n### Report Issues to Other Agents\n```\nswarmmail_send(\n to=[\"OtherAgent\", \"coordinator\"],\n subject=\"Issue in {bead_id}\",\n body=\"<describe problem, don't fix their code>\",\n thread_id=\"{epic_id}\"\n)\n```\n\n### Manual Release (if needed)\n```\nswarmmail_release() # Manually release reservations\n```\n\n**Note:** `swarm_complete` automatically releases reservations. Only use manual release if aborting work.\n\n## [OTHER TOOLS]\n### Hive - You Have Autonomy to File Issues\nYou can create new cells against this epic when you discover:\n- **Bugs**: Found a bug while working? File it.\n- **Tech debt**: Spotted something that needs cleanup? File it.\n- **Follow-up work**: Task needs more work than scoped? File a follow-up.\n- **Dependencies**: Need something from another agent? File and link it.\n\n```\nhive_create(\n title=\"<descriptive title>\",\n type=\"bug\", # or \"task\", \"chore\"\n priority=2,\n parent_id=\"{epic_id}\", # Links to this epic\n description=\"Found while working on {bead_id}: <details>\"\n)\n```\n\n**Don't silently ignore issues.** File them so they get tracked and addressed.\n\nOther cell operations:\n- hive_update(id, status) - Mark blocked if stuck\n- hive_query(status=\"open\") - See what else needs work\n\n### Skills\n- skills_list() - Discover available skills\n- skills_use(name) - Activate skill for specialized guidance\n- skills_create(name) - Create new skill (if you found a reusable pattern)\n\n## [CRITICAL REQUIREMENTS]\n\n**NON-NEGOTIABLE:**\n1. Step 1 (swarmmail_init) MUST be first - do it before anything else\n2. \uD83E\uDDE0 Step 2 (semantic-memory_find) MUST happen BEFORE starting work - query first, code second\n3. Step 4 (swarmmail_reserve) - YOU reserve files, not coordinator\n4. Step 6 (swarm_progress) - Report at milestones, don't work silently\n5. \uD83D\uDCBE Step 8 (semantic-memory_store) - If you learned something hard, STORE IT\n6. 
Step 9 (swarm_complete) - Use this to close, NOT hive_close\n\n**If you skip these steps:**\n- Your work won't be tracked (swarm_complete will fail)\n- \uD83D\uDD04 You'll waste time repeating already-solved problems (no semantic memory query)\n- Edit conflicts with other agents (no file reservation)\n- Lost work if you crash (no checkpoints)\n- \uD83D\uDD04 Future agents repeat YOUR mistakes (no learnings stored)\n\n**Memory is the swarm's collective intelligence. Query it. Feed it.**\n\nBegin now.";
  /**
  * Prompt for self-evaluation before completing a subtask.
  *
@@ -130,6 +130,7 @@ export declare const swarm_spawn_subtask: {
  skills_to_load: import("zod").ZodOptional<import("zod").ZodArray<import("zod").ZodString>>;
  coordinator_notes: import("zod").ZodOptional<import("zod").ZodString>;
  }, import("zod/v4/core").$strip>>;
+ model: import("zod").ZodOptional<import("zod").ZodString>;
  };
  execute(args: {
  bead_id: string;
@@ -144,6 +145,7 @@ export declare const swarm_spawn_subtask: {
  skills_to_load?: string[] | undefined;
  coordinator_notes?: string | undefined;
  } | undefined;
+ model?: string | undefined;
  }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
  };
  /**
@@ -233,6 +235,7 @@ export declare const promptTools: {
  skills_to_load: import("zod").ZodOptional<import("zod").ZodArray<import("zod").ZodString>>;
  coordinator_notes: import("zod").ZodOptional<import("zod").ZodString>;
  }, import("zod/v4/core").$strip>>;
+ model: import("zod").ZodOptional<import("zod").ZodString>;
  };
  execute(args: {
  bead_id: string;
@@ -247,6 +250,7 @@ export declare const promptTools: {
  skills_to_load?: string[] | undefined;
  coordinator_notes?: string | undefined;
  } | undefined;
+ model?: string | undefined;
  }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
  };
  swarm_evaluation_prompt: {
@@ -1 +1 @@
- {"version":3,"file":"swarm-prompts.d.ts","sourceRoot":"","sources":["../src/swarm-prompts.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AASH;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB,m6EAkET,CAAC;AAEzB;;GAEG;AACH,eAAO,MAAM,6BAA6B,gxDAyDlB,CAAC;AAEzB;;;;;GAKG;AACH,eAAO,MAAM,cAAc,mkFAgFK,CAAC;AAEjC;;;;;;;GAOG;AACH,eAAO,MAAM,iBAAiB,ksOA8PnB,CAAC;AAEZ;;;;GAIG;AACH,eAAO,MAAM,iBAAiB,8jCAmCU,CAAC;AAMzC;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE;IAC5C,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,gBAAgB,CAAC,EAAE;QACjB,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,cAAc,CAAC,EAAE,MAAM,EAAE,CAAC;QAC1B,iBAAiB,CAAC,EAAE,MAAM,CAAC;KAC5B,CAAC;CACH,GAAG,MAAM,CA2ET;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE;IAC1C,UAAU,EAAE,MAAM,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB,GAAG,MAAM,CAUT;AAED;;GAEG;AACH,wBAAgB,sBAAsB,CAAC,MAAM,EAAE;IAC7C,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,aAAa,EAAE,MAAM,EAAE,CAAC;CACzB,GAAG,MAAM,CAMT;AAMD;;GAEG;AACH,eAAO,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;CAoC/B,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA0D9B,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;CAoClC,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;;;;;CA0I5B,CAAC;AAEH,eAAO,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAKvB,CAAC"}
+ {"version":3,"file":"swarm-prompts.d.ts","sourceRoot":"","sources":["../src/swarm-prompts.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AASH;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB,m6EAkET,CAAC;AAEzB;;GAEG;AACH,eAAO,MAAM,6BAA6B,gxDAyDlB,CAAC;AAEzB;;;;;GAKG;AACH,eAAO,MAAM,cAAc,mkFAgFK,CAAC;AAEjC;;;;;;;GAOG;AACH,eAAO,MAAM,iBAAiB,mzSA0SnB,CAAC;AAEZ;;;;GAIG;AACH,eAAO,MAAM,iBAAiB,8jCAmCU,CAAC;AAMzC;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE;IAC5C,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,gBAAgB,CAAC,EAAE;QACjB,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,cAAc,CAAC,EAAE,MAAM,EAAE,CAAC;QAC1B,iBAAiB,CAAC,EAAE,MAAM,CAAC;KAC5B,CAAC;CACH,GAAG,MAAM,CA2ET;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE;IAC1C,UAAU,EAAE,MAAM,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB,GAAG,MAAM,CAUT;AAED;;GAEG;AACH,wBAAgB,sBAAsB,CAAC,MAAM,EAAE;IAC7C,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,aAAa,EAAE,MAAM,EAAE,CAAC;CACzB,GAAG,MAAM,CAMT;AAMD;;GAEG;AACH,eAAO,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;CAoC/B,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAqF9B,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;CAoClC,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;;;;;CA0I5B,CAAC;AAEH,eAAO,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAKvB,CAAC"}
package/dist/swarm.d.ts CHANGED
@@ -344,6 +344,7 @@ export declare const swarmTools: {
  skills_to_load: import("zod").ZodOptional<import("zod").ZodArray<import("zod").ZodString>>;
  coordinator_notes: import("zod").ZodOptional<import("zod").ZodString>;
  }, import("zod/v4/core").$strip>>;
+ model: import("zod").ZodOptional<import("zod").ZodString>;
  };
  execute(args: {
  bead_id: string;
@@ -358,6 +359,7 @@ export declare const swarmTools: {
  skills_to_load?: string[] | undefined;
  coordinator_notes?: string | undefined;
  } | undefined;
+ model?: string | undefined;
  }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
  };
  swarm_evaluation_prompt: {
@@ -1 +1 @@
- {"version":3,"file":"swarm.d.ts","sourceRoot":"","sources":["../src/swarm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAGH,cAAc,oBAAoB,CAAC;AACnC,cAAc,mBAAmB,CAAC;AAClC,cAAc,iBAAiB,CAAC;AAChC,cAAc,qBAAqB,CAAC;AAQpC;;;GAGG;AACH,eAAO,MAAM,UAAU;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAKtB,CAAC"}
+ {"version":3,"file":"swarm.d.ts","sourceRoot":"","sources":["../src/swarm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAGH,cAAc,oBAAoB,CAAC;AACnC,cAAc,mBAAmB,CAAC;AAClC,cAAc,iBAAiB,CAAC;AAChC,cAAc,qBAAqB,CAAC;AAQpC;;;GAGG;AACH,eAAO,MAAM,UAAU;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAKtB,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "opencode-swarm-plugin",
- "version": "0.30.5",
+ "version": "0.30.7",
  "description": "Multi-agent swarm coordination for OpenCode with learning capabilities, beads integration, and Agent Mail",
  "type": "module",
  "main": "./dist/index.js",
@@ -39,7 +39,7 @@
  "gray-matter": "^4.0.3",
  "ioredis": "^5.4.1",
  "minimatch": "^10.1.1",
- "swarm-mail": "0.5.0",
+ "swarm-mail": "1.0.0",
  "zod": "4.1.8"
  },
  "devDependencies": {
package/src/hive.ts CHANGED
@@ -19,6 +19,7 @@ import {
  createHiveAdapter,
  FlushManager,
  importFromJSONL,
+ syncMemories,
  type HiveAdapter,
  type Cell as AdapterCell,
  getSwarmMail,
@@ -1012,8 +1013,21 @@ export const hive_sync = tool({
  "flush hive",
  );
 
- if (flushResult.cellsExported === 0) {
- return "No cells to sync";
+ // 2b. Sync memories to JSONL
+ const swarmMail = await getSwarmMail(projectKey);
+ const db = await swarmMail.getDatabase();
+ const hivePath = join(projectKey, ".hive");
+ let memoriesSynced = 0;
+ try {
+ const memoryResult = await syncMemories(db, hivePath);
+ memoriesSynced = memoryResult.exported;
+ } catch (err) {
+ // Memory sync is optional - don't fail if it errors
+ console.warn("[hive_sync] Memory sync warning:", err);
+ }
+
+ if (flushResult.cellsExported === 0 && memoriesSynced === 0) {
+ return "No cells or memories to sync";
  }
 
  // 3. Check if there are changes to commit
@@ -104,7 +104,7 @@ export { createMemoryAdapter };
  */
  export const semantic_memory_store = tool({
  description:
- "Store a memory with semantic embedding. Memories are searchable by semantic similarity and can be organized into collections.",
+ "Store a memory with semantic embedding. Memories are searchable by semantic similarity and can be organized into collections. Confidence affects decay rate: high confidence (1.0) = 135 day half-life, low confidence (0.0) = 45 day half-life.",
  args: {
  information: tool.schema
  .string()
@@ -121,6 +121,10 @@ export const semantic_memory_store = tool({
  .string()
  .optional()
  .describe("JSON string with additional metadata"),
+ confidence: tool.schema
+ .number()
+ .optional()
+ .describe("Confidence level (0.0-1.0) affecting decay rate. Higher = slower decay. Default 0.7"),
  },
  async execute(args, ctx: ToolContext) {
  const adapter = await getMemoryAdapter();
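For callers, confidence is just one more optional argument on semantic_memory_store. A hypothetical args object matching the schema above (content and values are illustrative):

```typescript
const storeArgs = {
  information:
    "OAuth refresh tokens need a 5min expiry buffer; without it refresh can race mid-request.",
  tags: "auth,oauth,tokens",
  confidence: 0.9, // well-verified learning, so it decays more slowly
};
```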
package/src/memory.ts CHANGED
@@ -68,6 +68,8 @@ export interface StoreArgs {
  readonly collection?: string;
  readonly tags?: string;
  readonly metadata?: string;
+ /** Confidence level (0.0-1.0) affecting decay rate. Higher = slower decay. Default 0.7 */
+ readonly confidence?: number;
  }
 
  /** Arguments for find operation */
@@ -288,12 +290,19 @@ export async function createMemoryAdapter(
  metadata.tags = tags;
  }
 
+ // Clamp confidence to valid range [0.0, 1.0]
+ const clampConfidence = (c: number | undefined): number => {
+ if (c === undefined) return 0.7;
+ return Math.max(0.0, Math.min(1.0, c));
+ };
+
  const memory: Memory = {
  id,
  content: args.information,
  metadata,
  collection,
  createdAt: new Date(),
+ confidence: clampConfidence(args.confidence),
  };
 
  // Generate embedding