opencode-swarm-plugin 0.12.4 → 0.12.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/plugin.js CHANGED
@@ -21560,15 +21560,15 @@ var BeadDependencySchema = exports_external.object({
  type: exports_external.enum(["blocks", "blocked-by", "related", "discovered-from"])
  });
  var BeadSchema = exports_external.object({
- id: exports_external.string().regex(/^[a-z0-9-]+-[a-z0-9]+(\.\d+)?$/, "Invalid bead ID format"),
+ id: exports_external.string().regex(/^[a-z0-9]+(-[a-z0-9]+)+(\.\d+)?$/, "Invalid bead ID format"),
  title: exports_external.string().min(1, "Title required"),
  description: exports_external.string().optional().default(""),
  status: BeadStatusSchema.default("open"),
  priority: exports_external.number().int().min(0).max(3).default(2),
  issue_type: BeadTypeSchema.default("task"),
- created_at: exports_external.string(),
- updated_at: exports_external.string().optional(),
- closed_at: exports_external.string().optional(),
+ created_at: exports_external.string().datetime({ offset: true }),
+ updated_at: exports_external.string().datetime({ offset: true }).optional(),
+ closed_at: exports_external.string().datetime({ offset: true }).optional(),
  parent_id: exports_external.string().optional(),
  dependencies: exports_external.array(BeadDependencySchema).optional().default([]),
  metadata: exports_external.record(exports_external.string(), exports_external.unknown()).optional()
@@ -21641,7 +21641,7 @@ var EvaluationSchema = exports_external.object({
  criteria: exports_external.record(exports_external.string(), CriterionEvaluationSchema),
  overall_feedback: exports_external.string(),
  retry_suggestion: exports_external.string().nullable(),
- timestamp: exports_external.string().optional()
+ timestamp: exports_external.string().datetime({ offset: true }).optional()
  });
  var DEFAULT_CRITERIA = [
  "type_safe",
@@ -21659,7 +21659,7 @@ var WeightedEvaluationSchema = exports_external.object({
  criteria: exports_external.record(exports_external.string(), WeightedCriterionEvaluationSchema),
  overall_feedback: exports_external.string(),
  retry_suggestion: exports_external.string().nullable(),
- timestamp: exports_external.string().optional(),
+ timestamp: exports_external.string().datetime({ offset: true }).optional(),
  average_weight: exports_external.number().min(0).max(1).optional(),
  raw_score: exports_external.number().min(0).max(1).optional(),
  weighted_score: exports_external.number().min(0).max(1).optional()
@@ -21732,7 +21732,7 @@ var SwarmSpawnResultSchema = exports_external.object({
  coordinator_name: exports_external.string(),
  thread_id: exports_external.string(),
  agents: exports_external.array(SpawnedAgentSchema),
- started_at: exports_external.string()
+ started_at: exports_external.string().datetime({ offset: true })
  });
  var AgentProgressSchema = exports_external.object({
  bead_id: exports_external.string(),
@@ -21742,7 +21742,7 @@ var AgentProgressSchema = exports_external.object({
  message: exports_external.string().optional(),
  files_touched: exports_external.array(exports_external.string()).optional(),
  blockers: exports_external.array(exports_external.string()).optional(),
- timestamp: exports_external.string()
+ timestamp: exports_external.string().datetime({ offset: true })
  });
  var SwarmStatusSchema = exports_external.object({
  epic_id: exports_external.string(),
@@ -21752,7 +21752,7 @@ var SwarmStatusSchema = exports_external.object({
  failed: exports_external.number().int().min(0),
  blocked: exports_external.number().int().min(0),
  agents: exports_external.array(SpawnedAgentSchema),
- last_update: exports_external.string()
+ last_update: exports_external.string().datetime({ offset: true })
  });
  // src/beads.ts
  class BeadError extends Error {
@@ -21890,18 +21890,32 @@ var beads_create_epic = tool({
  };
  return JSON.stringify(result, null, 2);
  } catch (error45) {
- const rollbackHint = created.map((b) => `bd close ${b.id} --reason "Rollback partial epic"`).join(`
- `);
- const result = {
- success: false,
- epic: created[0] || {},
- subtasks: created.slice(1),
- rollback_hint: rollbackHint
- };
- return JSON.stringify({
- ...result,
- error: error45 instanceof Error ? error45.message : String(error45)
- }, null, 2);
+ const rollbackCommands = [];
+ for (const bead of created) {
+ try {
+ const closeCmd = [
+ "bd",
+ "close",
+ bead.id,
+ "--reason",
+ "Rollback partial epic",
+ "--json"
+ ];
+ await Bun.$`${closeCmd}`.quiet().nothrow();
+ rollbackCommands.push(`bd close ${bead.id} --reason "Rollback partial epic"`);
+ } catch (rollbackError) {
+ console.error(`Failed to rollback bead ${bead.id}:`, rollbackError);
+ }
+ }
+ const errorMsg = error45 instanceof Error ? error45.message : String(error45);
+ const rollbackInfo = rollbackCommands.length > 0 ? `
+
+ Rolled back ${rollbackCommands.length} bead(s):
+ ${rollbackCommands.join(`
+ `)}` : `
+
+ No beads to rollback.`;
+ throw new BeadError(`Epic creation failed: ${errorMsg}${rollbackInfo}`, "beads_create_epic", 1);
  }
  }
  });
@@ -22028,17 +22042,22 @@ var beads_sync = tool({
  },
  async execute(args, ctx) {
  const autoPull = args.auto_pull ?? true;
+ const TIMEOUT_MS = 30000;
+ const withTimeout = async (promise2, timeoutMs, operation) => {
+ const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new BeadError(`Operation timed out after ${timeoutMs}ms`, operation)), timeoutMs));
+ return Promise.race([promise2, timeoutPromise]);
+ };
  if (autoPull) {
- const pullResult = await Bun.$`git pull --rebase`.quiet().nothrow();
+ const pullResult = await withTimeout(Bun.$`git pull --rebase`.quiet().nothrow(), TIMEOUT_MS, "git pull --rebase");
  if (pullResult.exitCode !== 0) {
  throw new BeadError(`Failed to pull: ${pullResult.stderr.toString()}`, "git pull --rebase", pullResult.exitCode);
  }
  }
- const syncResult = await Bun.$`bd sync`.quiet().nothrow();
+ const syncResult = await withTimeout(Bun.$`bd sync`.quiet().nothrow(), TIMEOUT_MS, "bd sync");
  if (syncResult.exitCode !== 0) {
  throw new BeadError(`Failed to sync beads: ${syncResult.stderr.toString()}`, "bd sync", syncResult.exitCode);
  }
- const pushResult = await Bun.$`git push`.quiet().nothrow();
+ const pushResult = await withTimeout(Bun.$`git push`.quiet().nothrow(), TIMEOUT_MS, "git push");
  if (pushResult.exitCode !== 0) {
  throw new BeadError(`Failed to push: ${pushResult.stderr.toString()}`, "git push", pushResult.exitCode);
  }
@@ -22321,9 +22340,11 @@ function getLimitsForEndpoint(endpoint) {
  const upperEndpoint = endpoint.toUpperCase();
  const perMinuteEnv = process.env[`OPENCODE_RATE_LIMIT_${upperEndpoint}_PER_MIN`];
  const perHourEnv = process.env[`OPENCODE_RATE_LIMIT_${upperEndpoint}_PER_HOUR`];
+ const parsedPerMinute = perMinuteEnv ? parseInt(perMinuteEnv, 10) : NaN;
+ const parsedPerHour = perHourEnv ? parseInt(perHourEnv, 10) : NaN;
  return {
- perMinute: perMinuteEnv ? parseInt(perMinuteEnv, 10) : defaults.perMinute,
- perHour: perHourEnv ? parseInt(perHourEnv, 10) : defaults.perHour
+ perMinute: Number.isNaN(parsedPerMinute) ? defaults.perMinute : parsedPerMinute,
+ perHour: Number.isNaN(parsedPerHour) ? defaults.perHour : parsedPerHour
  };
  }
@@ -22362,6 +22383,7 @@ class RedisRateLimiter {
  const pipeline = this.redis.pipeline();
  pipeline.zremrangebyscore(key, 0, windowStart);
  pipeline.zcard(key);
+ pipeline.zrange(key, 0, 0, "WITHSCORES");
  const results = await pipeline.exec();
  if (!results) {
  return { allowed: true, remaining: limit, resetAt: now + windowDuration };
@@ -22371,7 +22393,7 @@ class RedisRateLimiter {
  const allowed = count < limit;
  let resetAt = now + windowDuration;
  if (!allowed) {
- const oldest = await this.redis.zrange(key, 0, 0, "WITHSCORES");
+ const oldest = results[2]?.[1] || [];
  if (oldest.length >= 2) {
  const oldestTimestamp = parseInt(oldest[1], 10);
  resetAt = oldestTimestamp + windowDuration;
@@ -22612,8 +22634,11 @@ function saveSessionState(sessionID, state) {
  }
  const path = getSessionStatePath(sessionID);
  writeFileSync(path, JSON.stringify(state, null, 2));
+ return true;
  } catch (error45) {
- console.warn(`[agent-mail] Could not save session state: ${error45}`);
+ console.error(`[agent-mail] CRITICAL: Could not save session state: ${error45}`);
+ console.error(`[agent-mail] Session state will not persist across CLI invocations!`);
+ return false;
  }
  }
  var sessionStates = new Map;
@@ -22893,6 +22918,7 @@ async function mcpCall(toolName, args) {
  } catch (error45) {
  lastError = error45 instanceof Error ? error45 : new Error(String(error45));
  consecutiveFailures++;
+ const retryable = isRetryableError(error45);
  if (consecutiveFailures >= RECOVERY_CONFIG.failureThreshold && RECOVERY_CONFIG.enabled) {
  console.warn(`[agent-mail] ${consecutiveFailures} consecutive failures, checking server health...`);
  const healthy = await isServerFunctional();
@@ -22901,12 +22927,14 @@ async function mcpCall(toolName, args) {
  const restarted = await restartServer();
  if (restarted) {
  agentMailAvailable = null;
- attempt--;
- continue;
+ if (retryable) {
+ attempt--;
+ continue;
+ }
  }
  }
  }
- if (!isRetryableError(error45)) {
+ if (!retryable) {
  console.warn(`[agent-mail] Non-retryable error for ${toolName}: ${lastError.message}`);
  throw lastError;
  }
@@ -23039,12 +23067,12 @@ var agentmail_read_message = tool({
  const messages = await mcpCall("fetch_inbox", {
  project_key: state.projectKey,
  agent_name: state.agentName,
- limit: 1,
+ limit: 50,
  include_bodies: true
  });
  const message = messages.find((m) => m.id === args.message_id);
  if (!message) {
- return `Message ${args.message_id} not found`;
+ return `Message ${args.message_id} not found in recent 50 messages. Try using agentmail_search to locate it.`;
  }
  await recordRateLimitedRequest(state.agentName, "read_message");
  return JSON.stringify(message, null, 2);
@@ -24518,6 +24546,13 @@ var swarm_validate_decomposition = tool({
  for (let i = 0;i < validated.subtasks.length; i++) {
  const deps = validated.subtasks[i].dependencies;
  for (const dep of deps) {
+ if (dep < 0 || dep >= validated.subtasks.length) {
+ return JSON.stringify({
+ valid: false,
+ error: `Invalid dependency: subtask ${i} depends on ${dep}, but only ${validated.subtasks.length} subtasks exist (indices 0-${validated.subtasks.length - 1})`,
+ hint: "Dependency index is out of bounds"
+ }, null, 2);
+ }
  if (dep >= i) {
  return JSON.stringify({
  valid: false,
@@ -2,282 +2,117 @@
  description: Decompose task into parallel subtasks and coordinate agents
  ---

- You are a swarm coordinator. Take a complex task, break it into beads, and unleash parallel agents.
+ You are a swarm coordinator. Decompose the task into beads and spawn parallel agents.

- ## Usage
+ ## Task

- ```
- /swarm <task description or bead-id>
- /swarm --to-main <task> # Skip PR, push directly to main (use sparingly)
- /swarm --no-sync <task> # Skip mid-task context sync (for simple independent tasks)
- ```
-
- **Default behavior: Feature branch + PR with context sync.** All swarm work goes to a feature branch, agents share context mid-task, and creates a PR for review.
-
- ## Step 1: Initialize Session
-
- Use the plugin's agent-mail tools to register:
-
- ```
- agentmail_init with project_path=$PWD, task_description="Swarm coordinator: <task>"
- ```
-
- This returns your agent name and session state. Remember it.
-
- ## Step 2: Create Feature Branch
-
- **CRITICAL: Never push directly to main.**
-
- ```bash
- # Create branch from bead ID or task name
- git checkout -b swarm/<bead-id> # e.g., swarm/trt-buddy-d7d
- # Or for ad-hoc tasks:
- git checkout -b swarm/<short-description> # e.g., swarm/contextual-checkins
-
- git push -u origin HEAD
- ```
-
- ## Step 3: Understand the Task
-
- If given a bead-id:
+ $ARGUMENTS

- ```
- beads_query with id=<bead-id>
- ```
+ ## Flags (parse from task above)

- If given a description, analyze it to understand scope.
+ - `--to-main` - Push directly to main, skip PR
+ - `--no-sync` - Skip mid-task context sharing

- ## Step 4: Select Strategy & Decompose
+ **Default: Feature branch + PR with context sync.**

- ### Option A: Use the Planner Agent (Recommended)
+ ## Workflow

- Spawn the `@swarm-planner` agent to handle decomposition:
+ ### 1. Initialize

  ```
- Task(
- subagent_type="general",
- description="Plan swarm decomposition",
- prompt="You are @swarm-planner. Decompose this task: <task description>. Use swarm_select_strategy and swarm_plan_prompt to guide your decomposition. Return ONLY valid BeadTree JSON."
- )
+ agentmail_init(project_path="$PWD", task_description="Swarm: <task summary>")
  ```

- ### Option B: Manual Decomposition
-
- 1. **Select strategy**:
+ ### 2. Create Feature Branch (unless --to-main)

- ```
- swarm_select_strategy with task="<task description>"
- ```
-
- 2. **Get planning prompt**:
-
- ```
- swarm_plan_prompt with task="<task description>", strategy="<selected or auto>"
+ ```bash
+ git checkout -b swarm/<short-task-name>
+ git push -u origin HEAD
  ```

- 3. **Create decomposition** following the prompt guidelines
+ ### 3. Decompose Task

- 4. **Validate**:
+ Use strategy selection and planning:

  ```
- swarm_validate_decomposition with response="<your BeadTree JSON>"
+ swarm_select_strategy(task="<the task>")
+ swarm_plan_prompt(task="<the task>", strategy="<auto or selected>")
  ```

- ### Create Beads
-
- Once you have a valid BeadTree:
+ Follow the prompt to create a BeadTree, then validate:

  ```
- beads_create_epic with epic_title="<parent task>", subtasks=[{title, description, files, priority}...]
+ swarm_validate_decomposition(response="<your BeadTree JSON>")
  ```

- **Decomposition rules:**
-
- - Each bead should be completable by one agent
- - Beads should be independent (parallelizable) where possible
- - If there are dependencies, order them in the subtasks array
- - Aim for 3-7 beads per swarm (too few = not parallel, too many = coordination overhead)
-
- ## Step 5: Reserve Files
-
- For each subtask, reserve the files it will touch:
+ ### 4. Create Beads

  ```
- agentmail_reserve with paths=[<files>], reason="<bead-id>: <brief description>"
+ beads_create_epic(epic_title="<task>", subtasks=[{title, files, priority}...])
  ```

- **Conflict prevention:**
-
- - No two agents should edit the same file
- - If overlap exists, merge beads or sequence them
-
- ## Step 6: Spawn the Swarm
-
- **CRITICAL: Spawn ALL agents in a SINGLE message with multiple Task calls.**
-
- Use the prompt generator for each subtask:
+ Rules:

- ```
- swarm_spawn_subtask with bead_id="<bead-id>", epic_id="<epic-id>", subtask_title="<title>", subtask_description="<description>", files=[<files>], shared_context="Branch: swarm/<id>, sync_enabled: true"
- ```
+ - Each bead completable by one agent
+ - Independent where possible (parallelizable)
+ - 3-7 beads per swarm

- Then spawn agents with the generated prompts:
+ ### 5. Reserve Files

  ```
- Task(
- subagent_type="general",
- description="Swarm worker: <bead-title>",
- prompt="<output from swarm_spawn_subtask>"
- )
+ agentmail_reserve(paths=[<files>], reason="<bead-id>: <description>")
  ```

- Spawn ALL agents in parallel in a single response.
-
- ## Step 7: Monitor Progress (unless --no-sync)
+ No two agents should edit the same file.

- Check swarm status:
+ ### 6. Spawn Agents

- ```
- swarm_status with epic_id="<parent-bead-id>"
- ```
+ **CRITICAL: Spawn ALL in a SINGLE message with multiple Task calls.**

- Monitor inbox for progress updates:
+ For each subtask:

  ```
- agentmail_inbox
+ swarm_spawn_subtask(bead_id="<id>", epic_id="<epic>", subtask_title="<title>", files=[...])
  ```

- **When you receive progress updates:**
-
- 1. **Review decisions made** - Are agents making compatible choices?
- 2. **Check for pattern conflicts** - Different approaches to the same problem?
- 3. **Identify shared concerns** - Common blockers or discoveries?
-
- **If you spot incompatibilities, broadcast shared context:**
+ Then spawn:

  ```
- agentmail_send with to=["*"], subject="Coordinator Update", body="<guidance>", thread_id="<epic-id>", importance="high"
+ Task(subagent_type="swarm-worker", description="<bead-title>", prompt="<from swarm_spawn_subtask>")
  ```

- ## Step 8: Collect Results
-
- When agents complete, they send completion messages. Summarize the thread:
+ ### 7. Monitor (unless --no-sync)

  ```
- agentmail_summarize_thread with thread_id="<epic-id>"
+ swarm_status(epic_id="<epic-id>")
+ agentmail_inbox()
  ```

- ## Step 9: Complete Swarm
-
- Use the swarm completion tool:
+ If incompatibilities spotted, broadcast:

  ```
- swarm_complete with project_key=$PWD, agent_name=<YOUR_NAME>, bead_id="<epic-id>", summary="<what was accomplished>", files_touched=[<all files>]
+ agentmail_send(to=["*"], subject="Coordinator Update", body="<guidance>", importance="high")
  ```

- This:
-
- - Runs [UBS (Ultimate Bug Scanner)](https://github.com/Dicklesworthstone/ultimate_bug_scanner) on touched files to detect bugs before completion
- - Releases file reservations
- - Closes the bead
- - Records outcome for learning
-
- > **Note:** UBS is optional but recommended. If not installed, swarm completion proceeds with a warning that manual review is advised. Install via:
- > ```bash
- > curl -fsSL "https://raw.githubusercontent.com/Dicklesworthstone/ultimate_bug_scanner/master/install.sh" | bash
- > ```
- > See the [UBS repo](https://github.com/Dicklesworthstone/ultimate_bug_scanner) for more options (Docker, Nix, etc.).
-
- Then sync beads:
+ ### 8. Complete

  ```
- beads_sync
+ swarm_complete(project_key="$PWD", agent_name="<your-name>", bead_id="<epic-id>", summary="<done>", files_touched=[...])
+ beads_sync()
  ```

- ## Step 10: Create PR
+ ### 9. Create PR (unless --to-main)

  ```bash
- gh pr create --title "feat: <epic title>" --body "$(cat <<'EOF'
- ## Summary
- <1-3 bullet points from swarm results>
-
- ## Beads Completed
- - <bead-id>: <summary>
- - <bead-id>: <summary>
-
- ## Files Changed
- <aggregate list>
-
- ## Testing
- - [ ] Type check passes
- - [ ] Tests pass (if applicable)
- EOF
- )"
- ```
-
- Report summary:
-
- ```markdown
- ## Swarm Complete: <task>
-
- ### PR: #<number>
-
- ### Agents Spawned: N
-
- ### Beads Closed: N
-
- ### Work Completed
-
- - [bead-id]: [summary]
-
- ### Files Changed
-
- - [aggregate list]
+ gh pr create --title "feat: <epic title>" --body "## Summary\n<bullets>\n\n## Beads\n<list>"
  ```

- ## Failure Handling
-
- If an agent fails:
-
- - Check its messages: `agentmail_inbox`
- - The bead remains in-progress
- - Manually investigate or re-spawn
-
- If file conflicts occur:
-
- - Agent Mail reservations should prevent this
- - If it happens, one agent needs to wait
-
- ## Direct-to-Main Mode (--to-main)
-
- Only use when explicitly requested. Skips branch/PR:
-
- - Trivial fixes across many files
- - Automated migrations with high confidence
- - User explicitly says "push to main"
-
- ## No-Sync Mode (--no-sync)
-
- Skip mid-task context sharing when tasks are truly independent:
-
- - Simple mechanical changes (find/replace, formatting, lint fixes)
- - Tasks with zero integration points
- - Completely separate feature areas with no shared types
-
- In this mode:
-
- - Agents skip the mid-task progress message
- - Coordinator skips Step 7 (monitoring)
- - Faster execution, less coordination overhead
-
- **Default is sync ON** - prefer sharing context. Use `--no-sync` deliberately.
-
  ## Strategy Reference

- | Strategy | Best For | Auto-Detected Keywords |
- | ----------------- | --------------------------- | ---------------------------------------------- |
- | **file-based** | Refactoring, migrations | refactor, migrate, rename, update all, convert |
- | **feature-based** | New features, functionality | add, implement, build, create, feature, new |
- | **risk-based** | Bug fixes, security | fix, bug, security, critical, urgent, hotfix |
+ | Strategy | Best For | Keywords |
+ | ------------- | ----------------------- | ------------------------------------- |
+ | file-based | Refactoring, migrations | refactor, migrate, rename, update all |
+ | feature-based | New features | add, implement, build, create, new |
+ | risk-based | Bug fixes, security | fix, bug, security, critical, urgent |

- Use `swarm_select_strategy` to see which strategy is recommended and why.
+ Begin decomposition now.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "opencode-swarm-plugin",
- "version": "0.12.4",
+ "version": "0.12.6",
  "description": "Multi-agent swarm coordination for OpenCode with learning capabilities, beads integration, and Agent Mail",
  "type": "module",
  "main": "dist/index.js",
package/src/agent-mail.ts CHANGED
@@ -107,8 +107,10 @@ function loadSessionState(sessionID: string): AgentMailState | null {
  /**
  * Save session state to disk
+ *
+ * @returns true if save succeeded, false if failed
  */
- function saveSessionState(sessionID: string, state: AgentMailState): void {
+ function saveSessionState(sessionID: string, state: AgentMailState): boolean {
  try {
  // Ensure directory exists
  if (!existsSync(SESSION_STATE_DIR)) {
@@ -116,9 +118,16 @@ function saveSessionState(sessionID: string, state: AgentMailState): void {
  }
  const path = getSessionStatePath(sessionID);
  writeFileSync(path, JSON.stringify(state, null, 2));
+ return true;
  } catch (error) {
  // Non-fatal - state just won't persist
- console.warn(`[agent-mail] Could not save session state: ${error}`);
+ console.error(
+ `[agent-mail] CRITICAL: Could not save session state: ${error}`,
+ );
+ console.error(
+ `[agent-mail] Session state will not persist across CLI invocations!`,
+ );
+ return false;
  }
  }
@@ -718,6 +727,9 @@ export async function mcpCall<T>(
  // Track consecutive failures
  consecutiveFailures++;

+ // Check if error is retryable FIRST
+ const retryable = isRetryableError(error);
+
  // Check if we should attempt server restart
  if (
  consecutiveFailures >= RECOVERY_CONFIG.failureThreshold &&
@@ -734,15 +746,18 @@ export async function mcpCall<T>(
  if (restarted) {
  // Reset availability cache since server restarted
  agentMailAvailable = null;
- // Don't count this attempt against retries - try again
- attempt--;
- continue;
+ // Only retry if the error was retryable in the first place
+ if (retryable) {
+ // Don't count this attempt against retries - try again
+ attempt--;
+ continue;
+ }
  }
  }
  }

- // Check if error is retryable
- if (!isRetryableError(error)) {
+ // If error is not retryable, throw immediately
+ if (!retryable) {
  console.warn(
  `[agent-mail] Non-retryable error for ${toolName}: ${lastError.message}`,
  );
@@ -1006,18 +1021,18 @@ export const agentmail_read_message = tool({
  message_id: args.message_id,
  });

- // Fetch with body - we need to use fetch_inbox with specific message
- // Since there's no get_message, we'll use search
+ // Fetch with body - fetch more messages to find the requested one
+ // Since there's no get_message endpoint, we need to fetch a reasonable batch
  const messages = await mcpCall<MessageHeader[]>("fetch_inbox", {
  project_key: state.projectKey,
  agent_name: state.agentName,
- limit: 1,
+ limit: 50, // Fetch more messages to increase chance of finding the target
  include_bodies: true, // Only for single message fetch
  });

  const message = messages.find((m) => m.id === args.message_id);
  if (!message) {
- return `Message ${args.message_id} not found`;
+ return `Message ${args.message_id} not found in recent 50 messages. Try using agentmail_search to locate it.`;
  }

  // Record successful request