opencode-swarm-plugin 0.12.10 → 0.12.11
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in their public registries.
- package/.beads/issues.jsonl +7 -7
- package/dist/index.js +459 -33
- package/dist/plugin.js +457 -33
- package/package.json +1 -1
- package/src/index.ts +21 -0
- package/src/learning.ts +19 -0
- package/src/repo-crawl.ts +610 -0
- package/src/schemas/evaluation.ts +21 -0
- package/src/swarm.ts +173 -29
package/src/swarm.ts
CHANGED
```diff
@@ -25,7 +25,7 @@ import {
   type SpawnedAgent,
   type Bead,
 } from "./schemas";
-import { mcpCall } from "./agent-mail";
+import { mcpCall, requireState } from "./agent-mail";
 import {
   OutcomeSignalsSchema,
   DecompositionStrategySchema,
```
```diff
@@ -678,51 +678,46 @@ Begin work on your subtask now.`;
  */
 export const SUBTASK_PROMPT_V2 = `You are a swarm agent working on: **{subtask_title}**
 
-##
-
-
+## [IDENTITY]
+Agent: (assigned at spawn)
+Bead: {bead_id}
+Epic: {epic_id}
 
-##
+## [TASK]
 {subtask_description}
 
-##
+## [FILES]
+Reserved (exclusive):
 {file_list}
 
 Only modify these files. Need others? Message the coordinator.
 
-##
+## [CONTEXT]
 {shared_context}
 
 {compressed_context}
 
 {error_context}
 
-##
+## [TOOLS]
+### Beads
+- beads_update (status: blocked)
+- beads_create (new bugs)
+- beads_close (via swarm_complete)
 
-### Agent Mail
-
-// Report progress, ask questions, announce blockers
-agentmail_send({
-  to: ["coordinator"],
-  subject: "Progress update",
-  body: "What you did or need",
-  thread_id: "{epic_id}"
-})
-\`\`\`
+### Agent Mail
+- agentmail_send (thread_id: {epic_id})
 
-###
--
-- **Found bug?** \`beads_create({ title: "Bug description", type: "bug" })\`
-- **Done?** \`swarm_complete({ bead_id: "{bead_id}", summary: "What you did", files_touched: [...] })\`
+### Completion
+- swarm_complete (REQUIRED when done)
 
-##
+## [OUTPUT]
+1. Read files first
+2. Implement changes
+3. Verify (typecheck)
+4. Complete with swarm_complete
 
-
-2. **Plan** your approach (message coordinator if complex)
-3. **Implement** the changes
-4. **Verify** (typecheck, tests)
-5. **Report** progress via Agent Mail
-6. **Complete** with swarm_complete when done
+Return: Summary of changes made
 
 **Never work silently.** Communicate progress and blockers immediately.
 
```
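For orientation only: a minimal sketch of how the V2 template's placeholders could be filled. The real rendering lives elsewhere in swarm.ts and is not part of this diff; the helper name and the sample values below are hypothetical.

```typescript
// Hypothetical rendering helper (not the plugin's own code) that fills the
// {placeholder} tokens used by SUBTASK_PROMPT_V2. Sample values are made up.
function renderSubtaskPrompt(
  template: string,
  values: Record<string, string>,
): string {
  // Replace {name} tokens; unknown tokens are left untouched.
  return template.replace(/\{(\w+)\}/g, (match, key) => values[key] ?? match);
}

const prompt = renderSubtaskPrompt(SUBTASK_PROMPT_V2, {
  subtask_title: "Add retry logic to the fetch helper",
  bead_id: "bd-abc123",
  epic_id: "bd-epic001",
  subtask_description: "Wrap the fetch call in exponential backoff.",
  file_list: "- src/fetch.ts",
  shared_context: "",
  compressed_context: "",
  error_context: "",
});
```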
```diff
@@ -1755,6 +1750,92 @@ async function runUbsScan(files: string[]): Promise<UbsScanResult | null> {
   }
 }
 
+/**
+ * Broadcast context updates to all agents in the epic
+ *
+ * Enables mid-task coordination by sharing discoveries, warnings, or blockers
+ * with all agents working on the same epic. Agents can broadcast without
+ * waiting for task completion.
+ *
+ * Based on "Patterns for Building AI Agents" p.31: "Ensure subagents can share context along the way"
+ */
+export const swarm_broadcast = tool({
+  description:
+    "Broadcast context update to all agents working on the same epic",
+  args: {
+    epic_id: tool.schema.string().describe("Epic ID (e.g., bd-abc123)"),
+    message: tool.schema
+      .string()
+      .describe("Context update to share (what changed, what was learned)"),
+    importance: tool.schema
+      .enum(["info", "warning", "blocker"])
+      .default("info")
+      .describe("Priority level (default: info)"),
+    files_affected: tool.schema
+      .array(tool.schema.string())
+      .optional()
+      .describe("Files this context relates to"),
+  },
+  async execute(args, ctx) {
+    // Get agent state - requires prior initialization
+    const state = requireState(ctx.sessionID);
+
+    // Extract bead_id from context if available (for traceability)
+    // In the swarm flow, ctx might have the current bead being worked on
+    const beadId = (ctx as { beadId?: string }).beadId || "unknown";
+
+    // Format the broadcast message
+    const body = [
+      `## Context Update`,
+      "",
+      `**From**: ${state.agentName} (${beadId})`,
+      `**Priority**: ${args.importance.toUpperCase()}`,
+      "",
+      args.message,
+      "",
+      args.files_affected && args.files_affected.length > 0
+        ? `**Files affected**:\n${args.files_affected.map((f) => `- \`${f}\``).join("\n")}`
+        : "",
+    ]
+      .filter(Boolean)
+      .join("\n");
+
+    // Map importance to Agent Mail importance
+    const mailImportance =
+      args.importance === "blocker"
+        ? "urgent"
+        : args.importance === "warning"
+          ? "high"
+          : "normal";
+
+    // Send as broadcast to thread (empty 'to' = all agents in thread)
+    await mcpCall("send_message", {
+      project_key: state.projectKey,
+      sender_name: state.agentName,
+      to: [], // Broadcast to thread
+      subject: `[${args.importance.toUpperCase()}] Context update from ${state.agentName}`,
+      body_md: body,
+      thread_id: args.epic_id,
+      importance: mailImportance,
+      ack_required: args.importance === "blocker", // Require ack for blockers
+    });
+
+    return JSON.stringify(
+      {
+        broadcast: true,
+        epic_id: args.epic_id,
+        from: state.agentName,
+        bead_id: beadId,
+        importance: args.importance,
+        recipients: "all agents in epic",
+        ack_required: args.importance === "blocker",
+      },
+      null,
+      2,
+    );
+  },
+});
+
 /**
  * Mark a subtask as complete
  *
```
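To make the new tool's behavior concrete, here is a self-contained sketch of the message it assembles and the Agent Mail priority it maps to. This is an illustration only, not code from the package; the agent name, bead id, and file path are made up.

```typescript
// Mirrors swarm_broadcast's body formatting and importance mapping so the
// broadcast payload is easy to picture. Inputs below are hypothetical.
type BroadcastImportance = "info" | "warning" | "blocker";

function previewBroadcast(
  agentName: string,
  beadId: string,
  message: string,
  importance: BroadcastImportance,
  filesAffected: string[] = [],
) {
  const body = [
    `## Context Update`,
    "",
    `**From**: ${agentName} (${beadId})`,
    `**Priority**: ${importance.toUpperCase()}`,
    "",
    message,
    "",
    filesAffected.length > 0
      ? `**Files affected**:\n${filesAffected.map((f) => `- \`${f}\``).join("\n")}`
      : "",
  ]
    .filter(Boolean)
    .join("\n");

  // Same mapping the tool applies before calling send_message.
  const mailImportance =
    importance === "blocker" ? "urgent" : importance === "warning" ? "high" : "normal";

  return { body, mailImportance, ackRequired: importance === "blocker" };
}

// A warning that affects one (hypothetical) file:
previewBroadcast(
  "BlueJay",
  "bd-abc123",
  "Renamed the shared config type; update imports before typechecking.",
  "warning",
  ["src/config.ts"],
);
```

Because `to` is sent empty, Agent Mail delivers the message to every agent in the epic's thread, and blockers additionally set `ack_required`.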
```diff
@@ -1919,6 +2000,40 @@ export const swarm_complete = tool({
   },
 });
 
+/**
+ * Classify failure based on error message heuristics
+ *
+ * Simple pattern matching to categorize why a task failed.
+ * Used when failure_mode is not explicitly provided.
+ *
+ * @param error - Error object or message
+ * @returns FailureMode classification
+ */
+function classifyFailure(error: Error | string): string {
+  const msg = (typeof error === "string" ? error : error.message).toLowerCase();
+
+  if (msg.includes("timeout")) return "timeout";
+  if (msg.includes("conflict") || msg.includes("reservation"))
+    return "conflict";
+  if (msg.includes("validation") || msg.includes("schema")) return "validation";
+  if (msg.includes("context") || msg.includes("token"))
+    return "context_overflow";
+  if (msg.includes("blocked") || msg.includes("dependency"))
+    return "dependency_blocked";
+  if (msg.includes("cancel")) return "user_cancelled";
+
+  // Check for tool failure patterns
+  if (
+    msg.includes("tool") ||
+    msg.includes("command") ||
+    msg.includes("failed to execute")
+  ) {
+    return "tool_failure";
+  }
+
+  return "unknown";
+}
+
 /**
  * Record outcome signals from a completed subtask
  *
```
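For a quick read on the heuristics, a few illustrative inputs (the messages are made up) and the label each would receive, given the order of checks above:

```typescript
// Illustrative only; messages are invented to trigger each branch.
classifyFailure("Request timeout after 30s");               // "timeout"
classifyFailure(new Error("file reservation conflict"));    // "conflict"
classifyFailure("OutcomeSignals schema validation error");  // "validation"
classifyFailure("context window exceeded token limit");     // "context_overflow"
classifyFailure("failed to execute command: bun test");     // "tool_failure"
classifyFailure("disk full");                               // "unknown"
```

Order matters: a message containing both "schema" and "token" comes back as "validation" because that check runs first.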
```diff
@@ -1968,6 +2083,25 @@ export const swarm_record_outcome = tool({
       .enum(["file-based", "feature-based", "risk-based", "research-based"])
       .optional()
      .describe("Decomposition strategy used for this task"),
+    failure_mode: tool.schema
+      .enum([
+        "timeout",
+        "conflict",
+        "validation",
+        "tool_failure",
+        "context_overflow",
+        "dependency_blocked",
+        "user_cancelled",
+        "unknown",
+      ])
+      .optional()
+      .describe(
+        "Failure classification (only when success=false). Auto-classified if not provided.",
+      ),
+    failure_details: tool.schema
+      .string()
+      .optional()
+      .describe("Detailed failure context (error message, stack trace, etc.)"),
   },
   async execute(args) {
     // Build outcome signals
```
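A hypothetical argument object for reporting a failed subtask with an explicit classification; only fields visible in this diff are shown, and the values are invented.

```typescript
// Hypothetical swarm_record_outcome arguments for a failed subtask.
// Only fields added or referenced in this diff appear here; values are made up.
const failedOutcome = {
  success: false,
  strategy: "file-based" as const,
  failure_mode: "conflict" as const,
  failure_details: "Reservation conflict: src/swarm.ts already held by another agent",
};
```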
```diff
@@ -1980,8 +2114,15 @@ export const swarm_record_outcome = tool({
       files_touched: args.files_touched ?? [],
       timestamp: new Date().toISOString(),
       strategy: args.strategy as LearningDecompositionStrategy | undefined,
+      failure_mode: args.failure_mode,
+      failure_details: args.failure_details,
     };
 
+    // If task failed but no failure_mode provided, try to classify from failure_details
+    if (!args.success && !args.failure_mode && args.failure_details) {
+      signals.failure_mode = classifyFailure(args.failure_details) as any;
+    }
+
     // Validate signals
     const validated = OutcomeSignalsSchema.parse(signals);
 
```
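And a sketch of the fallback wired up above: when the caller omits failure_mode, the heuristics run over failure_details. The helper below is an illustration, not the plugin's code; it just restates the branch in isolation.

```typescript
// Standalone restatement of the fallback (illustrative, not package code):
// classify only when the task failed and no explicit mode was supplied.
function resolveFailureMode(args: {
  success: boolean;
  failure_mode?: string;
  failure_details?: string;
}): string | undefined {
  if (!args.success && !args.failure_mode && args.failure_details) {
    return classifyFailure(args.failure_details);
  }
  return args.failure_mode;
}

// No explicit failure_mode → heuristics pick "timeout".
resolveFailureMode({
  success: false,
  failure_details: "Subtask timeout: no completion after 20 minutes",
});
```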
```diff
@@ -2039,6 +2180,8 @@ export const swarm_record_outcome = tool({
         retry_count: args.retry_count ?? 0,
         success: args.success,
         strategy: args.strategy,
+        failure_mode: validated.failure_mode,
+        failure_details: validated.failure_details,
         accumulated_errors: errorStats.total,
         unresolved_errors: errorStats.unresolved,
       },
```
```diff
@@ -2559,6 +2702,7 @@ export const swarmTools = {
   swarm_validate_decomposition: swarm_validate_decomposition,
   swarm_status: swarm_status,
   swarm_progress: swarm_progress,
+  swarm_broadcast: swarm_broadcast,
   swarm_complete: swarm_complete,
   swarm_record_outcome: swarm_record_outcome,
   swarm_subtask_prompt: swarm_subtask_prompt,
```