opencode-swarm-plugin 0.37.0 → 0.38.0
- package/.hive/issues.jsonl +9 -5
- package/.hive/memories.jsonl +13 -1
- package/.turbo/turbo-build.log +4 -4
- package/.turbo/turbo-test.log +319 -319
- package/CHANGELOG.md +128 -0
- package/README.md +33 -0
- package/bin/swarm.ts +2 -208
- package/dist/hive.d.ts +59 -0
- package/dist/hive.d.ts.map +1 -1
- package/dist/index.d.ts +43 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +453 -118
- package/dist/plugin.js +452 -118
- package/dist/swarm-decompose.d.ts +30 -0
- package/dist/swarm-decompose.d.ts.map +1 -1
- package/dist/swarm.d.ts +15 -0
- package/dist/swarm.d.ts.map +1 -1
- package/evals/README.md +27 -10
- package/examples/plugin-wrapper-template.ts +60 -8
- package/package.json +4 -1
- package/src/compaction-hook.test.ts +97 -2
- package/src/compaction-hook.ts +32 -2
- package/src/swarm-decompose.test.ts +40 -47
- package/src/swarm-orchestrate.test.ts +270 -7
- package/src/swarm-orchestrate.ts +98 -11
- package/src/swarm-prompts.test.ts +121 -0
- package/src/swarm-prompts.ts +295 -2
- package/src/swarm-research.integration.test.ts +157 -0
package/dist/swarm.d.ts
CHANGED
@@ -488,9 +488,24 @@ export declare const swarmTools: {
     description: string;
     args: {
         response: import("zod").ZodString;
+        project_path: import("zod").ZodOptional<import("zod").ZodString>;
+        task: import("zod").ZodOptional<import("zod").ZodString>;
+        context: import("zod").ZodOptional<import("zod").ZodString>;
+        strategy: import("zod").ZodOptional<import("zod").ZodEnum<{
+            "file-based": "file-based";
+            "feature-based": "feature-based";
+            "risk-based": "risk-based";
+            auto: "auto";
+        }>>;
+        epic_id: import("zod").ZodOptional<import("zod").ZodString>;
     };
     execute(args: {
         response: string;
+        project_path?: string | undefined;
+        task?: string | undefined;
+        context?: string | undefined;
+        strategy?: "file-based" | "feature-based" | "risk-based" | "auto" | undefined;
+        epic_id?: string | undefined;
     }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
 };
 swarm_delegate_planning: {
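The added fields above widen one swarm tool's argument schema (judging by the test changes later in this diff, likely the decomposition-validation tool, though the tool name is not visible in this hunk). A minimal sketch of the argument shape the new declarations imply; the interface name and example values are illustrative assumptions, not taken from the package:

```typescript
// Sketch of the argument shape implied by the updated swarm.d.ts declarations.
// The interface name and example values are illustrative assumptions.
type Strategy = "file-based" | "feature-based" | "risk-based" | "auto";

interface DecompositionToolArgs {
  response: string;        // required, unchanged from 0.37.0
  project_path?: string;   // new optional
  task?: string;           // new optional
  context?: string;        // new optional
  strategy?: Strategy;     // new optional enum
  epic_id?: string;        // new optional
}

const example: DecompositionToolArgs = {
  response: JSON.stringify({ epic: { title: "Add OAuth" }, subtasks: [] }),
  task: "Add user authentication",
  strategy: "feature-based",
  epic_id: "epic-123",
};
```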
package/dist/swarm.d.ts.map
CHANGED
@@ -1 +1 @@
{"version":3,"file":"swarm.d.ts","sourceRoot":"","sources":["../src/swarm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAGH,cAAc,oBAAoB,CAAC;AACnC,cAAc,mBAAmB,CAAC;AAClC,cAAc,iBAAiB,CAAC;AAChC,cAAc,qBAAqB,CAAC;AACpC,cAAc,kBAAkB,CAAC;AASjC;;;GAGG;AACH,eAAO,MAAM,UAAU
{"version":3,"file":"swarm.d.ts","sourceRoot":"","sources":["../src/swarm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAGH,cAAc,oBAAoB,CAAC;AACnC,cAAc,mBAAmB,CAAC;AAClC,cAAc,iBAAiB,CAAC;AAChC,cAAc,qBAAqB,CAAC;AACpC,cAAc,kBAAkB,CAAC;AASjC;;;GAGG;AACH,eAAO,MAAM,UAAU;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAMtB,CAAC"}
package/evals/README.md
CHANGED
@@ -5,14 +5,12 @@ TypeScript-native evaluation framework for testing swarm task decomposition qual
 ## Quick Start
 
 ```bash
-# Watch mode for development
-pnpm eval:dev
-
 # Run all evals once
-
+bun run eval:run
 
-#
-
+# Run specific eval suite
+bun run eval:decomposition
+bun run eval:coordinator
 ```
 
 ## Structure
@@ -134,13 +132,32 @@ Scores coordinator discipline during swarm sessions.
 bunx evalite run evals/coordinator-session.eval.ts
 ```
 
-## Data
+## Data Capture
+
+### What Gets Captured
+
+**Decomposition Eval Data:**
+- Task input (user's original request)
+- Generated CellTree JSON (epic + subtasks)
+- Timestamp and context
+- Stored in: `.opencode/eval-data.jsonl`
+
+**Coordinator Session Data:**
+- Real swarm sessions captured during `/swarm` runs
+- Includes: decomposition, spawn events, reviews, violations
+- Stored in: `~/.config/swarm-tools/sessions/*.jsonl`
+
+**Subtask Outcome Data:**
+- Duration, success/failure, error count, retry count
+- Files touched, strategy used
+- Used for learning and pattern maturity
+- Stored in: swarm-mail database (libSQL)
 
-###
+### Data Loaders
 
-
+**lib/data-loader.ts** provides utilities to load eval data:
 
-- `loadEvalCases()` -
+- `loadEvalCases()` - Load eval records from swarm-mail database
 - `loadCapturedSessions()` - Real coordinator sessions from `~/.config/swarm-tools/sessions/`
 - `hasRealEvalData()` - Check if enough real data exists
 - `getEvalDataSummary()` - Stats about available eval data
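A hedged sketch of how these loaders might be wired into an eval script; the relative import path, async signatures, and return shapes are assumptions based on the README text above, not verified against lib/data-loader.ts:

```typescript
// Sketch only: consuming the data loaders described in the README above.
// Import path, async signatures, and return shapes are assumed, not verified.
import {
  loadEvalCases,
  loadCapturedSessions,
  hasRealEvalData,
  getEvalDataSummary,
} from "./lib/data-loader";

async function reportAvailableEvalData(): Promise<void> {
  if (!(await hasRealEvalData())) {
    console.log("No captured eval data yet - run a few /swarm sessions first.");
    return;
  }
  const cases = await loadEvalCases();           // decomposition records (swarm-mail DB)
  const sessions = await loadCapturedSessions(); // real coordinator sessions
  console.log(await getEvalDataSummary());
  console.log(`cases=${cases.length} sessions=${sessions.length}`);
}
```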
package/examples/plugin-wrapper-template.ts
CHANGED
@@ -305,6 +305,34 @@ const hive_sync = tool({
   execute: (args, ctx) => execTool("hive_sync", args, ctx),
 });
 
+const hive_cells = tool({
+  description: `Query cells from the hive database with flexible filtering.
+
+USE THIS TOOL TO:
+- List all open cells: hive_cells()
+- Find cells by status: hive_cells({ status: "in_progress" })
+- Find cells by type: hive_cells({ type: "bug" })
+- Get a specific cell by partial ID: hive_cells({ id: "mjkmd" })
+- Get the next ready (unblocked) cell: hive_cells({ ready: true })
+- Combine filters: hive_cells({ status: "open", type: "task" })
+
+RETURNS: Array of cells with id, title, status, priority, type, parent_id, created_at, updated_at
+
+PREFER THIS OVER hive_query when you need to:
+- See what work is available
+- Check status of multiple cells
+- Find cells matching criteria
+- Look up a cell by partial ID`,
+  args: {
+    id: tool.schema.string().optional().describe("Partial or full cell ID to look up"),
+    status: tool.schema.enum(["open", "in_progress", "blocked", "closed"]).optional().describe("Filter by status"),
+    type: tool.schema.enum(["task", "bug", "feature", "epic", "chore"]).optional().describe("Filter by type"),
+    ready: tool.schema.boolean().optional().describe("If true, return only the next unblocked cell"),
+    limit: tool.schema.number().optional().describe("Max cells to return (default 20)"),
+  },
+  execute: (args, ctx) => execTool("hive_cells", args, ctx),
+});
+
 const beads_link_thread = tool({
   description: "Add metadata linking bead to Agent Mail thread",
   args: {
@@ -1202,9 +1230,18 @@ ${JSON.stringify(snapshot, null, 2)}
 
 Generate a prompt following this structure:
 
+┌─────────────────────────────────────────────────────────────┐
+│ │
+│ 🐝 YOU ARE THE COORDINATOR 🐝 │
+│ │
+│ NOT A WORKER. NOT AN IMPLEMENTER. │
+│ YOU ORCHESTRATE. │
+│ │
+└─────────────────────────────────────────────────────────────┘
+
 # 🐝 Swarm Continuation - [Epic Title or "Unknown"]
 
-
+**NON-NEGOTIABLE: YOU ARE THE COORDINATOR.** You resumed after context compaction.
 
 ## Epic State
 
@@ -1231,15 +1268,29 @@ You are resuming coordination of an active swarm that was interrupted by context
 
 [List 3-5 concrete actions with actual commands, using real IDs from the state]
 
-##
+## 🎯 COORDINATOR MANDATES (NON-NEGOTIABLE)
+
+**YOU ARE THE COORDINATOR. NOT A WORKER.**
+
+### ⛔ FORBIDDEN - NEVER do these:
+- ❌ NEVER use \`edit\`, \`write\`, or \`bash\` for implementation - SPAWN A WORKER
+- ❌ NEVER fetch directly with \`repo-crawl_*\`, \`repo-autopsy_*\`, \`webfetch\`, \`fetch_fetch\` - SPAWN A RESEARCHER
+- ❌ NEVER use \`context7_*\` or \`pdf-brain_*\` directly - SPAWN A RESEARCHER
+- ❌ NEVER reserve files - Workers reserve files
+
+### ✅ ALWAYS do these:
+- ✅ ALWAYS check \`swarm_status\` and \`swarmmail_inbox\` first
+- ✅ ALWAYS use \`swarm_spawn_subtask\` for implementation work
+- ✅ ALWAYS use \`swarm_spawn_researcher\` for external data fetching
+- ✅ ALWAYS review worker output with \`swarm_review\` → \`swarm_review_feedback\`
+- ✅ ALWAYS monitor actively - Check messages every ~10 minutes
+- ✅ ALWAYS unblock aggressively - Resolve dependencies immediately
+
+**If you need external data:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
 
--
-- **Monitor actively** - Check messages every ~10 minutes
-- **Unblock aggressively** - Resolve dependencies immediately
-- **Review thoroughly** - 3-strike rule enforced
-- **Ship it** - When all subtasks done, close the epic
+**3-strike rule enforced:** Workers get 3 review attempts. After 3 rejections, escalate to human.
 
-Keep the prompt concise but actionable. Use actual data from the snapshot, not placeholders.`;
+Keep the prompt concise but actionable. Use actual data from the snapshot, not placeholders. Include the ASCII header and ALL coordinator mandates.`;
 
 logCompaction("debug", "generate_compaction_prompt_calling_llm", {
   session_id: snapshot.sessionID,
@@ -1896,6 +1947,7 @@ const SwarmPlugin: Plugin = async (
   hive_close,
   hive_start,
   hive_ready,
+  hive_cells,
   hive_sync,
   beads_link_thread,
   // Swarm Mail (Embedded)
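For orientation, the prompt-template hunks above feed a swarm snapshot into an LLM call; only the template text, `snapshot.sessionID`, and `logCompaction` appear in the diff. A minimal sketch of how such a continuation prompt could be assembled, with the snapshot type and helper name as assumptions:

```typescript
// Sketch: assembling a coordinator continuation prompt from a swarm snapshot.
// `SwarmSnapshot` and `buildContinuationPrompt` are hypothetical names; only the
// template text, `snapshot.sessionID`, and `logCompaction` appear in the diff above.
interface SwarmSnapshot {
  sessionID: string;
  epicTitle?: string;
  subtasks: Array<{ id: string; status: string }>;
}

function buildContinuationPrompt(snapshot: SwarmSnapshot): string {
  return [
    "🐝 YOU ARE THE COORDINATOR - NOT A WORKER. NOT AN IMPLEMENTER. YOU ORCHESTRATE.",
    `# 🐝 Swarm Continuation - ${snapshot.epicTitle ?? "Unknown"}`,
    "**NON-NEGOTIABLE: YOU ARE THE COORDINATOR.** You resumed after context compaction.",
    "## Epic State",
    JSON.stringify(snapshot, null, 2),
  ].join("\n\n");
}
```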
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "opencode-swarm-plugin",
-  "version": "0.37.0",
+  "version": "0.38.0",
   "description": "Multi-agent swarm coordination for OpenCode with learning capabilities, beads integration, and Agent Mail",
   "type": "module",
   "main": "./dist/index.js",
@@ -30,6 +30,9 @@
     "test:all": "bun test --timeout 60000 src/",
     "test:watch": "bun test --watch src/",
     "typecheck": "tsc --noEmit",
+    "eval:run": "bunx evalite run evals/",
+    "eval:decomposition": "bunx evalite run evals/swarm-decomposition.eval.ts",
+    "eval:coordinator": "bunx evalite run evals/coordinator-session.eval.ts",
     "postinstall": "node -e \"console.log('\\n\\x1b[33m Run \\x1b[36mswarm setup\\x1b[33m to configure OpenCode integration\\x1b[0m\\n')\""
   },
   "dependencies": {
package/src/compaction-hook.test.ts
CHANGED
@@ -64,7 +64,7 @@ describe("Compaction Hook", () => {
   describe("SWARM_COMPACTION_CONTEXT", () => {
     it("contains coordinator instructions", () => {
       expect(SWARM_COMPACTION_CONTEXT).toContain("COORDINATOR");
-      expect(SWARM_COMPACTION_CONTEXT).toContain("
+      expect(SWARM_COMPACTION_CONTEXT).toContain("YOU ARE THE COORDINATOR");
     });
 
     it("contains prohibition-first anti-patterns", () => {
@@ -136,7 +136,7 @@ describe("Compaction Hook", () => {
     it("HIGH confidence triggers full context", async () => {
       // This would need proper mocking of active reservations
       // For now, just verify the context strings exist
-      expect(SWARM_COMPACTION_CONTEXT).toContain("
+      expect(SWARM_COMPACTION_CONTEXT).toContain("YOU ARE THE COORDINATOR");
     });
 
     it("LOW confidence triggers fallback prompt", async () => {
@@ -145,6 +145,101 @@ describe("Compaction Hook", () => {
     });
   });
 
+  describe("Forbidden tools anti-pattern (TDD red phase)", () => {
+    it("SWARM_COMPACTION_CONTEXT includes 'NEVER fetch directly' rule", () => {
+      // Should warn against direct fetching
+      expect(SWARM_COMPACTION_CONTEXT).toContain("NEVER");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("repo-crawl");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("webfetch");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("fetch_fetch");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("context7");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("pdf-brain");
+    });
+
+    it("SWARM_COMPACTION_CONTEXT instructs to spawn researcher instead", () => {
+      expect(SWARM_COMPACTION_CONTEXT).toContain("SPAWN A RESEARCHER");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("swarm_spawn_researcher");
+    });
+
+    it("lists all forbidden repo-crawl tools", () => {
+      const forbiddenTools = [
+        "repo-crawl_file",
+        "repo-crawl_readme",
+        "repo-crawl_search",
+        "repo-crawl_structure",
+        "repo-crawl_tree"
+      ];
+
+      for (const tool of forbiddenTools) {
+        expect(SWARM_COMPACTION_CONTEXT).toContain(tool);
+      }
+    });
+
+    it("lists all forbidden repo-autopsy tools", () => {
+      expect(SWARM_COMPACTION_CONTEXT).toContain("repo-autopsy");
+    });
+
+    it("lists all forbidden context7 tools", () => {
+      const forbiddenTools = [
+        "context7_resolve-library-id",
+        "context7_get-library-docs"
+      ];
+
+      for (const tool of forbiddenTools) {
+        expect(SWARM_COMPACTION_CONTEXT).toContain(tool);
+      }
+    });
+
+    it("lists all forbidden pdf-brain tools", () => {
+      const forbiddenTools = [
+        "pdf-brain_search",
+        "pdf-brain_read"
+      ];
+
+      for (const tool of forbiddenTools) {
+        expect(SWARM_COMPACTION_CONTEXT).toContain(tool);
+      }
+    });
+  });
+
+  describe("Coordinator identity reinforcement (TDD red phase)", () => {
+    it("includes ASCII header for coordinator identity", () => {
+      // Should have prominent visual indicator
+      expect(SWARM_COMPACTION_CONTEXT).toMatch(/[╔═╗║╚╝]|[┌─┐│└┘]|[█▀▄]/);
+    });
+
+    it("repeats 'YOU ARE THE COORDINATOR' multiple times", () => {
+      const matches = SWARM_COMPACTION_CONTEXT.match(/YOU ARE THE COORDINATOR/gi);
+      expect(matches).toBeDefined();
+      expect(matches!.length).toBeGreaterThanOrEqual(2);
+    });
+
+    it("uses strong imperative language NEVER/ALWAYS/NON-NEGOTIABLE", () => {
+      expect(SWARM_COMPACTION_CONTEXT).toContain("NEVER");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("ALWAYS");
+      expect(SWARM_COMPACTION_CONTEXT).toContain("NON-NEGOTIABLE");
+    });
+
+    it("makes role unmistakable with multiple strong statements", () => {
+      // Check for strong coordinator identity statements
+      const identityPatterns = [
+        /YOU ARE THE COORDINATOR/i,
+        /NOT A WORKER/i,
+        /ORCHESTRATE/i,
+        /DO NOT IMPLEMENT/i
+      ];
+
+      let matchCount = 0;
+      for (const pattern of identityPatterns) {
+        if (pattern.test(SWARM_COMPACTION_CONTEXT)) {
+          matchCount++;
+        }
+      }
+
+      expect(matchCount).toBeGreaterThanOrEqual(3);
+    });
+  });
+
   describe("Specific swarm state injection (TDD red phase)", () => {
     it("includes specific epic ID when in_progress epic exists", async () => {
       // Mock hive with an in_progress epic
package/src/compaction-hook.ts
CHANGED
@@ -68,9 +68,21 @@ function getLog() {
  * This is NOT about preserving state for a human - it's about the swarm continuing
  * autonomously after context compression.
  */
-export const SWARM_COMPACTION_CONTEXT =
+export const SWARM_COMPACTION_CONTEXT = `
+┌─────────────────────────────────────────────────────────────┐
+│ │
+│ 🐝 YOU ARE THE COORDINATOR 🐝 │
+│ │
+│ NOT A WORKER. NOT AN IMPLEMENTER. │
+│ YOU ORCHESTRATE. │
+│ │
+└─────────────────────────────────────────────────────────────┘
 
-
+## 🎯 NON-NEGOTIABLE: YOU ARE THE COORDINATOR
+
+Context was compacted but the swarm is still running. **YOU ARE THE COORDINATOR.**
+
+Your role is ORCHESTRATION, not implementation. When you catch yourself about to do work directly, STOP.
 
 ### ⛔ NEVER DO THESE (Coordinator Anti-Patterns)
 
@@ -81,9 +93,27 @@ Context was compacted but the swarm is still running. You are the **COORDINATOR*
 - ❌ **NEVER** implement features yourself - SPAWN A WORKER
 - ❌ **NEVER** "just do it myself to save time" - NO. SPAWN A WORKER.
 - ❌ **NEVER** reserve files with \`swarmmail_reserve\` - Workers reserve files
+- ❌ **NEVER** fetch files/docs directly - SPAWN A RESEARCHER
 
 **If you catch yourself about to edit a file, STOP. Use \`swarm_spawn_subtask\` instead.**
 
+### 🚫 FORBIDDEN TOOLS (Coordinators MUST delegate these)
+
+**NEVER use these tools directly. ALWAYS spawn a researcher worker via \`swarm_spawn_researcher\`:**
+
+**Repository fetching:**
+- \`repo-crawl_file\`, \`repo-crawl_readme\`, \`repo-crawl_search\`, \`repo-crawl_structure\`, \`repo-crawl_tree\`
+- \`repo-autopsy_*\` (all repo-autopsy tools)
+
+**Web/documentation fetching:**
+- \`webfetch\`, \`fetch_fetch\`
+- \`context7_resolve-library-id\`, \`context7_get-library-docs\`
+
+**Knowledge base:**
+- \`pdf-brain_search\`, \`pdf-brain_read\`
+
+**If you need external data:** Use \`swarm_spawn_researcher\` with a clear research task. The researcher will fetch, summarize, and return findings.
+
 ### ✅ ALWAYS DO THESE (Coordinator Checklist)
 
 On resume, execute this checklist IN ORDER:
package/src/swarm-decompose.test.ts
CHANGED
@@ -6,7 +6,7 @@
  * TDD: Testing eval capture integration - verifies captureDecomposition() is called
  * after successful validation with correct parameters.
  */
-import { afterEach, beforeEach, describe, expect, test,
+import { afterEach, beforeEach, describe, expect, test, spyOn } from "bun:test";
 import * as fs from "node:fs";
 import { swarm_validate_decomposition } from "./swarm-decompose";
 import * as evalCapture from "./eval-capture.js";
@@ -41,15 +41,8 @@ afterEach(() => {
 
 describe("captureDecomposition integration", () => {
   test("calls captureDecomposition after successful validation with all params", async () => {
-    //
-    const captureDecompositionSpy =
-      id: "test-epic-123",
-      timestamp: new Date().toISOString(),
-      task: "Add user authentication",
-    }));
-    const original = evalCapture.captureDecomposition;
-    // @ts-expect-error - mocking for test
-    evalCapture.captureDecomposition = captureDecompositionSpy;
+    // Spy on captureDecomposition
+    const captureDecompositionSpy = spyOn(evalCapture, "captureDecomposition");
 
     const validCellTree = JSON.stringify({
       epic: {
@@ -91,27 +84,37 @@ describe("captureDecomposition integration", () => {
 
     // Verify captureDecomposition was called with correct params
     expect(captureDecompositionSpy).toHaveBeenCalledTimes(1);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    expect(captureDecompositionSpy).toHaveBeenCalledWith({
+      epicId: "test-epic-123",
+      projectPath: testProjectPath,
+      task: "Add user authentication",
+      context: "Using NextAuth.js",
+      strategy: "feature-based",
+      epicTitle: "Add OAuth",
+      epicDescription: "Implement OAuth authentication",
+      subtasks: [
+        {
+          title: "Add OAuth provider config",
+          description: "Set up Google OAuth",
+          files: ["src/auth/google.ts", "src/auth/config.ts"],
+          dependencies: [],
+          estimated_complexity: 2,
+        },
+        {
+          title: "Add login UI",
+          description: "Create login button component",
+          files: ["src/components/LoginButton.tsx"],
+          dependencies: [0],
+          estimated_complexity: 1,
+        },
+      ],
+    });
+
+    captureDecompositionSpy.mockRestore();
   });
 
   test("does not call captureDecomposition when validation fails", async () => {
-    const captureDecompositionSpy =
-    const original = evalCapture.captureDecomposition;
-    // @ts-expect-error - mocking for test
-    evalCapture.captureDecomposition = captureDecompositionSpy;
+    const captureDecompositionSpy = spyOn(evalCapture, "captureDecomposition");
 
     // Invalid CellTree - missing required fields
     const invalidCellTree = JSON.stringify({
@@ -136,20 +139,11 @@ describe("captureDecomposition integration", () => {
     // Verify captureDecomposition was NOT called
     expect(captureDecompositionSpy).not.toHaveBeenCalled();
 
-
-    // @ts-expect-error - restoring mock
-    evalCapture.captureDecomposition = original;
+    captureDecompositionSpy.mockRestore();
   });
 
   test("handles optional context and description fields", async () => {
-    const captureDecompositionSpy =
-      id: "test-epic-789",
-      timestamp: new Date().toISOString(),
-      task: "Fix the auth bug",
-    }));
-    const original = evalCapture.captureDecomposition;
-    // @ts-expect-error - mocking for test
-    evalCapture.captureDecomposition = captureDecompositionSpy;
+    const captureDecompositionSpy = spyOn(evalCapture, "captureDecomposition");
 
     const validCellTree = JSON.stringify({
       epic: {
@@ -183,13 +177,12 @@ describe("captureDecomposition integration", () => {
 
     // Verify captureDecomposition was called without optional fields
     expect(captureDecompositionSpy).toHaveBeenCalledTimes(1);
-    const
-    expect(
-    expect(
-
-
-
-
-    evalCapture.captureDecomposition = original;
+    const call = captureDecompositionSpy.mock.calls[0];
+    expect(call[0].epicId).toBe("test-epic-789");
+    expect(call[0].context).toBeUndefined();
+    // Schema default makes description empty string instead of undefined
+    expect(call[0].epicDescription).toBe("");
+
+    captureDecompositionSpy.mockRestore();
   });
 });
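The swarm-decompose test changes above replace manual monkey-patching of `evalCapture.captureDecomposition` with bun:test's `spyOn`. A condensed, self-contained sketch of that pattern (the module path is the one imported in the diff; the test body is illustrative):

```typescript
// Condensed sketch of the spyOn pattern adopted in the test diff above.
import { expect, spyOn, test } from "bun:test";
import * as evalCapture from "./eval-capture.js";

test("spyOn replaces manual monkey-patching", () => {
  // spyOn wraps the real export and records calls; no @ts-expect-error needed.
  const spy = spyOn(evalCapture, "captureDecomposition");

  // Code under test would invoke evalCapture.captureDecomposition(...) here;
  // arguments for each call are then available via spy.mock.calls.
  expect(spy).not.toHaveBeenCalled();

  // Always restore so later tests see the original implementation.
  spy.mockRestore();
});
```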