opencode-swarm-plugin 0.30.6 → 0.31.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.hive/memories.jsonl +10 -0
- package/.turbo/turbo-build.log +3 -3
- package/.turbo/turbo-test.log +339 -339
- package/CHANGELOG.md +103 -0
- package/dist/hive.d.ts.map +1 -1
- package/dist/index.d.ts +10 -6
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +241 -49
- package/dist/memory-tools.d.ts +4 -0
- package/dist/memory-tools.d.ts.map +1 -1
- package/dist/memory.d.ts +2 -0
- package/dist/memory.d.ts.map +1 -1
- package/dist/model-selection.d.ts +37 -0
- package/dist/model-selection.d.ts.map +1 -0
- package/dist/plugin.js +241 -49
- package/dist/schemas/task.d.ts +2 -0
- package/dist/schemas/task.d.ts.map +1 -1
- package/dist/swarm-decompose.d.ts +8 -8
- package/dist/swarm-decompose.d.ts.map +1 -1
- package/dist/swarm-orchestrate.d.ts.map +1 -1
- package/dist/swarm-prompts.d.ts +11 -7
- package/dist/swarm-prompts.d.ts.map +1 -1
- package/dist/swarm.d.ts +8 -6
- package/dist/swarm.d.ts.map +1 -1
- package/opencode-swarm-plugin-0.30.7.tgz +0 -0
- package/package.json +2 -2
- package/src/hive.integration.test.ts +332 -3
- package/src/hive.ts +171 -13
- package/src/memory-tools.ts +5 -1
- package/src/memory.ts +9 -0
- package/src/swarm-decompose.ts +7 -11
- package/src/swarm-orchestrate.ts +27 -1
- package/src/swarm-prompts.test.ts +12 -9
- package/src/swarm-prompts.ts +43 -19
- package/src/swarm.integration.test.ts +74 -4
package/dist/swarm-prompts.d.ts
CHANGED
@@ -17,11 +17,11 @@
  * Used by swarm_decompose to instruct the agent on how to break down work.
  * The agent responds with a CellTree that gets validated.
  */
-export declare const DECOMPOSITION_PROMPT = "You are decomposing a task into parallelizable subtasks for a swarm of agents.\n\n## Task\n{task}\n\n{context_section}\n\n## MANDATORY: Hive Issue Tracking\n\n**Every subtask MUST become a cell.** This is non-negotiable.\n\nAfter decomposition, the coordinator will:\n1. Create an epic cell for the overall task\n2. Create child cells for each subtask\n3. Track progress through cell status updates\n4. Close cells with summaries when complete\n\nAgents MUST update their cell status as they work. No silent progress.\n\n## Requirements\n\n1. **Break into
+export declare const DECOMPOSITION_PROMPT = "You are decomposing a task into parallelizable subtasks for a swarm of agents.\n\n## Task\n{task}\n\n{context_section}\n\n## MANDATORY: Hive Issue Tracking\n\n**Every subtask MUST become a cell.** This is non-negotiable.\n\nAfter decomposition, the coordinator will:\n1. Create an epic cell for the overall task\n2. Create child cells for each subtask\n3. Track progress through cell status updates\n4. Close cells with summaries when complete\n\nAgents MUST update their cell status as they work. No silent progress.\n\n## Requirements\n\n1. **Break into independent subtasks** that can run in parallel (as many as needed)\n2. **Assign files** - each subtask must specify which files it will modify\n3. **No file overlap** - files cannot appear in multiple subtasks (they get exclusive locks)\n4. **Order by dependency** - if subtask B needs subtask A's output, A must come first in the array\n5. **Estimate complexity** - 1 (trivial) to 5 (complex)\n6. **Plan aggressively** - break down more than you think necessary, smaller is better\n\n## Response Format\n\nRespond with a JSON object matching this schema:\n\n```typescript\n{\n  epic: {\n    title: string,              // Epic title for the hive tracker\n    description?: string        // Brief description of the overall goal\n  },\n  subtasks: [\n    {\n      title: string,            // What this subtask accomplishes\n      description?: string,     // Detailed instructions for the agent\n      files: string[],          // Files this subtask will modify (globs allowed)\n      dependencies: number[],   // Indices of subtasks this depends on (0-indexed)\n      estimated_complexity: 1-5 // Effort estimate\n    },\n    // ... more subtasks\n  ]\n}\n```\n\n## Guidelines\n\n- **Plan aggressively** - when in doubt, split further. 3 small tasks > 1 medium task\n- **Prefer smaller, focused subtasks** over large complex ones\n- **Include test files** in the same subtask as the code they test\n- **Consider shared types** - if multiple files share types, handle that first\n- **Think about imports** - changes to exported APIs affect downstream files\n- **Explicit > implicit** - spell out what each subtask should do, don't assume\n\n## File Assignment Examples\n\n- Schema change: `[\"src/schemas/user.ts\", \"src/schemas/index.ts\"]`\n- Component + test: `[\"src/components/Button.tsx\", \"src/components/Button.test.tsx\"]`\n- API route: `[\"src/app/api/users/route.ts\"]`\n\nNow decompose the task:";
 /**
  * Strategy-specific decomposition prompt template
  */
-export declare const STRATEGY_DECOMPOSITION_PROMPT = "You are decomposing a task into parallelizable subtasks for a swarm of agents.\n\n## Task\n{task}\n\n{strategy_guidelines}\n\n{context_section}\n\n{cass_history}\n\n{skills_context}\n\n## MANDATORY: Hive Issue Tracking\n\n**Every subtask MUST become a cell.** This is non-negotiable.\n\nAfter decomposition, the coordinator will:\n1. Create an epic cell for the overall task\n2. Create child cells for each subtask\n3. Track progress through cell status updates\n4. Close cells with summaries when complete\n\nAgents MUST update their cell status as they work. No silent progress.\n\n## Requirements\n\n1. **Break into
+export declare const STRATEGY_DECOMPOSITION_PROMPT = "You are decomposing a task into parallelizable subtasks for a swarm of agents.\n\n## Task\n{task}\n\n{strategy_guidelines}\n\n{context_section}\n\n{cass_history}\n\n{skills_context}\n\n## MANDATORY: Hive Issue Tracking\n\n**Every subtask MUST become a cell.** This is non-negotiable.\n\nAfter decomposition, the coordinator will:\n1. Create an epic cell for the overall task\n2. Create child cells for each subtask\n3. Track progress through cell status updates\n4. Close cells with summaries when complete\n\nAgents MUST update their cell status as they work. No silent progress.\n\n## Requirements\n\n1. **Break into independent subtasks** that can run in parallel (as many as needed)\n2. **Assign files** - each subtask must specify which files it will modify\n3. **No file overlap** - files cannot appear in multiple subtasks (they get exclusive locks)\n4. **Order by dependency** - if subtask B needs subtask A's output, A must come first in the array\n5. **Estimate complexity** - 1 (trivial) to 5 (complex)\n6. **Plan aggressively** - break down more than you think necessary, smaller is better\n\n## Response Format\n\nRespond with a JSON object matching this schema:\n\n```typescript\n{\n  epic: {\n    title: string,              // Epic title for the hive tracker\n    description?: string        // Brief description of the overall goal\n  },\n  subtasks: [\n    {\n      title: string,            // What this subtask accomplishes\n      description?: string,     // Detailed instructions for the agent\n      files: string[],          // Files this subtask will modify (globs allowed)\n      dependencies: number[],   // Indices of subtasks this depends on (0-indexed)\n      estimated_complexity: 1-5 // Effort estimate\n    },\n    // ... more subtasks\n  ]\n}\n```\n\nNow decompose the task:";
 /**
  * Prompt template for spawned subtask agents.
  *
@@ -37,7 +37,7 @@ export declare const SUBTASK_PROMPT = "You are a swarm agent working on a subtas
  *
  * Supports {error_context} placeholder for retry prompts.
  */
-export declare const SUBTASK_PROMPT_V2 = "You are a swarm agent working on: **{subtask_title}**\n\n## [IDENTITY]\nAgent: (assigned at spawn)\nCell: {bead_id}\nEpic: {epic_id}\n\n## [TASK]\n{subtask_description}\n\n## [FILES]\nReserved (exclusive):\n{file_list}\n\nOnly modify these files. Need others? Message the coordinator.\n\n## [CONTEXT]\n{shared_context}\n\n{compressed_context}\n\n{error_context}\n\n## [MANDATORY SURVIVAL CHECKLIST]\n\n**CRITICAL: Follow this checklist IN ORDER. Each step builds on the previous.**\n\n### Step 1: Initialize Coordination (REQUIRED - DO THIS FIRST)\n```\nswarmmail_init(project_path=\"{project_path}\", task_description=\"{bead_id}: {subtask_title}\")\n```\n\n**This registers you with the coordination system and enables:**\n- File reservation tracking\n- Inter-agent communication\n- Progress monitoring\n- Conflict detection\n\n**If you skip this step, your work will not be tracked and swarm_complete will fail.**\n\n### Step 2: Query Past Learnings (BEFORE starting work)\n```\nsemantic-memory_find(query=\"<keywords from your task>\", limit=5)\n```\n\n**
+export declare const SUBTASK_PROMPT_V2 = "You are a swarm agent working on: **{subtask_title}**\n\n## [IDENTITY]\nAgent: (assigned at spawn)\nCell: {bead_id}\nEpic: {epic_id}\n\n## [TASK]\n{subtask_description}\n\n## [FILES]\nReserved (exclusive):\n{file_list}\n\nOnly modify these files. Need others? Message the coordinator.\n\n## [CONTEXT]\n{shared_context}\n\n{compressed_context}\n\n{error_context}\n\n## [MANDATORY SURVIVAL CHECKLIST]\n\n**CRITICAL: Follow this checklist IN ORDER. Each step builds on the previous.**\n\n### Step 1: Initialize Coordination (REQUIRED - DO THIS FIRST)\n```\nswarmmail_init(project_path=\"{project_path}\", task_description=\"{bead_id}: {subtask_title}\")\n```\n\n**This registers you with the coordination system and enables:**\n- File reservation tracking\n- Inter-agent communication\n- Progress monitoring\n- Conflict detection\n\n**If you skip this step, your work will not be tracked and swarm_complete will fail.**\n\n### Step 2: \uD83E\uDDE0 Query Past Learnings (MANDATORY - BEFORE starting work)\n\n**\u26A0\uFE0F CRITICAL: ALWAYS query semantic memory BEFORE writing ANY code.**\n\n```\nsemantic-memory_find(query=\"<keywords from your task>\", limit=5, expand=true)\n```\n\n**Why this is MANDATORY:**\n- Past agents may have already solved your exact problem\n- Avoids repeating mistakes that wasted 30+ minutes before\n- Discovers project-specific patterns and gotchas\n- Finds known workarounds for tool/library quirks\n\n**Search Query Examples by Task Type:**\n\n- **Bug fix**: Use exact error message or \"<symptom> <component>\"\n- **New feature**: Search \"<domain concept> implementation pattern\"\n- **Refactor**: Query \"<pattern name> migration approach\"\n- **Integration**: Look for \"<library name> gotchas configuration\"\n- **Testing**: Find \"testing <component type> characterization tests\"\n- **Performance**: Search \"<technology> performance optimization\"\n\n**BEFORE you start coding:**\n1. Run semantic-memory_find with keywords from your task\n2. Read the results with expand=true for full content\n3. Check if any memory solves your problem or warns of pitfalls\n4. Adjust your approach based on past learnings\n\n**If you skip this step, you WILL waste time solving already-solved problems.**\n\n### Step 3: Load Relevant Skills (if available)\n```\nskills_list() # See what skills exist\nskills_use(name=\"<relevant-skill>\", context=\"<your task>\") # Load skill\n```\n\n**Common skill triggers:**\n- Writing tests? \u2192 `skills_use(name=\"testing-patterns\")`\n- Breaking dependencies? \u2192 `skills_use(name=\"testing-patterns\")`\n- Multi-agent coordination? \u2192 `skills_use(name=\"swarm-coordination\")`\n- Building a CLI? \u2192 `skills_use(name=\"cli-builder\")`\n\n### Step 4: Reserve Your Files (YOU reserve, not coordinator)\n```\nswarmmail_reserve(\n paths=[{file_list}],\n reason=\"{bead_id}: {subtask_title}\",\n exclusive=true\n)\n```\n\n**Workers reserve their own files.** This prevents edit conflicts with other agents.\n\n### Step 5: Do the Work (TDD MANDATORY)\n\n**Follow RED \u2192 GREEN \u2192 REFACTOR. No exceptions.**\n\n1. **RED**: Write a failing test that describes the expected behavior\n - Test MUST fail before you write implementation\n - If test passes immediately, your test is wrong\n \n2. **GREEN**: Write minimal code to make the test pass\n - Don't over-engineer - just make it green\n - Hardcode if needed, refactor later\n \n3. **REFACTOR**: Clean up while tests stay green\n - Run tests after every change\n - If tests break, undo and try again\n\n```bash\n# Run tests continuously\nbun test <your-test-file> --watch\n```\n\n**Why TDD?**\n- Catches bugs before they exist\n- Documents expected behavior\n- Enables fearless refactoring\n- Proves your code works\n\n### Step 6: Report Progress at Milestones\n```\nswarm_progress(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\",\n status=\"in_progress\",\n progress_percent=25, # or 50, 75\n message=\"<what you just completed>\"\n)\n```\n\n**Report at 25%, 50%, 75% completion.** This:\n- Triggers auto-checkpoint (saves context)\n- Keeps coordinator informed\n- Prevents silent failures\n\n### Step 7: Manual Checkpoint BEFORE Risky Operations\n```\nswarm_checkpoint(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\"\n)\n```\n\n**Call BEFORE:**\n- Large refactors\n- File deletions\n- Breaking API changes\n- Anything that might fail catastrophically\n\n**Checkpoints preserve context so you can recover if things go wrong.**\n\n### Step 8: \uD83D\uDCBE STORE YOUR LEARNINGS (if you discovered something)\n\n**If you learned it the hard way, STORE IT so the next agent doesn't have to.**\n\n```\nsemantic-memory_store(\n information=\"<what you learned, WHY it matters, how to apply it>\",\n tags=\"<domain, tech-stack, pattern-type>\"\n)\n```\n\n**MANDATORY Storage Triggers - Store when you:**\n- \uD83D\uDC1B **Solved a tricky bug** (>15min debugging) - include root cause + solution\n- \uD83D\uDCA1 **Discovered a project-specific pattern** - domain rules, business logic quirks\n- \u26A0\uFE0F **Found a tool/library gotcha** - API quirks, version-specific bugs, workarounds\n- \uD83D\uDEAB **Tried an approach that failed** - anti-patterns to avoid, why it didn't work\n- \uD83C\uDFD7\uFE0F **Made an architectural decision** - reasoning, alternatives considered, tradeoffs\n\n**What Makes a GOOD Memory:**\n\n\u2705 **GOOD** (actionable, explains WHY):\n```\n\"OAuth refresh tokens need 5min buffer before expiry to avoid race conditions.\nWithout buffer, token refresh can fail mid-request if expiry happens between\ncheck and use. Implemented with: if (expiresAt - Date.now() < 300000) refresh()\"\n```\n\n\u274C **BAD** (generic, no context):\n```\n\"Fixed the auth bug by adding a null check\"\n```\n\n**What NOT to Store:**\n- Generic knowledge that's in official documentation\n- Implementation details that change frequently\n- Vague descriptions without context (\"fixed the thing\")\n\n**The WHY matters more than the WHAT.** Future agents need context to apply your learning.\n\n### Step 9: Complete (REQUIRED - releases reservations)\n```\nswarm_complete(\n project_key=\"{project_path}\",\n agent_name=\"<your-agent-name>\",\n bead_id=\"{bead_id}\",\n summary=\"<what you accomplished>\",\n files_touched=[\"list\", \"of\", \"files\"]\n)\n```\n\n**This automatically:**\n- Runs UBS bug scan\n- Releases file reservations\n- Records learning signals\n- Notifies coordinator\n\n**DO NOT manually close the cell with hive_close.** Use swarm_complete.\n\n## [SWARM MAIL COMMUNICATION]\n\n### Check Inbox Regularly\n```\nswarmmail_inbox() # Check for coordinator messages\nswarmmail_read_message(message_id=N) # Read specific message\n```\n\n### When Blocked\n```\nswarmmail_send(\n to=[\"coordinator\"],\n subject=\"BLOCKED: {bead_id}\",\n body=\"<blocker description, what you need>\",\n importance=\"high\",\n thread_id=\"{epic_id}\"\n)\nhive_update(id=\"{bead_id}\", status=\"blocked\")\n```\n\n### Report Issues to Other Agents\n```\nswarmmail_send(\n to=[\"OtherAgent\", \"coordinator\"],\n subject=\"Issue in {bead_id}\",\n body=\"<describe problem, don't fix their code>\",\n thread_id=\"{epic_id}\"\n)\n```\n\n### Manual Release (if needed)\n```\nswarmmail_release() # Manually release reservations\n```\n\n**Note:** `swarm_complete` automatically releases reservations. Only use manual release if aborting work.\n\n## [OTHER TOOLS]\n### Hive - You Have Autonomy to File Issues\nYou can create new cells against this epic when you discover:\n- **Bugs**: Found a bug while working? File it.\n- **Tech debt**: Spotted something that needs cleanup? File it.\n- **Follow-up work**: Task needs more work than scoped? File a follow-up.\n- **Dependencies**: Need something from another agent? File and link it.\n\n```\nhive_create(\n title=\"<descriptive title>\",\n type=\"bug\", # or \"task\", \"chore\"\n priority=2,\n parent_id=\"{epic_id}\", # Links to this epic\n description=\"Found while working on {bead_id}: <details>\"\n)\n```\n\n**Don't silently ignore issues.** File them so they get tracked and addressed.\n\nOther cell operations:\n- hive_update(id, status) - Mark blocked if stuck\n- hive_query(status=\"open\") - See what else needs work\n\n### Skills\n- skills_list() - Discover available skills\n- skills_use(name) - Activate skill for specialized guidance\n- skills_create(name) - Create new skill (if you found a reusable pattern)\n\n## [CRITICAL REQUIREMENTS]\n\n**NON-NEGOTIABLE:**\n1. Step 1 (swarmmail_init) MUST be first - do it before anything else\n2. \uD83E\uDDE0 Step 2 (semantic-memory_find) MUST happen BEFORE starting work - query first, code second\n3. Step 4 (swarmmail_reserve) - YOU reserve files, not coordinator\n4. Step 6 (swarm_progress) - Report at milestones, don't work silently\n5. \uD83D\uDCBE Step 8 (semantic-memory_store) - If you learned something hard, STORE IT\n6. Step 9 (swarm_complete) - Use this to close, NOT hive_close\n\n**If you skip these steps:**\n- Your work won't be tracked (swarm_complete will fail)\n- \uD83D\uDD04 You'll waste time repeating already-solved problems (no semantic memory query)\n- Edit conflicts with other agents (no file reservation)\n- Lost work if you crash (no checkpoints)\n- \uD83D\uDD04 Future agents repeat YOUR mistakes (no learnings stored)\n\n**Memory is the swarm's collective intelligence. Query it. Feed it.**\n\nBegin now.";
 /**
  * Prompt for self-evaluation before completing a subtask.
  *
@@ -130,6 +130,7 @@ export declare const swarm_spawn_subtask: {
 skills_to_load: import("zod").ZodOptional<import("zod").ZodArray<import("zod").ZodString>>;
 coordinator_notes: import("zod").ZodOptional<import("zod").ZodString>;
 }, import("zod/v4/core").$strip>>;
+model: import("zod").ZodOptional<import("zod").ZodString>;
 };
 execute(args: {
 bead_id: string;
@@ -144,6 +145,7 @@ export declare const swarm_spawn_subtask: {
 skills_to_load?: string[] | undefined;
 coordinator_notes?: string | undefined;
 } | undefined;
+model?: string | undefined;
 }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
 };
 /**
@@ -178,7 +180,7 @@ export declare const swarm_plan_prompt: {
 "risk-based": "risk-based";
 auto: "auto";
 }>>;
-max_subtasks: import("zod").
+max_subtasks: import("zod").ZodOptional<import("zod").ZodNumber>;
 context: import("zod").ZodOptional<import("zod").ZodString>;
 query_cass: import("zod").ZodOptional<import("zod").ZodBoolean>;
 cass_limit: import("zod").ZodOptional<import("zod").ZodNumber>;
@@ -186,8 +188,8 @@ export declare const swarm_plan_prompt: {
 };
 execute(args: {
 task: string;
-max_subtasks: number;
 strategy?: "file-based" | "feature-based" | "risk-based" | "auto" | undefined;
+max_subtasks?: number | undefined;
 context?: string | undefined;
 query_cass?: boolean | undefined;
 cass_limit?: number | undefined;
@@ -233,6 +235,7 @@ export declare const promptTools: {
 skills_to_load: import("zod").ZodOptional<import("zod").ZodArray<import("zod").ZodString>>;
 coordinator_notes: import("zod").ZodOptional<import("zod").ZodString>;
 }, import("zod/v4/core").$strip>>;
+model: import("zod").ZodOptional<import("zod").ZodString>;
 };
 execute(args: {
 bead_id: string;
@@ -247,6 +250,7 @@ export declare const promptTools: {
 skills_to_load?: string[] | undefined;
 coordinator_notes?: string | undefined;
 } | undefined;
+model?: string | undefined;
 }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
 };
 swarm_evaluation_prompt: {
@@ -272,7 +276,7 @@ export declare const promptTools: {
 "risk-based": "risk-based";
 auto: "auto";
 }>>;
-max_subtasks: import("zod").
+max_subtasks: import("zod").ZodOptional<import("zod").ZodNumber>;
 context: import("zod").ZodOptional<import("zod").ZodString>;
 query_cass: import("zod").ZodOptional<import("zod").ZodBoolean>;
 cass_limit: import("zod").ZodOptional<import("zod").ZodNumber>;
@@ -280,8 +284,8 @@ export declare const promptTools: {
 };
 execute(args: {
 task: string;
-max_subtasks: number;
 strategy?: "file-based" | "feature-based" | "risk-based" | "auto" | undefined;
+max_subtasks?: number | undefined;
 context?: string | undefined;
 query_cass?: boolean | undefined;
 cass_limit?: number | undefined;
package/dist/swarm-prompts.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"swarm-prompts.d.ts","sourceRoot":"","sources":["../src/swarm-prompts.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AASH;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB,
+{"version":3,"file":"swarm-prompts.d.ts","sourceRoot":"","sources":["../src/swarm-prompts.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AASH;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB,s6EAkET,CAAC;AAEzB;;GAEG;AACH,eAAO,MAAM,6BAA6B,mxDAyDlB,CAAC;AAEzB;;;;;GAKG;AACH,eAAO,MAAM,cAAc,mkFAgFK,CAAC;AAEjC;;;;;;;GAOG;AACH,eAAO,MAAM,iBAAiB,mzSA0SnB,CAAC;AAEZ;;;;GAIG;AACH,eAAO,MAAM,iBAAiB,8jCAmCU,CAAC;AAMzC;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE;IAC5C,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,gBAAgB,CAAC,EAAE;QACjB,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,cAAc,CAAC,EAAE,MAAM,EAAE,CAAC;QAC1B,iBAAiB,CAAC,EAAE,MAAM,CAAC;KAC5B,CAAC;CACH,GAAG,MAAM,CA2ET;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE;IAC1C,UAAU,EAAE,MAAM,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB,GAAG,MAAM,CAUT;AAED;;GAEG;AACH,wBAAgB,sBAAsB,CAAC,MAAM,EAAE;IAC7C,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;IACtB,aAAa,EAAE,MAAM,EAAE,CAAC;CACzB,GAAG,MAAM,CAMT;AAMD;;GAEG;AACH,eAAO,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;CAoC/B,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAqF9B,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;CAoClC,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;;;;;CAwI5B,CAAC;AAEH,eAAO,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAKvB,CAAC"}
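Note: the two `model` additions above (mirrored in the `promptTools` aggregate) let a coordinator pin a spawned subtask agent to a specific model. A minimal sketch of a call under these declarations, assuming `swarm_spawn_subtask` is re-exported from the package root as dist/index.d.ts suggests; the IDs and model string are placeholders, and required spawn fields not visible in this diff are omitted:

```ts
import { swarm_spawn_subtask } from "opencode-swarm-plugin";

// Supplied by the OpenCode host at runtime; declared here so the sketch type-checks.
declare const toolContext: import("@opencode-ai/plugin").ToolContext;

const prompt = await swarm_spawn_subtask.execute(
  {
    bead_id: "cell-abc123",             // placeholder cell ID
    model: "anthropic/claude-sonnet-4", // new in 0.31.0: optional per-subtask model override (placeholder value)
  },
  toolContext,
);
```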
package/dist/swarm.d.ts
CHANGED
@@ -344,6 +344,7 @@ export declare const swarmTools: {
 skills_to_load: import("zod").ZodOptional<import("zod").ZodArray<import("zod").ZodString>>;
 coordinator_notes: import("zod").ZodOptional<import("zod").ZodString>;
 }, import("zod/v4/core").$strip>>;
+model: import("zod").ZodOptional<import("zod").ZodString>;
 };
 execute(args: {
 bead_id: string;
@@ -358,6 +359,7 @@ export declare const swarmTools: {
 skills_to_load?: string[] | undefined;
 coordinator_notes?: string | undefined;
 } | undefined;
+model?: string | undefined;
 }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
 };
 swarm_evaluation_prompt: {
@@ -383,7 +385,7 @@ export declare const swarmTools: {
 "risk-based": "risk-based";
 auto: "auto";
 }>>;
-max_subtasks: import("zod").
+max_subtasks: import("zod").ZodOptional<import("zod").ZodNumber>;
 context: import("zod").ZodOptional<import("zod").ZodString>;
 query_cass: import("zod").ZodOptional<import("zod").ZodBoolean>;
 cass_limit: import("zod").ZodOptional<import("zod").ZodNumber>;
@@ -391,8 +393,8 @@ export declare const swarmTools: {
 };
 execute(args: {
 task: string;
-max_subtasks: number;
 strategy?: "file-based" | "feature-based" | "risk-based" | "auto" | undefined;
+max_subtasks?: number | undefined;
 context?: string | undefined;
 query_cass?: boolean | undefined;
 cass_limit?: number | undefined;
@@ -403,14 +405,14 @@ export declare const swarmTools: {
 description: string;
 args: {
 task: import("zod").ZodString;
-max_subtasks: import("zod").
+max_subtasks: import("zod").ZodOptional<import("zod").ZodNumber>;
 context: import("zod").ZodOptional<import("zod").ZodString>;
 query_cass: import("zod").ZodOptional<import("zod").ZodBoolean>;
 cass_limit: import("zod").ZodOptional<import("zod").ZodNumber>;
 };
 execute(args: {
 task: string;
-max_subtasks
+max_subtasks?: number | undefined;
 context?: string | undefined;
 query_cass?: boolean | undefined;
 cass_limit?: number | undefined;
@@ -430,7 +432,7 @@ export declare const swarmTools: {
 args: {
 task: import("zod").ZodString;
 context: import("zod").ZodOptional<import("zod").ZodString>;
-max_subtasks: import("zod").
+max_subtasks: import("zod").ZodOptional<import("zod").ZodNumber>;
 strategy: import("zod").ZodDefault<import("zod").ZodOptional<import("zod").ZodEnum<{
 "file-based": "file-based";
 "feature-based": "feature-based";
@@ -441,10 +443,10 @@ export declare const swarmTools: {
 };
 execute(args: {
 task: string;
-max_subtasks: number;
 strategy: "file-based" | "feature-based" | "risk-based" | "auto";
 query_cass: boolean;
 context?: string | undefined;
+max_subtasks?: number | undefined;
 }, context: import("@opencode-ai/plugin").ToolContext): Promise<string>;
 };
 swarm_plan_interactive: {
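Note: throughout `swarmTools`, `max_subtasks` changes from a required `number` to `ZodOptional<ZodNumber>`, so callers can omit the cap and let decomposition pick its own subtask count. A sketch under these declarations, assuming `swarm_plan_prompt` is re-exported from the package root; the task text is a placeholder:

```ts
import { swarm_plan_prompt } from "opencode-swarm-plugin";

declare const toolContext: import("@opencode-ai/plugin").ToolContext;

// Both calls type-check in 0.31.0; before this release, max_subtasks was required.
await swarm_plan_prompt.execute({ task: "add dark mode support" }, toolContext);
await swarm_plan_prompt.execute(
  { task: "add dark mode support", max_subtasks: 5 }, // explicit cap still accepted
  toolContext,
);
```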
package/dist/swarm.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"swarm.d.ts","sourceRoot":"","sources":["../src/swarm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAGH,cAAc,oBAAoB,CAAC;AACnC,cAAc,mBAAmB,CAAC;AAClC,cAAc,iBAAiB,CAAC;AAChC,cAAc,qBAAqB,CAAC;AAQpC;;;GAGG;AACH,eAAO,MAAM,UAAU
+{"version":3,"file":"swarm.d.ts","sourceRoot":"","sources":["../src/swarm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG;AAGH,cAAc,oBAAoB,CAAC;AACnC,cAAc,mBAAmB,CAAC;AAClC,cAAc,iBAAiB,CAAC;AAChC,cAAc,qBAAqB,CAAC;AAQpC;;;GAGG;AACH,eAAO,MAAM,UAAU;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAKtB,CAAC"}
package/opencode-swarm-plugin-0.30.7.tgz
Binary file
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "opencode-swarm-plugin",
-  "version": "0.
+  "version": "0.31.0",
   "description": "Multi-agent swarm coordination for OpenCode with learning capabilities, beads integration, and Agent Mail",
   "type": "module",
   "main": "./dist/index.js",
@@ -39,7 +39,7 @@
     "gray-matter": "^4.0.3",
     "ioredis": "^5.4.1",
     "minimatch": "^10.1.1",
-    "swarm-mail": "
+    "swarm-mail": "1.1.0",
     "zod": "4.1.8"
   },
   "devDependencies": {
package/src/hive.integration.test.ts
CHANGED
@@ -406,7 +406,7 @@ describe("beads integration", () => {
   });
 
   describe("hive_create_epic", () => {
-    it("creates an epic with subtasks", async () => {
+    it("creates an epic with subtasks and syncs to JSONL", async () => {
       const result = await hive_create_epic.execute(
         {
           epic_title: "Integration test epic",
@@ -438,6 +438,28 @@
         expect(subtaskBead).toBeDefined();
         expect(subtaskBead!.parent_id).toBe(epicResult.epic.id);
       }
+
+      // NEW TEST: Verify cells are synced to JSONL immediately
+      const { readFileSync, existsSync } = await import("node:fs");
+      const { join } = await import("node:path");
+      const jsonlPath = join(TEST_PROJECT_KEY, ".hive", "issues.jsonl");
+
+      expect(existsSync(jsonlPath)).toBe(true);
+
+      const jsonlContent = readFileSync(jsonlPath, "utf-8");
+      const lines = jsonlContent.trim().split("\n").filter(l => l);
+      const cells = lines.map(line => JSON.parse(line));
+
+      // Epic and all subtasks should be in JSONL
+      const epicInJsonl = cells.find(c => c.id === epicResult.epic.id);
+      expect(epicInJsonl).toBeDefined();
+      expect(epicInJsonl!.title).toBe("Integration test epic");
+
+      for (const subtask of epicResult.subtasks) {
+        const subtaskInJsonl = cells.find(c => c.id === subtask.id);
+        expect(subtaskInJsonl).toBeDefined();
+        expect(subtaskInJsonl!.parent_id).toBe(epicResult.epic.id);
+      }
     });
 
     it("creates an epic with files metadata in subtasks", async () => {
@@ -601,6 +623,181 @@
     });
   });
 
+  describe("partial ID resolution", () => {
+    let fullId: string;
+    let hash: string;
+
+    beforeEach(async () => {
+      // Create a test cell to resolve
+      const result = await hive_create.execute(
+        { title: "Partial ID test cell" },
+        mockContext,
+      );
+      const cell = parseResponse<Cell>(result);
+      fullId = cell.id;
+      createdBeadIds.push(fullId);
+
+      // Extract hash from ID (format: {prefix}-{hash}-{timestamp}{random})
+      // The last segment is always timestamp+random (11 chars)
+      // The hash is the 6-char segment before that
+      // Examples:
+      // "opencode-swarm-monorepo-lf2p4u-mjd2h5v4wdt" -> hash is "lf2p4u"
+      // "cell--gcel4-mjd2h5v4wdt" -> hash is "-gcel4" (negative hash creates consecutive hyphens)
+
+      // Find the last hyphen, then work backwards to find the second-to-last hyphen
+      const lastHyphenIndex = fullId.lastIndexOf("-");
+      if (lastHyphenIndex === -1) {
+        hash = "";
+      } else {
+        // Get everything before the last hyphen
+        const beforeLast = fullId.substring(0, lastHyphenIndex);
+        // Find the second-to-last hyphen
+        const secondLastHyphenIndex = beforeLast.lastIndexOf("-");
+        if (secondLastHyphenIndex === -1) {
+          hash = "";
+        } else {
+          // Hash is between second-to-last and last hyphen
+          hash = fullId.substring(secondLastHyphenIndex + 1, lastHyphenIndex);
+        }
+      }
+    });
+
+    describe("hive_update", () => {
+      it("accepts full cell ID (no resolution needed)", async () => {
+        const result = await hive_update.execute(
+          { id: fullId, description: "Updated via full ID" },
+          mockContext,
+        );
+
+        const updated = parseResponse<Cell>(result);
+        expect(updated.id).toBe(fullId);
+        expect(updated.description).toContain("Updated via full ID");
+      });
+
+      it("resolves hash to full ID (or shows helpful error if ambiguous)", async () => {
+        try {
+          const result = await hive_update.execute(
+            { id: hash, priority: 1 },
+            mockContext,
+          );
+
+          const updated = parseResponse<Cell>(result);
+          expect(updated.id).toBe(fullId);
+          expect(updated.priority).toBe(1);
+        } catch (error) {
+          // In test environment with many cells, hash may be ambiguous
+          // Verify we get a helpful error message
+          if (error instanceof Error && error.message.includes("Ambiguous")) {
+            expect(error.message).toMatch(/ambiguous.*multiple/i);
+            expect(error.message).toContain(hash);
+          } else {
+            throw error; // Re-throw if not ambiguity error
+          }
+        }
+      });
+
+      it("throws helpful error for non-existent hash", async () => {
+        await expect(
+          hive_update.execute({ id: "zzzzzz", status: "closed" }, mockContext),
+        ).rejects.toThrow(/not found|no cell|zzzzzz/i);
+      });
+
+      it("throws helpful error for ambiguous hash", async () => {
+        // Create another cell with potentially similar hash
+        // (in practice, hashes are unique, but we simulate ambiguity by using a short partial)
+        // This test verifies the error message is helpful
+        try {
+          // Use a single char which might match multiple cells in larger datasets
+          await hive_update.execute({ id: "a", status: "closed" }, mockContext);
+          // If it succeeds, it means only one cell matched - that's fine
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          // Error should mention ambiguity if multiple matches
+          if (message.includes("ambiguous") || message.includes("multiple")) {
+            expect(message).toMatch(/ambiguous|multiple/i);
+          }
+        }
+      });
+    });
+
+    describe("hive_close", () => {
+      it("accepts full cell ID", async () => {
+        const result = await hive_close.execute(
+          { id: fullId, reason: "Closed via full ID" },
+          mockContext,
+        );
+
+        expect(result).toContain("Closed");
+        expect(result).toContain(fullId);
+
+        const closed = await adapter.getCell(TEST_PROJECT_KEY, fullId);
+        expect(closed?.status).toBe("closed");
+      });
+
+      it("resolves hash to full ID (or shows helpful error if ambiguous)", async () => {
+        try {
+          const result = await hive_close.execute(
+            { id: hash, reason: "Close via hash" },
+            mockContext,
+          );
+
+          expect(result).toContain("Closed");
+          expect(result).toContain(fullId);
+        } catch (error) {
+          if (error instanceof Error && error.message.includes("Ambiguous")) {
+            expect(error.message).toMatch(/ambiguous.*multiple/i);
+            expect(error.message).toContain(hash);
+          } else {
+            throw error;
+          }
+        }
+      });
+
+      it("throws helpful error for non-existent hash", async () => {
+        await expect(
+          hive_close.execute({ id: "nonono", reason: "Test" }, mockContext),
+        ).rejects.toThrow(/not found|no cell|nonono/i);
+      });
+    });
+
+    describe("hive_start", () => {
+      it("accepts full cell ID", async () => {
+        const result = await hive_start.execute({ id: fullId }, mockContext);
+
+        expect(result).toContain("Started");
+        expect(result).toContain(fullId);
+
+        const started = await adapter.getCell(TEST_PROJECT_KEY, fullId);
+        expect(started?.status).toBe("in_progress");
+      });
+
+      it("resolves hash to full ID (or shows helpful error if ambiguous)", async () => {
+        try {
+          const result = await hive_start.execute(
+            { id: hash },
+            mockContext,
+          );
+
+          expect(result).toContain("Started");
+          expect(result).toContain(fullId);
+        } catch (error) {
+          if (error instanceof Error && error.message.includes("Ambiguous")) {
+            expect(error.message).toMatch(/ambiguous.*multiple/i);
+            expect(error.message).toContain(hash);
+          } else {
+            throw error;
+          }
+        }
+      });
+
+      it("throws helpful error for non-existent hash", async () => {
+        await expect(
+          hive_start.execute({ id: "nope99" }, mockContext),
+        ).rejects.toThrow(/not found|no cell|nope99/i);
+      });
+    });
+  });
+
 describe("workflow integration", () => {
   it("complete bead lifecycle: create -> start -> update -> close", async () => {
     // 1. Create
@@ -1225,8 +1422,8 @@ describe("beads integration", () => {
         mockContext,
       );
 
-      // Should return "No cells to sync" since no dirty cells
-      expect(result).toContain("No cells to sync");
+      // Should return "No cells or memories to sync" since no dirty cells
+      expect(result).toContain("No cells or memories to sync");
     } finally {
       setHiveWorkingDirectory(originalDir);
       rmSync(tempProject, { recursive: true, force: true });
@@ -1450,4 +1647,136 @@
       rmSync(tempProject, { recursive: true, force: true });
     });
   });
+
+  describe("process exit hook", () => {
+    it("registers beforeExit hook that syncs dirty cells", async () => {
+      const { mkdirSync, rmSync, writeFileSync, readFileSync, existsSync } = await import("node:fs");
+      const { join } = await import("node:path");
+      const { tmpdir } = await import("node:os");
+      const { execSync } = await import("node:child_process");
+
+      // Create temp project
+      const tempProject = join(tmpdir(), `hive-exit-hook-test-${Date.now()}`);
+      const hiveDir = join(tempProject, ".hive");
+      mkdirSync(hiveDir, { recursive: true });
+
+      // Initialize git repo
+      execSync("git init", { cwd: tempProject });
+      execSync('git config user.email "test@example.com"', { cwd: tempProject });
+      execSync('git config user.name "Test User"', { cwd: tempProject });
+
+      // Initial commit with empty issues.jsonl
+      writeFileSync(join(hiveDir, "issues.jsonl"), "");
+      execSync("git add .", { cwd: tempProject });
+      execSync('git commit -m "initial"', { cwd: tempProject });
+
+      // Set working directory
+      const originalDir = getHiveWorkingDirectory();
+      setHiveWorkingDirectory(tempProject);
+
+      try {
+        // Create a cell (marks it dirty but don't sync)
+        await hive_create.execute(
+          { title: "Exit hook test cell", type: "task" },
+          mockContext,
+        );
+
+        // Verify cell is NOT in JSONL yet (only in PGLite)
+        const beforeContent = readFileSync(join(hiveDir, "issues.jsonl"), "utf-8");
+        expect(beforeContent.trim()).toBe("");
+
+        // Simulate process exit by triggering beforeExit event
+        process.emit("beforeExit", 0);
+
+        // Wait for async flush to complete
+        await new Promise(resolve => setTimeout(resolve, 100));
+
+        // Verify cell was synced to JSONL by the exit hook
+        const afterContent = readFileSync(join(hiveDir, "issues.jsonl"), "utf-8");
+        expect(afterContent.trim()).not.toBe("");
+
+        const cells = afterContent.trim().split("\n").map(line => JSON.parse(line));
+        expect(cells).toHaveLength(1);
+        expect(cells[0].title).toBe("Exit hook test cell");
+      } finally {
+        setHiveWorkingDirectory(originalDir);
+        rmSync(tempProject, { recursive: true, force: true });
+      }
+    });
+
+    it("exit hook is idempotent - safe to call multiple times", async () => {
+      const { mkdirSync, rmSync, writeFileSync, readFileSync } = await import("node:fs");
+      const { join } = await import("node:path");
+      const { tmpdir } = await import("node:os");
+
+      // Create temp project
+      const tempProject = join(tmpdir(), `hive-exit-hook-test-${Date.now()}`);
+      const hiveDir = join(tempProject, ".hive");
+      mkdirSync(hiveDir, { recursive: true });
+      writeFileSync(join(hiveDir, "issues.jsonl"), "");
+
+      // Set working directory
+      const originalDir = getHiveWorkingDirectory();
+      setHiveWorkingDirectory(tempProject);
+
+      try {
+        // Create a cell
+        await hive_create.execute(
+          { title: "Idempotent test cell", type: "task" },
+          mockContext,
+        );
+
+        // Trigger exit hook multiple times
+        process.emit("beforeExit", 0);
+        await new Promise(resolve => setTimeout(resolve, 50));
+
+        process.emit("beforeExit", 0);
+        await new Promise(resolve => setTimeout(resolve, 50));
+
+        // Verify cell is written only once (no duplication)
+        const content = readFileSync(join(hiveDir, "issues.jsonl"), "utf-8");
+        const lines = content.trim().split("\n").filter(l => l);
+
+        // Should have exactly one cell (even though we triggered hook twice)
+        expect(lines.length).toBeGreaterThanOrEqual(1);
+
+        // All cells should have unique IDs
+        const cells = lines.map(line => JSON.parse(line));
+        const uniqueIds = new Set(cells.map(c => c.id));
+        expect(uniqueIds.size).toBe(cells.length);
+      } finally {
+        setHiveWorkingDirectory(originalDir);
+        rmSync(tempProject, { recursive: true, force: true });
+      }
+    });
+
+    it("exit hook handles case with no dirty cells gracefully", async () => {
+      const { mkdirSync, rmSync, writeFileSync, readFileSync } = await import("node:fs");
+      const { join } = await import("node:path");
+      const { tmpdir } = await import("node:os");
+
+      // Create temp project with empty JSONL
+      const tempProject = join(tmpdir(), `hive-exit-hook-test-${Date.now()}`);
+      const hiveDir = join(tempProject, ".hive");
+      mkdirSync(hiveDir, { recursive: true });
+      writeFileSync(join(hiveDir, "issues.jsonl"), "");
+
+      // Set working directory
+      const originalDir = getHiveWorkingDirectory();
+      setHiveWorkingDirectory(tempProject);
+
+      try {
+        // Trigger exit hook with no dirty cells (should not throw)
+        process.emit("beforeExit", 0);
+        await new Promise(resolve => setTimeout(resolve, 50));
+
+        // JSONL should still be empty (no error thrown)
+        const content = readFileSync(join(hiveDir, "issues.jsonl"), "utf-8");
+        expect(content.trim()).toBe("");
+      } finally {
+        setHiveWorkingDirectory(originalDir);
+        rmSync(tempProject, { recursive: true, force: true });
+      }
+    });
+  });
 });