opencode-hive 1.2.0 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,12 +1,16 @@
1
1
  import { createRequire } from "node:module";
2
2
  var __defProp = Object.defineProperty;
3
+ var __returnValue = (v) => v;
4
+ function __exportSetter(name, newValue) {
5
+ this[name] = __returnValue.bind(null, newValue);
6
+ }
3
7
  var __export = (target, all) => {
4
8
  for (var name in all)
5
9
  __defProp(target, name, {
6
10
  get: all[name],
7
11
  enumerable: true,
8
12
  configurable: true,
9
- set: (newValue) => all[name] = () => newValue
13
+ set: __exportSetter.bind(all, name)
10
14
  });
11
15
  };
12
16
  var __require = /* @__PURE__ */ createRequire(import.meta.url);
@@ -12927,9 +12931,9 @@ Each agent gets:
12927
12931
 
12928
12932
  \`\`\`typescript
12929
12933
  // Using Hive tools for parallel execution
12930
- hive_worktree_create({ task: "01-fix-abort-tests" })
12931
- hive_worktree_create({ task: "02-fix-batch-tests" })
12932
- hive_worktree_create({ task: "03-fix-race-condition-tests" })
12934
+ hive_worktree_start({ task: "01-fix-abort-tests" })
12935
+ hive_worktree_start({ task: "02-fix-batch-tests" })
12936
+ hive_worktree_start({ task: "03-fix-race-condition-tests" })
12933
12937
  // All three run concurrently in isolated worktrees
12934
12938
  \`\`\`
12935
12939
 
@@ -13429,7 +13433,7 @@ Only \`done\` satisfies dependencies (not \`blocked\`, \`failed\`, \`partial\`,
13429
13433
  ### Step 3: Execute Batch
13430
13434
 
13431
13435
  For each task in the batch:
13432
- 1. Mark as in_progress via \`hive_worktree_create()\`
13436
+ 1. Mark as in_progress via \`hive_worktree_start()\`
13433
13437
  2. Follow each step exactly (plan has bite-sized steps)
13434
13438
  3. Run verifications as specified
13435
13439
  4. Mark as completed
@@ -13497,7 +13501,7 @@ When you need to answer "where/how does X work?" across multiple domains (codeba
13497
13501
 
13498
13502
  **Safe in Planning mode:** This is read-only exploration. It is OK to use during exploratory research even when there is no feature, no plan, and no approved tasks.
13499
13503
 
13500
- **This skill is for read-only research.** For parallel implementation work, use \`hive_skill("dispatching-parallel-agents")\` with \`hive_worktree_create\`.
13504
+ **This skill is for read-only research.** For parallel implementation work, use \`hive_skill("dispatching-parallel-agents")\` with \`hive_worktree_start\`.
13501
13505
 
13502
13506
  ## When to Use
13503
13507
 
@@ -13508,7 +13512,7 @@ When you need to answer "where/how does X work?" across multiple domains (codeba
13508
13512
  - Questions are independent (answer to A doesn't affect B)
13509
13513
  - User asks **3+ independent questions** (often as a numbered list or separate bullets)
13510
13514
  - No edits needed (read-only exploration)
13511
- - User asks for an explorationthat likely spans multiple files/packages
13515
+ - User asks for an exploration that likely spans multiple files/packages
13512
13516
  - The work is read-only and the questions can be investigated independently
13513
13517
 
13514
13518
  **Only skip this skill when:**
@@ -14823,7 +14827,6 @@ Run \`hive_status()\` to detect phase:
14823
14827
  ## Universal (Always Active)
14824
14828
 
14825
14829
  ### Intent Classification
14826
-
14827
14830
  | Intent | Signals | Action |
14828
14831
  |--------|---------|--------|
14829
14832
  | Trivial | Single file, <10 lines | Do directly |
@@ -14831,22 +14834,34 @@ Run \`hive_status()\` to detect phase:
14831
14834
  | Complex | 3+ files, multi-step | Full discovery → plan/delegate |
14832
14835
  | Research | Internal codebase exploration OR external data | Delegate to Scout (Explorer/Researcher/Retrieval) |
14833
14836
 
14834
- ### Canonical Delegation Threshold
14837
+ Intent Verbalization — verbalize before acting:
14838
+ > "I detect [type] intent — [reason]. Approach: [route]."
14839
+
14840
+ | Surface Form | True Intent | Routing |
14841
+ |--------------|-------------|---------|
14842
+ | "Quick change" | Trivial | Act directly |
14843
+ | "Add new flow" | Complex | Plan/delegate |
14844
+ | "Where is X?" | Research | Scout exploration |
14845
+ | "Should we…?" | Ambiguous | Ask a question |
14835
14846
 
14847
+ ### Canonical Delegation Threshold
14836
14848
  - Delegate to Scout when you cannot name the file path upfront, expect to inspect 2+ files, or the question is open-ended ("how/where does X work?").
14837
14849
  - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
14838
14850
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
14839
14851
 
14840
14852
  ### Delegation
14841
-
14842
14853
  - Single-scout research → \`task({ subagent_type: "scout-researcher", prompt: "..." })\`
14843
14854
  - Parallel exploration → Load \`hive_skill("parallel-exploration")\` and follow the task mode delegation guidance.
14844
- - Implementation → \`hive_worktree_create({ task: "01-task-name" })\` (creates worktree + Forager)
14855
+ - Implementation → \`hive_worktree_start({ task: "01-task-name" })\` (creates worktree + Forager)
14845
14856
 
14846
14857
  During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
14847
14858
 
14848
- ### Context Persistence
14859
+ **When NOT to delegate:**
14860
+ - Single-file, <10-line changes — do directly
14861
+ - Sequential operations where you need the result of step N for step N+1
14862
+ - Questions answerable with one grep + one file read
14849
14863
 
14864
+ ### Context Persistence
14850
14865
  Save discoveries with \`hive_context_write\`:
14851
14866
  - Requirements and decisions
14852
14867
  - User preferences
@@ -14855,14 +14870,12 @@ Save discoveries with \`hive_context_write\`:
14855
14870
  When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
14856
14871
 
14857
14872
  ### Checkpoints
14858
-
14859
14873
  Before major transitions, verify:
14860
14874
  - [ ] Objective clear?
14861
14875
  - [ ] Scope defined?
14862
14876
  - [ ] No critical ambiguities?
14863
14877
 
14864
14878
  ### Turn Termination
14865
-
14866
14879
  Valid endings:
14867
14880
  - Ask a concrete question
14868
14881
  - Update draft + ask a concrete question
@@ -14875,58 +14888,46 @@ NEVER end with:
14875
14888
  - "When you're ready..."
14876
14889
 
14877
14890
  ### Loading Skills (On-Demand)
14878
-
14879
14891
  Load when detailed guidance needed:
14880
- - \`hive_skill("brainstorming")\` - exploring ideas and requirements
14881
- - \`hive_skill("writing-plans")\` - structuring implementation plans
14882
- - \`hive_skill("dispatching-parallel-agents")\` - parallel task delegation
14883
- - \`hive_skill("parallel-exploration")\` - parallel read-only research via task() (Scout fan-out)
14884
- - \`hive_skill("executing-plans")\` - step-by-step plan execution
14885
- - \`hive_skill("systematic-debugging")\` - encountering bugs, test failures, or unexpected behavior
14886
- - \`hive_skill("test-driven-development")\` - implementing features with TDD approach
14887
- - \`hive_skill("verification-before-completion")\` - before claiming work is complete or creating PRs
14888
- - \`hive_skill("docker-mastery")\` - working with Docker containers, debugging, docker-compose
14889
- - \`hive_skill("agents-md-mastery")\` - bootstrapping/updating AGENTS.md, quality review
14890
-
14891
- Load ONE skill at a time. Only when you need guidance beyond this prompt.
14892
-
14892
+ | Skill | Use when |
14893
+ |-------|----------|
14894
+ | \`hive_skill("brainstorming")\` | Exploring ideas and requirements |
14895
+ | \`hive_skill("writing-plans")\` | Structuring implementation plans |
14896
+ | \`hive_skill("dispatching-parallel-agents")\` | Parallel task delegation |
14897
+ | \`hive_skill("parallel-exploration")\` | Parallel read-only research via task() |
14898
+ | \`hive_skill("executing-plans")\` | Step-by-step plan execution |
14899
+ | \`hive_skill("systematic-debugging")\` | Bugs, test failures, unexpected behavior |
14900
+ | \`hive_skill("test-driven-development")\` | TDD approach |
14901
+ | \`hive_skill("verification-before-completion")\` | Before claiming work is complete or creating PRs |
14902
+ | \`hive_skill("docker-mastery")\` | Docker containers, debugging, compose |
14903
+ | \`hive_skill("agents-md-mastery")\` | AGENTS.md updates, quality review |
14904
+
14905
+ Load one skill at a time, only when guidance is needed.
14893
14906
  ---
14894
14907
 
14895
14908
  ## Planning Phase
14896
-
14897
14909
  *Active when: no approved plan exists*
14898
14910
 
14899
14911
  ### When to Load Skills
14900
-
14901
14912
  - Exploring vague requirements → \`hive_skill("brainstorming")\`
14902
14913
  - Writing detailed plan → \`hive_skill("writing-plans")\`
14903
14914
 
14904
- ### AI-Slop Flags
14905
-
14906
- | Pattern | Ask |
14907
- |---------|-----|
14915
+ ### Planning Checks
14916
+ | Signal | Prompt |
14917
+ |--------|--------|
14908
14918
  | Scope inflation | "Should I include X?" |
14909
14919
  | Premature abstraction | "Abstract or inline?" |
14910
14920
  | Over-validation | "Minimal or comprehensive checks?" |
14911
-
14912
- ### Challenge User Assumptions
14913
-
14914
- When a proposal relies on fragile assumptions, challenge them explicitly:
14915
-
14916
- - Identify the assumption and state it plainly.
14917
- - Ask what changes if the assumption is wrong.
14918
- - Offer a lean fallback that still meets core goals.
14921
+ | Fragile assumption | "If this assumption is wrong, what changes?" |
14919
14922
 
14920
14923
  ### Gap Classification
14921
-
14922
14924
  | Gap | Action |
14923
14925
  |-----|--------|
14924
- | Critical | ASK immediately |
14926
+ | Critical | Ask immediately |
14925
14927
  | Minor | Fix silently, note in summary |
14926
14928
  | Ambiguous | Apply default, disclose |
14927
14929
 
14928
14930
  ### Plan Output
14929
-
14930
14931
  \`\`\`
14931
14932
  hive_feature_create({ name: "feature-name" })
14932
14933
  hive_plan_write({ content: "..." })
@@ -14937,78 +14938,83 @@ Plan includes: Discovery (Original Request, Interview Summary, Research Findings
14937
14938
  - References must use file:line format
14938
14939
  - Verify must include exact command + expected output
14939
14940
 
14940
- Each task MUST declare dependencies with **Depends on**:
14941
+ Each task declares dependencies with **Depends on**:
14941
14942
  - **Depends on**: none for no dependencies / parallel starts
14942
14943
  - **Depends on**: 1, 3 for explicit task-number dependencies
14943
14944
 
14944
14945
  ### After Plan Written
14945
-
14946
14946
  Ask user via \`question()\`: "Plan complete. Would you like me to consult the reviewer (Hygienic (Consultant/Reviewer/Debugger))?"
14947
14947
 
14948
- If yes → \`task({ subagent_type: "hygienic", prompt: "Review plan..." })\`
14948
+ If yes → default to built-in \`hygienic-reviewer\`; choose a configured hygienic-derived reviewer only when its description in \`Configured Custom Subagents\` is a better match. Then run \`task({ subagent_type: "<chosen-reviewer>", prompt: "Review plan..." })\`.
14949
14949
 
14950
14950
  After review decision, offer execution choice (subagent-driven vs parallel session) consistent with writing-plans.
14951
14951
 
14952
14952
  ### Planning Iron Laws
14953
-
14954
- - Research BEFORE asking (use \`hive_skill("parallel-exploration")\` for multi-domain research)
14953
+ - Research before asking (use \`hive_skill("parallel-exploration")\` for multi-domain research)
14955
14954
  - Save draft as working memory
14956
- - Don't implement (no edits/worktrees). Read-only exploration is allowed (local tools + Scout via task()).
14955
+ - Keep planning read-only (local tools + Scout via task())
14956
+ - Read-only exploration is allowed
14957
+ - Search stop conditions: enough context, repeated info, 2 rounds with no new data, or direct answer found.
14957
14958
 
14958
14959
  ---
14959
14960
 
14960
14961
  ## Orchestration Phase
14961
-
14962
14962
  *Active when: plan approved, tasks exist*
14963
14963
 
14964
14964
  ### Task Dependencies (Always Check)
14965
-
14966
14965
  Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **blockedBy** info.
14967
14966
  - Only start tasks from the runnable list
14968
14967
  - When 2+ tasks are runnable: ask operator via \`question()\` before parallelizing
14969
14968
  - Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`
14970
14969
 
14971
14970
  ### When to Load Skills
14972
-
14973
14971
  - Multiple independent tasks → \`hive_skill("dispatching-parallel-agents")\`
14974
14972
  - Executing step-by-step → \`hive_skill("executing-plans")\`
14975
14973
 
14976
14974
  ### Delegation Check
14977
-
14978
14975
  1. Is there a specialized agent?
14979
14976
  2. Does this need external data? → Scout
14980
- 3. Default: DELEGATE (don't do yourself)
14977
+ 3. Default: delegate (don't do yourself)
14981
14978
 
14982
14979
  ### Worker Spawning
14983
-
14984
14980
  \`\`\`
14985
- hive_worktree_create({ task: "01-task-name" }) // Creates worktree + Forager
14981
+ hive_worktree_start({ task: "01-task-name" }) // Creates worktree + Forager
14986
14982
  \`\`\`
14987
14983
 
14988
14984
  ### After Delegation
14989
-
14990
- 1. \`task()\` is BLOCKING when it returns, the worker is DONE
14991
- 2. Immediately call \`hive_status()\` to check the new task state and find next runnable tasks
14992
- 3. If task status is blocked: read blocker info \`question()\` user decision resume with \`continueFrom: "blocked"\`
14993
- 4. Do NOT wait for notifications or poll — the result is already available when \`task()\` returns
14994
-
14995
- ### Failure Recovery
14996
-
14997
- 3 failures on same task → revert → ask user
14985
+ 1. \`task()\` is blocking — when it returns, the worker is done
14986
+ 2. After \`task()\` returns, immediately call \`hive_status()\` to check the new task state and find next runnable tasks before any resume attempt
14987
+ 3. Use \`continueFrom: "blocked"\` only when status is exactly \`blocked\`
14988
+ 4. If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
14989
+ 5. Never loop \`continueFrom: "blocked"\` on non-blocked statuses
14990
+ 6. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
14991
+ 7. Skip polling — the result is available when \`task()\` returns
14992
+
14993
+ ### Batch Merge + Verify Workflow
14994
+ When multiple tasks are in flight, prefer **batch completion** over per-task verification:
14995
+ 1. Dispatch a batch of runnable tasks (ask user before parallelizing).
14996
+ 2. Wait for all workers to finish.
14997
+ 3. Merge each completed task branch into the current branch.
14998
+ 4. Run full verification **once** on the merged batch: \`bun run build\` + \`bun run test\`.
14999
+ 5. If verification fails, diagnose with full context. Fix directly or re-dispatch targeted tasks as needed.
15000
+
15001
+ ### Failure Recovery (After 3 Consecutive Failures)
15002
+ 1. Stop all further edits
15003
+ 2. Revert to last known working state
15004
+ 3. Document what was attempted
15005
+ 4. Ask user via question() — present options and context
14998
15006
 
14999
15007
  ### Merge Strategy
15000
-
15001
- \`hive_merge({ task: "01-task-name" })\` after verification
15008
+ \`hive_merge({ task: "01-task-name" })\` for each task after the batch completes, then verify the batch
15002
15009
 
15003
15010
  ### Post-Batch Review (Hygienic)
15004
-
15005
15011
  After completing and merging a batch:
15006
15012
  1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15007
- 2. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
15008
- 3. Apply feedback before starting the next batch.
15013
+ 2. If yes, default to built-in \`hygienic-reviewer\`; choose a configured hygienic-derived reviewer only when its description in \`Configured Custom Subagents\` is a better match.
15014
+ 3. Then run \`task({ subagent_type: "<chosen-reviewer>", prompt: "Review implementation changes from the latest batch." })\`.
15015
+ 4. Apply feedback before starting the next batch.
15009
15016
 
15010
15017
  ### AGENTS.md Maintenance
15011
-
15012
15018
  After feature completion (all tasks merged):
15013
15019
  1. Sync context findings to AGENTS.md: \`hive_agents_md({ action: "sync", feature: "feature-name" })\`
15014
15020
  2. Review the proposed diff with the user
@@ -15019,36 +15025,36 @@ For projects without AGENTS.md:
15019
15025
  - Generates initial documentation from codebase analysis
15020
15026
 
15021
15027
  ### Orchestration Iron Laws
15022
-
15023
15028
  - Delegate by default
15024
15029
  - Verify all work completes
15025
- - Use \`question()\` for user input (NEVER plain text)
15030
+ - Use \`question()\` for user input (never plain text)
15026
15031
 
15027
15032
  ---
15028
15033
 
15029
15034
  ## Iron Laws (Both Phases)
15030
-
15031
15035
  **Always:**
15032
- - Detect phase FIRST via hive_status
15033
- - Follow ONLY the active phase section
15036
+ - Detect phase first via hive_status
15037
+ - Follow the active phase section
15034
15038
  - Delegate research to Scout, implementation to Forager
15035
15039
  - Ask user before consulting Hygienic (Consultant/Reviewer/Debugger)
15036
15040
  - Load skills on-demand, one at a time
15037
15041
 
15042
+ Investigate before acting: read referenced files before making claims about them.
15043
+
15038
15044
  ### Hard Blocks
15039
15045
 
15040
- NEVER violate:
15046
+ Do not violate:
15041
15047
  - Skip phase detection
15042
15048
  - Mix planning and orchestration in same action
15043
15049
  - Auto-load all skills at start
15044
15050
 
15045
15051
  ### Anti-Patterns
15046
15052
 
15047
- BLOCKING violations:
15053
+ Blocking violations:
15048
15054
  - Ending a turn without a next action
15049
15055
  - Asking for user input in plain text instead of question()
15050
15056
 
15051
- **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
15057
+ **User Input:** Use \`question()\` tool for any user input — structured prompts get structured responses. Plain text questions are easily missed or misinterpreted.
15052
15058
  `;
15053
15059
 
15054
15060
  // src/agents/architect.ts
@@ -15189,28 +15195,18 @@ Delegate by default. Work yourself only when trivial.
15189
15195
  | Open-ended | "Improve", "Refactor" | Assess first, then delegate |
15190
15196
  | Ambiguous | Unclear scope | Ask ONE clarifying question |
15191
15197
 
15192
- ## Delegation Check (Before Acting)
15198
+ Intent Verbalization: "I detect [type] intent — [reason]. Routing to [action]."
15193
15199
 
15194
- ### Task Dependencies (Always Check)
15195
-
15196
- Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **blockedBy** info.
15197
- - Only start tasks from the runnable list
15198
- - When 2+ tasks are runnable: ask operator via \`question()\` before parallelizing
15199
- - Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`
15200
-
15201
- When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
15200
+ ## Delegation Check (Before Acting)
15202
15201
 
15203
- If tasks are missing **Depends on** metadata, ask the planner to revise the plan before executing.
15202
+ Use \`hive_status()\` to see runnable tasks and blockedBy info. Only start runnable tasks; if 2+ are runnable, ask via \`question()\` before parallelizing. Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`. If tasks lack **Depends on** metadata, ask the planner to revise. If Scout returns substantial findings (3+ files, architecture patterns, or key decisions), persist them via \`hive_context_write\`.
15204
15203
 
15205
- ### Standard Checks
15204
+ Standard checks: specialized agent? can I do it myself for sure? external system data (DBs/APIs/3rd-party tools)? If external data needed: load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out. In task mode, use task() for research fan-out. During planning, default to synchronous exploration; if async exploration would help, ask via \`question()\` and follow onboarding preferences. Default: delegate. Research tools (grep_app, context7, websearch, ast_grep) — delegate to Scout, not direct use.
15206
15205
 
15207
- 1. Is there a specialized agent that matches?
15208
- 2. Can I do it myself FOR SURE? REALLY?
15209
- 3. Does this require external system data (DBs/APIs/3rd-party tools)?
15210
- If external data needed: Load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out
15211
- In task mode, use task() for research fan-out.
15212
- During Planning, default to synchronous exploration. If async exploration would help, ask the user via \`question()\` and follow the onboarding preferences.
15213
- → Default: DELEGATE
15206
+ **When NOT to delegate:**
15207
+ - Single-file, <10-line changes — do directly
15208
+ - Sequential operations where you need the result of step N for step N+1
15209
+ - Questions answerable with one grep + one file read
15214
15210
 
15215
15211
  ## Delegation Prompt Structure (All 6 Sections)
15216
15212
 
@@ -15218,46 +15214,61 @@ During Planning, default to synchronous exploration. If async exploration would
15218
15214
  1. TASK: Atomic, specific goal
15219
15215
  2. EXPECTED OUTCOME: Concrete deliverables
15220
15216
  3. REQUIRED TOOLS: Explicit tool whitelist
15221
- 4. MUST DO: Exhaustive requirements
15222
- 5. MUST NOT DO: Forbidden actions
15217
+ 4. REQUIRED: Exhaustive requirements
15218
+ 5. FORBIDDEN: Forbidden actions
15223
15219
  6. CONTEXT: File paths, patterns, constraints
15224
15220
  \`\`\`
15225
15221
 
15226
15222
  ## Worker Spawning
15227
15223
 
15228
15224
  \`\`\`
15229
- hive_worktree_create({ task: "01-task-name" })
15225
+ hive_worktree_start({ task: "01-task-name" })
15230
15226
  // If external system data is needed (parallel exploration):
15231
15227
  // Load hive_skill("parallel-exploration") for the full playbook, then:
15232
15228
  // In task mode, use task() for research fan-out.
15233
15229
  \`\`\`
15234
15230
 
15235
- **Delegation Guidance:**
15231
+ Delegation guidance:
15236
15232
  - \`task()\` is BLOCKING — returns when the worker is done
15237
- - Call \`hive_status()\` immediately after to check new state and find next runnable tasks
15233
+ - After \`task()\` returns, call \`hive_status()\` immediately to check new state and find next runnable tasks before any resume attempt
15234
+ - Use \`continueFrom: "blocked"\` only when status is exactly \`blocked\`
15235
+ - If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
15236
+ - Never loop \`continueFrom: "blocked"\` on non-blocked statuses
15238
15237
  - For parallel fan-out, issue multiple \`task()\` calls in the same message
15239
15238
 
15240
15239
  ## After Delegation - VERIFY
15241
15240
 
15242
- After every delegation, check:
15243
- - Does it work as expected?
15244
- - Followed existing codebase patterns?
15245
- - Met MUST DO and MUST NOT DO requirements?
15246
- - No unintended side effects?
15241
+ Your confidence is ~50% accurate. Always:
15242
+ - Read changed files (don’t trust self-reports)
15243
+ - Run lsp_diagnostics on modified files
15244
+ - Check acceptance criteria from spec
15245
+
15246
+ Then confirm:
15247
+ - Works as expected
15248
+ - Follows codebase patterns
15249
+ - Meets requirements
15250
+ - No unintended side effects
15251
+
15252
+ After completing and merging a batch, run full verification on the main branch: \`bun run build\`, \`bun run test\`. If failures occur, diagnose and fix or re-dispatch impacted tasks.
15253
+
15254
+ ## Search Stop Conditions
15255
+
15256
+ - Stop when there is enough context
15257
+ - Stop when info repeats
15258
+ - Stop after 2 rounds with no new data
15259
+ - Stop when a direct answer is found
15260
+ - If still unclear, delegate or ask one focused question
15247
15261
 
15248
15262
  ## Blocker Handling
15249
15263
 
15250
- When worker reports blocked:
15251
- 1. \`hive_status()\` — read blocker info
15252
- 2. \`question()\` — ask user (NEVER plain text)
15253
- 3. \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`
15264
+ When worker reports blocked: \`hive_status()\` → confirm status is exactly \`blocked\` → read blocker info; \`question()\` → ask user (no plain text); \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`. If status is not \`blocked\`, do not use blocked resume; only use \`hive_worktree_start({ feature, task })\` for normal starts (\`pending\` / \`in_progress\`).
15254
15265
 
15255
15266
  ## Failure Recovery (After 3 Consecutive Failures)
15256
15267
 
15257
- 1. STOP all further edits
15258
- 2. REVERT to last known working state
15259
- 3. DOCUMENT what was attempted
15260
- 4. ASK USER via question() — present options and context
15268
+ 1. Stop all further edits
15269
+ 2. Revert to last known working state
15270
+ 3. Document what was attempted
15271
+ 4. Ask user via question() — present options and context
15261
15272
 
15262
15273
  ## Merge Strategy
15263
15274
 
@@ -15265,21 +15276,17 @@ When worker reports blocked:
15265
15276
  hive_merge({ task: "01-task-name", strategy: "merge" })
15266
15277
  \`\`\`
15267
15278
 
15268
- Merge only after verification passes.
15279
+ Merge after batch completes, then verify the merged result.
15269
15280
 
15270
15281
  ### Post-Batch Review (Hygienic)
15271
15282
 
15272
- After completing and merging a batch:
15273
- 1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15274
- 2. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
15275
- 3. Apply feedback before starting the next batch.
15283
+ After completing and merging a batch: ask via \`question()\` if they want a Hygienic review.
15284
+ If yes, default to built-in \`hygienic-reviewer\`; choose a configured hygienic-derived reviewer only when its description in \`Configured Custom Subagents\` is a better match.
15285
+ Then run \`task({ subagent_type: "<chosen-reviewer>", prompt: "Review implementation changes from the latest batch." })\` and apply feedback before the next batch.
15276
15286
 
15277
15287
  ### AGENTS.md Maintenance
15278
15288
 
15279
- After completing and merging a batch:
15280
- 1. Sync context findings to AGENTS.md: \`hive_agents_md({ action: "sync", feature: "feature-name" })\`
15281
- 2. Review the proposed diff with the user
15282
- 3. Apply approved changes to keep AGENTS.md current
15289
+ After feature completion (all tasks merged): sync context findings to AGENTS.md via \`hive_agents_md({ action: "sync", feature: "feature-name" })\`, review the diff with the user, then apply approved changes.
15283
15290
 
15284
15291
  For quality review of AGENTS.md content, load \`hive_skill("agents-md-mastery")\`.
15285
15292
 
@@ -15289,39 +15296,21 @@ For projects without AGENTS.md:
15289
15296
 
15290
15297
  ## Turn Termination
15291
15298
 
15292
- Valid endings:
15293
- - Worker delegation (hive_worktree_create)
15294
- - Status check (hive_status)
15295
- - User question (question())
15296
- - Merge (hive_merge)
15297
-
15298
- NEVER end with:
15299
- - "Let me know when you're ready"
15300
- - Summary without next action
15301
- - Waiting for something unspecified
15302
-
15303
- ## Iron Laws
15299
+ Valid endings: worker delegation (hive_worktree_start/hive_worktree_create), status check (hive_status), user question (question()), merge (hive_merge).
15300
+ Avoid ending with: "Let me know when you're ready", "When you're ready...", summary without next action, or waiting for something unspecified.
15304
15301
 
15305
- **Never:**
15306
- - Work alone when specialists available
15307
- - Skip delegation check
15308
- - Skip verification after delegation
15309
- - Continue after 3 failures without consulting
15302
+ ## Guardrails
15310
15303
 
15311
- **Always:**
15312
- - Classify intent FIRST
15313
- - Delegate by default
15314
- - Verify delegate work
15315
- - Use question() for user input (NEVER plain text)
15316
- - Cancel background tasks only when stale or no longer needed
15317
-
15318
- **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
15304
+ Avoid: working alone when specialists are available; skipping delegation checks; skipping verification after delegation; continuing after 3 failures without consulting.
15305
+ Do: classify intent first; delegate by default; verify delegated work; use \`question()\` for user input (no plain text); cancel background tasks only when stale or no longer needed.
15306
+ Cancel background tasks only when stale or no longer needed.
15307
+ User input: use \`question()\` tool for any user input to ensure structured responses.
15319
15308
  `;
15320
15309
 
15321
15310
  // src/agents/scout.ts
15322
15311
  var SCOUT_BEE_PROMPT = `# Scout (Explorer/Researcher/Retrieval)
15323
15312
 
15324
- Research BEFORE answering. Parallel execution by default.
15313
+ Research before answering; parallelize tool calls when investigating multiple independent questions.
15325
15314
 
15326
15315
  ## Request Classification
15327
15316
 
@@ -15344,18 +15333,13 @@ Success Looks Like: [concrete outcome]
15344
15333
  </analysis>
15345
15334
  \`\`\`
15346
15335
 
15347
- ### Phase 2: Parallel Execution (Default)
15336
+ ### Phase 2: Parallel Execution
15348
15337
 
15349
- ALWAYS run 3+ tools simultaneously:
15338
+ When investigating multiple independent questions, run related tools in parallel:
15350
15339
  \`\`\`
15351
- // CORRECT: Parallel
15352
15340
  glob({ pattern: "**/*.ts" })
15353
15341
  grep({ pattern: "UserService" })
15354
15342
  context7_query-docs({ query: "..." })
15355
-
15356
- // WRONG: Sequential
15357
- result1 = glob(...)
15358
- result2 = grep(...) // Wait for result1? NO!
15359
15343
  \`\`\`
15360
15344
 
15361
15345
  ### Phase 3: Structured Results
@@ -15374,12 +15358,29 @@ result2 = grep(...) // Wait for result1? NO!
15374
15358
  </results>
15375
15359
  \`\`\`
15376
15360
 
15361
+ ## Search Stop Conditions (After Research Protocol)
15362
+
15363
+ Stop when any is true:
15364
+ - enough context to answer
15365
+ - repeated information across sources
15366
+ - two rounds with no new data
15367
+ - a direct answer is found
15368
+
15369
+ ## Evidence Check (Before Answering)
15370
+
15371
+ - Every claim has a source (file:line, URL, snippet)
15372
+ - Avoid speculation; say "can't answer with available evidence" when needed
15373
+
15374
+ ## Investigate Before Answering
15375
+
15376
+ - Read files before making claims about them
15377
+
15377
15378
  ## Tool Strategy
15378
15379
 
15379
15380
  | Need | Tool |
15380
15381
  |------|------|
15381
15382
  | Type/Symbol info | LSP (goto_definition, find_references) |
15382
- | Structural patterns | ast_grep_search |
15383
+ | Structural patterns | ast_grep_find_code |
15383
15384
  | Text patterns | grep |
15384
15385
  | File discovery | glob |
15385
15386
  | Git history | bash (git log, git blame) |
@@ -15389,19 +15390,11 @@ result2 = grep(...) // Wait for result1? NO!
15389
15390
 
15390
15391
  ## External System Data (DB/API/3rd-party)
15391
15392
 
15392
- When asked to retrieve raw data from external systems (MongoDB/Stripe/etc.):
15393
- - Prefer targeted queries over broad dumps
15394
- - Summarize findings; avoid flooding the orchestrator with raw records
15393
+ When asked to retrieve raw data from external systems:
15394
+ - Prefer targeted queries
15395
+ - Summarize findings; avoid raw dumps
15395
15396
  - Redact secrets and personal data
15396
- - Provide minimal evidence and a concise summary
15397
- - Note any access limitations or missing context
15398
-
15399
- ## Documentation Discovery (External)
15400
-
15401
- 1. \`websearch("library-name official documentation")\`
15402
- 2. Version check if specified
15403
- 3. Sitemap: \`webfetch(docs_url + "/sitemap.xml")\`
15404
- 4. Targeted fetch from sitemap
15397
+ - Note access limitations or missing context
15405
15398
 
15406
15399
  ## Evidence Format
15407
15400
 
@@ -15420,103 +15413,114 @@ When operating within a feature context:
15420
15413
  })
15421
15414
  \`\`\`
15422
15415
 
15423
- ## Iron Laws
15416
+ ## Operating Rules
15424
15417
 
15425
- **Never:**
15426
- - Create, modify, or delete files (read-only)
15427
- - Answer without research first
15428
- - Execute tools sequentially when parallel possible
15429
- - Skip intent analysis
15430
-
15431
- **Always:**
15432
- - Classify request FIRST
15433
- - Run 3+ tools in parallel
15434
- - All paths MUST be absolute
15418
+ - Read-only behavior (no file changes)
15419
+ - Classify request first, then research
15420
+ - Use absolute paths for file references
15435
15421
  - Cite evidence for every claim
15436
- - Use current year (2026) in web searches
15422
+ - Use the current year when reasoning about time-sensitive information
15437
15423
  `;
15438
15424
 
15439
15425
  // src/agents/forager.ts
15440
15426
  var FORAGER_BEE_PROMPT = `# Forager (Worker/Coder)
15441
15427
 
15442
- Execute directly. NEVER delegate implementation. Work in isolation.
15428
+ You are an autonomous senior engineer. Once given direction, gather context, implement, and verify without waiting for prompts.
15429
+
15430
+ Execute directly. Work in isolation. Do not delegate implementation.
15443
15431
 
15444
- ## Blocked Tools
15432
+ ## Intent Extraction
15445
15433
 
15446
- These tools are FORBIDDEN:
15447
- - \`task\` — Orchestrator's job
15448
- - \`hive_worktree_create\` You ARE the spawned worker
15449
- - \`hive_merge\` Orchestrator's job
15434
+ | Spec says | True intent | Action |
15435
+ |---|---|---|
15436
+ | "Implement X" | Build + verify | Code → verify |
15437
+ | "Fix Y" | Root cause + minimal fix | Diagnose → fix → verify |
15438
+ | "Refactor Z" | Preserve behavior | Restructure → verify no regressions |
15439
+ | "Add tests" | Coverage | Write tests → verify |
15440
+
15441
+ ## Action Bias
15442
+
15443
+ - Act directly: implement first, explain in commit summary. Complete all steps before reporting.
15444
+ - REQUIRED: keep going until done, make decisions, course-correct on failure
15445
+
15446
+ Your tool access is scoped to your role. Use only the tools available to you.
15450
15447
 
15451
15448
  ## Allowed Research
15452
15449
 
15453
15450
  CAN use for quick lookups:
15454
15451
  - \`grep_app_searchGitHub\` — OSS patterns
15455
15452
  - \`context7_query-docs\` — Library docs
15456
- - \`ast_grep_search\` — AST patterns
15453
+ - \`ast_grep_find_code_by_rule\` — AST patterns
15454
+ - \`ast_grep_scan-code\` — Code quality scan (best-effort verification)
15455
+ - \`ast_grep_find_code\` — Find code patterns (best-effort verification)
15457
15456
  - \`glob\`, \`grep\`, \`read\` — Codebase exploration
15458
15457
 
15459
15458
  ## Resolve Before Blocking
15460
15459
 
15461
- Default to exploration, questions are LAST resort:
15460
+ Default to exploration, questions are LAST resort.
15461
+ Context inference: Before asking "what does X do?", READ X first.
15462
+
15463
+ Apply in order before reporting as blocked:
15462
15464
  1. Read the referenced files and surrounding code
15463
15465
  2. Search for similar patterns in the codebase
15464
- 3. Try a reasonable approach based on conventions
15465
-
15466
- Only report as blocked when:
15467
- - Multiple approaches failed (tried 3+)
15468
- - Decision requires business logic you can't infer
15469
- - External dependency is missing or broken
15466
+ 3. Check docs via research tools
15467
+ 4. Try a reasonable approach
15468
+ 5. Last resort: report blocked
15470
15469
 
15471
- Context inference: Before asking "what does X do?", READ X first.
15470
+ Investigate before acting. Do not speculate about code you have not read.
15472
15471
 
15473
15472
  ## Plan = READ ONLY
15474
15473
 
15475
- CRITICAL: NEVER MODIFY THE PLAN FILE
15476
- - May READ to understand task
15477
- - MUST NOT edit, modify, or update plan
15478
- - Only Orchestrator (Swarm) manages plan
15474
+ Do not modify the plan file.
15475
+ - Read to understand the task
15476
+ - Only the orchestrator manages plan updates
15479
15477
 
15480
15478
  ## Persistent Notes
15481
15479
 
15482
- For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks):
15483
- Use \`hive_context_write({ name: "learnings", content: "..." })\` to persist for future workers.
15480
+ For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks), use:
15481
+ \`hive_context_write({ name: "learnings", content: "..." })\`.
15484
15482
 
15485
- ## Execution Flow
15483
+ ## Working Rules
15486
15484
 
15487
- ### 1. Understand Task
15488
- Read spec for:
15489
- - **What to do**
15490
- - **References** (file:lines)
15491
- - **Must NOT do** (guardrails)
15492
- - **Acceptance criteria**
15485
+ - DRY/Search First: look for existing helpers before adding new code
15486
+ - Convention Following: check neighboring files and package.json, then follow existing patterns
15487
+ - Efficient Edits: read enough context before editing, batch logical edits
15488
+ - Tight Error Handling: avoid broad catches or silent defaults; propagate errors explicitly
15489
+ - Avoid Over-engineering: only implement what was asked for
15490
+ - Reversibility Preference: favor local, reversible actions; confirm before hard-to-reverse steps
15491
+ - Promise Discipline: do not commit to future work; if not done this turn, label it "Next steps"
15492
+ - No Comments: do not add comments unless the spec requests them
15493
+ - Concise Output: minimize output and avoid extra explanations unless asked
15493
15494
 
15494
- ### 2. Orient (Pre-flight Before Coding)
15495
- Before writing code:
15496
- - Confirm dependencies are satisfied and required context is present
15497
- - Read the referenced files and surrounding code
15498
- - Search for similar patterns in the codebase
15499
- - Identify the exact files/sections to touch (from references)
15500
- - Decide the first failing test you will write (TDD)
15501
- - Identify the test command(s) and inputs you will run
15502
- - Plan the minimum change to reach green
15495
+ ## Execution Loop (max 3 iterations)
15503
15496
 
15504
- ### 3. Implement
15505
- Follow spec exactly. Use references for patterns.
15497
+ EXPLORE PLAN → EXECUTE → VERIFY → LOOP
15506
15498
 
15507
- \`\`\`
15508
- read(file, { offset: line, limit: 30 }) // Check references
15509
- edit(file, { old: "...", new: "..." }) // Implement
15510
- bash("npm test") // Verify
15511
- \`\`\`
15499
+ - EXPLORE: read references, gather context, search for patterns
15500
+ - PLAN: decide the minimum change, files to touch, and verification commands
15501
+ - EXECUTE: edit using conventions, reuse helpers, batch changes
15502
+ - VERIFY: run best-effort checks (tests if available, ast_grep, lsp_diagnostics)
15503
+ - LOOP: if verification fails, diagnose and retry within the limit
15512
15504
 
15513
- ### 4. Verify
15514
- Run acceptance criteria:
15515
- - Tests pass
15516
- - Build succeeds
15517
- - lsp_diagnostics clean on changed files
15505
+ ## Progress Updates
15518
15506
 
15519
- ### 5. Report
15507
+ Provide brief status at meaningful milestones.
15508
+
15509
+ ## Completion Checklist
15510
+
15511
+ - All acceptance criteria met?
15512
+ - Best-effort verification done and recorded?
15513
+ - Re-read the spec — missed anything?
15514
+ - Said "I'll do X" — did you?
15515
+ - Plan closure: mark each intention as Done, Blocked, or Cancelled
15516
+ - Record exact commands and results
15517
+
15518
+ ## Failure Recovery
15519
+
15520
+ If 3 different approaches fail: stop edits, revert local changes, document attempts, report blocked.
15521
+ If you have tried 3 approaches and still cannot finish safely, report as blocked.
15522
+
15523
+ ## Reporting
15520
15524
 
15521
15525
  **Success:**
15522
15526
  \`\`\`
@@ -15527,7 +15531,9 @@ hive_worktree_commit({
15527
15531
  })
15528
15532
  \`\`\`
15529
15533
 
15530
- **CRITICAL: After hive_worktree_commit, STOP IMMEDIATELY.**
15534
+ Then inspect the tool response fields:
15535
+ - If \`ok=true\` and \`terminal=true\`: stop and hand off to orchestrator
15536
+ - If \`ok=false\` or \`terminal=false\`: DO NOT STOP. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
15531
15537
 
15532
15538
  **Blocked (need user decision):**
15533
15539
  \`\`\`
@@ -15544,47 +15550,11 @@ hive_worktree_commit({
15544
15550
  })
15545
15551
  \`\`\`
15546
15552
 
15547
- ## Completion Checklist
15548
-
15549
- Before calling hive_worktree_commit:
15550
- - All tests in scope are run and passing (Record exact commands and results)
15551
- - Build succeeds if required (Record exact command and result)
15552
- - lsp_diagnostics clean on changed files (Record exact command and result)
15553
- - Changes match the spec and references
15554
- - No extra scope creep or unrelated edits
15555
- - Summary includes what changed, why, and verification status
15556
-
15557
- ## Failure Recovery
15558
-
15559
- After 3 consecutive failures:
15560
- 1. STOP all further edits
15561
- 2. Document what was tried
15562
- 3. Report as blocked with options
15563
-
15564
- ## Iron Laws
15565
-
15566
- ### Docker Sandbox
15567
-
15568
- When sandbox mode is active, ALL bash commands automatically run inside a Docker container.
15569
- - Your commands are transparently wrapped — you don't need to do anything special
15570
- - File edits (Read, Write, Edit tools) still work on the host filesystem (worktree is mounted)
15571
- - If a command must run on the host (e.g., git operations), report as blocked and ask the user
15572
- - If a command fails with "docker: command not found", report as blocked — the host needs Docker installed
15573
- - For deeper Docker expertise, load \`hive_skill("docker-mastery")\`
15574
-
15575
- **Never:**
15576
- - Exceed task scope
15577
- - Modify plan file
15578
- - Use \`task\` or \`hive_worktree_create\`
15579
- - Continue after hive_worktree_commit
15580
- - Skip verification
15553
+ ## Docker Sandbox
15581
15554
 
15582
- **Always:**
15583
- - Follow references for patterns
15584
- - Run acceptance criteria
15585
- - Report blockers with options
15586
- - APPEND to notepads (never overwrite)
15587
- - lsp_diagnostics before reporting done
15555
+ When sandbox mode is active, bash commands run inside Docker; file edits still apply to the host worktree.
15556
+ If a command must run on the host or Docker is missing, report blocked.
15557
+ For deeper Docker expertise, load \`hive_skill("docker-mastery")\`.
15588
15558
  `;
15589
15559
 
15590
15560
  // src/agents/hygienic.ts
@@ -15690,6 +15660,33 @@ Before verdict, mentally execute 2-3 tasks:
15690
15660
  - Focus on worker success, not perfection
15691
15661
  `;
15692
15662
 
15663
+ // src/agents/custom-agents.ts
15664
+ function buildCustomSubagents({
15665
+ customAgents,
15666
+ baseAgents,
15667
+ autoLoadedSkills = {}
15668
+ }) {
15669
+ const derived = {};
15670
+ for (const [agentName, customConfig] of Object.entries(customAgents)) {
15671
+ const baseAgent = baseAgents[customConfig.baseAgent];
15672
+ if (!baseAgent) {
15673
+ continue;
15674
+ }
15675
+ const autoLoadedSkillsContent = autoLoadedSkills[agentName] ?? "";
15676
+ derived[agentName] = {
15677
+ model: customConfig.model ?? baseAgent.model,
15678
+ variant: customConfig.variant ?? baseAgent.variant,
15679
+ temperature: customConfig.temperature ?? baseAgent.temperature,
15680
+ mode: "subagent",
15681
+ description: customConfig.description,
15682
+ prompt: baseAgent.prompt + autoLoadedSkillsContent,
15683
+ tools: baseAgent.tools,
15684
+ permission: baseAgent.permission
15685
+ };
15686
+ }
15687
+ return derived;
15688
+ }
15689
+
15693
15690
  // src/mcp/websearch.ts
15694
15691
  var websearchMcp = {
15695
15692
  type: "remote",
@@ -15759,15 +15756,29 @@ var __getProtoOf = Object.getPrototypeOf;
15759
15756
  var __defProp2 = Object.defineProperty;
15760
15757
  var __getOwnPropNames = Object.getOwnPropertyNames;
15761
15758
  var __hasOwnProp = Object.prototype.hasOwnProperty;
15759
+ function __accessProp(key) {
15760
+ return this[key];
15761
+ }
15762
+ var __toESMCache_node;
15763
+ var __toESMCache_esm;
15762
15764
  var __toESM = (mod, isNodeMode, target) => {
15765
+ var canCache = mod != null && typeof mod === "object";
15766
+ if (canCache) {
15767
+ var cache = isNodeMode ? __toESMCache_node ??= new WeakMap : __toESMCache_esm ??= new WeakMap;
15768
+ var cached2 = cache.get(mod);
15769
+ if (cached2)
15770
+ return cached2;
15771
+ }
15763
15772
  target = mod != null ? __create(__getProtoOf(mod)) : {};
15764
15773
  const to = isNodeMode || !mod || !mod.__esModule ? __defProp2(target, "default", { value: mod, enumerable: true }) : target;
15765
15774
  for (let key of __getOwnPropNames(mod))
15766
15775
  if (!__hasOwnProp.call(to, key))
15767
15776
  __defProp2(to, key, {
15768
- get: () => mod[key],
15777
+ get: __accessProp.bind(mod, key),
15769
15778
  enumerable: true
15770
15779
  });
15780
+ if (canCache)
15781
+ cache.set(mod, to);
15771
15782
  return to;
15772
15783
  };
15773
15784
  var __commonJS = (cb, mod) => () => (mod || cb((mod = { exports: {} }).exports, mod), mod.exports);
@@ -16577,6 +16588,28 @@ var require_dist2 = __commonJS((exports) => {
16577
16588
  exports.createDeferred = deferred;
16578
16589
  exports.default = deferred;
16579
16590
  });
16591
+ var BUILT_IN_AGENT_NAMES = [
16592
+ "hive-master",
16593
+ "architect-planner",
16594
+ "swarm-orchestrator",
16595
+ "scout-researcher",
16596
+ "forager-worker",
16597
+ "hygienic-reviewer"
16598
+ ];
16599
+ var CUSTOM_AGENT_BASES = ["forager-worker", "hygienic-reviewer"];
16600
+ var CUSTOM_AGENT_RESERVED_NAMES = [
16601
+ ...BUILT_IN_AGENT_NAMES,
16602
+ "hive",
16603
+ "architect",
16604
+ "swarm",
16605
+ "scout",
16606
+ "forager",
16607
+ "hygienic",
16608
+ "receiver",
16609
+ "build",
16610
+ "plan",
16611
+ "code"
16612
+ ];
16580
16613
  var DEFAULT_AGENT_MODELS = {
16581
16614
  "hive-master": "github-copilot/claude-opus-4.5",
16582
16615
  "architect-planner": "github-copilot/gpt-5.2-codex",
@@ -16592,6 +16625,21 @@ var DEFAULT_HIVE_CONFIG = {
16592
16625
  disableMcps: [],
16593
16626
  agentMode: "unified",
16594
16627
  sandbox: "none",
16628
+ customAgents: {
16629
+ "forager-example-template": {
16630
+ baseAgent: "forager-worker",
16631
+ description: "Example template only: rename or delete this entry before use. Do not expect planners/orchestrators to select this placeholder agent as configured.",
16632
+ model: "anthropic/claude-sonnet-4-20250514",
16633
+ temperature: 0.2,
16634
+ variant: "high",
16635
+ autoLoadSkills: ["test-driven-development"]
16636
+ },
16637
+ "hygienic-example-template": {
16638
+ baseAgent: "hygienic-reviewer",
16639
+ description: "Example template only: rename or delete this entry before use. Do not expect planners/orchestrators to select this placeholder agent as configured.",
16640
+ autoLoadSkills: ["code-reviewer"]
16641
+ }
16642
+ },
16595
16643
  agents: {
16596
16644
  "hive-master": {
16597
16645
  model: DEFAULT_AGENT_MODELS["hive-master"],
@@ -16742,12 +16790,14 @@ function isLockStale(lockPath, staleTTL) {
16742
16790
  function acquireLockSync(filePath, options = {}) {
16743
16791
  const opts = { ...DEFAULT_LOCK_OPTIONS, ...options };
16744
16792
  const lockPath = getLockPath(filePath);
16793
+ const lockDir = path2.dirname(lockPath);
16745
16794
  const startTime = Date.now();
16746
16795
  const lockContent = JSON.stringify({
16747
16796
  pid: process.pid,
16748
16797
  timestamp: new Date().toISOString(),
16749
16798
  filePath
16750
16799
  });
16800
+ ensureDir(lockDir);
16751
16801
  while (true) {
16752
16802
  try {
16753
16803
  const fd = fs2.openSync(lockPath, fs2.constants.O_CREAT | fs2.constants.O_EXCL | fs2.constants.O_WRONLY);
@@ -16760,15 +16810,18 @@ function acquireLockSync(filePath, options = {}) {
16760
16810
  };
16761
16811
  } catch (err) {
16762
16812
  const error45 = err;
16763
- if (error45.code !== "EEXIST") {
16813
+ if (error45.code === "ENOENT") {
16814
+ ensureDir(lockDir);
16815
+ } else if (error45.code === "EEXIST") {
16816
+ if (isLockStale(lockPath, opts.staleLockTTL)) {
16817
+ try {
16818
+ fs2.unlinkSync(lockPath);
16819
+ continue;
16820
+ } catch {}
16821
+ }
16822
+ } else {
16764
16823
  throw error45;
16765
16824
  }
16766
- if (isLockStale(lockPath, opts.staleLockTTL)) {
16767
- try {
16768
- fs2.unlinkSync(lockPath);
16769
- continue;
16770
- } catch {}
16771
- }
16772
16825
  if (Date.now() - startTime >= opts.timeout) {
16773
16826
  throw new Error(`Failed to acquire lock on ${filePath} after ${opts.timeout}ms. ` + `Lock file: ${lockPath}`);
16774
16827
  }
@@ -16793,14 +16846,6 @@ function writeAtomic(filePath, content) {
16793
16846
  function writeJsonAtomic(filePath, data) {
16794
16847
  writeAtomic(filePath, JSON.stringify(data, null, 2));
16795
16848
  }
16796
- function writeJsonLockedSync(filePath, data, options = {}) {
16797
- const release = acquireLockSync(filePath, options);
16798
- try {
16799
- writeJsonAtomic(filePath, data);
16800
- } finally {
16801
- release();
16802
- }
16803
- }
16804
16849
  function deepMerge(target, patch) {
16805
16850
  const result = { ...target };
16806
16851
  for (const key of Object.keys(patch)) {
@@ -17329,23 +17374,31 @@ ${f.content}`).join(`
17329
17374
  }
17330
17375
  update(featureName, taskFolder, updates, lockOptions) {
17331
17376
  const statusPath = getTaskStatusPath(this.projectRoot, featureName, taskFolder);
17332
- const current = readJson(statusPath);
17333
- if (!current) {
17377
+ if (!fileExists(statusPath)) {
17334
17378
  throw new Error(`Task '${taskFolder}' not found`);
17335
17379
  }
17336
- const updated = {
17337
- ...current,
17338
- ...updates,
17339
- schemaVersion: TASK_STATUS_SCHEMA_VERSION
17340
- };
17341
- if (updates.status === "in_progress" && !current.startedAt) {
17342
- updated.startedAt = new Date().toISOString();
17343
- }
17344
- if (updates.status === "done" && !current.completedAt) {
17345
- updated.completedAt = new Date().toISOString();
17380
+ const release = acquireLockSync(statusPath, lockOptions);
17381
+ try {
17382
+ const current = readJson(statusPath);
17383
+ if (!current) {
17384
+ throw new Error(`Task '${taskFolder}' not found`);
17385
+ }
17386
+ const updated = {
17387
+ ...current,
17388
+ ...updates,
17389
+ schemaVersion: TASK_STATUS_SCHEMA_VERSION
17390
+ };
17391
+ if (updates.status === "in_progress" && !current.startedAt) {
17392
+ updated.startedAt = new Date().toISOString();
17393
+ }
17394
+ if (updates.status === "done" && !current.completedAt) {
17395
+ updated.completedAt = new Date().toISOString();
17396
+ }
17397
+ writeJsonAtomic(statusPath, updated);
17398
+ return updated;
17399
+ } finally {
17400
+ release();
17346
17401
  }
17347
- writeJsonLockedSync(statusPath, updated, lockOptions);
17348
- return updated;
17349
17402
  }
17350
17403
  patchBackgroundFields(featureName, taskFolder, patch, lockOptions) {
17351
17404
  const statusPath = getTaskStatusPath(this.projectRoot, featureName, taskFolder);
@@ -22160,6 +22213,8 @@ ${f.content}`);
22160
22213
  }
22161
22214
  class ConfigService {
22162
22215
  configPath;
22216
+ cachedConfig = null;
22217
+ cachedCustomAgentConfigs = null;
22163
22218
  constructor() {
22164
22219
  const homeDir = process.env.HOME || process.env.USERPROFILE || "";
22165
22220
  const configDir = path6.join(homeDir, ".config", "opencode");
@@ -22169,49 +22224,50 @@ class ConfigService {
22169
22224
  return this.configPath;
22170
22225
  }
22171
22226
  get() {
22227
+ if (this.cachedConfig !== null) {
22228
+ return this.cachedConfig;
22229
+ }
22172
22230
  try {
22173
22231
  if (!fs10.existsSync(this.configPath)) {
22174
- return { ...DEFAULT_HIVE_CONFIG };
22232
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22233
+ this.cachedCustomAgentConfigs = null;
22234
+ return this.cachedConfig;
22175
22235
  }
22176
22236
  const raw = fs10.readFileSync(this.configPath, "utf-8");
22177
22237
  const stored = JSON.parse(raw);
22178
- return {
22238
+ const storedCustomAgents = this.isObjectRecord(stored.customAgents) ? stored.customAgents : {};
22239
+ const mergedBuiltInAgents = BUILT_IN_AGENT_NAMES.reduce((acc, agentName) => {
22240
+ acc[agentName] = {
22241
+ ...DEFAULT_HIVE_CONFIG.agents?.[agentName],
22242
+ ...stored.agents?.[agentName]
22243
+ };
22244
+ return acc;
22245
+ }, {});
22246
+ const merged = {
22179
22247
  ...DEFAULT_HIVE_CONFIG,
22180
22248
  ...stored,
22181
22249
  agents: {
22182
22250
  ...DEFAULT_HIVE_CONFIG.agents,
22183
22251
  ...stored.agents,
22184
- "hive-master": {
22185
- ...DEFAULT_HIVE_CONFIG.agents?.["hive-master"],
22186
- ...stored.agents?.["hive-master"]
22187
- },
22188
- "architect-planner": {
22189
- ...DEFAULT_HIVE_CONFIG.agents?.["architect-planner"],
22190
- ...stored.agents?.["architect-planner"]
22191
- },
22192
- "swarm-orchestrator": {
22193
- ...DEFAULT_HIVE_CONFIG.agents?.["swarm-orchestrator"],
22194
- ...stored.agents?.["swarm-orchestrator"]
22195
- },
22196
- "scout-researcher": {
22197
- ...DEFAULT_HIVE_CONFIG.agents?.["scout-researcher"],
22198
- ...stored.agents?.["scout-researcher"]
22199
- },
22200
- "forager-worker": {
22201
- ...DEFAULT_HIVE_CONFIG.agents?.["forager-worker"],
22202
- ...stored.agents?.["forager-worker"]
22203
- },
22204
- "hygienic-reviewer": {
22205
- ...DEFAULT_HIVE_CONFIG.agents?.["hygienic-reviewer"],
22206
- ...stored.agents?.["hygienic-reviewer"]
22207
- }
22252
+ ...mergedBuiltInAgents
22253
+ },
22254
+ customAgents: {
22255
+ ...DEFAULT_HIVE_CONFIG.customAgents,
22256
+ ...storedCustomAgents
22208
22257
  }
22209
22258
  };
22259
+ this.cachedConfig = merged;
22260
+ this.cachedCustomAgentConfigs = null;
22261
+ return this.cachedConfig;
22210
22262
  } catch {
22211
- return { ...DEFAULT_HIVE_CONFIG };
22263
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22264
+ this.cachedCustomAgentConfigs = null;
22265
+ return this.cachedConfig;
22212
22266
  }
22213
22267
  }
22214
22268
  set(updates) {
22269
+ this.cachedConfig = null;
22270
+ this.cachedCustomAgentConfigs = null;
22215
22271
  const current = this.get();
22216
22272
  const merged = {
22217
22273
  ...current,
@@ -22219,13 +22275,19 @@ class ConfigService {
22219
22275
  agents: updates.agents ? {
22220
22276
  ...current.agents,
22221
22277
  ...updates.agents
22222
- } : current.agents
22278
+ } : current.agents,
22279
+ customAgents: updates.customAgents ? {
22280
+ ...current.customAgents,
22281
+ ...updates.customAgents
22282
+ } : current.customAgents
22223
22283
  };
22224
22284
  const configDir = path6.dirname(this.configPath);
22225
22285
  if (!fs10.existsSync(configDir)) {
22226
22286
  fs10.mkdirSync(configDir, { recursive: true });
22227
22287
  }
22228
22288
  fs10.writeFileSync(this.configPath, JSON.stringify(merged, null, 2));
22289
+ this.cachedConfig = merged;
22290
+ this.cachedCustomAgentConfigs = null;
22229
22291
  return merged;
22230
22292
  }
22231
22293
  exists() {
@@ -22239,20 +22301,94 @@ class ConfigService {
22239
22301
  }
22240
22302
  getAgentConfig(agent) {
22241
22303
  const config2 = this.get();
22242
- const agentConfig = config2.agents?.[agent] ?? {};
22243
- const defaultAutoLoadSkills = DEFAULT_HIVE_CONFIG.agents?.[agent]?.autoLoadSkills ?? [];
22244
- const userAutoLoadSkills = agentConfig.autoLoadSkills ?? [];
22245
- const isPlannerAgent = agent === "hive-master" || agent === "architect-planner";
22246
- const effectiveUserAutoLoadSkills = isPlannerAgent ? userAutoLoadSkills : userAutoLoadSkills.filter((skill) => skill !== "onboarding");
22247
- const effectiveDefaultAutoLoadSkills = isPlannerAgent ? defaultAutoLoadSkills : defaultAutoLoadSkills.filter((skill) => skill !== "onboarding");
22248
- const combinedAutoLoadSkills = [...effectiveDefaultAutoLoadSkills, ...effectiveUserAutoLoadSkills];
22304
+ if (this.isBuiltInAgent(agent)) {
22305
+ const agentConfig = config2.agents?.[agent] ?? {};
22306
+ const defaultAutoLoadSkills = DEFAULT_HIVE_CONFIG.agents?.[agent]?.autoLoadSkills ?? [];
22307
+ const effectiveAutoLoadSkills = this.resolveAutoLoadSkills(defaultAutoLoadSkills, agentConfig.autoLoadSkills ?? [], this.isPlannerAgent(agent));
22308
+ return {
22309
+ ...agentConfig,
22310
+ autoLoadSkills: effectiveAutoLoadSkills
22311
+ };
22312
+ }
22313
+ const customAgents = this.getCustomAgentConfigs();
22314
+ return customAgents[agent] ?? {};
22315
+ }
22316
+ getCustomAgentConfigs() {
22317
+ if (this.cachedCustomAgentConfigs !== null) {
22318
+ return this.cachedCustomAgentConfigs;
22319
+ }
22320
+ const config2 = this.get();
22321
+ const customAgents = this.isObjectRecord(config2.customAgents) ? config2.customAgents : {};
22322
+ const resolved = {};
22323
+ for (const [agentName, declaration] of Object.entries(customAgents)) {
22324
+ if (this.isReservedCustomAgentName(agentName)) {
22325
+ console.warn(`[hive:config] Skipping custom agent "${agentName}": reserved name`);
22326
+ continue;
22327
+ }
22328
+ if (!this.isObjectRecord(declaration)) {
22329
+ console.warn(`[hive:config] Skipping custom agent "${agentName}": invalid declaration (expected object)`);
22330
+ continue;
22331
+ }
22332
+ const baseAgent = declaration["baseAgent"];
22333
+ if (typeof baseAgent !== "string" || !this.isSupportedCustomAgentBase(baseAgent)) {
22334
+ console.warn(`[hive:config] Skipping custom agent "${agentName}": unsupported baseAgent "${String(baseAgent)}"`);
22335
+ continue;
22336
+ }
22337
+ const autoLoadSkillsValue = declaration["autoLoadSkills"];
22338
+ const additionalAutoLoadSkills = Array.isArray(autoLoadSkillsValue) ? autoLoadSkillsValue.filter((skill) => typeof skill === "string") : [];
22339
+ const baseAgentConfig = this.getAgentConfig(baseAgent);
22340
+ const effectiveAutoLoadSkills = this.resolveAutoLoadSkills(baseAgentConfig.autoLoadSkills ?? [], additionalAutoLoadSkills, this.isPlannerAgent(baseAgent));
22341
+ const descriptionValue = declaration["description"];
22342
+ const description = typeof descriptionValue === "string" ? descriptionValue.trim() : "";
22343
+ if (!description) {
22344
+ console.warn(`[hive:config] Skipping custom agent "${agentName}": description must be a non-empty string`);
22345
+ continue;
22346
+ }
22347
+ const modelValue = declaration["model"];
22348
+ const temperatureValue = declaration["temperature"];
22349
+ const variantValue = declaration["variant"];
22350
+ const model = typeof modelValue === "string" ? modelValue.trim() || baseAgentConfig.model : baseAgentConfig.model;
22351
+ const variant = typeof variantValue === "string" ? variantValue.trim() || baseAgentConfig.variant : baseAgentConfig.variant;
22352
+ resolved[agentName] = {
22353
+ baseAgent,
22354
+ description,
22355
+ model,
22356
+ temperature: typeof temperatureValue === "number" ? temperatureValue : baseAgentConfig.temperature,
22357
+ variant,
22358
+ autoLoadSkills: effectiveAutoLoadSkills
22359
+ };
22360
+ }
22361
+ this.cachedCustomAgentConfigs = resolved;
22362
+ return this.cachedCustomAgentConfigs;
22363
+ }
22364
+ hasConfiguredAgent(agent) {
22365
+ if (this.isBuiltInAgent(agent)) {
22366
+ return true;
22367
+ }
22368
+ const customAgents = this.getCustomAgentConfigs();
22369
+ return customAgents[agent] !== undefined;
22370
+ }
22371
+ isBuiltInAgent(agent) {
22372
+ return BUILT_IN_AGENT_NAMES.includes(agent);
22373
+ }
22374
+ isReservedCustomAgentName(agent) {
22375
+ return CUSTOM_AGENT_RESERVED_NAMES.includes(agent);
22376
+ }
22377
+ isSupportedCustomAgentBase(baseAgent) {
22378
+ return CUSTOM_AGENT_BASES.includes(baseAgent);
22379
+ }
22380
+ isPlannerAgent(agent) {
22381
+ return agent === "hive-master" || agent === "architect-planner";
22382
+ }
22383
+ isObjectRecord(value) {
22384
+ return value !== null && typeof value === "object" && !Array.isArray(value);
22385
+ }
22386
+ resolveAutoLoadSkills(baseAutoLoadSkills, additionalAutoLoadSkills, isPlannerAgent) {
22387
+ const effectiveAdditionalSkills = isPlannerAgent ? additionalAutoLoadSkills : additionalAutoLoadSkills.filter((skill) => skill !== "onboarding");
22388
+ const combinedAutoLoadSkills = [...baseAutoLoadSkills, ...effectiveAdditionalSkills];
22249
22389
  const uniqueAutoLoadSkills = Array.from(new Set(combinedAutoLoadSkills));
22250
- const disabledSkills = config2.disableSkills ?? [];
22251
- const effectiveAutoLoadSkills = uniqueAutoLoadSkills.filter((skill) => !disabledSkills.includes(skill));
22252
- return {
22253
- ...agentConfig,
22254
- autoLoadSkills: effectiveAutoLoadSkills
22255
- };
22390
+ const disabledSkills = this.getDisabledSkills();
22391
+ return uniqueAutoLoadSkills.filter((skill) => !disabledSkills.includes(skill));
22256
22392
  }
22257
22393
  isOmoSlimEnabled() {
22258
22394
  const config2 = this.get();
@@ -22273,6 +22409,22 @@ class ConfigService {
22273
22409
  const persistent = config2.persistentContainers ?? mode === "docker";
22274
22410
  return { mode, ...image && { image }, persistent };
22275
22411
  }
22412
+ getHookCadence(hookName, options) {
22413
+ const config2 = this.get();
22414
+ const configuredCadence = config2.hook_cadence?.[hookName];
22415
+ if (options?.safetyCritical && configuredCadence && configuredCadence > 1) {
22416
+ console.warn(`[hive:cadence] Ignoring cadence > 1 for safety-critical hook: ${hookName}`);
22417
+ return 1;
22418
+ }
22419
+ if (configuredCadence === undefined || configuredCadence === null) {
22420
+ return 1;
22421
+ }
22422
+ if (configuredCadence <= 0 || !Number.isInteger(configuredCadence)) {
22423
+ console.warn(`[hive:cadence] Invalid cadence ${configuredCadence} for ${hookName}, using 1`);
22424
+ return 1;
22425
+ }
22426
+ return configuredCadence;
22427
+ }
22276
22428
  }
22277
22429
 
22278
22430
  class AgentsMdService {
@@ -22729,8 +22881,16 @@ hive_worktree_commit({
22729
22881
  })
22730
22882
  \`\`\`
22731
22883
 
22732
- **CRITICAL: After calling hive_worktree_commit, you MUST STOP IMMEDIATELY.**
22733
- Do NOT continue working. Do NOT respond further. Your session is DONE.
22884
+ Then inspect the tool response fields:
22885
+ - If \`ok=true\` and \`terminal=true\`: stop the session
22886
+ - Otherwise: **DO NOT STOP**. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
22887
+
22888
+ **CRITICAL: Stop only on terminal commit result (ok=true and terminal=true).**
22889
+ If commit returns non-terminal (for example verification_required), DO NOT STOP.
22890
+ Follow result.nextAction, fix the issue, and call hive_worktree_commit again.
22891
+
22892
+ Only when commit result is terminal should you stop.
22893
+ Do NOT continue working after a terminal result. Do NOT respond further. Your session is DONE.
22734
22894
  The Hive Master will take over from here.
22735
22895
 
22736
22896
  **Summary Guidance** (used verbatim for downstream task context):
@@ -23059,23 +23219,53 @@ function writeWorkerPromptFile(feature, task, prompt, hiveDir) {
23059
23219
  }
23060
23220
 
23061
23221
  // src/hooks/variant-hook.ts
23062
- var HIVE_AGENT_NAMES = [
23063
- "hive-master",
23064
- "architect-planner",
23065
- "swarm-orchestrator",
23066
- "scout-researcher",
23067
- "forager-worker",
23068
- "hygienic-reviewer"
23069
- ];
23070
- function isHiveAgent(agent) {
23071
- return agent !== undefined && HIVE_AGENT_NAMES.includes(agent);
23072
- }
23073
23222
  function normalizeVariant(variant) {
23074
23223
  if (variant === undefined)
23075
23224
  return;
23076
23225
  const trimmed2 = variant.trim();
23077
23226
  return trimmed2.length > 0 ? trimmed2 : undefined;
23078
23227
  }
23228
+ function createVariantHook(configService) {
23229
+ return async (input, output) => {
23230
+ const { agent } = input;
23231
+ if (!agent)
23232
+ return;
23233
+ if (!configService.hasConfiguredAgent(agent))
23234
+ return;
23235
+ if (output.message.variant !== undefined)
23236
+ return;
23237
+ const agentConfig = configService.getAgentConfig(agent);
23238
+ const configuredVariant = normalizeVariant(agentConfig.variant);
23239
+ if (configuredVariant !== undefined) {
23240
+ output.message.variant = configuredVariant;
23241
+ }
23242
+ };
23243
+ }
23244
+
23245
+ // src/hooks/system-hook.ts
23246
+ var fallbackTurnCounters = {};
23247
+ function shouldExecuteHook(hookName, configService, turnCounters, options) {
23248
+ const cadence = configService?.getHookCadence(hookName, options) ?? 1;
23249
+ const counters = turnCounters ?? fallbackTurnCounters;
23250
+ counters[hookName] = (counters[hookName] || 0) + 1;
23251
+ const currentTurn = counters[hookName];
23252
+ if (cadence === 1) {
23253
+ return true;
23254
+ }
23255
+ return (currentTurn - 1) % cadence === 0;
23256
+ }
23257
+ var HIVE_SYSTEM_PROMPT = `
23258
+ ## Hive — Active Session
23259
+
23260
+ **Important:** hive_worktree_commit commits to the task branch but does NOT merge.
23261
+ Use hive_merge to integrate changes into the current branch.
23262
+ `;
23263
+
23264
+ // src/utils/compaction-prompt.ts
23265
+ var COMPACTION_RESUME_PROMPT = "You were compacted mid-task. " + "Resume by reading your worker-prompt.md (in the task worktree root) to recall your assignment. " + "Do not call status tools or re-read the full codebase. " + "Locate your last commit message or notes, then continue from where you left off.";
23266
+ function buildCompactionPrompt() {
23267
+ return COMPACTION_RESUME_PROMPT;
23268
+ }
23079
23269
 
23080
23270
  // src/index.ts
23081
23271
  function formatSkillsXml(skills) {
@@ -23097,9 +23287,8 @@ function formatSkillsXml(skills) {
23097
23287
  ${skillsXml}
23098
23288
  </available_skills>`;
23099
23289
  }
23100
- async function buildAutoLoadedSkillsContent(agentName, configService, projectRoot) {
23101
- const agentConfig = configService.getAgentConfig(agentName);
23102
- const autoLoadSkills = agentConfig.autoLoadSkills ?? [];
23290
+ async function buildAutoLoadedSkillsContent(agentName, configService, projectRoot, autoLoadSkillsOverride) {
23291
+ const autoLoadSkills = autoLoadSkillsOverride ?? (configService.getAgentConfig(agentName).autoLoadSkills ?? []);
23103
23292
  if (autoLoadSkills.length === 0) {
23104
23293
  return "";
23105
23294
  }
@@ -23162,85 +23351,6 @@ No Hive skills available.` : base + formatSkillsXml(filteredSkills);
23162
23351
  }
23163
23352
  });
23164
23353
  }
23165
- var HIVE_SYSTEM_PROMPT = `
23166
- ## Hive - Feature Development System
23167
-
23168
- Plan-first development: Write plan → User reviews → Approve → Execute tasks
23169
-
23170
- ### Tools (14 total)
23171
-
23172
- | Domain | Tools |
23173
- |--------|-------|
23174
- | Feature | hive_feature_create, hive_feature_complete |
23175
- | Plan | hive_plan_write, hive_plan_read, hive_plan_approve |
23176
- | Task | hive_tasks_sync, hive_task_create, hive_task_update |
23177
- | Worktree | hive_worktree_create, hive_worktree_commit, hive_worktree_discard |
23178
- | Merge | hive_merge |
23179
- | Context | hive_context_write |
23180
- | Status | hive_status |
23181
- | Skill | hive_skill |
23182
-
23183
- ### Workflow
23184
-
23185
- 1. \`hive_feature_create(name)\` - Create feature
23186
- 2. \`hive_plan_write(content)\` - Write plan.md
23187
- 3. User adds comments in VSCode → \`hive_plan_read\` to see them
23188
- 4. Revise plan → User approves
23189
- 5. \`hive_tasks_sync()\` - Generate tasks from plan
23190
- 6. \`hive_worktree_create(task)\` → work in worktree → \`hive_worktree_commit(task, summary)\`
23191
- 7. \`hive_merge(task)\` - Merge task branch into main (when ready)
23192
-
23193
- **Important:** \`hive_worktree_commit\` commits changes to task branch but does NOT merge.
23194
- Use \`hive_merge\` to explicitly integrate changes. Worktrees persist until manually removed.
23195
-
23196
- ### Delegated Execution
23197
-
23198
- \`hive_worktree_create\` creates worktree and spawns worker automatically:
23199
-
23200
- 1. \`hive_worktree_create(task)\` → Creates worktree + spawns Forager (Worker/Coder) worker
23201
- 2. Worker executes → calls \`hive_worktree_commit(status: "completed")\`
23202
- 3. Worker blocked → calls \`hive_worktree_commit(status: "blocked", blocker: {...})\`
23203
-
23204
- **Handling blocked workers:**
23205
- 1. Check blockers with \`hive_status()\`
23206
- 2. Read the blocker info (reason, options, recommendation, context)
23207
- 3. Ask user via \`question()\` tool - NEVER plain text
23208
- 4. Resume with \`hive_worktree_create(task, continueFrom: "blocked", decision: answer)\`
23209
-
23210
- **CRITICAL**: When resuming, a NEW worker spawns in the SAME worktree.
23211
- The previous worker's progress is preserved. Include the user's decision in the \`decision\` parameter.
23212
-
23213
- **After task() Returns:**
23214
- - task() is BLOCKING — when it returns, the worker is DONE
23215
- - Call \`hive_status()\` immediately to check the new task state and find next runnable tasks
23216
- - No notifications or polling needed — the result is already available
23217
-
23218
- **For research**, use MCP tools or parallel exploration:
23219
- - \`grep_app_searchGitHub\` - Find code in OSS
23220
- - \`context7_query-docs\` - Library documentation
23221
- - \`websearch_web_search_exa\` - Web search via Exa
23222
- - \`ast_grep_search\` - AST-based search
23223
- - For exploratory fan-out, load \`hive_skill("parallel-exploration")\` and use multiple \`task()\` calls in the same message
23224
-
23225
- ### Planning Phase - Context Management REQUIRED
23226
-
23227
- As you research and plan, CONTINUOUSLY save findings using \`hive_context_write\`:
23228
- - Research findings (API patterns, library docs, codebase structure)
23229
- - User preferences ("we use Zustand, not Redux")
23230
- - Rejected alternatives ("tried X, too complex")
23231
- - Architecture decisions ("auth lives in /lib/auth")
23232
-
23233
- **Update existing context files** when new info emerges - dont create duplicates.
23234
-
23235
- \`hive_tasks_sync\` parses \`### N. Task Name\` headers.
23236
-
23237
- ### Execution Phase - Stay Aligned
23238
-
23239
- During execution, call \`hive_status\` periodically to:
23240
- - Check current progress and pending work
23241
- - See context files to read
23242
- - Get reminded of next actions
23243
- `;
23244
23354
  var plugin = async (ctx) => {
23245
23355
  const { directory, client } = ctx;
23246
23356
  const featureService = new FeatureService(directory);
@@ -23295,6 +23405,7 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23295
23405
  }
23296
23406
  return null;
23297
23407
  };
23408
+ const turnCounters = {};
23298
23409
  const checkDependencies = (feature, taskFolder) => {
23299
23410
  const taskStatus = taskService.getRawStatus(feature, taskFolder);
23300
23411
  if (!taskStatus) {
@@ -23332,8 +23443,397 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23332
23443
  }
23333
23444
  return { allowed: true };
23334
23445
  };
23446
+ const respond = (payload) => JSON.stringify(payload, null, 2);
23447
+ const buildWorktreeLaunchResponse = async ({
23448
+ feature,
23449
+ task,
23450
+ taskInfo,
23451
+ worktree,
23452
+ continueFrom,
23453
+ decision
23454
+ }) => {
23455
+ taskService.update(feature, task, {
23456
+ status: "in_progress",
23457
+ baseCommit: worktree.commit
23458
+ });
23459
+ const planResult = planService.read(feature);
23460
+ const allTasks = taskService.list(feature);
23461
+ const rawContextFiles = contextService.list(feature).map((f) => ({
23462
+ name: f.name,
23463
+ content: f.content
23464
+ }));
23465
+ const rawPreviousTasks = allTasks.filter((t) => t.status === "done" && t.summary).map((t) => ({ name: t.folder, summary: t.summary }));
23466
+ const taskBudgetResult = applyTaskBudget(rawPreviousTasks, { ...DEFAULT_BUDGET, feature });
23467
+ const contextBudgetResult = applyContextBudget(rawContextFiles, { ...DEFAULT_BUDGET, feature });
23468
+ const contextFiles = contextBudgetResult.files.map((f) => ({
23469
+ name: f.name,
23470
+ content: f.content
23471
+ }));
23472
+ const previousTasks = taskBudgetResult.tasks.map((t) => ({
23473
+ name: t.name,
23474
+ summary: t.summary
23475
+ }));
23476
+ const truncationEvents = [
23477
+ ...taskBudgetResult.truncationEvents,
23478
+ ...contextBudgetResult.truncationEvents
23479
+ ];
23480
+ const droppedTasksHint = taskBudgetResult.droppedTasksHint;
23481
+ const taskOrder = parseInt(taskInfo.folder.match(/^(\d+)/)?.[1] || "0", 10);
23482
+ const status = taskService.getRawStatus(feature, task);
23483
+ const dependsOn = status?.dependsOn ?? [];
23484
+ const specContent = taskService.buildSpecContent({
23485
+ featureName: feature,
23486
+ task: {
23487
+ folder: task,
23488
+ name: taskInfo.planTitle ?? taskInfo.name,
23489
+ order: taskOrder,
23490
+ description: undefined
23491
+ },
23492
+ dependsOn,
23493
+ allTasks: allTasks.map((t) => ({
23494
+ folder: t.folder,
23495
+ name: t.name,
23496
+ order: parseInt(t.folder.match(/^(\d+)/)?.[1] || "0", 10)
23497
+ })),
23498
+ planContent: planResult?.content ?? null,
23499
+ contextFiles,
23500
+ completedTasks: previousTasks
23501
+ });
23502
+ taskService.writeSpec(feature, task, specContent);
23503
+ const workerPrompt = buildWorkerPrompt({
23504
+ feature,
23505
+ task,
23506
+ taskOrder,
23507
+ worktreePath: worktree.path,
23508
+ branch: worktree.branch,
23509
+ plan: planResult?.content || "No plan available",
23510
+ contextFiles,
23511
+ spec: specContent,
23512
+ previousTasks,
23513
+ continueFrom: continueFrom === "blocked" ? {
23514
+ status: "blocked",
23515
+ previousSummary: taskInfo.summary || "No previous summary",
23516
+ decision: decision || "No decision provided"
23517
+ } : undefined
23518
+ });
23519
+ const customAgentConfigs = configService.getCustomAgentConfigs();
23520
+ const defaultAgent = "forager-worker";
23521
+ const eligibleAgents = [
23522
+ {
23523
+ name: defaultAgent,
23524
+ baseAgent: defaultAgent,
23525
+ description: "Default implementation worker"
23526
+ },
23527
+ ...Object.entries(customAgentConfigs).filter(([, config2]) => config2.baseAgent === "forager-worker").sort(([left], [right]) => left.localeCompare(right)).map(([name, config2]) => ({
23528
+ name,
23529
+ baseAgent: config2.baseAgent,
23530
+ description: config2.description
23531
+ }))
23532
+ ];
23533
+ const agent = defaultAgent;
23534
+ const rawStatus = taskService.getRawStatus(feature, task);
23535
+ const attempt = (rawStatus?.workerSession?.attempt || 0) + 1;
23536
+ const idempotencyKey = `hive-${feature}-${task}-${attempt}`;
23537
+ taskService.patchBackgroundFields(feature, task, { idempotencyKey });
23538
+ const contextContent = contextFiles.map((f) => f.content).join(`
23539
+
23540
+ `);
23541
+ const previousTasksContent = previousTasks.map((t) => `- **${t.name}**: ${t.summary}`).join(`
23542
+ `);
23543
+ const promptMeta = calculatePromptMeta({
23544
+ plan: planResult?.content || "",
23545
+ context: contextContent,
23546
+ previousTasks: previousTasksContent,
23547
+ spec: specContent,
23548
+ workerPrompt
23549
+ });
23550
+ const hiveDir = path8.join(directory, ".hive");
23551
+ const workerPromptPath = writeWorkerPromptFile(feature, task, workerPrompt, hiveDir);
23552
+ const relativePromptPath = normalizePath(path8.relative(directory, workerPromptPath));
23553
+ const PREVIEW_MAX_LENGTH = 200;
23554
+ const workerPromptPreview = workerPrompt.length > PREVIEW_MAX_LENGTH ? workerPrompt.slice(0, PREVIEW_MAX_LENGTH) + "..." : workerPrompt;
23555
+ const taskToolPrompt = `Follow instructions in @${relativePromptPath}`;
23556
+ const taskToolInstructions = `## Delegation Required
23557
+
23558
+ Choose one of the eligible forager-derived agents below.
23559
+ Default to \`${defaultAgent}\` if no specialist is a better match.
23560
+
23561
+ ${eligibleAgents.map((candidate) => `- \`${candidate.name}\` — ${candidate.description}`).join(`
23562
+ `)}
23563
+
23564
+ Use OpenCode's built-in \`task\` tool with the chosen \`subagent_type\` and the provided \`taskToolCall.prompt\` value.
23565
+ \`taskToolCall.subagent_type\` is prefilled with the default for convenience; override it when a specialist in \`eligibleAgents\` is a better match.
23566
+
23567
+ \`\`\`
23568
+ task({
23569
+ subagent_type: "<chosen-agent>",
23570
+ description: "Hive: ${task}",
23571
+ prompt: "${taskToolPrompt}"
23572
+ })
23573
+ \`\`\`
23574
+
23575
+ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not inline the file contents.
23576
+
23577
+ `;
23578
+ const responseBase = {
23579
+ success: true,
23580
+ terminal: false,
23581
+ worktreePath: worktree.path,
23582
+ branch: worktree.branch,
23583
+ mode: "delegate",
23584
+ agent,
23585
+ defaultAgent,
23586
+ eligibleAgents,
23587
+ delegationRequired: true,
23588
+ workerPromptPath: relativePromptPath,
23589
+ workerPromptPreview,
23590
+ taskPromptMode: "opencode-at-file",
23591
+ taskToolCall: {
23592
+ subagent_type: agent,
23593
+ description: `Hive: ${task}`,
23594
+ prompt: taskToolPrompt
23595
+ },
23596
+ instructions: taskToolInstructions
23597
+ };
23598
+ const jsonPayload = JSON.stringify(responseBase, null, 2);
23599
+ const payloadMeta = calculatePayloadMeta({
23600
+ jsonPayload,
23601
+ promptInlined: false,
23602
+ promptReferencedByFile: true
23603
+ });
23604
+ const sizeWarnings = checkWarnings(promptMeta, payloadMeta);
23605
+ const budgetWarnings = truncationEvents.map((event) => ({
23606
+ type: event.type,
23607
+ severity: "info",
23608
+ message: event.message,
23609
+ affected: event.affected,
23610
+ count: event.count
23611
+ }));
23612
+ const allWarnings = [...sizeWarnings, ...budgetWarnings];
23613
+ return respond({
23614
+ ...responseBase,
23615
+ promptMeta,
23616
+ payloadMeta,
23617
+ budgetApplied: {
23618
+ maxTasks: DEFAULT_BUDGET.maxTasks,
23619
+ maxSummaryChars: DEFAULT_BUDGET.maxSummaryChars,
23620
+ maxContextChars: DEFAULT_BUDGET.maxContextChars,
23621
+ maxTotalContextChars: DEFAULT_BUDGET.maxTotalContextChars,
23622
+ tasksIncluded: previousTasks.length,
23623
+ tasksDropped: rawPreviousTasks.length - previousTasks.length,
23624
+ droppedTasksHint
23625
+ },
23626
+ warnings: allWarnings.length > 0 ? allWarnings : undefined
23627
+ });
23628
+ };
23629
+ const executeWorktreeStart = async ({
23630
+ task,
23631
+ feature: explicitFeature
23632
+ }) => {
23633
+ const feature = resolveFeature(explicitFeature);
23634
+ if (!feature) {
23635
+ return respond({
23636
+ success: false,
23637
+ terminal: true,
23638
+ error: "No feature specified. Create a feature or provide feature param.",
23639
+ reason: "feature_required",
23640
+ task,
23641
+ hints: [
23642
+ "Create/select a feature first or pass the feature parameter explicitly.",
23643
+ "Use hive_status to inspect the active feature state before retrying."
23644
+ ]
23645
+ });
23646
+ }
23647
+ const blockedMessage = checkBlocked(feature);
23648
+ if (blockedMessage) {
23649
+ return respond({
23650
+ success: false,
23651
+ terminal: true,
23652
+ error: blockedMessage,
23653
+ reason: "feature_blocked",
23654
+ feature,
23655
+ task,
23656
+ hints: [
23657
+ "Wait for the human to unblock the feature before retrying.",
23658
+ `If approved, remove .hive/features/${feature}/BLOCKED and retry hive_worktree_start.`
23659
+ ]
23660
+ });
23661
+ }
23662
+ const taskInfo = taskService.get(feature, task);
23663
+ if (!taskInfo) {
23664
+ return respond({
23665
+ success: false,
23666
+ terminal: true,
23667
+ error: `Task "${task}" not found`,
23668
+ reason: "task_not_found",
23669
+ feature,
23670
+ task,
23671
+ hints: [
23672
+ "Check the task folder name in tasks.json or hive_status output.",
23673
+ "Run hive_tasks_sync if the approved plan has changed and tasks need regeneration."
23674
+ ]
23675
+ });
23676
+ }
23677
+ if (taskInfo.status === "done") {
23678
+ return respond({
23679
+ success: false,
23680
+ terminal: true,
23681
+ error: `Task "${task}" is already completed (status: done). It cannot be restarted.`,
23682
+ currentStatus: "done",
23683
+ hints: [
23684
+ "Use hive_merge to integrate the completed task branch if not already merged.",
23685
+ "Use hive_status to see all task states and find the next runnable task."
23686
+ ]
23687
+ });
23688
+ }
23689
+ if (taskInfo.status === "blocked") {
23690
+ return respond({
23691
+ success: false,
23692
+ terminal: true,
23693
+ error: `Task "${task}" is blocked and must be resumed with hive_worktree_create using continueFrom: 'blocked'.`,
23694
+ currentStatus: "blocked",
23695
+ feature,
23696
+ task,
23697
+ hints: [
23698
+ 'Ask the user the blocker question, then call hive_worktree_create({ task, continueFrom: "blocked", decision }).',
23699
+ "Use hive_status to inspect blocker details before retrying."
23700
+ ]
23701
+ });
23702
+ }
23703
+ const depCheck = checkDependencies(feature, task);
23704
+ if (!depCheck.allowed) {
23705
+ return respond({
23706
+ success: false,
23707
+ terminal: true,
23708
+ reason: "dependencies_not_done",
23709
+ feature,
23710
+ task,
23711
+ error: depCheck.error,
23712
+ hints: [
23713
+ "Complete the required dependencies before starting this task.",
23714
+ "Use hive_status to see current task states."
23715
+ ]
23716
+ });
23717
+ }
23718
+ const worktree = await worktreeService.create(feature, task);
23719
+ return buildWorktreeLaunchResponse({ feature, task, taskInfo, worktree });
23720
+ };
23721
+ const executeBlockedResume = async ({
23722
+ task,
23723
+ feature: explicitFeature,
23724
+ continueFrom,
23725
+ decision
23726
+ }) => {
23727
+ const feature = resolveFeature(explicitFeature);
23728
+ if (!feature) {
23729
+ return respond({
23730
+ success: false,
23731
+ terminal: true,
23732
+ error: "No feature specified. Create a feature or provide feature param.",
23733
+ reason: "feature_required",
23734
+ task,
23735
+ hints: [
23736
+ "Create/select a feature first or pass the feature parameter explicitly.",
23737
+ "Use hive_status to inspect the active feature state before retrying."
23738
+ ]
23739
+ });
23740
+ }
23741
+ const blockedMessage = checkBlocked(feature);
23742
+ if (blockedMessage) {
23743
+ return respond({
23744
+ success: false,
23745
+ terminal: true,
23746
+ error: blockedMessage,
23747
+ reason: "feature_blocked",
23748
+ feature,
23749
+ task,
23750
+ hints: [
23751
+ "Wait for the human to unblock the feature before retrying.",
23752
+ `If approved, remove .hive/features/${feature}/BLOCKED and retry hive_worktree_create.`
23753
+ ]
23754
+ });
23755
+ }
23756
+ const taskInfo = taskService.get(feature, task);
23757
+ if (!taskInfo) {
23758
+ return respond({
23759
+ success: false,
23760
+ terminal: true,
23761
+ error: `Task "${task}" not found`,
23762
+ reason: "task_not_found",
23763
+ feature,
23764
+ task,
23765
+ hints: [
23766
+ "Check the task folder name in tasks.json or hive_status output.",
23767
+ "Run hive_tasks_sync if the approved plan has changed and tasks need regeneration."
23768
+ ]
23769
+ });
23770
+ }
23771
+ if (taskInfo.status === "done") {
23772
+ return respond({
23773
+ success: false,
23774
+ terminal: true,
23775
+ error: `Task "${task}" is already completed (status: done). It cannot be restarted.`,
23776
+ currentStatus: "done",
23777
+ hints: [
23778
+ "Use hive_merge to integrate the completed task branch if not already merged.",
23779
+ "Use hive_status to see all task states and find the next runnable task."
23780
+ ]
23781
+ });
23782
+ }
23783
+ if (continueFrom !== "blocked") {
23784
+ return respond({
23785
+ success: false,
23786
+ terminal: true,
23787
+ error: "hive_worktree_create is only for resuming blocked tasks.",
23788
+ reason: "blocked_resume_required",
23789
+ currentStatus: taskInfo.status,
23790
+ feature,
23791
+ task,
23792
+ hints: [
23793
+ "Use hive_worktree_start({ feature, task }) to start a pending or in-progress task normally.",
23794
+ 'Use hive_worktree_create({ task, continueFrom: "blocked", decision }) only after hive_status confirms the task is blocked.'
23795
+ ]
23796
+ });
23797
+ }
23798
+ if (taskInfo.status !== "blocked") {
23799
+ return respond({
23800
+ success: false,
23801
+ terminal: true,
23802
+ error: `continueFrom: 'blocked' was specified but task "${task}" is not in blocked state (current status: ${taskInfo.status}).`,
23803
+ currentStatus: taskInfo.status,
23804
+ hints: [
23805
+ "Use hive_worktree_start({ feature, task }) for normal starts or re-dispatch.",
23806
+ "Use hive_status to verify the current task status before retrying."
23807
+ ]
23808
+ });
23809
+ }
23810
+ const worktree = await worktreeService.get(feature, task);
23811
+ if (!worktree) {
23812
+ return respond({
23813
+ success: false,
23814
+ terminal: true,
23815
+ error: `Cannot resume blocked task "${task}": no existing worktree record found.`,
23816
+ currentStatus: taskInfo.status,
23817
+ hints: [
23818
+ "The worktree may have been removed manually. Use hive_worktree_discard to reset the task to pending, then restart it with hive_worktree_start.",
23819
+ "Use hive_status to inspect the current state of the task and its worktree."
23820
+ ]
23821
+ });
23822
+ }
23823
+ return buildWorktreeLaunchResponse({
23824
+ feature,
23825
+ task,
23826
+ taskInfo,
23827
+ worktree,
23828
+ continueFrom,
23829
+ decision
23830
+ });
23831
+ };
23335
23832
  return {
23336
23833
  "experimental.chat.system.transform": async (input, output) => {
23834
+ if (!shouldExecuteHook("experimental.chat.system.transform", configService, turnCounters)) {
23835
+ return;
23836
+ }
23337
23837
  output.system.push(HIVE_SYSTEM_PROMPT);
23338
23838
  const activeFeature = resolveFeature();
23339
23839
  if (activeFeature) {
@@ -23354,21 +23854,14 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23354
23854
  }
23355
23855
  }
23356
23856
  },
23357
- "chat.message": async (input, output) => {
23358
- const { agent } = input;
23359
- if (!agent)
23360
- return;
23361
- if (!isHiveAgent(agent))
23362
- return;
23363
- if (output.message.variant !== undefined)
23364
- return;
23365
- const agentConfig = configService.getAgentConfig(agent);
23366
- const configuredVariant = normalizeVariant(agentConfig.variant);
23367
- if (configuredVariant !== undefined) {
23368
- output.message.variant = configuredVariant;
23369
- }
23857
+ "experimental.session.compacting": async (_input, output) => {
23858
+ output.context.push(buildCompactionPrompt());
23370
23859
  },
23860
+ "chat.message": createVariantHook(configService),
23371
23861
  "tool.execute.before": async (input, output) => {
23862
+ if (!shouldExecuteHook("tool.execute.before", configService, turnCounters, { safetyCritical: true })) {
23863
+ return;
23864
+ }
23372
23865
  if (input.tool !== "bash")
23373
23866
  return;
23374
23867
  const sandboxConfig = configService.getSandboxConfig();
@@ -23557,7 +24050,7 @@ Expand your Discovery section and try again.`;
23557
24050
  return "Error: No feature specified. Create a feature or provide feature param.";
23558
24051
  const folder = taskService.create(feature, name, order);
23559
24052
  return `Manual task created: ${folder}
23560
- Reminder: start work with hive_worktree_create to use its worktree, and ensure any subagents work in that worktree too.`;
24053
+ Reminder: start work with hive_worktree_start to use its worktree, and ensure any subagents work in that worktree too.`;
23561
24054
  }
23562
24055
  }),
23563
24056
  hive_task_update: tool({
@@ -23579,202 +24072,30 @@ Reminder: start work with hive_worktree_create to use its worktree, and ensure a
23579
24072
  return `Task "${task}" updated: status=${updated.status}`;
23580
24073
  }
23581
24074
  }),
24075
+ hive_worktree_start: tool({
24076
+ description: "Create worktree and begin work on pending/in-progress task. Spawns Forager worker automatically.",
24077
+ args: {
24078
+ task: tool.schema.string().describe("Task folder name"),
24079
+ feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)")
24080
+ },
24081
+ async execute({ task, feature: explicitFeature }) {
24082
+ return executeWorktreeStart({ task, feature: explicitFeature });
24083
+ }
24084
+ }),
23582
24085
  hive_worktree_create: tool({
23583
- description: "Create worktree and begin work on task. Spawns Forager worker automatically.",
24086
+ description: "Resume a blocked task in its existing worktree. Spawns Forager worker automatically.",
23584
24087
  args: {
23585
24088
  task: tool.schema.string().describe("Task folder name"),
23586
24089
  feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)"),
23587
24090
  continueFrom: tool.schema.enum(["blocked"]).optional().describe("Resume a blocked task"),
23588
24091
  decision: tool.schema.string().optional().describe("Answer to blocker question when continuing")
23589
24092
  },
23590
- async execute({ task, feature: explicitFeature, continueFrom, decision }, toolContext) {
23591
- const feature = resolveFeature(explicitFeature);
23592
- if (!feature)
23593
- return "Error: No feature specified. Create a feature or provide feature param.";
23594
- const blockedMessage = checkBlocked(feature);
23595
- if (blockedMessage)
23596
- return blockedMessage;
23597
- const taskInfo = taskService.get(feature, task);
23598
- if (!taskInfo)
23599
- return `Error: Task "${task}" not found`;
23600
- if (taskInfo.status === "done")
23601
- return "Error: Task already completed";
23602
- if (continueFrom === "blocked" && taskInfo.status !== "blocked") {
23603
- return "Error: Task is not in blocked state. Use without continueFrom.";
23604
- }
23605
- if (continueFrom !== "blocked") {
23606
- const depCheck = checkDependencies(feature, task);
23607
- if (!depCheck.allowed) {
23608
- return JSON.stringify({
23609
- success: false,
23610
- error: depCheck.error,
23611
- hints: [
23612
- "Complete the required dependencies before starting this task.",
23613
- "Use hive_status to see current task states."
23614
- ]
23615
- });
23616
- }
23617
- }
23618
- let worktree;
23619
- if (continueFrom === "blocked") {
23620
- worktree = await worktreeService.get(feature, task);
23621
- if (!worktree)
23622
- return "Error: No worktree found for blocked task";
23623
- } else {
23624
- worktree = await worktreeService.create(feature, task);
23625
- }
23626
- taskService.update(feature, task, {
23627
- status: "in_progress",
23628
- baseCommit: worktree.commit
23629
- });
23630
- const planResult = planService.read(feature);
23631
- const allTasks = taskService.list(feature);
23632
- const rawContextFiles = contextService.list(feature).map((f) => ({
23633
- name: f.name,
23634
- content: f.content
23635
- }));
23636
- const rawPreviousTasks = allTasks.filter((t) => t.status === "done" && t.summary).map((t) => ({ name: t.folder, summary: t.summary }));
23637
- const taskBudgetResult = applyTaskBudget(rawPreviousTasks, { ...DEFAULT_BUDGET, feature });
23638
- const contextBudgetResult = applyContextBudget(rawContextFiles, { ...DEFAULT_BUDGET, feature });
23639
- const contextFiles = contextBudgetResult.files.map((f) => ({
23640
- name: f.name,
23641
- content: f.content
23642
- }));
23643
- const previousTasks = taskBudgetResult.tasks.map((t) => ({
23644
- name: t.name,
23645
- summary: t.summary
23646
- }));
23647
- const truncationEvents = [
23648
- ...taskBudgetResult.truncationEvents,
23649
- ...contextBudgetResult.truncationEvents
23650
- ];
23651
- const droppedTasksHint = taskBudgetResult.droppedTasksHint;
23652
- const taskOrder = parseInt(taskInfo.folder.match(/^(\d+)/)?.[1] || "0", 10);
23653
- const status = taskService.getRawStatus(feature, task);
23654
- const dependsOn = status?.dependsOn ?? [];
23655
- const specContent = taskService.buildSpecContent({
23656
- featureName: feature,
23657
- task: {
23658
- folder: task,
23659
- name: taskInfo.planTitle ?? taskInfo.name,
23660
- order: taskOrder,
23661
- description: undefined
23662
- },
23663
- dependsOn,
23664
- allTasks: allTasks.map((t) => ({
23665
- folder: t.folder,
23666
- name: t.name,
23667
- order: parseInt(t.folder.match(/^(\d+)/)?.[1] || "0", 10)
23668
- })),
23669
- planContent: planResult?.content ?? null,
23670
- contextFiles,
23671
- completedTasks: previousTasks
23672
- });
23673
- taskService.writeSpec(feature, task, specContent);
23674
- const workerPrompt = buildWorkerPrompt({
23675
- feature,
23676
- task,
23677
- taskOrder: parseInt(taskInfo.folder.match(/^(\d+)/)?.[1] || "0", 10),
23678
- worktreePath: worktree.path,
23679
- branch: worktree.branch,
23680
- plan: planResult?.content || "No plan available",
23681
- contextFiles,
23682
- spec: specContent,
23683
- previousTasks,
23684
- continueFrom: continueFrom === "blocked" ? {
23685
- status: "blocked",
23686
- previousSummary: taskInfo.summary || "No previous summary",
23687
- decision: decision || "No decision provided"
23688
- } : undefined
23689
- });
23690
- const agent = "forager-worker";
23691
- const rawStatus = taskService.getRawStatus(feature, task);
23692
- const attempt = (rawStatus?.workerSession?.attempt || 0) + 1;
23693
- const idempotencyKey = `hive-${feature}-${task}-${attempt}`;
23694
- taskService.patchBackgroundFields(feature, task, { idempotencyKey });
23695
- const contextContent = contextFiles.map((f) => f.content).join(`
23696
-
23697
- `);
23698
- const previousTasksContent = previousTasks.map((t) => `- **${t.name}**: ${t.summary}`).join(`
23699
- `);
23700
- const promptMeta = calculatePromptMeta({
23701
- plan: planResult?.content || "",
23702
- context: contextContent,
23703
- previousTasks: previousTasksContent,
23704
- spec: specContent,
23705
- workerPrompt
23706
- });
23707
- const hiveDir = path8.join(directory, ".hive");
23708
- const workerPromptPath = writeWorkerPromptFile(feature, task, workerPrompt, hiveDir);
23709
- const relativePromptPath = normalizePath(path8.relative(directory, workerPromptPath));
23710
- const PREVIEW_MAX_LENGTH = 200;
23711
- const workerPromptPreview = workerPrompt.length > PREVIEW_MAX_LENGTH ? workerPrompt.slice(0, PREVIEW_MAX_LENGTH) + "..." : workerPrompt;
23712
- const taskToolPrompt = `Follow instructions in @${relativePromptPath}`;
23713
- const taskToolInstructions = `## Delegation Required
23714
-
23715
- Use OpenCode's built-in \`task\` tool to spawn a Forager (Worker/Coder) worker.
23716
-
23717
- \`\`\`
23718
- task({
23719
- subagent_type: "${agent}",
23720
- description: "Hive: ${task}",
23721
- prompt: "${taskToolPrompt}"
23722
- })
23723
- \`\`\`
23724
-
23725
- Use the \`@path\` attachment syntax in the prompt to reference the file. Do not inline the file contents.
23726
-
23727
- `;
23728
- const responseBase = {
23729
- worktreePath: worktree.path,
23730
- branch: worktree.branch,
23731
- mode: "delegate",
23732
- agent,
23733
- delegationRequired: true,
23734
- workerPromptPath: relativePromptPath,
23735
- workerPromptPreview,
23736
- taskPromptMode: "opencode-at-file",
23737
- taskToolCall: {
23738
- subagent_type: agent,
23739
- description: `Hive: ${task}`,
23740
- prompt: taskToolPrompt
23741
- },
23742
- instructions: taskToolInstructions
23743
- };
23744
- const jsonPayload = JSON.stringify(responseBase, null, 2);
23745
- const payloadMeta = calculatePayloadMeta({
23746
- jsonPayload,
23747
- promptInlined: false,
23748
- promptReferencedByFile: true
23749
- });
23750
- const sizeWarnings = checkWarnings(promptMeta, payloadMeta);
23751
- const budgetWarnings = truncationEvents.map((event) => ({
23752
- type: event.type,
23753
- severity: "info",
23754
- message: event.message,
23755
- affected: event.affected,
23756
- count: event.count
23757
- }));
23758
- const allWarnings = [...sizeWarnings, ...budgetWarnings];
23759
- return JSON.stringify({
23760
- ...responseBase,
23761
- promptMeta,
23762
- payloadMeta,
23763
- budgetApplied: {
23764
- maxTasks: DEFAULT_BUDGET.maxTasks,
23765
- maxSummaryChars: DEFAULT_BUDGET.maxSummaryChars,
23766
- maxContextChars: DEFAULT_BUDGET.maxContextChars,
23767
- maxTotalContextChars: DEFAULT_BUDGET.maxTotalContextChars,
23768
- tasksIncluded: previousTasks.length,
23769
- tasksDropped: rawPreviousTasks.length - previousTasks.length,
23770
- droppedTasksHint
23771
- },
23772
- warnings: allWarnings.length > 0 ? allWarnings : undefined
23773
- }, null, 2);
24093
+ async execute({ task, feature: explicitFeature, continueFrom, decision }) {
24094
+ return executeBlockedResume({ task, feature: explicitFeature, continueFrom, decision });
23774
24095
  }
23775
24096
  }),
23776
24097
  hive_worktree_commit: tool({
23777
- description: "Complete task: commit changes to branch, write report. Supports blocked/failed/partial status for worker communication.",
24098
+ description: "Complete task: commit changes to branch, write report. Supports blocked/failed/partial status for worker communication. Returns JSON with ok/terminal semantics for worker control flow.",
23778
24099
  args: {
23779
24100
  task: tool.schema.string().describe("Task folder name"),
23780
24101
  summary: tool.schema.string().describe("Summary of what was done"),
@@ -23788,29 +24109,54 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
23788
24109
  feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)")
23789
24110
  },
23790
24111
  async execute({ task, summary, status = "completed", blocker, feature: explicitFeature }) {
24112
+ const respond2 = (payload) => JSON.stringify(payload, null, 2);
23791
24113
  const feature = resolveFeature(explicitFeature);
23792
- if (!feature)
23793
- return "Error: No feature specified. Create a feature or provide feature param.";
24114
+ if (!feature) {
24115
+ return respond2({
24116
+ ok: false,
24117
+ terminal: false,
24118
+ status: "error",
24119
+ reason: "feature_required",
24120
+ task,
24121
+ taskState: "unknown",
24122
+ message: "No feature specified. Create a feature or provide feature param.",
24123
+ nextAction: "Provide feature explicitly or create/select an active feature, then retry hive_worktree_commit."
24124
+ });
24125
+ }
23794
24126
  const taskInfo = taskService.get(feature, task);
23795
- if (!taskInfo)
23796
- return `Error: Task "${task}" not found`;
23797
- if (taskInfo.status !== "in_progress" && taskInfo.status !== "blocked")
23798
- return "Error: Task not in progress";
24127
+ if (!taskInfo) {
24128
+ return respond2({
24129
+ ok: false,
24130
+ terminal: false,
24131
+ status: "error",
24132
+ reason: "task_not_found",
24133
+ feature,
24134
+ task,
24135
+ taskState: "unknown",
24136
+ message: `Task "${task}" not found`,
24137
+ nextAction: "Check the task folder name in your worker-prompt.md and retry hive_worktree_commit with the correct task id."
24138
+ });
24139
+ }
24140
+ if (taskInfo.status !== "in_progress" && taskInfo.status !== "blocked") {
24141
+ return respond2({
24142
+ ok: false,
24143
+ terminal: false,
24144
+ status: "error",
24145
+ reason: "invalid_task_state",
24146
+ feature,
24147
+ task,
24148
+ taskState: taskInfo.status,
24149
+ message: "Task not in progress",
24150
+ nextAction: "Only in_progress or blocked tasks can be committed. Start/resume the task first."
24151
+ });
24152
+ }
24153
+ let verificationNote;
23799
24154
  if (status === "completed") {
23800
- const verificationKeywords = ["test", "build", "lint", "vitest", "jest", "npm run", "pnpm", "cargo", "pytest", "verified", "passes", "succeeds"];
24155
+ const verificationKeywords = ["test", "build", "lint", "vitest", "jest", "npm run", "pnpm", "cargo", "pytest", "verified", "passes", "succeeds", "ast-grep", "scan"];
23801
24156
  const summaryLower = summary.toLowerCase();
23802
24157
  const hasVerificationMention = verificationKeywords.some((kw) => summaryLower.includes(kw));
23803
24158
  if (!hasVerificationMention) {
23804
- return `BLOCKED: No verification detected in summary.
23805
-
23806
- Before claiming completion, you must:
23807
- 1. Run tests (vitest, jest, pytest, etc.)
23808
- 2. Run build (npm run build, cargo build, etc.)
23809
- 3. Include verification results in summary
23810
-
23811
- Example summary: "Implemented auth flow. Tests pass (vitest). Build succeeds."
23812
-
23813
- Re-run with updated summary showing verification results.`;
24159
+ verificationNote = "No verification evidence in summary. Orchestrator should run build+test after merge.";
23814
24160
  }
23815
24161
  }
23816
24162
  if (status === "blocked") {
@@ -23820,16 +24166,42 @@ Re-run with updated summary showing verification results.`;
23820
24166
  blocker
23821
24167
  });
23822
24168
  const worktree2 = await worktreeService.get(feature, task);
23823
- return JSON.stringify({
24169
+ return respond2({
24170
+ ok: true,
24171
+ terminal: true,
23824
24172
  status: "blocked",
24173
+ reason: "user_decision_required",
24174
+ feature,
23825
24175
  task,
24176
+ taskState: "blocked",
23826
24177
  summary,
23827
24178
  blocker,
23828
24179
  worktreePath: worktree2?.path,
23829
- message: 'Task blocked. Hive Master will ask user and resume with hive_worktree_create(continueFrom: "blocked", decision: answer)'
23830
- }, null, 2);
24180
+ branch: worktree2?.branch,
24181
+ message: 'Task blocked. Hive Master will ask user and resume with hive_worktree_create(continueFrom: "blocked", decision: answer)',
24182
+ nextAction: 'Wait for orchestrator to collect user decision and resume with continueFrom: "blocked".'
24183
+ });
23831
24184
  }
23832
24185
  const commitResult = await worktreeService.commitChanges(feature, task, `hive(${task}): ${summary.slice(0, 50)}`);
24186
+ if (status === "completed" && !commitResult.committed && commitResult.message !== "No changes to commit") {
24187
+ return respond2({
24188
+ ok: false,
24189
+ terminal: false,
24190
+ status: "rejected",
24191
+ reason: "commit_failed",
24192
+ feature,
24193
+ task,
24194
+ taskState: taskInfo.status,
24195
+ summary,
24196
+ commit: {
24197
+ committed: commitResult.committed,
24198
+ sha: commitResult.sha,
24199
+ message: commitResult.message
24200
+ },
24201
+ message: `Commit failed: ${commitResult.message || "unknown error"}`,
24202
+ nextAction: "Resolve git/worktree issue, then call hive_worktree_commit again."
24203
+ });
24204
+ }
23833
24205
  const diff = await worktreeService.getDiff(feature, task);
23834
24206
  const statusLabel = status === "completed" ? "success" : status;
23835
24207
  const reportLines = [
@@ -23859,13 +24231,31 @@ Re-run with updated summary showing verification results.`;
23859
24231
  } else {
23860
24232
  reportLines.push("---", "", "## Changes", "", "_No file changes detected_", "");
23861
24233
  }
23862
- taskService.writeReport(feature, task, reportLines.join(`
24234
+ const reportPath = taskService.writeReport(feature, task, reportLines.join(`
23863
24235
  `));
23864
24236
  const finalStatus = status === "completed" ? "done" : status;
23865
24237
  taskService.update(feature, task, { status: finalStatus, summary });
23866
24238
  const worktree = await worktreeService.get(feature, task);
23867
- return `Task "${task}" ${status}. Changes committed to branch ${worktree?.branch || "unknown"}.
23868
- Use hive_merge to integrate changes. Worktree preserved at ${worktree?.path || "unknown"}.`;
24239
+ return respond2({
24240
+ ok: true,
24241
+ terminal: true,
24242
+ status,
24243
+ feature,
24244
+ task,
24245
+ taskState: finalStatus,
24246
+ summary,
24247
+ ...verificationNote && { verificationNote },
24248
+ commit: {
24249
+ committed: commitResult.committed,
24250
+ sha: commitResult.sha,
24251
+ message: commitResult.message
24252
+ },
24253
+ worktreePath: worktree?.path,
24254
+ branch: worktree?.branch,
24255
+ reportPath,
24256
+ message: `Task "${task}" ${status}.`,
24257
+ nextAction: "Use hive_merge to integrate changes. Worktree is preserved for review."
24258
+ });
23869
24259
  }
23870
24260
  }),
23871
24261
  hive_worktree_discard: tool({
@@ -23936,23 +24326,40 @@ Files changed: ${result.filesChanged?.length || 0}`;
23936
24326
  feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
23937
24327
  },
23938
24328
  async execute({ feature: explicitFeature }) {
24329
+ const respond2 = (payload) => JSON.stringify(payload, null, 2);
23939
24330
  const feature = resolveFeature(explicitFeature);
23940
24331
  if (!feature) {
23941
- return JSON.stringify({
24332
+ return respond2({
24333
+ success: false,
24334
+ terminal: true,
24335
+ reason: "feature_required",
23942
24336
  error: "No feature specified and no active feature found",
23943
24337
  hint: "Use hive_feature_create to create a new feature"
23944
24338
  });
23945
24339
  }
23946
24340
  const featureData = featureService.get(feature);
23947
24341
  if (!featureData) {
23948
- return JSON.stringify({
24342
+ return respond2({
24343
+ success: false,
24344
+ terminal: true,
24345
+ reason: "feature_not_found",
23949
24346
  error: `Feature '${feature}' not found`,
23950
24347
  availableFeatures: featureService.list()
23951
24348
  });
23952
24349
  }
23953
24350
  const blocked = checkBlocked(feature);
23954
- if (blocked)
23955
- return blocked;
24351
+ if (blocked) {
24352
+ return respond2({
24353
+ success: false,
24354
+ terminal: true,
24355
+ blocked: true,
24356
+ error: blocked,
24357
+ hints: [
24358
+ "Read the blocker details and resolve them before retrying hive_status.",
24359
+ `Remove .hive/features/${feature}/BLOCKED once the blocker is resolved.`
24360
+ ]
24361
+ });
24362
+ }
23956
24363
  const plan = planService.read(feature);
23957
24364
  const tasks = taskService.list(feature);
23958
24365
  const contextFiles = contextService.list(feature);
@@ -24009,7 +24416,7 @@ Files changed: ${result.filesChanged?.length || 0}`;
24009
24416
  return `${runnableTasks.length} tasks are ready to start in parallel: ${runnableTasks.join(", ")}`;
24010
24417
  }
24011
24418
  if (runnableTasks.length === 1) {
24012
- return `Start next task with hive_worktree_create: ${runnableTasks[0]}`;
24419
+ return `Start next task with hive_worktree_start: ${runnableTasks[0]}`;
24013
24420
  }
24014
24421
  const pending = tasks2.find((t) => t.status === "pending");
24015
24422
  if (pending) {
@@ -24018,7 +24425,7 @@ Files changed: ${result.filesChanged?.length || 0}`;
24018
24425
  return "All tasks complete. Review and merge or complete feature.";
24019
24426
  };
24020
24427
  const planStatus = featureData.status === "planning" ? "draft" : featureData.status === "approved" ? "approved" : featureData.status === "executing" ? "locked" : "none";
24021
- return JSON.stringify({
24428
+ return respond2({
24022
24429
  feature: {
24023
24430
  name: feature,
24024
24431
  status: featureData.status,
@@ -24101,7 +24508,43 @@ ${result.diff}
24101
24508
  }
24102
24509
  },
24103
24510
  config: async (opencodeConfig) => {
24511
+ function agentTools(allowed) {
24512
+ const allHiveTools = [
24513
+ "hive_feature_create",
24514
+ "hive_feature_complete",
24515
+ "hive_plan_write",
24516
+ "hive_plan_read",
24517
+ "hive_plan_approve",
24518
+ "hive_tasks_sync",
24519
+ "hive_task_create",
24520
+ "hive_task_update",
24521
+ "hive_worktree_start",
24522
+ "hive_worktree_create",
24523
+ "hive_worktree_commit",
24524
+ "hive_worktree_discard",
24525
+ "hive_merge",
24526
+ "hive_context_write",
24527
+ "hive_status",
24528
+ "hive_skill",
24529
+ "hive_agents_md"
24530
+ ];
24531
+ const result = {};
24532
+ for (const tool3 of allHiveTools) {
24533
+ if (!allowed.includes(tool3)) {
24534
+ result[tool3] = false;
24535
+ }
24536
+ }
24537
+ return result;
24538
+ }
24104
24539
  configService.init();
24540
+ const hiveConfigData = configService.get();
24541
+ const agentMode = hiveConfigData.agentMode ?? "unified";
24542
+ const customAgentConfigs = configService.getCustomAgentConfigs();
24543
+ const customSubagentAppendix = Object.keys(customAgentConfigs).length === 0 ? "" : `
24544
+
24545
+ ## Configured Custom Subagents
24546
+ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompare(right)).map(([name, config2]) => `- \`${name}\` — derived from \`${config2.baseAgent}\`; ${config2.description}`).join(`
24547
+ `)}`;
24105
24548
  const hiveUserConfig = configService.getAgentConfig("hive-master");
24106
24549
  const hiveAutoLoadedSkills = await buildAutoLoadedSkillsContent("hive-master", configService, directory);
24107
24550
  const hiveConfig = {
@@ -24109,7 +24552,7 @@ ${result.diff}
24109
24552
  variant: hiveUserConfig.variant,
24110
24553
  temperature: hiveUserConfig.temperature ?? 0.5,
24111
24554
  description: "Hive (Hybrid) - Plans + orchestrates. Detects phase, loads skills on-demand.",
24112
- prompt: QUEEN_BEE_PROMPT + hiveAutoLoadedSkills,
24555
+ prompt: QUEEN_BEE_PROMPT + hiveAutoLoadedSkills + (agentMode === "unified" ? customSubagentAppendix : ""),
24113
24556
  permission: {
24114
24557
  question: "allow",
24115
24558
  skill: "allow",
@@ -24124,7 +24567,8 @@ ${result.diff}
24124
24567
  variant: architectUserConfig.variant,
24125
24568
  temperature: architectUserConfig.temperature ?? 0.7,
24126
24569
  description: "Architect (Planner) - Plans features, interviews, writes plans. NEVER executes.",
24127
- prompt: ARCHITECT_BEE_PROMPT + architectAutoLoadedSkills,
24570
+ prompt: ARCHITECT_BEE_PROMPT + architectAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
24571
+ tools: agentTools(["hive_feature_create", "hive_plan_write", "hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
24128
24572
  permission: {
24129
24573
  edit: "deny",
24130
24574
  task: "allow",
@@ -24142,7 +24586,24 @@ ${result.diff}
24142
24586
  variant: swarmUserConfig.variant,
24143
24587
  temperature: swarmUserConfig.temperature ?? 0.5,
24144
24588
  description: "Swarm (Orchestrator) - Orchestrates execution. Delegates, spawns workers, verifies, merges.",
24145
- prompt: SWARM_BEE_PROMPT + swarmAutoLoadedSkills,
24589
+ prompt: SWARM_BEE_PROMPT + swarmAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
24590
+ tools: agentTools([
24591
+ "hive_feature_create",
24592
+ "hive_feature_complete",
24593
+ "hive_plan_read",
24594
+ "hive_plan_approve",
24595
+ "hive_tasks_sync",
24596
+ "hive_task_create",
24597
+ "hive_task_update",
24598
+ "hive_worktree_start",
24599
+ "hive_worktree_create",
24600
+ "hive_worktree_discard",
24601
+ "hive_merge",
24602
+ "hive_context_write",
24603
+ "hive_status",
24604
+ "hive_skill",
24605
+ "hive_agents_md"
24606
+ ]),
24146
24607
  permission: {
24147
24608
  question: "allow",
24148
24609
  skill: "allow",
@@ -24159,6 +24620,7 @@ ${result.diff}
24159
24620
  mode: "subagent",
24160
24621
  description: "Scout (Explorer/Researcher/Retrieval) - Researches codebase + external docs/data.",
24161
24622
  prompt: SCOUT_BEE_PROMPT + scoutAutoLoadedSkills,
24623
+ tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
24162
24624
  permission: {
24163
24625
  edit: "deny",
24164
24626
  task: "deny",
@@ -24176,6 +24638,7 @@ ${result.diff}
24176
24638
  mode: "subagent",
24177
24639
  description: "Forager (Worker/Coder) - Executes tasks directly in isolated worktrees. Never delegates.",
24178
24640
  prompt: FORAGER_BEE_PROMPT + foragerAutoLoadedSkills,
24641
+ tools: agentTools(["hive_plan_read", "hive_worktree_commit", "hive_context_write", "hive_skill"]),
24179
24642
  permission: {
24180
24643
  task: "deny",
24181
24644
  delegate: "deny",
@@ -24191,6 +24654,7 @@ ${result.diff}
24191
24654
  mode: "subagent",
24192
24655
  description: "Hygienic (Consultant/Reviewer/Debugger) - Reviews plan documentation quality. OKAY/REJECT verdict.",
24193
24656
  prompt: HYGIENIC_BEE_PROMPT + hygienicAutoLoadedSkills,
24657
+ tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
24194
24658
  permission: {
24195
24659
  edit: "deny",
24196
24660
  task: "deny",
@@ -24198,21 +24662,44 @@ ${result.diff}
24198
24662
  skill: "allow"
24199
24663
  }
24200
24664
  };
24201
- const hiveConfigData = configService.get();
24202
- const agentMode = hiveConfigData.agentMode ?? "unified";
24665
+ const builtInAgentConfigs = {
24666
+ "hive-master": hiveConfig,
24667
+ "architect-planner": architectConfig,
24668
+ "swarm-orchestrator": swarmConfig,
24669
+ "scout-researcher": scoutConfig,
24670
+ "forager-worker": foragerConfig,
24671
+ "hygienic-reviewer": hygienicConfig
24672
+ };
24673
+ const customAutoLoadedSkills = Object.fromEntries(await Promise.all(Object.entries(customAgentConfigs).map(async ([customAgentName, customAgentConfig]) => {
24674
+ const inheritedBaseSkills = customAgentConfig.baseAgent === "forager-worker" ? foragerUserConfig.autoLoadSkills ?? [] : hygienicUserConfig.autoLoadSkills ?? [];
24675
+ const deltaAutoLoadSkills = (customAgentConfig.autoLoadSkills ?? []).filter((skill) => !inheritedBaseSkills.includes(skill));
24676
+ return [
24677
+ customAgentName,
24678
+ await buildAutoLoadedSkillsContent(customAgentName, configService, directory, deltaAutoLoadSkills)
24679
+ ];
24680
+ })));
24681
+ const customSubagents = buildCustomSubagents({
24682
+ customAgents: customAgentConfigs,
24683
+ baseAgents: {
24684
+ "forager-worker": foragerConfig,
24685
+ "hygienic-reviewer": hygienicConfig
24686
+ },
24687
+ autoLoadedSkills: customAutoLoadedSkills
24688
+ });
24203
24689
  const allAgents = {};
24204
24690
  if (agentMode === "unified") {
24205
- allAgents["hive-master"] = hiveConfig;
24206
- allAgents["scout-researcher"] = scoutConfig;
24207
- allAgents["forager-worker"] = foragerConfig;
24208
- allAgents["hygienic-reviewer"] = hygienicConfig;
24691
+ allAgents["hive-master"] = builtInAgentConfigs["hive-master"];
24692
+ allAgents["scout-researcher"] = builtInAgentConfigs["scout-researcher"];
24693
+ allAgents["forager-worker"] = builtInAgentConfigs["forager-worker"];
24694
+ allAgents["hygienic-reviewer"] = builtInAgentConfigs["hygienic-reviewer"];
24209
24695
  } else {
24210
- allAgents["architect-planner"] = architectConfig;
24211
- allAgents["swarm-orchestrator"] = swarmConfig;
24212
- allAgents["scout-researcher"] = scoutConfig;
24213
- allAgents["forager-worker"] = foragerConfig;
24214
- allAgents["hygienic-reviewer"] = hygienicConfig;
24696
+ allAgents["architect-planner"] = builtInAgentConfigs["architect-planner"];
24697
+ allAgents["swarm-orchestrator"] = builtInAgentConfigs["swarm-orchestrator"];
24698
+ allAgents["scout-researcher"] = builtInAgentConfigs["scout-researcher"];
24699
+ allAgents["forager-worker"] = builtInAgentConfigs["forager-worker"];
24700
+ allAgents["hygienic-reviewer"] = builtInAgentConfigs["hygienic-reviewer"];
24215
24701
  }
24702
+ Object.assign(allAgents, customSubagents);
24216
24703
  const configAgent = opencodeConfig.agent;
24217
24704
  if (!configAgent) {
24218
24705
  opencodeConfig.agent = allAgents;