opencode-hive 1.2.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,12 +1,16 @@
1
1
  import { createRequire } from "node:module";
2
2
  var __defProp = Object.defineProperty;
3
+ var __returnValue = (v) => v;
4
+ function __exportSetter(name, newValue) {
5
+ this[name] = __returnValue.bind(null, newValue);
6
+ }
3
7
  var __export = (target, all) => {
4
8
  for (var name in all)
5
9
  __defProp(target, name, {
6
10
  get: all[name],
7
11
  enumerable: true,
8
12
  configurable: true,
9
- set: (newValue) => all[name] = () => newValue
13
+ set: __exportSetter.bind(all, name)
10
14
  });
11
15
  };
12
16
  var __require = /* @__PURE__ */ createRequire(import.meta.url);
@@ -14823,7 +14827,6 @@ Run \`hive_status()\` to detect phase:
14823
14827
  ## Universal (Always Active)
14824
14828
 
14825
14829
  ### Intent Classification
14826
-
14827
14830
  | Intent | Signals | Action |
14828
14831
  |--------|---------|--------|
14829
14832
  | Trivial | Single file, <10 lines | Do directly |
@@ -14831,22 +14834,34 @@ Run \`hive_status()\` to detect phase:
14831
14834
  | Complex | 3+ files, multi-step | Full discovery → plan/delegate |
14832
14835
  | Research | Internal codebase exploration OR external data | Delegate to Scout (Explorer/Researcher/Retrieval) |
14833
14836
 
14834
- ### Canonical Delegation Threshold
14837
+ Intent Verbalization — verbalize before acting:
14838
+ > "I detect [type] intent — [reason]. Approach: [route]."
14839
+
14840
+ | Surface Form | True Intent | Routing |
14841
+ |--------------|-------------|---------|
14842
+ | "Quick change" | Trivial | Act directly |
14843
+ | "Add new flow" | Complex | Plan/delegate |
14844
+ | "Where is X?" | Research | Scout exploration |
14845
+ | "Should we…?" | Ambiguous | Ask a question |
14835
14846
 
14847
+ ### Canonical Delegation Threshold
14836
14848
  - Delegate to Scout when you cannot name the file path upfront, expect to inspect 2+ files, or the question is open-ended ("how/where does X work?").
14837
14849
  - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
14838
14850
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
14839
14851
 
14840
14852
  ### Delegation
14841
-
14842
14853
  - Single-scout research → \`task({ subagent_type: "scout-researcher", prompt: "..." })\`
14843
14854
  - Parallel exploration → Load \`hive_skill("parallel-exploration")\` and follow the task mode delegation guidance.
14844
14855
  - Implementation → \`hive_worktree_create({ task: "01-task-name" })\` (creates worktree + Forager)
14845
14856
 
14846
14857
  During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
14847
14858
 
14848
- ### Context Persistence
14859
+ **When NOT to delegate:**
14860
+ - Single-file, <10-line changes — do directly
14861
+ - Sequential operations where you need the result of step N for step N+1
14862
+ - Questions answerable with one grep + one file read
14849
14863
 
14864
+ ### Context Persistence
14850
14865
  Save discoveries with \`hive_context_write\`:
14851
14866
  - Requirements and decisions
14852
14867
  - User preferences
@@ -14855,14 +14870,12 @@ Save discoveries with \`hive_context_write\`:
14855
14870
  When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
14856
14871
 
14857
14872
  ### Checkpoints
14858
-
14859
14873
  Before major transitions, verify:
14860
14874
  - [ ] Objective clear?
14861
14875
  - [ ] Scope defined?
14862
14876
  - [ ] No critical ambiguities?
14863
14877
 
14864
14878
  ### Turn Termination
14865
-
14866
14879
  Valid endings:
14867
14880
  - Ask a concrete question
14868
14881
  - Update draft + ask a concrete question
@@ -14875,58 +14888,46 @@ NEVER end with:
14875
14888
  - "When you're ready..."
14876
14889
 
14877
14890
  ### Loading Skills (On-Demand)
14878
-
14879
14891
  Load when detailed guidance needed:
14880
- - \`hive_skill("brainstorming")\` - exploring ideas and requirements
14881
- - \`hive_skill("writing-plans")\` - structuring implementation plans
14882
- - \`hive_skill("dispatching-parallel-agents")\` - parallel task delegation
14883
- - \`hive_skill("parallel-exploration")\` - parallel read-only research via task() (Scout fan-out)
14884
- - \`hive_skill("executing-plans")\` - step-by-step plan execution
14885
- - \`hive_skill("systematic-debugging")\` - encountering bugs, test failures, or unexpected behavior
14886
- - \`hive_skill("test-driven-development")\` - implementing features with TDD approach
14887
- - \`hive_skill("verification-before-completion")\` - before claiming work is complete or creating PRs
14888
- - \`hive_skill("docker-mastery")\` - working with Docker containers, debugging, docker-compose
14889
- - \`hive_skill("agents-md-mastery")\` - bootstrapping/updating AGENTS.md, quality review
14890
-
14891
- Load ONE skill at a time. Only when you need guidance beyond this prompt.
14892
-
14892
+ | Skill | Use when |
14893
+ |-------|----------|
14894
+ | \`hive_skill("brainstorming")\` | Exploring ideas and requirements |
14895
+ | \`hive_skill("writing-plans")\` | Structuring implementation plans |
14896
+ | \`hive_skill("dispatching-parallel-agents")\` | Parallel task delegation |
14897
+ | \`hive_skill("parallel-exploration")\` | Parallel read-only research via task() |
14898
+ | \`hive_skill("executing-plans")\` | Step-by-step plan execution |
14899
+ | \`hive_skill("systematic-debugging")\` | Bugs, test failures, unexpected behavior |
14900
+ | \`hive_skill("test-driven-development")\` | TDD approach |
14901
+ | \`hive_skill("verification-before-completion")\` | Before claiming work is complete or creating PRs |
14902
+ | \`hive_skill("docker-mastery")\` | Docker containers, debugging, compose |
14903
+ | \`hive_skill("agents-md-mastery")\` | AGENTS.md updates, quality review |
14904
+
14905
+ Load one skill at a time, only when guidance is needed.
14893
14906
  ---
14894
14907
 
14895
14908
  ## Planning Phase
14896
-
14897
14909
  *Active when: no approved plan exists*
14898
14910
 
14899
14911
  ### When to Load Skills
14900
-
14901
14912
  - Exploring vague requirements → \`hive_skill("brainstorming")\`
14902
14913
  - Writing detailed plan → \`hive_skill("writing-plans")\`
14903
14914
 
14904
- ### AI-Slop Flags
14905
-
14906
- | Pattern | Ask |
14907
- |---------|-----|
14915
+ ### Planning Checks
14916
+ | Signal | Prompt |
14917
+ |--------|--------|
14908
14918
  | Scope inflation | "Should I include X?" |
14909
14919
  | Premature abstraction | "Abstract or inline?" |
14910
14920
  | Over-validation | "Minimal or comprehensive checks?" |
14911
-
14912
- ### Challenge User Assumptions
14913
-
14914
- When a proposal relies on fragile assumptions, challenge them explicitly:
14915
-
14916
- - Identify the assumption and state it plainly.
14917
- - Ask what changes if the assumption is wrong.
14918
- - Offer a lean fallback that still meets core goals.
14921
+ | Fragile assumption | "If this assumption is wrong, what changes?" |
14919
14922
 
14920
14923
  ### Gap Classification
14921
-
14922
14924
  | Gap | Action |
14923
14925
  |-----|--------|
14924
- | Critical | ASK immediately |
14926
+ | Critical | Ask immediately |
14925
14927
  | Minor | Fix silently, note in summary |
14926
14928
  | Ambiguous | Apply default, disclose |
14927
14929
 
14928
14930
  ### Plan Output
14929
-
14930
14931
  \`\`\`
14931
14932
  hive_feature_create({ name: "feature-name" })
14932
14933
  hive_plan_write({ content: "..." })
@@ -14937,12 +14938,11 @@ Plan includes: Discovery (Original Request, Interview Summary, Research Findings
14937
14938
  - References must use file:line format
14938
14939
  - Verify must include exact command + expected output
14939
14940
 
14940
- Each task MUST declare dependencies with **Depends on**:
14941
+ Each task declares dependencies with **Depends on**:
14941
14942
  - **Depends on**: none for no dependencies / parallel starts
14942
14943
  - **Depends on**: 1, 3 for explicit task-number dependencies
14943
14944
 
14944
14945
  ### After Plan Written
14945
-
14946
14946
  Ask user via \`question()\`: "Plan complete. Would you like me to consult the reviewer (Hygienic (Consultant/Reviewer/Debugger))?"
14947
14947
 
14948
14948
  If yes → \`task({ subagent_type: "hygienic", prompt: "Review plan..." })\`
@@ -14950,65 +14950,68 @@ If yes → \`task({ subagent_type: "hygienic", prompt: "Review plan..." })\`
14950
14950
  After review decision, offer execution choice (subagent-driven vs parallel session) consistent with writing-plans.
14951
14951
 
14952
14952
  ### Planning Iron Laws
14953
-
14954
- - Research BEFORE asking (use \`hive_skill("parallel-exploration")\` for multi-domain research)
14953
+ - Research before asking (use \`hive_skill("parallel-exploration")\` for multi-domain research)
14955
14954
  - Save draft as working memory
14956
- - Don't implement (no edits/worktrees). Read-only exploration is allowed (local tools + Scout via task()).
14955
+ - Keep planning read-only (local tools + Scout via task())
14956
+ Read-only exploration is allowed.
14957
+ Search stop conditions: enough context, repeated info, 2 rounds with no new data, or a direct answer found.
14957
14958
 
14958
14959
  ---
14959
14960
 
14960
14961
  ## Orchestration Phase
14961
-
14962
14962
  *Active when: plan approved, tasks exist*
14963
14963
 
14964
14964
  ### Task Dependencies (Always Check)
14965
-
14966
14965
  Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **blockedBy** info.
14967
14966
  - Only start tasks from the runnable list
14968
14967
  - When 2+ tasks are runnable: ask operator via \`question()\` before parallelizing
14969
14968
  - Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`
14970
14969
 
14971
14970
  ### When to Load Skills
14972
-
14973
14971
  - Multiple independent tasks → \`hive_skill("dispatching-parallel-agents")\`
14974
14972
  - Executing step-by-step → \`hive_skill("executing-plans")\`
14975
14973
 
14976
14974
  ### Delegation Check
14977
-
14978
14975
  1. Is there a specialized agent?
14979
14976
  2. Does this need external data? → Scout
14980
- 3. Default: DELEGATE (don't do yourself)
14977
+ 3. Default: delegate (don't do yourself)
14981
14978
 
14982
14979
  ### Worker Spawning
14983
-
14984
14980
  \`\`\`
14985
14981
  hive_worktree_create({ task: "01-task-name" }) // Creates worktree + Forager
14986
14982
  \`\`\`
14987
14983
 
14988
14984
  ### After Delegation
14989
-
14990
- 1. \`task()\` is BLOCKING — when it returns, the worker is DONE
14985
+ 1. \`task()\` is blocking — when it returns, the worker is done
14991
14986
  2. Immediately call \`hive_status()\` to check the new task state and find next runnable tasks
14992
- 3. If task status is blocked: read blocker info \`question()\` user decision resume with \`continueFrom: "blocked"\`
14993
- 4. Do NOT wait for notifications or poll the result is already available when \`task()\` returns
14994
-
14995
- ### Failure Recovery
14996
-
14997
- 3 failures on same task revert ask user
14987
+ 3. The delegated task MUST transition out of \`in_progress\`; if still \`in_progress\`, resume worker with explicit instruction to resolve commit response and retry
14988
+ 4. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
14989
+ 5. Skip polling — the result is available when \`task()\` returns
14990
+
14991
+ ### Batch Merge + Verify Workflow
14992
+ When multiple tasks are in flight, prefer **batch completion** over per-task verification:
14993
+ 1. Dispatch a batch of runnable tasks (ask user before parallelizing).
14994
+ 2. Wait for all workers to finish.
14995
+ 3. Merge each completed task branch into the current branch.
14996
+ 4. Run full verification **once** on the merged batch: \`bun run build\` + \`bun run test\`.
14997
+ 5. If verification fails, diagnose with full context. Fix directly or re-dispatch targeted tasks as needed.
14998
+
14999
+ ### Failure Recovery (After 3 Consecutive Failures)
15000
+ 1. Stop all further edits
15001
+ 2. Revert to last known working state
15002
+ 3. Document what was attempted
15003
+ 4. Ask user via question() — present options and context
14998
15004
 
14999
15005
  ### Merge Strategy
15000
-
15001
- \`hive_merge({ task: "01-task-name" })\` after verification
15006
+ \`hive_merge({ task: "01-task-name" })\` for each task after the batch completes, then verify the batch
15002
15007
 
15003
15008
  ### Post-Batch Review (Hygienic)
15004
-
15005
15009
  After completing and merging a batch:
15006
15010
  1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15007
15011
  2. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
15008
15012
  3. Apply feedback before starting the next batch.
15009
15013
 
15010
15014
  ### AGENTS.md Maintenance
15011
-
15012
15015
  After feature completion (all tasks merged):
15013
15016
  1. Sync context findings to AGENTS.md: \`hive_agents_md({ action: "sync", feature: "feature-name" })\`
15014
15017
  2. Review the proposed diff with the user
@@ -15019,36 +15022,36 @@ For projects without AGENTS.md:
15019
15022
  - Generates initial documentation from codebase analysis
15020
15023
 
15021
15024
  ### Orchestration Iron Laws
15022
-
15023
15025
  - Delegate by default
15024
15026
  - Verify all work completes
15025
- - Use \`question()\` for user input (NEVER plain text)
15027
+ - Use \`question()\` for user input (never plain text)
15026
15028
 
15027
15029
  ---
15028
15030
 
15029
15031
  ## Iron Laws (Both Phases)
15030
-
15031
15032
  **Always:**
15032
- - Detect phase FIRST via hive_status
15033
- - Follow ONLY the active phase section
15033
+ - Detect phase first via hive_status
15034
+ - Follow the active phase section
15034
15035
  - Delegate research to Scout, implementation to Forager
15035
15036
  - Ask user before consulting Hygienic (Consultant/Reviewer/Debugger)
15036
15037
  - Load skills on-demand, one at a time
15037
15038
 
15039
+ Investigate before acting: read referenced files before making claims about them.
15040
+
15038
15041
  ### Hard Blocks
15039
15042
 
15040
- NEVER violate:
15043
+ Do not violate:
15041
15044
  - Skip phase detection
15042
15045
  - Mix planning and orchestration in same action
15043
15046
  - Auto-load all skills at start
15044
15047
 
15045
15048
  ### Anti-Patterns
15046
15049
 
15047
- BLOCKING violations:
15050
+ Blocking violations:
15048
15051
  - Ending a turn without a next action
15049
15052
  - Asking for user input in plain text instead of question()
15050
15053
 
15051
- **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
15054
+ **User Input:** Use \`question()\` tool for any user input — structured prompts get structured responses. Plain text questions are easily missed or misinterpreted.
15052
15055
  `;
15053
15056
 
15054
15057
  // src/agents/architect.ts
@@ -15189,28 +15192,18 @@ Delegate by default. Work yourself only when trivial.
15189
15192
  | Open-ended | "Improve", "Refactor" | Assess first, then delegate |
15190
15193
  | Ambiguous | Unclear scope | Ask ONE clarifying question |
15191
15194
 
15192
- ## Delegation Check (Before Acting)
15195
+ Intent Verbalization: "I detect [type] intent — [reason]. Routing to [action]."
15193
15196
 
15194
- ### Task Dependencies (Always Check)
15195
-
15196
- Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **blockedBy** info.
15197
- - Only start tasks from the runnable list
15198
- - When 2+ tasks are runnable: ask operator via \`question()\` before parallelizing
15199
- - Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`
15200
-
15201
- When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
15197
+ ## Delegation Check (Before Acting)
15202
15198
 
15203
- If tasks are missing **Depends on** metadata, ask the planner to revise the plan before executing.
15199
+ Use \`hive_status()\` to see runnable tasks and blockedBy info. Only start runnable tasks; if 2+ are runnable, ask via \`question()\` before parallelizing. Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`. If tasks lack **Depends on** metadata, ask the planner to revise. If Scout returns substantial findings (3+ files, architecture patterns, or key decisions), persist them via \`hive_context_write\`.
15204
15200
 
15205
- ### Standard Checks
15201
+ Standard checks: specialized agent? can I do it myself for sure? external system data (DBs/APIs/3rd-party tools)? If external data needed: load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out. In task mode, use task() for research fan-out. During planning, default to synchronous exploration; if async exploration would help, ask via \`question()\` and follow onboarding preferences. Default: delegate. Research tools (grep_app, context7, websearch, ast_grep) — delegate to Scout, not direct use.
15206
15202
 
15207
- 1. Is there a specialized agent that matches?
15208
- 2. Can I do it myself FOR SURE? REALLY?
15209
- 3. Does this require external system data (DBs/APIs/3rd-party tools)?
15210
- If external data needed: Load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out
15211
- In task mode, use task() for research fan-out.
15212
- During Planning, default to synchronous exploration. If async exploration would help, ask the user via \`question()\` and follow the onboarding preferences.
15213
- → Default: DELEGATE
15203
+ **When NOT to delegate:**
15204
+ - Single-file, <10-line changes — do directly
15205
+ - Sequential operations where you need the result of step N for step N+1
15206
+ - Questions answerable with one grep + one file read
15214
15207
 
15215
15208
  ## Delegation Prompt Structure (All 6 Sections)
15216
15209
 
@@ -15218,8 +15211,8 @@ During Planning, default to synchronous exploration. If async exploration would
15218
15211
  1. TASK: Atomic, specific goal
15219
15212
  2. EXPECTED OUTCOME: Concrete deliverables
15220
15213
  3. REQUIRED TOOLS: Explicit tool whitelist
15221
- 4. MUST DO: Exhaustive requirements
15222
- 5. MUST NOT DO: Forbidden actions
15214
+ 4. REQUIRED: Exhaustive requirements
15215
+ 5. FORBIDDEN: Forbidden actions
15223
15216
  6. CONTEXT: File paths, patterns, constraints
15224
15217
  \`\`\`
15225
15218
 
@@ -15232,32 +15225,45 @@ hive_worktree_create({ task: "01-task-name" })
15232
15225
  // In task mode, use task() for research fan-out.
15233
15226
  \`\`\`
15234
15227
 
15235
- **Delegation Guidance:**
15228
+ Delegation guidance:
15236
15229
  - \`task()\` is BLOCKING — returns when the worker is done
15237
15230
  - Call \`hive_status()\` immediately after to check new state and find next runnable tasks
15231
+ - Invariant: delegated task must not remain \`in_progress\`; if it does, treat as non-terminal completion and resume/retry worker with explicit commit-result handling
15238
15232
  - For parallel fan-out, issue multiple \`task()\` calls in the same message
15239
15233
 
15240
15234
  ## After Delegation - VERIFY
15241
15235
 
15242
- After every delegation, check:
15243
- - Does it work as expected?
15244
- - Followed existing codebase patterns?
15245
- - Met MUST DO and MUST NOT DO requirements?
15246
- - No unintended side effects?
15236
+ Your confidence is ~50% accurate. Always:
15237
+ - Read changed files (don’t trust self-reports)
15238
+ - Run lsp_diagnostics on modified files
15239
+ - Check acceptance criteria from spec
15240
+
15241
+ Then confirm:
15242
+ - Works as expected
15243
+ - Follows codebase patterns
15244
+ - Meets requirements
15245
+ - No unintended side effects
15246
+
15247
+ After completing and merging a batch, run full verification on the main branch: \`bun run build\`, \`bun run test\`. If failures occur, diagnose and fix or re-dispatch impacted tasks.
15248
+
15249
+ ## Search Stop Conditions
15250
+
15251
+ - Stop when there is enough context
15252
+ - Stop when info repeats
15253
+ - Stop after 2 rounds with no new data
15254
+ - Stop when a direct answer is found
15255
+ - If still unclear, delegate or ask one focused question
15247
15256
 
15248
15257
  ## Blocker Handling
15249
15258
 
15250
- When worker reports blocked:
15251
- 1. \`hive_status()\` — read blocker info
15252
- 2. \`question()\` — ask user (NEVER plain text)
15253
- 3. \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`
15259
+ When worker reports blocked: \`hive_status()\` → read blocker info; \`question()\` → ask user (no plain text); \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`.
15254
15260
 
15255
15261
  ## Failure Recovery (After 3 Consecutive Failures)
15256
15262
 
15257
- 1. STOP all further edits
15258
- 2. REVERT to last known working state
15259
- 3. DOCUMENT what was attempted
15260
- 4. ASK USER via question() — present options and context
15263
+ 1. Stop all further edits
15264
+ 2. Revert to last known working state
15265
+ 3. Document what was attempted
15266
+ 4. Ask user via question() — present options and context
15261
15267
 
15262
15268
  ## Merge Strategy
15263
15269
 
@@ -15265,21 +15271,15 @@ When worker reports blocked:
15265
15271
  hive_merge({ task: "01-task-name", strategy: "merge" })
15266
15272
  \`\`\`
15267
15273
 
15268
- Merge only after verification passes.
15274
+ Merge after batch completes, then verify the merged result.
15269
15275
 
15270
15276
  ### Post-Batch Review (Hygienic)
15271
15277
 
15272
- After completing and merging a batch:
15273
- 1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15274
- 2. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
15275
- 3. Apply feedback before starting the next batch.
15278
+ After completing and merging a batch: ask via \`question()\` if they want a Hygienic review. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\` and apply feedback before the next batch.
15276
15279
 
15277
15280
  ### AGENTS.md Maintenance
15278
15281
 
15279
- After completing and merging a batch:
15280
- 1. Sync context findings to AGENTS.md: \`hive_agents_md({ action: "sync", feature: "feature-name" })\`
15281
- 2. Review the proposed diff with the user
15282
- 3. Apply approved changes to keep AGENTS.md current
15282
+ After feature completion (all tasks merged): sync context findings to AGENTS.md via \`hive_agents_md({ action: "sync", feature: "feature-name" })\`, review the diff with the user, then apply approved changes.
15283
15283
 
15284
15284
  For quality review of AGENTS.md content, load \`hive_skill("agents-md-mastery")\`.
15285
15285
 
@@ -15289,39 +15289,21 @@ For projects without AGENTS.md:
15289
15289
 
15290
15290
  ## Turn Termination
15291
15291
 
15292
- Valid endings:
15293
- - Worker delegation (hive_worktree_create)
15294
- - Status check (hive_status)
15295
- - User question (question())
15296
- - Merge (hive_merge)
15297
-
15298
- NEVER end with:
15299
- - "Let me know when you're ready"
15300
- - Summary without next action
15301
- - Waiting for something unspecified
15302
-
15303
- ## Iron Laws
15292
+ Valid endings: worker delegation (hive_worktree_create), status check (hive_status), user question (question()), merge (hive_merge).
15293
+ Avoid ending with: "Let me know when you're ready", "When you're ready...", summary without next action, or waiting for something unspecified.
15304
15294
 
15305
- **Never:**
15306
- - Work alone when specialists available
15307
- - Skip delegation check
15308
- - Skip verification after delegation
15309
- - Continue after 3 failures without consulting
15310
-
15311
- **Always:**
15312
- - Classify intent FIRST
15313
- - Delegate by default
15314
- - Verify delegate work
15315
- - Use question() for user input (NEVER plain text)
15316
- - Cancel background tasks only when stale or no longer needed
15295
+ ## Guardrails
15317
15296
 
15318
- **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
15297
+ Avoid: working alone when specialists are available; skipping delegation checks; skipping verification after delegation; continuing after 3 failures without consulting.
15298
+ Do: classify intent first; delegate by default; verify delegated work; use \`question()\` for user input (no plain text); cancel background tasks only when stale or no longer needed.
15299
+ Cancel background tasks only when stale or no longer needed.
15300
+ User input: use \`question()\` tool for any user input to ensure structured responses.
15319
15301
  `;
15320
15302
 
15321
15303
  // src/agents/scout.ts
15322
15304
  var SCOUT_BEE_PROMPT = `# Scout (Explorer/Researcher/Retrieval)
15323
15305
 
15324
- Research BEFORE answering. Parallel execution by default.
15306
+ Research before answering; parallelize tool calls when investigating multiple independent questions.
15325
15307
 
15326
15308
  ## Request Classification
15327
15309
 
@@ -15344,18 +15326,13 @@ Success Looks Like: [concrete outcome]
15344
15326
  </analysis>
15345
15327
  \`\`\`
15346
15328
 
15347
- ### Phase 2: Parallel Execution (Default)
15329
+ ### Phase 2: Parallel Execution
15348
15330
 
15349
- ALWAYS run 3+ tools simultaneously:
15331
+ When investigating multiple independent questions, run related tools in parallel:
15350
15332
  \`\`\`
15351
- // CORRECT: Parallel
15352
15333
  glob({ pattern: "**/*.ts" })
15353
15334
  grep({ pattern: "UserService" })
15354
15335
  context7_query-docs({ query: "..." })
15355
-
15356
- // WRONG: Sequential
15357
- result1 = glob(...)
15358
- result2 = grep(...) // Wait for result1? NO!
15359
15336
  \`\`\`
15360
15337
 
15361
15338
  ### Phase 3: Structured Results
@@ -15374,12 +15351,29 @@ result2 = grep(...) // Wait for result1? NO!
15374
15351
  </results>
15375
15352
  \`\`\`
15376
15353
 
15354
+ ## Search Stop Conditions (After Research Protocol)
15355
+
15356
+ Stop when any is true:
15357
+ - enough context to answer
15358
+ - repeated information across sources
15359
+ - two rounds with no new data
15360
+ - a direct answer is found
15361
+
15362
+ ## Evidence Check (Before Answering)
15363
+
15364
+ - Every claim has a source (file:line, URL, snippet)
15365
+ - Avoid speculation; say "can't answer with available evidence" when needed
15366
+
15367
+ ## Investigate Before Answering
15368
+
15369
+ - Read files before making claims about them
15370
+
15377
15371
  ## Tool Strategy
15378
15372
 
15379
15373
  | Need | Tool |
15380
15374
  |------|------|
15381
15375
  | Type/Symbol info | LSP (goto_definition, find_references) |
15382
- | Structural patterns | ast_grep_search |
15376
+ | Structural patterns | ast_grep_find_code |
15383
15377
  | Text patterns | grep |
15384
15378
  | File discovery | glob |
15385
15379
  | Git history | bash (git log, git blame) |
@@ -15389,19 +15383,11 @@ result2 = grep(...) // Wait for result1? NO!
15389
15383
 
15390
15384
  ## External System Data (DB/API/3rd-party)
15391
15385
 
15392
- When asked to retrieve raw data from external systems (MongoDB/Stripe/etc.):
15393
- - Prefer targeted queries over broad dumps
15394
- - Summarize findings; avoid flooding the orchestrator with raw records
15386
+ When asked to retrieve raw data from external systems:
15387
+ - Prefer targeted queries
15388
+ - Summarize findings; avoid raw dumps
15395
15389
  - Redact secrets and personal data
15396
- - Provide minimal evidence and a concise summary
15397
- - Note any access limitations or missing context
15398
-
15399
- ## Documentation Discovery (External)
15400
-
15401
- 1. \`websearch("library-name official documentation")\`
15402
- 2. Version check if specified
15403
- 3. Sitemap: \`webfetch(docs_url + "/sitemap.xml")\`
15404
- 4. Targeted fetch from sitemap
15390
+ - Note access limitations or missing context
15405
15391
 
15406
15392
  ## Evidence Format
15407
15393
 
@@ -15420,103 +15406,114 @@ When operating within a feature context:
15420
15406
  })
15421
15407
  \`\`\`
15422
15408
 
15423
- ## Iron Laws
15424
-
15425
- **Never:**
15426
- - Create, modify, or delete files (read-only)
15427
- - Answer without research first
15428
- - Execute tools sequentially when parallel possible
15429
- - Skip intent analysis
15409
+ ## Operating Rules
15430
15410
 
15431
- **Always:**
15432
- - Classify request FIRST
15433
- - Run 3+ tools in parallel
15434
- - All paths MUST be absolute
15411
+ - Read-only behavior (no file changes)
15412
+ - Classify request first, then research
15413
+ - Use absolute paths for file references
15435
15414
  - Cite evidence for every claim
15436
- - Use current year (2026) in web searches
15415
+ - Use the current year when reasoning about time-sensitive information
15437
15416
  `;
15438
15417
 
15439
15418
  // src/agents/forager.ts
15440
15419
  var FORAGER_BEE_PROMPT = `# Forager (Worker/Coder)
15441
15420
 
15442
- Execute directly. NEVER delegate implementation. Work in isolation.
15421
+ You are an autonomous senior engineer. Once given direction, gather context, implement, and verify without waiting for prompts.
15422
+
15423
+ Execute directly. Work in isolation. Do not delegate implementation.
15424
+
15425
+ ## Intent Extraction
15426
+
15427
+ | Spec says | True intent | Action |
15428
+ |---|---|---|
15429
+ | "Implement X" | Build + verify | Code → verify |
15430
+ | "Fix Y" | Root cause + minimal fix | Diagnose → fix → verify |
15431
+ | "Refactor Z" | Preserve behavior | Restructure → verify no regressions |
15432
+ | "Add tests" | Coverage | Write tests → verify |
15443
15433
 
15444
- ## Blocked Tools
15434
+ ## Action Bias
15445
15435
 
15446
- These tools are FORBIDDEN:
15447
- - \`task\` Orchestrator's job
15448
- - \`hive_worktree_create\` — You ARE the spawned worker
15449
- - \`hive_merge\` Orchestrator's job
15436
+ - Act directly: implement first, explain in commit summary. Complete all steps before reporting.
15437
+ - REQUIRED: keep going until done, make decisions, course-correct on failure
15438
+
15439
+ Your tool access is scoped to your role. Use only the tools available to you.
15450
15440
 
15451
15441
  ## Allowed Research
15452
15442
 
15453
15443
  CAN use for quick lookups:
15454
15444
  - \`grep_app_searchGitHub\` — OSS patterns
15455
15445
  - \`context7_query-docs\` — Library docs
15456
- - \`ast_grep_search\` — AST patterns
15446
+ - \`ast_grep_find_code_by_rule\` — AST patterns
15447
+ - \`ast_grep_scan-code\` — Code quality scan (best-effort verification)
15448
+ - \`ast_grep_find_code\` — Find code patterns (best-effort verification)
15457
15449
  - \`glob\`, \`grep\`, \`read\` — Codebase exploration
15458
15450
 
15459
15451
  ## Resolve Before Blocking
15460
15452
 
15461
- Default to exploration, questions are LAST resort:
15453
+ Default to exploration, questions are LAST resort.
15454
+ Context inference: Before asking "what does X do?", READ X first.
15455
+
15456
+ Apply in order before reporting as blocked:
15462
15457
  1. Read the referenced files and surrounding code
15463
15458
  2. Search for similar patterns in the codebase
15464
- 3. Try a reasonable approach based on conventions
15459
+ 3. Check docs via research tools
15460
+ 4. Try a reasonable approach
15461
+ 5. Last resort: report blocked
15465
15462
 
15466
- Only report as blocked when:
15467
- - Multiple approaches failed (tried 3+)
15468
- - Decision requires business logic you can't infer
15469
- - External dependency is missing or broken
15470
-
15471
- Context inference: Before asking "what does X do?", READ X first.
15463
+ Investigate before acting. Do not speculate about code you have not read.
15472
15464
 
15473
15465
  ## Plan = READ ONLY
15474
15466
 
15475
- CRITICAL: NEVER MODIFY THE PLAN FILE
15476
- - May READ to understand task
15477
- - MUST NOT edit, modify, or update plan
15478
- - Only Orchestrator (Swarm) manages plan
15467
+ Do not modify the plan file.
15468
+ - Read to understand the task
15469
+ - Only the orchestrator manages plan updates
15479
15470
 
15480
15471
  ## Persistent Notes
15481
15472
 
15482
- For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks):
15483
- Use \`hive_context_write({ name: "learnings", content: "..." })\` to persist for future workers.
15473
+ For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks), use:
15474
+ \`hive_context_write({ name: "learnings", content: "..." })\`.
15484
15475
 
15485
- ## Execution Flow
15476
+ ## Working Rules
15486
15477
 
15487
- ### 1. Understand Task
15488
- Read spec for:
15489
- - **What to do**
15490
- - **References** (file:lines)
15491
- - **Must NOT do** (guardrails)
15492
- - **Acceptance criteria**
15478
+ - DRY/Search First: look for existing helpers before adding new code
15479
+ - Convention Following: check neighboring files and package.json, then follow existing patterns
15480
+ - Efficient Edits: read enough context before editing, batch logical edits
15481
+ - Tight Error Handling: avoid broad catches or silent defaults; propagate errors explicitly
15482
+ - Avoid Over-engineering: only implement what was asked for
15483
+ - Reversibility Preference: favor local, reversible actions; confirm before hard-to-reverse steps
15484
+ - Promise Discipline: do not commit to future work; if not done this turn, label it "Next steps"
15485
+ - No Comments: do not add comments unless the spec requests them
15486
+ - Concise Output: minimize output and avoid extra explanations unless asked
15493
15487
 
15494
- ### 2. Orient (Pre-flight Before Coding)
15495
- Before writing code:
15496
- - Confirm dependencies are satisfied and required context is present
15497
- - Read the referenced files and surrounding code
15498
- - Search for similar patterns in the codebase
15499
- - Identify the exact files/sections to touch (from references)
15500
- - Decide the first failing test you will write (TDD)
15501
- - Identify the test command(s) and inputs you will run
15502
- - Plan the minimum change to reach green
15488
+ ## Execution Loop (max 3 iterations)
15503
15489
 
15504
- ### 3. Implement
15505
- Follow spec exactly. Use references for patterns.
15490
+ EXPLORE PLAN → EXECUTE → VERIFY → LOOP
15506
15491
 
15507
- \`\`\`
15508
- read(file, { offset: line, limit: 30 }) // Check references
15509
- edit(file, { old: "...", new: "..." }) // Implement
15510
- bash("npm test") // Verify
15511
- \`\`\`
15492
+ - EXPLORE: read references, gather context, search for patterns
15493
+ - PLAN: decide the minimum change, files to touch, and verification commands
15494
+ - EXECUTE: edit using conventions, reuse helpers, batch changes
15495
+ - VERIFY: run best-effort checks (tests if available, ast_grep, lsp_diagnostics)
15496
+ - LOOP: if verification fails, diagnose and retry within the limit
15512
15497
 
15513
- ### 4. Verify
15514
- Run acceptance criteria:
15515
- - Tests pass
15516
- - Build succeeds
15517
- - lsp_diagnostics clean on changed files
15498
+ ## Progress Updates
15499
+
15500
+ Provide brief status at meaningful milestones.
15501
+
15502
+ ## Completion Checklist
15518
15503
 
15519
- ### 5. Report
15504
+ - All acceptance criteria met?
15505
+ - Best-effort verification done and recorded?
15506
+ - Re-read the spec — missed anything?
15507
+ - Said "I'll do X" — did you?
15508
+ - Plan closure: mark each intention as Done, Blocked, or Cancelled
15509
+ - Record exact commands and results
15510
+
15511
+ ## Failure Recovery
15512
+
15513
+ If 3 different approaches fail: stop edits, revert local changes, document attempts, report blocked.
15514
+ If you have tried 3 approaches and still cannot finish safely, report as blocked.
15515
+
15516
+ ## Reporting
15520
15517
 
15521
15518
  **Success:**
15522
15519
  \`\`\`
@@ -15527,7 +15524,9 @@ hive_worktree_commit({
15527
15524
  })
15528
15525
  \`\`\`
15529
15526
 
15530
- **CRITICAL: After hive_worktree_commit, STOP IMMEDIATELY.**
15527
+ Then inspect the tool response fields:
15528
+ - If \`ok=true\` and \`terminal=true\`: stop and hand off to orchestrator
15529
+ - If \`ok=false\` or \`terminal=false\`: DO NOT STOP. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
15531
15530
 
15532
15531
  **Blocked (need user decision):**
15533
15532
  \`\`\`
@@ -15544,47 +15543,11 @@ hive_worktree_commit({
15544
15543
  })
15545
15544
  \`\`\`
15546
15545
 
15547
- ## Completion Checklist
15548
-
15549
- Before calling hive_worktree_commit:
15550
- - All tests in scope are run and passing (Record exact commands and results)
15551
- - Build succeeds if required (Record exact command and result)
15552
- - lsp_diagnostics clean on changed files (Record exact command and result)
15553
- - Changes match the spec and references
15554
- - No extra scope creep or unrelated edits
15555
- - Summary includes what changed, why, and verification status
15556
-
15557
- ## Failure Recovery
15558
-
15559
- After 3 consecutive failures:
15560
- 1. STOP all further edits
15561
- 2. Document what was tried
15562
- 3. Report as blocked with options
15563
-
15564
- ## Iron Laws
15565
-
15566
- ### Docker Sandbox
15567
-
15568
- When sandbox mode is active, ALL bash commands automatically run inside a Docker container.
15569
- - Your commands are transparently wrapped — you don't need to do anything special
15570
- - File edits (Read, Write, Edit tools) still work on the host filesystem (worktree is mounted)
15571
- - If a command must run on the host (e.g., git operations), report as blocked and ask the user
15572
- - If a command fails with "docker: command not found", report as blocked — the host needs Docker installed
15573
- - For deeper Docker expertise, load \`hive_skill("docker-mastery")\`
15546
+ ## Docker Sandbox
15574
15547
 
15575
- **Never:**
15576
- - Exceed task scope
15577
- - Modify plan file
15578
- - Use \`task\` or \`hive_worktree_create\`
15579
- - Continue after hive_worktree_commit
15580
- - Skip verification
15581
-
15582
- **Always:**
15583
- - Follow references for patterns
15584
- - Run acceptance criteria
15585
- - Report blockers with options
15586
- - APPEND to notepads (never overwrite)
15587
- - lsp_diagnostics before reporting done
15548
+ When sandbox mode is active, bash commands run inside Docker; file edits still apply to the host worktree.
15549
+ If a command must run on the host or Docker is missing, report blocked.
15550
+ For deeper Docker expertise, load \`hive_skill("docker-mastery")\`.
15588
15551
  `;
15589
15552
 
15590
15553
  // src/agents/hygienic.ts
@@ -15759,15 +15722,29 @@ var __getProtoOf = Object.getPrototypeOf;
15759
15722
  var __defProp2 = Object.defineProperty;
15760
15723
  var __getOwnPropNames = Object.getOwnPropertyNames;
15761
15724
  var __hasOwnProp = Object.prototype.hasOwnProperty;
15725
+ function __accessProp(key) {
15726
+ return this[key];
15727
+ }
15728
+ var __toESMCache_node;
15729
+ var __toESMCache_esm;
15762
15730
  var __toESM = (mod, isNodeMode, target) => {
15731
+ var canCache = mod != null && typeof mod === "object";
15732
+ if (canCache) {
15733
+ var cache = isNodeMode ? __toESMCache_node ??= new WeakMap : __toESMCache_esm ??= new WeakMap;
15734
+ var cached2 = cache.get(mod);
15735
+ if (cached2)
15736
+ return cached2;
15737
+ }
15763
15738
  target = mod != null ? __create(__getProtoOf(mod)) : {};
15764
15739
  const to = isNodeMode || !mod || !mod.__esModule ? __defProp2(target, "default", { value: mod, enumerable: true }) : target;
15765
15740
  for (let key of __getOwnPropNames(mod))
15766
15741
  if (!__hasOwnProp.call(to, key))
15767
15742
  __defProp2(to, key, {
15768
- get: () => mod[key],
15743
+ get: __accessProp.bind(mod, key),
15769
15744
  enumerable: true
15770
15745
  });
15746
+ if (canCache)
15747
+ cache.set(mod, to);
15771
15748
  return to;
15772
15749
  };
15773
15750
  var __commonJS = (cb, mod) => () => (mod || cb((mod = { exports: {} }).exports, mod), mod.exports);
@@ -16742,12 +16719,14 @@ function isLockStale(lockPath, staleTTL) {
16742
16719
  function acquireLockSync(filePath, options = {}) {
16743
16720
  const opts = { ...DEFAULT_LOCK_OPTIONS, ...options };
16744
16721
  const lockPath = getLockPath(filePath);
16722
+ const lockDir = path2.dirname(lockPath);
16745
16723
  const startTime = Date.now();
16746
16724
  const lockContent = JSON.stringify({
16747
16725
  pid: process.pid,
16748
16726
  timestamp: new Date().toISOString(),
16749
16727
  filePath
16750
16728
  });
16729
+ ensureDir(lockDir);
16751
16730
  while (true) {
16752
16731
  try {
16753
16732
  const fd = fs2.openSync(lockPath, fs2.constants.O_CREAT | fs2.constants.O_EXCL | fs2.constants.O_WRONLY);
@@ -16760,15 +16739,18 @@ function acquireLockSync(filePath, options = {}) {
16760
16739
  };
16761
16740
  } catch (err) {
16762
16741
  const error45 = err;
16763
- if (error45.code !== "EEXIST") {
16742
+ if (error45.code === "ENOENT") {
16743
+ ensureDir(lockDir);
16744
+ } else if (error45.code === "EEXIST") {
16745
+ if (isLockStale(lockPath, opts.staleLockTTL)) {
16746
+ try {
16747
+ fs2.unlinkSync(lockPath);
16748
+ continue;
16749
+ } catch {}
16750
+ }
16751
+ } else {
16764
16752
  throw error45;
16765
16753
  }
16766
- if (isLockStale(lockPath, opts.staleLockTTL)) {
16767
- try {
16768
- fs2.unlinkSync(lockPath);
16769
- continue;
16770
- } catch {}
16771
- }
16772
16754
  if (Date.now() - startTime >= opts.timeout) {
16773
16755
  throw new Error(`Failed to acquire lock on ${filePath} after ${opts.timeout}ms. ` + `Lock file: ${lockPath}`);
16774
16756
  }
@@ -16793,14 +16775,6 @@ function writeAtomic(filePath, content) {
16793
16775
  function writeJsonAtomic(filePath, data) {
16794
16776
  writeAtomic(filePath, JSON.stringify(data, null, 2));
16795
16777
  }
16796
- function writeJsonLockedSync(filePath, data, options = {}) {
16797
- const release = acquireLockSync(filePath, options);
16798
- try {
16799
- writeJsonAtomic(filePath, data);
16800
- } finally {
16801
- release();
16802
- }
16803
- }
16804
16778
  function deepMerge(target, patch) {
16805
16779
  const result = { ...target };
16806
16780
  for (const key of Object.keys(patch)) {
@@ -17329,23 +17303,31 @@ ${f.content}`).join(`
17329
17303
  }
17330
17304
  update(featureName, taskFolder, updates, lockOptions) {
17331
17305
  const statusPath = getTaskStatusPath(this.projectRoot, featureName, taskFolder);
17332
- const current = readJson(statusPath);
17333
- if (!current) {
17306
+ if (!fileExists(statusPath)) {
17334
17307
  throw new Error(`Task '${taskFolder}' not found`);
17335
17308
  }
17336
- const updated = {
17337
- ...current,
17338
- ...updates,
17339
- schemaVersion: TASK_STATUS_SCHEMA_VERSION
17340
- };
17341
- if (updates.status === "in_progress" && !current.startedAt) {
17342
- updated.startedAt = new Date().toISOString();
17343
- }
17344
- if (updates.status === "done" && !current.completedAt) {
17345
- updated.completedAt = new Date().toISOString();
17309
+ const release = acquireLockSync(statusPath, lockOptions);
17310
+ try {
17311
+ const current = readJson(statusPath);
17312
+ if (!current) {
17313
+ throw new Error(`Task '${taskFolder}' not found`);
17314
+ }
17315
+ const updated = {
17316
+ ...current,
17317
+ ...updates,
17318
+ schemaVersion: TASK_STATUS_SCHEMA_VERSION
17319
+ };
17320
+ if (updates.status === "in_progress" && !current.startedAt) {
17321
+ updated.startedAt = new Date().toISOString();
17322
+ }
17323
+ if (updates.status === "done" && !current.completedAt) {
17324
+ updated.completedAt = new Date().toISOString();
17325
+ }
17326
+ writeJsonAtomic(statusPath, updated);
17327
+ return updated;
17328
+ } finally {
17329
+ release();
17346
17330
  }
17347
- writeJsonLockedSync(statusPath, updated, lockOptions);
17348
- return updated;
17349
17331
  }
17350
17332
  patchBackgroundFields(featureName, taskFolder, patch, lockOptions) {
17351
17333
  const statusPath = getTaskStatusPath(this.projectRoot, featureName, taskFolder);
@@ -22160,6 +22142,7 @@ ${f.content}`);
22160
22142
  }
22161
22143
  class ConfigService {
22162
22144
  configPath;
22145
+ cachedConfig = null;
22163
22146
  constructor() {
22164
22147
  const homeDir = process.env.HOME || process.env.USERPROFILE || "";
22165
22148
  const configDir = path6.join(homeDir, ".config", "opencode");
@@ -22169,13 +22152,17 @@ class ConfigService {
22169
22152
  return this.configPath;
22170
22153
  }
22171
22154
  get() {
22155
+ if (this.cachedConfig !== null) {
22156
+ return this.cachedConfig;
22157
+ }
22172
22158
  try {
22173
22159
  if (!fs10.existsSync(this.configPath)) {
22174
- return { ...DEFAULT_HIVE_CONFIG };
22160
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22161
+ return this.cachedConfig;
22175
22162
  }
22176
22163
  const raw = fs10.readFileSync(this.configPath, "utf-8");
22177
22164
  const stored = JSON.parse(raw);
22178
- return {
22165
+ const merged = {
22179
22166
  ...DEFAULT_HIVE_CONFIG,
22180
22167
  ...stored,
22181
22168
  agents: {
@@ -22207,11 +22194,15 @@ class ConfigService {
22207
22194
  }
22208
22195
  }
22209
22196
  };
22197
+ this.cachedConfig = merged;
22198
+ return this.cachedConfig;
22210
22199
  } catch {
22211
- return { ...DEFAULT_HIVE_CONFIG };
22200
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22201
+ return this.cachedConfig;
22212
22202
  }
22213
22203
  }
22214
22204
  set(updates) {
22205
+ this.cachedConfig = null;
22215
22206
  const current = this.get();
22216
22207
  const merged = {
22217
22208
  ...current,
@@ -22226,6 +22217,7 @@ class ConfigService {
22226
22217
  fs10.mkdirSync(configDir, { recursive: true });
22227
22218
  }
22228
22219
  fs10.writeFileSync(this.configPath, JSON.stringify(merged, null, 2));
22220
+ this.cachedConfig = merged;
22229
22221
  return merged;
22230
22222
  }
22231
22223
  exists() {
@@ -22273,6 +22265,22 @@ class ConfigService {
22273
22265
  const persistent = config2.persistentContainers ?? mode === "docker";
22274
22266
  return { mode, ...image && { image }, persistent };
22275
22267
  }
22268
+ getHookCadence(hookName, options) {
22269
+ const config2 = this.get();
22270
+ const configuredCadence = config2.hook_cadence?.[hookName];
22271
+ if (options?.safetyCritical && configuredCadence && configuredCadence > 1) {
22272
+ console.warn(`[hive:cadence] Ignoring cadence > 1 for safety-critical hook: ${hookName}`);
22273
+ return 1;
22274
+ }
22275
+ if (configuredCadence === undefined || configuredCadence === null) {
22276
+ return 1;
22277
+ }
22278
+ if (configuredCadence <= 0 || !Number.isInteger(configuredCadence)) {
22279
+ console.warn(`[hive:cadence] Invalid cadence ${configuredCadence} for ${hookName}, using 1`);
22280
+ return 1;
22281
+ }
22282
+ return configuredCadence;
22283
+ }
22276
22284
  }
22277
22285
 
22278
22286
  class AgentsMdService {
@@ -22729,8 +22737,16 @@ hive_worktree_commit({
22729
22737
  })
22730
22738
  \`\`\`
22731
22739
 
22732
- **CRITICAL: After calling hive_worktree_commit, you MUST STOP IMMEDIATELY.**
22733
- Do NOT continue working. Do NOT respond further. Your session is DONE.
22740
+ Then inspect the tool response fields:
22741
+ - If \`ok=true\` and \`terminal=true\`: stop the session
22742
+ - Otherwise: **DO NOT STOP**. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
22743
+
22744
+ **CRITICAL: Stop only on terminal commit result (ok=true and terminal=true).**
22745
+ If commit returns non-terminal (for example verification_required), DO NOT STOP.
22746
+ Follow result.nextAction, fix the issue, and call hive_worktree_commit again.
22747
+
22748
+ Only when commit result is terminal should you stop.
22749
+ Do NOT continue working after a terminal result. Do NOT respond further. Your session is DONE.
22734
22750
  The Hive Master will take over from here.
22735
22751
 
22736
22752
  **Summary Guidance** (used verbatim for downstream task context):
@@ -23077,6 +23093,31 @@ function normalizeVariant(variant) {
23077
23093
  return trimmed2.length > 0 ? trimmed2 : undefined;
23078
23094
  }
23079
23095
 
23096
+ // src/hooks/system-hook.ts
23097
+ var fallbackTurnCounters = {};
23098
+ function shouldExecuteHook(hookName, configService, turnCounters, options) {
23099
+ const cadence = configService?.getHookCadence(hookName, options) ?? 1;
23100
+ const counters = turnCounters ?? fallbackTurnCounters;
23101
+ counters[hookName] = (counters[hookName] || 0) + 1;
23102
+ const currentTurn = counters[hookName];
23103
+ if (cadence === 1) {
23104
+ return true;
23105
+ }
23106
+ return (currentTurn - 1) % cadence === 0;
23107
+ }
23108
+ var HIVE_SYSTEM_PROMPT = `
23109
+ ## Hive — Active Session
23110
+
23111
+ **Important:** hive_worktree_commit commits to the task branch but does NOT merge.
23112
+ Use hive_merge to integrate changes into the current branch.
23113
+ `;
23114
+
23115
+ // src/utils/compaction-prompt.ts
23116
+ var COMPACTION_RESUME_PROMPT = "You were compacted mid-task. " + "Resume by reading your worker-prompt.md (in the task worktree root) to recall your assignment. " + "Do not call status tools or re-read the full codebase. " + "Locate your last commit message or notes, then continue from where you left off.";
23117
+ function buildCompactionPrompt() {
23118
+ return COMPACTION_RESUME_PROMPT;
23119
+ }
23120
+
23080
23121
  // src/index.ts
23081
23122
  function formatSkillsXml(skills) {
23082
23123
  if (skills.length === 0)
@@ -23162,85 +23203,6 @@ No Hive skills available.` : base + formatSkillsXml(filteredSkills);
23162
23203
  }
23163
23204
  });
23164
23205
  }
23165
- var HIVE_SYSTEM_PROMPT = `
23166
- ## Hive - Feature Development System
23167
-
23168
- Plan-first development: Write plan → User reviews → Approve → Execute tasks
23169
-
23170
- ### Tools (14 total)
23171
-
23172
- | Domain | Tools |
23173
- |--------|-------|
23174
- | Feature | hive_feature_create, hive_feature_complete |
23175
- | Plan | hive_plan_write, hive_plan_read, hive_plan_approve |
23176
- | Task | hive_tasks_sync, hive_task_create, hive_task_update |
23177
- | Worktree | hive_worktree_create, hive_worktree_commit, hive_worktree_discard |
23178
- | Merge | hive_merge |
23179
- | Context | hive_context_write |
23180
- | Status | hive_status |
23181
- | Skill | hive_skill |
23182
-
23183
- ### Workflow
23184
-
23185
- 1. \`hive_feature_create(name)\` - Create feature
23186
- 2. \`hive_plan_write(content)\` - Write plan.md
23187
- 3. User adds comments in VSCode → \`hive_plan_read\` to see them
23188
- 4. Revise plan → User approves
23189
- 5. \`hive_tasks_sync()\` - Generate tasks from plan
23190
- 6. \`hive_worktree_create(task)\` → work in worktree → \`hive_worktree_commit(task, summary)\`
23191
- 7. \`hive_merge(task)\` - Merge task branch into main (when ready)
23192
-
23193
- **Important:** \`hive_worktree_commit\` commits changes to task branch but does NOT merge.
23194
- Use \`hive_merge\` to explicitly integrate changes. Worktrees persist until manually removed.
23195
-
23196
- ### Delegated Execution
23197
-
23198
- \`hive_worktree_create\` creates worktree and spawns worker automatically:
23199
-
23200
- 1. \`hive_worktree_create(task)\` → Creates worktree + spawns Forager (Worker/Coder) worker
23201
- 2. Worker executes → calls \`hive_worktree_commit(status: "completed")\`
23202
- 3. Worker blocked → calls \`hive_worktree_commit(status: "blocked", blocker: {...})\`
23203
-
23204
- **Handling blocked workers:**
23205
- 1. Check blockers with \`hive_status()\`
23206
- 2. Read the blocker info (reason, options, recommendation, context)
23207
- 3. Ask user via \`question()\` tool - NEVER plain text
23208
- 4. Resume with \`hive_worktree_create(task, continueFrom: "blocked", decision: answer)\`
23209
-
23210
- **CRITICAL**: When resuming, a NEW worker spawns in the SAME worktree.
23211
- The previous worker's progress is preserved. Include the user's decision in the \`decision\` parameter.
23212
-
23213
- **After task() Returns:**
23214
- - task() is BLOCKING — when it returns, the worker is DONE
23215
- - Call \`hive_status()\` immediately to check the new task state and find next runnable tasks
23216
- - No notifications or polling needed — the result is already available
23217
-
23218
- **For research**, use MCP tools or parallel exploration:
23219
- - \`grep_app_searchGitHub\` - Find code in OSS
23220
- - \`context7_query-docs\` - Library documentation
23221
- - \`websearch_web_search_exa\` - Web search via Exa
23222
- - \`ast_grep_search\` - AST-based search
23223
- - For exploratory fan-out, load \`hive_skill("parallel-exploration")\` and use multiple \`task()\` calls in the same message
23224
-
23225
- ### Planning Phase - Context Management REQUIRED
23226
-
23227
- As you research and plan, CONTINUOUSLY save findings using \`hive_context_write\`:
23228
- - Research findings (API patterns, library docs, codebase structure)
23229
- - User preferences ("we use Zustand, not Redux")
23230
- - Rejected alternatives ("tried X, too complex")
23231
- - Architecture decisions ("auth lives in /lib/auth")
23232
-
23233
- **Update existing context files** when new info emerges - dont create duplicates.
23234
-
23235
- \`hive_tasks_sync\` parses \`### N. Task Name\` headers.
23236
-
23237
- ### Execution Phase - Stay Aligned
23238
-
23239
- During execution, call \`hive_status\` periodically to:
23240
- - Check current progress and pending work
23241
- - See context files to read
23242
- - Get reminded of next actions
23243
- `;
23244
23206
  var plugin = async (ctx) => {
23245
23207
  const { directory, client } = ctx;
23246
23208
  const featureService = new FeatureService(directory);
@@ -23295,6 +23257,7 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23295
23257
  }
23296
23258
  return null;
23297
23259
  };
23260
+ const turnCounters = {};
23298
23261
  const checkDependencies = (feature, taskFolder) => {
23299
23262
  const taskStatus = taskService.getRawStatus(feature, taskFolder);
23300
23263
  if (!taskStatus) {
@@ -23334,6 +23297,9 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23334
23297
  };
23335
23298
  return {
23336
23299
  "experimental.chat.system.transform": async (input, output) => {
23300
+ if (!shouldExecuteHook("experimental.chat.system.transform", configService, turnCounters)) {
23301
+ return;
23302
+ }
23337
23303
  output.system.push(HIVE_SYSTEM_PROMPT);
23338
23304
  const activeFeature = resolveFeature();
23339
23305
  if (activeFeature) {
@@ -23354,6 +23320,9 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23354
23320
  }
23355
23321
  }
23356
23322
  },
23323
+ "experimental.session.compacting": async (_input, output) => {
23324
+ output.context.push(buildCompactionPrompt());
23325
+ },
23357
23326
  "chat.message": async (input, output) => {
23358
23327
  const { agent } = input;
23359
23328
  if (!agent)
@@ -23369,6 +23338,9 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23369
23338
  }
23370
23339
  },
23371
23340
  "tool.execute.before": async (input, output) => {
23341
+ if (!shouldExecuteHook("tool.execute.before", configService, turnCounters, { safetyCritical: true })) {
23342
+ return;
23343
+ }
23372
23344
  if (input.tool !== "bash")
23373
23345
  return;
23374
23346
  const sandboxConfig = configService.getSandboxConfig();
@@ -23774,7 +23746,7 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
23774
23746
  }
23775
23747
  }),
23776
23748
  hive_worktree_commit: tool({
23777
- description: "Complete task: commit changes to branch, write report. Supports blocked/failed/partial status for worker communication.",
23749
+ description: "Complete task: commit changes to branch, write report. Supports blocked/failed/partial status for worker communication. Returns JSON with ok/terminal semantics for worker control flow.",
23778
23750
  args: {
23779
23751
  task: tool.schema.string().describe("Task folder name"),
23780
23752
  summary: tool.schema.string().describe("Summary of what was done"),
@@ -23788,29 +23760,54 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
23788
23760
  feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)")
23789
23761
  },
23790
23762
  async execute({ task, summary, status = "completed", blocker, feature: explicitFeature }) {
23763
+ const respond = (payload) => JSON.stringify(payload, null, 2);
23791
23764
  const feature = resolveFeature(explicitFeature);
23792
- if (!feature)
23793
- return "Error: No feature specified. Create a feature or provide feature param.";
23765
+ if (!feature) {
23766
+ return respond({
23767
+ ok: false,
23768
+ terminal: false,
23769
+ status: "error",
23770
+ reason: "feature_required",
23771
+ task,
23772
+ taskState: "unknown",
23773
+ message: "No feature specified. Create a feature or provide feature param.",
23774
+ nextAction: "Provide feature explicitly or create/select an active feature, then retry hive_worktree_commit."
23775
+ });
23776
+ }
23794
23777
  const taskInfo = taskService.get(feature, task);
23795
- if (!taskInfo)
23796
- return `Error: Task "${task}" not found`;
23797
- if (taskInfo.status !== "in_progress" && taskInfo.status !== "blocked")
23798
- return "Error: Task not in progress";
23778
+ if (!taskInfo) {
23779
+ return respond({
23780
+ ok: false,
23781
+ terminal: false,
23782
+ status: "error",
23783
+ reason: "task_not_found",
23784
+ feature,
23785
+ task,
23786
+ taskState: "unknown",
23787
+ message: `Task "${task}" not found`,
23788
+ nextAction: "Check the task folder name in your worker-prompt.md and retry hive_worktree_commit with the correct task id."
23789
+ });
23790
+ }
23791
+ if (taskInfo.status !== "in_progress" && taskInfo.status !== "blocked") {
23792
+ return respond({
23793
+ ok: false,
23794
+ terminal: false,
23795
+ status: "error",
23796
+ reason: "invalid_task_state",
23797
+ feature,
23798
+ task,
23799
+ taskState: taskInfo.status,
23800
+ message: "Task not in progress",
23801
+ nextAction: "Only in_progress or blocked tasks can be committed. Start/resume the task first."
23802
+ });
23803
+ }
23804
+ let verificationNote;
23799
23805
  if (status === "completed") {
23800
- const verificationKeywords = ["test", "build", "lint", "vitest", "jest", "npm run", "pnpm", "cargo", "pytest", "verified", "passes", "succeeds"];
23806
+ const verificationKeywords = ["test", "build", "lint", "vitest", "jest", "npm run", "pnpm", "cargo", "pytest", "verified", "passes", "succeeds", "ast-grep", "scan"];
23801
23807
  const summaryLower = summary.toLowerCase();
23802
23808
  const hasVerificationMention = verificationKeywords.some((kw) => summaryLower.includes(kw));
23803
23809
  if (!hasVerificationMention) {
23804
- return `BLOCKED: No verification detected in summary.
23805
-
23806
- Before claiming completion, you must:
23807
- 1. Run tests (vitest, jest, pytest, etc.)
23808
- 2. Run build (npm run build, cargo build, etc.)
23809
- 3. Include verification results in summary
23810
-
23811
- Example summary: "Implemented auth flow. Tests pass (vitest). Build succeeds."
23812
-
23813
- Re-run with updated summary showing verification results.`;
23810
+ verificationNote = "No verification evidence in summary. Orchestrator should run build+test after merge.";
23814
23811
  }
23815
23812
  }
23816
23813
  if (status === "blocked") {
@@ -23820,16 +23817,42 @@ Re-run with updated summary showing verification results.`;
23820
23817
  blocker
23821
23818
  });
23822
23819
  const worktree2 = await worktreeService.get(feature, task);
23823
- return JSON.stringify({
23820
+ return respond({
23821
+ ok: true,
23822
+ terminal: true,
23824
23823
  status: "blocked",
23824
+ reason: "user_decision_required",
23825
+ feature,
23825
23826
  task,
23827
+ taskState: "blocked",
23826
23828
  summary,
23827
23829
  blocker,
23828
23830
  worktreePath: worktree2?.path,
23829
- message: 'Task blocked. Hive Master will ask user and resume with hive_worktree_create(continueFrom: "blocked", decision: answer)'
23830
- }, null, 2);
23831
+ branch: worktree2?.branch,
23832
+ message: 'Task blocked. Hive Master will ask user and resume with hive_worktree_create(continueFrom: "blocked", decision: answer)',
23833
+ nextAction: 'Wait for orchestrator to collect user decision and resume with continueFrom: "blocked".'
23834
+ });
23831
23835
  }
23832
23836
  const commitResult = await worktreeService.commitChanges(feature, task, `hive(${task}): ${summary.slice(0, 50)}`);
23837
+ if (status === "completed" && !commitResult.committed && commitResult.message !== "No changes to commit") {
23838
+ return respond({
23839
+ ok: false,
23840
+ terminal: false,
23841
+ status: "rejected",
23842
+ reason: "commit_failed",
23843
+ feature,
23844
+ task,
23845
+ taskState: taskInfo.status,
23846
+ summary,
23847
+ commit: {
23848
+ committed: commitResult.committed,
23849
+ sha: commitResult.sha,
23850
+ message: commitResult.message
23851
+ },
23852
+ message: `Commit failed: ${commitResult.message || "unknown error"}`,
23853
+ nextAction: "Resolve git/worktree issue, then call hive_worktree_commit again."
23854
+ });
23855
+ }
23833
23856
  const diff = await worktreeService.getDiff(feature, task);
23834
23857
  const statusLabel = status === "completed" ? "success" : status;
23835
23858
  const reportLines = [
@@ -23859,13 +23882,31 @@ Re-run with updated summary showing verification results.`;
23859
23882
  } else {
23860
23883
  reportLines.push("---", "", "## Changes", "", "_No file changes detected_", "");
23861
23884
  }
23862
- taskService.writeReport(feature, task, reportLines.join(`
23885
+ const reportPath = taskService.writeReport(feature, task, reportLines.join(`
23863
23886
  `));
23864
23887
  const finalStatus = status === "completed" ? "done" : status;
23865
23888
  taskService.update(feature, task, { status: finalStatus, summary });
23866
23889
  const worktree = await worktreeService.get(feature, task);
23867
- return `Task "${task}" ${status}. Changes committed to branch ${worktree?.branch || "unknown"}.
23868
- Use hive_merge to integrate changes. Worktree preserved at ${worktree?.path || "unknown"}.`;
23890
+ return respond({
23891
+ ok: true,
23892
+ terminal: true,
23893
+ status,
23894
+ feature,
23895
+ task,
23896
+ taskState: finalStatus,
23897
+ summary,
23898
+ ...verificationNote && { verificationNote },
23899
+ commit: {
23900
+ committed: commitResult.committed,
23901
+ sha: commitResult.sha,
23902
+ message: commitResult.message
23903
+ },
23904
+ worktreePath: worktree?.path,
23905
+ branch: worktree?.branch,
23906
+ reportPath,
23907
+ message: `Task "${task}" ${status}.`,
23908
+ nextAction: "Use hive_merge to integrate changes. Worktree is preserved for review."
23909
+ });
23869
23910
  }
23870
23911
  }),
23871
23912
  hive_worktree_discard: tool({
@@ -24101,6 +24142,33 @@ ${result.diff}
24101
24142
  }
24102
24143
  },
24103
24144
  config: async (opencodeConfig) => {
24145
+ function agentTools(allowed) {
24146
+ const allHiveTools = [
24147
+ "hive_feature_create",
24148
+ "hive_feature_complete",
24149
+ "hive_plan_write",
24150
+ "hive_plan_read",
24151
+ "hive_plan_approve",
24152
+ "hive_tasks_sync",
24153
+ "hive_task_create",
24154
+ "hive_task_update",
24155
+ "hive_worktree_create",
24156
+ "hive_worktree_commit",
24157
+ "hive_worktree_discard",
24158
+ "hive_merge",
24159
+ "hive_context_write",
24160
+ "hive_status",
24161
+ "hive_skill",
24162
+ "hive_agents_md"
24163
+ ];
24164
+ const result = {};
24165
+ for (const tool3 of allHiveTools) {
24166
+ if (!allowed.includes(tool3)) {
24167
+ result[tool3] = false;
24168
+ }
24169
+ }
24170
+ return result;
24171
+ }
24104
24172
  configService.init();
24105
24173
  const hiveUserConfig = configService.getAgentConfig("hive-master");
24106
24174
  const hiveAutoLoadedSkills = await buildAutoLoadedSkillsContent("hive-master", configService, directory);
@@ -24125,6 +24193,7 @@ ${result.diff}
24125
24193
  temperature: architectUserConfig.temperature ?? 0.7,
24126
24194
  description: "Architect (Planner) - Plans features, interviews, writes plans. NEVER executes.",
24127
24195
  prompt: ARCHITECT_BEE_PROMPT + architectAutoLoadedSkills,
24196
+ tools: agentTools(["hive_feature_create", "hive_plan_write", "hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
24128
24197
  permission: {
24129
24198
  edit: "deny",
24130
24199
  task: "allow",
@@ -24143,6 +24212,22 @@ ${result.diff}
24143
24212
  temperature: swarmUserConfig.temperature ?? 0.5,
24144
24213
  description: "Swarm (Orchestrator) - Orchestrates execution. Delegates, spawns workers, verifies, merges.",
24145
24214
  prompt: SWARM_BEE_PROMPT + swarmAutoLoadedSkills,
24215
+ tools: agentTools([
24216
+ "hive_feature_create",
24217
+ "hive_feature_complete",
24218
+ "hive_plan_read",
24219
+ "hive_plan_approve",
24220
+ "hive_tasks_sync",
24221
+ "hive_task_create",
24222
+ "hive_task_update",
24223
+ "hive_worktree_create",
24224
+ "hive_worktree_discard",
24225
+ "hive_merge",
24226
+ "hive_context_write",
24227
+ "hive_status",
24228
+ "hive_skill",
24229
+ "hive_agents_md"
24230
+ ]),
24146
24231
  permission: {
24147
24232
  question: "allow",
24148
24233
  skill: "allow",
@@ -24159,6 +24244,7 @@ ${result.diff}
24159
24244
  mode: "subagent",
24160
24245
  description: "Scout (Explorer/Researcher/Retrieval) - Researches codebase + external docs/data.",
24161
24246
  prompt: SCOUT_BEE_PROMPT + scoutAutoLoadedSkills,
24247
+ tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
24162
24248
  permission: {
24163
24249
  edit: "deny",
24164
24250
  task: "deny",
@@ -24176,6 +24262,7 @@ ${result.diff}
24176
24262
  mode: "subagent",
24177
24263
  description: "Forager (Worker/Coder) - Executes tasks directly in isolated worktrees. Never delegates.",
24178
24264
  prompt: FORAGER_BEE_PROMPT + foragerAutoLoadedSkills,
24265
+ tools: agentTools(["hive_plan_read", "hive_worktree_commit", "hive_context_write", "hive_skill"]),
24179
24266
  permission: {
24180
24267
  task: "deny",
24181
24268
  delegate: "deny",
@@ -24191,6 +24278,7 @@ ${result.diff}
24191
24278
  mode: "subagent",
24192
24279
  description: "Hygienic (Consultant/Reviewer/Debugger) - Reviews plan documentation quality. OKAY/REJECT verdict.",
24193
24280
  prompt: HYGIENIC_BEE_PROMPT + hygienicAutoLoadedSkills,
24281
+ tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
24194
24282
  permission: {
24195
24283
  edit: "deny",
24196
24284
  task: "deny",