opencode-hive 1.3.6 → 1.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -871,8 +871,8 @@ var require_dist2 = __commonJS((exports) => {
871
871
  });
872
872
 
873
873
  // src/index.ts
874
- import * as path11 from "path";
875
- import * as fs13 from "fs";
874
+ import * as path13 from "path";
875
+ import * as fs14 from "fs";
876
876
  import * as os from "os";
877
877
 
878
878
  // ../../node_modules/zod/v4/classic/external.js
@@ -13196,7 +13196,7 @@ function tool(input) {
13196
13196
  }
13197
13197
  tool.schema = exports_external;
13198
13198
  // src/skills/registry.generated.ts
13199
- var BUILTIN_SKILL_NAMES = ["agents-md-mastery", "brainstorming", "code-reviewer", "dispatching-parallel-agents", "docker-mastery", "executing-plans", "parallel-exploration", "systematic-debugging", "test-driven-development", "verification-before-completion", "writing-plans"];
13199
+ var BUILTIN_SKILL_NAMES = ["agents-md-mastery", "brainstorming", "code-reviewer", "dispatching-parallel-agents", "docker-mastery", "executing-plans", "parallel-exploration", "systematic-debugging", "test-driven-development", "verification-before-completion", "verification-reviewer", "writing-plans"];
13200
13200
  var BUILTIN_SKILLS = [
13201
13201
  {
13202
13202
  name: "agents-md-mastery",
@@ -14195,7 +14195,9 @@ Hive detects runtime from project files:
14195
14195
  - \`Dockerfile\` → Builds from project Dockerfile
14196
14196
  - Fallback → \`ubuntu:24.04\`
14197
14197
 
14198
- **Override:** Set \`dockerImage\` in config (\`~/.config/opencode/agent_hive.json\`).
14198
+ **Override:** Set \`dockerImage\` in config (\`<project>/.hive/agent-hive.json\` preferred, legacy \`<project>/.opencode/agent_hive.json\`, \`~/.config/opencode/agent_hive.json\` fallback).
14199
+
14200
+ If project config is missing, invalid JSON, or invalid shape, Hive reads global config next and then falls back to defaults, surfacing a runtime warning when the project config is invalid.
14199
14201
 
14200
14202
  ## Red Flags - STOP
14201
14203
 
@@ -14363,7 +14365,7 @@ After all tasks complete and verified:
14363
14365
 
14364
14366
  When you need to answer "where/how does X work?" across multiple domains (codebase, tests, docs, OSS), investigating sequentially wastes time. Each investigation is independent and can happen in parallel.
14365
14367
 
14366
- **Core principle:** Decompose into independent sub-questions, spawn one task per sub-question, collect results asynchronously.
14368
+ **Core principle:** Decompose into independent sub-questions that fit in one context window, spawn one task per sub-question, then synthesize the bounded results.
14367
14369
 
14368
14370
  **Safe in Planning mode:** This is read-only exploration. It is OK to use during exploratory research even when there is no feature, no plan, and no approved tasks.
14369
14371
 
@@ -14393,7 +14395,7 @@ When you need to answer "where/how does X work?" across multiple domains (codeba
14393
14395
 
14394
14396
  ### 1. Decompose Into Independent Questions
14395
14397
 
14396
- Split your investigation into 2-4 independent sub-questions. Good decomposition:
14398
+ Split your investigation into 2-4 independent sub-questions. Each sub-question should fit in one context window. If a request will not fit in one context window, narrow the slice, capture bounded findings, and return to Hive with recommended next steps instead of pushing toward an oversized final report. Good decomposition:
14397
14399
 
14398
14400
  | Domain | Question Example |
14399
14401
  |--------|------------------|
@@ -14406,6 +14408,11 @@ Split your investigation into 2-4 independent sub-questions. Good decomposition:
14406
14408
  - "What is X?" then "How is X used?" (second depends on first)
14407
14409
  - "Find the bug" then "Fix the bug" (not read-only)
14408
14410
 
14411
+ **Stop and return to Hive when:**
14412
+ - one more fan-out would broaden scope too far
14413
+ - a sub-question no longer fits in one context window
14414
+ - the next useful step is implementation rather than exploration
14415
+
14409
14416
  ### 2. Spawn Tasks (Fan-Out)
14410
14417
 
14411
14418
  Launch all tasks before waiting for any results:
@@ -14445,27 +14452,21 @@ task({
14445
14452
  - Give each task a clear, focused \`description\`
14446
14453
  - Make prompts specific about what evidence to return
14447
14454
 
14448
- ### 3. Continue Working (Optional)
14455
+ ### 3. Collect Results
14449
14456
 
14450
- While tasks run, you can:
14451
- - Work on other aspects of the problem
14452
- - Prepare synthesis structure
14453
- - Start drafting based on what you already know
14457
+ After the fan-out message, collect the task results through the normal \`task()\` return flow. Do not invent background polling or a separate async workflow.
14454
14458
 
14455
- You'll receive a \`<system-reminder>\` notification when each task completes.
14456
-
14457
- ### 4. Collect Results
14459
+ ### 4. Synthesize Findings
14458
14460
 
14459
14461
  When each task completes, its result is returned directly. Collect the outputs from each task and proceed to synthesis.
14460
14462
 
14461
- ### 5. Synthesize Findings
14463
+ ### 5. Cleanup (If Needed)
14462
14464
 
14463
14465
  Combine results from all tasks:
14464
14466
  - Cross-reference findings (file X mentioned by tasks A and B)
14465
14467
  - Identify gaps (task C found nothing, need different approach)
14466
14468
  - Build coherent answer from parallel evidence
14467
-
14468
- ### 6. Cleanup (If Needed)
14469
+ - If the remaining work would no longer fit in one context window, return to Hive with bounded findings and recommended next steps
14469
14470
 
14470
14471
  No manual cancellation is required in task mode.
14471
14472
 
@@ -15395,6 +15396,116 @@ From 24 failure memories:
15395
15396
  Run the command. Read the output. THEN claim the result.
15396
15397
 
15397
15398
  This is non-negotiable.`
15399
+ },
15400
+ {
15401
+ name: "verification-reviewer",
15402
+ description: "Use when independently verifying implementation claims, post-merge review, or when a reviewer needs to falsify success assertions with command-and-output evidence",
15403
+ template: `# Verification Reviewer
15404
+
15405
+ ## Overview
15406
+
15407
+ Verify implementation claims by attempting to falsify them. Your job is not to confirm success; it is to find where success claims break down.
15408
+
15409
+ **Core principle:** Try to prove claims wrong. If you cannot, they are likely correct.
15410
+
15411
+ ## When to Use
15412
+
15413
+ Use this skill when:
15414
+ - Reviewing implementation changes that claim to be complete
15415
+ - Conducting post-merge verification of a task batch
15416
+ - A reviewer needs to independently confirm that acceptance criteria are met
15417
+ - Verifying that a bug fix actually resolves the reported symptom
15418
+
15419
+ Do not use this skill for:
15420
+ - Plan or documentation review (use the default Hygienic review path)
15421
+ - Code style or architecture review (use \`code-reviewer\`)
15422
+ - Pre-implementation planning
15423
+
15424
+ ## The Iron Law
15425
+
15426
+ \`\`\`
15427
+ RATIONALIZATIONS ARE NOT EVIDENCE
15428
+ \`\`\`
15429
+
15430
+ "The code looks correct" is not verification.
15431
+ "It should work because..." is not verification.
15432
+ "The tests pass" without showing test output is not verification.
15433
+
15434
+ Only command output, tool results, and observable behavior count as evidence.
15435
+
15436
+ ## Verification Protocol
15437
+
15438
+ For each claim in the implementation:
15439
+
15440
+ 1. **Identify the claim**: What specific thing is being asserted?
15441
+ 2. **Find the falsification test**: What command or check would fail if the claim is wrong?
15442
+ 3. **Run the test**: Execute the command fresh. Do not rely on cached or previous results.
15443
+ 4. **Record the evidence**: Quote the relevant output.
15444
+ 5. **Verdict**: Does the evidence support or contradict the claim?
15445
+
15446
+ ## Verification Depth by Change Type
15447
+
15448
+ Not all changes carry equal risk. Scale verification effort accordingly:
15449
+
15450
+ | Change type | Verification depth | Examples |
15451
+ |---|---|---|
15452
+ | Config / docs / prompts | Spot-check: confirm the file exists, syntax is valid, key content is present | Skill files, AGENTS.md, prompt strings |
15453
+ | Logic changes | Targeted: run the relevant test suite, check edge cases mentioned in the plan | New utility function, bug fix, refactor |
15454
+ | API / interface changes | Broad: run full test suite, check downstream consumers, verify types compile | New tool, changed function signatures |
15455
+ | Data model / migration | Exhaustive: run tests, verify data integrity, check backward compatibility | Schema changes, serialization format changes |
15456
+
15457
+ ## Anti-Rationalization Checklist
15458
+
15459
+ Before accepting any verification result, check yourself:
15460
+
15461
+ | Rationalization | Reality |
15462
+ |---|---|
15463
+ | "The code looks correct to me" | Reading code is not running code |
15464
+ | "The author said it passes" | Author claims are hypotheses, not evidence |
15465
+ | "It passed last time" | Stale evidence is not evidence |
15466
+ | "The linter is clean" | Linting does not prove correctness |
15467
+ | "The types compile" | Type-checking does not prove runtime behavior |
15468
+ | "I ran a similar check" | Similar is not the same |
15469
+ | "It's a trivial change" | Trivial changes break builds regularly |
15470
+
15471
+ ## Output Format
15472
+
15473
+ \`\`\`
15474
+ ## Verification Report
15475
+
15476
+ **Scope**: [What was reviewed - task name, PR, batch]
15477
+
15478
+ ### Claims Verified
15479
+
15480
+ | # | Claim | Test | Evidence | Verdict |
15481
+ |---|-------|------|----------|---------|
15482
+ | 1 | [What was claimed] | [Command/check run] | [Output excerpt] | PASS / FAIL / INCONCLUSIVE |
15483
+
15484
+ ### Summary
15485
+
15486
+ [1-3 sentences: overall assessment, any gaps, recommended actions]
15487
+
15488
+ ### Unverifiable Claims
15489
+
15490
+ [List any claims that could not be independently verified and why]
15491
+ \`\`\`
15492
+
15493
+ ## Verification Failures
15494
+
15495
+ When a claim fails verification:
15496
+
15497
+ 1. **Report the actual output** verbatim (do not summarize or interpret).
15498
+ 2. **State what was expected** vs what was observed.
15499
+ 3. **Do not suggest fixes** unless specifically asked. Your role is to identify the gap, not fill it.
15500
+ 4. **Flag severity**: Does this block the work, or is it a minor discrepancy?
15501
+
15502
+ ## Key Principles
15503
+
15504
+ - **Attempt falsification first.** Look for reasons the claim might be wrong before looking for reasons it is right.
15505
+ - **One claim, one test.** Do not batch multiple claims into a single verification step.
15506
+ - **Fresh runs only.** Re-run commands; do not reuse output from previous sessions or other agents.
15507
+ - **Quote output.** Paraphrasing introduces interpretation. Quote the relevant lines.
15508
+ - **Proportional effort.** Match verification depth to change risk. Do not spend 30 minutes verifying a typo fix.`
15398
15509
  },
15399
15510
  {
15400
15511
  name: "writing-plans",
@@ -15530,8 +15641,10 @@ All verification MUST be agent-executable (no human intervention):
15530
15641
  - Reference relevant skills with @ syntax
15531
15642
  - DRY, YAGNI, TDD, frequent commits
15532
15643
  - All acceptance criteria must be agent-executable (zero human intervention)
15533
- - Treat \`plan.md\` as the human-facing review surface and execution truth
15644
+ - Treat \`context/overview.md\` as the human-facing review surface
15645
+ - \`plan.md\` remains execution truth
15534
15646
  - Every plan needs a concise human-facing summary before \`## Tasks\`
15647
+ - The \`Design Summary\` in \`plan.md\` should stay readable and review-friendly even though overview-first review happens in \`context/overview.md\`
15535
15648
  - Optional Mermaid is allowed only in that pre-task summary section
15536
15649
  - Mermaid is for dependency or sequence overview only and is never required
15537
15650
  - Keep Discovery, Non-Goals, diagrams, and tasks in the same \`plan.md\` file
@@ -15728,14 +15841,24 @@ Intent Verbalization — verbalize before acting:
15728
15841
  - Delegate to Scout when you cannot name the file path upfront, expect to inspect 2+ files, or the question is open-ended ("how/where does X work?").
15729
15842
  - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
15730
15843
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
15844
+ - If discovery grows too broad, split broad research earlier into narrower Scout slices. Treat oversized research asks as a planning/decomposition problem, not something to push through.
15731
15845
 
15732
15846
  ### Delegation
15733
15847
  - Single-scout research → \`task({ subagent_type: "scout-researcher", prompt: "..." })\`
15734
15848
  - Parallel exploration → Load \`hive_skill("parallel-exploration")\` and follow the task mode delegation guidance.
15735
15849
  - Implementation → \`hive_worktree_start({ task: "01-task-name" })\` (creates worktree + Forager)
15736
15850
 
15851
+ ### Hive Network Lookup
15852
+ - \`hive_network_query\` is an optional lookup. Use it only when prior feature evidence would materially improve planning, orchestration, or review-routing decisions.
15853
+ - There is no startup lookup. First orient on live files and the current feature state.
15854
+ - Planning, orchestration, and review roles get network access first.
15855
+ - Treat retrieved snippets as historical leads, not execution truth. Live-file verification is still required.
15856
+ - Do not route worker execution through network retrieval. \`hive-helper\` is not a network consumer; it benefits indirectly from better upstream decisions.
15857
+
15737
15858
  During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
15738
15859
 
15860
+ **Synthesize Before Delegating:** Workers do not inherit your context or your conversation context. Relevant durable execution context is provided in \`spec.md\` under \`## Context\` when available. Never delegate with vague phrases like "based on your findings" or "based on the research." Restate the issue in concrete terms from the evidence you already have — include file paths, line ranges when known, expected result, and what done looks like. Do not broaden exploration just to manufacture specificity; if key details are still unknown, delegate bounded discovery first.
15861
+
15739
15862
  **When NOT to delegate:**
15740
15863
  - Single-file, <10-line changes — do directly
15741
15864
  - Sequential operations where you need the result of step N for step N+1
@@ -15747,7 +15870,13 @@ Save discoveries with \`hive_context_write\`:
15747
15870
  - User preferences
15748
15871
  - Research findings
15749
15872
 
15750
- Use context files for durable worker notes, decisions, and research. Keep the human-facing plan summary in \`plan.md\`.
15873
+ Use the lightweight context model explicitly:
15874
+ - \`overview\` = human-facing summary/history
15875
+ - \`draft\` = planner scratchpad
15876
+ - \`execution-decisions\` = orchestration log
15877
+ - all other names = durable free-form context
15878
+
15879
+ Treat the reserved names above as special-purpose files, not general notes. Use context files for durable worker notes, decisions, and research.
15751
15880
 
15752
15881
  When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
15753
15882
 
@@ -15824,8 +15953,8 @@ Each task declares dependencies with **Depends on**:
15824
15953
  - **Depends on**: none for no dependencies / parallel starts
15825
15954
  - **Depends on**: 1, 3 for explicit task-number dependencies
15826
15955
 
15827
- \`plan.md\` is the primary human-facing summary and the execution truth.
15828
- - Keep the summary before \`## Tasks\`.
15956
+ Refresh \`context/overview.md\` as the primary human-facing review surface, while \`plan.md\` remains execution truth.
15957
+ - Keep a readable \`Design Summary\` before \`## Tasks\` in \`plan.md\`.
15829
15958
  - Optional Mermaid is allowed only in the pre-task summary.
15830
15959
  - Never require Mermaid.
15831
15960
  - Use context files only for durable notes that help future execution.
@@ -15862,7 +15991,9 @@ Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **b
15862
15991
  ### Delegation Check
15863
15992
  1. Is there a specialized agent?
15864
15993
  2. Does this need external data? → Scout
15865
- 3. Default: delegate (don't do yourself)
15994
+ 3. Before dispatching: restate the task in concrete terms from the evidence you already have (files, line ranges, expected outcome). Do not forward vague summaries. Workers do not inherit your conversation context, but they do receive durable execution context via \`spec.md\`.
15995
+ 4. Default: delegate (don't do yourself)
15996
+ 5. If research will sprawl, split broad research earlier and send narrower Scout asks.
15866
15997
 
15867
15998
  ### Worker Spawning
15868
15999
  \`\`\`
@@ -15873,18 +16004,21 @@ hive_worktree_start({ task: "01-task-name" }) // Creates worktree + Forager
15873
16004
  1. \`task()\` is blocking — when it returns, the worker is done
15874
16005
  2. After \`task()\` returns, immediately call \`hive_status()\` to check the new task state and find next runnable tasks before any resume attempt
15875
16006
  3. Use \`continueFrom: "blocked"\` only when status is exactly \`blocked\`
15876
- 4. If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
15877
- 5. Never loop \`continueFrom: "blocked"\` on non-blocked statuses
15878
- 6. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
15879
- 7. Skip polling the result is available when \`task()\` returns
16007
+ 4. Before every blocked resume, call \`hive_status()\` immediately beforehand and verify the task is still exactly \`blocked\`
16008
+ 5. If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
16009
+ 6. Never loop \`continueFrom: "blocked"\` on non-blocked statuses
16010
+ 7. If any Hive tool response has \`terminal: true\`, treat it as final for that call and do not retry the same parameters
16011
+ 8. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
16012
+ 9. Skip polling — the result is available when \`task()\` returns
15880
16013
 
15881
16014
  ### Batch Merge + Verify Workflow
15882
16015
  When multiple tasks are in flight, prefer **batch completion** over per-task verification:
15883
16016
  1. Dispatch a batch of runnable tasks (ask user before parallelizing).
15884
16017
  2. Wait for all workers to finish.
15885
- 3. Merge each completed task branch into the current branch.
15886
- 4. Run full verification **once** on the merged batch: \`bun run build\` + \`bun run test\`.
15887
- 5. If verification fails, diagnose with full context. Fix directly or re-dispatch targeted tasks as needed.
16018
+ 3. Decide which completed task branches belong in the next merge batch.
16019
+ 4. Delegate the merge batch to \`hive-helper\`, for example: \`task({ subagent_type: 'hive-helper', prompt: 'delegate the merge batch: merge completed tasks 01-task-name and 02-task-name into the current branch, resolve preserved conflicts locally, continue through the batch, and return a concise summary.' })\`.
16020
+ 5. After the helper returns, inspect the merge summary and run full verification **once** on the merged batch: \`bun run build\` + \`bun run test\`.
16021
+ 6. If verification fails, diagnose with full context. Fix directly or re-dispatch targeted tasks as needed.
15888
16022
 
15889
16023
  ### Failure Recovery (After 3 Consecutive Failures)
15890
16024
  1. Stop all further edits
@@ -15893,7 +16027,8 @@ When multiple tasks are in flight, prefer **batch completion** over per-task ver
15893
16027
  4. Ask user via question() — present options and context
15894
16028
 
15895
16029
  ### Merge Strategy
15896
- \`hive_merge({ task: "01-task-name" })\` for each task after the batch completes, then verify the batch
16030
+ Hive decides when to merge, delegated \`hive-helper\` executes the batch, and Hive keeps post-batch verification.
16031
+ For bounded operational cleanup, Hive may also delegate hard-task cleanup to \`hive-helper\`: clarifying current feature/task/worktree state, summarizing interrupted wrap-up candidates, and creating a safe append-only manual follow-up when the work is isolated and does not change sequencing. Helper may inspect current feature state and summarize what is observably mergeable/resumable/blocked, but DAG-changing requests or anything that needs new sequencing must route back to Hive for plan amendment.
15897
16032
 
15898
16033
  ### Post-Batch Review (Hygienic)
15899
16034
  After completing and merging a batch:
@@ -15907,10 +16042,10 @@ After completing and merging a batch:
15907
16042
  | Feedback type | Action |
15908
16043
  |---------------|--------|
15909
16044
  | Minor / local to the completed batch | **Inline fix** — apply directly, no new task |
15910
- | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work |
16045
+ | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work; when the need comes from hard-task cleanup or wrap-up handling, Hive may delegate the safe append-only manual follow-up to \`hive-helper\` |
15911
16046
  | Changes downstream sequencing, dependencies, or scope | **Plan amendment** — update \`plan.md\`, then \`hive_tasks_sync({ refreshPending: true })\` to rewrite pending tasks from the amended plan |
15912
16047
 
15913
- When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync.
16048
+ When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync. \`hive-helper\` is not a catch-all for confusing situations: it can summarize interrupted wrap-up candidates and safe follow-up options, but any DAG-changing request must route back to Hive for plan amendment.
15914
16049
  After sync, re-check \`hive_status()\` for the updated **runnable** set before dispatching.
15915
16050
 
15916
16051
  ### AGENTS.md Maintenance
@@ -15974,6 +16109,8 @@ PLANNER, NOT IMPLEMENTER. "Do X" means "create plan for X".
15974
16109
 
15975
16110
  During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
15976
16111
 
16112
+ Use \`hive_network_query\` only as an optional lookup when prior feature evidence would materially improve the plan. There is no startup lookup; start with the live request and live files. Planning, orchestration, and review roles get network access first. Network results are historical leads only, so live-file verification is still required.
16113
+
15977
16114
  ## Self-Clearance Check (After Every Exchange)
15978
16115
 
15979
16116
  □ Core objective clearly defined?
@@ -16054,8 +16191,8 @@ Each task MUST declare dependencies with **Depends on**:
16054
16191
  - **Depends on**: none for no dependencies / parallel starts
16055
16192
  - **Depends on**: 1, 3 for explicit task-number dependencies
16056
16193
 
16057
- \`plan.md\` is the primary human-facing summary and the execution truth.
16058
- - Keep the human-facing summary in \`plan.md\` before \`## Tasks\`.
16194
+ Refresh \`context/overview.md\` as the primary human-facing review surface, while \`plan.md\` remains execution truth.
16195
+ - Keep the human-facing \`Design Summary\` in \`plan.md\` before \`## Tasks\`.
16059
16196
  - Optional Mermaid is allowed only in the pre-task summary.
16060
16197
  - Mermaid is for dependency or sequence overview only and is never required.
16061
16198
  - Use context files only for durable notes that help future workers.
@@ -16084,6 +16221,7 @@ Each task MUST declare dependencies with **Depends on**:
16084
16221
  - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
16085
16222
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
16086
16223
  - When running parallel exploration, align with the skill guidance.
16224
+ - If discovery keeps widening, split broad research earlier into narrower Scout slices. Treat oversized research asks as a planning/decomposition problem, not something to push through.
16087
16225
  `;
16088
16226
 
16089
16227
  // src/agents/swarm.ts
@@ -16107,15 +16245,37 @@ Intent Verbalization: "I detect [type] intent — [reason]. Routing to [action].
16107
16245
 
16108
16246
  Use \`hive_status()\` to see runnable tasks and blockedBy info. Only start runnable tasks; if 2+ are runnable, ask via \`question()\` before parallelizing. Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`. If tasks lack **Depends on** metadata, ask the planner to revise. If Scout returns substantial findings (3+ files, architecture patterns, or key decisions), persist them via \`hive_context_write\`.
16109
16247
 
16110
- Maintain \`context/overview.md\` with \`hive_context_write({ name: "overview", content: ... })\` as the primary human-facing document. Keep \`plan.md\` / \`spec.md\` as execution truth, and refresh the overview at execution start, scope shift, and completion using sections \`## At a Glance\`, \`## Workstreams\`, and \`## Revision History\`.
16248
+ If discovery starts to sprawl, split broad research earlier into narrower Scout slices. Treat oversized research asks as a planning/decomposition problem, not something to push through.
16249
+
16250
+ Maintain \`context/overview.md\` with \`hive_context_write({ name: "overview", content: ... })\` as the primary human-facing document. Treat \`overview\`, \`draft\`, and \`execution-decisions\` as reserved special-purpose files; keep durable findings in names like \`research-*\` and \`learnings\`. Keep \`plan.md\` / \`spec.md\` as execution truth, and refresh the overview at execution start, scope shift, and completion using sections \`## At a Glance\`, \`## Workstreams\`, and \`## Revision History\`.
16111
16251
 
16112
16252
  Standard checks: specialized agent? can I do it myself for sure? external system data (DBs/APIs/3rd-party tools)? If external data needed: load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out. In task mode, use task() for research fan-out. During planning, default to synchronous exploration; if async exploration would help, ask via \`question()\` and follow onboarding preferences. Default: delegate. Research tools (grep_app, context7, websearch, ast_grep) — delegate to Scout, not direct use.
16113
16253
 
16254
+ \`hive_network_query\` is an optional lookup for orchestration and review-routing decisions when prior feature evidence would materially improve the call. There is no startup lookup; orient on the live task and current repo state first. Planning, orchestration, and review roles get network access first. Treat network snippets as historical leads only; live-file verification is still required. \`hive-helper\` is not a network consumer.
16255
+
16114
16256
  **When NOT to delegate:**
16115
16257
  - Single-file, <10-line changes — do directly
16116
16258
  - Sequential operations where you need the result of step N for step N+1
16117
16259
  - Questions answerable with one grep + one file read
16118
16260
 
16261
+ ## Synthesize Before Delegating
16262
+
16263
+ Workers do not inherit your context or your conversation context. Relevant durable execution context is available in \`spec.md\` under \`## Context\` when present. Before dispatching any work, prove you understand it by restating the problem in concrete terms from the evidence you already have.
16264
+
16265
+ **Rules:**
16266
+ - Never delegate with vague phrases like "based on your findings", "based on the research", or "as discussed above" — the worker does not share your prior conversation state.
16267
+ - Restate the issue with specific file paths and line ranges when known.
16268
+ - State the expected result and what done looks like.
16269
+ - Do not broaden exploration just to manufacture specificity; delegate bounded discovery first when key details are still unknown.
16270
+
16271
+ <Bad>
16272
+ "Implement the changes we discussed based on the research findings."
16273
+ </Bad>
16274
+
16275
+ <Good>
16276
+ "In \`packages/core/src/services/task.ts:45-60\`, the \`resolveTask\` function silently swallows errors from \`loadConfig\`. Change it to propagate the error with the original message. Done = \`loadConfig\` failures surface to the caller, existing tests in \`task.test.ts\` still pass."
16277
+ </Good>
16278
+
16119
16279
  ## Delegation Prompt Structure (All 6 Sections)
16120
16280
 
16121
16281
  \`\`\`
@@ -16140,8 +16300,10 @@ Delegation guidance:
16140
16300
  - \`task()\` is BLOCKING — returns when the worker is done
16141
16301
  - After \`task()\` returns, call \`hive_status()\` immediately to check new state and find next runnable tasks before any resume attempt
16142
16302
  - Use \`continueFrom: "blocked"\` only when status is exactly \`blocked\`
16303
+ - Before every blocked resume, call \`hive_status()\` immediately beforehand and verify the task is still exactly \`blocked\`
16143
16304
  - If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
16144
16305
  - Never loop \`continueFrom: "blocked"\` on non-blocked statuses
16306
+ - If any Hive tool response has \`terminal: true\`, treat it as final for that call and do not retry the same parameters
16145
16307
  - For parallel fan-out, issue multiple \`task()\` calls in the same message
16146
16308
 
16147
16309
  ## After Delegation - VERIFY
@@ -16169,7 +16331,7 @@ After completing and merging a batch, run full verification on the main branch:
16169
16331
 
16170
16332
  ## Blocker Handling
16171
16333
 
16172
- When worker reports blocked: \`hive_status()\` → confirm status is exactly \`blocked\` → read blocker info; \`question()\` → ask user (no plain text); \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`. If status is not \`blocked\`, do not use blocked resume; only use \`hive_worktree_start({ feature, task })\` for normal starts (\`pending\` / \`in_progress\`).
16334
+ When worker reports blocked: \`hive_status()\` → confirm status is exactly \`blocked\` → read blocker info; \`question()\` → ask user (no plain text); call \`hive_status()\` again immediately before resume; only then \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`. If status is not \`blocked\`, do not use blocked resume; only use \`hive_worktree_start({ feature, task })\` for normal starts (\`pending\` / \`in_progress\`).
16173
16335
 
16174
16336
  ## Failure Recovery (After 3 Consecutive Failures)
16175
16337
 
@@ -16180,11 +16342,14 @@ When worker reports blocked: \`hive_status()\` → confirm status is exactly \`b
16180
16342
 
16181
16343
  ## Merge Strategy
16182
16344
 
16345
+ Swarm decides when to merge, then delegate the merge batch to \`hive-helper\`, for example:
16346
+
16183
16347
  \`\`\`
16184
- hive_merge({ task: "01-task-name", strategy: "merge" })
16348
+ task({ subagent_type: 'hive-helper', prompt: 'delegate the merge batch: merge completed tasks 01-task-name and 02-task-name into the current branch, resolve preserved conflicts locally, continue through the batch, and return a concise summary.' })
16185
16349
  \`\`\`
16186
16350
 
16187
- Merge after batch completes, then verify the merged result.
16351
+ After the helper returns, verify the merged result on the orchestrator branch with \`bun run build\` and \`bun run test\`.
16352
+ For bounded operational cleanup, Swarm may also delegate hard-task cleanup to \`hive-helper\`: clarifying current feature/task/worktree state, summarizing interrupted wrap-up candidates, and creating a safe append-only manual follow-up when the work is isolated and does not change sequencing. Helper may inspect current feature state and summarize what is observably mergeable/resumable/blocked, but DAG-changing requests or anything that needs new sequencing must route back to Swarm for plan amendment.
16188
16353
 
16189
16354
  ### Post-Batch Review (Hygienic)
16190
16355
 
@@ -16198,10 +16363,10 @@ Route review feedback through this decision tree before starting the next batch:
16198
16363
  | Feedback type | Action |
16199
16364
  |---------------|--------|
16200
16365
  | Minor / local to the completed batch | **Inline fix** — apply directly, no new task |
16201
- | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work |
16366
+ | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work; when the need comes from hard-task cleanup or wrap-up handling, Swarm may delegate the safe append-only manual follow-up to \`hive-helper\` |
16202
16367
  | Changes downstream sequencing, dependencies, or scope | **Plan amendment** — update \`plan.md\`, then \`hive_tasks_sync({ refreshPending: true })\` to rewrite pending tasks from the amended plan |
16203
16368
 
16204
- When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync.
16369
+ When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync. \`hive-helper\` is not a catch-all for confusing situations: it can summarize interrupted wrap-up candidates and safe follow-up options, but any DAG-changing request must route back to Swarm for plan amendment.
16205
16370
  After sync, re-check \`hive_status()\` for the updated **runnable** set before dispatching.
16206
16371
 
16207
16372
  ### AGENTS.md Maintenance
@@ -16243,6 +16408,8 @@ Research before answering; parallelize tool calls when investigating multiple in
16243
16408
 
16244
16409
  ## Research Protocol
16245
16410
 
16411
+ Research tasks must fit in one context window. If a request will not fit in one context window, narrow the slice, capture bounded findings, and return to Hive with recommended next steps instead of pushing toward an oversized final report.
16412
+
16246
16413
  ### Phase 1: Intent Analysis (First)
16247
16414
 
16248
16415
  \`\`\`
@@ -16285,6 +16452,14 @@ Stop when any is true:
16285
16452
  - repeated information across sources
16286
16453
  - two rounds with no new data
16287
16454
  - a direct answer is found
16455
+ - scope keeps broadening, next steps stay ambiguous, or continued exploration feels risky — return to Hive with bounded findings and next-step recommendations
16456
+
16457
+ ## Synthesis Rules
16458
+
16459
+ - When you have not read a file, do not speculate about its contents. State what is unknown and offer to investigate.
16460
+ - When results from multiple sources exist, provide a cited synthesis rather than dumping raw search output.
16461
+ - Every factual claim in the answer must link to a specific source (file:line, URL, snippet). If a claim cannot be sourced, omit it or mark it as unverified.
16462
+ - Prefer concise answers. If a longer treatment is needed, lead with a summary sentence, then expand.
16288
16463
 
16289
16464
  ## Evidence Check (Before Answering)
16290
16465
 
@@ -16297,12 +16472,23 @@ Stop when any is true:
16297
16472
 
16298
16473
  ## Tool Strategy
16299
16474
 
16475
+ ### Preferred Search Sequence
16476
+
16477
+ Start with local read-only tools before reaching for external sources:
16478
+
16479
+ 1. **Local discovery first**: \`glob\`, \`grep\`, \`read\`, \`ast_grep\` — cheapest and most precise for codebase questions.
16480
+ 2. **Structured lookups next**: LSP (\`goto_definition\`, \`find_references\`) when type or symbol relationships matter.
16481
+ 3. **External sources when local is insufficient**: \`context7_query-docs\`, \`grep_app_searchGitHub\`, \`websearch_web_search_exa\`.
16482
+ 4. **Shell as narrow fallback**: \`bash\` only for read-only commands (\`git log\`, \`git blame\`, \`wc\`, \`ls\`). Never use bash for file writes, redirects, or state-changing operations.
16483
+
16484
+ ### Tool Reference
16485
+
16300
16486
  | Need | Tool |
16301
16487
  |------|------|
16302
- | Type/Symbol info | LSP (goto_definition, find_references) |
16303
- | Structural patterns | ast_grep_find_code |
16304
- | Text patterns | grep |
16305
16488
  | File discovery | glob |
16489
+ | Text patterns | grep |
16490
+ | Structural patterns | ast_grep_find_code |
16491
+ | Type/Symbol info | LSP (goto_definition, find_references) |
16306
16492
  | Git history | bash (git log, git blame) |
16307
16493
  | External docs | context7_query-docs |
16308
16494
  | OSS examples | grep_app_searchGitHub |
@@ -16332,14 +16518,31 @@ When operating within a feature context:
16332
16518
  content: "## {Topic}\\n\\nDate: {YYYY-MM-DD}\\n\\n## Context\\n\\n## Findings"
16333
16519
  })
16334
16520
  \`\`\`
16521
+ - Use reserved names like \`overview\`, \`draft\`, and \`execution-decisions\` only for their special-purpose workflows, not for general research notes.
16522
+ - Use \`hive_context_write\` only for meaningful checkpoints, not every small step.
16335
16523
 
16336
16524
  ## Operating Rules
16337
16525
 
16338
- - Read-only behavior (no file changes)
16339
16526
  - Classify request first, then research
16340
16527
  - Use absolute paths for file references
16341
16528
  - Cite evidence for every claim
16342
16529
  - Use the current year when reasoning about time-sensitive information
16530
+
16531
+ ### Read-Only Contract
16532
+
16533
+ Scout must never modify project state. This includes:
16534
+ - No file edits, creation, or deletion (no \`write\`, \`edit\`, \`bash\` writes)
16535
+ - No temporary files, scratch files, or redirect-based output (\`>\`, \`>>\`, \`tee\`)
16536
+ - No state-changing shell commands (\`rm\`, \`mv\`, \`cp\`, \`mkdir\`, \`chmod\`, \`git checkout\`, \`git commit\`, \`npm install\`, \`pip install\`)
16537
+ - No code execution beyond read-only queries (\`git log\`, \`git blame\`, \`wc\`, \`ls\`)
16538
+
16539
+ When a task requires writing, tell the caller what to write and where, instead of writing it.
16540
+
16541
+ ### Speed and Efficiency
16542
+
16543
+ - When a question has independent sub-parts, investigate them in parallel using batched tool calls.
16544
+ - Stop researching when you have enough direct evidence to answer. Use additional sources only when the first source leaves ambiguity.
16545
+ - If the first tool call answers the question directly, answer immediately rather than running the full research protocol.
16343
16546
  `;
16344
16547
 
16345
16548
  // src/agents/forager.ts
@@ -16364,6 +16567,7 @@ Execute directly. Work in isolation. Do not delegate implementation.
16364
16567
  - REQUIRED: keep going until done, make decisions, course-correct on failure
16365
16568
 
16366
16569
  Your tool access is scoped to your role. Use only the tools available to you.
16570
+ Your task-local worker prompt lists exact tools and verification expectations. Defer to that prompt for tool scope and evidence requirements.
16367
16571
 
16368
16572
  ## Allowed Research
16369
16573
 
@@ -16400,6 +16604,8 @@ Do not modify the plan file.
16400
16604
  For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks), use:
16401
16605
  \`hive_context_write({ name: "learnings", content: "..." })\`.
16402
16606
 
16607
+ Treat reserved names like \`overview\`, \`draft\`, and \`execution-decisions\` as special-purpose files rather than general worker notes.
16608
+
16403
16609
  ## Working Rules
16404
16610
 
16405
16611
  - DRY/Search First: look for existing helpers before adding new code
@@ -16419,7 +16625,7 @@ EXPLORE → PLAN → EXECUTE → VERIFY → LOOP
16419
16625
  - EXPLORE: read references, gather context, search for patterns
16420
16626
  - PLAN: decide the minimum change, files to touch, and verification commands
16421
16627
  - EXECUTE: edit using conventions, reuse helpers, batch changes
16422
- - VERIFY: run best-effort checks (tests if available, ast_grep, lsp_diagnostics)
16628
+ - VERIFY: run best-effort checks (tests if available, ast_grep, lsp_diagnostics). Record observed output; do not substitute explanation for execution.
16423
16629
  - LOOP: if verification fails, diagnose and retry within the limit
16424
16630
 
16425
16631
  ## Progress Updates
@@ -16477,6 +16683,53 @@ If a command must run on the host or Docker is missing, report blocked.
16477
16683
  For deeper Docker expertise, load \`hive_skill("docker-mastery")\`.
16478
16684
  `;
16479
16685
 
16686
+ // src/agents/hive-helper.ts
16687
+ var HIVE_HELPER_PROMPT = `# Hive Helper
16688
+
16689
+ You are a runtime-only bounded hard-task operational assistant. You never plan, orchestrate, or broaden the assignment.
16690
+
16691
+ ## Bounded Modes
16692
+
16693
+ - merge recovery
16694
+ - state clarification
16695
+ - safe manual-follow-up assistance
16696
+
16697
+ ## Core Rules
16698
+
16699
+ - never plan, orchestrate, or broaden the assignment
16700
+ - use \`hive_merge\` first
16701
+ - if merge returns \`conflictState: 'preserved'\`, resolve locally in this helper session and continue the merge batch
16702
+ - may summarize observable state for the caller
16703
+ - may create safe append-only manual tasks when the requested follow-up fits the current approved DAG boundary
16704
+ - never update plan-backed task state
16705
+ - escalate DAG-changing requests back to Hive Master / Swarm for plan amendment
16706
+ - return only concise merged/state/task/blocker summary text
16707
+
16708
+ ## Scope
16709
+
16710
+ - Merge completed task branches for the caller
16711
+ - Clarify current observable feature/task/worktree state after interruptions or ambiguity
16712
+ - Create safe append-only manual follow-up tasks within the existing approved DAG boundary
16713
+ - Handle preserved merge conflicts in this isolated helper session
16714
+ - Continue the requested merge batch until complete or blocked
16715
+ - Do not start worktrees, rewrite plans, update plan-backed task state, or broaden the assignment
16716
+
16717
+ ## Execution
16718
+
16719
+ 1. Call \`hive_merge\` first for the requested task branch.
16720
+ 2. If the merge succeeds, continue to the next requested merge.
16721
+ 3. If \`conflictState: 'preserved'\`, inspect and resolve locally, complete the merge, and continue the merge batch.
16722
+ 4. When asked for state clarification, use observable \`hive_status\` output and summarize only what is present.
16723
+ 5. When asked for manual follow-up assistance, create only safe append-only manual tasks that do not rewrite the approved DAG or alter plan-backed task state.
16724
+ 6. If the request would change sequencing, dependencies, or plan scope, stop and escalate it back to Hive Master / Swarm for plan amendment.
16725
+ 7. If you cannot safely resolve a conflict or satisfy the bounded request, stop and return a concise blocker summary.
16726
+
16727
+ ## Output
16728
+
16729
+ Return only concise merged/state/task/blocker summary text.
16730
+ Do not include planning, orchestration commentary, or long narratives.
16731
+ `;
16732
+
16480
16733
  // src/agents/hygienic.ts
16481
16734
  var HYGIENIC_BEE_PROMPT = `# Hygienic (Consultant/Reviewer/Debugger)
16482
16735
 
@@ -16491,6 +16744,14 @@ If you are asked to review IMPLEMENTATION (code changes, diffs, PRs) instead of
16491
16744
  2. Apply it and return its output format
16492
16745
  3. Still do NOT edit code (review only)
16493
16746
 
16747
+ If you are asked to VERIFY implementation claims (confirm acceptance criteria, validate that a fix works, post-merge verification):
16748
+ 1. Load \`hive_skill("verification-reviewer")\`
16749
+ 2. Follow its falsification-first protocol
16750
+ 3. Return its evidence-backed report format
16751
+ 4. Do NOT accept rationalizations as evidence — only command output and observable results count
16752
+
16753
+ If \`hive_network_query\` results are included in a review, treat them as historical contrast with citations, never as authority over live repository state. Always prefer current diffs, files, and command output when they disagree.
16754
+
16494
16755
  Self-check before every critique:
16495
16756
  > "Am I questioning APPROACH or DOCUMENTATION?"
16496
16757
  > APPROACH → Stay silent
@@ -16654,6 +16915,7 @@ var BUILT_IN_AGENT_NAMES = [
16654
16915
  "swarm-orchestrator",
16655
16916
  "scout-researcher",
16656
16917
  "forager-worker",
16918
+ "hive-helper",
16657
16919
  "hygienic-reviewer"
16658
16920
  ];
16659
16921
  var CUSTOM_AGENT_BASES = ["forager-worker", "hygienic-reviewer"];
@@ -16676,6 +16938,7 @@ var DEFAULT_AGENT_MODELS = {
16676
16938
  "swarm-orchestrator": "github-copilot/claude-opus-4.5",
16677
16939
  "scout-researcher": "zai-coding-plan/glm-4.7",
16678
16940
  "forager-worker": "github-copilot/gpt-5.2-codex",
16941
+ "hive-helper": "github-copilot/gpt-5.2-codex",
16679
16942
  "hygienic-reviewer": "github-copilot/gpt-5.2-codex"
16680
16943
  };
16681
16944
  var DEFAULT_HIVE_CONFIG = {
@@ -16735,6 +16998,11 @@ var DEFAULT_HIVE_CONFIG = {
16735
16998
  temperature: 0.3,
16736
16999
  autoLoadSkills: ["test-driven-development", "verification-before-completion"]
16737
17000
  },
17001
+ "hive-helper": {
17002
+ model: DEFAULT_AGENT_MODELS["hive-helper"],
17003
+ temperature: 0.3,
17004
+ autoLoadSkills: []
17005
+ },
16738
17006
  "hygienic-reviewer": {
16739
17007
  model: DEFAULT_AGENT_MODELS["hygienic-reviewer"],
16740
17008
  temperature: 0.3,
@@ -17416,23 +17684,27 @@ class TaskService {
17416
17684
  return result;
17417
17685
  }
17418
17686
  create(featureName, name, order, metadata) {
17419
- const tasksPath = getTasksPath(this.projectRoot, featureName);
17420
17687
  const existingFolders = this.listFolders(featureName);
17688
+ const nextOrder = this.getNextOrder(existingFolders);
17689
+ if (order !== undefined && order !== nextOrder) {
17690
+ throw new Error(`Manual tasks are append-only: requested order ${order} does not match the next available order ${nextOrder}. ` + `Intermediate insertion requires plan amendment.`);
17691
+ }
17421
17692
  if (metadata?.source === "review" && metadata.dependsOn && metadata.dependsOn.length > 0) {
17422
17693
  throw new Error(`Review-sourced manual tasks cannot have explicit dependsOn. ` + `Cross-task dependencies require a plan amendment. ` + `Either remove the dependsOn field or amend the plan to express the dependency.`);
17423
17694
  }
17424
- const nextOrder = order ?? this.getNextOrder(existingFolders);
17425
- const folder = `${String(nextOrder).padStart(2, "0")}-${name}`;
17695
+ const dependsOn = metadata?.dependsOn ?? [];
17696
+ this.validateManualTaskDependsOn(featureName, dependsOn);
17697
+ const resolvedOrder = order ?? nextOrder;
17698
+ const folder = `${String(resolvedOrder).padStart(2, "0")}-${name}`;
17426
17699
  const collision = existingFolders.find((f) => {
17427
17700
  const match = f.match(/^(\d+)-/);
17428
- return match && parseInt(match[1], 10) === nextOrder;
17701
+ return match && parseInt(match[1], 10) === resolvedOrder;
17429
17702
  });
17430
17703
  if (collision) {
17431
- throw new Error(`Task folder collision: order ${nextOrder} already exists as "${collision}". ` + `Choose a different order number or omit to auto-increment.`);
17704
+ throw new Error(`Task folder collision: order ${resolvedOrder} already exists as "${collision}". ` + `Choose a different order number or omit to auto-increment.`);
17432
17705
  }
17433
17706
  const taskPath = getTaskPath(this.projectRoot, featureName, folder);
17434
17707
  ensureDir(taskPath);
17435
- const dependsOn = metadata?.dependsOn ?? [];
17436
17708
  const status = {
17437
17709
  status: "pending",
17438
17710
  origin: "manual",
@@ -17737,6 +18009,17 @@ ${f.content}`).join(`
17737
18009
  const orders = existingFolders.map((f) => parseInt(f.split("-")[0], 10)).filter((n) => !isNaN(n));
17738
18010
  return Math.max(...orders, 0) + 1;
17739
18011
  }
18012
+ validateManualTaskDependsOn(featureName, dependsOn) {
18013
+ for (const dependency of dependsOn) {
18014
+ const dependencyStatus = this.getRawStatus(featureName, dependency);
18015
+ if (!dependencyStatus) {
18016
+ throw new Error(`Manual tasks are append-only: dependency "${dependency}" does not exist. ` + `Dependencies on unfinished work require plan amendment.`);
18017
+ }
18018
+ if (dependencyStatus.status !== "done") {
18019
+ throw new Error(`Manual tasks are append-only: dependency "${dependency}" is ${dependencyStatus.status}, not done. ` + `Dependencies on unfinished work require plan amendment.`);
18020
+ }
18021
+ }
18022
+ }
17740
18023
  parseTasksFromPlan(content) {
17741
18024
  const tasks = [];
17742
18025
  const lines = content.split(`
@@ -22228,19 +22511,27 @@ class WorktreeService {
22228
22511
  const worktreePath = this.getWorktreePath(feature, step);
22229
22512
  const branchName = this.getBranchName(feature, step);
22230
22513
  const git = this.getGit();
22514
+ let worktreeRemoved = false;
22515
+ let branchDeleted = false;
22516
+ let pruned = false;
22231
22517
  try {
22232
22518
  await git.raw(["worktree", "remove", worktreePath, "--force"]);
22519
+ worktreeRemoved = true;
22233
22520
  } catch {
22234
22521
  await fs7.rm(worktreePath, { recursive: true, force: true });
22522
+ worktreeRemoved = true;
22235
22523
  }
22236
22524
  try {
22237
22525
  await git.raw(["worktree", "prune"]);
22526
+ pruned = true;
22238
22527
  } catch {}
22239
22528
  if (deleteBranch) {
22240
22529
  try {
22241
22530
  await git.deleteLocalBranch(branchName, true);
22531
+ branchDeleted = true;
22242
22532
  } catch {}
22243
22533
  }
22534
+ return { worktreeRemoved, branchDeleted, pruned };
22244
22535
  }
22245
22536
  async list(feature) {
22246
22537
  const worktreesDir = this.getWorktreesDir();
@@ -22372,34 +22663,61 @@ class WorktreeService {
22372
22663
  };
22373
22664
  }
22374
22665
  }
22375
- async merge(feature, step, strategy = "merge", message) {
22666
+ async merge(feature, step, strategy = "merge", message, options = {}) {
22376
22667
  const branchName = this.getBranchName(feature, step);
22377
22668
  const git = this.getGit();
22669
+ const cleanupMode = options.cleanup ?? "none";
22670
+ const preserveConflicts = options.preserveConflicts ?? false;
22671
+ const emptyCleanup = {
22672
+ worktreeRemoved: false,
22673
+ branchDeleted: false,
22674
+ pruned: false
22675
+ };
22378
22676
  if (strategy === "rebase" && message) {
22379
22677
  return {
22380
22678
  success: false,
22381
22679
  merged: false,
22680
+ strategy,
22681
+ filesChanged: [],
22682
+ conflicts: [],
22683
+ conflictState: "none",
22684
+ cleanup: emptyCleanup,
22382
22685
  error: "Custom merge message is not supported for rebase strategy"
22383
22686
  };
22384
22687
  }
22688
+ let filesChanged = [];
22385
22689
  try {
22386
22690
  const branches = await git.branch();
22387
22691
  if (!branches.all.includes(branchName)) {
22388
- return { success: false, merged: false, error: `Branch ${branchName} not found` };
22692
+ return {
22693
+ success: false,
22694
+ merged: false,
22695
+ strategy,
22696
+ filesChanged: [],
22697
+ conflicts: [],
22698
+ conflictState: "none",
22699
+ cleanup: emptyCleanup,
22700
+ error: `Branch ${branchName} not found`
22701
+ };
22389
22702
  }
22390
22703
  const currentBranch = branches.current;
22391
22704
  const diffStat = await git.diff([`${currentBranch}...${branchName}`, "--stat"]);
22392
- const filesChanged = diffStat.split(`
22705
+ filesChanged = diffStat.split(`
22393
22706
  `).filter((l) => l.trim() && l.includes("|")).map((l) => l.split("|")[0].trim());
22394
22707
  if (strategy === "squash") {
22395
22708
  await git.raw(["merge", "--squash", branchName]);
22396
22709
  const squashMessage = message || `hive: merge ${step} (squashed)`;
22397
22710
  const result = await git.commit(squashMessage);
22711
+ const cleanup = cleanupMode === "none" ? emptyCleanup : await this.remove(feature, step, cleanupMode === "worktree+branch");
22398
22712
  return {
22399
22713
  success: true,
22400
22714
  merged: true,
22715
+ strategy,
22401
22716
  sha: result.commit,
22402
- filesChanged
22717
+ filesChanged,
22718
+ conflicts: [],
22719
+ conflictState: "none",
22720
+ cleanup
22403
22721
  };
22404
22722
  } else if (strategy === "rebase") {
22405
22723
  const commits = await git.log([`${currentBranch}..${branchName}`]);
@@ -22408,40 +22726,62 @@ class WorktreeService {
22408
22726
  await git.raw(["cherry-pick", commit.hash]);
22409
22727
  }
22410
22728
  const head = (await git.revparse(["HEAD"])).trim();
22729
+ const cleanup = cleanupMode === "none" ? emptyCleanup : await this.remove(feature, step, cleanupMode === "worktree+branch");
22411
22730
  return {
22412
22731
  success: true,
22413
22732
  merged: true,
22733
+ strategy,
22414
22734
  sha: head,
22415
- filesChanged
22735
+ filesChanged,
22736
+ conflicts: [],
22737
+ conflictState: "none",
22738
+ cleanup
22416
22739
  };
22417
22740
  } else {
22418
22741
  const mergeMessage = message || `hive: merge ${step}`;
22419
22742
  const result = await git.merge([branchName, "--no-ff", "-m", mergeMessage]);
22420
22743
  const head = (await git.revparse(["HEAD"])).trim();
22744
+ const cleanup = cleanupMode === "none" ? emptyCleanup : await this.remove(feature, step, cleanupMode === "worktree+branch");
22421
22745
  return {
22422
22746
  success: true,
22423
22747
  merged: !result.failed,
22748
+ strategy,
22424
22749
  sha: head,
22425
22750
  filesChanged,
22426
- conflicts: result.conflicts?.map((c) => c.file || String(c)) || []
22751
+ conflicts: result.conflicts?.map((c) => c.file || String(c)) || [],
22752
+ conflictState: "none",
22753
+ cleanup
22427
22754
  };
22428
22755
  }
22429
22756
  } catch (error45) {
22430
22757
  const err = error45;
22431
22758
  if (err.message?.includes("CONFLICT") || err.message?.includes("conflict")) {
22432
- await git.raw(["merge", "--abort"]).catch(() => {});
22433
- await git.raw(["rebase", "--abort"]).catch(() => {});
22434
- await git.raw(["cherry-pick", "--abort"]).catch(() => {});
22759
+ const conflicts2 = await this.getActiveConflictFiles(git, err.message || "");
22760
+ const conflictState = preserveConflicts ? "preserved" : "aborted";
22761
+ if (!preserveConflicts) {
22762
+ await git.raw(["merge", "--abort"]).catch(() => {});
22763
+ await git.raw(["rebase", "--abort"]).catch(() => {});
22764
+ await git.raw(["cherry-pick", "--abort"]).catch(() => {});
22765
+ }
22435
22766
  return {
22436
22767
  success: false,
22437
22768
  merged: false,
22438
- error: "Merge conflicts detected",
22439
- conflicts: this.parseConflictsFromError(err.message || "")
22769
+ strategy,
22770
+ filesChanged,
22771
+ conflicts: conflicts2,
22772
+ conflictState,
22773
+ cleanup: emptyCleanup,
22774
+ error: "Merge conflicts detected"
22440
22775
  };
22441
22776
  }
22442
22777
  return {
22443
22778
  success: false,
22444
22779
  merged: false,
22780
+ strategy,
22781
+ filesChanged,
22782
+ conflicts: [],
22783
+ conflictState: "none",
22784
+ cleanup: emptyCleanup,
22445
22785
  error: err.message || "Merge failed"
22446
22786
  };
22447
22787
  }
@@ -22469,11 +22809,31 @@ class WorktreeService {
22469
22809
  }
22470
22810
  return conflicts2;
22471
22811
  }
22812
+ async getActiveConflictFiles(git, errorMessage) {
22813
+ try {
22814
+ const status = await git.status();
22815
+ if (status.conflicted.length > 0) {
22816
+ return [...new Set(status.conflicted)];
22817
+ }
22818
+ } catch {}
22819
+ return this.parseConflictsFromError(errorMessage);
22820
+ }
22472
22821
  }
22473
22822
  // ../hive-core/src/services/contextService.ts
22474
22823
  import * as fs8 from "fs";
22475
22824
  import * as path6 from "path";
22476
22825
  var RESERVED_OVERVIEW_CONTEXT = "overview";
22826
+ var DEFAULT_CONTEXT_CLASSIFICATION = {
22827
+ role: "durable",
22828
+ includeInExecution: true,
22829
+ includeInAgentsMdSync: true,
22830
+ includeInNetwork: true
22831
+ };
22832
+ var SPECIAL_CONTEXTS = {
22833
+ overview: { role: "human", includeInExecution: false, includeInAgentsMdSync: false, includeInNetwork: false },
22834
+ draft: { role: "scratchpad", includeInExecution: false, includeInAgentsMdSync: false, includeInNetwork: false },
22835
+ "execution-decisions": { role: "operational", includeInExecution: false, includeInAgentsMdSync: false, includeInNetwork: false }
22836
+ };
22477
22837
 
22478
22838
  class ContextService {
22479
22839
  projectRoot;
@@ -22502,15 +22862,18 @@ class ContextService {
22502
22862
  const contextPath = getContextPath(this.projectRoot, featureName);
22503
22863
  if (!fileExists(contextPath))
22504
22864
  return [];
22505
- const files = fs8.readdirSync(contextPath, { withFileTypes: true }).filter((f) => f.isFile() && f.name.endsWith(".md")).map((f) => f.name);
22865
+ const files = fs8.readdirSync(contextPath, { withFileTypes: true }).filter((f) => f.isFile() && f.name.endsWith(".md")).map((f) => f.name).sort((a, b) => a.localeCompare(b));
22506
22866
  return files.map((name) => {
22507
22867
  const filePath = path6.join(contextPath, name);
22508
22868
  const stat2 = fs8.statSync(filePath);
22509
22869
  const content = readText(filePath) || "";
22870
+ const normalizedName = name.replace(/\.md$/, "");
22871
+ const classification = this.classifyContextName(normalizedName);
22510
22872
  return {
22511
- name: name.replace(/\.md$/, ""),
22873
+ name: normalizedName,
22512
22874
  content,
22513
- updatedAt: stat2.mtime.toISOString()
22875
+ updatedAt: stat2.mtime.toISOString(),
22876
+ ...classification
22514
22877
  };
22515
22878
  });
22516
22879
  }
@@ -22518,7 +22881,13 @@ class ContextService {
22518
22881
  return this.list(featureName).find((file2) => file2.name === RESERVED_OVERVIEW_CONTEXT) ?? null;
22519
22882
  }
22520
22883
  listExecutionContext(featureName) {
22521
- return this.list(featureName).filter((file2) => file2.name !== RESERVED_OVERVIEW_CONTEXT);
22884
+ return this.list(featureName).filter((file2) => file2.includeInExecution);
22885
+ }
22886
+ listAgentsMdSyncContext(featureName) {
22887
+ return this.list(featureName).filter((file2) => file2.includeInAgentsMdSync);
22888
+ }
22889
+ listNetworkContext(featureName) {
22890
+ return this.list(featureName).filter((file2) => file2.includeInNetwork);
22522
22891
  }
22523
22892
  delete(featureName, fileName) {
22524
22893
  const contextPath = getContextPath(this.projectRoot, featureName);
@@ -22577,10 +22946,101 @@ ${f.content}`);
22577
22946
  const normalized = name.replace(/\.md$/, "");
22578
22947
  return `${normalized}.md`;
22579
22948
  }
22949
+ classifyContextName(name) {
22950
+ return SPECIAL_CONTEXTS[name] ?? DEFAULT_CONTEXT_CLASSIFICATION;
22951
+ }
22580
22952
  }
22581
- // ../hive-core/src/services/sessionService.ts
22953
+ // ../hive-core/src/services/networkService.ts
22582
22954
  import * as fs9 from "fs";
22583
22955
  import * as path7 from "path";
22956
+ class NetworkService {
22957
+ projectRoot;
22958
+ contextService;
22959
+ constructor(projectRoot) {
22960
+ this.projectRoot = projectRoot;
22961
+ this.contextService = new ContextService(projectRoot);
22962
+ }
22963
+ query(options) {
22964
+ const normalizedQuery = normalizeText(options.query);
22965
+ if (!normalizedQuery) {
22966
+ return [];
22967
+ }
22968
+ const matchingFeatures = listFeatureDirectories(this.projectRoot).map((entry) => entry.logicalName).filter((featureName) => featureName !== options.currentFeature).sort((left, right) => left.localeCompare(right)).map((featureName) => ({
22969
+ featureName,
22970
+ matches: this.collectMatches(featureName, normalizedQuery, options)
22971
+ })).filter((entry) => entry.matches.length > 0).slice(0, options.maxFeatures);
22972
+ return matchingFeatures.flatMap((entry) => entry.matches);
22973
+ }
22974
+ collectMatches(featureName, normalizedQuery, options) {
22975
+ const candidates = [];
22976
+ const planMatch = this.matchPlan(featureName, normalizedQuery, options.maxSnippetChars);
22977
+ if (planMatch) {
22978
+ candidates.push(planMatch);
22979
+ }
22980
+ const contextMatches = this.contextService.listNetworkContext(featureName).sort((left, right) => left.name.localeCompare(right.name)).map((contextFile) => this.matchContext(featureName, contextFile, normalizedQuery, options.maxSnippetChars)).filter((result) => result !== null);
22981
+ candidates.push(...contextMatches);
22982
+ return candidates.sort((left, right) => {
22983
+ if (left.sortRank !== right.sortRank) {
22984
+ return left.sortRank - right.sortRank;
22985
+ }
22986
+ return left.sourceName.localeCompare(right.sourceName);
22987
+ }).slice(0, options.maxSnippetsPerFeature).map(({ sortRank: _sortRank, ...result }) => result);
22988
+ }
22989
+ matchPlan(featureName, normalizedQuery, maxSnippetChars) {
22990
+ const planPath = getPlanPath(this.projectRoot, featureName);
22991
+ const content = readText(planPath);
22992
+ if (content === null) {
22993
+ return null;
22994
+ }
22995
+ const snippet = extractSnippet(content, normalizedQuery, maxSnippetChars);
22996
+ if (!snippet) {
22997
+ return null;
22998
+ }
22999
+ const stat2 = fs9.statSync(planPath);
23000
+ return {
23001
+ feature: featureName,
23002
+ sourceType: "plan",
23003
+ sourceName: "plan.md",
23004
+ path: planPath,
23005
+ updatedAt: stat2.mtime.toISOString(),
23006
+ snippet,
23007
+ sortRank: 0
23008
+ };
23009
+ }
23010
+ matchContext(featureName, contextFile, normalizedQuery, maxSnippetChars) {
23011
+ const snippet = extractSnippet(contextFile.content, normalizedQuery, maxSnippetChars);
23012
+ if (!snippet) {
23013
+ return null;
23014
+ }
23015
+ return {
23016
+ feature: featureName,
23017
+ sourceType: "context",
23018
+ sourceName: contextFile.name,
23019
+ path: path7.join(this.projectRoot, ".hive", "features", this.resolveDirectoryName(featureName), "context", `${contextFile.name}.md`),
23020
+ updatedAt: contextFile.updatedAt,
23021
+ snippet,
23022
+ sortRank: 1
23023
+ };
23024
+ }
23025
+ resolveDirectoryName(featureName) {
23026
+ const match = listFeatureDirectories(this.projectRoot).find((entry) => entry.logicalName === featureName);
23027
+ return match?.directoryName ?? featureName;
23028
+ }
23029
+ }
23030
+ function normalizeText(value) {
23031
+ return value.toLowerCase().replace(/\s+/g, " ").trim();
23032
+ }
23033
+ function extractSnippet(content, normalizedQuery, maxSnippetChars) {
23034
+ const normalizedContent = content.replace(/\s+/g, " ").trim();
23035
+ const matchIndex = normalizedContent.toLowerCase().indexOf(normalizedQuery);
23036
+ if (matchIndex === -1) {
23037
+ return null;
23038
+ }
23039
+ return normalizedContent.slice(matchIndex, matchIndex + maxSnippetChars).trim();
23040
+ }
23041
+ // ../hive-core/src/services/sessionService.ts
23042
+ import * as fs10 from "fs";
23043
+ import * as path8 from "path";
22584
23044
  class SessionService {
22585
23045
  projectRoot;
22586
23046
  constructor(projectRoot) {
@@ -22591,10 +23051,14 @@ class SessionService {
22591
23051
  return;
22592
23052
  }
22593
23053
  const { sessionId: _sessionId, ...rest } = patch;
22594
- Object.assign(target, rest);
23054
+ for (const [key, value] of Object.entries(rest)) {
23055
+ if (value !== undefined || key === "directiveRecoveryState") {
23056
+ target[key] = value;
23057
+ }
23058
+ }
22595
23059
  }
22596
23060
  getSessionsPath(featureName) {
22597
- return path7.join(getFeaturePath(this.projectRoot, featureName), "sessions.json");
23061
+ return path8.join(getFeaturePath(this.projectRoot, featureName), "sessions.json");
22598
23062
  }
22599
23063
  getSessions(featureName) {
22600
23064
  const sessionsPath = this.getSessionsPath(featureName);
@@ -22602,7 +23066,7 @@ class SessionService {
22602
23066
  }
22603
23067
  saveSessions(featureName, data) {
22604
23068
  const sessionsPath = this.getSessionsPath(featureName);
22605
- ensureDir(path7.dirname(sessionsPath));
23069
+ ensureDir(path8.dirname(sessionsPath));
22606
23070
  writeJson(sessionsPath, data);
22607
23071
  }
22608
23072
  getGlobalSessions() {
@@ -22611,12 +23075,12 @@ class SessionService {
22611
23075
  }
22612
23076
  saveGlobalSessions(data) {
22613
23077
  const globalPath = getGlobalSessionsPath(this.projectRoot);
22614
- ensureDir(path7.dirname(globalPath));
23078
+ ensureDir(path8.dirname(globalPath));
22615
23079
  writeJson(globalPath, data);
22616
23080
  }
22617
23081
  updateGlobalSessions(mutator) {
22618
23082
  const globalPath = getGlobalSessionsPath(this.projectRoot);
22619
- ensureDir(path7.dirname(globalPath));
23083
+ ensureDir(path8.dirname(globalPath));
22620
23084
  const release = acquireLockSync(globalPath);
22621
23085
  try {
22622
23086
  const data = readJson(globalPath) || { sessions: [] };
@@ -22734,10 +23198,10 @@ class SessionService {
22734
23198
  if (globalSession?.featureName) {
22735
23199
  return globalSession.featureName;
22736
23200
  }
22737
- const featuresPath = path7.join(this.projectRoot, ".hive", "features");
22738
- if (!fs9.existsSync(featuresPath))
23201
+ const featuresPath = path8.join(this.projectRoot, ".hive", "features");
23202
+ if (!fs10.existsSync(featuresPath))
22739
23203
  return null;
22740
- const features = fs9.readdirSync(featuresPath, { withFileTypes: true }).filter((d) => d.isDirectory()).map((d) => d.name);
23204
+ const features = fs10.readdirSync(featuresPath, { withFileTypes: true }).filter((d) => d.isDirectory()).map((d) => d.name);
22741
23205
  for (const feature of features) {
22742
23206
  const sessions = this.getSessions(feature);
22743
23207
  if (sessions.sessions.some((s) => s.sessionId === sessionId)) {
@@ -22779,16 +23243,26 @@ class SessionService {
22779
23243
  }
22780
23244
  }
22781
23245
  // ../hive-core/src/services/configService.ts
22782
- import * as fs10 from "fs";
22783
- import * as path8 from "path";
23246
+ import * as fs11 from "fs";
23247
+ import * as path9 from "path";
22784
23248
  class ConfigService {
22785
23249
  configPath;
23250
+ projectConfigPath;
23251
+ legacyProjectConfigPath;
22786
23252
  cachedConfig = null;
22787
23253
  cachedCustomAgentConfigs = null;
22788
- constructor() {
23254
+ activeReadSourceType = "global";
23255
+ activeReadPath;
23256
+ lastFallbackWarning = null;
23257
+ constructor(projectRoot) {
22789
23258
  const homeDir = process.env.HOME || process.env.USERPROFILE || "";
22790
- const configDir = path8.join(homeDir, ".config", "opencode");
22791
- this.configPath = path8.join(configDir, "agent_hive.json");
23259
+ const configDir = path9.join(homeDir, ".config", "opencode");
23260
+ this.configPath = path9.join(configDir, "agent_hive.json");
23261
+ this.activeReadPath = this.configPath;
23262
+ if (projectRoot) {
23263
+ this.projectConfigPath = path9.join(projectRoot, ".hive", "agent-hive.json");
23264
+ this.legacyProjectConfigPath = path9.join(projectRoot, ".opencode", "agent_hive.json");
23265
+ }
22792
23266
  }
22793
23267
  getPath() {
22794
23268
  return this.configPath;
@@ -22797,43 +23271,88 @@ class ConfigService {
22797
23271
  if (this.cachedConfig !== null) {
22798
23272
  return this.cachedConfig;
22799
23273
  }
22800
- try {
22801
- if (!fs10.existsSync(this.configPath)) {
22802
- this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22803
- this.cachedCustomAgentConfigs = null;
23274
+ if (this.projectConfigPath && fs11.existsSync(this.projectConfigPath)) {
23275
+ const projectStored = this.readStoredConfig(this.projectConfigPath);
23276
+ if (projectStored.ok) {
23277
+ this.activeReadSourceType = "project";
23278
+ this.activeReadPath = this.projectConfigPath;
23279
+ this.lastFallbackWarning = null;
23280
+ this.cachedConfig = this.mergeWithDefaults(projectStored.value);
22804
23281
  return this.cachedConfig;
22805
23282
  }
22806
- const raw = fs10.readFileSync(this.configPath, "utf-8");
22807
- const stored = JSON.parse(raw);
22808
- const storedCustomAgents = this.isObjectRecord(stored.customAgents) ? stored.customAgents : {};
22809
- const mergedBuiltInAgents = BUILT_IN_AGENT_NAMES.reduce((acc, agentName) => {
22810
- acc[agentName] = {
22811
- ...DEFAULT_HIVE_CONFIG.agents?.[agentName],
22812
- ...stored.agents?.[agentName]
22813
- };
22814
- return acc;
22815
- }, {});
22816
- const merged = {
22817
- ...DEFAULT_HIVE_CONFIG,
22818
- ...stored,
22819
- agents: {
22820
- ...DEFAULT_HIVE_CONFIG.agents,
22821
- ...stored.agents,
22822
- ...mergedBuiltInAgents
22823
- },
22824
- customAgents: {
22825
- ...DEFAULT_HIVE_CONFIG.customAgents,
22826
- ...storedCustomAgents
22827
- }
22828
- };
22829
- this.cachedConfig = merged;
23283
+ const fallbackReason2 = "reason" in projectStored ? projectStored.reason : "read_error";
23284
+ this.lastFallbackWarning = this.createProjectFallbackWarning(this.projectConfigPath, fallbackReason2);
23285
+ } else if (this.legacyProjectConfigPath && fs11.existsSync(this.legacyProjectConfigPath)) {
23286
+ const projectStored = this.readStoredConfig(this.legacyProjectConfigPath);
23287
+ if (projectStored.ok) {
23288
+ this.activeReadSourceType = "project";
23289
+ this.activeReadPath = this.legacyProjectConfigPath;
23290
+ this.lastFallbackWarning = null;
23291
+ this.cachedConfig = this.mergeWithDefaults(projectStored.value);
23292
+ return this.cachedConfig;
23293
+ }
23294
+ const fallbackReason2 = "reason" in projectStored ? projectStored.reason : "read_error";
23295
+ this.lastFallbackWarning = this.createProjectFallbackWarning(this.legacyProjectConfigPath, fallbackReason2);
23296
+ }
23297
+ if (!this.projectConfigPath && !this.legacyProjectConfigPath) {
23298
+ this.lastFallbackWarning = null;
23299
+ }
23300
+ if (!fs11.existsSync(this.configPath)) {
23301
+ this.activeReadSourceType = "global";
23302
+ this.activeReadPath = this.configPath;
23303
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22830
23304
  this.cachedCustomAgentConfigs = null;
23305
+ if (this.lastFallbackWarning && this.lastFallbackWarning.fallbackType !== "defaults") {
23306
+ this.lastFallbackWarning = {
23307
+ message: `Failed to read project config at ${this.lastFallbackWarning.sourcePath}; global config at ${this.configPath} is missing; using defaults`,
23308
+ sourceType: this.lastFallbackWarning.sourceType,
23309
+ sourcePath: this.lastFallbackWarning.sourcePath,
23310
+ fallbackType: "defaults",
23311
+ reason: this.lastFallbackWarning.reason
23312
+ };
23313
+ }
22831
23314
  return this.cachedConfig;
22832
- } catch {
22833
- this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
23315
+ }
23316
+ const globalStored = this.readStoredConfig(this.configPath);
23317
+ if (globalStored.ok) {
23318
+ this.activeReadSourceType = "global";
23319
+ this.activeReadPath = this.configPath;
23320
+ this.cachedConfig = this.mergeWithDefaults(globalStored.value);
22834
23321
  this.cachedCustomAgentConfigs = null;
22835
23322
  return this.cachedConfig;
22836
23323
  }
23324
+ const fallbackReason = "reason" in globalStored ? globalStored.reason : "read_error";
23325
+ this.activeReadSourceType = "global";
23326
+ this.activeReadPath = this.configPath;
23327
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
23328
+ this.cachedCustomAgentConfigs = null;
23329
+ if (this.lastFallbackWarning) {
23330
+ this.lastFallbackWarning = {
23331
+ message: `Failed to read project config at ${this.lastFallbackWarning.sourcePath}; global config at ${this.configPath} is also invalid; using defaults`,
23332
+ sourceType: this.lastFallbackWarning.sourceType,
23333
+ sourcePath: this.lastFallbackWarning.sourcePath,
23334
+ fallbackType: "defaults",
23335
+ reason: this.lastFallbackWarning.reason
23336
+ };
23337
+ return this.cachedConfig;
23338
+ }
23339
+ this.lastFallbackWarning = {
23340
+ message: `Failed to read global config at ${this.configPath}; using defaults`,
23341
+ sourceType: "global",
23342
+ sourcePath: this.configPath,
23343
+ fallbackType: "defaults",
23344
+ reason: fallbackReason
23345
+ };
23346
+ return this.cachedConfig;
23347
+ }
23348
+ getActiveReadSourceType() {
23349
+ return this.activeReadSourceType;
23350
+ }
23351
+ getActiveReadPath() {
23352
+ return this.activeReadPath;
23353
+ }
23354
+ getLastFallbackWarning() {
23355
+ return this.lastFallbackWarning;
22837
23356
  }
22838
23357
  set(updates) {
22839
23358
  this.cachedConfig = null;
@@ -22851,30 +23370,34 @@ class ConfigService {
22851
23370
  ...updates.customAgents
22852
23371
  } : current.customAgents
22853
23372
  };
22854
- const configDir = path8.dirname(this.configPath);
22855
- if (!fs10.existsSync(configDir)) {
22856
- fs10.mkdirSync(configDir, { recursive: true });
23373
+ const configDir = path9.dirname(this.configPath);
23374
+ if (!fs11.existsSync(configDir)) {
23375
+ fs11.mkdirSync(configDir, { recursive: true });
22857
23376
  }
22858
- fs10.writeFileSync(this.configPath, JSON.stringify(merged, null, 2));
23377
+ fs11.writeFileSync(this.configPath, JSON.stringify(merged, null, 2));
22859
23378
  this.cachedConfig = merged;
22860
23379
  this.cachedCustomAgentConfigs = null;
22861
23380
  return merged;
22862
23381
  }
22863
23382
  exists() {
22864
- return fs10.existsSync(this.configPath);
23383
+ return fs11.existsSync(this.configPath);
22865
23384
  }
22866
23385
  init() {
23386
+ const resolved = this.get();
23387
+ if (this.projectConfigPath || this.legacyProjectConfigPath) {
23388
+ return resolved;
23389
+ }
22867
23390
  if (!this.exists()) {
22868
23391
  return this.set(DEFAULT_HIVE_CONFIG);
22869
23392
  }
22870
- return this.get();
23393
+ return resolved;
22871
23394
  }
22872
23395
  getAgentConfig(agent) {
22873
23396
  const config2 = this.get();
22874
23397
  if (this.isBuiltInAgent(agent)) {
22875
23398
  const agentConfig = config2.agents?.[agent] ?? {};
22876
23399
  const defaultAutoLoadSkills = DEFAULT_HIVE_CONFIG.agents?.[agent]?.autoLoadSkills ?? [];
22877
- const effectiveAutoLoadSkills = this.resolveAutoLoadSkills(defaultAutoLoadSkills, agentConfig.autoLoadSkills ?? [], this.isPlannerAgent(agent));
23400
+ const effectiveAutoLoadSkills = agent === "hive-helper" ? defaultAutoLoadSkills : this.resolveAutoLoadSkills(defaultAutoLoadSkills, agentConfig.autoLoadSkills ?? [], this.isPlannerAgent(agent));
22878
23401
  return {
22879
23402
  ...agentConfig,
22880
23403
  autoLoadSkills: effectiveAutoLoadSkills
@@ -22995,10 +23518,139 @@ class ConfigService {
22995
23518
  }
22996
23519
  return configuredCadence;
22997
23520
  }
23521
+ readStoredConfig(configPath) {
23522
+ try {
23523
+ const raw = fs11.readFileSync(configPath, "utf-8");
23524
+ const parsed = JSON.parse(raw);
23525
+ if (parsed === null || typeof parsed !== "object" || Array.isArray(parsed)) {
23526
+ return { ok: false, reason: "validation_error" };
23527
+ }
23528
+ if (!this.isValidStoredConfig(parsed)) {
23529
+ return { ok: false, reason: "validation_error" };
23530
+ }
23531
+ return { ok: true, value: parsed };
23532
+ } catch (error45) {
23533
+ if (error45 instanceof SyntaxError) {
23534
+ return { ok: false, reason: "parse_error" };
23535
+ }
23536
+ return { ok: false, reason: "read_error" };
23537
+ }
23538
+ }
23539
+ mergeWithDefaults(stored) {
23540
+ const storedCustomAgents = this.isObjectRecord(stored.customAgents) ? stored.customAgents : {};
23541
+ const mergedBuiltInAgents = BUILT_IN_AGENT_NAMES.reduce((acc, agentName) => {
23542
+ acc[agentName] = {
23543
+ ...DEFAULT_HIVE_CONFIG.agents?.[agentName],
23544
+ ...stored.agents?.[agentName]
23545
+ };
23546
+ return acc;
23547
+ }, {});
23548
+ return {
23549
+ ...DEFAULT_HIVE_CONFIG,
23550
+ ...stored,
23551
+ agents: {
23552
+ ...DEFAULT_HIVE_CONFIG.agents,
23553
+ ...stored.agents,
23554
+ ...mergedBuiltInAgents
23555
+ },
23556
+ customAgents: {
23557
+ ...DEFAULT_HIVE_CONFIG.customAgents,
23558
+ ...storedCustomAgents
23559
+ }
23560
+ };
23561
+ }
23562
+ createProjectFallbackWarning(projectConfigPath, reason) {
23563
+ return {
23564
+ message: `Failed to read project config at ${projectConfigPath}; using global config at ${this.configPath}`,
23565
+ sourceType: "project",
23566
+ sourcePath: projectConfigPath,
23567
+ fallbackType: "global",
23568
+ fallbackPath: this.configPath,
23569
+ reason
23570
+ };
23571
+ }
23572
+ isValidStoredConfig(value) {
23573
+ if (!this.isObjectRecord(value)) {
23574
+ return false;
23575
+ }
23576
+ const config2 = value;
23577
+ if (config2.$schema !== undefined && typeof config2.$schema !== "string") {
23578
+ return false;
23579
+ }
23580
+ if (config2.enableToolsFor !== undefined && !this.isStringArray(config2.enableToolsFor)) {
23581
+ return false;
23582
+ }
23583
+ if (config2.disableSkills !== undefined && !this.isStringArray(config2.disableSkills)) {
23584
+ return false;
23585
+ }
23586
+ if (config2.disableMcps !== undefined && !this.isStringArray(config2.disableMcps)) {
23587
+ return false;
23588
+ }
23589
+ if (config2.omoSlimEnabled !== undefined && typeof config2.omoSlimEnabled !== "boolean") {
23590
+ return false;
23591
+ }
23592
+ if (config2.agentMode !== undefined && config2.agentMode !== "unified" && config2.agentMode !== "dedicated") {
23593
+ return false;
23594
+ }
23595
+ if (config2.agents !== undefined && !this.isObjectRecord(config2.agents)) {
23596
+ return false;
23597
+ }
23598
+ if (this.isObjectRecord(config2.agents)) {
23599
+ for (const declaration of Object.values(config2.agents)) {
23600
+ if (!this.isValidAgentConfigDeclaration(declaration)) {
23601
+ return false;
23602
+ }
23603
+ }
23604
+ }
23605
+ if (config2.sandbox !== undefined && config2.sandbox !== "none" && config2.sandbox !== "docker") {
23606
+ return false;
23607
+ }
23608
+ if (config2.dockerImage !== undefined && typeof config2.dockerImage !== "string") {
23609
+ return false;
23610
+ }
23611
+ if (config2.persistentContainers !== undefined && typeof config2.persistentContainers !== "boolean") {
23612
+ return false;
23613
+ }
23614
+ if (config2.hook_cadence !== undefined && !this.isHookCadenceRecord(config2.hook_cadence)) {
23615
+ return false;
23616
+ }
23617
+ return true;
23618
+ }
23619
+ isStringArray(value) {
23620
+ return Array.isArray(value) && value.every((item) => typeof item === "string");
23621
+ }
23622
+ isValidAgentConfigDeclaration(value) {
23623
+ if (!this.isObjectRecord(value)) {
23624
+ return false;
23625
+ }
23626
+ const declaration = value;
23627
+ if (declaration.model !== undefined && typeof declaration.model !== "string") {
23628
+ return false;
23629
+ }
23630
+ if (declaration.temperature !== undefined && typeof declaration.temperature !== "number") {
23631
+ return false;
23632
+ }
23633
+ if (declaration.skills !== undefined && !this.isStringArray(declaration.skills)) {
23634
+ return false;
23635
+ }
23636
+ if (declaration.autoLoadSkills !== undefined && !this.isStringArray(declaration.autoLoadSkills)) {
23637
+ return false;
23638
+ }
23639
+ if (declaration.variant !== undefined && typeof declaration.variant !== "string") {
23640
+ return false;
23641
+ }
23642
+ return true;
23643
+ }
23644
+ isHookCadenceRecord(value) {
23645
+ if (!this.isObjectRecord(value)) {
23646
+ return false;
23647
+ }
23648
+ return Object.values(value).every((entry) => typeof entry === "number");
23649
+ }
22998
23650
  }
22999
23651
  // ../hive-core/src/services/agentsMdService.ts
23000
- import * as fs11 from "fs";
23001
- import * as path9 from "path";
23652
+ import * as fs12 from "fs";
23653
+ import * as path10 from "path";
23002
23654
  class AgentsMdService {
23003
23655
  rootDir;
23004
23656
  contextService;
@@ -23007,7 +23659,7 @@ class AgentsMdService {
23007
23659
  this.contextService = contextService;
23008
23660
  }
23009
23661
  async init() {
23010
- const agentsMdPath = path9.join(this.rootDir, "AGENTS.md");
23662
+ const agentsMdPath = path10.join(this.rootDir, "AGENTS.md");
23011
23663
  const existed = fileExists(agentsMdPath);
23012
23664
  if (existed) {
23013
23665
  const existing = readText(agentsMdPath);
@@ -23017,15 +23669,15 @@ class AgentsMdService {
23017
23669
  return { content, existed: false };
23018
23670
  }
23019
23671
  async sync(featureName) {
23020
- const contexts = this.contextService.list(featureName);
23021
- const agentsMdPath = path9.join(this.rootDir, "AGENTS.md");
23022
- const current = await fs11.promises.readFile(agentsMdPath, "utf-8").catch(() => "");
23672
+ const contexts = this.contextService.listAgentsMdSyncContext(featureName);
23673
+ const agentsMdPath = path10.join(this.rootDir, "AGENTS.md");
23674
+ const current = await fs12.promises.readFile(agentsMdPath, "utf-8").catch(() => "");
23023
23675
  const findings = this.extractFindings(contexts);
23024
23676
  const proposals = this.generateProposals(findings, current);
23025
23677
  return { proposals, diff: this.formatDiff(current, proposals) };
23026
23678
  }
23027
23679
  apply(content) {
23028
- const agentsMdPath = path9.join(this.rootDir, "AGENTS.md");
23680
+ const agentsMdPath = path10.join(this.rootDir, "AGENTS.md");
23029
23681
  const isNew = !fileExists(agentsMdPath);
23030
23682
  writeText(agentsMdPath, content);
23031
23683
  return { path: agentsMdPath, chars: content.length, isNew };
@@ -23085,7 +23737,7 @@ class AgentsMdService {
23085
23737
  return this.generateTemplate(detections);
23086
23738
  }
23087
23739
  async detectProjectInfo() {
23088
- const packageJsonPath = path9.join(this.rootDir, "package.json");
23740
+ const packageJsonPath = path10.join(this.rootDir, "package.json");
23089
23741
  let packageJson = null;
23090
23742
  if (fileExists(packageJsonPath)) {
23091
23743
  try {
@@ -23105,26 +23757,26 @@ class AgentsMdService {
23105
23757
  return info;
23106
23758
  }
23107
23759
  detectPackageManager() {
23108
- if (fileExists(path9.join(this.rootDir, "bun.lockb")))
23760
+ if (fileExists(path10.join(this.rootDir, "bun.lockb")))
23109
23761
  return "bun";
23110
- if (fileExists(path9.join(this.rootDir, "pnpm-lock.yaml")))
23762
+ if (fileExists(path10.join(this.rootDir, "pnpm-lock.yaml")))
23111
23763
  return "pnpm";
23112
- if (fileExists(path9.join(this.rootDir, "yarn.lock")))
23764
+ if (fileExists(path10.join(this.rootDir, "yarn.lock")))
23113
23765
  return "yarn";
23114
- if (fileExists(path9.join(this.rootDir, "package-lock.json")))
23766
+ if (fileExists(path10.join(this.rootDir, "package-lock.json")))
23115
23767
  return "npm";
23116
23768
  return "npm";
23117
23769
  }
23118
23770
  detectLanguage() {
23119
- if (fileExists(path9.join(this.rootDir, "tsconfig.json")))
23771
+ if (fileExists(path10.join(this.rootDir, "tsconfig.json")))
23120
23772
  return "TypeScript";
23121
- if (fileExists(path9.join(this.rootDir, "package.json")))
23773
+ if (fileExists(path10.join(this.rootDir, "package.json")))
23122
23774
  return "JavaScript";
23123
- if (fileExists(path9.join(this.rootDir, "requirements.txt")))
23775
+ if (fileExists(path10.join(this.rootDir, "requirements.txt")))
23124
23776
  return "Python";
23125
- if (fileExists(path9.join(this.rootDir, "go.mod")))
23777
+ if (fileExists(path10.join(this.rootDir, "go.mod")))
23126
23778
  return "Go";
23127
- if (fileExists(path9.join(this.rootDir, "Cargo.toml")))
23779
+ if (fileExists(path10.join(this.rootDir, "Cargo.toml")))
23128
23780
  return "Rust";
23129
23781
  return "Unknown";
23130
23782
  }
@@ -23204,24 +23856,24 @@ class AgentsMdService {
23204
23856
  }
23205
23857
  // ../hive-core/src/services/dockerSandboxService.ts
23206
23858
  import { existsSync as existsSync5 } from "fs";
23207
- import { join as join10, sep } from "path";
23859
+ import { join as join11, sep } from "path";
23208
23860
  import { execSync } from "child_process";
23209
23861
 
23210
23862
  class DockerSandboxService {
23211
23863
  static detectImage(worktreePath) {
23212
- if (existsSync5(join10(worktreePath, "Dockerfile"))) {
23864
+ if (existsSync5(join11(worktreePath, "Dockerfile"))) {
23213
23865
  return null;
23214
23866
  }
23215
- if (existsSync5(join10(worktreePath, "package.json"))) {
23867
+ if (existsSync5(join11(worktreePath, "package.json"))) {
23216
23868
  return "node:22-slim";
23217
23869
  }
23218
- if (existsSync5(join10(worktreePath, "requirements.txt")) || existsSync5(join10(worktreePath, "pyproject.toml"))) {
23870
+ if (existsSync5(join11(worktreePath, "requirements.txt")) || existsSync5(join11(worktreePath, "pyproject.toml"))) {
23219
23871
  return "python:3.12-slim";
23220
23872
  }
23221
- if (existsSync5(join10(worktreePath, "go.mod"))) {
23873
+ if (existsSync5(join11(worktreePath, "go.mod"))) {
23222
23874
  return "golang:1.22-slim";
23223
23875
  }
23224
- if (existsSync5(join10(worktreePath, "Cargo.toml"))) {
23876
+ if (existsSync5(join11(worktreePath, "Cargo.toml"))) {
23225
23877
  return "rust:1.77-slim";
23226
23878
  }
23227
23879
  return "ubuntu:24.04";
@@ -23405,11 +24057,31 @@ ${spec}
23405
24057
  Before writing code, confirm:
23406
24058
  1. Dependencies are satisfied and required context is present.
23407
24059
  2. The exact files/sections to touch (from references) are identified.
23408
- 3. The first failing test to write is clear (TDD).
24060
+ 3. The verification path is clear: a failing test for new behavior, or the existing coverage to keep green for refactor-only work.
23409
24061
  4. The minimal change needed to reach green is planned.
23410
24062
 
23411
24063
  ---
23412
24064
 
24065
+ ## TDD Protocol (Required)
24066
+
24067
+ 1. **Red**: Write failing test first
24068
+ 2. **Green**: Minimal code to pass
24069
+ 3. **Refactor**: Clean up, keep tests green
24070
+
24071
+ When adding new behavior, write the test before the implementation.
24072
+ When refactoring existing tested code, keep tests green throughout; no new failing test is required.
24073
+
24074
+ ## Debugging Protocol (When stuck)
24075
+
24076
+ 1. **Reproduce**: Get consistent failure
24077
+ 2. **Isolate**: Binary search to find cause
24078
+ 3. **Hypothesize**: Form theory, test it
24079
+ 4. **Fix**: Minimal change that resolves
24080
+
24081
+ After 3 failed attempts at same fix: STOP and report blocker.
24082
+
24083
+ ---
24084
+
23413
24085
  ## Blocker Protocol
23414
24086
 
23415
24087
  If you hit a blocker requiring human decision, **DO NOT** use the question tool directly.
@@ -23444,6 +24116,24 @@ This keeps the user focused on ONE conversation (Hive Master) instead of multipl
23444
24116
 
23445
24117
  ---
23446
24118
 
24119
+ ## Verification Evidence
24120
+
24121
+ Before claiming completion, verify your work with command-first evidence proportional to the change type:
24122
+
24123
+ | Change type | Required verification |
24124
+ |---|---|
24125
+ | New behavior | Run tests covering the new code; record pass/fail counts |
24126
+ | Bug fix | Reproduce the original failure, then confirm the fix |
24127
+ | Refactor | Run existing tests; confirm no regressions |
24128
+ | Prompt / text-only | Run relevant local tests if available; otherwise do file-specific sanity checks such as generation, syntax/parse, or conflict-marker scans |
24129
+
24130
+ **Rules:**
24131
+ - Run the command, then record observed output. Do not substitute explanation for execution.
24132
+ - If a check cannot be run (missing deps, no test runner in worktree), explicitly state "Not run: <reason>" instead of omitting it silently.
24133
+ - command-first means: execute first, interpret second. Never claim a result you have not observed.
24134
+
24135
+ ---
24136
+
23447
24137
  ## Completion Protocol
23448
24138
 
23449
24139
  When your task is **fully complete**:
@@ -23467,10 +24157,10 @@ Optional body"
23467
24157
  - Do not provide message with hive_merge(..., strategy: 'rebase').
23468
24158
 
23469
24159
  Then inspect the tool response fields:
23470
- - If \`ok=true\` and \`terminal=true\`: stop the session
23471
- - Otherwise: **DO NOT STOP**. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
24160
+ - If \`terminal=true\` (regardless of \`ok\`): stop immediately. This call is final and must not be retried with the same parameters.
24161
+ - If \`terminal=false\`: **DO NOT STOP**. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
23472
24162
 
23473
- **CRITICAL: Stop only on terminal commit result (ok=true and terminal=true).**
24163
+ **CRITICAL: Any terminal commit result is final for this call.**
23474
24164
  If commit returns non-terminal (for example verification_required), DO NOT STOP.
23475
24165
  Follow result.nextAction, fix the issue, and call hive_worktree_commit again.
23476
24166
 
@@ -23508,26 +24198,6 @@ hive_worktree_commit({
23508
24198
 
23509
24199
  ---
23510
24200
 
23511
- ## TDD Protocol (Required)
23512
-
23513
- 1. **Red**: Write failing test first
23514
- 2. **Green**: Minimal code to pass
23515
- 3. **Refactor**: Clean up, keep tests green
23516
-
23517
- Never write implementation before test exists.
23518
- Exception: Pure refactoring of existing tested code.
23519
-
23520
- ## Debugging Protocol (When stuck)
23521
-
23522
- 1. **Reproduce**: Get consistent failure
23523
- 2. **Isolate**: Binary search to find cause
23524
- 3. **Hypothesize**: Form theory, test it
23525
- 4. **Fix**: Minimal change that resolves
23526
-
23527
- After 3 failed attempts at same fix: STOP and report blocker.
23528
-
23529
- ---
23530
-
23531
24201
  ## Tool Access
23532
24202
 
23533
24203
  **You have access to:**
@@ -23540,8 +24210,8 @@ After 3 failed attempts at same fix: STOP and report blocker.
23540
24210
  **You do NOT have access to (or should not use):**
23541
24211
  - \`question\` - Escalate via blocker protocol instead
23542
24212
  - \`hive_worktree_create\` - No spawning sub-workers
23543
- - \`hive_merge\` - Only Hive Master merges
23544
- - \`task\` - No recursive delegation
24213
+ - \`hive_merge\` - Only Hive/Swarm or delegated \`hive-helper\` merges; ordinary task workers must not merge or handle merge/wrap-up operational flows
24214
+ - \`task\` - No recursive delegation; only Hive/Swarm may delegate \`hive-helper\` for merge/wrap-up operational flows
23545
24215
 
23546
24216
  ---
23547
24217
 
@@ -23555,10 +24225,6 @@ After 3 failed attempts at same fix: STOP and report blocker.
23555
24225
 
23556
24226
  ---
23557
24227
 
23558
- **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
23559
-
23560
- ---
23561
-
23562
24228
  Begin your task now.
23563
24229
  `;
23564
24230
  }
@@ -23791,17 +24457,17 @@ function applyContextBudget(files, config2 = {}) {
23791
24457
  }
23792
24458
 
23793
24459
  // src/utils/prompt-file.ts
23794
- import * as fs12 from "fs";
23795
- import * as path10 from "path";
24460
+ import * as fs13 from "fs";
24461
+ import * as path11 from "path";
23796
24462
  function writeWorkerPromptFile(feature, task, prompt, hiveDir) {
23797
- const projectRoot = path10.dirname(hiveDir);
24463
+ const projectRoot = path11.dirname(hiveDir);
23798
24464
  const featureDir = resolveFeatureDirectoryName(projectRoot, feature);
23799
- const promptDir = path10.join(hiveDir, "features", featureDir, "tasks", task);
23800
- const promptPath = path10.join(promptDir, "worker-prompt.md");
23801
- if (!fs12.existsSync(promptDir)) {
23802
- fs12.mkdirSync(promptDir, { recursive: true });
24465
+ const promptDir = path11.join(hiveDir, "features", featureDir, "tasks", task);
24466
+ const promptPath = path11.join(promptDir, "worker-prompt.md");
24467
+ if (!fs13.existsSync(promptDir)) {
24468
+ fs13.mkdirSync(promptDir, { recursive: true });
23803
24469
  }
23804
- fs12.writeFileSync(promptPath, prompt, "utf-8");
24470
+ fs13.writeFileSync(promptPath, prompt, "utf-8");
23805
24471
  return promptPath;
23806
24472
  }
23807
24473
 
@@ -23818,6 +24484,7 @@ var BUILT_IN_AGENTS = {
23818
24484
  "swarm-orchestrator": { sessionKind: "primary", baseAgent: "swarm-orchestrator" },
23819
24485
  "forager-worker": { sessionKind: "task-worker", baseAgent: "forager-worker" },
23820
24486
  "scout-researcher": { sessionKind: "subagent", baseAgent: "scout-researcher" },
24487
+ "hive-helper": { sessionKind: "subagent", baseAgent: "hive-helper" },
23821
24488
  "hygienic-reviewer": { sessionKind: "subagent", baseAgent: "hygienic-reviewer" }
23822
24489
  };
23823
24490
  var BASE_AGENT_KIND = {
@@ -23887,70 +24554,37 @@ var HIVE_SYSTEM_PROMPT = `
23887
24554
  Use hive_merge to integrate changes into the current branch.
23888
24555
  `;
23889
24556
 
23890
- // src/utils/compaction-anchor.ts
23891
- var AGENT_ROLE_MAP = {
23892
- "hive-master": "Hive",
23893
- "architect-planner": "Architect",
23894
- "swarm-orchestrator": "Swarm",
23895
- "forager-worker": "Forager",
23896
- "scout-researcher": "Scout",
23897
- "hygienic-reviewer": "Hygienic"
23898
- };
23899
- var BASE_AGENT_ROLE_MAP = {
23900
- "forager-worker": "Forager",
23901
- "hygienic-reviewer": "Hygienic",
23902
- "scout-researcher": "Scout"
23903
- };
23904
- function resolveRole(ctx) {
23905
- if (ctx.agent && AGENT_ROLE_MAP[ctx.agent]) {
23906
- return AGENT_ROLE_MAP[ctx.agent];
23907
- }
23908
- if (ctx.baseAgent && BASE_AGENT_ROLE_MAP[ctx.baseAgent]) {
23909
- return BASE_AGENT_ROLE_MAP[ctx.baseAgent];
23910
- }
23911
- return;
23912
- }
23913
- function resolveWorkerPromptPath(ctx) {
23914
- if (ctx.workerPromptPath) {
23915
- return ctx.workerPromptPath;
23916
- }
23917
- if (ctx.featureName && ctx.taskFolder) {
23918
- return `.hive/features/${ctx.featureName}/tasks/${ctx.taskFolder}/worker-prompt.md`;
23919
- }
23920
- return;
23921
- }
23922
- function buildCompactionReanchor(ctx) {
23923
- const role = resolveRole(ctx);
23924
- const kind = ctx.sessionKind ?? "unknown";
23925
- const workerPromptPath = resolveWorkerPromptPath(ctx);
23926
- const lines = [];
23927
- const context = [];
23928
- lines.push("Compaction recovery — you were compacted mid-session.");
23929
- if (role) {
23930
- lines.push(`Role: ${role}`);
23931
- }
23932
- lines.push("Do not switch roles.");
23933
- lines.push("Do not call status tools to rediscover state.");
23934
- lines.push("Do not re-read the full codebase.");
23935
- if (kind === "task-worker") {
23936
- lines.push("Do not delegate.");
23937
- if (workerPromptPath) {
23938
- lines.push("Re-read worker-prompt.md now to recall your assignment.");
23939
- context.push(workerPromptPath);
23940
- } else {
23941
- lines.push("Re-read worker-prompt.md from the Hive task metadata to recall your assignment.");
23942
- }
23943
- }
23944
- if ((kind === "primary" || kind === "subagent") && ctx.directivePrompt) {
23945
- lines.push("Original directive survives via post-compaction replay.");
24557
+ // src/utils/plugin-manifest.ts
24558
+ import * as path12 from "path";
24559
+ import { fileURLToPath } from "url";
24560
+ var HIVE_COMMANDS = [
24561
+ {
24562
+ key: "hive",
24563
+ name: "/hive",
24564
+ description: "Create a new feature: /hive <feature-name>"
23946
24565
  }
23947
- lines.push("Next action: resume from where you left off.");
23948
- return {
23949
- prompt: lines.join(`
23950
- `),
23951
- context
23952
- };
23953
- }
24566
+ ];
24567
+ var HIVE_TOOL_NAMES = [
24568
+ "hive_feature_create",
24569
+ "hive_feature_complete",
24570
+ "hive_plan_write",
24571
+ "hive_plan_read",
24572
+ "hive_plan_approve",
24573
+ "hive_tasks_sync",
24574
+ "hive_task_create",
24575
+ "hive_task_update",
24576
+ "hive_worktree_start",
24577
+ "hive_worktree_create",
24578
+ "hive_worktree_commit",
24579
+ "hive_worktree_discard",
24580
+ "hive_merge",
24581
+ "hive_context_write",
24582
+ "hive_network_query",
24583
+ "hive_status",
24584
+ "hive_skill",
24585
+ "hive_agents_md"
24586
+ ];
24587
+ var packageRoot = path12.resolve(path12.dirname(fileURLToPath(import.meta.url)), "../..");
23954
24588
 
23955
24589
  // src/index.ts
23956
24590
  function formatSkillsXml(skills) {
@@ -24065,28 +24699,51 @@ No Hive skills available.` : base + formatSkillsXml(filteredSkills);
24065
24699
  }
24066
24700
  var plugin = async (ctx) => {
24067
24701
  const { directory, client, worktree } = ctx;
24702
+ const emitConfigWarning = (message) => {
24703
+ const prefixedMessage = `[hive:config] ${message}`;
24704
+ const maybeClient = client;
24705
+ const notified = typeof maybeClient.notify === "function" && maybeClient.notify({
24706
+ type: "warning",
24707
+ level: "warning",
24708
+ title: "Agent Hive Config Warning",
24709
+ message: prefixedMessage
24710
+ }) || typeof maybeClient.notification?.create === "function" && maybeClient.notification.create({
24711
+ type: "warning",
24712
+ level: "warning",
24713
+ title: "Agent Hive Config Warning",
24714
+ message: prefixedMessage
24715
+ });
24716
+ if (!notified) {
24717
+ console.warn(prefixedMessage);
24718
+ }
24719
+ };
24068
24720
  const featureService = new FeatureService(directory);
24069
24721
  const planService = new PlanService(directory);
24070
24722
  const taskService = new TaskService(directory);
24071
24723
  const contextService = new ContextService(directory);
24724
+ const networkService = new NetworkService(directory);
24072
24725
  const agentsMdService = new AgentsMdService(directory, contextService);
24073
- const configService = new ConfigService;
24726
+ const configService = new ConfigService(directory);
24074
24727
  const sessionService = new SessionService(directory);
24075
24728
  const disabledMcps = configService.getDisabledMcps();
24076
24729
  const disabledSkills = configService.getDisabledSkills();
24730
+ const configFallbackWarning = configService.getLastFallbackWarning()?.message ?? null;
24731
+ if (configFallbackWarning) {
24732
+ emitConfigWarning(configFallbackWarning);
24733
+ }
24077
24734
  const builtinMcps = createBuiltinMcps(disabledMcps);
24078
24735
  const filteredSkills = getFilteredSkills(disabledSkills);
24079
24736
  const effectiveAutoLoadSkills = configService.getAgentConfig("hive-master").autoLoadSkills ?? [];
24080
24737
  const worktreeService = new WorktreeService({
24081
24738
  baseDir: directory,
24082
- hiveDir: path11.join(directory, ".hive")
24739
+ hiveDir: path13.join(directory, ".hive")
24083
24740
  });
24084
24741
  const customAgentConfigsForClassification = getCustomAgentConfigsCompat(configService);
24085
24742
  const runtimeContext = detectContext(worktree || directory);
24086
24743
  const taskWorkerRecovery = runtimeContext.isWorktree && runtimeContext.feature && runtimeContext.task ? {
24087
24744
  featureName: runtimeContext.feature,
24088
24745
  taskFolder: runtimeContext.task,
24089
- workerPromptPath: path11.posix.join(".hive", "features", resolveFeatureDirectoryName(directory, runtimeContext.feature), "tasks", runtimeContext.task, "worker-prompt.md")
24746
+ workerPromptPath: path13.posix.join(".hive", "features", resolveFeatureDirectoryName(directory, runtimeContext.feature), "tasks", runtimeContext.task, "worker-prompt.md")
24090
24747
  } : undefined;
24091
24748
  const isOmoSlimEnabled = () => {
24092
24749
  return configService.isOmoSlimEnabled();
@@ -24134,10 +24791,12 @@ var plugin = async (ctx) => {
24134
24791
  const buildDirectiveReplayText = (session) => {
24135
24792
  if (!session.directivePrompt)
24136
24793
  return null;
24137
- const role = session.agent === "scout-researcher" || session.baseAgent === "scout-researcher" ? "Scout" : session.agent === "hygienic-reviewer" || session.baseAgent === "hygienic-reviewer" ? "Hygienic" : session.agent === "architect-planner" || session.baseAgent === "architect-planner" ? "Architect" : session.agent === "swarm-orchestrator" || session.baseAgent === "swarm-orchestrator" ? "Swarm" : session.agent === "hive-master" || session.baseAgent === "hive-master" ? "Hive" : "current role";
24794
+ const role = session.agent === "scout-researcher" || session.baseAgent === "scout-researcher" ? "Scout" : session.agent === "hive-helper" || session.baseAgent === "hive-helper" ? "Hive Helper" : session.agent === "hygienic-reviewer" || session.baseAgent === "hygienic-reviewer" ? "Hygienic" : session.agent === "architect-planner" || session.baseAgent === "architect-planner" ? "Architect" : session.agent === "swarm-orchestrator" || session.baseAgent === "swarm-orchestrator" ? "Swarm" : session.agent === "hive-master" || session.baseAgent === "hive-master" ? "Hive" : "current role";
24138
24795
  return [
24139
24796
  `Post-compaction recovery: You are still ${role}.`,
24140
24797
  "Resume the original assignment below. Do not replace it with a new goal.",
24798
+ "Do not broaden the scope or re-read the full codebase.",
24799
+ "If the exact next step is not explicit in the original assignment, return control to the parent/orchestrator immediately instead of improvising.",
24141
24800
  "",
24142
24801
  session.directivePrompt
24143
24802
  ].join(`
@@ -24146,6 +24805,24 @@ var plugin = async (ctx) => {
24146
24805
  const shouldUseDirectiveReplay = (session) => {
24147
24806
  return session?.sessionKind === "primary" || session?.sessionKind === "subagent";
24148
24807
  };
24808
+ const getDirectiveReplayCompactionPatch = (session) => {
24809
+ if (!session?.directivePrompt || !shouldUseDirectiveReplay(session)) {
24810
+ return null;
24811
+ }
24812
+ if (session.directiveRecoveryState === "escalated") {
24813
+ return null;
24814
+ }
24815
+ if (session.directiveRecoveryState === "consumed") {
24816
+ return {
24817
+ directiveRecoveryState: "escalated",
24818
+ replayDirectivePending: true
24819
+ };
24820
+ }
24821
+ return {
24822
+ directiveRecoveryState: "available",
24823
+ replayDirectivePending: true
24824
+ };
24825
+ };
24149
24826
  const shouldUseWorkerReplay = (session) => {
24150
24827
  return session?.sessionKind === "task-worker" && !!session.featureName && !!session.taskFolder && !!session.workerPromptPath;
24151
24828
  };
@@ -24162,11 +24839,11 @@ var plugin = async (ctx) => {
24162
24839
  `);
24163
24840
  };
24164
24841
  const checkBlocked = (feature) => {
24165
- const fs14 = __require("fs");
24842
+ const fs15 = __require("fs");
24166
24843
  const featureDir = resolveFeatureDirectoryName(directory, feature);
24167
- const blockedPath = path11.join(directory, ".hive", "features", featureDir, "BLOCKED");
24168
- if (fs14.existsSync(blockedPath)) {
24169
- const reason = fs14.readFileSync(blockedPath, "utf-8").trim();
24844
+ const blockedPath = path13.join(directory, ".hive", "features", featureDir, "BLOCKED");
24845
+ if (fs15.existsSync(blockedPath)) {
24846
+ const reason = fs15.readFileSync(blockedPath, "utf-8").trim();
24170
24847
  return `⛔ BLOCKED by Beekeeper
24171
24848
 
24172
24849
  ${reason || "(No reason provided)"}
@@ -24229,7 +24906,7 @@ To unblock: Remove .hive/features/${featureDir}/BLOCKED`;
24229
24906
  });
24230
24907
  const planResult = planService.read(feature);
24231
24908
  const allTasks = taskService.list(feature);
24232
- const executionContextFiles = typeof contextService.listExecutionContext === "function" ? contextService.listExecutionContext(feature) : contextService.list(feature).filter((f) => f.name !== "overview");
24909
+ const executionContextFiles = contextService.listExecutionContext(feature);
24233
24910
  const rawContextFiles = executionContextFiles.map((f) => ({
24234
24911
  name: f.name,
24235
24912
  content: f.content
@@ -24325,9 +25002,9 @@ To unblock: Remove .hive/features/${featureDir}/BLOCKED`;
24325
25002
  spec: specContent,
24326
25003
  workerPrompt
24327
25004
  });
24328
- const hiveDir = path11.join(directory, ".hive");
25005
+ const hiveDir = path13.join(directory, ".hive");
24329
25006
  const workerPromptPath = writeWorkerPromptFile(feature, task, workerPrompt, hiveDir);
24330
- const relativePromptPath = normalizePath(path11.relative(directory, workerPromptPath));
25007
+ const relativePromptPath = normalizePath(path13.relative(directory, workerPromptPath));
24331
25008
  const PREVIEW_MAX_LENGTH = 200;
24332
25009
  const workerPromptPreview = workerPrompt.length > PREVIEW_MAX_LENGTH ? workerPrompt.slice(0, PREVIEW_MAX_LENGTH) + "..." : workerPrompt;
24333
25010
  const taskToolPrompt = `Follow instructions in @${relativePromptPath}`;
@@ -24577,9 +25254,13 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24577
25254
  return respond({
24578
25255
  success: false,
24579
25256
  terminal: true,
25257
+ reason: "task_not_blocked",
25258
+ canRetry: false,
25259
+ retryReason: `Task is in ${taskInfo.status} state. Run hive_status() and follow the current status flow instead of blocked resume.`,
24580
25260
  error: `continueFrom: 'blocked' was specified but task "${task}" is not in blocked state (current status: ${taskInfo.status}).`,
24581
25261
  currentStatus: taskInfo.status,
24582
25262
  hints: [
25263
+ "This blocked-resume call cannot be retried with the same parameters.",
24583
25264
  "Use hive_worktree_start({ feature, task }) for normal starts or re-dispatch.",
24584
25265
  "Use hive_status to verify the current task status before retrying."
24585
25266
  ]
@@ -24614,8 +25295,9 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24614
25295
  }
24615
25296
  const sessionID = input.event.properties.sessionID;
24616
25297
  const existing = sessionService.getGlobal(sessionID);
24617
- if (existing?.directivePrompt && shouldUseDirectiveReplay(existing)) {
24618
- sessionService.trackGlobal(sessionID, { replayDirectivePending: true });
25298
+ const directiveReplayPatch = getDirectiveReplayCompactionPatch(existing);
25299
+ if (directiveReplayPatch) {
25300
+ sessionService.trackGlobal(sessionID, directiveReplayPatch);
24619
25301
  return;
24620
25302
  }
24621
25303
  if (shouldUseWorkerReplay(existing)) {
@@ -24623,59 +25305,6 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24623
25305
  return;
24624
25306
  }
24625
25307
  },
24626
- "experimental.chat.system.transform": async (input, output) => {
24627
- if (!shouldExecuteHook("experimental.chat.system.transform", configService, turnCounters)) {
24628
- return;
24629
- }
24630
- output.system.push(HIVE_SYSTEM_PROMPT);
24631
- const activeFeature = resolveFeature();
24632
- if (activeFeature) {
24633
- const info = featureService.getInfo(activeFeature);
24634
- if (info) {
24635
- const featureInfo = info;
24636
- let statusHint = `
24637
- ### Current Hive Status
24638
- `;
24639
- statusHint += `**Active Feature**: ${info.name} (${info.status})
24640
- `;
24641
- statusHint += `**Progress**: ${info.tasks.filter((t) => t.status === "done").length}/${info.tasks.length} tasks
24642
- `;
24643
- if (featureInfo.hasOverview) {
24644
- statusHint += `**Overview**: available at .hive/features/${resolveFeatureDirectoryName(directory, info.name)}/context/overview.md (primary human-facing doc)
24645
- `;
24646
- } else if (info.hasPlan) {
24647
- statusHint += `**Overview**: missing - write it with hive_context_write({ name: "overview", content })
24648
- `;
24649
- }
24650
- if (info.commentCount > 0) {
24651
- statusHint += `**Comments**: ${info.commentCount} unresolved (plan: ${featureInfo.reviewCounts?.plan ?? 0}, overview: ${featureInfo.reviewCounts?.overview ?? 0})
24652
- `;
24653
- }
24654
- output.system.push(statusHint);
24655
- }
24656
- }
24657
- },
24658
- "experimental.session.compacting": async (_input, output) => {
24659
- const session = sessionService.getGlobal(_input.sessionID);
24660
- if (session) {
24661
- const ctx2 = {
24662
- agent: session.agent,
24663
- baseAgent: session.baseAgent,
24664
- sessionKind: session.sessionKind,
24665
- featureName: session.featureName,
24666
- taskFolder: session.taskFolder,
24667
- workerPromptPath: session.workerPromptPath,
24668
- directivePrompt: session.directivePrompt
24669
- };
24670
- const reanchor = buildCompactionReanchor(ctx2);
24671
- output.prompt = reanchor.prompt;
24672
- output.context.push(...reanchor.context);
24673
- } else {
24674
- const reanchor = buildCompactionReanchor({});
24675
- output.prompt = reanchor.prompt;
24676
- output.context.push(...reanchor.context);
24677
- }
24678
- },
24679
25308
  "chat.message": createVariantHook(configService, sessionService, customAgentConfigsForClassification, taskWorkerRecovery),
24680
25309
  "experimental.chat.messages.transform": async (_input, output) => {
24681
25310
  if (!Array.isArray(output.messages) || output.messages.length === 0) {
@@ -24695,7 +25324,11 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24695
25324
  `);
24696
25325
  const existingDirective = session?.directivePrompt;
24697
25326
  if (directiveText && directiveText !== existingDirective && shouldUseDirectiveReplay(session ?? { sessionKind: "subagent" })) {
24698
- sessionService.trackGlobal(sessionID, { directivePrompt: directiveText });
25327
+ sessionService.trackGlobal(sessionID, {
25328
+ directivePrompt: directiveText,
25329
+ directiveRecoveryState: undefined,
25330
+ replayDirectivePending: false
25331
+ });
24699
25332
  }
24700
25333
  }
24701
25334
  const refreshed = sessionService.getGlobal(sessionID);
@@ -24758,7 +25391,10 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24758
25391
  }
24759
25392
  ]
24760
25393
  });
24761
- sessionService.trackGlobal(sessionID, { replayDirectivePending: false });
25394
+ sessionService.trackGlobal(sessionID, {
25395
+ replayDirectivePending: false,
25396
+ directiveRecoveryState: refreshed.directiveRecoveryState === "available" ? "consumed" : refreshed.directiveRecoveryState
25397
+ });
24762
25398
  },
24763
25399
  "tool.execute.before": async (input, output) => {
24764
25400
  if (!shouldExecuteHook("tool.execute.before", configService, turnCounters, { safetyCritical: true })) {
@@ -24781,7 +25417,7 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24781
25417
  const workdir = output.args?.workdir;
24782
25418
  if (!workdir)
24783
25419
  return;
24784
- const hiveWorktreeBase = path11.join(directory, ".hive", ".worktrees");
25420
+ const hiveWorktreeBase = path13.join(directory, ".hive", ".worktrees");
24785
25421
  if (!workdir.startsWith(hiveWorktreeBase))
24786
25422
  return;
24787
25423
  const wrapped = DockerSandboxService.wrapCommand(workdir, command, sandboxConfig);
@@ -24949,17 +25585,17 @@ Expand your Discovery section and try again.`;
24949
25585
  }
24950
25586
  }),
24951
25587
  hive_task_create: tool({
24952
- description: "Create manual task (not from plan). Manual tasks always have explicit dependsOn (default: []). Provide structured metadata for useful spec.md and worker prompt.",
25588
+ description: "Create append-only manual task (not from plan). Omit order to use the next slot. Explicit dependsOn defaults to [] and is only allowed when every dependency already exists and is done. Provide structured metadata for useful spec.md and worker prompt.",
24953
25589
  args: {
24954
25590
  name: tool.schema.string().describe("Task name"),
24955
- order: tool.schema.number().optional().describe("Task order"),
25591
+ order: tool.schema.number().optional().describe("Task order. Omit to use the next append-only slot; explicit order must equal that next slot."),
24956
25592
  feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)"),
24957
25593
  description: tool.schema.string().optional().describe("What the worker needs to achieve"),
24958
25594
  goal: tool.schema.string().optional().describe("Why this task exists and what done means"),
24959
25595
  acceptanceCriteria: tool.schema.array(tool.schema.string()).optional().describe("Specific observable outcomes"),
24960
25596
  references: tool.schema.array(tool.schema.string()).optional().describe("File paths or line ranges relevant to this task"),
24961
25597
  files: tool.schema.array(tool.schema.string()).optional().describe("Files likely to be modified"),
24962
- dependsOn: tool.schema.array(tool.schema.string()).optional().describe("Task folder names this task depends on (default: [] for no dependencies)"),
25598
+ dependsOn: tool.schema.array(tool.schema.string()).optional().describe("Task folder names this task depends on (default: [] for no dependencies). Explicit dependsOn is allowed only when every dependency already exists and is done; review-sourced tasks must omit it."),
24963
25599
  reason: tool.schema.string().optional().describe("Why this task was created"),
24964
25600
  source: tool.schema.string().optional().describe("Origin: review, operator, or ad_hoc")
24965
25601
  },
@@ -25089,7 +25725,7 @@ Reminder: start work with hive_worktree_start to use its worktree, and ensure an
25089
25725
  });
25090
25726
  }
25091
25727
  const featureDir = resolveFeatureDirectoryName(directory, feature);
25092
- const workerPromptPath = path11.posix.join(".hive", "features", featureDir, "tasks", task, "worker-prompt.md");
25728
+ const workerPromptPath = path13.posix.join(".hive", "features", featureDir, "tasks", task, "worker-prompt.md");
25093
25729
  bindFeatureSession(feature, toolContext, { taskFolder: task, workerPromptPath });
25094
25730
  let verificationNote;
25095
25731
  if (status === "completed") {
@@ -25221,37 +25857,49 @@ Reminder: start work with hive_worktree_start to use its worktree, and ensure an
25221
25857
  task: tool.schema.string().describe("Task folder name to merge"),
25222
25858
  strategy: tool.schema.enum(["merge", "squash", "rebase"]).optional().describe("Merge strategy (default: merge)"),
25223
25859
  message: tool.schema.string().optional().describe("Optional merge message for merge/squash. Empty uses default."),
25860
+ preserveConflicts: tool.schema.boolean().optional().describe("Keep merge conflict state intact instead of auto-aborting (default: false)."),
25861
+ cleanup: tool.schema.enum(["none", "worktree", "worktree+branch"]).optional().describe("Cleanup mode after a successful merge (default: none)."),
25224
25862
  feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
25225
25863
  },
25226
- async execute({ task, strategy = "merge", message, feature: explicitFeature }) {
25864
+ async execute({ task, strategy = "merge", message, preserveConflicts, cleanup, feature: explicitFeature }) {
25865
+ const failure = (error45) => respond({
25866
+ success: false,
25867
+ merged: false,
25868
+ strategy,
25869
+ filesChanged: [],
25870
+ conflicts: [],
25871
+ conflictState: "none",
25872
+ cleanup: {
25873
+ worktreeRemoved: false,
25874
+ branchDeleted: false,
25875
+ pruned: false
25876
+ },
25877
+ error: error45,
25878
+ message: `Merge failed: ${error45}`
25879
+ });
25227
25880
  const feature = resolveFeature(explicitFeature);
25228
25881
  if (!feature)
25229
- return "Error: No feature specified. Create a feature or provide feature param.";
25882
+ return failure("No feature specified. Create a feature or provide feature param.");
25230
25883
  const taskInfo = taskService.get(feature, task);
25231
25884
  if (!taskInfo)
25232
- return `Error: Task "${task}" not found`;
25885
+ return failure(`Task "${task}" not found`);
25233
25886
  if (taskInfo.status !== "done")
25234
- return "Error: Task must be completed before merging. Use hive_worktree_commit first.";
25235
- const result = await worktreeService.merge(feature, task, strategy, message);
25236
- if (!result.success) {
25237
- if (result.conflicts && result.conflicts.length > 0) {
25238
- return `Merge failed with conflicts in:
25239
- ${result.conflicts.map((f) => `- ${f}`).join(`
25240
- `)}
25241
-
25242
- Resolve conflicts manually or try a different strategy.`;
25243
- }
25244
- return `Merge failed: ${result.error}`;
25245
- }
25246
- return `Task "${task}" merged successfully using ${strategy} strategy.
25247
- Commit: ${result.sha}
25248
- Files changed: ${result.filesChanged?.length || 0}`;
25887
+ return failure("Task must be completed before merging. Use hive_worktree_commit first.");
25888
+ const result = await worktreeService.merge(feature, task, strategy, message, {
25889
+ preserveConflicts,
25890
+ cleanup
25891
+ });
25892
+ const responseMessage = result.success ? `Task "${task}" merged successfully using ${strategy} strategy.` : `Merge failed: ${result.error}`;
25893
+ return respond({
25894
+ ...result,
25895
+ message: responseMessage
25896
+ });
25249
25897
  }
25250
25898
  }),
25251
25899
  hive_context_write: tool({
25252
- description: "Write a context file for the feature. Context files store persistent notes, decisions, and reference material.",
25900
+ description: "Write a context file for the feature. System-known names: overview = human-facing summary/history, draft = planner scratchpad, execution-decisions = orchestration log; all other names stay durable free-form context.",
25253
25901
  args: {
25254
- name: tool.schema.string().describe('Context file name (e.g., "decisions", "architecture", "notes")'),
25902
+ name: tool.schema.string().describe('Context file name (e.g., "overview", "draft", "execution-decisions", "learnings"). overview is the human-facing summary/history file, draft is planner scratchpad, execution-decisions is the orchestration log; other names remain durable free-form context.'),
25255
25903
  content: tool.schema.string().describe("Markdown content to write"),
25256
25904
  feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
25257
25905
  },
@@ -25261,7 +25909,29 @@ Files changed: ${result.filesChanged?.length || 0}`;
25261
25909
  return "Error: No feature specified. Create a feature or provide feature param.";
25262
25910
  bindFeatureSession(feature, toolContext);
25263
25911
  const filePath = contextService.write(feature, name, content);
25264
- return `Context file written: ${filePath}`;
25912
+ return `Context file written: ${filePath}. Known names: overview = human-facing summary/history, draft = planner scratchpad, execution-decisions = orchestration log; all other context names remain durable free-form notes.`;
25913
+ }
25914
+ }),
25915
+ hive_network_query: tool({
25916
+ description: "Query prior features for deterministic plan/context snippets. Returns JSON with query, currentFeature, and snippet results only. Callers must opt in to using the returned snippets.",
25917
+ args: {
25918
+ feature: tool.schema.string().optional().describe("Current feature to exclude from results. Defaults to active feature when available."),
25919
+ query: tool.schema.string().describe("Case-insensitive substring query over plan.md and network-safe context")
25920
+ },
25921
+ async execute({ feature: explicitFeature, query }) {
25922
+ const currentFeature = resolveFeature(explicitFeature) ?? null;
25923
+ const results = networkService.query({
25924
+ currentFeature: currentFeature ?? undefined,
25925
+ query,
25926
+ maxFeatures: 10,
25927
+ maxSnippetsPerFeature: 3,
25928
+ maxSnippetChars: 240
25929
+ });
25930
+ return respond({
25931
+ query,
25932
+ currentFeature,
25933
+ results
25934
+ });
25265
25935
  }
25266
25936
  }),
25267
25937
  hive_status: tool({
@@ -25306,23 +25976,23 @@ Files changed: ${result.filesChanged?.length || 0}`;
25306
25976
  }
25307
25977
  const plan = planService.read(feature);
25308
25978
  const tasks = taskService.list(feature);
25309
- const contextFiles = contextService.list(feature);
25310
- const overview = contextFiles.find((file2) => file2.name === "overview") ?? null;
25979
+ const featureContextFiles = contextService.list(feature);
25980
+ const overview = contextService.getOverview(feature);
25311
25981
  const readThreads = (filePath) => {
25312
- if (!fs13.existsSync(filePath)) {
25982
+ if (!fs14.existsSync(filePath)) {
25313
25983
  return null;
25314
25984
  }
25315
25985
  try {
25316
- const data = JSON.parse(fs13.readFileSync(filePath, "utf-8"));
25986
+ const data = JSON.parse(fs14.readFileSync(filePath, "utf-8"));
25317
25987
  return data.threads ?? [];
25318
25988
  } catch {
25319
25989
  return [];
25320
25990
  }
25321
25991
  };
25322
- const featurePath = path11.join(directory, ".hive", "features", resolveFeatureDirectoryName(directory, feature));
25323
- const reviewDir = path11.join(featurePath, "comments");
25324
- const planThreads = readThreads(path11.join(reviewDir, "plan.json")) ?? readThreads(path11.join(featurePath, "comments.json"));
25325
- const overviewThreads = readThreads(path11.join(reviewDir, "overview.json"));
25992
+ const featurePath = path13.join(directory, ".hive", "features", resolveFeatureDirectoryName(directory, feature));
25993
+ const reviewDir = path13.join(featurePath, "comments");
25994
+ const planThreads = readThreads(path13.join(reviewDir, "plan.json")) ?? readThreads(path13.join(featurePath, "comments.json"));
25995
+ const overviewThreads = readThreads(path13.join(reviewDir, "overview.json"));
25326
25996
  const reviewCounts = {
25327
25997
  plan: planThreads?.length ?? 0,
25328
25998
  overview: overviewThreads?.length ?? 0
@@ -25343,14 +26013,21 @@ Files changed: ${result.filesChanged?.length || 0}`;
25343
26013
  } : null
25344
26014
  };
25345
26015
  }));
25346
- const contextSummary = contextFiles.map((c) => ({
26016
+ const contextSummary = featureContextFiles.map((c) => ({
25347
26017
  name: c.name,
25348
26018
  chars: c.content.length,
25349
- updatedAt: c.updatedAt
26019
+ updatedAt: c.updatedAt,
26020
+ role: c.role,
26021
+ includeInExecution: c.includeInExecution,
26022
+ includeInAgentsMdSync: c.includeInAgentsMdSync,
26023
+ includeInNetwork: c.includeInNetwork
25350
26024
  }));
25351
26025
  const pendingTasks = tasksSummary.filter((t) => t.status === "pending");
25352
26026
  const inProgressTasks = tasksSummary.filter((t) => t.status === "in_progress");
25353
26027
  const doneTasks = tasksSummary.filter((t) => t.status === "done");
26028
+ const doneTasksWithLiveWorktrees = tasksSummary.filter((t) => t.status === "done" && t.worktree).map((t) => t.folder);
26029
+ const dirtyWorktrees = tasksSummary.filter((t) => t.worktree && t.worktree.hasChanges === true).map((t) => t.folder);
26030
+ const nonInProgressTasksWithWorktrees = tasksSummary.filter((t) => t.status !== "in_progress" && t.worktree).map((t) => t.folder);
25354
26031
  const tasksWithDeps = tasksSummary.map((t) => ({
25355
26032
  folder: t.folder,
25356
26033
  status: t.status,
@@ -25362,12 +26039,25 @@ Files changed: ${result.filesChanged?.length || 0}`;
25362
26039
  dependsOn: effectiveDeps.get(task.folder)
25363
26040
  }));
25364
26041
  const { runnable, blocked: blockedBy } = computeRunnableAndBlocked(normalizedTasks);
26042
+ const ambiguityFlags = [];
26043
+ if (doneTasksWithLiveWorktrees.length > 0) {
26044
+ ambiguityFlags.push("done_task_has_live_worktree");
26045
+ }
26046
+ if (dirtyWorktrees.some((folder) => nonInProgressTasksWithWorktrees.includes(folder))) {
26047
+ ambiguityFlags.push("dirty_non_in_progress_worktree");
26048
+ }
26049
+ if (runnable.length > 1) {
26050
+ ambiguityFlags.push("multiple_runnable_tasks");
26051
+ }
26052
+ if (pendingTasks.length > 0 && runnable.length === 0) {
26053
+ ambiguityFlags.push("pending_tasks_blocked");
26054
+ }
25365
26055
  const getNextAction = (planStatus2, tasks2, runnableTasks, hasPlan, hasOverview) => {
25366
26056
  if (planStatus2 === "review") {
25367
26057
  return "Wait for plan approval or revise based on comments";
25368
26058
  }
25369
26059
  if (!hasPlan || planStatus2 === "draft") {
25370
- return "Write or revise plan with hive_plan_write. Keep plan.md as the human-facing review artifact; pre-task Mermaid overview diagrams are optional.";
26060
+ return "Write or revise plan with hive_plan_write. Refresh context/overview.md first for human review; plan.md remains execution truth and pre-task Mermaid overview diagrams are optional.";
25371
26061
  }
25372
26062
  if (tasks2.length === 0) {
25373
26063
  return "Generate tasks from plan with hive_tasks_sync";
@@ -25422,10 +26112,31 @@ Files changed: ${result.filesChanged?.length || 0}`;
25422
26112
  runnable,
25423
26113
  blockedBy
25424
26114
  },
26115
+ helperStatus: {
26116
+ doneTasksWithLiveWorktrees,
26117
+ dirtyWorktrees,
26118
+ nonInProgressTasksWithWorktrees,
26119
+ manualTaskPolicy: {
26120
+ order: {
26121
+ omitted: "append_next_order",
26122
+ explicitNextOrder: "append_next_order",
26123
+ explicitOtherOrder: "plan_amendment_required"
26124
+ },
26125
+ dependsOn: {
26126
+ omitted: "store_empty_array",
26127
+ explicitDoneTargetsOnly: "allowed",
26128
+ explicitMissingTarget: "plan_amendment_required",
26129
+ explicitNotDoneTarget: "plan_amendment_required",
26130
+ reviewSourceWithExplicitDependsOn: "plan_amendment_required"
26131
+ }
26132
+ },
26133
+ ambiguityFlags
26134
+ },
25425
26135
  context: {
25426
- fileCount: contextFiles.length,
26136
+ fileCount: featureContextFiles.length,
25427
26137
  files: contextSummary
25428
26138
  },
26139
+ warning: configFallbackWarning ?? undefined,
25429
26140
  nextAction: getNextAction(planStatus, tasksSummary, runnable, !!plan, !!overview)
25430
26141
  });
25431
26142
  }
@@ -25473,8 +26184,8 @@ ${result.diff}
25473
26184
  })
25474
26185
  },
25475
26186
  command: {
25476
- hive: {
25477
- description: "Create a new feature: /hive <feature-name>",
26187
+ [HIVE_COMMANDS[0].key]: {
26188
+ description: HIVE_COMMANDS[0].description,
25478
26189
  async run(args) {
25479
26190
  const name = args.trim();
25480
26191
  if (!name)
@@ -25485,27 +26196,8 @@ ${result.diff}
25485
26196
  },
25486
26197
  config: async (opencodeConfig) => {
25487
26198
  function agentTools(allowed) {
25488
- const allHiveTools = [
25489
- "hive_feature_create",
25490
- "hive_feature_complete",
25491
- "hive_plan_write",
25492
- "hive_plan_read",
25493
- "hive_plan_approve",
25494
- "hive_tasks_sync",
25495
- "hive_task_create",
25496
- "hive_task_update",
25497
- "hive_worktree_start",
25498
- "hive_worktree_create",
25499
- "hive_worktree_commit",
25500
- "hive_worktree_discard",
25501
- "hive_merge",
25502
- "hive_context_write",
25503
- "hive_status",
25504
- "hive_skill",
25505
- "hive_agents_md"
25506
- ];
25507
26199
  const result = {};
25508
- for (const tool3 of allHiveTools) {
26200
+ for (const tool3 of HIVE_TOOL_NAMES) {
25509
26201
  if (!allowed.includes(tool3)) {
25510
26202
  result[tool3] = false;
25511
26203
  }
@@ -25528,7 +26220,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25528
26220
  variant: hiveUserConfig.variant,
25529
26221
  temperature: hiveUserConfig.temperature ?? 0.5,
25530
26222
  description: "Hive (Hybrid) - Plans + orchestrates. Detects phase, loads skills on-demand.",
25531
- prompt: QUEEN_BEE_PROMPT + hiveAutoLoadedSkills + (agentMode === "unified" ? customSubagentAppendix : ""),
26223
+ prompt: QUEEN_BEE_PROMPT + HIVE_SYSTEM_PROMPT + hiveAutoLoadedSkills + (agentMode === "unified" ? customSubagentAppendix : ""),
25532
26224
  permission: {
25533
26225
  question: "allow",
25534
26226
  skill: "allow",
@@ -25543,8 +26235,8 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25543
26235
  variant: architectUserConfig.variant,
25544
26236
  temperature: architectUserConfig.temperature ?? 0.7,
25545
26237
  description: "Architect (Planner) - Plans features, interviews, writes plans. NEVER executes.",
25546
- prompt: ARCHITECT_BEE_PROMPT + architectAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
25547
- tools: agentTools(["hive_feature_create", "hive_plan_write", "hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
26238
+ prompt: ARCHITECT_BEE_PROMPT + HIVE_SYSTEM_PROMPT + architectAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
26239
+ tools: agentTools(["hive_feature_create", "hive_plan_write", "hive_plan_read", "hive_context_write", "hive_network_query", "hive_status", "hive_skill"]),
25548
26240
  permission: {
25549
26241
  edit: "deny",
25550
26242
  task: "allow",
@@ -25562,7 +26254,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25562
26254
  variant: swarmUserConfig.variant,
25563
26255
  temperature: swarmUserConfig.temperature ?? 0.5,
25564
26256
  description: "Swarm (Orchestrator) - Orchestrates execution. Delegates, spawns workers, verifies, merges.",
25565
- prompt: SWARM_BEE_PROMPT + swarmAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
26257
+ prompt: SWARM_BEE_PROMPT + HIVE_SYSTEM_PROMPT + swarmAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
25566
26258
  tools: agentTools([
25567
26259
  "hive_feature_create",
25568
26260
  "hive_feature_complete",
@@ -25576,6 +26268,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25576
26268
  "hive_worktree_discard",
25577
26269
  "hive_merge",
25578
26270
  "hive_context_write",
26271
+ "hive_network_query",
25579
26272
  "hive_status",
25580
26273
  "hive_skill",
25581
26274
  "hive_agents_md"
@@ -25595,7 +26288,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25595
26288
  temperature: scoutUserConfig.temperature ?? 0.5,
25596
26289
  mode: "subagent",
25597
26290
  description: "Scout (Explorer/Researcher/Retrieval) - Researches codebase + external docs/data.",
25598
- prompt: SCOUT_BEE_PROMPT + scoutAutoLoadedSkills,
26291
+ prompt: SCOUT_BEE_PROMPT + HIVE_SYSTEM_PROMPT + scoutAutoLoadedSkills,
25599
26292
  tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
25600
26293
  permission: {
25601
26294
  edit: "deny",
@@ -25613,7 +26306,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25613
26306
  temperature: foragerUserConfig.temperature ?? 0.3,
25614
26307
  mode: "subagent",
25615
26308
  description: "Forager (Worker/Coder) - Executes tasks directly in isolated worktrees. Never delegates.",
25616
- prompt: FORAGER_BEE_PROMPT + foragerAutoLoadedSkills,
26309
+ prompt: FORAGER_BEE_PROMPT + HIVE_SYSTEM_PROMPT + foragerAutoLoadedSkills,
25617
26310
  tools: agentTools(["hive_plan_read", "hive_worktree_commit", "hive_context_write", "hive_skill"]),
25618
26311
  permission: {
25619
26312
  task: "deny",
@@ -25621,6 +26314,21 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25621
26314
  skill: "allow"
25622
26315
  }
25623
26316
  };
26317
+ const hiveHelperUserConfig = configService.getAgentConfig("hive-helper");
26318
+ const hiveHelperConfig = {
26319
+ model: hiveHelperUserConfig.model,
26320
+ variant: hiveHelperUserConfig.variant,
26321
+ temperature: hiveHelperUserConfig.temperature ?? 0.3,
26322
+ mode: "subagent",
26323
+ description: "Hive Helper - Runtime-only bounded hard-task operational assistant for merge recovery, state clarification, and safe manual follow-up assistance.",
26324
+ prompt: HIVE_HELPER_PROMPT + HIVE_SYSTEM_PROMPT,
26325
+ tools: agentTools(["hive_merge", "hive_status", "hive_context_write", "hive_task_create", "hive_skill"]),
26326
+ permission: {
26327
+ task: "deny",
26328
+ delegate: "deny",
26329
+ skill: "allow"
26330
+ }
26331
+ };
25624
26332
  const hygienicUserConfig = configService.getAgentConfig("hygienic-reviewer");
25625
26333
  const hygienicAutoLoadedSkills = await buildAutoLoadedSkillsContent("hygienic-reviewer", configService, directory);
25626
26334
  const hygienicConfig = {
@@ -25629,8 +26337,8 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25629
26337
  temperature: hygienicUserConfig.temperature ?? 0.3,
25630
26338
  mode: "subagent",
25631
26339
  description: "Hygienic (Consultant/Reviewer/Debugger) - Reviews plan documentation quality. OKAY/REJECT verdict.",
25632
- prompt: HYGIENIC_BEE_PROMPT + hygienicAutoLoadedSkills,
25633
- tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
26340
+ prompt: HYGIENIC_BEE_PROMPT + HIVE_SYSTEM_PROMPT + hygienicAutoLoadedSkills,
26341
+ tools: agentTools(["hive_plan_read", "hive_context_write", "hive_network_query", "hive_status", "hive_skill"]),
25634
26342
  permission: {
25635
26343
  edit: "deny",
25636
26344
  task: "deny",
@@ -25644,6 +26352,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25644
26352
  "swarm-orchestrator": swarmConfig,
25645
26353
  "scout-researcher": scoutConfig,
25646
26354
  "forager-worker": foragerConfig,
26355
+ "hive-helper": hiveHelperConfig,
25647
26356
  "hygienic-reviewer": hygienicConfig
25648
26357
  };
25649
26358
  const customAutoLoadedSkills = Object.fromEntries(await Promise.all(Object.entries(customAgentConfigs).map(async ([customAgentName, customAgentConfig]) => {
@@ -25667,12 +26376,14 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25667
26376
  allAgents["hive-master"] = builtInAgentConfigs["hive-master"];
25668
26377
  allAgents["scout-researcher"] = builtInAgentConfigs["scout-researcher"];
25669
26378
  allAgents["forager-worker"] = builtInAgentConfigs["forager-worker"];
26379
+ allAgents["hive-helper"] = builtInAgentConfigs["hive-helper"];
25670
26380
  allAgents["hygienic-reviewer"] = builtInAgentConfigs["hygienic-reviewer"];
25671
26381
  } else {
25672
26382
  allAgents["architect-planner"] = builtInAgentConfigs["architect-planner"];
25673
26383
  allAgents["swarm-orchestrator"] = builtInAgentConfigs["swarm-orchestrator"];
25674
26384
  allAgents["scout-researcher"] = builtInAgentConfigs["scout-researcher"];
25675
26385
  allAgents["forager-worker"] = builtInAgentConfigs["forager-worker"];
26386
+ allAgents["hive-helper"] = builtInAgentConfigs["hive-helper"];
25676
26387
  allAgents["hygienic-reviewer"] = builtInAgentConfigs["hygienic-reviewer"];
25677
26388
  }
25678
26389
  Object.assign(allAgents, customSubagents);
@@ -25692,6 +26403,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25692
26403
  delete configAgent["swarm-orchestrator"];
25693
26404
  delete configAgent["scout-researcher"];
25694
26405
  delete configAgent["forager-worker"];
26406
+ delete configAgent["hive-helper"];
25695
26407
  delete configAgent["hygienic-reviewer"];
25696
26408
  Object.assign(configAgent, allAgents);
25697
26409
  }