opencode-hive 1.3.5 → 1.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -871,8 +871,8 @@ var require_dist2 = __commonJS((exports) => {
871
871
  });
872
872
 
873
873
  // src/index.ts
874
- import * as path11 from "path";
875
- import * as fs13 from "fs";
874
+ import * as path13 from "path";
875
+ import * as fs14 from "fs";
876
876
  import * as os from "os";
877
877
 
878
878
  // ../../node_modules/zod/v4/classic/external.js
@@ -13196,7 +13196,7 @@ function tool(input) {
13196
13196
  }
13197
13197
  tool.schema = exports_external;
13198
13198
  // src/skills/registry.generated.ts
13199
- var BUILTIN_SKILL_NAMES = ["agents-md-mastery", "brainstorming", "code-reviewer", "dispatching-parallel-agents", "docker-mastery", "executing-plans", "parallel-exploration", "systematic-debugging", "test-driven-development", "verification-before-completion", "writing-plans"];
13199
+ var BUILTIN_SKILL_NAMES = ["agents-md-mastery", "brainstorming", "code-reviewer", "dispatching-parallel-agents", "docker-mastery", "executing-plans", "parallel-exploration", "systematic-debugging", "test-driven-development", "verification-before-completion", "verification-reviewer", "writing-plans"];
13200
13200
  var BUILTIN_SKILLS = [
13201
13201
  {
13202
13202
  name: "agents-md-mastery",
@@ -14195,7 +14195,9 @@ Hive detects runtime from project files:
14195
14195
  - \`Dockerfile\` → Builds from project Dockerfile
14196
14196
  - Fallback → \`ubuntu:24.04\`
14197
14197
 
14198
- **Override:** Set \`dockerImage\` in config (\`~/.config/opencode/agent_hive.json\`).
14198
+ **Override:** Set \`dockerImage\` in config (\`<project>/.hive/agent-hive.json\` preferred, legacy \`<project>/.opencode/agent_hive.json\`, \`~/.config/opencode/agent_hive.json\` fallback).
14199
+
14200
+ If project config is missing, invalid JSON, or invalid shape, Hive reads global config next and then falls back to defaults, surfacing a runtime warning when the project config is invalid.
14199
14201
 
14200
14202
  ## Red Flags - STOP
14201
14203
 
@@ -14303,12 +14305,22 @@ When batch complete:
14303
14305
  ### Step 4.5: Post-Batch Hygienic Review
14304
14306
 
14305
14307
  After the batch report, ask the operator if they want a Hygienic code review for the batch.
14306
- If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\` and apply feedback before starting the next batch.
14308
+ If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
14309
+ Route review feedback through this decision tree before continuing:
14310
+
14311
+ | Feedback type | Action |
14312
+ |---------------|--------|
14313
+ | Minor / local to the completed batch | **Inline fix** — apply directly, no new task |
14314
+ | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work |
14315
+ | Changes downstream sequencing, dependencies, or scope | **Plan amendment** — update \`plan.md\`, then \`hive_tasks_sync({ refreshPending: true })\` to rewrite pending tasks from the amended plan |
14316
+
14317
+ When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync.
14307
14318
 
14308
14319
  ### Step 5: Continue
14309
- Based on feedback:
14310
- - Apply changes if needed
14311
- - Execute next batch
14320
+ After applying review feedback (or if none):
14321
+ - Re-check \`hive_status()\` for the updated **runnable** set — tasks whose dependencies are all satisfied
14322
+ - Tasks blocked by unmet dependencies stay blocked until predecessors complete
14323
+ - Execute the next batch of runnable tasks
14312
14324
  - Repeat until complete
14313
14325
 
14314
14326
  ### Step 6: Complete Development
@@ -14353,7 +14365,7 @@ After all tasks complete and verified:
14353
14365
 
14354
14366
  When you need to answer "where/how does X work?" across multiple domains (codebase, tests, docs, OSS), investigating sequentially wastes time. Each investigation is independent and can happen in parallel.
14355
14367
 
14356
- **Core principle:** Decompose into independent sub-questions, spawn one task per sub-question, collect results asynchronously.
14368
+ **Core principle:** Decompose into independent sub-questions that fit in one context window, spawn one task per sub-question, then synthesize the bounded results.
14357
14369
 
14358
14370
  **Safe in Planning mode:** This is read-only exploration. It is OK to use during exploratory research even when there is no feature, no plan, and no approved tasks.
14359
14371
 
@@ -14383,7 +14395,7 @@ When you need to answer "where/how does X work?" across multiple domains (codeba
14383
14395
 
14384
14396
  ### 1. Decompose Into Independent Questions
14385
14397
 
14386
- Split your investigation into 2-4 independent sub-questions. Good decomposition:
14398
+ Split your investigation into 2-4 independent sub-questions. Each sub-question should fit in one context window. If a request will not fit in one context window, narrow the slice, capture bounded findings, and return to Hive with recommended next steps instead of pushing toward an oversized final report. Good decomposition:
14387
14399
 
14388
14400
  | Domain | Question Example |
14389
14401
  |--------|------------------|
@@ -14396,6 +14408,11 @@ Split your investigation into 2-4 independent sub-questions. Good decomposition:
14396
14408
  - "What is X?" then "How is X used?" (second depends on first)
14397
14409
  - "Find the bug" then "Fix the bug" (not read-only)
14398
14410
 
14411
+ **Stop and return to Hive when:**
14412
+ - one more fan-out would broaden scope too far
14413
+ - a sub-question no longer fits in one context window
14414
+ - the next useful step is implementation rather than exploration
14415
+
14399
14416
  ### 2. Spawn Tasks (Fan-Out)
14400
14417
 
14401
14418
  Launch all tasks before waiting for any results:
@@ -14435,27 +14452,21 @@ task({
14435
14452
  - Give each task a clear, focused \`description\`
14436
14453
  - Make prompts specific about what evidence to return
14437
14454
 
14438
- ### 3. Continue Working (Optional)
14455
+ ### 3. Collect Results
14439
14456
 
14440
- While tasks run, you can:
14441
- - Work on other aspects of the problem
14442
- - Prepare synthesis structure
14443
- - Start drafting based on what you already know
14457
+ After the fan-out message, collect the task results through the normal \`task()\` return flow. Do not invent background polling or a separate async workflow.
14444
14458
 
14445
- You'll receive a \`<system-reminder>\` notification when each task completes.
14446
-
14447
- ### 4. Collect Results
14459
+ ### 4. Synthesize Findings
14448
14460
 
14449
14461
  When each task completes, its result is returned directly. Collect the outputs from each task and proceed to synthesis.
14450
14462
 
14451
- ### 5. Synthesize Findings
14463
+ ### 5. Cleanup (If Needed)
14452
14464
 
14453
14465
  Combine results from all tasks:
14454
14466
  - Cross-reference findings (file X mentioned by tasks A and B)
14455
14467
  - Identify gaps (task C found nothing, need different approach)
14456
14468
  - Build coherent answer from parallel evidence
14457
-
14458
- ### 6. Cleanup (If Needed)
14469
+ - If the remaining work would no longer fit in one context window, return to Hive with bounded findings and recommended next steps
14459
14470
 
14460
14471
  No manual cancellation is required in task mode.
14461
14472
 
@@ -15385,6 +15396,116 @@ From 24 failure memories:
15385
15396
  Run the command. Read the output. THEN claim the result.
15386
15397
 
15387
15398
  This is non-negotiable.`
15399
+ },
15400
+ {
15401
+ name: "verification-reviewer",
15402
+ description: "Use when independently verifying implementation claims, post-merge review, or when a reviewer needs to falsify success assertions with command-and-output evidence",
15403
+ template: `# Verification Reviewer
15404
+
15405
+ ## Overview
15406
+
15407
+ Verify implementation claims by attempting to falsify them. Your job is not to confirm success; it is to find where success claims break down.
15408
+
15409
+ **Core principle:** Try to prove claims wrong. If you cannot, they are likely correct.
15410
+
15411
+ ## When to Use
15412
+
15413
+ Use this skill when:
15414
+ - Reviewing implementation changes that claim to be complete
15415
+ - Conducting post-merge verification of a task batch
15416
+ - A reviewer needs to independently confirm that acceptance criteria are met
15417
+ - Verifying that a bug fix actually resolves the reported symptom
15418
+
15419
+ Do not use this skill for:
15420
+ - Plan or documentation review (use the default Hygienic review path)
15421
+ - Code style or architecture review (use \`code-reviewer\`)
15422
+ - Pre-implementation planning
15423
+
15424
+ ## The Iron Law
15425
+
15426
+ \`\`\`
15427
+ RATIONALIZATIONS ARE NOT EVIDENCE
15428
+ \`\`\`
15429
+
15430
+ "The code looks correct" is not verification.
15431
+ "It should work because..." is not verification.
15432
+ "The tests pass" without showing test output is not verification.
15433
+
15434
+ Only command output, tool results, and observable behavior count as evidence.
15435
+
15436
+ ## Verification Protocol
15437
+
15438
+ For each claim in the implementation:
15439
+
15440
+ 1. **Identify the claim**: What specific thing is being asserted?
15441
+ 2. **Find the falsification test**: What command or check would fail if the claim is wrong?
15442
+ 3. **Run the test**: Execute the command fresh. Do not rely on cached or previous results.
15443
+ 4. **Record the evidence**: Quote the relevant output.
15444
+ 5. **Verdict**: Does the evidence support or contradict the claim?
15445
+
15446
+ ## Verification Depth by Change Type
15447
+
15448
+ Not all changes carry equal risk. Scale verification effort accordingly:
15449
+
15450
+ | Change type | Verification depth | Examples |
15451
+ |---|---|---|
15452
+ | Config / docs / prompts | Spot-check: confirm the file exists, syntax is valid, key content is present | Skill files, AGENTS.md, prompt strings |
15453
+ | Logic changes | Targeted: run the relevant test suite, check edge cases mentioned in the plan | New utility function, bug fix, refactor |
15454
+ | API / interface changes | Broad: run full test suite, check downstream consumers, verify types compile | New tool, changed function signatures |
15455
+ | Data model / migration | Exhaustive: run tests, verify data integrity, check backward compatibility | Schema changes, serialization format changes |
15456
+
15457
+ ## Anti-Rationalization Checklist
15458
+
15459
+ Before accepting any verification result, check yourself:
15460
+
15461
+ | Rationalization | Reality |
15462
+ |---|---|
15463
+ | "The code looks correct to me" | Reading code is not running code |
15464
+ | "The author said it passes" | Author claims are hypotheses, not evidence |
15465
+ | "It passed last time" | Stale evidence is not evidence |
15466
+ | "The linter is clean" | Linting does not prove correctness |
15467
+ | "The types compile" | Type-checking does not prove runtime behavior |
15468
+ | "I ran a similar check" | Similar is not the same |
15469
+ | "It's a trivial change" | Trivial changes break builds regularly |
15470
+
15471
+ ## Output Format
15472
+
15473
+ \`\`\`
15474
+ ## Verification Report
15475
+
15476
+ **Scope**: [What was reviewed - task name, PR, batch]
15477
+
15478
+ ### Claims Verified
15479
+
15480
+ | # | Claim | Test | Evidence | Verdict |
15481
+ |---|-------|------|----------|---------|
15482
+ | 1 | [What was claimed] | [Command/check run] | [Output excerpt] | PASS / FAIL / INCONCLUSIVE |
15483
+
15484
+ ### Summary
15485
+
15486
+ [1-3 sentences: overall assessment, any gaps, recommended actions]
15487
+
15488
+ ### Unverifiable Claims
15489
+
15490
+ [List any claims that could not be independently verified and why]
15491
+ \`\`\`
15492
+
15493
+ ## Verification Failures
15494
+
15495
+ When a claim fails verification:
15496
+
15497
+ 1. **Report the actual output** verbatim (do not summarize or interpret).
15498
+ 2. **State what was expected** vs what was observed.
15499
+ 3. **Do not suggest fixes** unless specifically asked. Your role is to identify the gap, not fill it.
15500
+ 4. **Flag severity**: Does this block the work, or is it a minor discrepancy?
15501
+
15502
+ ## Key Principles
15503
+
15504
+ - **Attempt falsification first.** Look for reasons the claim might be wrong before looking for reasons it is right.
15505
+ - **One claim, one test.** Do not batch multiple claims into a single verification step.
15506
+ - **Fresh runs only.** Re-run commands; do not reuse output from previous sessions or other agents.
15507
+ - **Quote output.** Paraphrasing introduces interpretation. Quote the relevant lines.
15508
+ - **Proportional effort.** Match verification depth to change risk. Do not spend 30 minutes verifying a typo fix.`
15388
15509
  },
15389
15510
  {
15390
15511
  name: "writing-plans",
@@ -15520,8 +15641,10 @@ All verification MUST be agent-executable (no human intervention):
15520
15641
  - Reference relevant skills with @ syntax
15521
15642
  - DRY, YAGNI, TDD, frequent commits
15522
15643
  - All acceptance criteria must be agent-executable (zero human intervention)
15523
- - Treat \`plan.md\` as the human-facing review surface and execution truth
15644
+ - Treat \`context/overview.md\` as the human-facing review surface
15645
+ - \`plan.md\` remains execution truth
15524
15646
  - Every plan needs a concise human-facing summary before \`## Tasks\`
15647
+ - The \`Design Summary\` in \`plan.md\` should stay readable and review-friendly even though overview-first review happens in \`context/overview.md\`
15525
15648
  - Optional Mermaid is allowed only in that pre-task summary section
15526
15649
  - Mermaid is for dependency or sequence overview only and is never required
15527
15650
  - Keep Discovery, Non-Goals, diagrams, and tasks in the same \`plan.md\` file
@@ -15718,14 +15841,24 @@ Intent Verbalization — verbalize before acting:
15718
15841
  - Delegate to Scout when you cannot name the file path upfront, expect to inspect 2+ files, or the question is open-ended ("how/where does X work?").
15719
15842
  - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
15720
15843
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
15844
+ - If discovery grows too broad, split broad research earlier into narrower Scout slices. Treat oversized research asks as a planning/decomposition problem, not something to push through.
15721
15845
 
15722
15846
  ### Delegation
15723
15847
  - Single-scout research → \`task({ subagent_type: "scout-researcher", prompt: "..." })\`
15724
15848
  - Parallel exploration → Load \`hive_skill("parallel-exploration")\` and follow the task mode delegation guidance.
15725
15849
  - Implementation → \`hive_worktree_start({ task: "01-task-name" })\` (creates worktree + Forager)
15726
15850
 
15851
+ ### Hive Network Lookup
15852
+ - \`hive_network_query\` is an optional lookup. Use it only when prior feature evidence would materially improve planning, orchestration, or review-routing decisions.
15853
+ - There is no startup lookup. First orient on live files and the current feature state.
15854
+ - Planning, orchestration, and review roles get network access first.
15855
+ - Treat retrieved snippets as historical leads, not execution truth. Live-file verification is still required.
15856
+ - Do not route worker execution through network retrieval. \`hive-helper\` is not a network consumer; it benefits indirectly from better upstream decisions.
15857
+
15727
15858
  During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
15728
15859
 
15860
+ **Synthesize Before Delegating:** Workers do not inherit your context or your conversation context. Relevant durable execution context is provided in \`spec.md\` under \`## Context\` when available. Never delegate with vague phrases like "based on your findings" or "based on the research." Restate the issue in concrete terms from the evidence you already have — include file paths, line ranges when known, expected result, and what done looks like. Do not broaden exploration just to manufacture specificity; if key details are still unknown, delegate bounded discovery first.
15861
+
15729
15862
  **When NOT to delegate:**
15730
15863
  - Single-file, <10-line changes — do directly
15731
15864
  - Sequential operations where you need the result of step N for step N+1
@@ -15737,7 +15870,13 @@ Save discoveries with \`hive_context_write\`:
15737
15870
  - User preferences
15738
15871
  - Research findings
15739
15872
 
15740
- Use context files for durable worker notes, decisions, and research. Keep the human-facing plan summary in \`plan.md\`.
15873
+ Use the lightweight context model explicitly:
15874
+ - \`overview\` = human-facing summary/history
15875
+ - \`draft\` = planner scratchpad
15876
+ - \`execution-decisions\` = orchestration log
15877
+ - all other names = durable free-form context
15878
+
15879
+ Treat the reserved names above as special-purpose files, not general notes. Use context files for durable worker notes, decisions, and research.
15741
15880
 
15742
15881
  When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
15743
15882
 
@@ -15814,8 +15953,8 @@ Each task declares dependencies with **Depends on**:
15814
15953
  - **Depends on**: none for no dependencies / parallel starts
15815
15954
  - **Depends on**: 1, 3 for explicit task-number dependencies
15816
15955
 
15817
- \`plan.md\` is the primary human-facing summary and the execution truth.
15818
- - Keep the summary before \`## Tasks\`.
15956
+ Refresh \`context/overview.md\` as the primary human-facing review surface, while \`plan.md\` remains execution truth.
15957
+ - Keep a readable \`Design Summary\` before \`## Tasks\` in \`plan.md\`.
15819
15958
  - Optional Mermaid is allowed only in the pre-task summary.
15820
15959
  - Never require Mermaid.
15821
15960
  - Use context files only for durable notes that help future execution.
@@ -15852,7 +15991,9 @@ Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **b
15852
15991
  ### Delegation Check
15853
15992
  1. Is there a specialized agent?
15854
15993
  2. Does this need external data? → Scout
15855
- 3. Default: delegate (don't do yourself)
15994
+ 3. Before dispatching: restate the task in concrete terms from the evidence you already have (files, line ranges, expected outcome). Do not forward vague summaries. Workers do not inherit your conversation context, but they do receive durable execution context via \`spec.md\`.
15995
+ 4. Default: delegate (don't do yourself)
15996
+ 5. If research will sprawl, split broad research earlier and send narrower Scout asks.
15856
15997
 
15857
15998
  ### Worker Spawning
15858
15999
  \`\`\`
@@ -15863,18 +16004,21 @@ hive_worktree_start({ task: "01-task-name" }) // Creates worktree + Forager
15863
16004
  1. \`task()\` is blocking — when it returns, the worker is done
15864
16005
  2. After \`task()\` returns, immediately call \`hive_status()\` to check the new task state and find next runnable tasks before any resume attempt
15865
16006
  3. Use \`continueFrom: "blocked"\` only when status is exactly \`blocked\`
15866
- 4. If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
15867
- 5. Never loop \`continueFrom: "blocked"\` on non-blocked statuses
15868
- 6. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
15869
- 7. Skip polling the result is available when \`task()\` returns
16007
+ 4. Before every blocked resume, call \`hive_status()\` immediately beforehand and verify the task is still exactly \`blocked\`
16008
+ 5. If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
16009
+ 6. Never loop \`continueFrom: "blocked"\` on non-blocked statuses
16010
+ 7. If any Hive tool response has \`terminal: true\`, treat it as final for that call and do not retry the same parameters
16011
+ 8. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
16012
+ 9. Skip polling — the result is available when \`task()\` returns
15870
16013
 
15871
16014
  ### Batch Merge + Verify Workflow
15872
16015
  When multiple tasks are in flight, prefer **batch completion** over per-task verification:
15873
16016
  1. Dispatch a batch of runnable tasks (ask user before parallelizing).
15874
16017
  2. Wait for all workers to finish.
15875
- 3. Merge each completed task branch into the current branch.
15876
- 4. Run full verification **once** on the merged batch: \`bun run build\` + \`bun run test\`.
15877
- 5. If verification fails, diagnose with full context. Fix directly or re-dispatch targeted tasks as needed.
16018
+ 3. Decide which completed task branches belong in the next merge batch.
16019
+ 4. Delegate the merge batch to \`hive-helper\`, for example: \`task({ subagent_type: 'hive-helper', prompt: 'delegate the merge batch: merge completed tasks 01-task-name and 02-task-name into the current branch, resolve preserved conflicts locally, continue through the batch, and return a concise summary.' })\`.
16020
+ 5. After the helper returns, inspect the merge summary and run full verification **once** on the merged batch: \`bun run build\` + \`bun run test\`.
16021
+ 6. If verification fails, diagnose with full context. Fix directly or re-dispatch targeted tasks as needed.
15878
16022
 
15879
16023
  ### Failure Recovery (After 3 Consecutive Failures)
15880
16024
  1. Stop all further edits
@@ -15883,14 +16027,26 @@ When multiple tasks are in flight, prefer **batch completion** over per-task ver
15883
16027
  4. Ask user via question() — present options and context
15884
16028
 
15885
16029
  ### Merge Strategy
15886
- \`hive_merge({ task: "01-task-name" })\` for each task after the batch completes, then verify the batch
16030
+ Hive decides when to merge, delegated \`hive-helper\` executes the batch, and Hive keeps post-batch verification.
16031
+ For bounded operational cleanup, Hive may also delegate hard-task cleanup to \`hive-helper\`: clarifying current feature/task/worktree state, summarizing interrupted wrap-up candidates, and creating a safe append-only manual follow-up when the work is isolated and does not change sequencing. Helper may inspect current feature state and summarize what is observably mergeable/resumable/blocked, but DAG-changing requests or anything that needs new sequencing must route back to Hive for plan amendment.
15887
16032
 
15888
16033
  ### Post-Batch Review (Hygienic)
15889
16034
  After completing and merging a batch:
15890
16035
  1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15891
16036
  2. If yes → default to built-in \`hygienic-reviewer\`; choose a configured hygienic-derived reviewer only when its description in \`Configured Custom Subagents\` is a better match.
15892
16037
  3. Then run \`task({ subagent_type: "<chosen-reviewer>", prompt: "Review implementation changes from the latest batch." })\`.
15893
- 4. Apply feedback before starting the next batch.
16038
+ 4. Route review feedback through this decision tree before starting the next batch:
16039
+
16040
+ #### Review Follow-Up Routing
16041
+
16042
+ | Feedback type | Action |
16043
+ |---------------|--------|
16044
+ | Minor / local to the completed batch | **Inline fix** — apply directly, no new task |
16045
+ | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work; when the need comes from hard-task cleanup or wrap-up handling, Hive may delegate the safe append-only manual follow-up to \`hive-helper\` |
16046
+ | Changes downstream sequencing, dependencies, or scope | **Plan amendment** — update \`plan.md\`, then \`hive_tasks_sync({ refreshPending: true })\` to rewrite pending tasks from the amended plan |
16047
+
16048
+ When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync. \`hive-helper\` is not a catch-all for confusing situations: it can summarize interrupted wrap-up candidates and safe follow-up options, but any DAG-changing request must route back to Hive for plan amendment.
16049
+ After sync, re-check \`hive_status()\` for the updated **runnable** set before dispatching.
15894
16050
 
15895
16051
  ### AGENTS.md Maintenance
15896
16052
  After feature completion (all tasks merged):
@@ -15953,6 +16109,8 @@ PLANNER, NOT IMPLEMENTER. "Do X" means "create plan for X".
15953
16109
 
15954
16110
  During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
15955
16111
 
16112
+ Use \`hive_network_query\` only as an optional lookup when prior feature evidence would materially improve the plan. There is no startup lookup; start with the live request and live files. Planning, orchestration, and review roles get network access first. Network results are historical leads only, so live-file verification is still required.
16113
+
15956
16114
  ## Self-Clearance Check (After Every Exchange)
15957
16115
 
15958
16116
  □ Core objective clearly defined?
@@ -16033,8 +16191,8 @@ Each task MUST declare dependencies with **Depends on**:
16033
16191
  - **Depends on**: none for no dependencies / parallel starts
16034
16192
  - **Depends on**: 1, 3 for explicit task-number dependencies
16035
16193
 
16036
- \`plan.md\` is the primary human-facing summary and the execution truth.
16037
- - Keep the human-facing summary in \`plan.md\` before \`## Tasks\`.
16194
+ Refresh \`context/overview.md\` as the primary human-facing review surface, while \`plan.md\` remains execution truth.
16195
+ - Keep the human-facing \`Design Summary\` in \`plan.md\` before \`## Tasks\`.
16038
16196
  - Optional Mermaid is allowed only in the pre-task summary.
16039
16197
  - Mermaid is for dependency or sequence overview only and is never required.
16040
16198
  - Use context files only for durable notes that help future workers.
@@ -16063,6 +16221,7 @@ Each task MUST declare dependencies with **Depends on**:
16063
16221
  - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
16064
16222
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
16065
16223
  - When running parallel exploration, align with the skill guidance.
16224
+ - If discovery keeps widening, split broad research earlier into narrower Scout slices. Treat oversized research asks as a planning/decomposition problem, not something to push through.
16066
16225
  `;
16067
16226
 
16068
16227
  // src/agents/swarm.ts
@@ -16086,15 +16245,37 @@ Intent Verbalization: "I detect [type] intent — [reason]. Routing to [action].
16086
16245
 
16087
16246
  Use \`hive_status()\` to see runnable tasks and blockedBy info. Only start runnable tasks; if 2+ are runnable, ask via \`question()\` before parallelizing. Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`. If tasks lack **Depends on** metadata, ask the planner to revise. If Scout returns substantial findings (3+ files, architecture patterns, or key decisions), persist them via \`hive_context_write\`.
16088
16247
 
16089
- Maintain \`context/overview.md\` with \`hive_context_write({ name: "overview", content: ... })\` as the primary human-facing document. Keep \`plan.md\` / \`spec.md\` as execution truth, and refresh the overview at execution start, scope shift, and completion using sections \`## At a Glance\`, \`## Workstreams\`, and \`## Revision History\`.
16248
+ If discovery starts to sprawl, split broad research earlier into narrower Scout slices. Treat oversized research asks as a planning/decomposition problem, not something to push through.
16249
+
16250
+ Maintain \`context/overview.md\` with \`hive_context_write({ name: "overview", content: ... })\` as the primary human-facing document. Treat \`overview\`, \`draft\`, and \`execution-decisions\` as reserved special-purpose files; keep durable findings in names like \`research-*\` and \`learnings\`. Keep \`plan.md\` / \`spec.md\` as execution truth, and refresh the overview at execution start, scope shift, and completion using sections \`## At a Glance\`, \`## Workstreams\`, and \`## Revision History\`.
16090
16251
 
16091
16252
  Standard checks: specialized agent? can I do it myself for sure? external system data (DBs/APIs/3rd-party tools)? If external data needed: load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out. In task mode, use task() for research fan-out. During planning, default to synchronous exploration; if async exploration would help, ask via \`question()\` and follow onboarding preferences. Default: delegate. Research tools (grep_app, context7, websearch, ast_grep) — delegate to Scout, not direct use.
16092
16253
 
16254
+ \`hive_network_query\` is an optional lookup for orchestration and review-routing decisions when prior feature evidence would materially improve the call. There is no startup lookup; orient on the live task and current repo state first. Planning, orchestration, and review roles get network access first. Treat network snippets as historical leads only; live-file verification is still required. \`hive-helper\` is not a network consumer.
16255
+
16093
16256
  **When NOT to delegate:**
16094
16257
  - Single-file, <10-line changes — do directly
16095
16258
  - Sequential operations where you need the result of step N for step N+1
16096
16259
  - Questions answerable with one grep + one file read
16097
16260
 
16261
+ ## Synthesize Before Delegating
16262
+
16263
+ Workers do not inherit your context or your conversation context. Relevant durable execution context is available in \`spec.md\` under \`## Context\` when present. Before dispatching any work, prove you understand it by restating the problem in concrete terms from the evidence you already have.
16264
+
16265
+ **Rules:**
16266
+ - Never delegate with vague phrases like "based on your findings", "based on the research", or "as discussed above" — the worker does not share your prior conversation state.
16267
+ - Restate the issue with specific file paths and line ranges when known.
16268
+ - State the expected result and what done looks like.
16269
+ - Do not broaden exploration just to manufacture specificity; delegate bounded discovery first when key details are still unknown.
16270
+
16271
+ <Bad>
16272
+ "Implement the changes we discussed based on the research findings."
16273
+ </Bad>
16274
+
16275
+ <Good>
16276
+ "In \`packages/core/src/services/task.ts:45-60\`, the \`resolveTask\` function silently swallows errors from \`loadConfig\`. Change it to propagate the error with the original message. Done = \`loadConfig\` failures surface to the caller, existing tests in \`task.test.ts\` still pass."
16277
+ </Good>
16278
+
16098
16279
  ## Delegation Prompt Structure (All 6 Sections)
16099
16280
 
16100
16281
  \`\`\`
@@ -16119,8 +16300,10 @@ Delegation guidance:
16119
16300
  - \`task()\` is BLOCKING — returns when the worker is done
16120
16301
  - After \`task()\` returns, call \`hive_status()\` immediately to check new state and find next runnable tasks before any resume attempt
16121
16302
  - Use \`continueFrom: "blocked"\` only when status is exactly \`blocked\`
16303
+ - Immediately before every blocked resume, call \`hive_status()\` and verify the task is still exactly \`blocked\`
16122
16304
  - If status is not \`blocked\`, do not use \`continueFrom: "blocked"\`; use \`hive_worktree_start({ feature, task })\` only for normal starts (\`pending\` / \`in_progress\`)
16123
16305
  - Never loop \`continueFrom: "blocked"\` on non-blocked statuses
16306
+ - If any Hive tool response has \`terminal: true\`, treat it as final for that call and do not retry the same parameters
16124
16307
  - For parallel fan-out, issue multiple \`task()\` calls in the same message
16125
16308
 
16126
16309
  ## After Delegation - VERIFY
@@ -16148,7 +16331,7 @@ After completing and merging a batch, run full verification on the main branch:
16148
16331
 
16149
16332
  ## Blocker Handling
16150
16333
 
16151
- When worker reports blocked: \`hive_status()\` → confirm status is exactly \`blocked\` → read blocker info; \`question()\` → ask user (no plain text); \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`. If status is not \`blocked\`, do not use blocked resume; only use \`hive_worktree_start({ feature, task })\` for normal starts (\`pending\` / \`in_progress\`).
16334
+ When worker reports blocked: \`hive_status()\` → confirm status is exactly \`blocked\` → read blocker info; \`question()\` → ask user (no plain text); call \`hive_status()\` again immediately before resume; only then \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`. If status is not \`blocked\`, do not use blocked resume; only use \`hive_worktree_start({ feature, task })\` for normal starts (\`pending\` / \`in_progress\`).
16152
16335
 
16153
16336
  ## Failure Recovery (After 3 Consecutive Failures)
16154
16337
 
@@ -16159,17 +16342,32 @@ When worker reports blocked: \`hive_status()\` → confirm status is exactly \`b
16159
16342
 
16160
16343
  ## Merge Strategy
16161
16344
 
16345
+ Swarm decides when to merge, then delegate the merge batch to \`hive-helper\`, for example:
16346
+
16162
16347
  \`\`\`
16163
- hive_merge({ task: "01-task-name", strategy: "merge" })
16348
+ task({ subagent_type: 'hive-helper', prompt: 'delegate the merge batch: merge completed tasks 01-task-name and 02-task-name into the current branch, resolve preserved conflicts locally, continue through the batch, and return a concise summary.' })
16164
16349
  \`\`\`
16165
16350
 
16166
- Merge after batch completes, then verify the merged result.
16351
+ After the helper returns, verify the merged result on the orchestrator branch with \`bun run build\` and \`bun run test\`.
16352
+ For bounded operational cleanup, Swarm may also delegate hard-task cleanup to \`hive-helper\`: clarifying current feature/task/worktree state, summarizing interrupted wrap-up candidates, and creating a safe append-only manual follow-up when the work is isolated and does not change sequencing. Helper may inspect current feature state and summarize what is observably mergeable/resumable/blocked, but DAG-changing requests or anything that needs new sequencing must route back to Swarm for plan amendment.
16167
16353
 
16168
16354
  ### Post-Batch Review (Hygienic)
16169
16355
 
16170
16356
  After completing and merging a batch: ask via \`question()\` if they want a Hygienic review.
16171
16357
  If yes, default to built-in \`hygienic-reviewer\`; choose a configured hygienic-derived reviewer only when its description in \`Configured Custom Subagents\` is a better match.
16172
- Then run \`task({ subagent_type: "<chosen-reviewer>", prompt: "Review implementation changes from the latest batch." })\` and apply feedback before the next batch.
16358
+ Then run \`task({ subagent_type: "<chosen-reviewer>", prompt: "Review implementation changes from the latest batch." })\`.
16359
+ Route review feedback through this decision tree before starting the next batch:
16360
+
16361
+ #### Review Follow-Up Routing
16362
+
16363
+ | Feedback type | Action |
16364
+ |---------------|--------|
16365
+ | Minor / local to the completed batch | **Inline fix** — apply directly, no new task |
16366
+ | New isolated work that does not affect downstream sequencing | **Manual task** — \`hive_task_create()\` for non-blocking ad-hoc work; when the need comes from hard-task cleanup or wrap-up handling, Swarm may delegate the safe append-only manual follow-up to \`hive-helper\` |
16367
+ | Changes downstream sequencing, dependencies, or scope | **Plan amendment** — update \`plan.md\`, then \`hive_tasks_sync({ refreshPending: true })\` to rewrite pending tasks from the amended plan |
16368
+
16369
+ When amending the plan: append new task numbers at the end (do not renumber), update \`Depends on:\` entries to express the new DAG order, then sync. \`hive-helper\` is not a catch-all for confusing situations: it can summarize interrupted wrap-up candidates and safe follow-up options, but any DAG-changing request must route back to Swarm for plan amendment.
16370
+ After sync, re-check \`hive_status()\` for the updated **runnable** set before dispatching.
16173
16371
 
16174
16372
  ### AGENTS.md Maintenance
16175
16373
 
@@ -16210,6 +16408,8 @@ Research before answering; parallelize tool calls when investigating multiple in
16210
16408
 
16211
16409
  ## Research Protocol
16212
16410
 
16411
+ Research tasks must fit in one context window. If a request will not fit in one context window, narrow the slice, capture bounded findings, and return to Hive with recommended next steps instead of pushing toward an oversized final report.
16412
+
16213
16413
  ### Phase 1: Intent Analysis (First)
16214
16414
 
16215
16415
  \`\`\`
@@ -16252,6 +16452,14 @@ Stop when any is true:
16252
16452
  - repeated information across sources
16253
16453
  - two rounds with no new data
16254
16454
  - a direct answer is found
16455
+ - scope keeps broadening, next steps stay ambiguous, or continued exploration feels risky — return to Hive with bounded findings and next-step recommendations
16456
+
16457
+ ## Synthesis Rules
16458
+
16459
+ - When you have not read a file, do not speculate about its contents. State what is unknown and offer to investigate.
16460
+ - When results from multiple sources exist, provide a cited synthesis rather than dumping raw search output.
16461
+ - Every factual claim in the answer must link to a specific source (file:line, URL, snippet). If a claim cannot be sourced, omit it or mark it as unverified.
16462
+ - Prefer concise answers. If a longer treatment is needed, lead with a summary sentence, then expand.
16255
16463
 
16256
16464
  ## Evidence Check (Before Answering)
16257
16465
 
@@ -16264,12 +16472,23 @@ Stop when any is true:
16264
16472
 
16265
16473
  ## Tool Strategy
16266
16474
 
16475
+ ### Preferred Search Sequence
16476
+
16477
+ Start with local read-only tools before reaching for external sources:
16478
+
16479
+ 1. **Local discovery first**: \`glob\`, \`grep\`, \`read\`, \`ast_grep\` — cheapest and most precise for codebase questions.
16480
+ 2. **Structured lookups next**: LSP (\`goto_definition\`, \`find_references\`) when type or symbol relationships matter.
16481
+ 3. **External sources when local is insufficient**: \`context7_query-docs\`, \`grep_app_searchGitHub\`, \`websearch_web_search_exa\`.
16482
+ 4. **Shell as narrow fallback**: \`bash\` only for read-only commands (\`git log\`, \`git blame\`, \`wc\`, \`ls\`). Never use bash for file writes, redirects, or state-changing operations.
16483
+
16484
+ ### Tool Reference
16485
+
16267
16486
  | Need | Tool |
16268
16487
  |------|------|
16269
- | Type/Symbol info | LSP (goto_definition, find_references) |
16270
- | Structural patterns | ast_grep_find_code |
16271
- | Text patterns | grep |
16272
16488
  | File discovery | glob |
16489
+ | Text patterns | grep |
16490
+ | Structural patterns | ast_grep_find_code |
16491
+ | Type/Symbol info | LSP (goto_definition, find_references) |
16273
16492
  | Git history | bash (git log, git blame) |
16274
16493
  | External docs | context7_query-docs |
16275
16494
  | OSS examples | grep_app_searchGitHub |
@@ -16299,14 +16518,31 @@ When operating within a feature context:
16299
16518
  content: "## {Topic}\\n\\nDate: {YYYY-MM-DD}\\n\\n## Context\\n\\n## Findings"
16300
16519
  })
16301
16520
  \`\`\`
16521
+ - Use reserved names like \`overview\`, \`draft\`, and \`execution-decisions\` only for their special-purpose workflows, not for general research notes.
16522
+ - Use \`hive_context_write\` only for meaningful checkpoints, not every small step.
16302
16523
 
16303
16524
  ## Operating Rules
16304
16525
 
16305
- - Read-only behavior (no file changes)
16306
16526
  - Classify request first, then research
16307
16527
  - Use absolute paths for file references
16308
16528
  - Cite evidence for every claim
16309
16529
  - Use the current year when reasoning about time-sensitive information
16530
+
16531
+ ### Read-Only Contract
16532
+
16533
+ Scout must never modify project state. This includes:
16534
+ - No file edits, creation, or deletion (no \`write\`, \`edit\`, \`bash\` writes)
16535
+ - No temporary files, scratch files, or redirect-based output (\`>\`, \`>>\`, \`tee\`)
16536
+ - No state-changing shell commands (\`rm\`, \`mv\`, \`cp\`, \`mkdir\`, \`chmod\`, \`git checkout\`, \`git commit\`, \`npm install\`, \`pip install\`)
16537
+ - No code execution beyond read-only queries (\`git log\`, \`git blame\`, \`wc\`, \`ls\`)
16538
+
16539
+ When a task requires writing, tell the caller what to write and where, instead of writing it.
16540
+
16541
+ ### Speed and Efficiency
16542
+
16543
+ - When a question has independent sub-parts, investigate them in parallel using batched tool calls.
16544
+ - Stop researching when you have enough direct evidence to answer. Use additional sources only when the first source leaves ambiguity.
16545
+ - If the first tool call answers the question directly, answer immediately rather than running the full research protocol.
16310
16546
  `;
16311
16547
 
16312
16548
  // src/agents/forager.ts
@@ -16331,6 +16567,7 @@ Execute directly. Work in isolation. Do not delegate implementation.
16331
16567
  - REQUIRED: keep going until done, make decisions, course-correct on failure
16332
16568
 
16333
16569
  Your tool access is scoped to your role. Use only the tools available to you.
16570
+ Your task-local worker prompt lists exact tools and verification expectations. Defer to that prompt for tool scope and evidence requirements.
16334
16571
 
16335
16572
  ## Allowed Research
16336
16573
 
@@ -16367,6 +16604,8 @@ Do not modify the plan file.
16367
16604
  For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks), use:
16368
16605
  \`hive_context_write({ name: "learnings", content: "..." })\`.
16369
16606
 
16607
+ Treat reserved names like \`overview\`, \`draft\`, and \`execution-decisions\` as special-purpose files rather than general worker notes.
16608
+
16370
16609
  ## Working Rules
16371
16610
 
16372
16611
  - DRY/Search First: look for existing helpers before adding new code
@@ -16386,7 +16625,7 @@ EXPLORE → PLAN → EXECUTE → VERIFY → LOOP
16386
16625
  - EXPLORE: read references, gather context, search for patterns
16387
16626
  - PLAN: decide the minimum change, files to touch, and verification commands
16388
16627
  - EXECUTE: edit using conventions, reuse helpers, batch changes
16389
- - VERIFY: run best-effort checks (tests if available, ast_grep, lsp_diagnostics)
16628
+ - VERIFY: run best-effort checks (tests if available, ast_grep, lsp_diagnostics). Record observed output; do not substitute explanation for execution.
16390
16629
  - LOOP: if verification fails, diagnose and retry within the limit
16391
16630
 
16392
16631
  ## Progress Updates
@@ -16444,6 +16683,53 @@ If a command must run on the host or Docker is missing, report blocked.
16444
16683
  For deeper Docker expertise, load \`hive_skill("docker-mastery")\`.
16445
16684
  `;
16446
16685
 
16686
+ // src/agents/hive-helper.ts
16687
+ var HIVE_HELPER_PROMPT = `# Hive Helper
16688
+
16689
+ You are a runtime-only bounded hard-task operational assistant. You never plan, orchestrate, or broaden the assignment.
16690
+
16691
+ ## Bounded Modes
16692
+
16693
+ - merge recovery
16694
+ - state clarification
16695
+ - safe manual-follow-up assistance
16696
+
16697
+ ## Core Rules
16698
+
16699
+ - never plan, orchestrate, or broaden the assignment
16700
+ - use \`hive_merge\` first
16701
+ - if merge returns \`conflictState: 'preserved'\`, resolve locally in this helper session and continue the merge batch
16702
+ - may summarize observable state for the caller
16703
+ - may create safe append-only manual tasks when the requested follow-up fits the current approved DAG boundary
16704
+ - never update plan-backed task state
16705
+ - escalate DAG-changing requests back to Hive Master / Swarm for plan amendment
16706
+ - return only concise merged/state/task/blocker summary text
16707
+
16708
+ ## Scope
16709
+
16710
+ - Merge completed task branches for the caller
16711
+ - Clarify current observable feature/task/worktree state after interruptions or ambiguity
16712
+ - Create safe append-only manual follow-up tasks within the existing approved DAG boundary
16713
+ - Handle preserved merge conflicts in this isolated helper session
16714
+ - Continue the requested merge batch until complete or blocked
16715
+ - Do not start worktrees, rewrite plans, update plan-backed task state, or broaden the assignment
16716
+
16717
+ ## Execution
16718
+
16719
+ 1. Call \`hive_merge\` first for the requested task branch.
16720
+ 2. If the merge succeeds, continue to the next requested merge.
16721
+ 3. If \`conflictState: 'preserved'\`, inspect and resolve locally, complete the merge, and continue the merge batch.
16722
+ 4. When asked for state clarification, use observable \`hive_status\` output and summarize only what is present.
16723
+ 5. When asked for manual follow-up assistance, create only safe append-only manual tasks that do not rewrite the approved DAG or alter plan-backed task state.
16724
+ 6. If the request would change sequencing, dependencies, or plan scope, stop and escalate it back to Hive Master / Swarm for plan amendment.
16725
+ 7. If you cannot safely resolve a conflict or satisfy the bounded request, stop and return a concise blocker summary.
16726
+
16727
+ ## Output
16728
+
16729
+ Return only concise merged/state/task/blocker summary text.
16730
+ Do not include planning, orchestration commentary, or long narratives.
16731
+ `;
16732
+
16447
16733
  // src/agents/hygienic.ts
16448
16734
  var HYGIENIC_BEE_PROMPT = `# Hygienic (Consultant/Reviewer/Debugger)
16449
16735
 
@@ -16458,6 +16744,14 @@ If you are asked to review IMPLEMENTATION (code changes, diffs, PRs) instead of
16458
16744
  2. Apply it and return its output format
16459
16745
  3. Still do NOT edit code (review only)
16460
16746
 
16747
+ If you are asked to VERIFY implementation claims (confirm acceptance criteria, validate that a fix works, post-merge verification):
16748
+ 1. Load \`hive_skill("verification-reviewer")\`
16749
+ 2. Follow its falsification-first protocol
16750
+ 3. Return its evidence-backed report format
16751
+ 4. Do NOT accept rationalizations as evidence — only command output and observable results count
16752
+
16753
+ If \`hive_network_query\` results are included in a review, treat them as historical contrast with citations, never as authority over live repository state. Always prefer current diffs, files, and command output when they disagree.
16754
+
16461
16755
  Self-check before every critique:
16462
16756
  > "Am I questioning APPROACH or DOCUMENTATION?"
16463
16757
  > APPROACH → Stay silent
@@ -16621,6 +16915,7 @@ var BUILT_IN_AGENT_NAMES = [
16621
16915
  "swarm-orchestrator",
16622
16916
  "scout-researcher",
16623
16917
  "forager-worker",
16918
+ "hive-helper",
16624
16919
  "hygienic-reviewer"
16625
16920
  ];
16626
16921
  var CUSTOM_AGENT_BASES = ["forager-worker", "hygienic-reviewer"];
@@ -16643,6 +16938,7 @@ var DEFAULT_AGENT_MODELS = {
16643
16938
  "swarm-orchestrator": "github-copilot/claude-opus-4.5",
16644
16939
  "scout-researcher": "zai-coding-plan/glm-4.7",
16645
16940
  "forager-worker": "github-copilot/gpt-5.2-codex",
16941
+ "hive-helper": "github-copilot/gpt-5.2-codex",
16646
16942
  "hygienic-reviewer": "github-copilot/gpt-5.2-codex"
16647
16943
  };
16648
16944
  var DEFAULT_HIVE_CONFIG = {
@@ -16702,6 +16998,11 @@ var DEFAULT_HIVE_CONFIG = {
16702
16998
  temperature: 0.3,
16703
16999
  autoLoadSkills: ["test-driven-development", "verification-before-completion"]
16704
17000
  },
17001
+ "hive-helper": {
17002
+ model: DEFAULT_AGENT_MODELS["hive-helper"],
17003
+ temperature: 0.3,
17004
+ autoLoadSkills: []
17005
+ },
16705
17006
  "hygienic-reviewer": {
16706
17007
  model: DEFAULT_AGENT_MODELS["hygienic-reviewer"],
16707
17008
  temperature: 0.3,
@@ -17316,13 +17617,20 @@ class PlanService {
17316
17617
  // ../hive-core/src/services/taskService.ts
17317
17618
  import * as fs6 from "fs";
17318
17619
  var TASK_STATUS_SCHEMA_VERSION = 1;
17620
+ var EXECUTION_HISTORY_STATUSES = new Set([
17621
+ "in_progress",
17622
+ "done",
17623
+ "blocked",
17624
+ "failed",
17625
+ "partial"
17626
+ ]);
17319
17627
 
17320
17628
  class TaskService {
17321
17629
  projectRoot;
17322
17630
  constructor(projectRoot) {
17323
17631
  this.projectRoot = projectRoot;
17324
17632
  }
17325
- sync(featureName) {
17633
+ sync(featureName, options) {
17326
17634
  const planPath = getPlanPath(this.projectRoot, featureName);
17327
17635
  const planContent = readText(planPath);
17328
17636
  if (!planContent) {
@@ -17338,12 +17646,13 @@ class TaskService {
17338
17646
  manual: []
17339
17647
  };
17340
17648
  const existingByName = new Map(existingTasks.map((t) => [t.folder, t]));
17649
+ const refreshPending = options?.refreshPending === true;
17341
17650
  for (const existing of existingTasks) {
17342
17651
  if (existing.origin === "manual") {
17343
17652
  result.manual.push(existing.folder);
17344
17653
  continue;
17345
17654
  }
17346
- if (existing.status === "done" || existing.status === "in_progress") {
17655
+ if (EXECUTION_HISTORY_STATUSES.has(existing.status)) {
17347
17656
  result.kept.push(existing.folder);
17348
17657
  continue;
17349
17658
  }
@@ -17356,6 +17665,12 @@ class TaskService {
17356
17665
  if (!stillInPlan) {
17357
17666
  this.deleteTask(featureName, existing.folder);
17358
17667
  result.removed.push(existing.folder);
17668
+ } else if (refreshPending && existing.status === "pending") {
17669
+ const planTask = planTasks.find((p) => p.folder === existing.folder);
17670
+ if (planTask) {
17671
+ this.refreshPendingTask(featureName, planTask, planTasks, planContent);
17672
+ }
17673
+ result.kept.push(existing.folder);
17359
17674
  } else {
17360
17675
  result.kept.push(existing.folder);
17361
17676
  }
@@ -17368,19 +17683,38 @@ class TaskService {
17368
17683
  }
17369
17684
  return result;
17370
17685
  }
17371
- create(featureName, name, order) {
17372
- const tasksPath = getTasksPath(this.projectRoot, featureName);
17686
+ create(featureName, name, order, metadata) {
17373
17687
  const existingFolders = this.listFolders(featureName);
17374
- const nextOrder = order ?? this.getNextOrder(existingFolders);
17375
- const folder = `${String(nextOrder).padStart(2, "0")}-${name}`;
17688
+ const nextOrder = this.getNextOrder(existingFolders);
17689
+ if (order !== undefined && order !== nextOrder) {
17690
+ throw new Error(`Manual tasks are append-only: requested order ${order} does not match the next available order ${nextOrder}. ` + `Intermediate insertion requires plan amendment.`);
17691
+ }
17692
+ if (metadata?.source === "review" && metadata.dependsOn && metadata.dependsOn.length > 0) {
17693
+ throw new Error(`Review-sourced manual tasks cannot have explicit dependsOn. ` + `Cross-task dependencies require a plan amendment. ` + `Either remove the dependsOn field or amend the plan to express the dependency.`);
17694
+ }
17695
+ const dependsOn = metadata?.dependsOn ?? [];
17696
+ this.validateManualTaskDependsOn(featureName, dependsOn);
17697
+ const resolvedOrder = order ?? nextOrder;
17698
+ const folder = `${String(resolvedOrder).padStart(2, "0")}-${name}`;
17699
+ const collision = existingFolders.find((f) => {
17700
+ const match = f.match(/^(\d+)-/);
17701
+ return match && parseInt(match[1], 10) === resolvedOrder;
17702
+ });
17703
+ if (collision) {
17704
+ throw new Error(`Task folder collision: order ${resolvedOrder} already exists as "${collision}". ` + `Choose a different order number or omit to auto-increment.`);
17705
+ }
17376
17706
  const taskPath = getTaskPath(this.projectRoot, featureName, folder);
17377
17707
  ensureDir(taskPath);
17378
17708
  const status = {
17379
17709
  status: "pending",
17380
17710
  origin: "manual",
17381
- planTitle: name
17711
+ planTitle: name,
17712
+ dependsOn,
17713
+ ...metadata ? { metadata: { ...metadata, dependsOn: undefined } } : {}
17382
17714
  };
17383
17715
  writeJson(getTaskStatusPath(this.projectRoot, featureName, folder), status);
17716
+ const specContent = this.buildManualTaskSpec(featureName, folder, name, dependsOn, metadata);
17717
+ writeText(getTaskSpecPath(this.projectRoot, featureName, folder), specContent);
17384
17718
  return folder;
17385
17719
  }
17386
17720
  createFromPlan(featureName, task, allTasks, planContent) {
@@ -17403,6 +17737,27 @@ class TaskService {
17403
17737
  });
17404
17738
  writeText(getTaskSpecPath(this.projectRoot, featureName, task.folder), specContent);
17405
17739
  }
17740
+ refreshPendingTask(featureName, task, allTasks, planContent) {
17741
+ const dependsOn = this.resolveDependencies(task, allTasks);
17742
+ const statusPath = getTaskStatusPath(this.projectRoot, featureName, task.folder);
17743
+ const current = readJson(statusPath);
17744
+ if (current) {
17745
+ const updated = {
17746
+ ...current,
17747
+ planTitle: task.name,
17748
+ dependsOn
17749
+ };
17750
+ writeJson(statusPath, updated);
17751
+ }
17752
+ const specContent = this.buildSpecContent({
17753
+ featureName,
17754
+ task,
17755
+ dependsOn,
17756
+ allTasks,
17757
+ planContent
17758
+ });
17759
+ writeText(getTaskSpecPath(this.projectRoot, featureName, task.folder), specContent);
17760
+ }
17406
17761
  buildSpecContent(params) {
17407
17762
  const { featureName, task, dependsOn, allTasks, planContent, contextFiles = [], completedTasks = [] } = params;
17408
17763
  const getTaskType = (planSection2, taskName) => {
@@ -17564,6 +17919,10 @@ ${f.content}`).join(`
17564
17919
  writeText(specPath, content);
17565
17920
  return specPath;
17566
17921
  }
17922
+ readSpec(featureName, taskFolder) {
17923
+ const specPath = getTaskSpecPath(this.projectRoot, featureName, taskFolder);
17924
+ return readText(specPath);
17925
+ }
17567
17926
  update(featureName, taskFolder, updates, lockOptions) {
17568
17927
  const statusPath = getTaskStatusPath(this.projectRoot, featureName, taskFolder);
17569
17928
  if (!fileExists(statusPath)) {
@@ -17650,6 +18009,17 @@ ${f.content}`).join(`
17650
18009
  const orders = existingFolders.map((f) => parseInt(f.split("-")[0], 10)).filter((n) => !isNaN(n));
17651
18010
  return Math.max(...orders, 0) + 1;
17652
18011
  }
18012
+ validateManualTaskDependsOn(featureName, dependsOn) {
18013
+ for (const dependency of dependsOn) {
18014
+ const dependencyStatus = this.getRawStatus(featureName, dependency);
18015
+ if (!dependencyStatus) {
18016
+ throw new Error(`Manual tasks are append-only: dependency "${dependency}" does not exist. ` + `Dependencies on unfinished work require plan amendment.`);
18017
+ }
18018
+ if (dependencyStatus.status !== "done") {
18019
+ throw new Error(`Manual tasks are append-only: dependency "${dependency}" is ${dependencyStatus.status}, not done. ` + `Dependencies on unfinished work require plan amendment.`);
18020
+ }
18021
+ }
18022
+ }
17653
18023
  parseTasksFromPlan(content) {
17654
18024
  const tasks = [];
17655
18025
  const lines = content.split(`
@@ -17860,6 +18230,63 @@ _Add detailed instructions here_
17860
18230
  const subtaskOrder = subtaskId.split(".")[1];
17861
18231
  return folders.find((f) => f.startsWith(`${subtaskOrder}-`)) || null;
17862
18232
  }
18233
+ buildManualTaskSpec(featureName, folder, name, dependsOn, metadata) {
18234
+ const lines = [
18235
+ `# Task: ${folder}`,
18236
+ "",
18237
+ `## Feature: ${featureName}`,
18238
+ "",
18239
+ "## Dependencies",
18240
+ ""
18241
+ ];
18242
+ if (dependsOn.length > 0) {
18243
+ for (const dep of dependsOn) {
18244
+ lines.push(`- ${dep}`);
18245
+ }
18246
+ } else {
18247
+ lines.push("_None_");
18248
+ }
18249
+ lines.push("");
18250
+ if (metadata?.goal) {
18251
+ lines.push("## Goal", "", metadata.goal, "");
18252
+ }
18253
+ if (metadata?.description) {
18254
+ lines.push("## Description", "", metadata.description, "");
18255
+ }
18256
+ if (metadata?.acceptanceCriteria && metadata.acceptanceCriteria.length > 0) {
18257
+ lines.push("## Acceptance Criteria", "");
18258
+ for (const criterion of metadata.acceptanceCriteria) {
18259
+ lines.push(`- ${criterion}`);
18260
+ }
18261
+ lines.push("");
18262
+ }
18263
+ if (metadata?.files && metadata.files.length > 0) {
18264
+ lines.push("## Files", "");
18265
+ for (const file2 of metadata.files) {
18266
+ lines.push(`- ${file2}`);
18267
+ }
18268
+ lines.push("");
18269
+ }
18270
+ if (metadata?.references && metadata.references.length > 0) {
18271
+ lines.push("## References", "");
18272
+ for (const ref of metadata.references) {
18273
+ lines.push(`- ${ref}`);
18274
+ }
18275
+ lines.push("");
18276
+ }
18277
+ if (metadata?.source || metadata?.reason) {
18278
+ lines.push("## Origin", "");
18279
+ if (metadata?.source) {
18280
+ lines.push(`**Source:** ${metadata.source}`);
18281
+ }
18282
+ if (metadata?.reason) {
18283
+ lines.push(`**Reason:** ${metadata.reason}`);
18284
+ }
18285
+ lines.push("");
18286
+ }
18287
+ return lines.join(`
18288
+ `);
18289
+ }
17863
18290
  slugify(name) {
17864
18291
  return name.toLowerCase().replace(/\s+/g, "-").replace(/[^a-z0-9-]/g, "");
17865
18292
  }
@@ -22084,19 +22511,27 @@ class WorktreeService {
22084
22511
  const worktreePath = this.getWorktreePath(feature, step);
22085
22512
  const branchName = this.getBranchName(feature, step);
22086
22513
  const git = this.getGit();
22514
+ let worktreeRemoved = false;
22515
+ let branchDeleted = false;
22516
+ let pruned = false;
22087
22517
  try {
22088
22518
  await git.raw(["worktree", "remove", worktreePath, "--force"]);
22519
+ worktreeRemoved = true;
22089
22520
  } catch {
22090
22521
  await fs7.rm(worktreePath, { recursive: true, force: true });
22522
+ worktreeRemoved = true;
22091
22523
  }
22092
22524
  try {
22093
22525
  await git.raw(["worktree", "prune"]);
22526
+ pruned = true;
22094
22527
  } catch {}
22095
22528
  if (deleteBranch) {
22096
22529
  try {
22097
22530
  await git.deleteLocalBranch(branchName, true);
22531
+ branchDeleted = true;
22098
22532
  } catch {}
22099
22533
  }
22534
+ return { worktreeRemoved, branchDeleted, pruned };
22100
22535
  }
22101
22536
  async list(feature) {
22102
22537
  const worktreesDir = this.getWorktreesDir();
@@ -22228,34 +22663,61 @@ class WorktreeService {
22228
22663
  };
22229
22664
  }
22230
22665
  }
22231
- async merge(feature, step, strategy = "merge", message) {
22666
+ async merge(feature, step, strategy = "merge", message, options = {}) {
22232
22667
  const branchName = this.getBranchName(feature, step);
22233
22668
  const git = this.getGit();
22669
+ const cleanupMode = options.cleanup ?? "none";
22670
+ const preserveConflicts = options.preserveConflicts ?? false;
22671
+ const emptyCleanup = {
22672
+ worktreeRemoved: false,
22673
+ branchDeleted: false,
22674
+ pruned: false
22675
+ };
22234
22676
  if (strategy === "rebase" && message) {
22235
22677
  return {
22236
22678
  success: false,
22237
22679
  merged: false,
22680
+ strategy,
22681
+ filesChanged: [],
22682
+ conflicts: [],
22683
+ conflictState: "none",
22684
+ cleanup: emptyCleanup,
22238
22685
  error: "Custom merge message is not supported for rebase strategy"
22239
22686
  };
22240
22687
  }
22688
+ let filesChanged = [];
22241
22689
  try {
22242
22690
  const branches = await git.branch();
22243
22691
  if (!branches.all.includes(branchName)) {
22244
- return { success: false, merged: false, error: `Branch ${branchName} not found` };
22692
+ return {
22693
+ success: false,
22694
+ merged: false,
22695
+ strategy,
22696
+ filesChanged: [],
22697
+ conflicts: [],
22698
+ conflictState: "none",
22699
+ cleanup: emptyCleanup,
22700
+ error: `Branch ${branchName} not found`
22701
+ };
22245
22702
  }
22246
22703
  const currentBranch = branches.current;
22247
22704
  const diffStat = await git.diff([`${currentBranch}...${branchName}`, "--stat"]);
22248
- const filesChanged = diffStat.split(`
22705
+ filesChanged = diffStat.split(`
22249
22706
  `).filter((l) => l.trim() && l.includes("|")).map((l) => l.split("|")[0].trim());
22250
22707
  if (strategy === "squash") {
22251
22708
  await git.raw(["merge", "--squash", branchName]);
22252
22709
  const squashMessage = message || `hive: merge ${step} (squashed)`;
22253
22710
  const result = await git.commit(squashMessage);
22711
+ const cleanup = cleanupMode === "none" ? emptyCleanup : await this.remove(feature, step, cleanupMode === "worktree+branch");
22254
22712
  return {
22255
22713
  success: true,
22256
22714
  merged: true,
22715
+ strategy,
22257
22716
  sha: result.commit,
22258
- filesChanged
22717
+ filesChanged,
22718
+ conflicts: [],
22719
+ conflictState: "none",
22720
+ cleanup
22259
22721
  };
22260
22722
  } else if (strategy === "rebase") {
22261
22723
  const commits = await git.log([`${currentBranch}..${branchName}`]);
@@ -22264,40 +22726,62 @@ class WorktreeService {
22264
22726
  await git.raw(["cherry-pick", commit.hash]);
22265
22727
  }
22266
22728
  const head = (await git.revparse(["HEAD"])).trim();
22729
+ const cleanup = cleanupMode === "none" ? emptyCleanup : await this.remove(feature, step, cleanupMode === "worktree+branch");
22267
22730
  return {
22268
22731
  success: true,
22269
22732
  merged: true,
22733
+ strategy,
22270
22734
  sha: head,
22271
- filesChanged
22735
+ filesChanged,
22736
+ conflicts: [],
22737
+ conflictState: "none",
22738
+ cleanup
22272
22739
  };
22273
22740
  } else {
22274
22741
  const mergeMessage = message || `hive: merge ${step}`;
22275
22742
  const result = await git.merge([branchName, "--no-ff", "-m", mergeMessage]);
22276
22743
  const head = (await git.revparse(["HEAD"])).trim();
22744
+ const cleanup = cleanupMode === "none" ? emptyCleanup : await this.remove(feature, step, cleanupMode === "worktree+branch");
22277
22745
  return {
22278
22746
  success: true,
22279
22747
  merged: !result.failed,
22748
+ strategy,
22280
22749
  sha: head,
22281
22750
  filesChanged,
22282
- conflicts: result.conflicts?.map((c) => c.file || String(c)) || []
22751
+ conflicts: result.conflicts?.map((c) => c.file || String(c)) || [],
22752
+ conflictState: "none",
22753
+ cleanup
22283
22754
  };
22284
22755
  }
22285
22756
  } catch (error45) {
22286
22757
  const err = error45;
22287
22758
  if (err.message?.includes("CONFLICT") || err.message?.includes("conflict")) {
22288
- await git.raw(["merge", "--abort"]).catch(() => {});
22289
- await git.raw(["rebase", "--abort"]).catch(() => {});
22290
- await git.raw(["cherry-pick", "--abort"]).catch(() => {});
22759
+ const conflicts2 = await this.getActiveConflictFiles(git, err.message || "");
22760
+ const conflictState = preserveConflicts ? "preserved" : "aborted";
22761
+ if (!preserveConflicts) {
22762
+ await git.raw(["merge", "--abort"]).catch(() => {});
22763
+ await git.raw(["rebase", "--abort"]).catch(() => {});
22764
+ await git.raw(["cherry-pick", "--abort"]).catch(() => {});
22765
+ }
22291
22766
  return {
22292
22767
  success: false,
22293
22768
  merged: false,
22294
- error: "Merge conflicts detected",
22295
- conflicts: this.parseConflictsFromError(err.message || "")
22769
+ strategy,
22770
+ filesChanged,
22771
+ conflicts: conflicts2,
22772
+ conflictState,
22773
+ cleanup: emptyCleanup,
22774
+ error: "Merge conflicts detected"
22296
22775
  };
22297
22776
  }
22298
22777
  return {
22299
22778
  success: false,
22300
22779
  merged: false,
22780
+ strategy,
22781
+ filesChanged,
22782
+ conflicts: [],
22783
+ conflictState: "none",
22784
+ cleanup: emptyCleanup,
22301
22785
  error: err.message || "Merge failed"
22302
22786
  };
22303
22787
  }
@@ -22325,11 +22809,31 @@ class WorktreeService {
22325
22809
  }
22326
22810
  return conflicts2;
22327
22811
  }
22812
+ async getActiveConflictFiles(git, errorMessage) {
22813
+ try {
22814
+ const status = await git.status();
22815
+ if (status.conflicted.length > 0) {
22816
+ return [...new Set(status.conflicted)];
22817
+ }
22818
+ } catch {}
22819
+ return this.parseConflictsFromError(errorMessage);
22820
+ }
22328
22821
  }
22329
22822
  // ../hive-core/src/services/contextService.ts
22330
22823
  import * as fs8 from "fs";
22331
22824
  import * as path6 from "path";
22332
22825
  var RESERVED_OVERVIEW_CONTEXT = "overview";
22826
+ var DEFAULT_CONTEXT_CLASSIFICATION = {
22827
+ role: "durable",
22828
+ includeInExecution: true,
22829
+ includeInAgentsMdSync: true,
22830
+ includeInNetwork: true
22831
+ };
22832
+ var SPECIAL_CONTEXTS = {
22833
+ overview: { role: "human", includeInExecution: false, includeInAgentsMdSync: false, includeInNetwork: false },
22834
+ draft: { role: "scratchpad", includeInExecution: false, includeInAgentsMdSync: false, includeInNetwork: false },
22835
+ "execution-decisions": { role: "operational", includeInExecution: false, includeInAgentsMdSync: false, includeInNetwork: false }
22836
+ };
22333
22837
 
22334
22838
  class ContextService {
22335
22839
  projectRoot;
@@ -22358,15 +22862,18 @@ class ContextService {
22358
22862
  const contextPath = getContextPath(this.projectRoot, featureName);
22359
22863
  if (!fileExists(contextPath))
22360
22864
  return [];
22361
- const files = fs8.readdirSync(contextPath, { withFileTypes: true }).filter((f) => f.isFile() && f.name.endsWith(".md")).map((f) => f.name);
22865
+ const files = fs8.readdirSync(contextPath, { withFileTypes: true }).filter((f) => f.isFile() && f.name.endsWith(".md")).map((f) => f.name).sort((a, b) => a.localeCompare(b));
22362
22866
  return files.map((name) => {
22363
22867
  const filePath = path6.join(contextPath, name);
22364
22868
  const stat2 = fs8.statSync(filePath);
22365
22869
  const content = readText(filePath) || "";
22870
+ const normalizedName = name.replace(/\.md$/, "");
22871
+ const classification = this.classifyContextName(normalizedName);
22366
22872
  return {
22367
- name: name.replace(/\.md$/, ""),
22873
+ name: normalizedName,
22368
22874
  content,
22369
- updatedAt: stat2.mtime.toISOString()
22875
+ updatedAt: stat2.mtime.toISOString(),
22876
+ ...classification
22370
22877
  };
22371
22878
  });
22372
22879
  }
@@ -22374,7 +22881,13 @@ class ContextService {
22374
22881
  return this.list(featureName).find((file2) => file2.name === RESERVED_OVERVIEW_CONTEXT) ?? null;
22375
22882
  }
22376
22883
  listExecutionContext(featureName) {
22377
- return this.list(featureName).filter((file2) => file2.name !== RESERVED_OVERVIEW_CONTEXT);
22884
+ return this.list(featureName).filter((file2) => file2.includeInExecution);
22885
+ }
22886
+ listAgentsMdSyncContext(featureName) {
22887
+ return this.list(featureName).filter((file2) => file2.includeInAgentsMdSync);
22888
+ }
22889
+ listNetworkContext(featureName) {
22890
+ return this.list(featureName).filter((file2) => file2.includeInNetwork);
22378
22891
  }
22379
22892
  delete(featureName, fileName) {
22380
22893
  const contextPath = getContextPath(this.projectRoot, featureName);
@@ -22433,10 +22946,101 @@ ${f.content}`);
22433
22946
  const normalized = name.replace(/\.md$/, "");
22434
22947
  return `${normalized}.md`;
22435
22948
  }
22949
+ classifyContextName(name) {
22950
+ return SPECIAL_CONTEXTS[name] ?? DEFAULT_CONTEXT_CLASSIFICATION;
22951
+ }
22436
22952
  }
22437
- // ../hive-core/src/services/sessionService.ts
22953
+ // ../hive-core/src/services/networkService.ts
22438
22954
  import * as fs9 from "fs";
22439
22955
  import * as path7 from "path";
22956
+ class NetworkService {
22957
+ projectRoot;
22958
+ contextService;
22959
+ constructor(projectRoot) {
22960
+ this.projectRoot = projectRoot;
22961
+ this.contextService = new ContextService(projectRoot);
22962
+ }
22963
+ query(options) {
22964
+ const normalizedQuery = normalizeText(options.query);
22965
+ if (!normalizedQuery) {
22966
+ return [];
22967
+ }
22968
+ const matchingFeatures = listFeatureDirectories(this.projectRoot).map((entry) => entry.logicalName).filter((featureName) => featureName !== options.currentFeature).sort((left, right) => left.localeCompare(right)).map((featureName) => ({
22969
+ featureName,
22970
+ matches: this.collectMatches(featureName, normalizedQuery, options)
22971
+ })).filter((entry) => entry.matches.length > 0).slice(0, options.maxFeatures);
22972
+ return matchingFeatures.flatMap((entry) => entry.matches);
22973
+ }
22974
+ collectMatches(featureName, normalizedQuery, options) {
22975
+ const candidates = [];
22976
+ const planMatch = this.matchPlan(featureName, normalizedQuery, options.maxSnippetChars);
22977
+ if (planMatch) {
22978
+ candidates.push(planMatch);
22979
+ }
22980
+ const contextMatches = this.contextService.listNetworkContext(featureName).sort((left, right) => left.name.localeCompare(right.name)).map((contextFile) => this.matchContext(featureName, contextFile, normalizedQuery, options.maxSnippetChars)).filter((result) => result !== null);
22981
+ candidates.push(...contextMatches);
22982
+ return candidates.sort((left, right) => {
22983
+ if (left.sortRank !== right.sortRank) {
22984
+ return left.sortRank - right.sortRank;
22985
+ }
22986
+ return left.sourceName.localeCompare(right.sourceName);
22987
+ }).slice(0, options.maxSnippetsPerFeature).map(({ sortRank: _sortRank, ...result }) => result);
22988
+ }
22989
+ matchPlan(featureName, normalizedQuery, maxSnippetChars) {
22990
+ const planPath = getPlanPath(this.projectRoot, featureName);
22991
+ const content = readText(planPath);
22992
+ if (content === null) {
22993
+ return null;
22994
+ }
22995
+ const snippet = extractSnippet(content, normalizedQuery, maxSnippetChars);
22996
+ if (!snippet) {
22997
+ return null;
22998
+ }
22999
+ const stat2 = fs9.statSync(planPath);
23000
+ return {
23001
+ feature: featureName,
23002
+ sourceType: "plan",
23003
+ sourceName: "plan.md",
23004
+ path: planPath,
23005
+ updatedAt: stat2.mtime.toISOString(),
23006
+ snippet,
23007
+ sortRank: 0
23008
+ };
23009
+ }
23010
+ matchContext(featureName, contextFile, normalizedQuery, maxSnippetChars) {
23011
+ const snippet = extractSnippet(contextFile.content, normalizedQuery, maxSnippetChars);
23012
+ if (!snippet) {
23013
+ return null;
23014
+ }
23015
+ return {
23016
+ feature: featureName,
23017
+ sourceType: "context",
23018
+ sourceName: contextFile.name,
23019
+ path: path7.join(this.projectRoot, ".hive", "features", this.resolveDirectoryName(featureName), "context", `${contextFile.name}.md`),
23020
+ updatedAt: contextFile.updatedAt,
23021
+ snippet,
23022
+ sortRank: 1
23023
+ };
23024
+ }
23025
+ resolveDirectoryName(featureName) {
23026
+ const match = listFeatureDirectories(this.projectRoot).find((entry) => entry.logicalName === featureName);
23027
+ return match?.directoryName ?? featureName;
23028
+ }
23029
+ }
23030
+ function normalizeText(value) {
23031
+ return value.toLowerCase().replace(/\s+/g, " ").trim();
23032
+ }
23033
+ function extractSnippet(content, normalizedQuery, maxSnippetChars) {
23034
+ const normalizedContent = content.replace(/\s+/g, " ").trim();
23035
+ const matchIndex = normalizedContent.toLowerCase().indexOf(normalizedQuery);
23036
+ if (matchIndex === -1) {
23037
+ return null;
23038
+ }
23039
+ return normalizedContent.slice(matchIndex, matchIndex + maxSnippetChars).trim();
23040
+ }
23041
+ // ../hive-core/src/services/sessionService.ts
23042
+ import * as fs10 from "fs";
23043
+ import * as path8 from "path";
22440
23044
  class SessionService {
22441
23045
  projectRoot;
22442
23046
  constructor(projectRoot) {
@@ -22447,10 +23051,14 @@ class SessionService {
22447
23051
  return;
22448
23052
  }
22449
23053
  const { sessionId: _sessionId, ...rest } = patch;
22450
- Object.assign(target, rest);
23054
+ for (const [key, value] of Object.entries(rest)) {
23055
+ if (value !== undefined || key === "directiveRecoveryState") {
23056
+ target[key] = value;
23057
+ }
23058
+ }
22451
23059
  }
22452
23060
  getSessionsPath(featureName) {
22453
- return path7.join(getFeaturePath(this.projectRoot, featureName), "sessions.json");
23061
+ return path8.join(getFeaturePath(this.projectRoot, featureName), "sessions.json");
22454
23062
  }
22455
23063
  getSessions(featureName) {
22456
23064
  const sessionsPath = this.getSessionsPath(featureName);
@@ -22458,7 +23066,7 @@ class SessionService {
22458
23066
  }
22459
23067
  saveSessions(featureName, data) {
22460
23068
  const sessionsPath = this.getSessionsPath(featureName);
22461
- ensureDir(path7.dirname(sessionsPath));
23069
+ ensureDir(path8.dirname(sessionsPath));
22462
23070
  writeJson(sessionsPath, data);
22463
23071
  }
22464
23072
  getGlobalSessions() {
@@ -22467,12 +23075,12 @@ class SessionService {
22467
23075
  }
22468
23076
  saveGlobalSessions(data) {
22469
23077
  const globalPath = getGlobalSessionsPath(this.projectRoot);
22470
- ensureDir(path7.dirname(globalPath));
23078
+ ensureDir(path8.dirname(globalPath));
22471
23079
  writeJson(globalPath, data);
22472
23080
  }
22473
23081
  updateGlobalSessions(mutator) {
22474
23082
  const globalPath = getGlobalSessionsPath(this.projectRoot);
22475
- ensureDir(path7.dirname(globalPath));
23083
+ ensureDir(path8.dirname(globalPath));
22476
23084
  const release = acquireLockSync(globalPath);
22477
23085
  try {
22478
23086
  const data = readJson(globalPath) || { sessions: [] };
@@ -22590,10 +23198,10 @@ class SessionService {
22590
23198
  if (globalSession?.featureName) {
22591
23199
  return globalSession.featureName;
22592
23200
  }
22593
- const featuresPath = path7.join(this.projectRoot, ".hive", "features");
22594
- if (!fs9.existsSync(featuresPath))
23201
+ const featuresPath = path8.join(this.projectRoot, ".hive", "features");
23202
+ if (!fs10.existsSync(featuresPath))
22595
23203
  return null;
22596
- const features = fs9.readdirSync(featuresPath, { withFileTypes: true }).filter((d) => d.isDirectory()).map((d) => d.name);
23204
+ const features = fs10.readdirSync(featuresPath, { withFileTypes: true }).filter((d) => d.isDirectory()).map((d) => d.name);
22597
23205
  for (const feature of features) {
22598
23206
  const sessions = this.getSessions(feature);
22599
23207
  if (sessions.sessions.some((s) => s.sessionId === sessionId)) {
@@ -22635,16 +23243,26 @@ class SessionService {
22635
23243
  }
22636
23244
  }
22637
23245
  // ../hive-core/src/services/configService.ts
22638
- import * as fs10 from "fs";
22639
- import * as path8 from "path";
23246
+ import * as fs11 from "fs";
23247
+ import * as path9 from "path";
22640
23248
  class ConfigService {
22641
23249
  configPath;
23250
+ projectConfigPath;
23251
+ legacyProjectConfigPath;
22642
23252
  cachedConfig = null;
22643
23253
  cachedCustomAgentConfigs = null;
22644
- constructor() {
23254
+ activeReadSourceType = "global";
23255
+ activeReadPath;
23256
+ lastFallbackWarning = null;
23257
+ constructor(projectRoot) {
22645
23258
  const homeDir = process.env.HOME || process.env.USERPROFILE || "";
22646
- const configDir = path8.join(homeDir, ".config", "opencode");
22647
- this.configPath = path8.join(configDir, "agent_hive.json");
23259
+ const configDir = path9.join(homeDir, ".config", "opencode");
23260
+ this.configPath = path9.join(configDir, "agent_hive.json");
23261
+ this.activeReadPath = this.configPath;
23262
+ if (projectRoot) {
23263
+ this.projectConfigPath = path9.join(projectRoot, ".hive", "agent-hive.json");
23264
+ this.legacyProjectConfigPath = path9.join(projectRoot, ".opencode", "agent_hive.json");
23265
+ }
22648
23266
  }
22649
23267
  getPath() {
22650
23268
  return this.configPath;
@@ -22653,43 +23271,88 @@ class ConfigService {
22653
23271
  if (this.cachedConfig !== null) {
22654
23272
  return this.cachedConfig;
22655
23273
  }
22656
- try {
22657
- if (!fs10.existsSync(this.configPath)) {
22658
- this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22659
- this.cachedCustomAgentConfigs = null;
23274
+ if (this.projectConfigPath && fs11.existsSync(this.projectConfigPath)) {
23275
+ const projectStored = this.readStoredConfig(this.projectConfigPath);
23276
+ if (projectStored.ok) {
23277
+ this.activeReadSourceType = "project";
23278
+ this.activeReadPath = this.projectConfigPath;
23279
+ this.lastFallbackWarning = null;
23280
+ this.cachedConfig = this.mergeWithDefaults(projectStored.value);
22660
23281
  return this.cachedConfig;
22661
23282
  }
22662
- const raw = fs10.readFileSync(this.configPath, "utf-8");
22663
- const stored = JSON.parse(raw);
22664
- const storedCustomAgents = this.isObjectRecord(stored.customAgents) ? stored.customAgents : {};
22665
- const mergedBuiltInAgents = BUILT_IN_AGENT_NAMES.reduce((acc, agentName) => {
22666
- acc[agentName] = {
22667
- ...DEFAULT_HIVE_CONFIG.agents?.[agentName],
22668
- ...stored.agents?.[agentName]
22669
- };
22670
- return acc;
22671
- }, {});
22672
- const merged = {
22673
- ...DEFAULT_HIVE_CONFIG,
22674
- ...stored,
22675
- agents: {
22676
- ...DEFAULT_HIVE_CONFIG.agents,
22677
- ...stored.agents,
22678
- ...mergedBuiltInAgents
22679
- },
22680
- customAgents: {
22681
- ...DEFAULT_HIVE_CONFIG.customAgents,
22682
- ...storedCustomAgents
22683
- }
22684
- };
22685
- this.cachedConfig = merged;
23283
+ const fallbackReason2 = "reason" in projectStored ? projectStored.reason : "read_error";
23284
+ this.lastFallbackWarning = this.createProjectFallbackWarning(this.projectConfigPath, fallbackReason2);
23285
+ } else if (this.legacyProjectConfigPath && fs11.existsSync(this.legacyProjectConfigPath)) {
23286
+ const projectStored = this.readStoredConfig(this.legacyProjectConfigPath);
23287
+ if (projectStored.ok) {
23288
+ this.activeReadSourceType = "project";
23289
+ this.activeReadPath = this.legacyProjectConfigPath;
23290
+ this.lastFallbackWarning = null;
23291
+ this.cachedConfig = this.mergeWithDefaults(projectStored.value);
23292
+ return this.cachedConfig;
23293
+ }
23294
+ const fallbackReason2 = "reason" in projectStored ? projectStored.reason : "read_error";
23295
+ this.lastFallbackWarning = this.createProjectFallbackWarning(this.legacyProjectConfigPath, fallbackReason2);
23296
+ }
23297
+ if (!this.projectConfigPath && !this.legacyProjectConfigPath) {
23298
+ this.lastFallbackWarning = null;
23299
+ }
23300
+ if (!fs11.existsSync(this.configPath)) {
23301
+ this.activeReadSourceType = "global";
23302
+ this.activeReadPath = this.configPath;
23303
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
22686
23304
  this.cachedCustomAgentConfigs = null;
23305
+ if (this.lastFallbackWarning && this.lastFallbackWarning.fallbackType !== "defaults") {
23306
+ this.lastFallbackWarning = {
23307
+ message: `Failed to read project config at ${this.lastFallbackWarning.sourcePath}; global config at ${this.configPath} is missing; using defaults`,
23308
+ sourceType: this.lastFallbackWarning.sourceType,
23309
+ sourcePath: this.lastFallbackWarning.sourcePath,
23310
+ fallbackType: "defaults",
23311
+ reason: this.lastFallbackWarning.reason
23312
+ };
23313
+ }
22687
23314
  return this.cachedConfig;
22688
- } catch {
22689
- this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
23315
+ }
23316
+ const globalStored = this.readStoredConfig(this.configPath);
23317
+ if (globalStored.ok) {
23318
+ this.activeReadSourceType = "global";
23319
+ this.activeReadPath = this.configPath;
23320
+ this.cachedConfig = this.mergeWithDefaults(globalStored.value);
22690
23321
  this.cachedCustomAgentConfigs = null;
22691
23322
  return this.cachedConfig;
22692
23323
  }
23324
+ const fallbackReason = "reason" in globalStored ? globalStored.reason : "read_error";
23325
+ this.activeReadSourceType = "global";
23326
+ this.activeReadPath = this.configPath;
23327
+ this.cachedConfig = { ...DEFAULT_HIVE_CONFIG };
23328
+ this.cachedCustomAgentConfigs = null;
23329
+ if (this.lastFallbackWarning) {
23330
+ this.lastFallbackWarning = {
23331
+ message: `Failed to read project config at ${this.lastFallbackWarning.sourcePath}; global config at ${this.configPath} is also invalid; using defaults`,
23332
+ sourceType: this.lastFallbackWarning.sourceType,
23333
+ sourcePath: this.lastFallbackWarning.sourcePath,
23334
+ fallbackType: "defaults",
23335
+ reason: this.lastFallbackWarning.reason
23336
+ };
23337
+ return this.cachedConfig;
23338
+ }
23339
+ this.lastFallbackWarning = {
23340
+ message: `Failed to read global config at ${this.configPath}; using defaults`,
23341
+ sourceType: "global",
23342
+ sourcePath: this.configPath,
23343
+ fallbackType: "defaults",
23344
+ reason: fallbackReason
23345
+ };
23346
+ return this.cachedConfig;
23347
+ }
23348
+ getActiveReadSourceType() {
23349
+ return this.activeReadSourceType;
23350
+ }
23351
+ getActiveReadPath() {
23352
+ return this.activeReadPath;
23353
+ }
23354
+ getLastFallbackWarning() {
23355
+ return this.lastFallbackWarning;
22693
23356
  }
22694
23357
  set(updates) {
22695
23358
  this.cachedConfig = null;
@@ -22707,30 +23370,34 @@ class ConfigService {
22707
23370
  ...updates.customAgents
22708
23371
  } : current.customAgents
22709
23372
  };
22710
- const configDir = path8.dirname(this.configPath);
22711
- if (!fs10.existsSync(configDir)) {
22712
- fs10.mkdirSync(configDir, { recursive: true });
23373
+ const configDir = path9.dirname(this.configPath);
23374
+ if (!fs11.existsSync(configDir)) {
23375
+ fs11.mkdirSync(configDir, { recursive: true });
22713
23376
  }
22714
- fs10.writeFileSync(this.configPath, JSON.stringify(merged, null, 2));
23377
+ fs11.writeFileSync(this.configPath, JSON.stringify(merged, null, 2));
22715
23378
  this.cachedConfig = merged;
22716
23379
  this.cachedCustomAgentConfigs = null;
22717
23380
  return merged;
22718
23381
  }
22719
23382
  exists() {
22720
- return fs10.existsSync(this.configPath);
23383
+ return fs11.existsSync(this.configPath);
22721
23384
  }
22722
23385
  init() {
23386
+ const resolved = this.get();
23387
+ if (this.projectConfigPath || this.legacyProjectConfigPath) {
23388
+ return resolved;
23389
+ }
22723
23390
  if (!this.exists()) {
22724
23391
  return this.set(DEFAULT_HIVE_CONFIG);
22725
23392
  }
22726
- return this.get();
23393
+ return resolved;
22727
23394
  }
22728
23395
  getAgentConfig(agent) {
22729
23396
  const config2 = this.get();
22730
23397
  if (this.isBuiltInAgent(agent)) {
22731
23398
  const agentConfig = config2.agents?.[agent] ?? {};
22732
23399
  const defaultAutoLoadSkills = DEFAULT_HIVE_CONFIG.agents?.[agent]?.autoLoadSkills ?? [];
22733
- const effectiveAutoLoadSkills = this.resolveAutoLoadSkills(defaultAutoLoadSkills, agentConfig.autoLoadSkills ?? [], this.isPlannerAgent(agent));
23400
+ const effectiveAutoLoadSkills = agent === "hive-helper" ? defaultAutoLoadSkills : this.resolveAutoLoadSkills(defaultAutoLoadSkills, agentConfig.autoLoadSkills ?? [], this.isPlannerAgent(agent));
22734
23401
  return {
22735
23402
  ...agentConfig,
22736
23403
  autoLoadSkills: effectiveAutoLoadSkills
@@ -22851,10 +23518,139 @@ class ConfigService {
22851
23518
  }
22852
23519
  return configuredCadence;
22853
23520
  }
23521
+ readStoredConfig(configPath) {
23522
+ try {
23523
+ const raw = fs11.readFileSync(configPath, "utf-8");
23524
+ const parsed = JSON.parse(raw);
23525
+ if (parsed === null || typeof parsed !== "object" || Array.isArray(parsed)) {
23526
+ return { ok: false, reason: "validation_error" };
23527
+ }
23528
+ if (!this.isValidStoredConfig(parsed)) {
23529
+ return { ok: false, reason: "validation_error" };
23530
+ }
23531
+ return { ok: true, value: parsed };
23532
+ } catch (error45) {
23533
+ if (error45 instanceof SyntaxError) {
23534
+ return { ok: false, reason: "parse_error" };
23535
+ }
23536
+ return { ok: false, reason: "read_error" };
23537
+ }
23538
+ }
23539
+ mergeWithDefaults(stored) {
23540
+ const storedCustomAgents = this.isObjectRecord(stored.customAgents) ? stored.customAgents : {};
23541
+ const mergedBuiltInAgents = BUILT_IN_AGENT_NAMES.reduce((acc, agentName) => {
23542
+ acc[agentName] = {
23543
+ ...DEFAULT_HIVE_CONFIG.agents?.[agentName],
23544
+ ...stored.agents?.[agentName]
23545
+ };
23546
+ return acc;
23547
+ }, {});
23548
+ return {
23549
+ ...DEFAULT_HIVE_CONFIG,
23550
+ ...stored,
23551
+ agents: {
23552
+ ...DEFAULT_HIVE_CONFIG.agents,
23553
+ ...stored.agents,
23554
+ ...mergedBuiltInAgents
23555
+ },
23556
+ customAgents: {
23557
+ ...DEFAULT_HIVE_CONFIG.customAgents,
23558
+ ...storedCustomAgents
23559
+ }
23560
+ };
23561
+ }
23562
+ createProjectFallbackWarning(projectConfigPath, reason) {
23563
+ return {
23564
+ message: `Failed to read project config at ${projectConfigPath}; using global config at ${this.configPath}`,
23565
+ sourceType: "project",
23566
+ sourcePath: projectConfigPath,
23567
+ fallbackType: "global",
23568
+ fallbackPath: this.configPath,
23569
+ reason
23570
+ };
23571
+ }
23572
+ isValidStoredConfig(value) {
23573
+ if (!this.isObjectRecord(value)) {
23574
+ return false;
23575
+ }
23576
+ const config2 = value;
23577
+ if (config2.$schema !== undefined && typeof config2.$schema !== "string") {
23578
+ return false;
23579
+ }
23580
+ if (config2.enableToolsFor !== undefined && !this.isStringArray(config2.enableToolsFor)) {
23581
+ return false;
23582
+ }
23583
+ if (config2.disableSkills !== undefined && !this.isStringArray(config2.disableSkills)) {
23584
+ return false;
23585
+ }
23586
+ if (config2.disableMcps !== undefined && !this.isStringArray(config2.disableMcps)) {
23587
+ return false;
23588
+ }
23589
+ if (config2.omoSlimEnabled !== undefined && typeof config2.omoSlimEnabled !== "boolean") {
23590
+ return false;
23591
+ }
23592
+ if (config2.agentMode !== undefined && config2.agentMode !== "unified" && config2.agentMode !== "dedicated") {
23593
+ return false;
23594
+ }
23595
+ if (config2.agents !== undefined && !this.isObjectRecord(config2.agents)) {
23596
+ return false;
23597
+ }
23598
+ if (this.isObjectRecord(config2.agents)) {
23599
+ for (const declaration of Object.values(config2.agents)) {
23600
+ if (!this.isValidAgentConfigDeclaration(declaration)) {
23601
+ return false;
23602
+ }
23603
+ }
23604
+ }
23605
+ if (config2.sandbox !== undefined && config2.sandbox !== "none" && config2.sandbox !== "docker") {
23606
+ return false;
23607
+ }
23608
+ if (config2.dockerImage !== undefined && typeof config2.dockerImage !== "string") {
23609
+ return false;
23610
+ }
23611
+ if (config2.persistentContainers !== undefined && typeof config2.persistentContainers !== "boolean") {
23612
+ return false;
23613
+ }
23614
+ if (config2.hook_cadence !== undefined && !this.isHookCadenceRecord(config2.hook_cadence)) {
23615
+ return false;
23616
+ }
23617
+ return true;
23618
+ }
23619
+ isStringArray(value) {
23620
+ return Array.isArray(value) && value.every((item) => typeof item === "string");
23621
+ }
23622
+ isValidAgentConfigDeclaration(value) {
23623
+ if (!this.isObjectRecord(value)) {
23624
+ return false;
23625
+ }
23626
+ const declaration = value;
23627
+ if (declaration.model !== undefined && typeof declaration.model !== "string") {
23628
+ return false;
23629
+ }
23630
+ if (declaration.temperature !== undefined && typeof declaration.temperature !== "number") {
23631
+ return false;
23632
+ }
23633
+ if (declaration.skills !== undefined && !this.isStringArray(declaration.skills)) {
23634
+ return false;
23635
+ }
23636
+ if (declaration.autoLoadSkills !== undefined && !this.isStringArray(declaration.autoLoadSkills)) {
23637
+ return false;
23638
+ }
23639
+ if (declaration.variant !== undefined && typeof declaration.variant !== "string") {
23640
+ return false;
23641
+ }
23642
+ return true;
23643
+ }
23644
+ isHookCadenceRecord(value) {
23645
+ if (!this.isObjectRecord(value)) {
23646
+ return false;
23647
+ }
23648
+ return Object.values(value).every((entry) => typeof entry === "number");
23649
+ }
22854
23650
  }
22855
23651
  // ../hive-core/src/services/agentsMdService.ts
22856
- import * as fs11 from "fs";
22857
- import * as path9 from "path";
23652
+ import * as fs12 from "fs";
23653
+ import * as path10 from "path";
22858
23654
  class AgentsMdService {
22859
23655
  rootDir;
22860
23656
  contextService;
@@ -22863,7 +23659,7 @@ class AgentsMdService {
22863
23659
  this.contextService = contextService;
22864
23660
  }
22865
23661
  async init() {
22866
- const agentsMdPath = path9.join(this.rootDir, "AGENTS.md");
23662
+ const agentsMdPath = path10.join(this.rootDir, "AGENTS.md");
22867
23663
  const existed = fileExists(agentsMdPath);
22868
23664
  if (existed) {
22869
23665
  const existing = readText(agentsMdPath);
@@ -22873,15 +23669,15 @@ class AgentsMdService {
22873
23669
  return { content, existed: false };
22874
23670
  }
22875
23671
  async sync(featureName) {
22876
- const contexts = this.contextService.list(featureName);
22877
- const agentsMdPath = path9.join(this.rootDir, "AGENTS.md");
22878
- const current = await fs11.promises.readFile(agentsMdPath, "utf-8").catch(() => "");
23672
+ const contexts = this.contextService.listAgentsMdSyncContext(featureName);
23673
+ const agentsMdPath = path10.join(this.rootDir, "AGENTS.md");
23674
+ const current = await fs12.promises.readFile(agentsMdPath, "utf-8").catch(() => "");
22879
23675
  const findings = this.extractFindings(contexts);
22880
23676
  const proposals = this.generateProposals(findings, current);
22881
23677
  return { proposals, diff: this.formatDiff(current, proposals) };
22882
23678
  }
22883
23679
  apply(content) {
22884
- const agentsMdPath = path9.join(this.rootDir, "AGENTS.md");
23680
+ const agentsMdPath = path10.join(this.rootDir, "AGENTS.md");
22885
23681
  const isNew = !fileExists(agentsMdPath);
22886
23682
  writeText(agentsMdPath, content);
22887
23683
  return { path: agentsMdPath, chars: content.length, isNew };
@@ -22941,7 +23737,7 @@ class AgentsMdService {
22941
23737
  return this.generateTemplate(detections);
22942
23738
  }
22943
23739
  async detectProjectInfo() {
22944
- const packageJsonPath = path9.join(this.rootDir, "package.json");
23740
+ const packageJsonPath = path10.join(this.rootDir, "package.json");
22945
23741
  let packageJson = null;
22946
23742
  if (fileExists(packageJsonPath)) {
22947
23743
  try {
@@ -22961,26 +23757,26 @@ class AgentsMdService {
22961
23757
  return info;
22962
23758
  }
22963
23759
  detectPackageManager() {
22964
- if (fileExists(path9.join(this.rootDir, "bun.lockb")))
23760
+ if (fileExists(path10.join(this.rootDir, "bun.lockb")))
22965
23761
  return "bun";
22966
- if (fileExists(path9.join(this.rootDir, "pnpm-lock.yaml")))
23762
+ if (fileExists(path10.join(this.rootDir, "pnpm-lock.yaml")))
22967
23763
  return "pnpm";
22968
- if (fileExists(path9.join(this.rootDir, "yarn.lock")))
23764
+ if (fileExists(path10.join(this.rootDir, "yarn.lock")))
22969
23765
  return "yarn";
22970
- if (fileExists(path9.join(this.rootDir, "package-lock.json")))
23766
+ if (fileExists(path10.join(this.rootDir, "package-lock.json")))
22971
23767
  return "npm";
22972
23768
  return "npm";
22973
23769
  }
22974
23770
  detectLanguage() {
22975
- if (fileExists(path9.join(this.rootDir, "tsconfig.json")))
23771
+ if (fileExists(path10.join(this.rootDir, "tsconfig.json")))
22976
23772
  return "TypeScript";
22977
- if (fileExists(path9.join(this.rootDir, "package.json")))
23773
+ if (fileExists(path10.join(this.rootDir, "package.json")))
22978
23774
  return "JavaScript";
22979
- if (fileExists(path9.join(this.rootDir, "requirements.txt")))
23775
+ if (fileExists(path10.join(this.rootDir, "requirements.txt")))
22980
23776
  return "Python";
22981
- if (fileExists(path9.join(this.rootDir, "go.mod")))
23777
+ if (fileExists(path10.join(this.rootDir, "go.mod")))
22982
23778
  return "Go";
22983
- if (fileExists(path9.join(this.rootDir, "Cargo.toml")))
23779
+ if (fileExists(path10.join(this.rootDir, "Cargo.toml")))
22984
23780
  return "Rust";
22985
23781
  return "Unknown";
22986
23782
  }
@@ -23060,24 +23856,24 @@ class AgentsMdService {
23060
23856
  }
23061
23857
  // ../hive-core/src/services/dockerSandboxService.ts
23062
23858
  import { existsSync as existsSync5 } from "fs";
23063
- import { join as join10, sep } from "path";
23859
+ import { join as join11, sep } from "path";
23064
23860
  import { execSync } from "child_process";
23065
23861
 
23066
23862
  class DockerSandboxService {
23067
23863
  static detectImage(worktreePath) {
23068
- if (existsSync5(join10(worktreePath, "Dockerfile"))) {
23864
+ if (existsSync5(join11(worktreePath, "Dockerfile"))) {
23069
23865
  return null;
23070
23866
  }
23071
- if (existsSync5(join10(worktreePath, "package.json"))) {
23867
+ if (existsSync5(join11(worktreePath, "package.json"))) {
23072
23868
  return "node:22-slim";
23073
23869
  }
23074
- if (existsSync5(join10(worktreePath, "requirements.txt")) || existsSync5(join10(worktreePath, "pyproject.toml"))) {
23870
+ if (existsSync5(join11(worktreePath, "requirements.txt")) || existsSync5(join11(worktreePath, "pyproject.toml"))) {
23075
23871
  return "python:3.12-slim";
23076
23872
  }
23077
- if (existsSync5(join10(worktreePath, "go.mod"))) {
23873
+ if (existsSync5(join11(worktreePath, "go.mod"))) {
23078
23874
  return "golang:1.22-slim";
23079
23875
  }
23080
- if (existsSync5(join10(worktreePath, "Cargo.toml"))) {
23876
+ if (existsSync5(join11(worktreePath, "Cargo.toml"))) {
23081
23877
  return "rust:1.77-slim";
23082
23878
  }
23083
23879
  return "ubuntu:24.04";
@@ -23261,11 +24057,31 @@ ${spec}
23261
24057
  Before writing code, confirm:
23262
24058
  1. Dependencies are satisfied and required context is present.
23263
24059
  2. The exact files/sections to touch (from references) are identified.
23264
- 3. The first failing test to write is clear (TDD).
24060
+ 3. The verification path is clear: a failing test for new behavior, or the existing coverage to keep green for refactor-only work.
23265
24061
  4. The minimal change needed to reach green is planned.
23266
24062
 
23267
24063
  ---
23268
24064
 
24065
+ ## TDD Protocol (Required)
24066
+
24067
+ 1. **Red**: Write failing test first
24068
+ 2. **Green**: Minimal code to pass
24069
+ 3. **Refactor**: Clean up, keep tests green
24070
+
24071
+ When adding new behavior, write the test before the implementation.
24072
+ When refactoring existing tested code, keep tests green throughout; no new failing test is required.
24073
+
24074
+ ## Debugging Protocol (When stuck)
24075
+
24076
+ 1. **Reproduce**: Get consistent failure
24077
+ 2. **Isolate**: Binary search to find cause
24078
+ 3. **Hypothesize**: Form theory, test it
24079
+ 4. **Fix**: Minimal change that resolves
24080
+
24081
+ After 3 failed attempts at same fix: STOP and report blocker.
24082
+
24083
+ ---
24084
+
23269
24085
  ## Blocker Protocol
23270
24086
 
23271
24087
  If you hit a blocker requiring human decision, **DO NOT** use the question tool directly.
@@ -23300,6 +24116,24 @@ This keeps the user focused on ONE conversation (Hive Master) instead of multipl
23300
24116
 
23301
24117
  ---
23302
24118
 
24119
+ ## Verification Evidence
24120
+
24121
+ Before claiming completion, verify your work with command-first evidence proportional to the change type:
24122
+
24123
+ | Change type | Required verification |
24124
+ |---|---|
24125
+ | New behavior | Run tests covering the new code; record pass/fail counts |
24126
+ | Bug fix | Reproduce the original failure, then confirm the fix |
24127
+ | Refactor | Run existing tests; confirm no regressions |
24128
+ | Prompt / text-only | Run relevant local tests if available; otherwise do file-specific sanity checks such as generation, syntax/parse, or conflict-marker scans |
24129
+
24130
+ **Rules:**
24131
+ - Run the command, then record observed output. Do not substitute explanation for execution.
24132
+ - If a check cannot be run (missing deps, no test runner in worktree), explicitly state "Not run: <reason>" instead of omitting it silently.
24133
+ - command-first means: execute first, interpret second. Never claim a result you have not observed.
24134
+
24135
+ ---
24136
+
23303
24137
  ## Completion Protocol
23304
24138
 
23305
24139
  When your task is **fully complete**:
@@ -23323,10 +24157,10 @@ Optional body"
23323
24157
  - Do not provide message with hive_merge(..., strategy: 'rebase').
23324
24158
 
23325
24159
  Then inspect the tool response fields:
23326
- - If \`ok=true\` and \`terminal=true\`: stop the session
23327
- - Otherwise: **DO NOT STOP**. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
24160
+ - If \`terminal=true\` (regardless of \`ok\`): stop immediately. This call is final and must not be retried with the same parameters.
24161
+ - If \`terminal=false\`: **DO NOT STOP**. Follow \`nextAction\`, remediate, and retry \`hive_worktree_commit\`
23328
24162
 
23329
- **CRITICAL: Stop only on terminal commit result (ok=true and terminal=true).**
24163
+ **CRITICAL: Any terminal commit result is final for this call.**
23330
24164
  If commit returns non-terminal (for example verification_required), DO NOT STOP.
23331
24165
  Follow result.nextAction, fix the issue, and call hive_worktree_commit again.
23332
24166
 
@@ -23364,26 +24198,6 @@ hive_worktree_commit({
23364
24198
 
23365
24199
  ---
23366
24200
 
23367
- ## TDD Protocol (Required)
23368
-
23369
- 1. **Red**: Write failing test first
23370
- 2. **Green**: Minimal code to pass
23371
- 3. **Refactor**: Clean up, keep tests green
23372
-
23373
- Never write implementation before test exists.
23374
- Exception: Pure refactoring of existing tested code.
23375
-
23376
- ## Debugging Protocol (When stuck)
23377
-
23378
- 1. **Reproduce**: Get consistent failure
23379
- 2. **Isolate**: Binary search to find cause
23380
- 3. **Hypothesize**: Form theory, test it
23381
- 4. **Fix**: Minimal change that resolves
23382
-
23383
- After 3 failed attempts at same fix: STOP and report blocker.
23384
-
23385
- ---
23386
-
23387
24201
  ## Tool Access
23388
24202
 
23389
24203
  **You have access to:**
@@ -23396,8 +24210,8 @@ After 3 failed attempts at same fix: STOP and report blocker.
23396
24210
  **You do NOT have access to (or should not use):**
23397
24211
  - \`question\` - Escalate via blocker protocol instead
23398
24212
  - \`hive_worktree_create\` - No spawning sub-workers
23399
- - \`hive_merge\` - Only Hive Master merges
23400
- - \`task\` - No recursive delegation
24213
+ - \`hive_merge\` - Only Hive/Swarm or delegated \`hive-helper\` merges; ordinary task workers must not merge or handle merge/wrap-up operational flows
24214
+ - \`task\` - No recursive delegation; only Hive/Swarm may delegate \`hive-helper\` for merge/wrap-up operational flows
23401
24215
 
23402
24216
  ---
23403
24217
 
@@ -23411,10 +24225,6 @@ After 3 failed attempts at same fix: STOP and report blocker.
23411
24225
 
23412
24226
  ---
23413
24227
 
23414
- **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
23415
-
23416
- ---
23417
-
23418
24228
  Begin your task now.
23419
24229
  `;
23420
24230
  }
@@ -23647,17 +24457,17 @@ function applyContextBudget(files, config2 = {}) {
23647
24457
  }
23648
24458
 
23649
24459
  // src/utils/prompt-file.ts
23650
- import * as fs12 from "fs";
23651
- import * as path10 from "path";
24460
+ import * as fs13 from "fs";
24461
+ import * as path11 from "path";
23652
24462
  function writeWorkerPromptFile(feature, task, prompt, hiveDir) {
23653
- const projectRoot = path10.dirname(hiveDir);
24463
+ const projectRoot = path11.dirname(hiveDir);
23654
24464
  const featureDir = resolveFeatureDirectoryName(projectRoot, feature);
23655
- const promptDir = path10.join(hiveDir, "features", featureDir, "tasks", task);
23656
- const promptPath = path10.join(promptDir, "worker-prompt.md");
23657
- if (!fs12.existsSync(promptDir)) {
23658
- fs12.mkdirSync(promptDir, { recursive: true });
24465
+ const promptDir = path11.join(hiveDir, "features", featureDir, "tasks", task);
24466
+ const promptPath = path11.join(promptDir, "worker-prompt.md");
24467
+ if (!fs13.existsSync(promptDir)) {
24468
+ fs13.mkdirSync(promptDir, { recursive: true });
23659
24469
  }
23660
- fs12.writeFileSync(promptPath, prompt, "utf-8");
24470
+ fs13.writeFileSync(promptPath, prompt, "utf-8");
23661
24471
  return promptPath;
23662
24472
  }
23663
24473
 
@@ -23674,6 +24484,7 @@ var BUILT_IN_AGENTS = {
23674
24484
  "swarm-orchestrator": { sessionKind: "primary", baseAgent: "swarm-orchestrator" },
23675
24485
  "forager-worker": { sessionKind: "task-worker", baseAgent: "forager-worker" },
23676
24486
  "scout-researcher": { sessionKind: "subagent", baseAgent: "scout-researcher" },
24487
+ "hive-helper": { sessionKind: "subagent", baseAgent: "hive-helper" },
23677
24488
  "hygienic-reviewer": { sessionKind: "subagent", baseAgent: "hygienic-reviewer" }
23678
24489
  };
23679
24490
  var BASE_AGENT_KIND = {
@@ -23743,70 +24554,37 @@ var HIVE_SYSTEM_PROMPT = `
23743
24554
  Use hive_merge to integrate changes into the current branch.
23744
24555
  `;
23745
24556
 
23746
- // src/utils/compaction-anchor.ts
23747
- var AGENT_ROLE_MAP = {
23748
- "hive-master": "Hive",
23749
- "architect-planner": "Architect",
23750
- "swarm-orchestrator": "Swarm",
23751
- "forager-worker": "Forager",
23752
- "scout-researcher": "Scout",
23753
- "hygienic-reviewer": "Hygienic"
23754
- };
23755
- var BASE_AGENT_ROLE_MAP = {
23756
- "forager-worker": "Forager",
23757
- "hygienic-reviewer": "Hygienic",
23758
- "scout-researcher": "Scout"
23759
- };
23760
- function resolveRole(ctx) {
23761
- if (ctx.agent && AGENT_ROLE_MAP[ctx.agent]) {
23762
- return AGENT_ROLE_MAP[ctx.agent];
23763
- }
23764
- if (ctx.baseAgent && BASE_AGENT_ROLE_MAP[ctx.baseAgent]) {
23765
- return BASE_AGENT_ROLE_MAP[ctx.baseAgent];
23766
- }
23767
- return;
23768
- }
23769
- function resolveWorkerPromptPath(ctx) {
23770
- if (ctx.workerPromptPath) {
23771
- return ctx.workerPromptPath;
23772
- }
23773
- if (ctx.featureName && ctx.taskFolder) {
23774
- return `.hive/features/${ctx.featureName}/tasks/${ctx.taskFolder}/worker-prompt.md`;
23775
- }
23776
- return;
23777
- }
23778
- function buildCompactionReanchor(ctx) {
23779
- const role = resolveRole(ctx);
23780
- const kind = ctx.sessionKind ?? "unknown";
23781
- const workerPromptPath = resolveWorkerPromptPath(ctx);
23782
- const lines = [];
23783
- const context = [];
23784
- lines.push("Compaction recovery — you were compacted mid-session.");
23785
- if (role) {
23786
- lines.push(`Role: ${role}`);
23787
- }
23788
- lines.push("Do not switch roles.");
23789
- lines.push("Do not call status tools to rediscover state.");
23790
- lines.push("Do not re-read the full codebase.");
23791
- if (kind === "task-worker") {
23792
- lines.push("Do not delegate.");
23793
- if (workerPromptPath) {
23794
- lines.push("Re-read worker-prompt.md now to recall your assignment.");
23795
- context.push(workerPromptPath);
23796
- } else {
23797
- lines.push("Re-read worker-prompt.md from the Hive task metadata to recall your assignment.");
23798
- }
23799
- }
23800
- if ((kind === "primary" || kind === "subagent") && ctx.directivePrompt) {
23801
- lines.push("Original directive survives via post-compaction replay.");
24557
+ // src/utils/plugin-manifest.ts
24558
+ import * as path12 from "path";
24559
+ import { fileURLToPath } from "url";
24560
+ var HIVE_COMMANDS = [
24561
+ {
24562
+ key: "hive",
24563
+ name: "/hive",
24564
+ description: "Create a new feature: /hive <feature-name>"
23802
24565
  }
23803
- lines.push("Next action: resume from where you left off.");
23804
- return {
23805
- prompt: lines.join(`
23806
- `),
23807
- context
23808
- };
23809
- }
24566
+ ];
24567
+ var HIVE_TOOL_NAMES = [
24568
+ "hive_feature_create",
24569
+ "hive_feature_complete",
24570
+ "hive_plan_write",
24571
+ "hive_plan_read",
24572
+ "hive_plan_approve",
24573
+ "hive_tasks_sync",
24574
+ "hive_task_create",
24575
+ "hive_task_update",
24576
+ "hive_worktree_start",
24577
+ "hive_worktree_create",
24578
+ "hive_worktree_commit",
24579
+ "hive_worktree_discard",
24580
+ "hive_merge",
24581
+ "hive_context_write",
24582
+ "hive_network_query",
24583
+ "hive_status",
24584
+ "hive_skill",
24585
+ "hive_agents_md"
24586
+ ];
24587
+ var packageRoot = path12.resolve(path12.dirname(fileURLToPath(import.meta.url)), "../..");
23810
24588
 
23811
24589
  // src/index.ts
23812
24590
  function formatSkillsXml(skills) {
@@ -23921,28 +24699,51 @@ No Hive skills available.` : base + formatSkillsXml(filteredSkills);
23921
24699
  }
23922
24700
  var plugin = async (ctx) => {
23923
24701
  const { directory, client, worktree } = ctx;
24702
+ const emitConfigWarning = (message) => {
24703
+ const prefixedMessage = `[hive:config] ${message}`;
24704
+ const maybeClient = client;
24705
+ const notified = typeof maybeClient.notify === "function" && maybeClient.notify({
24706
+ type: "warning",
24707
+ level: "warning",
24708
+ title: "Agent Hive Config Warning",
24709
+ message: prefixedMessage
24710
+ }) || typeof maybeClient.notification?.create === "function" && maybeClient.notification.create({
24711
+ type: "warning",
24712
+ level: "warning",
24713
+ title: "Agent Hive Config Warning",
24714
+ message: prefixedMessage
24715
+ });
24716
+ if (!notified) {
24717
+ console.warn(prefixedMessage);
24718
+ }
24719
+ };
23924
24720
  const featureService = new FeatureService(directory);
23925
24721
  const planService = new PlanService(directory);
23926
24722
  const taskService = new TaskService(directory);
23927
24723
  const contextService = new ContextService(directory);
24724
+ const networkService = new NetworkService(directory);
23928
24725
  const agentsMdService = new AgentsMdService(directory, contextService);
23929
- const configService = new ConfigService;
24726
+ const configService = new ConfigService(directory);
23930
24727
  const sessionService = new SessionService(directory);
23931
24728
  const disabledMcps = configService.getDisabledMcps();
23932
24729
  const disabledSkills = configService.getDisabledSkills();
24730
+ const configFallbackWarning = configService.getLastFallbackWarning()?.message ?? null;
24731
+ if (configFallbackWarning) {
24732
+ emitConfigWarning(configFallbackWarning);
24733
+ }
23933
24734
  const builtinMcps = createBuiltinMcps(disabledMcps);
23934
24735
  const filteredSkills = getFilteredSkills(disabledSkills);
23935
24736
  const effectiveAutoLoadSkills = configService.getAgentConfig("hive-master").autoLoadSkills ?? [];
23936
24737
  const worktreeService = new WorktreeService({
23937
24738
  baseDir: directory,
23938
- hiveDir: path11.join(directory, ".hive")
24739
+ hiveDir: path13.join(directory, ".hive")
23939
24740
  });
23940
24741
  const customAgentConfigsForClassification = getCustomAgentConfigsCompat(configService);
23941
24742
  const runtimeContext = detectContext(worktree || directory);
23942
24743
  const taskWorkerRecovery = runtimeContext.isWorktree && runtimeContext.feature && runtimeContext.task ? {
23943
24744
  featureName: runtimeContext.feature,
23944
24745
  taskFolder: runtimeContext.task,
23945
- workerPromptPath: path11.posix.join(".hive", "features", resolveFeatureDirectoryName(directory, runtimeContext.feature), "tasks", runtimeContext.task, "worker-prompt.md")
24746
+ workerPromptPath: path13.posix.join(".hive", "features", resolveFeatureDirectoryName(directory, runtimeContext.feature), "tasks", runtimeContext.task, "worker-prompt.md")
23946
24747
  } : undefined;
23947
24748
  const isOmoSlimEnabled = () => {
23948
24749
  return configService.isOmoSlimEnabled();
@@ -23990,10 +24791,12 @@ var plugin = async (ctx) => {
23990
24791
  const buildDirectiveReplayText = (session) => {
23991
24792
  if (!session.directivePrompt)
23992
24793
  return null;
23993
- const role = session.agent === "scout-researcher" || session.baseAgent === "scout-researcher" ? "Scout" : session.agent === "hygienic-reviewer" || session.baseAgent === "hygienic-reviewer" ? "Hygienic" : session.agent === "architect-planner" || session.baseAgent === "architect-planner" ? "Architect" : session.agent === "swarm-orchestrator" || session.baseAgent === "swarm-orchestrator" ? "Swarm" : session.agent === "hive-master" || session.baseAgent === "hive-master" ? "Hive" : "current role";
24794
+ const role = session.agent === "scout-researcher" || session.baseAgent === "scout-researcher" ? "Scout" : session.agent === "hive-helper" || session.baseAgent === "hive-helper" ? "Hive Helper" : session.agent === "hygienic-reviewer" || session.baseAgent === "hygienic-reviewer" ? "Hygienic" : session.agent === "architect-planner" || session.baseAgent === "architect-planner" ? "Architect" : session.agent === "swarm-orchestrator" || session.baseAgent === "swarm-orchestrator" ? "Swarm" : session.agent === "hive-master" || session.baseAgent === "hive-master" ? "Hive" : "current role";
23994
24795
  return [
23995
24796
  `Post-compaction recovery: You are still ${role}.`,
23996
24797
  "Resume the original assignment below. Do not replace it with a new goal.",
24798
+ "Do not broaden the scope or re-read the full codebase.",
24799
+ "If the exact next step is not explicit in the original assignment, return control to the parent/orchestrator immediately instead of improvising.",
23997
24800
  "",
23998
24801
  session.directivePrompt
23999
24802
  ].join(`
@@ -24002,12 +24805,45 @@ var plugin = async (ctx) => {
24002
24805
  const shouldUseDirectiveReplay = (session) => {
24003
24806
  return session?.sessionKind === "primary" || session?.sessionKind === "subagent";
24004
24807
  };
24808
+ const getDirectiveReplayCompactionPatch = (session) => {
24809
+ if (!session?.directivePrompt || !shouldUseDirectiveReplay(session)) {
24810
+ return null;
24811
+ }
24812
+ if (session.directiveRecoveryState === "escalated") {
24813
+ return null;
24814
+ }
24815
+ if (session.directiveRecoveryState === "consumed") {
24816
+ return {
24817
+ directiveRecoveryState: "escalated",
24818
+ replayDirectivePending: true
24819
+ };
24820
+ }
24821
+ return {
24822
+ directiveRecoveryState: "available",
24823
+ replayDirectivePending: true
24824
+ };
24825
+ };
24826
+ const shouldUseWorkerReplay = (session) => {
24827
+ return session?.sessionKind === "task-worker" && !!session.featureName && !!session.taskFolder && !!session.workerPromptPath;
24828
+ };
24829
+ const buildWorkerReplayText = (session) => {
24830
+ if (!session.featureName || !session.taskFolder || !session.workerPromptPath)
24831
+ return null;
24832
+ const role = "Forager";
24833
+ return [
24834
+ `Post-compaction recovery: You are still the ${role} worker for task ${session.taskFolder}.`,
24835
+ `Resume only this task. Do not merge, do not start the next task, and do not replace this assignment with a new goal.`,
24836
+ `Do not call orchestration tools unless the worker prompt explicitly says so.`,
24837
+ `Re-read @${session.workerPromptPath} and continue from the existing worktree state.`
24838
+ ].join(`
24839
+ `);
24840
+ };
24005
24841
  const checkBlocked = (feature) => {
24006
- const fs14 = __require("fs");
24842
+ const fs15 = __require("fs");
24007
24843
  const featureDir = resolveFeatureDirectoryName(directory, feature);
24008
- const blockedPath = path11.join(directory, ".hive", "features", featureDir, "BLOCKED");
24009
- if (fs14.existsSync(blockedPath)) {
24010
- const reason = fs14.readFileSync(blockedPath, "utf-8").trim();
24844
+ const blockedPath = path13.join(directory, ".hive", "features", featureDir, "BLOCKED");
24845
+ if (fs15.existsSync(blockedPath)) {
24846
+ const reason = fs15.readFileSync(blockedPath, "utf-8").trim();
24011
24847
  return `⛔ BLOCKED by Beekeeper
24012
24848
 
24013
24849
  ${reason || "(No reason provided)"}
@@ -24070,7 +24906,7 @@ To unblock: Remove .hive/features/${featureDir}/BLOCKED`;
24070
24906
  });
24071
24907
  const planResult = planService.read(feature);
24072
24908
  const allTasks = taskService.list(feature);
24073
- const executionContextFiles = typeof contextService.listExecutionContext === "function" ? contextService.listExecutionContext(feature) : contextService.list(feature).filter((f) => f.name !== "overview");
24909
+ const executionContextFiles = contextService.listExecutionContext(feature);
24074
24910
  const rawContextFiles = executionContextFiles.map((f) => ({
24075
24911
  name: f.name,
24076
24912
  content: f.content
@@ -24094,25 +24930,31 @@ To unblock: Remove .hive/features/${featureDir}/BLOCKED`;
24094
24930
  const taskOrder = parseInt(taskInfo.folder.match(/^(\d+)/)?.[1] || "0", 10);
24095
24931
  const status = taskService.getRawStatus(feature, task);
24096
24932
  const dependsOn = status?.dependsOn ?? [];
24097
- const specContent = taskService.buildSpecContent({
24098
- featureName: feature,
24099
- task: {
24100
- folder: task,
24101
- name: taskInfo.planTitle ?? taskInfo.name,
24102
- order: taskOrder,
24103
- description: undefined
24104
- },
24105
- dependsOn,
24106
- allTasks: allTasks.map((t) => ({
24107
- folder: t.folder,
24108
- name: t.name,
24109
- order: parseInt(t.folder.match(/^(\d+)/)?.[1] || "0", 10)
24110
- })),
24111
- planContent: planResult?.content ?? null,
24112
- contextFiles,
24113
- completedTasks: previousTasks
24114
- });
24115
- taskService.writeSpec(feature, task, specContent);
24933
+ let specContent;
24934
+ const existingManualSpec = status?.origin === "manual" ? taskService.readSpec(feature, task) : null;
24935
+ if (existingManualSpec) {
24936
+ specContent = existingManualSpec;
24937
+ } else {
24938
+ specContent = taskService.buildSpecContent({
24939
+ featureName: feature,
24940
+ task: {
24941
+ folder: task,
24942
+ name: taskInfo.planTitle ?? taskInfo.name,
24943
+ order: taskOrder,
24944
+ description: undefined
24945
+ },
24946
+ dependsOn,
24947
+ allTasks: allTasks.map((t) => ({
24948
+ folder: t.folder,
24949
+ name: t.name,
24950
+ order: parseInt(t.folder.match(/^(\d+)/)?.[1] || "0", 10)
24951
+ })),
24952
+ planContent: planResult?.content ?? null,
24953
+ contextFiles,
24954
+ completedTasks: previousTasks
24955
+ });
24956
+ taskService.writeSpec(feature, task, specContent);
24957
+ }
24116
24958
  const workerPrompt = buildWorkerPrompt({
24117
24959
  feature,
24118
24960
  task,
@@ -24160,9 +25002,9 @@ To unblock: Remove .hive/features/${featureDir}/BLOCKED`;
24160
25002
  spec: specContent,
24161
25003
  workerPrompt
24162
25004
  });
24163
- const hiveDir = path11.join(directory, ".hive");
25005
+ const hiveDir = path13.join(directory, ".hive");
24164
25006
  const workerPromptPath = writeWorkerPromptFile(feature, task, workerPrompt, hiveDir);
24165
- const relativePromptPath = normalizePath(path11.relative(directory, workerPromptPath));
25007
+ const relativePromptPath = normalizePath(path13.relative(directory, workerPromptPath));
24166
25008
  const PREVIEW_MAX_LENGTH = 200;
24167
25009
  const workerPromptPreview = workerPrompt.length > PREVIEW_MAX_LENGTH ? workerPrompt.slice(0, PREVIEW_MAX_LENGTH) + "..." : workerPrompt;
24168
25010
  const taskToolPrompt = `Follow instructions in @${relativePromptPath}`;
@@ -24412,9 +25254,13 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24412
25254
  return respond({
24413
25255
  success: false,
24414
25256
  terminal: true,
25257
+ reason: "task_not_blocked",
25258
+ canRetry: false,
25259
+ retryReason: `Task is in ${taskInfo.status} state. Run hive_status() and follow the current status flow instead of blocked resume.`,
24415
25260
  error: `continueFrom: 'blocked' was specified but task "${task}" is not in blocked state (current status: ${taskInfo.status}).`,
24416
25261
  currentStatus: taskInfo.status,
24417
25262
  hints: [
25263
+ "This blocked-resume call cannot be retried with the same parameters.",
24418
25264
  "Use hive_worktree_start({ feature, task }) for normal starts or re-dispatch.",
24419
25265
  "Use hive_status to verify the current task status before retrying."
24420
25266
  ]
@@ -24449,63 +25295,15 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24449
25295
  }
24450
25296
  const sessionID = input.event.properties.sessionID;
24451
25297
  const existing = sessionService.getGlobal(sessionID);
24452
- if (!existing?.directivePrompt || !shouldUseDirectiveReplay(existing)) {
25298
+ const directiveReplayPatch = getDirectiveReplayCompactionPatch(existing);
25299
+ if (directiveReplayPatch) {
25300
+ sessionService.trackGlobal(sessionID, directiveReplayPatch);
24453
25301
  return;
24454
25302
  }
24455
- sessionService.trackGlobal(sessionID, { replayDirectivePending: true });
24456
- },
24457
- "experimental.chat.system.transform": async (input, output) => {
24458
- if (!shouldExecuteHook("experimental.chat.system.transform", configService, turnCounters)) {
25303
+ if (shouldUseWorkerReplay(existing)) {
25304
+ sessionService.trackGlobal(sessionID, { replayDirectivePending: true });
24459
25305
  return;
24460
25306
  }
24461
- output.system.push(HIVE_SYSTEM_PROMPT);
24462
- const activeFeature = resolveFeature();
24463
- if (activeFeature) {
24464
- const info = featureService.getInfo(activeFeature);
24465
- if (info) {
24466
- const featureInfo = info;
24467
- let statusHint = `
24468
- ### Current Hive Status
24469
- `;
24470
- statusHint += `**Active Feature**: ${info.name} (${info.status})
24471
- `;
24472
- statusHint += `**Progress**: ${info.tasks.filter((t) => t.status === "done").length}/${info.tasks.length} tasks
24473
- `;
24474
- if (featureInfo.hasOverview) {
24475
- statusHint += `**Overview**: available at .hive/features/${resolveFeatureDirectoryName(directory, info.name)}/context/overview.md (primary human-facing doc)
24476
- `;
24477
- } else if (info.hasPlan) {
24478
- statusHint += `**Overview**: missing - write it with hive_context_write({ name: "overview", content })
24479
- `;
24480
- }
24481
- if (info.commentCount > 0) {
24482
- statusHint += `**Comments**: ${info.commentCount} unresolved (plan: ${featureInfo.reviewCounts?.plan ?? 0}, overview: ${featureInfo.reviewCounts?.overview ?? 0})
24483
- `;
24484
- }
24485
- output.system.push(statusHint);
24486
- }
24487
- }
24488
- },
24489
- "experimental.session.compacting": async (_input, output) => {
24490
- const session = sessionService.getGlobal(_input.sessionID);
24491
- if (session) {
24492
- const ctx2 = {
24493
- agent: session.agent,
24494
- baseAgent: session.baseAgent,
24495
- sessionKind: session.sessionKind,
24496
- featureName: session.featureName,
24497
- taskFolder: session.taskFolder,
24498
- workerPromptPath: session.workerPromptPath,
24499
- directivePrompt: session.directivePrompt
24500
- };
24501
- const reanchor = buildCompactionReanchor(ctx2);
24502
- output.prompt = reanchor.prompt;
24503
- output.context.push(...reanchor.context);
24504
- } else {
24505
- const reanchor = buildCompactionReanchor({});
24506
- output.prompt = reanchor.prompt;
24507
- output.context.push(...reanchor.context);
24508
- }
24509
25307
  },
24510
25308
  "chat.message": createVariantHook(configService, sessionService, customAgentConfigsForClassification, taskWorkerRecovery),
24511
25309
  "experimental.chat.messages.transform": async (_input, output) => {
@@ -24526,14 +25324,47 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24526
25324
  `);
24527
25325
  const existingDirective = session?.directivePrompt;
24528
25326
  if (directiveText && directiveText !== existingDirective && shouldUseDirectiveReplay(session ?? { sessionKind: "subagent" })) {
24529
- sessionService.trackGlobal(sessionID, { directivePrompt: directiveText });
25327
+ sessionService.trackGlobal(sessionID, {
25328
+ directivePrompt: directiveText,
25329
+ directiveRecoveryState: undefined,
25330
+ replayDirectivePending: false
25331
+ });
24530
25332
  }
24531
25333
  }
24532
25334
  const refreshed = sessionService.getGlobal(sessionID);
24533
- if (!refreshed?.replayDirectivePending || !shouldUseDirectiveReplay(refreshed)) {
24534
- if (refreshed?.replayDirectivePending && !shouldUseDirectiveReplay(refreshed)) {
25335
+ if (!refreshed?.replayDirectivePending) {
25336
+ return;
25337
+ }
25338
+ if (shouldUseWorkerReplay(refreshed)) {
25339
+ const workerText = buildWorkerReplayText(refreshed);
25340
+ if (!workerText) {
24535
25341
  sessionService.trackGlobal(sessionID, { replayDirectivePending: false });
25342
+ return;
24536
25343
  }
25344
+ const now2 = Date.now();
25345
+ output.messages.push({
25346
+ info: {
25347
+ id: `msg_replay_${sessionID}`,
25348
+ sessionID,
25349
+ role: "user",
25350
+ time: { created: now2 }
25351
+ },
25352
+ parts: [
25353
+ {
25354
+ id: `prt_replay_${sessionID}`,
25355
+ sessionID,
25356
+ messageID: `msg_replay_${sessionID}`,
25357
+ type: "text",
25358
+ text: workerText,
25359
+ synthetic: true
25360
+ }
25361
+ ]
25362
+ });
25363
+ sessionService.trackGlobal(sessionID, { replayDirectivePending: false });
25364
+ return;
25365
+ }
25366
+ if (!shouldUseDirectiveReplay(refreshed)) {
25367
+ sessionService.trackGlobal(sessionID, { replayDirectivePending: false });
24537
25368
  return;
24538
25369
  }
24539
25370
  const replayText = buildDirectiveReplayText(refreshed);
@@ -24560,7 +25391,10 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24560
25391
  }
24561
25392
  ]
24562
25393
  });
24563
- sessionService.trackGlobal(sessionID, { replayDirectivePending: false });
25394
+ sessionService.trackGlobal(sessionID, {
25395
+ replayDirectivePending: false,
25396
+ directiveRecoveryState: refreshed.directiveRecoveryState === "available" ? "consumed" : refreshed.directiveRecoveryState
25397
+ });
24564
25398
  },
24565
25399
  "tool.execute.before": async (input, output) => {
24566
25400
  if (!shouldExecuteHook("tool.execute.before", configService, turnCounters, { safetyCritical: true })) {
@@ -24583,7 +25417,7 @@ Use the \`@path\` attachment syntax in the prompt to reference the file. Do not
24583
25417
  const workdir = output.args?.workdir;
24584
25418
  if (!workdir)
24585
25419
  return;
24586
- const hiveWorktreeBase = path11.join(directory, ".hive", ".worktrees");
25420
+ const hiveWorktreeBase = path13.join(directory, ".hive", ".worktrees");
24587
25421
  if (!workdir.startsWith(hiveWorktreeBase))
24588
25422
  return;
24589
25423
  const wrapped = DockerSandboxService.wrapCommand(workdir, command, sandboxConfig);
@@ -24730,11 +25564,12 @@ Expand your Discovery section and try again.`;
24730
25564
  }
24731
25565
  }),
24732
25566
  hive_tasks_sync: tool({
24733
- description: "Generate tasks from approved plan",
25567
+ description: "Generate tasks from approved plan. When refreshPending is true, refresh pending plan tasks from current plan.md and delete removed pending tasks. Manual tasks and tasks with execution history are preserved.",
24734
25568
  args: {
24735
- feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)")
25569
+ feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)"),
25570
+ refreshPending: tool.schema.boolean().optional().describe("When true, refresh pending plan tasks from current plan.md (rewrite dependsOn, planTitle, spec.md) and delete pending tasks removed from plan")
24736
25571
  },
24737
- async execute({ feature: explicitFeature }) {
25572
+ async execute({ feature: explicitFeature, refreshPending }) {
24738
25573
  const feature = resolveFeature(explicitFeature);
24739
25574
  if (!feature)
24740
25575
  return "Error: No feature specified. Create a feature or provide feature param.";
@@ -24742,26 +25577,52 @@ Expand your Discovery section and try again.`;
24742
25577
  if (!featureData || featureData.status === "planning") {
24743
25578
  return "Error: Plan must be approved first";
24744
25579
  }
24745
- const result = taskService.sync(feature);
25580
+ const result = taskService.sync(feature, { refreshPending });
24746
25581
  if (featureData.status === "approved") {
24747
25582
  featureService.updateStatus(feature, "executing");
24748
25583
  }
24749
- return `Tasks synced: ${result.created.length} created, ${result.removed.length} removed, ${result.kept.length} kept`;
25584
+ return `Tasks synced: ${result.created.length} created, ${result.removed.length} removed, ${result.kept.length} kept, ${result.manual.length} manual`;
24750
25585
  }
24751
25586
  }),
24752
25587
  hive_task_create: tool({
24753
- description: "Create manual task (not from plan)",
25588
+ description: "Create append-only manual task (not from plan). Omit order to use the next slot. Explicit dependsOn defaults to [] and is only allowed when every dependency already exists and is done. Provide structured metadata for useful spec.md and worker prompt.",
24754
25589
  args: {
24755
25590
  name: tool.schema.string().describe("Task name"),
24756
- order: tool.schema.number().optional().describe("Task order"),
24757
- feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)")
25591
+ order: tool.schema.number().optional().describe("Task order. Omit to use the next append-only slot; explicit order must equal that next slot."),
25592
+ feature: tool.schema.string().optional().describe("Feature name (defaults to detection or single feature)"),
25593
+ description: tool.schema.string().optional().describe("What the worker needs to achieve"),
25594
+ goal: tool.schema.string().optional().describe("Why this task exists and what done means"),
25595
+ acceptanceCriteria: tool.schema.array(tool.schema.string()).optional().describe("Specific observable outcomes"),
25596
+ references: tool.schema.array(tool.schema.string()).optional().describe("File paths or line ranges relevant to this task"),
25597
+ files: tool.schema.array(tool.schema.string()).optional().describe("Files likely to be modified"),
25598
+ dependsOn: tool.schema.array(tool.schema.string()).optional().describe("Task folder names this task depends on (default: [] for no dependencies). Explicit dependsOn is allowed only when every dependency already exists and is done; review-sourced tasks must omit it."),
25599
+ reason: tool.schema.string().optional().describe("Why this task was created"),
25600
+ source: tool.schema.string().optional().describe("Origin: review, operator, or ad_hoc")
24758
25601
  },
24759
- async execute({ name, order, feature: explicitFeature }) {
25602
+ async execute({ name, order, feature: explicitFeature, description, goal, acceptanceCriteria, references, files, dependsOn, reason, source }) {
24760
25603
  const feature = resolveFeature(explicitFeature);
24761
25604
  if (!feature)
24762
25605
  return "Error: No feature specified. Create a feature or provide feature param.";
24763
- const folder = taskService.create(feature, name, order);
25606
+ const metadata = {};
25607
+ if (description)
25608
+ metadata.description = description;
25609
+ if (goal)
25610
+ metadata.goal = goal;
25611
+ if (acceptanceCriteria)
25612
+ metadata.acceptanceCriteria = acceptanceCriteria;
25613
+ if (references)
25614
+ metadata.references = references;
25615
+ if (files)
25616
+ metadata.files = files;
25617
+ if (dependsOn)
25618
+ metadata.dependsOn = dependsOn;
25619
+ if (reason)
25620
+ metadata.reason = reason;
25621
+ if (source)
25622
+ metadata.source = source;
25623
+ const folder = taskService.create(feature, name, order, Object.keys(metadata).length > 0 ? metadata : undefined);
24764
25624
  return `Manual task created: ${folder}
25625
+ Dependencies: [${(dependsOn ?? []).join(", ")}]
24765
25626
  Reminder: start work with hive_worktree_start to use its worktree, and ensure any subagents work in that worktree too.`;
24766
25627
  }
24767
25628
  }),
@@ -24864,7 +25725,7 @@ Reminder: start work with hive_worktree_start to use its worktree, and ensure an
24864
25725
  });
24865
25726
  }
24866
25727
  const featureDir = resolveFeatureDirectoryName(directory, feature);
24867
- const workerPromptPath = path11.posix.join(".hive", "features", featureDir, "tasks", task, "worker-prompt.md");
25728
+ const workerPromptPath = path13.posix.join(".hive", "features", featureDir, "tasks", task, "worker-prompt.md");
24868
25729
  bindFeatureSession(feature, toolContext, { taskFolder: task, workerPromptPath });
24869
25730
  let verificationNote;
24870
25731
  if (status === "completed") {
@@ -24996,37 +25857,49 @@ Reminder: start work with hive_worktree_start to use its worktree, and ensure an
24996
25857
  task: tool.schema.string().describe("Task folder name to merge"),
24997
25858
  strategy: tool.schema.enum(["merge", "squash", "rebase"]).optional().describe("Merge strategy (default: merge)"),
24998
25859
  message: tool.schema.string().optional().describe("Optional merge message for merge/squash. Empty uses default."),
25860
+ preserveConflicts: tool.schema.boolean().optional().describe("Keep merge conflict state intact instead of auto-aborting (default: false)."),
25861
+ cleanup: tool.schema.enum(["none", "worktree", "worktree+branch"]).optional().describe("Cleanup mode after a successful merge (default: none)."),
24999
25862
  feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
25000
25863
  },
25001
- async execute({ task, strategy = "merge", message, feature: explicitFeature }) {
25864
+ async execute({ task, strategy = "merge", message, preserveConflicts, cleanup, feature: explicitFeature }) {
25865
+ const failure = (error45) => respond({
25866
+ success: false,
25867
+ merged: false,
25868
+ strategy,
25869
+ filesChanged: [],
25870
+ conflicts: [],
25871
+ conflictState: "none",
25872
+ cleanup: {
25873
+ worktreeRemoved: false,
25874
+ branchDeleted: false,
25875
+ pruned: false
25876
+ },
25877
+ error: error45,
25878
+ message: `Merge failed: ${error45}`
25879
+ });
25002
25880
  const feature = resolveFeature(explicitFeature);
25003
25881
  if (!feature)
25004
- return "Error: No feature specified. Create a feature or provide feature param.";
25882
+ return failure("No feature specified. Create a feature or provide feature param.");
25005
25883
  const taskInfo = taskService.get(feature, task);
25006
25884
  if (!taskInfo)
25007
- return `Error: Task "${task}" not found`;
25885
+ return failure(`Task "${task}" not found`);
25008
25886
  if (taskInfo.status !== "done")
25009
- return "Error: Task must be completed before merging. Use hive_worktree_commit first.";
25010
- const result = await worktreeService.merge(feature, task, strategy, message);
25011
- if (!result.success) {
25012
- if (result.conflicts && result.conflicts.length > 0) {
25013
- return `Merge failed with conflicts in:
25014
- ${result.conflicts.map((f) => `- ${f}`).join(`
25015
- `)}
25016
-
25017
- Resolve conflicts manually or try a different strategy.`;
25018
- }
25019
- return `Merge failed: ${result.error}`;
25020
- }
25021
- return `Task "${task}" merged successfully using ${strategy} strategy.
25022
- Commit: ${result.sha}
25023
- Files changed: ${result.filesChanged?.length || 0}`;
25887
+ return failure("Task must be completed before merging. Use hive_worktree_commit first.");
25888
+ const result = await worktreeService.merge(feature, task, strategy, message, {
25889
+ preserveConflicts,
25890
+ cleanup
25891
+ });
25892
+ const responseMessage = result.success ? `Task "${task}" merged successfully using ${strategy} strategy.` : `Merge failed: ${result.error}`;
25893
+ return respond({
25894
+ ...result,
25895
+ message: responseMessage
25896
+ });
25024
25897
  }
25025
25898
  }),
25026
25899
  hive_context_write: tool({
25027
- description: "Write a context file for the feature. Context files store persistent notes, decisions, and reference material.",
25900
+ description: "Write a context file for the feature. System-known names: overview = human-facing summary/history, draft = planner scratchpad, execution-decisions = orchestration log; all other names stay durable free-form context.",
25028
25901
  args: {
25029
- name: tool.schema.string().describe('Context file name (e.g., "decisions", "architecture", "notes")'),
25902
+ name: tool.schema.string().describe('Context file name (e.g., "overview", "draft", "execution-decisions", "learnings"). overview is the human-facing summary/history file, draft is planner scratchpad, execution-decisions is the orchestration log; other names remain durable free-form context.'),
25030
25903
  content: tool.schema.string().describe("Markdown content to write"),
25031
25904
  feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
25032
25905
  },
@@ -25036,7 +25909,29 @@ Files changed: ${result.filesChanged?.length || 0}`;
25036
25909
  return "Error: No feature specified. Create a feature or provide feature param.";
25037
25910
  bindFeatureSession(feature, toolContext);
25038
25911
  const filePath = contextService.write(feature, name, content);
25039
- return `Context file written: ${filePath}`;
25912
+ return `Context file written: ${filePath}. Known names: overview = human-facing summary/history, draft = planner scratchpad, execution-decisions = orchestration log; all other context names remain durable free-form notes.`;
25913
+ }
25914
+ }),
25915
+ hive_network_query: tool({
25916
+ description: "Query prior features for deterministic plan/context snippets. Returns JSON with query, currentFeature, and snippet results only. Callers must opt in to using the returned snippets.",
25917
+ args: {
25918
+ feature: tool.schema.string().optional().describe("Current feature to exclude from results. Defaults to active feature when available."),
25919
+ query: tool.schema.string().describe("Case-insensitive substring query over plan.md and network-safe context")
25920
+ },
25921
+ async execute({ feature: explicitFeature, query }) {
25922
+ const currentFeature = resolveFeature(explicitFeature) ?? null;
25923
+ const results = networkService.query({
25924
+ currentFeature: currentFeature ?? undefined,
25925
+ query,
25926
+ maxFeatures: 10,
25927
+ maxSnippetsPerFeature: 3,
25928
+ maxSnippetChars: 240
25929
+ });
25930
+ return respond({
25931
+ query,
25932
+ currentFeature,
25933
+ results
25934
+ });
25040
25935
  }
25041
25936
  }),
25042
25937
  hive_status: tool({
@@ -25081,23 +25976,23 @@ Files changed: ${result.filesChanged?.length || 0}`;
25081
25976
  }
25082
25977
  const plan = planService.read(feature);
25083
25978
  const tasks = taskService.list(feature);
25084
- const contextFiles = contextService.list(feature);
25085
- const overview = contextFiles.find((file2) => file2.name === "overview") ?? null;
25979
+ const featureContextFiles = contextService.list(feature);
25980
+ const overview = contextService.getOverview(feature);
25086
25981
  const readThreads = (filePath) => {
25087
- if (!fs13.existsSync(filePath)) {
25982
+ if (!fs14.existsSync(filePath)) {
25088
25983
  return null;
25089
25984
  }
25090
25985
  try {
25091
- const data = JSON.parse(fs13.readFileSync(filePath, "utf-8"));
25986
+ const data = JSON.parse(fs14.readFileSync(filePath, "utf-8"));
25092
25987
  return data.threads ?? [];
25093
25988
  } catch {
25094
25989
  return [];
25095
25990
  }
25096
25991
  };
25097
- const featurePath = path11.join(directory, ".hive", "features", resolveFeatureDirectoryName(directory, feature));
25098
- const reviewDir = path11.join(featurePath, "comments");
25099
- const planThreads = readThreads(path11.join(reviewDir, "plan.json")) ?? readThreads(path11.join(featurePath, "comments.json"));
25100
- const overviewThreads = readThreads(path11.join(reviewDir, "overview.json"));
25992
+ const featurePath = path13.join(directory, ".hive", "features", resolveFeatureDirectoryName(directory, feature));
25993
+ const reviewDir = path13.join(featurePath, "comments");
25994
+ const planThreads = readThreads(path13.join(reviewDir, "plan.json")) ?? readThreads(path13.join(featurePath, "comments.json"));
25995
+ const overviewThreads = readThreads(path13.join(reviewDir, "overview.json"));
25101
25996
  const reviewCounts = {
25102
25997
  plan: planThreads?.length ?? 0,
25103
25998
  overview: overviewThreads?.length ?? 0
@@ -25118,14 +26013,21 @@ Files changed: ${result.filesChanged?.length || 0}`;
25118
26013
  } : null
25119
26014
  };
25120
26015
  }));
25121
- const contextSummary = contextFiles.map((c) => ({
26016
+ const contextSummary = featureContextFiles.map((c) => ({
25122
26017
  name: c.name,
25123
26018
  chars: c.content.length,
25124
- updatedAt: c.updatedAt
26019
+ updatedAt: c.updatedAt,
26020
+ role: c.role,
26021
+ includeInExecution: c.includeInExecution,
26022
+ includeInAgentsMdSync: c.includeInAgentsMdSync,
26023
+ includeInNetwork: c.includeInNetwork
25125
26024
  }));
25126
26025
  const pendingTasks = tasksSummary.filter((t) => t.status === "pending");
25127
26026
  const inProgressTasks = tasksSummary.filter((t) => t.status === "in_progress");
25128
26027
  const doneTasks = tasksSummary.filter((t) => t.status === "done");
26028
+ const doneTasksWithLiveWorktrees = tasksSummary.filter((t) => t.status === "done" && t.worktree).map((t) => t.folder);
26029
+ const dirtyWorktrees = tasksSummary.filter((t) => t.worktree && t.worktree.hasChanges === true).map((t) => t.folder);
26030
+ const nonInProgressTasksWithWorktrees = tasksSummary.filter((t) => t.status !== "in_progress" && t.worktree).map((t) => t.folder);
25129
26031
  const tasksWithDeps = tasksSummary.map((t) => ({
25130
26032
  folder: t.folder,
25131
26033
  status: t.status,
@@ -25137,12 +26039,25 @@ Files changed: ${result.filesChanged?.length || 0}`;
25137
26039
  dependsOn: effectiveDeps.get(task.folder)
25138
26040
  }));
25139
26041
  const { runnable, blocked: blockedBy } = computeRunnableAndBlocked(normalizedTasks);
26042
+ const ambiguityFlags = [];
26043
+ if (doneTasksWithLiveWorktrees.length > 0) {
26044
+ ambiguityFlags.push("done_task_has_live_worktree");
26045
+ }
26046
+ if (dirtyWorktrees.some((folder) => nonInProgressTasksWithWorktrees.includes(folder))) {
26047
+ ambiguityFlags.push("dirty_non_in_progress_worktree");
26048
+ }
26049
+ if (runnable.length > 1) {
26050
+ ambiguityFlags.push("multiple_runnable_tasks");
26051
+ }
26052
+ if (pendingTasks.length > 0 && runnable.length === 0) {
26053
+ ambiguityFlags.push("pending_tasks_blocked");
26054
+ }
25140
26055
  const getNextAction = (planStatus2, tasks2, runnableTasks, hasPlan, hasOverview) => {
25141
26056
  if (planStatus2 === "review") {
25142
26057
  return "Wait for plan approval or revise based on comments";
25143
26058
  }
25144
26059
  if (!hasPlan || planStatus2 === "draft") {
25145
- return "Write or revise plan with hive_plan_write. Keep plan.md as the human-facing review artifact; pre-task Mermaid overview diagrams are optional.";
26060
+ return "Write or revise plan with hive_plan_write. Refresh context/overview.md first for human review; plan.md remains execution truth and pre-task Mermaid overview diagrams are optional.";
25146
26061
  }
25147
26062
  if (tasks2.length === 0) {
25148
26063
  return "Generate tasks from plan with hive_tasks_sync";
@@ -25197,10 +26112,31 @@ Files changed: ${result.filesChanged?.length || 0}`;
25197
26112
  runnable,
25198
26113
  blockedBy
25199
26114
  },
26115
+ helperStatus: {
26116
+ doneTasksWithLiveWorktrees,
26117
+ dirtyWorktrees,
26118
+ nonInProgressTasksWithWorktrees,
26119
+ manualTaskPolicy: {
26120
+ order: {
26121
+ omitted: "append_next_order",
26122
+ explicitNextOrder: "append_next_order",
26123
+ explicitOtherOrder: "plan_amendment_required"
26124
+ },
26125
+ dependsOn: {
26126
+ omitted: "store_empty_array",
26127
+ explicitDoneTargetsOnly: "allowed",
26128
+ explicitMissingTarget: "plan_amendment_required",
26129
+ explicitNotDoneTarget: "plan_amendment_required",
26130
+ reviewSourceWithExplicitDependsOn: "plan_amendment_required"
26131
+ }
26132
+ },
26133
+ ambiguityFlags
26134
+ },
25200
26135
  context: {
25201
- fileCount: contextFiles.length,
26136
+ fileCount: featureContextFiles.length,
25202
26137
  files: contextSummary
25203
26138
  },
26139
+ warning: configFallbackWarning ?? undefined,
25204
26140
  nextAction: getNextAction(planStatus, tasksSummary, runnable, !!plan, !!overview)
25205
26141
  });
25206
26142
  }
@@ -25248,8 +26184,8 @@ ${result.diff}
25248
26184
  })
25249
26185
  },
25250
26186
  command: {
25251
- hive: {
25252
- description: "Create a new feature: /hive <feature-name>",
26187
+ [HIVE_COMMANDS[0].key]: {
26188
+ description: HIVE_COMMANDS[0].description,
25253
26189
  async run(args) {
25254
26190
  const name = args.trim();
25255
26191
  if (!name)
@@ -25260,27 +26196,8 @@ ${result.diff}
25260
26196
  },
25261
26197
  config: async (opencodeConfig) => {
25262
26198
  function agentTools(allowed) {
25263
- const allHiveTools = [
25264
- "hive_feature_create",
25265
- "hive_feature_complete",
25266
- "hive_plan_write",
25267
- "hive_plan_read",
25268
- "hive_plan_approve",
25269
- "hive_tasks_sync",
25270
- "hive_task_create",
25271
- "hive_task_update",
25272
- "hive_worktree_start",
25273
- "hive_worktree_create",
25274
- "hive_worktree_commit",
25275
- "hive_worktree_discard",
25276
- "hive_merge",
25277
- "hive_context_write",
25278
- "hive_status",
25279
- "hive_skill",
25280
- "hive_agents_md"
25281
- ];
25282
26199
  const result = {};
25283
- for (const tool3 of allHiveTools) {
26200
+ for (const tool3 of HIVE_TOOL_NAMES) {
25284
26201
  if (!allowed.includes(tool3)) {
25285
26202
  result[tool3] = false;
25286
26203
  }
@@ -25303,7 +26220,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25303
26220
  variant: hiveUserConfig.variant,
25304
26221
  temperature: hiveUserConfig.temperature ?? 0.5,
25305
26222
  description: "Hive (Hybrid) - Plans + orchestrates. Detects phase, loads skills on-demand.",
25306
- prompt: QUEEN_BEE_PROMPT + hiveAutoLoadedSkills + (agentMode === "unified" ? customSubagentAppendix : ""),
26223
+ prompt: QUEEN_BEE_PROMPT + HIVE_SYSTEM_PROMPT + hiveAutoLoadedSkills + (agentMode === "unified" ? customSubagentAppendix : ""),
25307
26224
  permission: {
25308
26225
  question: "allow",
25309
26226
  skill: "allow",
@@ -25318,8 +26235,8 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25318
26235
  variant: architectUserConfig.variant,
25319
26236
  temperature: architectUserConfig.temperature ?? 0.7,
25320
26237
  description: "Architect (Planner) - Plans features, interviews, writes plans. NEVER executes.",
25321
- prompt: ARCHITECT_BEE_PROMPT + architectAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
25322
- tools: agentTools(["hive_feature_create", "hive_plan_write", "hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
26238
+ prompt: ARCHITECT_BEE_PROMPT + HIVE_SYSTEM_PROMPT + architectAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
26239
+ tools: agentTools(["hive_feature_create", "hive_plan_write", "hive_plan_read", "hive_context_write", "hive_network_query", "hive_status", "hive_skill"]),
25323
26240
  permission: {
25324
26241
  edit: "deny",
25325
26242
  task: "allow",
@@ -25337,7 +26254,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25337
26254
  variant: swarmUserConfig.variant,
25338
26255
  temperature: swarmUserConfig.temperature ?? 0.5,
25339
26256
  description: "Swarm (Orchestrator) - Orchestrates execution. Delegates, spawns workers, verifies, merges.",
25340
- prompt: SWARM_BEE_PROMPT + swarmAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
26257
+ prompt: SWARM_BEE_PROMPT + HIVE_SYSTEM_PROMPT + swarmAutoLoadedSkills + (agentMode === "dedicated" ? customSubagentAppendix : ""),
25341
26258
  tools: agentTools([
25342
26259
  "hive_feature_create",
25343
26260
  "hive_feature_complete",
@@ -25351,6 +26268,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25351
26268
  "hive_worktree_discard",
25352
26269
  "hive_merge",
25353
26270
  "hive_context_write",
26271
+ "hive_network_query",
25354
26272
  "hive_status",
25355
26273
  "hive_skill",
25356
26274
  "hive_agents_md"
@@ -25370,7 +26288,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25370
26288
  temperature: scoutUserConfig.temperature ?? 0.5,
25371
26289
  mode: "subagent",
25372
26290
  description: "Scout (Explorer/Researcher/Retrieval) - Researches codebase + external docs/data.",
25373
- prompt: SCOUT_BEE_PROMPT + scoutAutoLoadedSkills,
26291
+ prompt: SCOUT_BEE_PROMPT + HIVE_SYSTEM_PROMPT + scoutAutoLoadedSkills,
25374
26292
  tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
25375
26293
  permission: {
25376
26294
  edit: "deny",
@@ -25388,7 +26306,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25388
26306
  temperature: foragerUserConfig.temperature ?? 0.3,
25389
26307
  mode: "subagent",
25390
26308
  description: "Forager (Worker/Coder) - Executes tasks directly in isolated worktrees. Never delegates.",
25391
- prompt: FORAGER_BEE_PROMPT + foragerAutoLoadedSkills,
26309
+ prompt: FORAGER_BEE_PROMPT + HIVE_SYSTEM_PROMPT + foragerAutoLoadedSkills,
25392
26310
  tools: agentTools(["hive_plan_read", "hive_worktree_commit", "hive_context_write", "hive_skill"]),
25393
26311
  permission: {
25394
26312
  task: "deny",
@@ -25396,6 +26314,21 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25396
26314
  skill: "allow"
25397
26315
  }
25398
26316
  };
26317
+ const hiveHelperUserConfig = configService.getAgentConfig("hive-helper");
26318
+ const hiveHelperConfig = {
26319
+ model: hiveHelperUserConfig.model,
26320
+ variant: hiveHelperUserConfig.variant,
26321
+ temperature: hiveHelperUserConfig.temperature ?? 0.3,
26322
+ mode: "subagent",
26323
+ description: "Hive Helper - Runtime-only bounded hard-task operational assistant for merge recovery, state clarification, and safe manual follow-up assistance.",
26324
+ prompt: HIVE_HELPER_PROMPT + HIVE_SYSTEM_PROMPT,
26325
+ tools: agentTools(["hive_merge", "hive_status", "hive_context_write", "hive_task_create", "hive_skill"]),
26326
+ permission: {
26327
+ task: "deny",
26328
+ delegate: "deny",
26329
+ skill: "allow"
26330
+ }
26331
+ };
25399
26332
  const hygienicUserConfig = configService.getAgentConfig("hygienic-reviewer");
25400
26333
  const hygienicAutoLoadedSkills = await buildAutoLoadedSkillsContent("hygienic-reviewer", configService, directory);
25401
26334
  const hygienicConfig = {
@@ -25404,8 +26337,8 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25404
26337
  temperature: hygienicUserConfig.temperature ?? 0.3,
25405
26338
  mode: "subagent",
25406
26339
  description: "Hygienic (Consultant/Reviewer/Debugger) - Reviews plan documentation quality. OKAY/REJECT verdict.",
25407
- prompt: HYGIENIC_BEE_PROMPT + hygienicAutoLoadedSkills,
25408
- tools: agentTools(["hive_plan_read", "hive_context_write", "hive_status", "hive_skill"]),
26340
+ prompt: HYGIENIC_BEE_PROMPT + HIVE_SYSTEM_PROMPT + hygienicAutoLoadedSkills,
26341
+ tools: agentTools(["hive_plan_read", "hive_context_write", "hive_network_query", "hive_status", "hive_skill"]),
25409
26342
  permission: {
25410
26343
  edit: "deny",
25411
26344
  task: "deny",
@@ -25419,6 +26352,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25419
26352
  "swarm-orchestrator": swarmConfig,
25420
26353
  "scout-researcher": scoutConfig,
25421
26354
  "forager-worker": foragerConfig,
26355
+ "hive-helper": hiveHelperConfig,
25422
26356
  "hygienic-reviewer": hygienicConfig
25423
26357
  };
25424
26358
  const customAutoLoadedSkills = Object.fromEntries(await Promise.all(Object.entries(customAgentConfigs).map(async ([customAgentName, customAgentConfig]) => {
@@ -25442,12 +26376,14 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25442
26376
  allAgents["hive-master"] = builtInAgentConfigs["hive-master"];
25443
26377
  allAgents["scout-researcher"] = builtInAgentConfigs["scout-researcher"];
25444
26378
  allAgents["forager-worker"] = builtInAgentConfigs["forager-worker"];
26379
+ allAgents["hive-helper"] = builtInAgentConfigs["hive-helper"];
25445
26380
  allAgents["hygienic-reviewer"] = builtInAgentConfigs["hygienic-reviewer"];
25446
26381
  } else {
25447
26382
  allAgents["architect-planner"] = builtInAgentConfigs["architect-planner"];
25448
26383
  allAgents["swarm-orchestrator"] = builtInAgentConfigs["swarm-orchestrator"];
25449
26384
  allAgents["scout-researcher"] = builtInAgentConfigs["scout-researcher"];
25450
26385
  allAgents["forager-worker"] = builtInAgentConfigs["forager-worker"];
26386
+ allAgents["hive-helper"] = builtInAgentConfigs["hive-helper"];
25451
26387
  allAgents["hygienic-reviewer"] = builtInAgentConfigs["hygienic-reviewer"];
25452
26388
  }
25453
26389
  Object.assign(allAgents, customSubagents);
@@ -25467,6 +26403,7 @@ ${Object.entries(customAgentConfigs).sort(([left], [right]) => left.localeCompar
25467
26403
  delete configAgent["swarm-orchestrator"];
25468
26404
  delete configAgent["scout-researcher"];
25469
26405
  delete configAgent["forager-worker"];
26406
+ delete configAgent["hive-helper"];
25470
26407
  delete configAgent["hygienic-reviewer"];
25471
26408
  Object.assign(configAgent, allAgents);
25472
26409
  }