opencode-hive 1.0.7 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -12,8 +12,7 @@ var __export = (target, all) => {
12
12
  var __require = /* @__PURE__ */ createRequire(import.meta.url);
13
13
 
14
14
  // src/index.ts
15
- import * as path7 from "path";
16
- import * as fs9 from "fs";
15
+ import * as path8 from "path";
17
16
  import * as os from "os";
18
17
 
19
18
  // ../../node_modules/zod/v4/classic/external.js
@@ -12337,8 +12336,260 @@ function tool(input) {
12337
12336
  }
12338
12337
  tool.schema = exports_external;
12339
12338
  // src/skills/registry.generated.ts
12340
- var BUILTIN_SKILL_NAMES = ["brainstorming", "code-reviewer", "dispatching-parallel-agents", "executing-plans", "onboarding", "parallel-exploration", "systematic-debugging", "test-driven-development", "verification-before-completion", "writing-plans"];
12339
+ var BUILTIN_SKILL_NAMES = ["agents-md-mastery", "brainstorming", "code-reviewer", "dispatching-parallel-agents", "docker-mastery", "executing-plans", "parallel-exploration", "systematic-debugging", "test-driven-development", "verification-before-completion", "writing-plans"];
12341
12340
  var BUILTIN_SKILLS = [
12341
+ {
12342
+ name: "agents-md-mastery",
12343
+ description: "Use when bootstrapping, updating, or reviewing AGENTS.md — teaches what makes effective agent memory, how to structure sections, signal vs noise filtering, and when to prune stale entries",
12344
+ template: `# AGENTS.md Mastery
12345
+
12346
+ ## Overview
12347
+
12348
+ **AGENTS.md is pseudo-memory loaded at session start.** Every line shapes agent behavior for the entire session. Quality beats quantity. Write for agents, not humans.
12349
+
12350
+ Unlike code comments or READMEs, AGENTS.md entries persist across all agent sessions. A bad entry misleads agents hundreds of times. A missing entry causes the same mistake repeatedly.
12351
+
12352
+ **Core principle:** Optimize for agent comprehension and behavioral change, not human readability.
12353
+
12354
+ ## The Iron Law
12355
+
12356
+ \`\`\`
12357
+ EVERY ENTRY MUST CHANGE AGENT BEHAVIOR
12358
+ \`\`\`
12359
+
12360
+ If an entry doesn't:
12361
+ - Prevent a specific mistake
12362
+ - Enable a capability the agent would otherwise miss
12363
+ - Override a default assumption that breaks in this codebase
12364
+
12365
+ ...then it doesn't belong in AGENTS.md.
12366
+
12367
+ **Test:** Would a fresh agent session make a mistake without this entry? If no → noise.
12368
+
12369
+ ## When to Use
12370
+
12371
+ | Trigger | Action |
12372
+ |---------|--------|
12373
+ | New project bootstrap | Write initial AGENTS.md with build/test/style basics |
12374
+ | Feature completion | Sync new learnings via \`hive_agents_md\` tool |
12375
+ | Periodic review | Audit for stale/redundant entries (quarterly) |
12376
+ | Quality issues | Agent repeating mistakes? Check if AGENTS.md has the fix |
12377
+
12378
+ ## What Makes Good Agent Memory
12379
+
12380
+ ### Signal Entries (Keep)
12381
+
12382
+ ✅ **Project-specific conventions:**
12383
+ - "We use Zustand, not Redux — never add Redux"
12384
+ - "Auth lives in \`/lib/auth\` — never create auth elsewhere"
12385
+ - "Run \`bun test\` not \`npm test\` (we don't use npm)"
12386
+
12387
+ ✅ **Non-obvious patterns:**
12388
+ - "Use \`.js\` extension for local imports (ESM requirement)"
12389
+ - "Worktrees don't share \`node_modules\` — run \`bun install\` in each"
12390
+ - "SandboxConfig is in \`dockerSandboxService.ts\`, NOT \`types.ts\`"
12391
+
12392
+ ✅ **Gotchas that break builds:**
12393
+ - "Never use \`ensureDirSync\` — doesn't exist. Use \`ensureDir\` (sync despite name)"
12394
+ - "Import from \`../utils/paths.js\` not \`./paths\` (ESM strict)"
12395
+
12396
+ ### Noise Entries (Remove)
12397
+
12398
+ ❌ **Agent already knows:**
12399
+ - "This project uses TypeScript" (agent detects from files)
12400
+ - "We follow semantic versioning" (universal convention)
12401
+ - "Use descriptive variable names" (generic advice)
12402
+
12403
+ ❌ **Irrelevant metadata:**
12404
+ - "Created on January 2024"
12405
+ - "Originally written by X"
12406
+ - "License: MIT" (in LICENSE file already)
12407
+
12408
+ ❌ **Describes what code does:**
12409
+ - "FeatureService manages features" (agent can read code)
12410
+ - "The system uses git worktrees" (observable from commands)
12411
+
12412
+ ### Rule of Thumb
12413
+
12414
+ **Signal:** Changes how agent acts
12415
+ **Noise:** Documents what agent observes
12416
+
12417
+ ## Section Structure for Fast Comprehension
12418
+
12419
+ Agents read AGENTS.md top-to-bottom once at session start. Put high-value info first:
12420
+
12421
+ \`\`\`markdown
12422
+ # Project Name
12423
+
12424
+ ## Build & Test Commands
12425
+ # ← Agents need this IMMEDIATELY
12426
+ bun run build
12427
+ bun run test
12428
+ bun run release:check
12429
+
12430
+ ## Code Style
12431
+ # ← Prevents syntax/import errors
12432
+ - Semicolons: Yes
12433
+ - Quotes: Single
12434
+ - Imports: Use \`.js\` extension
12435
+
12436
+ ## Architecture
12437
+ # ← Key directories, where things live
12438
+ packages/
12439
+ ├── hive-core/ # Shared logic
12440
+ ├── opencode-hive/ # Plugin
12441
+ └── vscode-hive/ # Extension
12442
+
12443
+ ## Important Patterns
12444
+ # ← How to do common tasks correctly
12445
+ Use \`readText\` from paths.ts, not fs.readFileSync
12446
+
12447
+ ## Gotchas & Anti-Patterns
12448
+ # ← Things that break or mislead
12449
+ NEVER use \`ensureDirSync\` — doesn't exist
12450
+ \`\`\`
12451
+
12452
+ **Keep total under 500 lines.** Beyond that, agents lose focus and miss critical entries.
12453
+
12454
+ ## The Sync Workflow
12455
+
12456
+ After completing a feature, sync learnings to AGENTS.md:
12457
+
12458
+ 1. **Trigger sync:**
12459
+ \`\`\`typescript
12460
+ hive_agents_md({ action: 'sync', feature: 'feature-name' })
12461
+ \`\`\`
12462
+
12463
+ 2. **Review each proposal:**
12464
+ - Read the proposed change
12465
+ - Ask: "Does this change agent behavior?"
12466
+ - Check: Is this already obvious from code/files?
12467
+
12468
+ 3. **Accept signal, reject noise:**
12469
+ - ❌ "TypeScript is used" → Agent detects this
12470
+ - ✅ "Use \`.js\` extension for imports" → Prevents build failures
12471
+
12472
+ 4. **Apply approved changes:**
12473
+ \`\`\`typescript
12474
+ hive_agents_md({ action: 'apply' })
12475
+ \`\`\`
12476
+
12477
+ **Warning:** Don't auto-approve all proposals. One bad entry pollutes all future sessions.
12478
+
12479
+ ## When to Prune
12480
+
12481
+ Remove entries when they become:
12482
+
12483
+ **Outdated:**
12484
+ - "We use Redux" → Project migrated to Zustand
12485
+ - "Node 16 compatibility required" → Now on Node 22
12486
+
12487
+ **Redundant:**
12488
+ - "Use single quotes" + "Strings use single quotes" → Keep one
12489
+ - Near-duplicates in different sections
12490
+
12491
+ **Too generic:**
12492
+ - "Write clear code" → Applies to any project
12493
+ - "Test your changes" → Universal advice
12494
+
12495
+ **Describing code:**
12496
+ - "TaskService manages tasks" → Agent can read \`TaskService\` class
12497
+ - "Worktrees are in \`.hive/.worktrees/\`" → Observable from filesystem
12498
+
12499
+ **Proven unnecessary:**
12500
+ - Entry added 6 months ago, but agents haven't hit that issue since
12501
+
12502
+ ## Red Flags
12503
+
12504
+ | Warning Sign | Why It's Bad | Fix |
12505
+ |-------------|-------------|-----|
12506
+ | AGENTS.md > 800 lines | Agents lose focus, miss critical info | Prune aggressively |
12507
+ | Describes what code does | Agent can read code | Remove descriptions |
12508
+ | Missing build/test commands | First thing agents need | Add at top |
12509
+ | No gotchas section | Agents repeat past mistakes | Document failure modes |
12510
+ | Generic best practices | Doesn't change behavior | Remove or make specific |
12511
+ | Outdated patterns | Misleads agents | Prune during sync |
12512
+
12513
+ ## Anti-Patterns
12514
+
12515
+ | Anti-Pattern | Better Approach |
12516
+ |-------------|----------------|
12517
+ | "Document everything" | Document only what changes behavior |
12518
+ | "Keep for historical record" | Version control is history |
12519
+ | "Might be useful someday" | Add when proven necessary |
12520
+ | "Explains the system" | Agents read code for that |
12521
+ | "Comprehensive reference" | AGENTS.md is a filter, not docs |
12522
+
12523
+ ## Good Examples
12524
+
12525
+ **Build Commands (High value, agents need immediately):**
12526
+ \`\`\`markdown
12527
+ ## Build & Test Commands
12528
+ bun run build # Build all packages
12529
+ bun run test # Run all tests
12530
+ bun run release:check # Full CI check
12531
+ \`\`\`
12532
+
12533
+ **Project-Specific Convention (Prevents mistakes):**
12534
+ \`\`\`markdown
12535
+ ## Code Style
12536
+ - Imports: Use \`.js\` extension for local imports (ESM requirement)
12537
+ - Paths: Import from \`../utils/paths.js\` never \`./paths\`
12538
+ \`\`\`
12539
+
12540
+ **Non-Obvious Gotcha (Prevents build failure):**
12541
+ \`\`\`markdown
12542
+ ## Important Patterns
12543
+ Use \`ensureDir\` from paths.ts — sync despite name
12544
+ NEVER use \`ensureDirSync\` (doesn't exist)
12545
+ \`\`\`
12546
+
12547
+ ## Bad Examples
12548
+
12549
+ **Generic advice (agent already knows):**
12550
+ \`\`\`markdown
12551
+ ## Best Practices
12552
+ - Use meaningful variable names
12553
+ - Write unit tests
12554
+ - Follow DRY principle
12555
+ \`\`\`
12556
+
12557
+ **Describes code (agent can read it):**
12558
+ \`\`\`markdown
12559
+ ## Architecture
12560
+ The FeatureService class manages features. It has methods
12561
+ for create, read, update, and delete operations.
12562
+ \`\`\`
12563
+
12564
+ **Irrelevant metadata:**
12565
+ \`\`\`markdown
12566
+ ## Project History
12567
+ Created in January 2024 by the platform team.
12568
+ Originally built for internal use.
12569
+ \`\`\`
12570
+
12571
+ ## Verification
12572
+
12573
+ Before finalizing AGENTS.md updates:
12574
+
12575
+ - [ ] Every entry answers: "What mistake does this prevent?"
12576
+ - [ ] No generic advice that applies to all projects
12577
+ - [ ] Build/test commands are first
12578
+ - [ ] Gotchas section exists and is populated
12579
+ - [ ] Total length under 500 lines (800 absolute max)
12580
+ - [ ] No entries describing what code does
12581
+ - [ ] Fresh agent session would benefit from each entry
12582
+
12583
+ ## Summary
12584
+
12585
+ AGENTS.md is **behavioral memory**, not documentation:
12586
+ - Write for agents, optimize for behavior change
12587
+ - Signal = prevents mistakes, Noise = describes observables
12588
+ - Sync after features, prune quarterly
12589
+ - Test: Would agent make a mistake without this entry?
12590
+
12591
+ **Quality > quantity. Every line counts.**`
12592
+ },
12342
12593
  {
12343
12594
  name: "brainstorming",
12344
12595
  description: "Use before any creative work - creating features, building components, adding functionality, or modifying behavior. Explores user intent, requirements and design before implementation.",
@@ -12388,7 +12639,8 @@ Start by understanding the current project context, then ask questions one at a
12388
12639
  - **YAGNI ruthlessly** - Remove unnecessary features from all designs
12389
12640
  - **Explore alternatives** - Always propose 2-3 approaches before settling
12390
12641
  - **Incremental validation** - Present design in sections, validate each
12391
- - **Be flexible** - Go back and clarify when something doesn't make sense`
12642
+ - **Be flexible** - Go back and clarify when something doesn't make sense
12643
+ - **Challenge assumptions** - Surface fragile assumptions, ask what changes if they fail, offer lean fallback options`
12392
12644
  },
12393
12645
  {
12394
12646
  name: "code-reviewer",
@@ -12675,9 +12927,9 @@ Each agent gets:
12675
12927
 
12676
12928
  \`\`\`typescript
12677
12929
  // Using Hive tools for parallel execution
12678
- hive_exec_start({ task: "01-fix-abort-tests" })
12679
- hive_exec_start({ task: "02-fix-batch-tests" })
12680
- hive_exec_start({ task: "03-fix-race-condition-tests" })
12930
+ hive_worktree_create({ task: "01-fix-abort-tests" })
12931
+ hive_worktree_create({ task: "02-fix-batch-tests" })
12932
+ hive_worktree_create({ task: "03-fix-race-condition-tests" })
12681
12933
  // All three run concurrently in isolated worktrees
12682
12934
  \`\`\`
12683
12935
 
@@ -12795,6 +13047,351 @@ From debugging session (2025-10-03):
12795
13047
  - All investigations completed concurrently
12796
13048
  - All fixes integrated successfully
12797
13049
  - Zero conflicts between agent changes`
13050
+ },
13051
+ {
13052
+ name: "docker-mastery",
13053
+ description: "Use when working with Docker containers — debugging container failures, writing Dockerfiles, docker-compose for integration tests, image optimization, or deploying containerized applications",
13054
+ template: `# Docker Mastery
13055
+
13056
+ ## Overview
13057
+
13058
+ Docker is a **platform for building, shipping, and running applications**, not just isolation.
13059
+
13060
+ Agents should think in containers: reproducible environments, declarative dependencies, isolated execution.
13061
+
13062
+ **Core principle:** Containers are not virtual machines. They share the kernel but isolate processes, filesystems, and networks.
13063
+
13064
+ **Violating the letter of these guidelines is violating the spirit of containerization.**
13065
+
13066
+ ## The Iron Law
13067
+
13068
+ \`\`\`
13069
+ UNDERSTAND THE CONTAINER BEFORE DEBUGGING INSIDE IT
13070
+ \`\`\`
13071
+
13072
+ Before exec'ing into a container or adding debug commands:
13073
+ 1. Check the image (what's installed?)
13074
+ 2. Check mounts (what host files are visible?)
13075
+ 3. Check environment variables (what config is passed?)
13076
+ 4. Check the Dockerfile (how was it built?)
13077
+
13078
+ Random debugging inside containers wastes time. Context first, then debug.
13079
+
13080
+ ## When to Use
13081
+
13082
+ Use this skill when working with:
13083
+ - **Container build failures** - Dockerfile errors, missing dependencies
13084
+ - **Test environment setup** - Reproducible test environments across machines
13085
+ - **Integration test orchestration** - Multi-service setups (DB + API + tests)
13086
+ - **Dockerfile authoring** - Writing efficient, maintainable Dockerfiles
13087
+ - **Image size optimization** - Reducing image size, layer caching
13088
+ - **Deployment** - Containerized application deployment
13089
+ - **Sandbox debugging** - Issues with Hive's Docker sandbox mode
13090
+
13091
+ **Use this ESPECIALLY when:**
13092
+ - Tests pass locally but fail in CI (environment mismatch)
13093
+ - "Works on my machine" problems
13094
+ - Need to test against specific dependency versions
13095
+ - Multiple services must coordinate (database + API)
13096
+ - Building for production deployment
13097
+
13098
+ ## Core Concepts
13099
+
13100
+ ### Images vs Containers
13101
+
13102
+ - **Image**: Read-only template (built from Dockerfile)
13103
+ - **Container**: Running instance of an image (ephemeral by default)
13104
+
13105
+ \`\`\`bash
13106
+ # Build once
13107
+ docker build -t myapp:latest .
13108
+
13109
+ # Run many times
13110
+ docker run --rm myapp:latest
13111
+ docker run --rm -e DEBUG=true myapp:latest
13112
+ \`\`\`
13113
+
13114
+ **Key insight:** Changes inside containers are lost unless committed or volumes are used.
13115
+
13116
+ ### Volumes & Mounts
13117
+
13118
+ Mount host directories into containers for persistence and code sharing:
13119
+
13120
+ \`\`\`bash
13121
+ # Mount current directory to /app in container
13122
+ docker run -v $(pwd):/app myapp:latest
13123
+
13124
+ # Hive worktrees are mounted automatically
13125
+ # Your code edits (via Read/Write/Edit tools) affect the host
13126
+ # Container sees the same files at runtime
13127
+ \`\`\`
13128
+
13129
+ **How Hive uses this:** Worktree is mounted into container, so file tools work on host, bash commands run in container.
13130
+
13131
+ ### Multi-Stage Builds
13132
+
13133
+ Minimize image size by using multiple FROM statements:
13134
+
13135
+ \`\`\`dockerfile
13136
+ # Build stage (large, has compilers)
13137
+ FROM node:22 AS builder
13138
+ WORKDIR /app
13139
+ COPY package.json bun.lockb ./
13140
+ RUN bun install
13141
+ COPY . .
13142
+ RUN bun run build
13143
+
13144
+ # Runtime stage (small, production only)
13145
+ FROM node:22-slim
13146
+ WORKDIR /app
13147
+ COPY --from=builder /app/dist ./dist
13148
+ COPY --from=builder /app/node_modules ./node_modules
13149
+ CMD ["node", "dist/index.js"]
13150
+ \`\`\`
13151
+
13152
+ **Result:** Builder tools (TypeScript, bundlers) not included in final image.
13153
+
13154
+ ### Docker Compose for Multi-Service Setups
13155
+
13156
+ Define multiple services in \`docker-compose.yml\`:
13157
+
13158
+ \`\`\`yaml
13159
+ version: '3.8'
13160
+ services:
13161
+ db:
13162
+ image: postgres:15
13163
+ environment:
13164
+ POSTGRES_PASSWORD: testpass
13165
+ ports:
13166
+ - "5432:5432"
13167
+
13168
+ api:
13169
+ build: .
13170
+ environment:
13171
+ DATABASE_URL: postgres://db:5432/testdb
13172
+ depends_on:
13173
+ - db
13174
+ ports:
13175
+ - "3000:3000"
13176
+ \`\`\`
13177
+
13178
+ Run with: \`docker-compose up -d\`
13179
+ Teardown with: \`docker-compose down\`
13180
+
13181
+ ### Network Modes
13182
+
13183
+ - **bridge** (default): Isolated network, containers can talk to each other by name
13184
+ - **host**: Container uses host's network directly (no isolation)
13185
+ - **none**: No network access
13186
+
13187
+ **When to use host mode:** Debugging network issues, accessing host services directly.
13188
+
13189
+ ## Common Patterns
13190
+
13191
+ ### Debug a Failing Container
13192
+
13193
+ **Problem:** Container exits immediately, logs unclear.
13194
+
13195
+ **Pattern:**
13196
+ 1. Run interactively with shell:
13197
+ \`\`\`bash
13198
+ docker run -it --entrypoint sh myapp:latest
13199
+ \`\`\`
13200
+ 2. Inspect filesystem, check if dependencies exist:
13201
+ \`\`\`bash
13202
+ ls /app
13203
+ which node
13204
+ cat /etc/os-release
13205
+ \`\`\`
13206
+ 3. Run command manually to see full error:
13207
+ \`\`\`bash
13208
+ node dist/index.js
13209
+ \`\`\`
13210
+
13211
+ ### Integration Tests with Docker Compose
13212
+
13213
+ **Pattern:**
13214
+ 1. Define services in \`docker-compose.test.yml\`
13215
+ 2. Add wait logic (wait for DB to be ready)
13216
+ 3. Run tests
13217
+ 4. Teardown
13218
+
13219
+ \`\`\`yaml
13220
+ # docker-compose.test.yml
13221
+ services:
13222
+ db:
13223
+ image: postgres:15
13224
+ environment:
13225
+ POSTGRES_PASSWORD: test
13226
+ test:
13227
+ build: .
13228
+ command: bun run test:integration
13229
+ depends_on:
13230
+ - db
13231
+ environment:
13232
+ DATABASE_URL: postgres://postgres:test@db:5432/testdb
13233
+ \`\`\`
13234
+
13235
+ \`\`\`bash
13236
+ docker-compose -f docker-compose.test.yml up --abort-on-container-exit
13237
+ docker-compose -f docker-compose.test.yml down
13238
+ \`\`\`
13239
+
13240
+ ### Optimize Dockerfile
13241
+
13242
+ **Anti-pattern:**
13243
+ \`\`\`dockerfile
13244
+ FROM node:22
13245
+ WORKDIR /app
13246
+ COPY . . # Copies everything (including node_modules, .git)
13247
+ RUN bun install # Invalidates cache on any file change
13248
+ CMD ["bun", "run", "start"]
13249
+ \`\`\`
13250
+
13251
+ **Optimized:**
13252
+ \`\`\`dockerfile
13253
+ FROM node:22-slim # Use slim variant
13254
+ WORKDIR /app
13255
+
13256
+ # Copy dependency files first (cache layer)
13257
+ COPY package.json bun.lockb ./
13258
+ RUN bun install --production
13259
+
13260
+ # Copy source code (changes frequently)
13261
+ COPY src ./src
13262
+ COPY tsconfig.json ./
13263
+
13264
+ CMD ["bun", "run", "start"]
13265
+ \`\`\`
13266
+
13267
+ **Add \`.dockerignore\`:**
13268
+ \`\`\`
13269
+ node_modules
13270
+ .git
13271
+ .env
13272
+ *.log
13273
+ dist
13274
+ .DS_Store
13275
+ \`\`\`
13276
+
13277
+ ### Handle Missing Dependencies
13278
+
13279
+ **Problem:** Command fails with "not found" in container.
13280
+
13281
+ **Pattern:**
13282
+ 1. Check if dependency is in image:
13283
+ \`\`\`bash
13284
+ docker run -it myapp:latest which git
13285
+ \`\`\`
13286
+ 2. If missing, add to Dockerfile:
13287
+ \`\`\`dockerfile
13288
+ RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
13289
+ \`\`\`
13290
+ 3. Or use a richer base image (e.g., \`node:22\` instead of \`node:22-slim\`).
13291
+
13292
+ ## Hive Sandbox Integration
13293
+
13294
+ ### How Hive Wraps Commands
13295
+
13296
+ When sandbox mode is active (\`sandbox: 'docker'\` in config):
13297
+ 1. Hive hook intercepts bash commands before execution
13298
+ 2. Wraps with \`docker run --rm -v <worktree>:/workspace -w /workspace <image> sh -c "<command>"\`
13299
+ 3. Command runs in container, but file edits (Read/Write/Edit) still affect host
13300
+
13301
+ **Workers are unaware** — they issue normal bash commands, Hive handles containerization.
13302
+
13303
+ ### When Host Access is Needed
13304
+
13305
+ Some operations MUST run on host:
13306
+ - **Git operations** (commit, push, branch) — repo state is on host
13307
+ - **Host-level tools** (Docker itself, system config)
13308
+ - **Cross-worktree operations** (accessing main repo from worktree)
13309
+
13310
+ **Pattern:** Use \`HOST:\` prefix to escape sandbox:
13311
+ \`\`\`bash
13312
+ HOST: git status
13313
+ HOST: docker ps
13314
+ \`\`\`
13315
+
13316
+ **If you need host access frequently:** Report as blocked and ask user if sandbox should be disabled for this task.
13317
+
13318
+ ### Persistent vs Ephemeral Containers
13319
+
13320
+ **Current (v1.2.0):** Each command runs \`docker run --rm\` (ephemeral). State does NOT persist.
13321
+
13322
+ Example: \`npm install lodash\` in one command → not available in next command.
13323
+
13324
+ **Workaround:** Install dependencies in Dockerfile, not at runtime.
13325
+
13326
+ **Future:** \`docker exec\` will reuse containers, persisting state across commands.
13327
+
13328
+ ### Auto-Detected Images
13329
+
13330
+ Hive detects runtime from project files:
13331
+ - \`package.json\` → \`node:22-slim\`
13332
+ - \`requirements.txt\` / \`pyproject.toml\` → \`python:3.12-slim\`
13333
+ - \`go.mod\` → \`golang:1.22-slim\`
13334
+ - \`Cargo.toml\` → \`rust:1.77-slim\`
13335
+ - \`Dockerfile\` → Builds from project Dockerfile
13336
+ - Fallback → \`ubuntu:24.04\`
13337
+
13338
+ **Override:** Set \`dockerImage\` in config (\`~/.config/opencode/agent_hive.json\`).
13339
+
13340
+ ## Red Flags - STOP
13341
+
13342
+ If you catch yourself:
13343
+ - Installing packages on host instead of in Dockerfile
13344
+ - Running \`docker build\` without \`.dockerignore\` (cache invalidation)
13345
+ - Using \`latest\` tag in production (non-reproducible)
13346
+ - Ignoring container exit codes (hides failures)
13347
+ - Assuming state persists between \`docker run --rm\` commands
13348
+ - Using absolute host paths in Dockerfile (not portable)
13349
+ - Copying secrets into image layers (leaks credentials)
13350
+
13351
+ **ALL of these mean: STOP. Review pattern.**
13352
+
13353
+ ## Anti-Patterns
13354
+
13355
+ | Excuse | Reality |
13356
+ |--------|---------|
13357
+ | "I'll just run it on host" | Container mismatch bugs are worse to debug later. Build happens in container anyway. |
13358
+ | "Works in my container, don't need CI" | CI uses different cache state. Always test in CI-like environment. |
13359
+ | "I'll optimize the Dockerfile later" | Later never comes. Large images slow down deployments now. |
13360
+ | "latest tag is fine for dev" | Dev should match prod. Pin versions or face surprises. |
13361
+ | "Don't need .dockerignore, COPY is fast" | Invalidates cache on every file change. Wastes minutes per build. |
13362
+ | "Install at runtime, not in image" | Ephemeral containers lose state. Slows down every command. |
13363
+ | "Skip depends_on, services start fast" | Race conditions in integration tests. Use wait-for-it or health checks. |
13364
+
13365
+ ## Verification Before Completion
13366
+
13367
+ Before marking Docker work complete:
13368
+
13369
+ - [ ] Container runs successfully: \`docker run --rm <image> <command>\` exits 0
13370
+ - [ ] Tests pass inside container (not just on host)
13371
+ - [ ] No host pollution (dependencies installed in container, not host)
13372
+ - [ ] \`.dockerignore\` exists if using \`COPY . .\`
13373
+ - [ ] Image tags are pinned (not \`latest\`) for production
13374
+ - [ ] Multi-stage build used if applicable (separate build/runtime)
13375
+ - [ ] Integration tests teardown properly (\`docker-compose down\`)
13376
+
13377
+ **If any fail:** Don't claim success. Fix or report blocker.
13378
+
13379
+ ## Quick Reference
13380
+
13381
+ | Task | Command Pattern |
13382
+ |------|----------------|
13383
+ | **Debug container** | \`docker run -it --entrypoint sh <image>\` |
13384
+ | **Run with mounts** | \`docker run -v $(pwd):/app <image>\` |
13385
+ | **Multi-service tests** | \`docker-compose up --abort-on-container-exit\` |
13386
+ | **Check image contents** | \`docker run --rm <image> ls /app\` |
13387
+ | **Optimize build** | Add \`.dockerignore\`, use multi-stage, pin versions |
13388
+ | **Escape Hive sandbox** | Prefix with \`HOST:\` (e.g., \`HOST: git status\`) |
13389
+
13390
+ ## Related Skills
13391
+
13392
+ - **hive_skill:systematic-debugging** - When container behavior is unexpected
13393
+ - **hive_skill:test-driven-development** - Write tests that run in containers
13394
+ - **hive_skill:verification-before-completion** - Verify tests pass in container before claiming done`
12798
13395
  },
12799
13396
  {
12800
13397
  name: "executing-plans",
@@ -12832,7 +13429,7 @@ Only \`done\` satisfies dependencies (not \`blocked\`, \`failed\`, \`partial\`,
12832
13429
  ### Step 3: Execute Batch
12833
13430
 
12834
13431
  For each task in the batch:
12835
- 1. Mark as in_progress via \`hive_exec_start()\`
13432
+ 1. Mark as in_progress via \`hive_worktree_create()\`
12836
13433
  2. Follow each step exactly (plan has bite-sized steps)
12837
13434
  3. Run verifications as specified
12838
13435
  4. Mark as completed
@@ -12843,6 +13440,11 @@ When batch complete:
12843
13440
  - Show verification output
12844
13441
  - Say: "Ready for feedback."
12845
13442
 
13443
+ ### Step 4.5: Post-Batch Hygienic Review
13444
+
13445
+ After the batch report, ask the operator if they want a Hygienic code review for the batch.
13446
+ If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\` and apply feedback before starting the next batch.
13447
+
12846
13448
  ### Step 5: Continue
12847
13449
  Based on feedback:
12848
13450
  - Apply changes if needed
@@ -12852,8 +13454,8 @@ Based on feedback:
12852
13454
  ### Step 6: Complete Development
12853
13455
 
12854
13456
  After all tasks complete and verified:
12855
- - Announce: "I'm using the finishing-a-development-branch skill to complete this work."
12856
- - **REQUIRED SUB-SKILL:** Use hive_skill:finishing-a-development-branch
13457
+ - Announce: "I'm using the verification-before-completion skill to complete this work."
13458
+ - **REQUIRED SUB-SKILL:** Use hive_skill:verification-before-completion
12857
13459
  - Follow that skill to verify tests, present options, execute choice
12858
13460
 
12859
13461
  ## When to Stop and Ask for Help
@@ -12881,70 +13483,10 @@ After all tasks complete and verified:
12881
13483
  - Reference skills when plan says to
12882
13484
  - Between batches: just report and wait
12883
13485
  - Stop when blocked, don't guess`
12884
- },
12885
- {
12886
- name: "onboarding",
12887
- description: "Ask about workflow preferences and store them in .hive/contexts/preferences.md before proceeding.",
12888
- template: `# Onboarding Preferences
12889
-
12890
- ## Overview
12891
-
12892
- Gather workflow preferences so the assistant can match the user's desired working style.
12893
-
12894
- ## When to Ask
12895
-
12896
- - **Immediately when the skill is loaded**, before any other work.
12897
- - If \`.hive/contexts/preferences.md\` does not exist, start onboarding.
12898
- - If later a decision is ambiguous and preferences are missing, ask again.
12899
-
12900
- ## Preference Storage
12901
-
12902
- Use \`hive_context_write\` to write \`.hive/contexts/preferences.md\` with this exact template:
12903
-
12904
- \`\`\`
12905
- # Preferences
12906
-
12907
- ## Exploration Style
12908
- sync
12909
-
12910
- ## Research Depth
12911
- medium
12912
-
12913
- ## Confirmation Level
12914
- standard
12915
-
12916
- ## Commit Behavior
12917
- ask-before-commit
12918
- \`\`\`
12919
-
12920
- ## If Preferences Already Exist
12921
-
12922
- Follow the same pattern used in \`packages/vscode-hive/src/tools/plan.ts\`:
12923
-
12924
- 1. Use \`contextService.list(feature)\` to detect existing contexts.
12925
- 2. Ask **"Preferences already exist. Keep or overwrite?"** using the \`question()\` tool.
12926
- 3. If keep → continue using existing preferences.
12927
- 4. If overwrite → collect new answers and write them with \`hive_context_write\`.
12928
-
12929
- ## Questions to Ask (Always use \`question()\`)
12930
-
12931
- Ask one at a time, with the provided options. Store the answers in \`.hive/contexts/preferences.md\`.
12932
-
12933
- 1. **Exploration Style:** sync | async
12934
- 2. **Research Depth:** shallow | medium | deep
12935
- 3. **Confirmation Level:** minimal | standard | high
12936
- 4. **Commit Behavior:** ask-before-commit | auto-commit | never-commit
12937
-
12938
- ## Requirements
12939
-
12940
- - Use the \`question()\` tool (no plain text questions).
12941
- - Ask immediately when the skill loads if preferences are missing.
12942
- - If later a decision is ambiguous and preferences are missing, ask again.
12943
- - Always store answers using \`hive_context_write\` with the template above.`
12944
13486
  },
12945
13487
  {
12946
13488
  name: "parallel-exploration",
12947
- description: "Use when you need parallel, read-only exploration with hive_background_* or task() (Scout fan-out)",
13489
+ description: "Use when you need parallel, read-only exploration with task() (Scout fan-out)",
12948
13490
  template: `# Parallel Exploration (Scout Fan-Out)
12949
13491
 
12950
13492
  ## Overview
@@ -12955,11 +13497,7 @@ When you need to answer "where/how does X work?" across multiple domains (codeba
12955
13497
 
12956
13498
  **Safe in Planning mode:** This is read-only exploration. It is OK to use during exploratory research even when there is no feature, no plan, and no approved tasks.
12957
13499
 
12958
- **This skill is for read-only research.** For parallel implementation work, use \`hive_skill("dispatching-parallel-agents")\` with \`hive_exec_start\`.
12959
-
12960
- **Two valid execution paths:**
12961
- - **Path A (Hive background tools):** Use \`hive_background_task\`, \`hive_background_output\`, \`hive_background_cancel\` when available.
12962
- - **Path B (Task mode):** Use native \`task()\` for delegation when background tools are not registered.
13500
+ **This skill is for read-only research.** For parallel implementation work, use \`hive_skill("dispatching-parallel-agents")\` with \`hive_worktree_create\`.
12963
13501
 
12964
13502
  ## When to Use
12965
13503
 
@@ -12998,51 +13536,16 @@ Split your investigation into 2-4 independent sub-questions. Good decomposition:
12998
13536
  - "What is X?" then "How is X used?" (second depends on first)
12999
13537
  - "Find the bug" then "Fix the bug" (not read-only)
13000
13538
 
13001
- ### 2. Spawn Background Tasks (Fan-Out)
13539
+ ### 2. Spawn Tasks (Fan-Out)
13002
13540
 
13003
13541
  Launch all tasks before waiting for any results:
13004
13542
 
13005
13543
  \`\`\`typescript
13006
- // Path A: Hive background tools (when available)
13007
- // Fan-out: spawn all tasks first
13008
- hive_background_task({
13009
- agent: "scout-researcher",
13010
- description: "Find hive_background_task implementation",
13011
- prompt: \`Where is hive_background_task implemented and registered?
13012
- - Find the tool definition
13013
- - Find the plugin registration
13014
- - Return file paths with line numbers\`,
13015
- sync: false
13016
- })
13017
-
13018
- hive_background_task({
13019
- agent: "scout-researcher",
13020
- description: "Analyze background task concurrency",
13021
- prompt: \`How does background task concurrency/queueing work?
13022
- - Find the manager/scheduler code
13023
- - Document the concurrency model
13024
- - Return file paths with evidence\`,
13025
- sync: false
13026
- })
13027
-
13028
- hive_background_task({
13029
- agent: "scout-researcher",
13030
- description: "Find parent notification mechanism",
13031
- prompt: \`How does parent notification work for background tasks?
13032
- - Where is the notification built?
13033
- - How is it sent to the parent session?
13034
- - Return file paths with evidence\`,
13035
- sync: false
13036
- })
13037
- \`\`\`
13038
-
13039
- \`\`\`typescript
13040
- // Path B: Task mode (native task tool)
13041
13544
  // Parallelize by issuing multiple task() calls in the same assistant message.
13042
13545
  task({
13043
13546
  subagent_type: 'scout-researcher',
13044
- description: 'Find hive_background_task implementation',
13045
- prompt: \`Where is hive_background_task implemented and registered?
13547
+ description: 'Find API route implementation',
13548
+ prompt: \`Where are API routes implemented and registered?
13046
13549
  - Find the tool definition
13047
13550
  - Find the plugin registration
13048
13551
  - Return file paths with line numbers\`,
@@ -13068,8 +13571,7 @@ task({
13068
13571
  \`\`\`
13069
13572
 
13070
13573
  **Key points:**
13071
- - Use \`agent: "scout-researcher"\` for read-only exploration
13072
- - Use \`sync: false\` to return immediately (non-blocking)
13574
+ - Use \`subagent_type: 'scout-researcher'\` for read-only exploration
13073
13575
  - Give each task a clear, focused \`description\`
13074
13576
  - Make prompts specific about what evidence to return
13075
13577
 
@@ -13084,35 +13586,7 @@ You'll receive a \`<system-reminder>\` notification when each task completes.
13084
13586
 
13085
13587
  ### 4. Collect Results
13086
13588
 
13087
- When notified of completion, retrieve results:
13088
-
13089
- \`\`\`typescript
13090
- // Path A: Hive background tools
13091
- // Get output from completed task
13092
- hive_background_output({
13093
- task_id: "task-abc123",
13094
- block: false // Don't wait, task already done
13095
- })
13096
- \`\`\`
13097
-
13098
- **For incremental output (long-running tasks):**
13099
-
13100
- \`\`\`typescript
13101
- // First call - get initial output
13102
- hive_background_output({
13103
- task_id: "task-abc123",
13104
- block: true, // Wait for output
13105
- timeout: 30000 // 30 second timeout
13106
- })
13107
- // Returns: { output: "...", cursor: "5" }
13108
-
13109
- // Later call - get new output since cursor
13110
- hive_background_output({
13111
- task_id: "task-abc123",
13112
- cursor: "5", // Resume from message 5
13113
- block: true
13114
- })
13115
- \`\`\`
13589
+ When each task completes, its result is returned directly. Collect the outputs from each task and proceed to synthesis.
13116
13590
 
13117
13591
  ### 5. Synthesize Findings
13118
13592
 
@@ -13123,18 +13597,9 @@ Combine results from all tasks:
13123
13597
 
13124
13598
  ### 6. Cleanup (If Needed)
13125
13599
 
13126
- Cancel tasks that are no longer needed:
13600
+ No manual cancellation is required in task mode.
13127
13601
 
13128
- \`\`\`typescript
13129
- // Path A: Hive background tools
13130
- // Cancel specific task
13131
- hive_background_cancel({ task_id: "task-abc123" })
13132
-
13133
- // Cancel all your background tasks
13134
- hive_background_cancel({ all: true })
13135
- \`\`\`
13136
-
13137
- ## Prompt Templates
13602
+ ## Prompt Templates
13138
13603
 
13139
13604
  ### Codebase Slice
13140
13605
 
@@ -13180,48 +13645,20 @@ Return:
13180
13645
 
13181
13646
  ## Real Example
13182
13647
 
13183
- **Investigation:** "How does the background task system work?"
13648
+ **Investigation:** "How does the API routing system work?"
13184
13649
 
13185
13650
  **Decomposition:**
13186
- 1. Implementation: Where is \`hive_background_task\` tool defined?
13187
- 2. Concurrency: How does task scheduling/queueing work?
13188
- 3. Notifications: How does parent session get notified?
13651
+ 1. Implementation: Where are API routes defined?
13652
+ 2. Routing: How does route registration work?
13653
+ 3. Notifications: How are errors surfaced to the caller?
13189
13654
 
13190
13655
  **Fan-out:**
13191
13656
  \`\`\`typescript
13192
- // Path A: Hive background tools
13193
- // Task 1: Implementation
13194
- hive_background_task({
13195
- agent: "scout-researcher",
13196
- description: "Find hive_background_task implementation",
13197
- prompt: "Where is hive_background_task implemented? Find tool definition and registration.",
13198
- sync: false
13199
- })
13200
-
13201
- // Task 2: Concurrency
13202
- hive_background_task({
13203
- agent: "scout-researcher",
13204
- description: "Analyze concurrency model",
13205
- prompt: "How does background task concurrency work? Find the manager/scheduler.",
13206
- sync: false
13207
- })
13208
-
13209
- // Task 3: Notifications
13210
- hive_background_task({
13211
- agent: "scout-researcher",
13212
- description: "Find notification mechanism",
13213
- prompt: "How are parent sessions notified of task completion?",
13214
- sync: false
13215
- })
13216
- \`\`\`
13217
-
13218
- \`\`\`typescript
13219
- // Path B: Task mode (native task tool)
13220
13657
  // Parallelize by issuing multiple task() calls in the same assistant message.
13221
13658
  task({
13222
13659
  subagent_type: 'scout-researcher',
13223
- description: 'Find hive_background_task implementation',
13224
- prompt: 'Where is hive_background_task implemented? Find tool definition and registration.',
13660
+ description: 'Find API route implementation',
13661
+ prompt: 'Where are API routes implemented? Find tool definition and registration.',
13225
13662
  });
13226
13663
 
13227
13664
  task({
@@ -13249,20 +13686,12 @@ task({
13249
13686
  **Spawning sequentially (defeats the purpose):**
13250
13687
  \`\`\`typescript
13251
13688
  // BAD: Wait for each before spawning next
13252
- const result1 = await hive_background_task({ ..., sync: true })
13253
- const result2 = await hive_background_task({ ..., sync: true })
13254
- \`\`\`
13255
-
13256
- \`\`\`typescript
13257
- // GOOD: Spawn all, then collect
13258
- hive_background_task({ ..., sync: false }) // Returns immediately
13259
- hive_background_task({ ..., sync: false }) // Returns immediately
13260
- hive_background_task({ ..., sync: false }) // Returns immediately
13261
- // ... later, collect results with hive_background_output
13689
+ await task({ ... });
13690
+ await task({ ... });
13262
13691
  \`\`\`
13263
13692
 
13264
13693
  \`\`\`typescript
13265
- // GOOD (task mode): Spawn all in the same assistant message
13694
+ // GOOD: Spawn all in the same assistant message
13266
13695
  task({ ... });
13267
13696
  task({ ... });
13268
13697
  task({ ... });
@@ -13291,9 +13720,7 @@ task({ ... });
13291
13720
 
13292
13721
  After using this pattern, verify:
13293
13722
  - [ ] All tasks spawned before collecting any results (true fan-out)
13294
- - [ ] Received notifications for completed tasks (Path A)
13295
- - [ ] Successfully retrieved output with \`hive_background_output\` (Path A)
13296
- - [ ] Verified \`task()\` fan-out pattern used when in task mode (Path B)
13723
+ - [ ] Verified \`task()\` fan-out pattern used for parallel exploration
13297
13724
  - [ ] Synthesized findings into coherent answer`
13298
13725
  },
13299
13726
  {
@@ -13947,7 +14374,7 @@ Never fix bugs without a test.
13947
14374
 
13948
14375
  ## Testing Anti-Patterns
13949
14376
 
13950
- When adding mocks or test utilities, read @testing-anti-patterns.md to avoid common pitfalls:
14377
+ When adding mocks or test utilities, avoid common pitfalls:
13951
14378
  - Testing mock behavior instead of real behavior
13952
14379
  - Adding test-only methods to production classes
13953
14380
  - Mocking without understanding dependencies
@@ -14210,6 +14637,12 @@ Always include **Depends on** for each task. Use \`none\` to enable parallel sta
14210
14637
  **Verify**:
14211
14638
  - [ ] Run: \`{command}\` → {expected}
14212
14639
  - [ ] {Additional acceptance criteria}
14640
+
14641
+ All verification MUST be agent-executable (no human intervention):
14642
+ ✅ \`bun test\` → all pass
14643
+ ✅ \`curl -X POST /api/x\` → 201
14644
+ ❌ "User manually tests..."
14645
+ ❌ "Visually confirm..."
14213
14646
  \`\`\`\`
14214
14647
 
14215
14648
  ## Remember
@@ -14218,6 +14651,7 @@ Always include **Depends on** for each task. Use \`none\` to enable parallel sta
14218
14651
  - Exact commands with expected output
14219
14652
  - Reference relevant skills with @ syntax
14220
14653
  - DRY, YAGNI, TDD, frequent commits
14654
+ - All acceptance criteria must be agent-executable (zero human intervention)
14221
14655
 
14222
14656
  ## Execution Handoff
14223
14657
 
@@ -14374,7 +14808,7 @@ Hybrid agent: plans AND orchestrates. Phase-aware, skills on-demand.
14374
14808
 
14375
14809
  ## Phase Detection (First Action)
14376
14810
 
14377
- Run \`hive_status()\` or \`hive_feature_list()\` to detect phase:
14811
+ Run \`hive_status()\` to detect phase:
14378
14812
 
14379
14813
  | Feature State | Phase | Active Section |
14380
14814
  |---------------|-------|----------------|
@@ -14400,18 +14834,16 @@ Run \`hive_status()\` or \`hive_feature_list()\` to detect phase:
14400
14834
  ### Canonical Delegation Threshold
14401
14835
 
14402
14836
  - Delegate to Scout when you cannot name the file path upfront, expect to inspect 2+ files, or the question is open-ended ("how/where does X work?").
14403
- - Prefer \`hive_background_task(agent: "scout-researcher", sync: true, ...)\` for single investigations; use \`sync: false\` only for multi-scout fan-out.
14837
+ - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
14404
14838
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
14405
14839
 
14406
14840
  ### Delegation
14407
14841
 
14408
- - Single-scout research → \`hive_background_task(agent: "scout-researcher", sync: true, ...)\` (blocks until complete, simpler flow)
14409
- - Parallel exploration → Load \`hive_skill("parallel-exploration")\` and follow the task vs hive mode delegation guidance.
14410
- - Implementation → \`hive_exec_start(task)\` (spawns Forager)
14411
-
14412
- In task mode, use task() for research fan-out; in hive mode, use hive_background_task.
14842
+ - Single-scout research → \`task({ subagent_type: "scout-researcher", prompt: "..." })\`
14843
+ - Parallel exploration → Load \`hive_skill("parallel-exploration")\` and follow the task mode delegation guidance.
14844
+ - Implementation → \`hive_worktree_create({ task: "01-task-name" })\` (creates worktree + Forager)
14413
14845
 
14414
- During Planning, default to synchronous exploration (\`sync: true\`). If async/parallel exploration would help, ask the user via \`question()\`.
14846
+ During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
14415
14847
 
14416
14848
  ### Context Persistence
14417
14849
 
@@ -14420,6 +14852,8 @@ Save discoveries with \`hive_context_write\`:
14420
14852
  - User preferences
14421
14853
  - Research findings
14422
14854
 
14855
+ When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
14856
+
14423
14857
  ### Checkpoints
14424
14858
 
14425
14859
  Before major transitions, verify:
@@ -14427,14 +14861,32 @@ Before major transitions, verify:
14427
14861
  - [ ] Scope defined?
14428
14862
  - [ ] No critical ambiguities?
14429
14863
 
14864
+ ### Turn Termination
14865
+
14866
+ Valid endings:
14867
+ - Ask a concrete question
14868
+ - Update draft + ask a concrete question
14869
+ - Explicitly state you are waiting on background work (tool/task)
14870
+ - Auto-transition to the next required action
14871
+
14872
+ NEVER end with:
14873
+ - "Let me know if you have questions"
14874
+ - Summary without a follow-up action
14875
+ - "When you're ready..."
14876
+
14430
14877
  ### Loading Skills (On-Demand)
14431
14878
 
14432
14879
  Load when detailed guidance needed:
14433
14880
  - \`hive_skill("brainstorming")\` - exploring ideas and requirements
14434
14881
  - \`hive_skill("writing-plans")\` - structuring implementation plans
14435
14882
  - \`hive_skill("dispatching-parallel-agents")\` - parallel task delegation
14436
- - \`hive_skill("parallel-exploration")\` - parallel read-only research via task() or hive_background_task (Scout fan-out)
14883
+ - \`hive_skill("parallel-exploration")\` - parallel read-only research via task() (Scout fan-out)
14437
14884
  - \`hive_skill("executing-plans")\` - step-by-step plan execution
14885
+ - \`hive_skill("systematic-debugging")\` - encountering bugs, test failures, or unexpected behavior
14886
+ - \`hive_skill("test-driven-development")\` - implementing features with TDD approach
14887
+ - \`hive_skill("verification-before-completion")\` - before claiming work is complete or creating PRs
14888
+ - \`hive_skill("docker-mastery")\` - working with Docker containers, debugging, docker-compose
14889
+ - \`hive_skill("agents-md-mastery")\` - bootstrapping/updating AGENTS.md, quality review
14438
14890
 
14439
14891
  Load ONE skill at a time. Only when you need guidance beyond this prompt.
14440
14892
 
@@ -14457,6 +14909,14 @@ Load ONE skill at a time. Only when you need guidance beyond this prompt.
14457
14909
  | Premature abstraction | "Abstract or inline?" |
14458
14910
  | Over-validation | "Minimal or comprehensive checks?" |
14459
14911
 
14912
+ ### Challenge User Assumptions
14913
+
14914
+ When a proposal relies on fragile assumptions, challenge them explicitly:
14915
+
14916
+ - Identify the assumption and state it plainly.
14917
+ - Ask what changes if the assumption is wrong.
14918
+ - Offer a lean fallback that still meets core goals.
14919
+
14460
14920
  ### Gap Classification
14461
14921
 
14462
14922
  | Gap | Action |
@@ -14493,7 +14953,7 @@ After review decision, offer execution choice (subagent-driven vs parallel sessi
14493
14953
 
14494
14954
  - Research BEFORE asking (use \`hive_skill("parallel-exploration")\` for multi-domain research)
14495
14955
  - Save draft as working memory
14496
- - Don't implement (no edits/worktrees). Read-only exploration is allowed (local tools + Scout via hive_background_task).
14956
+ - Don't implement (no edits/worktrees). Read-only exploration is allowed (local tools + Scout via task()).
14497
14957
 
14498
14958
  ---
14499
14959
 
@@ -14522,24 +14982,15 @@ Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **b
14522
14982
  ### Worker Spawning
14523
14983
 
14524
14984
  \`\`\`
14525
- hive_exec_start({ task: "01-task-name" }) // Creates worktree + Forager
14985
+ hive_worktree_create({ task: "01-task-name" }) // Creates worktree + Forager
14526
14986
  \`\`\`
14527
14987
 
14528
14988
  ### After Delegation
14529
14989
 
14530
- 1. Wait for the completion notification (no polling required)
14531
- 2. Use \`hive_worker_status()\` for spot checks or if you suspect notifications did not deliver
14532
- 3. Use \`hive_background_output\` only if interim output is explicitly needed, or after completion
14533
- 4. When calling \`hive_background_output\`, choose a timeout (30-120s) based on task size
14534
- 5. If blocked: \`question()\` → user decision → \`continueFrom: "blocked"\`
14535
-
14536
- ### Observation Polling (Recommended)
14537
-
14538
- - Prefer completion notifications over polling
14539
- - Use \`hive_worker_status()\` for observation-based spot checks
14540
- - Avoid tight loops with \`hive_background_output\`; if needed, wait 30-60s between checks
14541
- - If you suspect notifications did not deliver, do a single \`hive_worker_status()\` check first
14542
- - If you need to fetch final results, call \`hive_background_output({ task_id, block: false })\` after the completion notice
14990
+ 1. \`task()\` is BLOCKING — when it returns, the worker is DONE
14991
+ 2. Immediately call \`hive_status()\` to check the new task state and find next runnable tasks
14992
+ 3. If task status is blocked: read blocker info → \`question()\` → user decision → resume with \`continueFrom: "blocked"\`
14993
+ 4. Do NOT wait for notifications or poll — the result is already available when \`task()\` returns
14543
14994
 
14544
14995
  ### Failure Recovery
14545
14996
 
@@ -14549,6 +15000,24 @@ hive_exec_start({ task: "01-task-name" }) // Creates worktree + Forager
14549
15000
 
14550
15001
  \`hive_merge({ task: "01-task-name" })\` after verification
14551
15002
 
15003
+ ### Post-Batch Review (Hygienic)
15004
+
15005
+ After completing and merging a batch:
15006
+ 1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15007
+ 2. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
15008
+ 3. Apply feedback before starting the next batch.
15009
+
15010
+ ### AGENTS.md Maintenance
15011
+
15012
+ After feature completion (all tasks merged):
15013
+ 1. Sync context findings to AGENTS.md: \`hive_agents_md({ action: "sync", feature: "feature-name" })\`
15014
+ 2. Review the proposed diff with the user
15015
+ 3. Apply approved changes to keep AGENTS.md current
15016
+
15017
+ For projects without AGENTS.md:
15018
+ - Bootstrap with \`hive_agents_md({ action: "init" })\`
15019
+ - Generates initial documentation from codebase analysis
15020
+
14552
15021
  ### Orchestration Iron Laws
14553
15022
 
14554
15023
  - Delegate by default
@@ -14566,11 +15035,19 @@ hive_exec_start({ task: "01-task-name" }) // Creates worktree + Forager
14566
15035
  - Ask user before consulting Hygienic (Consultant/Reviewer/Debugger)
14567
15036
  - Load skills on-demand, one at a time
14568
15037
 
14569
- **Never:**
15038
+ ### Hard Blocks
15039
+
15040
+ NEVER violate:
14570
15041
  - Skip phase detection
14571
15042
  - Mix planning and orchestration in same action
14572
15043
  - Auto-load all skills at start
14573
15044
 
15045
+ ### Anti-Patterns
15046
+
15047
+ BLOCKING violations:
15048
+ - Ending a turn without a next action
15049
+ - Asking for user input in plain text instead of question()
15050
+
14574
15051
  **User Input:** ALWAYS use \`question()\` tool for any user input - NEVER ask questions via plain text. This ensures structured responses.
14575
15052
  `;
14576
15053
 
@@ -14581,25 +15058,38 @@ PLANNER, NOT IMPLEMENTER. "Do X" means "create plan for X".
14581
15058
 
14582
15059
  ## Intent Classification (First)
14583
15060
 
14584
- | Intent | Signals | Action |
14585
- |--------|---------|--------|
14586
- | Trivial | Single file, <10 lines | Do directly. No plan needed. |
14587
- | Simple | 1-2 files, <30 min | Light interview → quick plan |
14588
- | Complex | 3+ files, review needed | Full discovery → detailed plan |
14589
- | Refactor | Existing code changes | Safety: tests, rollback, blast radius |
14590
- | Greenfield | New feature | Research patterns BEFORE asking. Delegate to Scout via \`hive_background_task(agent: "scout-researcher", sync: true, ...)\` for single investigations. |
15061
+ | Intent | Signals | Strategy | Action |
15062
+ |--------|---------|----------|--------|
15063
+ | Trivial | Single file, <10 lines | N/A | Do directly. No plan needed. |
15064
+ | Simple | 1-2 files, <30 min | Quick assessment | Light interview → quick plan |
15065
+ | Complex | 3+ files, review needed | Full discovery | Full discovery → detailed plan |
15066
+ | Refactor | Existing code changes | Safety-first: behavior preservation | Tests → blast radius → plan |
15067
+ | Greenfield | New feature | Discovery-first: explore before asking | Research → interview → plan |
15068
+ | Architecture | Cross-cutting, multi-system | Strategic: consult Scout | Deep research → plan |
14591
15069
 
14592
- During Planning, default to synchronous exploration (\`sync: true\`). If async/parallel exploration would help, ask the user via \`question()\`.
15070
+ During Planning, use \`task({ subagent_type: "scout-researcher", ... })\` for exploration (BLOCKING — returns when done). For parallel exploration, issue multiple \`task()\` calls in the same message.
14593
15071
 
14594
15072
  ## Self-Clearance Check (After Every Exchange)
14595
15073
 
14596
- □ Core objective clear?
14597
- □ Scope defined (IN/OUT)?
14598
- □ No critical ambiguities?
14599
- □ Approach decided?
15074
+ □ Core objective clearly defined?
15075
+ □ Scope boundaries established (IN/OUT)?
15076
+ □ No critical ambiguities remaining?
15077
+ □ Technical approach decided?
15078
+ □ Test strategy confirmed (TDD/tests-after/none)?
15079
+ □ No blocking questions outstanding?
14600
15080
 
14601
- ALL YES → Write plan
14602
- ANY NO → Ask the unclear thing
15081
+ ALL YES → Announce "Requirements clear. Generating plan." → Write plan
15082
+ ANY NO → Ask the specific unclear thing
15083
+
15084
+ ## Test Strategy (Ask Before Planning)
15085
+
15086
+ For Build and Refactor intents, ASK:
15087
+ "Should this include automated tests?"
15088
+ - TDD: Red-Green-Refactor per task
15089
+ - Tests after: Add test tasks after implementation
15090
+ - None: No unit/integration tests
15091
+
15092
+ Record decision in draft. Embed in plan tasks.
14603
15093
 
14604
15094
  ## AI-Slop Flags
14605
15095
 
@@ -14609,6 +15099,7 @@ ANY NO → Ask the unclear thing
14609
15099
  | Premature abstraction | "Extracted to utility" | "Abstract or inline?" |
14610
15100
  | Over-validation | "15 error checks for 3 inputs" | "Minimal or comprehensive error handling?" |
14611
15101
  | Documentation bloat | "Added JSDoc everywhere" | "None, minimal, or full docs?" |
15102
+ | Fragile assumption | "Assuming X is always true" | "If X is wrong, what should change?" |
14612
15103
 
14613
15104
  ## Gap Classification (Self-Review)
14614
15105
 
@@ -14618,6 +15109,18 @@ ANY NO → Ask the unclear thing
14618
15109
  | MINOR | FIX silently, note in summary |
14619
15110
  | AMBIGUOUS | Apply default, DISCLOSE in summary |
14620
15111
 
15112
+ ## Turn Termination
15113
+
15114
+ Valid endings:
15115
+ - Question to user (via question() tool)
15116
+ - Draft update + next question
15117
+ - Auto-transition to plan generation
15118
+
15119
+ NEVER end with:
15120
+ - "Let me know if you have questions"
15121
+ - Summary without follow-up action
15122
+ - "When you're ready..."
15123
+
14621
15124
  ## Draft as Working Memory
14622
15125
 
14623
15126
  Create draft on first exchange. Update after EVERY user response:
@@ -14666,9 +15169,9 @@ Each task MUST declare dependencies with **Depends on**:
14666
15169
  ### Canonical Delegation Threshold
14667
15170
 
14668
15171
  - Delegate to Scout when you cannot name the file path upfront, expect to inspect 2+ files, or the question is open-ended ("how/where does X work?").
14669
- - Prefer \`hive_background_task(agent: "scout-researcher", sync: true, ...)\` for single investigations; use \`sync: false\` only for multi-scout fan-out.
15172
+ - Prefer \`task({ subagent_type: "scout-researcher", prompt: "..." })\` for single investigations.
14670
15173
  - Local \`read/grep/glob\` is acceptable only for a single known file and a bounded question.
14671
- - When calling \`hive_background_output\`, choose a timeout (30-120s) based on task size.
15174
+ - When running parallel exploration, align with the skill guidance.
14672
15175
  `;
14673
15176
 
14674
15177
  // src/agents/swarm.ts
@@ -14695,6 +15198,8 @@ Use \`hive_status()\` to see **runnable** tasks (dependencies satisfied) and **b
14695
15198
  - When 2+ tasks are runnable: ask operator via \`question()\` before parallelizing
14696
15199
  - Record execution decisions with \`hive_context_write({ name: "execution-decisions", ... })\`
14697
15200
 
15201
+ When Scout returns substantial findings (3+ files discovered, architecture patterns, or key decisions), persist them to a feature context file via \`hive_context_write\`.
15202
+
14698
15203
  If tasks are missing **Depends on** metadata, ask the planner to revise the plan before executing.
14699
15204
 
14700
15205
  ### Standard Checks
@@ -14703,7 +15208,7 @@ If tasks are missing **Depends on** metadata, ask the planner to revise the plan
14703
15208
  2. Can I do it myself FOR SURE? REALLY?
14704
15209
  3. Does this require external system data (DBs/APIs/3rd-party tools)?
14705
15210
  → If external data needed: Load \`hive_skill("parallel-exploration")\` for parallel Scout fan-out
14706
- In task mode, use task() for research fan-out; in hive mode, use hive_background_task.
15211
+ In task mode, use task() for research fan-out.
14707
15212
  During Planning, default to synchronous exploration. If async exploration would help, ask the user via \`question()\` and follow the onboarding preferences.
14708
15213
  → Default: DELEGATE
14709
15214
 
@@ -14721,38 +15226,38 @@ During Planning, default to synchronous exploration. If async exploration would
14721
15226
  ## Worker Spawning
14722
15227
 
14723
15228
  \`\`\`
14724
- hive_exec_start({ task: "01-task-name" })
14725
- // If delegationRequired returned:
14726
- hive_background_task({ agent: "forager-worker", prompt: "...", sync: false })
15229
+ hive_worktree_create({ task: "01-task-name" })
14727
15230
  // If external system data is needed (parallel exploration):
14728
15231
  // Load hive_skill("parallel-exploration") for the full playbook, then:
14729
- // In task mode, use task() for research fan-out; in hive mode, use hive_background_task.
15232
+ // In task mode, use task() for research fan-out.
14730
15233
  \`\`\`
14731
15234
 
14732
- **Sync Mode Guidance:**
14733
- - \`sync: true\` Use for single-scout research when you need the result before continuing
14734
- - \`sync: false\` Use for parallel fan-out (multiple scouts) or when you can proceed without waiting
15235
+ **Delegation Guidance:**
15236
+ - \`task()\` is BLOCKING — returns when the worker is done
15237
+ - Call \`hive_status()\` immediately after to check new state and find next runnable tasks
15238
+ - For parallel fan-out, issue multiple \`task()\` calls in the same message
14735
15239
 
14736
- ## After Delegation - ALWAYS VERIFY
15240
+ ## After Delegation - VERIFY
14737
15241
 
15242
+ After every delegation, check:
14738
15243
  - Does it work as expected?
14739
- - Followed existing codebase pattern?
14740
- - Followed MUST DO and MUST NOT DO?
15244
+ - Followed existing codebase patterns?
15245
+ - Met MUST DO and MUST NOT DO requirements?
15246
+ - No unintended side effects?
14741
15247
 
14742
15248
  ## Blocker Handling
14743
15249
 
14744
15250
  When worker reports blocked:
14745
- 1. \`hive_worker_status()\` — read blocker info
15251
+ 1. \`hive_status()\` — read blocker info
14746
15252
  2. \`question()\` — ask user (NEVER plain text)
14747
- 3. \`hive_exec_start({ task, continueFrom: "blocked", decision })\`
15253
+ 3. \`hive_worktree_create({ task, continueFrom: "blocked", decision })\`
14748
15254
 
14749
15255
  ## Failure Recovery (After 3 Consecutive Failures)
14750
15256
 
14751
15257
  1. STOP all further edits
14752
15258
  2. REVERT to last known working state
14753
15259
  3. DOCUMENT what was attempted
14754
- 4. Consult: \`task({ subagent_type: "oracle", prompt: "Analyze..." })\`
14755
- 5. If Oracle cannot resolve → ASK USER
15260
+ 4. ASK USER via question() — present options and context
14756
15261
 
14757
15262
  ## Merge Strategy
14758
15263
 
@@ -14762,6 +15267,39 @@ hive_merge({ task: "01-task-name", strategy: "merge" })
14762
15267
 
14763
15268
  Merge only after verification passes.
14764
15269
 
15270
+ ### Post-Batch Review (Hygienic)
15271
+
15272
+ After completing and merging a batch:
15273
+ 1. Ask the user via \`question()\` if they want a Hygienic code review for the batch.
15274
+ 2. If yes, run \`task({ subagent_type: "hygienic", prompt: "Review implementation changes from the latest batch." })\`.
15275
+ 3. Apply feedback before starting the next batch.
15276
+
15277
+ ### AGENTS.md Maintenance
15278
+
15279
+ After completing and merging a batch:
15280
+ 1. Sync context findings to AGENTS.md: \`hive_agents_md({ action: "sync", feature: "feature-name" })\`
15281
+ 2. Review the proposed diff with the user
15282
+ 3. Apply approved changes to keep AGENTS.md current
15283
+
15284
+ For quality review of AGENTS.md content, load \`hive_skill("agents-md-mastery")\`.
15285
+
15286
+ For projects without AGENTS.md:
15287
+ - Bootstrap with \`hive_agents_md({ action: "init" })\`
15288
+ - Generates initial documentation from codebase analysis
15289
+
15290
+ ## Turn Termination
15291
+
15292
+ Valid endings:
15293
+ - Worker delegation (hive_worktree_create)
15294
+ - Status check (hive_status)
15295
+ - User question (question())
15296
+ - Merge (hive_merge)
15297
+
15298
+ NEVER end with:
15299
+ - "Let me know when you're ready"
15300
+ - Summary without next action
15301
+ - Waiting for something unspecified
15302
+
14765
15303
  ## Iron Laws
14766
15304
 
14767
15305
  **Never:**
@@ -14871,6 +15409,17 @@ When asked to retrieve raw data from external systems (MongoDB/Stripe/etc.):
14871
15409
  - GitHub: Permalinks with commit SHA
14872
15410
  - Docs: URL with section anchor
14873
15411
 
15412
+ ## Persistence
15413
+
15414
+ When operating within a feature context:
15415
+ - If findings are substantial (3+ files, architecture patterns, or key decisions):
15416
+ \`\`\`
15417
+ hive_context_write({
15418
+ name: "research-{topic}",
15419
+ content: "## {Topic}\\n\\nDate: {YYYY-MM-DD}\\n\\n## Context\\n\\n## Findings"
15420
+ })
15421
+ \`\`\`
15422
+
14874
15423
  ## Iron Laws
14875
15424
 
14876
15425
  **Never:**
@@ -14896,7 +15445,7 @@ Execute directly. NEVER delegate implementation. Work in isolation.
14896
15445
 
14897
15446
  These tools are FORBIDDEN:
14898
15447
  - \`task\` — Orchestrator's job
14899
- - \`hive_exec_start\` — You ARE the spawned worker
15448
+ - \`hive_worktree_create\` — You ARE the spawned worker
14900
15449
  - \`hive_merge\` — Orchestrator's job
14901
15450
 
14902
15451
  ## Allowed Research
@@ -14907,6 +15456,20 @@ CAN use for quick lookups:
14907
15456
  - \`ast_grep_search\` — AST patterns
14908
15457
  - \`glob\`, \`grep\`, \`read\` — Codebase exploration
14909
15458
 
15459
+ ## Resolve Before Blocking
15460
+
15461
+ Default to exploration, questions are LAST resort:
15462
+ 1. Read the referenced files and surrounding code
15463
+ 2. Search for similar patterns in the codebase
15464
+ 3. Try a reasonable approach based on conventions
15465
+
15466
+ Only report as blocked when:
15467
+ - Multiple approaches failed (tried 3+)
15468
+ - Decision requires business logic you can't infer
15469
+ - External dependency is missing or broken
15470
+
15471
+ Context inference: Before asking "what does X do?", READ X first.
15472
+
14910
15473
  ## Plan = READ ONLY
14911
15474
 
14912
15475
  CRITICAL: NEVER MODIFY THE PLAN FILE
@@ -14914,14 +15477,10 @@ CRITICAL: NEVER MODIFY THE PLAN FILE
14914
15477
  - MUST NOT edit, modify, or update plan
14915
15478
  - Only Orchestrator (Swarm) manages plan
14916
15479
 
14917
- ## Notepad Location
15480
+ ## Persistent Notes
14918
15481
 
14919
- Path: \`.hive/features/{feature}/notepads/\`
14920
- - learnings.md: Patterns, conventions, successful approaches
14921
- - issues.md: Problems, blockers, gotchas
14922
- - decisions.md: Architectural choices and rationales
14923
-
14924
- IMPORTANT: Always APPEND — never overwrite.
15482
+ For substantial discoveries (architecture patterns, key decisions, gotchas that affect multiple tasks):
15483
+ Use \`hive_context_write({ name: "learnings", content: "..." })\` to persist for future workers.
14925
15484
 
14926
15485
  ## Execution Flow
14927
15486
 
@@ -14932,7 +15491,17 @@ Read spec for:
14932
15491
  - **Must NOT do** (guardrails)
14933
15492
  - **Acceptance criteria**
14934
15493
 
14935
- ### 2. Implement
15494
+ ### 2. Orient (Pre-flight Before Coding)
15495
+ Before writing code:
15496
+ - Confirm dependencies are satisfied and required context is present
15497
+ - Read the referenced files and surrounding code
15498
+ - Search for similar patterns in the codebase
15499
+ - Identify the exact files/sections to touch (from references)
15500
+ - Decide the first failing test you will write (TDD)
15501
+ - Identify the test command(s) and inputs you will run
15502
+ - Plan the minimum change to reach green
15503
+
15504
+ ### 3. Implement
14936
15505
  Follow spec exactly. Use references for patterns.
14937
15506
 
14938
15507
  \`\`\`
@@ -14941,28 +15510,28 @@ edit(file, { old: "...", new: "..." }) // Implement
14941
15510
  bash("npm test") // Verify
14942
15511
  \`\`\`
14943
15512
 
14944
- ### 3. Verify
15513
+ ### 4. Verify
14945
15514
  Run acceptance criteria:
14946
15515
  - Tests pass
14947
15516
  - Build succeeds
14948
15517
  - lsp_diagnostics clean on changed files
14949
15518
 
14950
- ### 4. Report
15519
+ ### 5. Report
14951
15520
 
14952
15521
  **Success:**
14953
15522
  \`\`\`
14954
- hive_exec_complete({
15523
+ hive_worktree_commit({
14955
15524
  task: "current-task",
14956
15525
  summary: "Implemented X. Tests pass.",
14957
15526
  status: "completed"
14958
15527
  })
14959
15528
  \`\`\`
14960
15529
 
14961
- **CRITICAL: After hive_exec_complete, STOP IMMEDIATELY.**
15530
+ **CRITICAL: After hive_worktree_commit, STOP IMMEDIATELY.**
14962
15531
 
14963
15532
  **Blocked (need user decision):**
14964
15533
  \`\`\`
14965
- hive_exec_complete({
15534
+ hive_worktree_commit({
14966
15535
  task: "current-task",
14967
15536
  summary: "Progress on X. Blocked on Y.",
14968
15537
  status: "blocked",
@@ -14975,6 +15544,16 @@ hive_exec_complete({
14975
15544
  })
14976
15545
  \`\`\`
14977
15546
 
15547
+ ## Completion Checklist
15548
+
15549
+ Before calling hive_worktree_commit:
15550
+ - All tests in scope are run and passing (Record exact commands and results)
15551
+ - Build succeeds if required (Record exact command and result)
15552
+ - lsp_diagnostics clean on changed files (Record exact command and result)
15553
+ - Changes match the spec and references
15554
+ - No extra scope creep or unrelated edits
15555
+ - Summary includes what changed, why, and verification status
15556
+
14978
15557
  ## Failure Recovery
14979
15558
 
14980
15559
  After 3 consecutive failures:
@@ -14984,11 +15563,20 @@ After 3 consecutive failures:
14984
15563
 
14985
15564
  ## Iron Laws
14986
15565
 
15566
+ ### Docker Sandbox
15567
+
15568
+ When sandbox mode is active, ALL bash commands automatically run inside a Docker container.
15569
+ - Your commands are transparently wrapped — you don't need to do anything special
15570
+ - File edits (Read, Write, Edit tools) still work on the host filesystem (worktree is mounted)
15571
+ - If a command must run on the host (e.g., git operations), report as blocked and ask the user
15572
+ - If a command fails with "docker: command not found", report as blocked — the host needs Docker installed
15573
+ - For deeper Docker expertise, load \`hive_skill("docker-mastery")\`
15574
+
14987
15575
  **Never:**
14988
15576
  - Exceed task scope
14989
15577
  - Modify plan file
14990
- - Use \`task\` or \`hive_exec_start\`
14991
- - Continue after hive_exec_complete
15578
+ - Use \`task\` or \`hive_worktree_create\`
15579
+ - Continue after hive_worktree_commit
14992
15580
  - Skip verification
14993
15581
 
14994
15582
  **Always:**
@@ -15026,7 +15614,10 @@ Self-check before every critique:
15026
15614
 
15027
15615
  ### 2. Verification & Acceptance Criteria
15028
15616
  - Are criteria measurable and concrete?
15029
- - Red flags: "should work", "looks good", "properly handles"
15617
+ - Are they agent-executable (tool-runnable) without human judgment?
15618
+ - Do they specify exact commands + expected signals (exit code, output text, counts)?
15619
+ - Red flags: "should work", "looks good", "properly handles", "verify manually"
15620
+ - If manual checks are required, the plan must explain why automation is impossible
15030
15621
 
15031
15622
  ### 3. Context Completeness (90% Confidence)
15032
15623
  - Could a capable worker execute with 90% confidence?
@@ -15158,6 +15749,11 @@ import * as fs8 from "fs";
15158
15749
  import * as path4 from "path";
15159
15750
  import * as fs10 from "fs";
15160
15751
  import * as path6 from "path";
15752
+ import * as fs11 from "fs";
15753
+ import * as path7 from "path";
15754
+ import { existsSync as existsSync5 } from "fs";
15755
+ import { join as join8, sep } from "path";
15756
+ import { execSync } from "child_process";
15161
15757
  var __create = Object.create;
15162
15758
  var __getProtoOf = Object.getPrototypeOf;
15163
15759
  var __defProp2 = Object.defineProperty;
@@ -15995,7 +16591,7 @@ var DEFAULT_HIVE_CONFIG = {
15995
16591
  disableSkills: [],
15996
16592
  disableMcps: [],
15997
16593
  agentMode: "unified",
15998
- delegateMode: "task",
16594
+ sandbox: "none",
15999
16595
  agents: {
16000
16596
  "hive-master": {
16001
16597
  model: DEFAULT_AGENT_MODELS["hive-master"],
@@ -16049,16 +16645,12 @@ var FEATURE_FILE = "feature.json";
16049
16645
  var STATUS_FILE = "status.json";
16050
16646
  var REPORT_FILE = "report.md";
16051
16647
  var APPROVED_FILE = "APPROVED";
16052
- var JOURNAL_FILE = "journal.md";
16053
16648
  function normalizePath(filePath) {
16054
16649
  return filePath.replace(/\\/g, "/");
16055
16650
  }
16056
16651
  function getHivePath(projectRoot) {
16057
16652
  return path2.join(projectRoot, HIVE_DIR);
16058
16653
  }
16059
- function getJournalPath(projectRoot) {
16060
- return path2.join(getHivePath(projectRoot), JOURNAL_FILE);
16061
- }
16062
16654
  function getFeaturesPath(projectRoot) {
16063
16655
  return path2.join(getHivePath(projectRoot), FEATURES_DIR);
16064
16656
  }
@@ -16295,22 +16887,6 @@ function listFeatures(projectRoot) {
16295
16887
  return [];
16296
16888
  return fs22.readdirSync(featuresPath, { withFileTypes: true }).filter((d) => d.isDirectory()).map((d) => d.name);
16297
16889
  }
16298
- var JOURNAL_TEMPLATE = `# Hive Journal
16299
-
16300
- Audit trail of project learnings. Updated when trouble is resolved.
16301
-
16302
- ---
16303
-
16304
- <!-- Entry template:
16305
- ### YYYY-MM-DD: feature-name
16306
-
16307
- **Trouble**: What went wrong
16308
- **Resolution**: How it was fixed
16309
- **Constraint**: Never/Always rule derived (add to Iron Laws if recurring)
16310
- **See**: .hive/features/feature-name/plan.md
16311
- -->
16312
- `;
16313
-
16314
16890
  class FeatureService {
16315
16891
  projectRoot;
16316
16892
  constructor(projectRoot) {
@@ -16324,10 +16900,6 @@ class FeatureService {
16324
16900
  ensureDir(featurePath);
16325
16901
  ensureDir(getContextPath(this.projectRoot, name));
16326
16902
  ensureDir(getTasksPath(this.projectRoot, name));
16327
- const journalPath = getJournalPath(this.projectRoot);
16328
- if (!fileExists(journalPath)) {
16329
- fs3.writeFileSync(journalPath, JOURNAL_TEMPLATE);
16330
- }
16331
16903
  const feature = {
16332
16904
  name,
16333
16905
  status: "planning",
@@ -16596,6 +17168,27 @@ class TaskService {
16596
17168
  }
16597
17169
  buildSpecContent(params) {
16598
17170
  const { featureName, task, dependsOn, allTasks, planContent, contextFiles = [], completedTasks = [] } = params;
17171
+ const getTaskType = (planSection2, taskName) => {
17172
+ if (!planSection2) {
17173
+ return null;
17174
+ }
17175
+ const fileTypeMatches = Array.from(planSection2.matchAll(/-\s*(Create|Modify|Test):/gi)).map((match) => match[1].toLowerCase());
17176
+ const fileTypes = new Set(fileTypeMatches);
17177
+ if (fileTypes.size === 0) {
17178
+ return taskName.toLowerCase().includes("test") ? "testing" : null;
17179
+ }
17180
+ if (fileTypes.size === 1) {
17181
+ const onlyType = Array.from(fileTypes)[0];
17182
+ if (onlyType === "create")
17183
+ return "greenfield";
17184
+ if (onlyType === "test")
17185
+ return "testing";
17186
+ }
17187
+ if (fileTypes.has("modify")) {
17188
+ return "modification";
17189
+ }
17190
+ return null;
17191
+ };
16599
17192
  const specLines = [
16600
17193
  `# Task: ${task.folder}`,
16601
17194
  "",
@@ -16624,6 +17217,10 @@ class TaskService {
16624
17217
  specLines.push("_No plan section available._");
16625
17218
  }
16626
17219
  specLines.push("");
17220
+ const taskType = getTaskType(planSection, task.name);
17221
+ if (taskType) {
17222
+ specLines.push("## Task Type", "", taskType, "");
17223
+ }
16627
17224
  if (contextFiles.length > 0) {
16628
17225
  const contextCompiled = contextFiles.map((f) => `## ${f.name}
16629
17226
 
@@ -21474,6 +22071,12 @@ class ContextService {
21474
22071
  ensureDir(contextPath);
21475
22072
  const filePath = path4.join(contextPath, this.normalizeFileName(fileName));
21476
22073
  writeText(filePath, content);
22074
+ const totalChars = this.list(featureName).reduce((sum, c) => sum + c.content.length, 0);
22075
+ if (totalChars > 20000) {
22076
+ return `${filePath}
22077
+
22078
+ ⚠️ Context total: ${totalChars} chars (exceeds 20,000). Consider archiving older contexts with contextService.archive().`;
22079
+ }
21477
22080
  return filePath;
21478
22081
  }
21479
22082
  read(featureName, fileName) {
@@ -21519,6 +22122,37 @@ ${f.content}`);
21519
22122
 
21520
22123
  `);
21521
22124
  }
22125
+ archive(featureName) {
22126
+ const contexts = this.list(featureName);
22127
+ if (contexts.length === 0)
22128
+ return { archived: [], archivePath: "" };
22129
+ const contextPath = getContextPath(this.projectRoot, featureName);
22130
+ const archiveDir = path4.join(contextPath, "..", "archive");
22131
+ ensureDir(archiveDir);
22132
+ const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
22133
+ const archived = [];
22134
+ for (const ctx of contexts) {
22135
+ const archiveName = `${timestamp}_${ctx.name}.md`;
22136
+ const src = path4.join(contextPath, `${ctx.name}.md`);
22137
+ const dest = path4.join(archiveDir, archiveName);
22138
+ fs8.copyFileSync(src, dest);
22139
+ fs8.unlinkSync(src);
22140
+ archived.push(ctx.name);
22141
+ }
22142
+ return { archived, archivePath: archiveDir };
22143
+ }
22144
+ stats(featureName) {
22145
+ const contexts = this.list(featureName);
22146
+ if (contexts.length === 0)
22147
+ return { count: 0, totalChars: 0 };
22148
+ const sorted2 = [...contexts].sort((a, b) => new Date(a.updatedAt).getTime() - new Date(b.updatedAt).getTime());
22149
+ return {
22150
+ count: contexts.length,
22151
+ totalChars: contexts.reduce((sum, c) => sum + c.content.length, 0),
22152
+ oldest: sorted2[0].name,
22153
+ newest: sorted2[sorted2.length - 1].name
22154
+ };
22155
+ }
21522
22156
  normalizeFileName(name) {
21523
22157
  const normalized = name.replace(/\.md$/, "");
21524
22158
  return `${normalized}.md`;
@@ -21632,12 +22266,303 @@ class ConfigService {
21632
22266
  const config2 = this.get();
21633
22267
  return config2.disableMcps ?? [];
21634
22268
  }
21635
- getDelegateMode() {
22269
+ getSandboxConfig() {
21636
22270
  const config2 = this.get();
21637
- return config2.delegateMode ?? "task";
22271
+ const mode = config2.sandbox ?? "none";
22272
+ const image = config2.dockerImage;
22273
+ const persistent = config2.persistentContainers ?? mode === "docker";
22274
+ return { mode, ...image && { image }, persistent };
22275
+ }
22276
+ }
22277
+
22278
+ class AgentsMdService {
22279
+ rootDir;
22280
+ contextService;
22281
+ constructor(rootDir, contextService) {
22282
+ this.rootDir = rootDir;
22283
+ this.contextService = contextService;
22284
+ }
22285
+ async init() {
22286
+ const agentsMdPath = path7.join(this.rootDir, "AGENTS.md");
22287
+ const existed = fileExists(agentsMdPath);
22288
+ if (existed) {
22289
+ const existing = readText(agentsMdPath);
22290
+ return { content: existing || "", existed: true };
22291
+ }
22292
+ const content = await this.scanAndGenerate();
22293
+ return { content, existed: false };
22294
+ }
22295
+ async sync(featureName) {
22296
+ const contexts = this.contextService.list(featureName);
22297
+ const agentsMdPath = path7.join(this.rootDir, "AGENTS.md");
22298
+ const current = await fs11.promises.readFile(agentsMdPath, "utf-8").catch(() => "");
22299
+ const findings = this.extractFindings(contexts);
22300
+ const proposals = this.generateProposals(findings, current);
22301
+ return { proposals, diff: this.formatDiff(current, proposals) };
22302
+ }
22303
+ apply(content) {
22304
+ const agentsMdPath = path7.join(this.rootDir, "AGENTS.md");
22305
+ const isNew = !fileExists(agentsMdPath);
22306
+ writeText(agentsMdPath, content);
22307
+ return { path: agentsMdPath, chars: content.length, isNew };
22308
+ }
22309
+ extractFindings(contexts) {
22310
+ const findings = [];
22311
+ const patterns = [
22312
+ /we\s+use\s+[^.\n]+/gi,
22313
+ /prefer\s+[^.\n]+\s+over\s+[^.\n]+/gi,
22314
+ /don't\s+use\s+[^.\n]+/gi,
22315
+ /do\s+not\s+use\s+[^.\n]+/gi,
22316
+ /(?:build|test|dev)\s+command:\s*[^.\n]+/gi,
22317
+ /[a-zA-Z]+\s+lives?\s+in\s+\/[^\s.\n]+/gi
22318
+ ];
22319
+ for (const context of contexts) {
22320
+ const lines = context.content.split(`
22321
+ `);
22322
+ for (const line of lines) {
22323
+ const trimmed2 = line.trim();
22324
+ if (!trimmed2 || trimmed2.startsWith("#"))
22325
+ continue;
22326
+ for (const pattern of patterns) {
22327
+ const matches = trimmed2.match(pattern);
22328
+ if (matches) {
22329
+ for (const match of matches) {
22330
+ const finding = match.trim();
22331
+ if (finding && !findings.includes(finding)) {
22332
+ findings.push(finding);
22333
+ }
22334
+ }
22335
+ }
22336
+ }
22337
+ }
22338
+ }
22339
+ return findings;
22340
+ }
22341
+ generateProposals(findings, current) {
22342
+ const proposals = [];
22343
+ const currentLower = current.toLowerCase();
22344
+ for (const finding of findings) {
22345
+ const findingLower = finding.toLowerCase();
22346
+ if (!currentLower.includes(findingLower)) {
22347
+ proposals.push(finding);
22348
+ }
22349
+ }
22350
+ return proposals;
22351
+ }
22352
+ formatDiff(current, proposals) {
22353
+ if (proposals.length === 0)
22354
+ return "";
22355
+ const lines = proposals.map((p) => `+ ${p}`);
22356
+ return lines.join(`
22357
+ `);
22358
+ }
22359
+ async scanAndGenerate() {
22360
+ const detections = await this.detectProjectInfo();
22361
+ return this.generateTemplate(detections);
22362
+ }
22363
+ async detectProjectInfo() {
22364
+ const packageJsonPath = path7.join(this.rootDir, "package.json");
22365
+ let packageJson = null;
22366
+ if (fileExists(packageJsonPath)) {
22367
+ try {
22368
+ const content = readText(packageJsonPath);
22369
+ packageJson = content ? JSON.parse(content) : null;
22370
+ } catch {}
22371
+ }
22372
+ const info = {
22373
+ packageManager: this.detectPackageManager(),
22374
+ language: this.detectLanguage(),
22375
+ testFramework: this.detectTestFramework(packageJson),
22376
+ buildCommand: packageJson?.scripts?.build || null,
22377
+ testCommand: packageJson?.scripts?.test || null,
22378
+ devCommand: packageJson?.scripts?.dev || null,
22379
+ isMonorepo: this.detectMonorepo(packageJson)
22380
+ };
22381
+ return info;
22382
+ }
22383
+ detectPackageManager() {
22384
+ if (fileExists(path7.join(this.rootDir, "bun.lockb")))
22385
+ return "bun";
22386
+ if (fileExists(path7.join(this.rootDir, "pnpm-lock.yaml")))
22387
+ return "pnpm";
22388
+ if (fileExists(path7.join(this.rootDir, "yarn.lock")))
22389
+ return "yarn";
22390
+ if (fileExists(path7.join(this.rootDir, "package-lock.json")))
22391
+ return "npm";
22392
+ return "npm";
22393
+ }
22394
+ detectLanguage() {
22395
+ if (fileExists(path7.join(this.rootDir, "tsconfig.json")))
22396
+ return "TypeScript";
22397
+ if (fileExists(path7.join(this.rootDir, "package.json")))
22398
+ return "JavaScript";
22399
+ if (fileExists(path7.join(this.rootDir, "requirements.txt")))
22400
+ return "Python";
22401
+ if (fileExists(path7.join(this.rootDir, "go.mod")))
22402
+ return "Go";
22403
+ if (fileExists(path7.join(this.rootDir, "Cargo.toml")))
22404
+ return "Rust";
22405
+ return "Unknown";
22406
+ }
22407
+ detectTestFramework(packageJson) {
22408
+ if (!packageJson)
22409
+ return null;
22410
+ const deps = {
22411
+ ...packageJson.dependencies,
22412
+ ...packageJson.devDependencies
22413
+ };
22414
+ if (deps?.vitest)
22415
+ return "vitest";
22416
+ if (deps?.jest)
22417
+ return "jest";
22418
+ if (this.detectPackageManager() === "bun")
22419
+ return "bun test";
22420
+ if (deps?.pytest)
22421
+ return "pytest";
22422
+ return null;
22423
+ }
22424
+ detectMonorepo(packageJson) {
22425
+ if (!packageJson)
22426
+ return false;
22427
+ return !!packageJson.workspaces;
22428
+ }
22429
+ generateTemplate(info) {
22430
+ const sections = [];
22431
+ sections.push(`# Agent Guidelines
22432
+ `);
22433
+ sections.push(`## Overview
22434
+ `);
22435
+ sections.push(`This project uses AI-assisted development. Follow these guidelines.
22436
+ `);
22437
+ sections.push(`## Build & Test Commands
22438
+ `);
22439
+ sections.push("```bash");
22440
+ if (info.isMonorepo) {
22441
+ sections.push("# This is a monorepo using bun workspaces");
22442
+ }
22443
+ if (info.buildCommand) {
22444
+ sections.push(`# Build`);
22445
+ sections.push(`${info.packageManager} run build`);
22446
+ sections.push("");
22447
+ }
22448
+ if (info.testCommand) {
22449
+ sections.push(`# Run tests`);
22450
+ sections.push(`${info.packageManager} ${info.testCommand === "bun test" ? "test" : "run test"}`);
22451
+ sections.push("");
22452
+ }
22453
+ if (info.devCommand) {
22454
+ sections.push(`# Development mode`);
22455
+ sections.push(`${info.packageManager} run dev`);
22456
+ }
22457
+ sections.push("```\n");
22458
+ sections.push(`## Technology Stack
22459
+ `);
22460
+ sections.push(`- **Language**: ${info.language}`);
22461
+ sections.push(`- **Package Manager**: ${info.packageManager}`);
22462
+ if (info.testFramework) {
22463
+ sections.push(`- **Test Framework**: ${info.testFramework}`);
22464
+ }
22465
+ if (info.isMonorepo) {
22466
+ sections.push(`- **Structure**: Monorepo with workspaces`);
22467
+ }
22468
+ sections.push("");
22469
+ sections.push(`## Code Style
22470
+ `);
22471
+ sections.push(`Follow existing patterns in the codebase.
22472
+ `);
22473
+ sections.push(`## Architecture Principles
22474
+ `);
22475
+ sections.push(`Document key architectural decisions here.
22476
+ `);
22477
+ return sections.join(`
22478
+ `);
22479
+ }
22480
+ }
22481
+
22482
+ class DockerSandboxService {
22483
+ static detectImage(worktreePath) {
22484
+ if (existsSync5(join8(worktreePath, "Dockerfile"))) {
22485
+ return null;
22486
+ }
22487
+ if (existsSync5(join8(worktreePath, "package.json"))) {
22488
+ return "node:22-slim";
22489
+ }
22490
+ if (existsSync5(join8(worktreePath, "requirements.txt")) || existsSync5(join8(worktreePath, "pyproject.toml"))) {
22491
+ return "python:3.12-slim";
22492
+ }
22493
+ if (existsSync5(join8(worktreePath, "go.mod"))) {
22494
+ return "golang:1.22-slim";
22495
+ }
22496
+ if (existsSync5(join8(worktreePath, "Cargo.toml"))) {
22497
+ return "rust:1.77-slim";
22498
+ }
22499
+ return "ubuntu:24.04";
22500
+ }
22501
+ static buildRunCommand(worktreePath, command, image) {
22502
+ const escapedCommand = command.replace(/'/g, "'\\''");
22503
+ return `docker run --rm -v ${worktreePath}:/app -w /app ${image} sh -c '${escapedCommand}'`;
22504
+ }
22505
+ static containerName(worktreePath) {
22506
+ const parts = worktreePath.split(sep);
22507
+ const worktreeIdx = parts.indexOf(".worktrees");
22508
+ if (worktreeIdx === -1 || worktreeIdx + 2 >= parts.length) {
22509
+ return `hive-sandbox-${Date.now()}`;
22510
+ }
22511
+ const feature = parts[worktreeIdx + 1];
22512
+ const task = parts[worktreeIdx + 2];
22513
+ const name = `hive-${feature}-${task}`.replace(/[^a-z0-9-]/gi, "-").toLowerCase();
22514
+ return name.slice(0, 63);
22515
+ }
22516
+ static ensureContainer(worktreePath, image) {
22517
+ const name = this.containerName(worktreePath);
22518
+ try {
22519
+ execSync(`docker inspect --format='{{.State.Running}}' ${name}`, { stdio: "pipe" });
22520
+ return name;
22521
+ } catch {
22522
+ execSync(`docker run -d --name ${name} -v ${worktreePath}:/app -w /app ${image} tail -f /dev/null`, { stdio: "pipe" });
22523
+ return name;
22524
+ }
22525
+ }
22526
+ static buildExecCommand(containerName, command) {
22527
+ const escapedCommand = command.replace(/'/g, "'\\''");
22528
+ return `docker exec ${containerName} sh -c '${escapedCommand}'`;
22529
+ }
22530
+ static stopContainer(worktreePath) {
22531
+ const name = this.containerName(worktreePath);
22532
+ try {
22533
+ execSync(`docker rm -f ${name}`, { stdio: "ignore" });
22534
+ } catch {}
22535
+ }
22536
+ static isDockerAvailable() {
22537
+ try {
22538
+ execSync("docker info", { stdio: "ignore" });
22539
+ return true;
22540
+ } catch {
22541
+ return false;
22542
+ }
21638
22543
  }
21639
- isHiveBackgroundEnabled() {
21640
- return this.getDelegateMode() === "hive";
22544
+ static wrapCommand(worktreePath, command, config2) {
22545
+ if (command.startsWith("HOST: ")) {
22546
+ return command.substring(6);
22547
+ }
22548
+ if (config2.mode === "none") {
22549
+ return command;
22550
+ }
22551
+ let image;
22552
+ if (config2.image) {
22553
+ image = config2.image;
22554
+ } else {
22555
+ image = this.detectImage(worktreePath);
22556
+ if (image === null) {
22557
+ return command;
22558
+ }
22559
+ }
22560
+ if (config2.persistent) {
22561
+ const containerName = this.ensureContainer(worktreePath, image);
22562
+ return this.buildExecCommand(containerName, command);
22563
+ } else {
22564
+ return this.buildRunCommand(worktreePath, command, image);
22565
+ }
21641
22566
  }
21642
22567
  }
21643
22568
  function computeRunnableAndBlocked(tasks) {
@@ -21747,16 +22672,26 @@ ${spec}
21747
22672
 
21748
22673
  ---
21749
22674
 
22675
+ ## Pre-implementation Checklist
22676
+
22677
+ Before writing code, confirm:
22678
+ 1. Dependencies are satisfied and required context is present.
22679
+ 2. The exact files/sections to touch (from references) are identified.
22680
+ 3. The first failing test to write is clear (TDD).
22681
+ 4. The minimal change needed to reach green is planned.
22682
+
22683
+ ---
22684
+
21750
22685
  ## Blocker Protocol
21751
22686
 
21752
22687
  If you hit a blocker requiring human decision, **DO NOT** use the question tool directly.
21753
22688
  Instead, escalate via the blocker protocol:
21754
22689
 
21755
22690
  1. **Save your progress** to the worktree (commit if appropriate)
21756
- 2. **Call hive_exec_complete** with blocker info:
22691
+ 2. **Call hive_worktree_commit** with blocker info:
21757
22692
 
21758
22693
  \`\`\`
21759
- hive_exec_complete({
22694
+ hive_worktree_commit({
21760
22695
  task: "${task}",
21761
22696
  feature: "${feature}",
21762
22697
  status: "blocked",
@@ -21770,7 +22705,7 @@ hive_exec_complete({
21770
22705
  })
21771
22706
  \`\`\`
21772
22707
 
21773
- **After calling hive_exec_complete with blocked status, STOP IMMEDIATELY.**
22708
+ **After calling hive_worktree_commit with blocked status, STOP IMMEDIATELY.**
21774
22709
 
21775
22710
  The Hive Master will:
21776
22711
  1. Receive your blocker info
@@ -21786,7 +22721,7 @@ This keeps the user focused on ONE conversation (Hive Master) instead of multipl
21786
22721
  When your task is **fully complete**:
21787
22722
 
21788
22723
  \`\`\`
21789
- hive_exec_complete({
22724
+ hive_worktree_commit({
21790
22725
  task: "${task}",
21791
22726
  feature: "${feature}",
21792
22727
  status: "completed",
@@ -21794,14 +22729,20 @@ hive_exec_complete({
21794
22729
  })
21795
22730
  \`\`\`
21796
22731
 
21797
- **CRITICAL: After calling hive_exec_complete, you MUST STOP IMMEDIATELY.**
22732
+ **CRITICAL: After calling hive_worktree_commit, you MUST STOP IMMEDIATELY.**
21798
22733
  Do NOT continue working. Do NOT respond further. Your session is DONE.
21799
22734
  The Hive Master will take over from here.
21800
22735
 
22736
+ **Summary Guidance** (used verbatim for downstream task context):
22737
+ 1. Start with **what changed** (files/areas touched).
22738
+ 2. Mention **why** if it affects future tasks.
22739
+ 3. Note **verification evidence** (tests/build/lint) or explicitly say "Not run".
22740
+ 4. Keep it **2-4 sentences** max.
22741
+
21801
22742
  If you encounter an **unrecoverable error**:
21802
22743
 
21803
22744
  \`\`\`
21804
- hive_exec_complete({
22745
+ hive_worktree_commit({
21805
22746
  task: "${task}",
21806
22747
  feature: "${feature}",
21807
22748
  status: "failed",
@@ -21812,7 +22753,7 @@ hive_exec_complete({
21812
22753
  If you made **partial progress** but can't continue:
21813
22754
 
21814
22755
  \`\`\`
21815
- hive_exec_complete({
22756
+ hive_worktree_commit({
21816
22757
  task: "${task}",
21817
22758
  feature: "${feature}",
21818
22759
  status: "partial",
@@ -21846,16 +22787,16 @@ After 3 failed attempts at same fix: STOP and report blocker.
21846
22787
 
21847
22788
  **You have access to:**
21848
22789
  - All standard tools (read, write, edit, bash, glob, grep)
21849
- - \`hive_exec_complete\` - Signal task done/blocked/failed
21850
- - \`hive_exec_abort\` - Abort and discard changes
22790
+ - \`hive_worktree_commit\` - Signal task done/blocked/failed
22791
+ - \`hive_worktree_discard\` - Abort and discard changes
21851
22792
  - \`hive_plan_read\` - Re-read plan if needed
21852
22793
  - \`hive_context_write\` - Save learnings for future tasks
21853
22794
 
21854
22795
  **You do NOT have access to (or should not use):**
21855
22796
  - \`question\` - Escalate via blocker protocol instead
21856
- - \`hive_exec_start\` - No spawning sub-workers
22797
+ - \`hive_worktree_create\` - No spawning sub-workers
21857
22798
  - \`hive_merge\` - Only Hive Master merges
21858
- - \`hive_background_task\` / \`task\` - No recursive delegation
22799
+ - \`task\` - No recursive delegation
21859
22800
 
21860
22801
  ---
21861
22802
 
@@ -21865,7 +22806,7 @@ After 3 failed attempts at same fix: STOP and report blocker.
21865
22806
  2. **Stay in scope** - Only do what the spec asks
21866
22807
  3. **Escalate blockers** - Don't guess on important decisions
21867
22808
  4. **Save context** - Use hive_context_write for discoveries
21868
- 5. **Complete cleanly** - Always call hive_exec_complete when done
22809
+ 5. **Complete cleanly** - Always call hive_worktree_commit when done
21869
22810
 
21870
22811
  ---
21871
22812
 
@@ -22107,63 +23048,6 @@ function applyContextBudget(files, config2 = {}) {
22107
23048
  // src/utils/prompt-file.ts
22108
23049
  import * as fs6 from "fs";
22109
23050
  import * as path5 from "path";
22110
- function findWorkspaceRoot(startDir) {
22111
- try {
22112
- let current = path5.resolve(startDir);
22113
- while (true) {
22114
- const hivePath = path5.join(current, ".hive");
22115
- if (fs6.existsSync(hivePath) && fs6.statSync(hivePath).isDirectory()) {
22116
- return current;
22117
- }
22118
- const parent = path5.dirname(current);
22119
- if (parent === current) {
22120
- return null;
22121
- }
22122
- current = parent;
22123
- }
22124
- } catch {
22125
- return null;
22126
- }
22127
- }
22128
- function isValidPromptFilePath(filePath, workspaceRoot) {
22129
- try {
22130
- const normalizedFilePath = path5.resolve(filePath);
22131
- const normalizedWorkspace = path5.resolve(workspaceRoot);
22132
- let normalizedFilePathForCompare = normalizePath(normalizedFilePath);
22133
- let normalizedWorkspaceForCompare = normalizePath(normalizedWorkspace);
22134
- if (process.platform === "win32") {
22135
- normalizedFilePathForCompare = normalizedFilePathForCompare.toLowerCase();
22136
- normalizedWorkspaceForCompare = normalizedWorkspaceForCompare.toLowerCase();
22137
- }
22138
- if (!normalizedFilePathForCompare.startsWith(normalizedWorkspaceForCompare + "/") && normalizedFilePathForCompare !== normalizedWorkspaceForCompare) {
22139
- return false;
22140
- }
22141
- return true;
22142
- } catch {
22143
- return false;
22144
- }
22145
- }
22146
- async function resolvePromptFromFile(promptFilePath, workspaceRoot) {
22147
- if (!isValidPromptFilePath(promptFilePath, workspaceRoot)) {
22148
- return {
22149
- error: `Prompt file path "${promptFilePath}" is outside the workspace. ` + `Only files within "${workspaceRoot}" are allowed.`
22150
- };
22151
- }
22152
- const resolvedPath = path5.resolve(promptFilePath);
22153
- if (!fs6.existsSync(resolvedPath)) {
22154
- return {
22155
- error: `Prompt file not found: "${resolvedPath}"`
22156
- };
22157
- }
22158
- try {
22159
- const content = fs6.readFileSync(resolvedPath, "utf-8");
22160
- return { content };
22161
- } catch (err) {
22162
- return {
22163
- error: `Failed to read prompt file: ${err instanceof Error ? err.message : "Unknown error"}`
22164
- };
22165
- }
22166
- }
22167
23051
  function writeWorkerPromptFile(feature, task, prompt, hiveDir) {
22168
23052
  const promptDir = path5.join(hiveDir, "features", feature, "tasks", task);
22169
23053
  const promptPath = path5.join(promptDir, "worker-prompt.md");
@@ -22174,1451 +23058,58 @@ function writeWorkerPromptFile(feature, task, prompt, hiveDir) {
22174
23058
  return promptPath;
22175
23059
  }
22176
23060
 
22177
- // src/background/types.ts
22178
- var VALID_TRANSITIONS = {
22179
- spawned: ["pending", "running", "error", "cancelled"],
22180
- pending: ["running", "error", "cancelled"],
22181
- running: ["completed", "error", "cancelled", "blocked", "failed"],
22182
- completed: [],
22183
- error: [],
22184
- cancelled: [],
22185
- blocked: ["running", "cancelled"],
22186
- failed: []
22187
- };
22188
- function isTerminalStatus(status) {
22189
- return VALID_TRANSITIONS[status].length === 0;
22190
- }
22191
- function isValidTransition(from, to) {
22192
- return VALID_TRANSITIONS[from].includes(to);
22193
- }
22194
- // src/background/store.ts
22195
- function generateTaskId() {
22196
- const timestamp = Date.now().toString(36);
22197
- const random = Math.random().toString(36).substring(2, 8);
22198
- return `task-${timestamp}-${random}`;
22199
- }
22200
-
22201
- class BackgroundTaskStore {
22202
- tasks = new Map;
22203
- idempotencyIndex = new Map;
22204
- create(options) {
22205
- if (options.idempotencyKey) {
22206
- const existingId = this.idempotencyIndex.get(options.idempotencyKey);
22207
- if (existingId) {
22208
- throw new Error(`Idempotency key "${options.idempotencyKey}" already exists for task "${existingId}". ` + `Use getByIdempotencyKey() to retrieve the existing task.`);
22209
- }
22210
- }
22211
- const taskId = generateTaskId();
22212
- const now = new Date().toISOString();
22213
- const record2 = {
22214
- taskId,
22215
- sessionId: options.sessionId,
22216
- agent: options.agent,
22217
- description: options.description,
22218
- status: "spawned",
22219
- provider: "hive",
22220
- idempotencyKey: options.idempotencyKey,
22221
- createdAt: now,
22222
- lastActiveAt: now,
22223
- parentSessionId: options.parentSessionId,
22224
- parentMessageId: options.parentMessageId,
22225
- parentAgent: options.parentAgent,
22226
- notifyParent: options.notifyParent,
22227
- hiveFeature: options.hiveFeature,
22228
- hiveTaskFolder: options.hiveTaskFolder,
22229
- workdir: options.workdir
22230
- };
22231
- this.tasks.set(taskId, record2);
22232
- if (options.idempotencyKey) {
22233
- this.idempotencyIndex.set(options.idempotencyKey, taskId);
22234
- }
22235
- return record2;
22236
- }
22237
- get(taskId) {
22238
- return this.tasks.get(taskId);
22239
- }
22240
- getByIdempotencyKey(key) {
22241
- const taskId = this.idempotencyIndex.get(key);
22242
- if (!taskId)
22243
- return;
22244
- return this.tasks.get(taskId);
22245
- }
22246
- getByHiveTask(feature, taskFolder) {
22247
- for (const task of this.tasks.values()) {
22248
- if (task.hiveFeature === feature && task.hiveTaskFolder === taskFolder) {
22249
- return task;
22250
- }
22251
- }
23061
+ // src/hooks/variant-hook.ts
23062
+ var HIVE_AGENT_NAMES = [
23063
+ "hive-master",
23064
+ "architect-planner",
23065
+ "swarm-orchestrator",
23066
+ "scout-researcher",
23067
+ "forager-worker",
23068
+ "hygienic-reviewer"
23069
+ ];
23070
+ function isHiveAgent(agent) {
23071
+ return agent !== undefined && HIVE_AGENT_NAMES.includes(agent);
23072
+ }
23073
+ function normalizeVariant(variant) {
23074
+ if (variant === undefined)
22252
23075
  return;
23076
+ const trimmed2 = variant.trim();
23077
+ return trimmed2.length > 0 ? trimmed2 : undefined;
23078
+ }
23079
+
23080
+ // src/index.ts
23081
+ function formatSkillsXml(skills) {
23082
+ if (skills.length === 0)
23083
+ return "";
23084
+ const skillsXml = skills.map((skill) => {
23085
+ return [
23086
+ " <skill>",
23087
+ ` <name>${skill.name}</name>`,
23088
+ ` <description>(hive - Skill) ${skill.description}</description>`,
23089
+ " </skill>"
23090
+ ].join(`
23091
+ `);
23092
+ }).join(`
23093
+ `);
23094
+ return `
23095
+
23096
+ <available_skills>
23097
+ ${skillsXml}
23098
+ </available_skills>`;
23099
+ }
23100
+ async function buildAutoLoadedSkillsContent(agentName, configService, projectRoot) {
23101
+ const agentConfig = configService.getAgentConfig(agentName);
23102
+ const autoLoadSkills = agentConfig.autoLoadSkills ?? [];
23103
+ if (autoLoadSkills.length === 0) {
23104
+ return "";
22253
23105
  }
22254
- updateStatus(taskId, newStatus, updates) {
22255
- const task = this.tasks.get(taskId);
22256
- if (!task) {
22257
- throw new Error(`Task "${taskId}" not found`);
22258
- }
22259
- if (!isValidTransition(task.status, newStatus)) {
22260
- throw new Error(`Invalid state transition: ${task.status} -> ${newStatus} for task "${taskId}"`);
22261
- }
22262
- const now = new Date().toISOString();
22263
- if (newStatus === "running" && !task.startedAt) {
22264
- task.startedAt = now;
22265
- }
22266
- if (isTerminalStatus(newStatus) && !task.completedAt) {
22267
- task.completedAt = now;
22268
- }
22269
- task.status = newStatus;
22270
- task.lastActiveAt = now;
22271
- if (updates?.errorMessage !== undefined) {
22272
- task.errorMessage = updates.errorMessage;
22273
- }
22274
- if (updates?.progress !== undefined) {
22275
- task.progress = { ...task.progress, ...updates.progress };
22276
- }
22277
- return task;
22278
- }
22279
- updateProgress(taskId, progress) {
22280
- const task = this.tasks.get(taskId);
22281
- if (!task) {
22282
- throw new Error(`Task "${taskId}" not found`);
22283
- }
22284
- const now = new Date().toISOString();
22285
- task.lastActiveAt = now;
22286
- task.progress = { ...task.progress, ...progress };
22287
- return task;
22288
- }
22289
- delete(taskId) {
22290
- const task = this.tasks.get(taskId);
22291
- if (!task)
22292
- return false;
22293
- if (task.idempotencyKey) {
22294
- this.idempotencyIndex.delete(task.idempotencyKey);
22295
- }
22296
- return this.tasks.delete(taskId);
22297
- }
22298
- list(filter) {
22299
- let tasks = Array.from(this.tasks.values());
22300
- if (filter?.status) {
22301
- const statuses = Array.isArray(filter.status) ? filter.status : [filter.status];
22302
- tasks = tasks.filter((t) => statuses.includes(t.status));
22303
- }
22304
- if (filter?.parentSessionId) {
22305
- tasks = tasks.filter((t) => t.parentSessionId === filter.parentSessionId);
22306
- }
22307
- if (filter?.hiveFeature) {
22308
- tasks = tasks.filter((t) => t.hiveFeature === filter.hiveFeature);
22309
- }
22310
- return tasks.sort((a, b) => a.createdAt.localeCompare(b.createdAt));
22311
- }
22312
- getActive() {
22313
- return this.list().filter((t) => !isTerminalStatus(t.status));
22314
- }
22315
- countByStatus() {
22316
- const counts = {
22317
- spawned: 0,
22318
- pending: 0,
22319
- running: 0,
22320
- completed: 0,
22321
- error: 0,
22322
- cancelled: 0,
22323
- blocked: 0,
22324
- failed: 0
22325
- };
22326
- for (const task of this.tasks.values()) {
22327
- counts[task.status]++;
22328
- }
22329
- return counts;
22330
- }
22331
- clear() {
22332
- this.tasks.clear();
22333
- this.idempotencyIndex.clear();
22334
- }
22335
- get size() {
22336
- return this.tasks.size;
22337
- }
22338
- }
22339
- var globalStore = null;
22340
- function getStore() {
22341
- if (!globalStore) {
22342
- globalStore = new BackgroundTaskStore;
22343
- }
22344
- return globalStore;
22345
- }
22346
- // src/background/agent-gate.ts
22347
- var BLOCKED_AGENTS = new Set([
22348
- "orchestrator",
22349
- "hive",
22350
- "hive-master",
22351
- "swarm-orchestrator",
22352
- "conductor",
22353
- "main"
22354
- ]);
22355
- var RESTRICTED_AGENTS = new Set([
22356
- "admin",
22357
- "root",
22358
- "superuser"
22359
- ]);
22360
-
22361
- class AgentGate {
22362
- client;
22363
- cachedAgents = null;
22364
- cacheExpiry = 0;
22365
- cacheTtlMs = 30000;
22366
- constructor(client) {
22367
- this.client = client;
22368
- }
22369
- async discoverAgents() {
22370
- const now = Date.now();
22371
- if (this.cachedAgents && now < this.cacheExpiry) {
22372
- return this.cachedAgents;
22373
- }
22374
- try {
22375
- const result = await this.client.app.agents({});
22376
- const agents = result.data ?? [];
22377
- this.cachedAgents = agents;
22378
- this.cacheExpiry = now + this.cacheTtlMs;
22379
- return agents;
22380
- } catch (error45) {
22381
- if (this.cachedAgents) {
22382
- return this.cachedAgents;
22383
- }
22384
- throw new Error(`Failed to discover agents: ${error45 instanceof Error ? error45.message : "Unknown error"}`);
22385
- }
22386
- }
22387
- async validate(agentName, options = {}) {
22388
- const name = agentName.trim().toLowerCase();
22389
- if (BLOCKED_AGENTS.has(name)) {
22390
- return {
22391
- valid: false,
22392
- error: `Agent "${agentName}" is an orchestrator agent and cannot be spawned as a background worker. ` + `Use a worker agent like "forager-worker", "scout-researcher", or "hygienic-reviewer".`
22393
- };
22394
- }
22395
- if (options.additionalBlocked?.includes(name)) {
22396
- return {
22397
- valid: false,
22398
- error: `Agent "${agentName}" is blocked by configuration.`
22399
- };
22400
- }
22401
- if (RESTRICTED_AGENTS.has(name) && !options.allowRestricted) {
22402
- return {
22403
- valid: false,
22404
- error: `Agent "${agentName}" is restricted and requires explicit allowance. ` + `Set allowRestricted: true to spawn this agent.`
22405
- };
22406
- }
22407
- let agents;
22408
- try {
22409
- agents = await this.discoverAgents();
22410
- } catch (error45) {
22411
- return {
22412
- valid: false,
22413
- error: error45 instanceof Error ? error45.message : "Failed to discover agents"
22414
- };
22415
- }
22416
- const agent = agents.find((a) => a.name.toLowerCase() === name);
22417
- if (!agent) {
22418
- const available = agents.filter((a) => !BLOCKED_AGENTS.has(a.name.toLowerCase())).map((a) => ` • ${a.name}${a.description ? ` - ${a.description}` : ""}`).join(`
22419
- `);
22420
- return {
22421
- valid: false,
22422
- error: `Agent "${agentName}" not found in registry.
22423
-
22424
- Available agents:
22425
- ${available || "(none)"}`
22426
- };
22427
- }
22428
- return {
22429
- valid: true,
22430
- agent
22431
- };
22432
- }
22433
- async getWorkerAgents() {
22434
- const agents = await this.discoverAgents();
22435
- return agents.filter((a) => {
22436
- const name = a.name.toLowerCase();
22437
- return !BLOCKED_AGENTS.has(name) && !RESTRICTED_AGENTS.has(name);
22438
- });
22439
- }
22440
- clearCache() {
22441
- this.cachedAgents = null;
22442
- this.cacheExpiry = 0;
22443
- }
22444
- isBlocked(agentName) {
22445
- return BLOCKED_AGENTS.has(agentName.trim().toLowerCase());
22446
- }
22447
- isRestricted(agentName) {
22448
- return RESTRICTED_AGENTS.has(agentName.trim().toLowerCase());
22449
- }
22450
- }
22451
- function createAgentGate(client) {
22452
- return new AgentGate(client);
22453
- }
22454
- // src/background/concurrency.ts
22455
- var DEFAULT_CONFIG = {
22456
- defaultLimit: 3,
22457
- agentLimits: {},
22458
- modelLimits: {},
22459
- queueTimeoutMs: 5 * 60 * 1000,
22460
- minDelayBetweenStartsMs: 1000
22461
- };
22462
-
22463
- class ConcurrencyManager {
22464
- config;
22465
- counts = new Map;
22466
- queues = new Map;
22467
- lastStartTimes = new Map;
22468
- constructor(config2 = {}) {
22469
- this.config = { ...DEFAULT_CONFIG, ...config2 };
22470
- }
22471
- getLimit(key) {
22472
- if (key.includes("/")) {
22473
- const modelLimit = this.config.modelLimits[key];
22474
- if (modelLimit !== undefined) {
22475
- return modelLimit === 0 ? Infinity : modelLimit;
22476
- }
22477
- const provider = key.split("/")[0];
22478
- const providerLimit = this.config.modelLimits[provider];
22479
- if (providerLimit !== undefined) {
22480
- return providerLimit === 0 ? Infinity : providerLimit;
22481
- }
22482
- }
22483
- const agentLimit = this.config.agentLimits[key];
22484
- if (agentLimit !== undefined) {
22485
- return agentLimit === 0 ? Infinity : agentLimit;
22486
- }
22487
- return this.config.defaultLimit === 0 ? Infinity : this.config.defaultLimit;
22488
- }
22489
- async acquire(key) {
22490
- const limit = this.getLimit(key);
22491
- if (limit === Infinity) {
22492
- return;
22493
- }
22494
- await this.enforceRateLimit(key);
22495
- const current = this.counts.get(key) ?? 0;
22496
- if (current < limit) {
22497
- this.counts.set(key, current + 1);
22498
- this.lastStartTimes.set(key, Date.now());
22499
- return;
22500
- }
22501
- return new Promise((resolve2, reject) => {
22502
- const queue = this.queues.get(key) ?? [];
22503
- const entry = {
22504
- resolve: () => {
22505
- if (entry.settled)
22506
- return;
22507
- entry.settled = true;
22508
- clearTimeout(entry.timeoutId);
22509
- this.lastStartTimes.set(key, Date.now());
22510
- resolve2();
22511
- },
22512
- reject: (error45) => {
22513
- if (entry.settled)
22514
- return;
22515
- entry.settled = true;
22516
- clearTimeout(entry.timeoutId);
22517
- reject(error45);
22518
- },
22519
- settled: false,
22520
- enqueuedAt: Date.now(),
22521
- timeoutId: setTimeout(() => {
22522
- if (!entry.settled) {
22523
- entry.settled = true;
22524
- const q = this.queues.get(key);
22525
- if (q) {
22526
- const idx = q.indexOf(entry);
22527
- if (idx !== -1)
22528
- q.splice(idx, 1);
22529
- }
22530
- reject(new Error(`Concurrency queue timeout for key "${key}" after ${this.config.queueTimeoutMs}ms`));
22531
- }
22532
- }, this.config.queueTimeoutMs)
22533
- };
22534
- queue.push(entry);
22535
- this.queues.set(key, queue);
22536
- });
22537
- }
22538
- release(key) {
22539
- const limit = this.getLimit(key);
22540
- if (limit === Infinity) {
22541
- return;
22542
- }
22543
- const queue = this.queues.get(key);
22544
- while (queue && queue.length > 0) {
22545
- const next = queue.shift();
22546
- if (!next.settled) {
22547
- next.resolve();
22548
- return;
22549
- }
22550
- }
22551
- const current = this.counts.get(key) ?? 0;
22552
- if (current > 0) {
22553
- this.counts.set(key, current - 1);
22554
- }
22555
- }
22556
- tryAcquire(key) {
22557
- const limit = this.getLimit(key);
22558
- if (limit === Infinity) {
22559
- return true;
22560
- }
22561
- const current = this.counts.get(key) ?? 0;
22562
- if (current < limit) {
22563
- this.counts.set(key, current + 1);
22564
- this.lastStartTimes.set(key, Date.now());
22565
- return true;
22566
- }
22567
- return false;
22568
- }
22569
- cancelWaiters(key) {
22570
- const queue = this.queues.get(key);
22571
- if (!queue)
22572
- return 0;
22573
- let cancelled = 0;
22574
- for (const entry of queue) {
22575
- if (!entry.settled) {
22576
- entry.reject(new Error(`Concurrency queue cancelled for key: ${key}`));
22577
- cancelled++;
22578
- }
22579
- }
22580
- this.queues.delete(key);
22581
- return cancelled;
22582
- }
22583
- clear() {
22584
- for (const key of this.queues.keys()) {
22585
- this.cancelWaiters(key);
22586
- }
22587
- this.counts.clear();
22588
- this.queues.clear();
22589
- this.lastStartTimes.clear();
22590
- }
22591
- getCount(key) {
22592
- return this.counts.get(key) ?? 0;
22593
- }
22594
- getQueueLength(key) {
22595
- const queue = this.queues.get(key);
22596
- if (!queue)
22597
- return 0;
22598
- return queue.filter((e) => !e.settled).length;
22599
- }
22600
- getAvailable(key) {
22601
- const limit = this.getLimit(key);
22602
- if (limit === Infinity)
22603
- return Infinity;
22604
- const current = this.counts.get(key) ?? 0;
22605
- return Math.max(0, limit - current);
22606
- }
22607
- isAtCapacity(key) {
22608
- const limit = this.getLimit(key);
22609
- if (limit === Infinity)
22610
- return false;
22611
- const current = this.counts.get(key) ?? 0;
22612
- return current >= limit;
22613
- }
22614
- getStatus() {
22615
- const status = {};
22616
- for (const [key, count] of this.counts.entries()) {
22617
- status[key] = {
22618
- count,
22619
- limit: this.getLimit(key),
22620
- queued: this.getQueueLength(key)
22621
- };
22622
- }
22623
- for (const key of this.queues.keys()) {
22624
- if (!status[key]) {
22625
- status[key] = {
22626
- count: 0,
22627
- limit: this.getLimit(key),
22628
- queued: this.getQueueLength(key)
22629
- };
22630
- }
22631
- }
22632
- return status;
22633
- }
22634
- async enforceRateLimit(key) {
22635
- const lastStart = this.lastStartTimes.get(key);
22636
- if (!lastStart)
22637
- return;
22638
- const elapsed = Date.now() - lastStart;
22639
- const delay2 = this.config.minDelayBetweenStartsMs - elapsed;
22640
- if (delay2 > 0) {
22641
- await new Promise((resolve2) => setTimeout(resolve2, delay2));
22642
- }
22643
- }
22644
- }
22645
- function createConcurrencyManager(config2) {
22646
- return new ConcurrencyManager(config2);
22647
- }
22648
- // src/background/poller.ts
22649
- var DEFAULT_CONFIG2 = {
22650
- pollIntervalMs: 5000,
22651
- maxPollIntervalMs: 30000,
22652
- stuckThresholdMs: 10 * 60 * 1000,
22653
- minRuntimeBeforeStuckMs: 30 * 1000,
22654
- stableCountThreshold: 3
22655
- };
22656
-
22657
- class BackgroundPoller {
22658
- store;
22659
- client;
22660
- config;
22661
- handlers;
22662
- pollingState = new Map;
22663
- pollInterval = null;
22664
- isPolling = false;
22665
- currentIntervalMs;
22666
- constructor(store, client, config2 = {}, handlers = {}) {
22667
- this.store = store;
22668
- this.client = client;
22669
- this.config = { ...DEFAULT_CONFIG2, ...config2 };
22670
- this.handlers = handlers;
22671
- this.currentIntervalMs = this.config.pollIntervalMs;
22672
- }
22673
- start() {
22674
- if (this.pollInterval)
22675
- return;
22676
- this.pollInterval = setInterval(() => {
22677
- this.poll().catch((err) => {
22678
- console.warn("[BackgroundPoller] Poll error:", err);
22679
- });
22680
- }, this.currentIntervalMs);
22681
- this.pollInterval.unref();
22682
- }
22683
- stop() {
22684
- if (this.pollInterval) {
22685
- clearInterval(this.pollInterval);
22686
- this.pollInterval = null;
22687
- }
22688
- this.isPolling = false;
22689
- }
22690
- isRunning() {
22691
- return this.pollInterval !== null;
22692
- }
22693
- getObservations() {
22694
- const observations = [];
22695
- const activeTasks = this.store.getActive();
22696
- for (const task of activeTasks) {
22697
- const state = this.pollingState.get(task.taskId);
22698
- const observation = this.buildObservation(task, state);
22699
- observations.push(observation);
22700
- }
22701
- return observations;
22702
- }
22703
- getTaskObservation(taskId) {
22704
- const task = this.store.get(taskId);
22705
- if (!task)
22706
- return null;
22707
- const state = this.pollingState.get(taskId);
22708
- return this.buildObservation(task, state);
22709
- }
22710
- async poll() {
22711
- if (this.isPolling)
22712
- return;
22713
- this.isPolling = true;
22714
- try {
22715
- const activeTasks = this.store.getActive();
22716
- if (activeTasks.length === 0) {
22717
- this.stop();
22718
- return;
22719
- }
22720
- let sessionStatuses = {};
22721
- try {
22722
- const statusResult = await this.client.session.status?.();
22723
- sessionStatuses = statusResult?.data ?? {};
22724
- } catch {}
22725
- for (const task of activeTasks) {
22726
- if (task.status !== "running")
22727
- continue;
22728
- await this.pollTask(task, sessionStatuses);
22729
- }
22730
- this.adjustPollingInterval(activeTasks);
22731
- } finally {
22732
- this.isPolling = false;
22733
- }
22734
- }
22735
- async pollTask(task, sessionStatuses) {
22736
- const state = this.pollingState.get(task.taskId) ?? {
22737
- lastMessageCount: 0,
22738
- stablePolls: 0,
22739
- lastPollAt: Date.now(),
22740
- consecutiveErrors: 0
22741
- };
22742
- try {
22743
- const sessionType = sessionStatuses[task.sessionId]?.type;
22744
- const messagesResult = await this.client.session.messages({
22745
- path: { id: task.sessionId }
22746
- });
22747
- const messages = messagesResult.data ?? [];
22748
- const currentMessageCount = messages.length;
22749
- const now = new Date().toISOString();
22750
- if (currentMessageCount > state.lastMessageCount) {
22751
- state.stablePolls = 0;
22752
- this.store.updateProgress(task.taskId, {
22753
- messageCount: currentMessageCount,
22754
- lastMessageAt: now
22755
- });
22756
- } else {
22757
- state.stablePolls++;
22758
- }
22759
- state.lastMessageCount = currentMessageCount;
22760
- state.lastPollAt = Date.now();
22761
- state.consecutiveErrors = 0;
22762
- this.pollingState.set(task.taskId, state);
22763
- const isIdle = sessionType === "idle" || sessionType === "completed";
22764
- if (isIdle) {
22765
- this.handlers.onSessionIdle?.(task.sessionId, "status");
22766
- return;
22767
- }
22768
- if (!sessionType && currentMessageCount > 0 && state.stablePolls >= this.config.stableCountThreshold) {
22769
- try {
22770
- const session = await this.client.session.get({ path: { id: task.sessionId } });
22771
- const status = session.data?.status;
22772
- if (status === "idle" || status === "completed") {
22773
- this.handlers.onSessionIdle?.(task.sessionId, "status");
22774
- return;
22775
- }
22776
- } catch {}
22777
- this.handlers.onSessionIdle?.(task.sessionId, "stable");
22778
- }
22779
- } catch (error45) {
22780
- state.consecutiveErrors++;
22781
- state.lastPollAt = Date.now();
22782
- this.pollingState.set(task.taskId, state);
22783
- if (state.consecutiveErrors >= 3) {
22784
- console.warn(`[BackgroundPoller] Multiple errors polling task ${task.taskId}:`, error45);
22785
- }
22786
- }
22787
- }
22788
- buildObservation(task, state) {
22789
- const now = Date.now();
22790
- const stablePolls = state?.stablePolls ?? 0;
22791
- const lastActivityAt = task.progress?.lastMessageAt ?? task.lastActiveAt ?? null;
22792
- let maybeStuck = false;
22793
- if (task.status === "running" && task.startedAt) {
22794
- const startedAt = new Date(task.startedAt).getTime();
22795
- const runtime = now - startedAt;
22796
- if (runtime >= this.config.minRuntimeBeforeStuckMs) {
22797
- const lastActivity = lastActivityAt ? new Date(lastActivityAt).getTime() : startedAt;
22798
- const timeSinceActivity = now - lastActivity;
22799
- maybeStuck = timeSinceActivity >= this.config.stuckThresholdMs;
22800
- }
22801
- }
22802
- return {
22803
- taskId: task.taskId,
22804
- sessionId: task.sessionId,
22805
- status: task.status,
22806
- messageCount: task.progress?.messageCount ?? 0,
22807
- lastActivityAt,
22808
- maybeStuck,
22809
- stablePolls,
22810
- isStable: stablePolls >= this.config.stableCountThreshold
22811
- };
22812
- }
22813
- adjustPollingInterval(activeTasks) {
22814
- const now = Date.now();
22815
- let recentActivityCount = 0;
22816
- for (const task of activeTasks) {
22817
- const state = this.pollingState.get(task.taskId);
22818
- if (state && state.stablePolls < 2) {
22819
- recentActivityCount++;
22820
- }
22821
- }
22822
- const stableRatio = activeTasks.length > 0 ? (activeTasks.length - recentActivityCount) / activeTasks.length : 0;
22823
- if (stableRatio > 0.8) {
22824
- this.currentIntervalMs = Math.min(this.currentIntervalMs * 1.5, this.config.maxPollIntervalMs);
22825
- } else if (recentActivityCount > 0) {
22826
- this.currentIntervalMs = this.config.pollIntervalMs;
22827
- }
22828
- if (this.pollInterval && Math.abs(this.currentIntervalMs - this.config.pollIntervalMs) > 1000) {
22829
- this.stop();
22830
- this.start();
22831
- }
22832
- }
22833
- cleanupTask(taskId) {
22834
- this.pollingState.delete(taskId);
22835
- }
22836
- clear() {
22837
- this.stop();
22838
- this.pollingState.clear();
22839
- }
22840
- }
22841
- function createPoller(store, client, config2, handlers) {
22842
- return new BackgroundPoller(store, client, config2, handlers);
22843
- }
22844
- // src/background/manager.ts
22845
- class BackgroundManager {
22846
- store;
22847
- agentGate;
22848
- client;
22849
- taskService;
22850
- concurrencyManager;
22851
- poller;
22852
- enforceHiveSequential;
22853
- constructor(options) {
22854
- this.client = options.client;
22855
- this.store = options.store ?? getStore();
22856
- this.agentGate = createAgentGate(options.client);
22857
- this.taskService = new TaskService(options.projectRoot);
22858
- this.concurrencyManager = createConcurrencyManager(options.concurrency);
22859
- this.poller = createPoller(this.store, options.client, options.poller, {
22860
- onSessionIdle: (sessionId) => this.handleSessionIdle(sessionId)
22861
- });
22862
- this.enforceHiveSequential = options.enforceHiveSequential ?? true;
22863
- }
22864
- async spawn(options) {
22865
- if (options.idempotencyKey) {
22866
- const existing = this.store.getByIdempotencyKey(options.idempotencyKey);
22867
- if (existing) {
22868
- return {
22869
- task: existing,
22870
- wasExisting: true
22871
- };
22872
- }
22873
- }
22874
- if (options.hiveFeature && options.hiveTaskFolder) {
22875
- const existing = this.store.getByHiveTask(options.hiveFeature, options.hiveTaskFolder);
22876
- if (existing && !isTerminalStatus(existing.status)) {
22877
- return {
22878
- task: existing,
22879
- wasExisting: true
22880
- };
22881
- }
22882
- if (this.enforceHiveSequential) {
22883
- const orderingCheck = this.checkHiveTaskOrdering(options.hiveFeature, options.hiveTaskFolder);
22884
- if (!orderingCheck.allowed) {
22885
- return {
22886
- task: null,
22887
- wasExisting: false,
22888
- error: orderingCheck.error
22889
- };
22890
- }
22891
- }
22892
- }
22893
- const validation = await this.agentGate.validate(options.agent);
22894
- if (!validation.valid) {
22895
- return {
22896
- task: null,
22897
- wasExisting: false,
22898
- error: validation.error
22899
- };
22900
- }
22901
- const concurrencyKey = options.agent;
22902
- try {
22903
- await this.concurrencyManager.acquire(concurrencyKey);
22904
- } catch (error45) {
22905
- return {
22906
- task: null,
22907
- wasExisting: false,
22908
- error: `Concurrency limit reached: ${error45 instanceof Error ? error45.message : "Unknown error"}`
22909
- };
22910
- }
22911
- let sessionId;
22912
- try {
22913
- const sessionResult = await this.client.session.create({
22914
- body: {
22915
- title: `Background: ${options.description}`,
22916
- parentID: options.parentSessionId
22917
- }
22918
- });
22919
- if (!sessionResult.data?.id) {
22920
- this.concurrencyManager.release(concurrencyKey);
22921
- return {
22922
- task: null,
22923
- wasExisting: false,
22924
- error: "Failed to create OpenCode session"
22925
- };
22926
- }
22927
- sessionId = sessionResult.data.id;
22928
- } catch (error45) {
22929
- this.concurrencyManager.release(concurrencyKey);
22930
- return {
22931
- task: null,
22932
- wasExisting: false,
22933
- error: `Failed to create session: ${error45 instanceof Error ? error45.message : "Unknown error"}`
22934
- };
22935
- }
22936
- const notifyParent = options.notifyParent ?? !options.sync;
22937
- const task = this.store.create({
22938
- agent: options.agent,
22939
- description: options.description,
22940
- sessionId,
22941
- idempotencyKey: options.idempotencyKey,
22942
- parentSessionId: options.parentSessionId,
22943
- parentMessageId: options.parentMessageId,
22944
- parentAgent: options.parentAgent,
22945
- notifyParent,
22946
- hiveFeature: options.hiveFeature,
22947
- hiveTaskFolder: options.hiveTaskFolder,
22948
- workdir: options.workdir
22949
- });
22950
- if (options.hiveFeature && options.hiveTaskFolder) {
22951
- try {
22952
- const attempt = options.attempt ?? 1;
22953
- this.taskService.patchBackgroundFields(options.hiveFeature, options.hiveTaskFolder, {
22954
- idempotencyKey: options.idempotencyKey,
22955
- workerSession: {
22956
- taskId: task.taskId,
22957
- sessionId: task.sessionId,
22958
- agent: task.agent,
22959
- mode: "delegate",
22960
- attempt
22961
- }
22962
- });
22963
- } catch (error45) {
22964
- console.warn(`[BackgroundManager] Failed to persist to .hive: ${error45 instanceof Error ? error45.message : "Unknown"}`);
22965
- }
22966
- }
22967
- this.store.updateStatus(task.taskId, "running");
22968
- this.poller.start();
22969
- const normalizedVariant = options.variant?.trim() || undefined;
22970
- this.client.session.prompt({
22971
- path: { id: sessionId },
22972
- body: {
22973
- agent: options.agent,
22974
- parts: [{ type: "text", text: options.prompt }],
22975
- tools: {
22976
- background_task: false,
22977
- delegate: false,
22978
- hive_background_task: false,
22979
- hive_background_output: false,
22980
- hive_background_cancel: false,
22981
- task: false
22982
- },
22983
- ...normalizedVariant !== undefined && { variant: normalizedVariant }
22984
- }
22985
- }).catch((error45) => {
22986
- this.updateStatus(task.taskId, "error", { errorMessage: error45.message });
22987
- this.concurrencyManager.release(concurrencyKey);
22988
- this.poller.cleanupTask(task.taskId);
22989
- });
22990
- return {
22991
- task: this.store.get(task.taskId),
22992
- wasExisting: false
22993
- };
22994
- }
22995
- checkHiveTaskOrdering(feature, taskFolder) {
22996
- const taskStatus = this.taskService.getRawStatus(feature, taskFolder);
22997
- if (taskStatus?.dependsOn !== undefined) {
22998
- return this.checkDependencies(feature, taskFolder, taskStatus.dependsOn);
22999
- }
23000
- return this.checkNumericOrdering(feature, taskFolder);
23001
- }
23002
- checkDependencies(feature, taskFolder, dependsOn) {
23003
- if (dependsOn.length === 0) {
23004
- return { allowed: true };
23005
- }
23006
- const unmetDeps = [];
23007
- for (const depFolder of dependsOn) {
23008
- const depStatus = this.taskService.getRawStatus(feature, depFolder);
23009
- if (!depStatus || depStatus.status !== "done") {
23010
- unmetDeps.push({
23011
- folder: depFolder,
23012
- status: depStatus?.status ?? "unknown"
23013
- });
23014
- }
23015
- }
23016
- if (unmetDeps.length > 0) {
23017
- const depList = unmetDeps.map((d) => `"${d.folder}" (${d.status})`).join(", ");
23018
- return {
23019
- allowed: false,
23020
- error: `Dependency constraint: Task "${taskFolder}" cannot start - dependencies not done: ${depList}. ` + `Only tasks with status 'done' satisfy dependencies.`
23021
- };
23022
- }
23023
- return { allowed: true };
23024
- }
23025
- checkNumericOrdering(feature, taskFolder) {
23026
- const orderMatch = taskFolder.match(/^(\d+)-/);
23027
- if (!orderMatch) {
23028
- return { allowed: true };
23029
- }
23030
- const taskOrder = parseInt(orderMatch[1], 10);
23031
- if (taskOrder <= 1) {
23032
- return { allowed: true };
23033
- }
23034
- const activeTasks = this.store.list({
23035
- hiveFeature: feature,
23036
- status: ["spawned", "pending", "running"]
23037
- });
23038
- for (const activeTask of activeTasks) {
23039
- if (!activeTask.hiveTaskFolder)
23040
- continue;
23041
- const activeOrderMatch = activeTask.hiveTaskFolder.match(/^(\d+)-/);
23042
- if (!activeOrderMatch)
23043
- continue;
23044
- const activeOrder = parseInt(activeOrderMatch[1], 10);
23045
- if (activeOrder < taskOrder) {
23046
- return {
23047
- allowed: false,
23048
- error: `Sequential ordering enforced: Task "${taskFolder}" cannot start while earlier task "${activeTask.hiveTaskFolder}" is still ${activeTask.status}. ` + `Complete or cancel the earlier task first. ` + `(Hive default: sequential execution for safety)`
23049
- };
23050
- }
23051
- }
23052
- return { allowed: true };
23053
- }
23054
- getTask(taskId) {
23055
- return this.store.get(taskId);
23056
- }
23057
- getTaskByIdempotencyKey(key) {
23058
- return this.store.getByIdempotencyKey(key);
23059
- }
23060
- getTaskByHiveTask(feature, taskFolder) {
23061
- return this.store.getByHiveTask(feature, taskFolder);
23062
- }
23063
- updateStatus(taskId, status, options) {
23064
- const task = this.store.updateStatus(taskId, status, options);
23065
- if (task.hiveFeature && task.hiveTaskFolder) {
23066
- try {
23067
- this.taskService.patchBackgroundFields(task.hiveFeature, task.hiveTaskFolder, {
23068
- workerSession: {
23069
- sessionId: task.sessionId,
23070
- lastHeartbeatAt: new Date().toISOString()
23071
- }
23072
- });
23073
- } catch {}
23074
- }
23075
- if (isTerminalStatus(task.status) && task.parentSessionId && task.notifyParent !== false) {
23076
- this.notifyParentSession(task);
23077
- }
23078
- return task;
23079
- }
23080
- async notifyParentSession(task) {
23081
- if (!task.parentSessionId)
23082
- return;
23083
- const statusLabel = task.status.toUpperCase();
23084
- const errorLine = task.errorMessage ? `
23085
- **Error:** ${task.errorMessage}` : "";
23086
- const notification = `<system-reminder>
23087
- [BACKGROUND TASK ${statusLabel}]
23088
-
23089
- **ID:** \`${task.taskId}\`
23090
- **Description:** ${task.description}
23091
- **Agent:** ${task.agent}${errorLine}
23092
-
23093
- Use \`hive_background_output({ task_id: "${task.taskId}" })\` to retrieve the result.
23094
- </system-reminder>`;
23095
- try {
23096
- await this.client.session.prompt({
23097
- path: { id: task.parentSessionId },
23098
- body: {
23099
- agent: task.parentAgent || "hive",
23100
- parts: [{ type: "text", text: notification }]
23101
- }
23102
- });
23103
- } catch {}
23104
- }
23105
- async cancel(taskId) {
23106
- const task = this.store.get(taskId);
23107
- if (!task) {
23108
- throw new Error(`Task "${taskId}" not found`);
23109
- }
23110
- if (isTerminalStatus(task.status)) {
23111
- throw new Error(`Cannot cancel task in terminal status: ${task.status}`);
23112
- }
23113
- try {
23114
- await this.client.session.abort({
23115
- path: { id: task.sessionId }
23116
- });
23117
- } catch {}
23118
- this.concurrencyManager.release(task.agent);
23119
- this.poller.cleanupTask(taskId);
23120
- return this.updateStatus(taskId, "cancelled");
23121
- }
23122
- async cancelAll(parentSessionId) {
23123
- const tasks = this.store.list({
23124
- parentSessionId,
23125
- status: ["spawned", "pending", "running"]
23126
- });
23127
- const results = [];
23128
- for (const task of tasks) {
23129
- try {
23130
- const cancelled = await this.cancel(task.taskId);
23131
- results.push(cancelled);
23132
- } catch {}
23133
- }
23134
- return results;
23135
- }
23136
- list(filter) {
23137
- return this.store.list(filter);
23138
- }
23139
- getActive() {
23140
- return this.store.getActive();
23141
- }
23142
- handleSessionIdle(sessionId) {
23143
- const tasks = this.store.list();
23144
- const task = tasks.find((t) => t.sessionId === sessionId);
23145
- if (task && task.status === "running") {
23146
- this.concurrencyManager.release(task.agent);
23147
- this.poller.cleanupTask(task.taskId);
23148
- this.updateStatus(task.taskId, "completed");
23149
- }
23150
- }
23151
- handleMessageEvent(sessionId, messageText) {
23152
- const tasks = this.store.list();
23153
- const task = tasks.find((t) => t.sessionId === sessionId);
23154
- if (task && task.status === "running") {
23155
- this.store.updateProgress(task.taskId, {
23156
- lastMessage: messageText?.slice(0, 200),
23157
- lastMessageAt: new Date().toISOString(),
23158
- messageCount: (task.progress?.messageCount ?? 0) + 1
23159
- });
23160
- }
23161
- }
23162
- getAgentGate() {
23163
- return this.agentGate;
23164
- }
23165
- getConcurrencyManager() {
23166
- return this.concurrencyManager;
23167
- }
23168
- getPoller() {
23169
- return this.poller;
23170
- }
23171
- getObservations() {
23172
- return this.poller.getObservations();
23173
- }
23174
- getTaskObservation(taskId) {
23175
- return this.poller.getTaskObservation(taskId);
23176
- }
23177
- getCounts() {
23178
- return this.store.countByStatus();
23179
- }
23180
- shutdown() {
23181
- this.poller.stop();
23182
- this.concurrencyManager.clear();
23183
- }
23184
- }
23185
- function createBackgroundManager(options) {
23186
- return new BackgroundManager(options);
23187
- }
23188
- // src/utils/format.ts
23189
- function formatElapsed(ms) {
23190
- const totalSeconds = Math.max(0, Math.floor(ms / 1000));
23191
- if (totalSeconds < 60) {
23192
- return `${totalSeconds}s`;
23193
- }
23194
- const totalMinutes = Math.floor(totalSeconds / 60);
23195
- const seconds = totalSeconds % 60;
23196
- if (totalMinutes < 60) {
23197
- return `${totalMinutes}m ${seconds}s`;
23198
- }
23199
- const hours = Math.floor(totalMinutes / 60);
23200
- const minutes = totalMinutes % 60;
23201
- return `${hours}h ${minutes}m`;
23202
- }
23203
- function formatRelativeTime(isoDate) {
23204
- const timestamp = new Date(isoDate).getTime();
23205
- const now = Date.now();
23206
- const elapsedMs = Math.max(0, now - timestamp);
23207
- const totalSeconds = Math.max(0, Math.floor(elapsedMs / 1000));
23208
- if (totalSeconds < 60) {
23209
- return `${totalSeconds}s ago`;
23210
- }
23211
- const totalMinutes = Math.floor(totalSeconds / 60);
23212
- if (totalMinutes < 60) {
23213
- return `${totalMinutes}m ago`;
23214
- }
23215
- const totalHours = Math.floor(totalMinutes / 60);
23216
- return `${totalHours}h ago`;
23217
- }
23218
-
23219
- // src/hooks/variant-hook.ts
23220
- var HIVE_AGENT_NAMES = [
23221
- "hive-master",
23222
- "architect-planner",
23223
- "swarm-orchestrator",
23224
- "scout-researcher",
23225
- "forager-worker",
23226
- "hygienic-reviewer"
23227
- ];
23228
- function isHiveAgent(agent) {
23229
- return agent !== undefined && HIVE_AGENT_NAMES.includes(agent);
23230
- }
23231
- function normalizeVariant(variant) {
23232
- if (variant === undefined)
23233
- return;
23234
- const trimmed2 = variant.trim();
23235
- return trimmed2.length > 0 ? trimmed2 : undefined;
23236
- }
23237
-
23238
- // src/tools/background-tools.ts
23239
- function createBackgroundTools(manager, client, configService) {
23240
- async function maybeFinalizeIfIdle(sessionId) {
23241
- try {
23242
- const statusFn = client.session.status;
23243
- if (statusFn) {
23244
- const statusResult = await statusFn();
23245
- const entry = statusResult.data?.[sessionId];
23246
- const type = entry?.type;
23247
- if (type === "idle" || type === "completed") {
23248
- manager.handleSessionIdle(sessionId);
23249
- return;
23250
- }
23251
- }
23252
- } catch {}
23253
- try {
23254
- const sessionResult = await client.session.get({ path: { id: sessionId } });
23255
- const data = sessionResult.data;
23256
- const status = data?.status ?? data?.type;
23257
- if (status === "idle" || status === "completed") {
23258
- manager.handleSessionIdle(sessionId);
23259
- }
23260
- } catch {}
23261
- }
23262
- return {
23263
- hive_background_task: tool({
23264
- description: "Spawn a background agent task. Use sync=true to wait for completion (returns output). If sync=false (default), the parent session receives a completion <system-reminder> and you can call hive_background_output to fetch the result.",
23265
- args: {
23266
- agent: tool.schema.string().describe('Agent to use (e.g., "forager-worker", "scout-researcher")'),
23267
- prompt: tool.schema.string().optional().describe("Task instructions/prompt (required if promptFile not provided)"),
23268
- promptFile: tool.schema.string().optional().describe("Path to file containing prompt (alternative to inline prompt)"),
23269
- description: tool.schema.string().describe("Human-readable task description"),
23270
- sync: tool.schema.boolean().optional().describe("Wait for completion (default: false)"),
23271
- idempotencyKey: tool.schema.string().optional().describe("Key for safe retries"),
23272
- workdir: tool.schema.string().optional().describe("Working directory for task"),
23273
- feature: tool.schema.string().optional().describe("Hive feature name (for Hive-linked tasks)"),
23274
- task: tool.schema.string().optional().describe("Hive task folder (for Hive-linked tasks)"),
23275
- attempt: tool.schema.number().optional().describe("Hive attempt number (for Hive-linked tasks)")
23276
- },
23277
- async execute({
23278
- agent,
23279
- prompt,
23280
- promptFile,
23281
- description,
23282
- sync = false,
23283
- idempotencyKey,
23284
- workdir,
23285
- feature,
23286
- task: hiveTask,
23287
- attempt
23288
- }, toolContext) {
23289
- const ctx = toolContext;
23290
- const ALLOWED_CALLERS = new Set([
23291
- "hive-master",
23292
- "architect-planner",
23293
- "swarm-orchestrator"
23294
- ]);
23295
- const callerAgent = ctx?.agent;
23296
- if (!callerAgent || !ALLOWED_CALLERS.has(callerAgent)) {
23297
- const output = {
23298
- provider: "hive",
23299
- task_id: "",
23300
- session_id: "",
23301
- status: "error",
23302
- error: `Agent "${callerAgent ?? "unknown"}" is not allowed to spawn background tasks. Only orchestrator agents (${[...ALLOWED_CALLERS].join(", ")}) can delegate.`
23303
- };
23304
- return JSON.stringify(output, null, 2);
23305
- }
23306
- let resolvedPrompt = prompt;
23307
- if (promptFile) {
23308
- const baseDir = workdir || process.cwd();
23309
- const workspaceRoot = findWorkspaceRoot(baseDir) ?? baseDir;
23310
- const fileResult = await resolvePromptFromFile(promptFile, workspaceRoot);
23311
- if (fileResult.error) {
23312
- const output = {
23313
- provider: "hive",
23314
- task_id: "",
23315
- session_id: "",
23316
- status: "error",
23317
- error: `Failed to read prompt file: ${fileResult.error}`
23318
- };
23319
- return JSON.stringify(output, null, 2);
23320
- }
23321
- resolvedPrompt = fileResult.content;
23322
- }
23323
- if (!resolvedPrompt) {
23324
- const output = {
23325
- provider: "hive",
23326
- task_id: "",
23327
- session_id: "",
23328
- status: "error",
23329
- error: "Either prompt or promptFile is required"
23330
- };
23331
- return JSON.stringify(output, null, 2);
23332
- }
23333
- let variant;
23334
- if (configService && isHiveAgent(agent)) {
23335
- const agentConfig = configService.getAgentConfig(agent);
23336
- variant = normalizeVariant(agentConfig.variant);
23337
- }
23338
- const result = await manager.spawn({
23339
- agent,
23340
- prompt: resolvedPrompt,
23341
- description,
23342
- idempotencyKey,
23343
- workdir,
23344
- parentSessionId: ctx?.sessionID,
23345
- parentMessageId: ctx?.messageID,
23346
- parentAgent: ctx?.agent,
23347
- notifyParent: !sync,
23348
- hiveFeature: feature,
23349
- hiveTaskFolder: hiveTask,
23350
- sync,
23351
- attempt,
23352
- variant
23353
- });
23354
- if (result.error) {
23355
- const output = {
23356
- provider: "hive",
23357
- task_id: "",
23358
- session_id: "",
23359
- status: "error",
23360
- error: result.error
23361
- };
23362
- return JSON.stringify(output, null, 2);
23363
- }
23364
- const taskRecord = result.task;
23365
- if (!sync) {
23366
- const output = {
23367
- provider: "hive",
23368
- task_id: taskRecord.taskId,
23369
- session_id: taskRecord.sessionId,
23370
- status: taskRecord.status
23371
- };
23372
- return JSON.stringify(output, null, 2);
23373
- }
23374
- const pollInterval = 1000;
23375
- const maxWait = 30 * 60 * 1000;
23376
- const startTime = Date.now();
23377
- while (true) {
23378
- let current = manager.getTask(taskRecord.taskId);
23379
- if (!current) {
23380
- return JSON.stringify({
23381
- provider: "hive",
23382
- task_id: taskRecord.taskId,
23383
- session_id: taskRecord.sessionId,
23384
- status: "error",
23385
- error: "Task disappeared from store"
23386
- }, null, 2);
23387
- }
23388
- if (!isTerminalStatus(current.status)) {
23389
- await maybeFinalizeIfIdle(current.sessionId);
23390
- current = manager.getTask(taskRecord.taskId);
23391
- }
23392
- if (current && isTerminalStatus(current.status)) {
23393
- const outputText = await getTaskOutput(client, current.sessionId);
23394
- const output = {
23395
- provider: "hive",
23396
- task_id: current.taskId,
23397
- session_id: current.sessionId,
23398
- status: current.status,
23399
- output: outputText,
23400
- done: true
23401
- };
23402
- if (current.errorMessage) {
23403
- output.error = current.errorMessage;
23404
- }
23405
- return JSON.stringify(output, null, 2);
23406
- }
23407
- if (Date.now() - startTime > maxWait) {
23408
- return JSON.stringify({
23409
- provider: "hive",
23410
- task_id: current?.taskId ?? taskRecord.taskId,
23411
- session_id: current?.sessionId ?? taskRecord.sessionId,
23412
- status: current?.status ?? "unknown",
23413
- error: "Sync wait timed out after 30 minutes",
23414
- done: false
23415
- }, null, 2);
23416
- }
23417
- await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
23418
- }
23419
- }
23420
- }),
23421
- hive_background_output: tool({
23422
- description: "Get output from a background task. For sync=false tasks, wait for the completion <system-reminder> and then call with block=false to fetch the result; use block=true only when you need interim output. When blocking, pick a timeout based on task complexity (typically 30-120s).",
23423
- args: {
23424
- task_id: tool.schema.string().describe("Task ID to get output from"),
23425
- block: tool.schema.boolean().optional().describe("Block waiting for new output (default: false)"),
23426
- timeout: tool.schema.number().optional().describe("Timeout in ms when blocking (default: 60000)"),
23427
- cursor: tool.schema.string().optional().describe("Cursor for incremental output (message count)")
23428
- },
23429
- async execute({ task_id, block = false, timeout = 60000, cursor }) {
23430
- const task = manager.getTask(task_id);
23431
- if (!task) {
23432
- return JSON.stringify({
23433
- error: `Task "${task_id}" not found`,
23434
- task_id
23435
- }, null, 2);
23436
- }
23437
- const cursorCount = cursor ? parseInt(cursor, 10) : 0;
23438
- if (block && !isTerminalStatus(task.status)) {
23439
- const startTime = Date.now();
23440
- const pollInterval = 1000;
23441
- while (Date.now() - startTime < timeout) {
23442
- const current2 = manager.getTask(task_id);
23443
- if (!current2)
23444
- break;
23445
- const currentCount = current2.progress?.messageCount ?? 0;
23446
- if (currentCount > cursorCount || isTerminalStatus(current2.status)) {
23447
- break;
23448
- }
23449
- await maybeFinalizeIfIdle(current2.sessionId);
23450
- const afterFinalize = manager.getTask(task_id);
23451
- if (afterFinalize && isTerminalStatus(afterFinalize.status)) {
23452
- break;
23453
- }
23454
- await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
23455
- }
23456
- }
23457
- const current = manager.getTask(task_id);
23458
- if (!current) {
23459
- return JSON.stringify({
23460
- error: `Task "${task_id}" disappeared`,
23461
- task_id
23462
- }, null, 2);
23463
- }
23464
- if (!isTerminalStatus(current.status)) {
23465
- await maybeFinalizeIfIdle(current.sessionId);
23466
- }
23467
- const finalized = manager.getTask(task_id);
23468
- if (!finalized) {
23469
- return JSON.stringify({
23470
- error: `Task "${task_id}" disappeared`,
23471
- task_id
23472
- }, null, 2);
23473
- }
23474
- const output = await getTaskOutput(client, finalized.sessionId, cursorCount);
23475
- const messageCount = finalized.progress?.messageCount ?? 0;
23476
- const observationSnapshot = manager.getTaskObservation(task_id);
23477
- const lastActivityAt = observationSnapshot?.lastActivityAt ?? null;
23478
- const startedAt = finalized.startedAt ? new Date(finalized.startedAt).getTime() : Date.now();
23479
- const elapsedMs = Date.now() - startedAt;
23480
- const lastMessage = finalized.progress?.lastMessage;
23481
- const lastMessagePreview = lastMessage ? `${lastMessage.slice(0, 200)}${lastMessage.length > 200 ? "..." : ""}` : null;
23482
- const observation = {
23483
- elapsedMs,
23484
- elapsedFormatted: formatElapsed(elapsedMs),
23485
- messageCount: observationSnapshot?.messageCount ?? 0,
23486
- lastActivityAgo: lastActivityAt ? formatRelativeTime(lastActivityAt) : "never",
23487
- lastActivityAt,
23488
- lastMessagePreview,
23489
- maybeStuck: observationSnapshot?.maybeStuck ?? false
23490
- };
23491
- return JSON.stringify({
23492
- task_id: finalized.taskId,
23493
- session_id: finalized.sessionId,
23494
- status: finalized.status,
23495
- done: isTerminalStatus(finalized.status),
23496
- output,
23497
- cursor: messageCount.toString(),
23498
- progress: finalized.progress,
23499
- observation
23500
- }, null, 2);
23501
- }
23502
- }),
23503
- hive_background_cancel: tool({
23504
- description: "Cancel running background task(s). Use all=true to cancel all tasks for current session.",
23505
- args: {
23506
- task_id: tool.schema.string().optional().describe("Specific task ID to cancel"),
23507
- idempotencyKey: tool.schema.string().optional().describe("Cancel task by idempotency key"),
23508
- all: tool.schema.boolean().optional().describe("Cancel all tasks for current session")
23509
- },
23510
- async execute({ task_id, idempotencyKey, all }, toolContext) {
23511
- const ctx = toolContext;
23512
- if (all) {
23513
- if (!ctx?.sessionID) {
23514
- return JSON.stringify({
23515
- error: "Cannot cancel all: no parent session context",
23516
- cancelled: 0
23517
- }, null, 2);
23518
- }
23519
- const cancelled2 = await manager.cancelAll(ctx.sessionID);
23520
- return JSON.stringify({
23521
- cancelled: cancelled2.length,
23522
- tasks: cancelled2.map((t) => ({
23523
- task_id: t.taskId,
23524
- status: t.status
23525
- }))
23526
- }, null, 2);
23527
- }
23528
- let task;
23529
- if (task_id) {
23530
- task = manager.getTask(task_id);
23531
- } else if (idempotencyKey) {
23532
- task = manager.getTaskByIdempotencyKey(idempotencyKey);
23533
- }
23534
- if (!task) {
23535
- return JSON.stringify({
23536
- error: task_id ? `Task "${task_id}" not found` : idempotencyKey ? `No task found with idempotency key "${idempotencyKey}"` : "Must provide task_id, idempotencyKey, or all=true"
23537
- }, null, 2);
23538
- }
23539
- if (isTerminalStatus(task.status)) {
23540
- return JSON.stringify({
23541
- task_id: task.taskId,
23542
- status: task.status,
23543
- message: `Task already in terminal status: ${task.status}`
23544
- }, null, 2);
23545
- }
23546
- const cancelled = await manager.cancel(task.taskId);
23547
- return JSON.stringify({
23548
- task_id: cancelled.taskId,
23549
- status: cancelled.status,
23550
- message: "Task cancelled successfully"
23551
- }, null, 2);
23552
- }
23553
- })
23554
- };
23555
- }
23556
- async function getTaskOutput(client, sessionId, afterCount = 0) {
23557
- try {
23558
- const messagesResult = await client.session.messages({
23559
- path: { id: sessionId }
23560
- });
23561
- const messages = messagesResult.data ?? [];
23562
- const newMessages = messages.slice(afterCount);
23563
- const outputParts = [];
23564
- for (const msg of newMessages) {
23565
- const message = msg;
23566
- if (!message.parts)
23567
- continue;
23568
- for (const part of message.parts) {
23569
- if (part.type === "text" && part.text) {
23570
- outputParts.push(part.text);
23571
- } else if (part.type === "tool-result" && part.result) {
23572
- const result = typeof part.result === "string" ? part.result : JSON.stringify(part.result);
23573
- if (result.length > 500) {
23574
- outputParts.push(`[Tool: ${part.name}] ${result.slice(0, 500)}...`);
23575
- } else {
23576
- outputParts.push(`[Tool: ${part.name}] ${result}`);
23577
- }
23578
- }
23579
- }
23580
- }
23581
- return outputParts.join(`
23582
-
23583
- `);
23584
- } catch (error45) {
23585
- return `[Error fetching output: ${error45 instanceof Error ? error45.message : "Unknown"}]`;
23586
- }
23587
- }
23588
-
23589
- // src/index.ts
23590
- function formatSkillsXml(skills) {
23591
- if (skills.length === 0)
23592
- return "";
23593
- const skillsXml = skills.map((skill) => {
23594
- return [
23595
- " <skill>",
23596
- ` <name>${skill.name}</name>`,
23597
- ` <description>(hive - Skill) ${skill.description}</description>`,
23598
- " </skill>"
23599
- ].join(`
23600
- `);
23601
- }).join(`
23602
- `);
23603
- return `
23604
-
23605
- <available_skills>
23606
- ${skillsXml}
23607
- </available_skills>`;
23608
- }
23609
- async function buildAutoLoadedSkillsContent(agentName, configService, projectRoot) {
23610
- const agentConfig = configService.getAgentConfig(agentName);
23611
- const autoLoadSkills = agentConfig.autoLoadSkills ?? [];
23612
- if (autoLoadSkills.length === 0) {
23613
- return "";
23614
- }
23615
- const homeDir = process.env.HOME || os.homedir();
23616
- const skillTemplates = [];
23617
- for (const skillId of autoLoadSkills) {
23618
- const builtinSkill = BUILTIN_SKILLS.find((entry) => entry.name === skillId);
23619
- if (builtinSkill) {
23620
- skillTemplates.push(builtinSkill.template);
23621
- continue;
23106
+ const homeDir = process.env.HOME || os.homedir();
23107
+ const skillTemplates = [];
23108
+ for (const skillId of autoLoadSkills) {
23109
+ const builtinSkill = BUILTIN_SKILLS.find((entry) => entry.name === skillId);
23110
+ if (builtinSkill) {
23111
+ skillTemplates.push(builtinSkill.template);
23112
+ continue;
23622
23113
  }
23623
23114
  const fileResult = await loadFileSkill(skillId, projectRoot, homeDir);
23624
23115
  if (fileResult.found && fileResult.skill) {
@@ -23676,16 +23167,15 @@ var HIVE_SYSTEM_PROMPT = `
23676
23167
 
23677
23168
  Plan-first development: Write plan → User reviews → Approve → Execute tasks
23678
23169
 
23679
- ### Tools (19 total)
23170
+ ### Tools (14 total)
23680
23171
 
23681
23172
  | Domain | Tools |
23682
23173
  |--------|-------|
23683
- | Feature | hive_feature_create, hive_feature_list, hive_feature_complete |
23174
+ | Feature | hive_feature_create, hive_feature_complete |
23684
23175
  | Plan | hive_plan_write, hive_plan_read, hive_plan_approve |
23685
23176
  | Task | hive_tasks_sync, hive_task_create, hive_task_update |
23686
- | Exec | hive_exec_start, hive_exec_complete, hive_exec_abort |
23687
- | Worker | hive_worker_status |
23688
- | Merge | hive_merge, hive_worktree_list |
23177
+ | Worktree | hive_worktree_create, hive_worktree_commit, hive_worktree_discard |
23178
+ | Merge | hive_merge |
23689
23179
  | Context | hive_context_write |
23690
23180
  | Status | hive_status |
23691
23181
  | Skill | hive_skill |
@@ -23697,42 +23187,40 @@ Plan-first development: Write plan → User reviews → Approve → Execute task
23697
23187
  3. User adds comments in VSCode → \`hive_plan_read\` to see them
23698
23188
  4. Revise plan → User approves
23699
23189
  5. \`hive_tasks_sync()\` - Generate tasks from plan
23700
- 6. \`hive_exec_start(task)\` → work in worktree → \`hive_exec_complete(task, summary)\`
23190
+ 6. \`hive_worktree_create(task)\` → work in worktree → \`hive_worktree_commit(task, summary)\`
23701
23191
  7. \`hive_merge(task)\` - Merge task branch into main (when ready)
23702
23192
 
23703
- **Important:** \`hive_exec_complete\` commits changes to task branch but does NOT merge.
23193
+ **Important:** \`hive_worktree_commit\` commits changes to task branch but does NOT merge.
23704
23194
  Use \`hive_merge\` to explicitly integrate changes. Worktrees persist until manually removed.
23705
23195
 
23706
23196
  ### Delegated Execution
23707
23197
 
23708
- \`hive_exec_start\` creates worktree and spawns worker automatically:
23198
+ \`hive_worktree_create\` creates worktree and spawns worker automatically:
23709
23199
 
23710
- 1. \`hive_exec_start(task)\` → Creates worktree + spawns Forager (Worker/Coder) worker
23711
- 2. Worker executes → calls \`hive_exec_complete(status: "completed")\`
23712
- 3. Worker blocked → calls \`hive_exec_complete(status: "blocked", blocker: {...})\`
23200
+ 1. \`hive_worktree_create(task)\` → Creates worktree + spawns Forager (Worker/Coder) worker
23201
+ 2. Worker executes → calls \`hive_worktree_commit(status: "completed")\`
23202
+ 3. Worker blocked → calls \`hive_worktree_commit(status: "blocked", blocker: {...})\`
23713
23203
 
23714
23204
  **Handling blocked workers:**
23715
- 1. Check blockers with \`hive_worker_status()\`
23205
+ 1. Check blockers with \`hive_status()\`
23716
23206
  2. Read the blocker info (reason, options, recommendation, context)
23717
23207
  3. Ask user via \`question()\` tool - NEVER plain text
23718
- 4. Resume with \`hive_exec_start(task, continueFrom: "blocked", decision: answer)\`
23208
+ 4. Resume with \`hive_worktree_create(task, continueFrom: "blocked", decision: answer)\`
23719
23209
 
23720
23210
  **CRITICAL**: When resuming, a NEW worker spawns in the SAME worktree.
23721
23211
  The previous worker's progress is preserved. Include the user's decision in the \`decision\` parameter.
23722
23212
 
23723
- **Observation Polling (Recommended):**
23724
- - Prefer completion notifications over polling
23725
- - Use \`hive_worker_status()\` for observation-based spot checks
23726
- - Avoid tight loops with \`hive_background_output\`; if needed, wait 30-60s between checks
23727
- - If you suspect notifications did not deliver, do a single \`hive_worker_status()\` check first
23728
- - If you need final results, call \`hive_background_output({ task_id, block: false })\` after the completion notice
23213
+ **After task() Returns:**
23214
+ - task() is BLOCKING when it returns, the worker is DONE
23215
+ - Call \`hive_status()\` immediately to check the new task state and find next runnable tasks
23216
+ - No notifications or polling needed the result is already available
23729
23217
 
23730
23218
  **For research**, use MCP tools or parallel exploration:
23731
23219
  - \`grep_app_searchGitHub\` - Find code in OSS
23732
23220
  - \`context7_query-docs\` - Library documentation
23733
23221
  - \`websearch_web_search_exa\` - Web search via Exa
23734
23222
  - \`ast_grep_search\` - AST-based search
23735
- - For exploratory fan-out, load \`hive_skill("parallel-exploration")\` and use \`hive_background_task(agent: "scout-researcher", sync: false, ...)\`
23223
+ - For exploratory fan-out, load \`hive_skill("parallel-exploration")\` and use multiple \`task()\` calls in the same message
23736
23224
 
23737
23225
  ### Planning Phase - Context Management REQUIRED
23738
23226
 
@@ -23743,7 +23231,6 @@ As you research and plan, CONTINUOUSLY save findings using \`hive_context_write\
23743
23231
  - Architecture decisions ("auth lives in /lib/auth")
23744
23232
 
23745
23233
  **Update existing context files** when new info emerges - dont create duplicates.
23746
- Workers depend on context for background. Without it, they work blind.
23747
23234
 
23748
23235
  \`hive_tasks_sync\` parses \`### N. Task Name\` headers.
23749
23236
 
@@ -23760,6 +23247,7 @@ var plugin = async (ctx) => {
23760
23247
  const planService = new PlanService(directory);
23761
23248
  const taskService = new TaskService(directory);
23762
23249
  const contextService = new ContextService(directory);
23250
+ const agentsMdService = new AgentsMdService(directory, contextService);
23763
23251
  const configService = new ConfigService;
23764
23252
  const disabledMcps = configService.getDisabledMcps();
23765
23253
  const disabledSkills = configService.getDisabledSkills();
@@ -23768,15 +23256,8 @@ var plugin = async (ctx) => {
23768
23256
  const effectiveAutoLoadSkills = configService.getAgentConfig("hive-master").autoLoadSkills ?? [];
23769
23257
  const worktreeService = new WorktreeService({
23770
23258
  baseDir: directory,
23771
- hiveDir: path7.join(directory, ".hive")
23772
- });
23773
- const backgroundManager = createBackgroundManager({
23774
- client,
23775
- projectRoot: directory
23259
+ hiveDir: path8.join(directory, ".hive")
23776
23260
  });
23777
- const delegateMode = configService.getDelegateMode();
23778
- const useHiveBackground = delegateMode === "hive";
23779
- const backgroundTools = createBackgroundTools(backgroundManager, client, configService);
23780
23261
  const isOmoSlimEnabled = () => {
23781
23262
  return configService.isOmoSlimEnabled();
23782
23263
  };
@@ -23801,10 +23282,10 @@ var plugin = async (ctx) => {
23801
23282
  }
23802
23283
  };
23803
23284
  const checkBlocked = (feature) => {
23804
- const fs11 = __require("fs");
23805
- const blockedPath = path7.join(directory, ".hive", "features", feature, "BLOCKED");
23806
- if (fs11.existsSync(blockedPath)) {
23807
- const reason = fs11.readFileSync(blockedPath, "utf-8").trim();
23285
+ const fs9 = __require("fs");
23286
+ const blockedPath = path8.join(directory, ".hive", "features", feature, "BLOCKED");
23287
+ if (fs9.existsSync(blockedPath)) {
23288
+ const reason = fs9.readFileSync(blockedPath, "utf-8").trim();
23808
23289
  return `⛔ BLOCKED by Beekeeper
23809
23290
 
23810
23291
  ${reason || "(No reason provided)"}
@@ -23887,14 +23368,34 @@ To unblock: Remove .hive/features/${feature}/BLOCKED`;
23887
23368
  output.message.variant = configuredVariant;
23888
23369
  }
23889
23370
  },
23371
+ "tool.execute.before": async (input, output) => {
23372
+ if (input.tool !== "bash")
23373
+ return;
23374
+ const sandboxConfig = configService.getSandboxConfig();
23375
+ if (sandboxConfig.mode === "none")
23376
+ return;
23377
+ const command = output.args?.command?.trim();
23378
+ if (!command)
23379
+ return;
23380
+ if (/^HOST:\s*/i.test(command)) {
23381
+ const strippedCommand = command.replace(/^HOST:\s*/i, "");
23382
+ console.warn(`[hive:sandbox] HOST bypass: ${strippedCommand.slice(0, 80)}${strippedCommand.length > 80 ? "..." : ""}`);
23383
+ output.args.command = strippedCommand;
23384
+ return;
23385
+ }
23386
+ const workdir = output.args?.workdir;
23387
+ if (!workdir)
23388
+ return;
23389
+ const hiveWorktreeBase = path8.join(directory, ".hive", ".worktrees");
23390
+ if (!workdir.startsWith(hiveWorktreeBase))
23391
+ return;
23392
+ const wrapped = DockerSandboxService.wrapCommand(workdir, command, sandboxConfig);
23393
+ output.args.command = wrapped;
23394
+ output.args.workdir = undefined;
23395
+ },
23890
23396
  mcp: builtinMcps,
23891
23397
  tool: {
23892
23398
  hive_skill: createHiveSkillTool(filteredSkills),
23893
- ...useHiveBackground && {
23894
- hive_background_task: backgroundTools.hive_background_task,
23895
- hive_background_output: backgroundTools.hive_background_output,
23896
- hive_background_cancel: backgroundTools.hive_background_cancel
23897
- },
23898
23399
  hive_feature_create: tool({
23899
23400
  description: "Create a new feature and set it as active",
23900
23401
  args: {
@@ -23939,22 +23440,6 @@ These prevent scope creep and re-proposing rejected solutions.
23939
23440
  NEXT: Ask your first clarifying question about this feature.`;
23940
23441
  }
23941
23442
  }),
23942
- hive_feature_list: tool({
23943
- description: "List all features",
23944
- args: {},
23945
- async execute() {
23946
- const features = featureService.list();
23947
- const active = resolveFeature();
23948
- if (features.length === 0)
23949
- return "No features found.";
23950
- const list = features.map((f) => {
23951
- const info = featureService.getInfo(f);
23952
- return `${f === active ? "* " : " "}${f} (${info?.status || "unknown"})`;
23953
- });
23954
- return list.join(`
23955
- `);
23956
- }
23957
- }),
23958
23443
  hive_feature_complete: tool({
23959
23444
  description: "Mark feature as completed (irreversible)",
23960
23445
  args: { name: tool.schema.string().optional().describe("Feature name (defaults to active)") },
@@ -23966,34 +23451,6 @@ NEXT: Ask your first clarifying question about this feature.`;
23966
23451
  return `Feature "${feature}" marked as completed`;
23967
23452
  }
23968
23453
  }),
23969
- hive_journal_append: tool({
23970
- description: "Append entry to .hive/journal.md for audit trail",
23971
- args: {
23972
- feature: tool.schema.string().describe("Feature name for context"),
23973
- trouble: tool.schema.string().describe("What went wrong"),
23974
- resolution: tool.schema.string().describe("How it was fixed"),
23975
- constraint: tool.schema.string().optional().describe("Never/Always rule derived")
23976
- },
23977
- async execute({ feature, trouble, resolution, constraint }) {
23978
- const journalPath = path7.join(directory, ".hive", "journal.md");
23979
- if (!fs9.existsSync(journalPath)) {
23980
- return `Error: journal.md not found. Create a feature first to initialize the journal.`;
23981
- }
23982
- const date5 = new Date().toISOString().split("T")[0];
23983
- const entry = `
23984
- ### ${date5}: ${feature}
23985
-
23986
- **Trouble**: ${trouble}
23987
- **Resolution**: ${resolution}
23988
- ${constraint ? `**Constraint**: ${constraint}` : ""}
23989
- **See**: .hive/features/${feature}/plan.md
23990
-
23991
- ---
23992
- `;
23993
- fs9.appendFileSync(journalPath, entry);
23994
- return `Journal entry added for ${feature}. ${constraint ? `Constraint: "${constraint}"` : ""}`;
23995
- }
23996
- }),
23997
23454
  hive_plan_write: tool({
23998
23455
  description: "Write plan.md (clears existing comments)",
23999
23456
  args: {
@@ -24004,8 +23461,8 @@ ${constraint ? `**Constraint**: ${constraint}` : ""}
24004
23461
  const feature = resolveFeature(explicitFeature);
24005
23462
  if (!feature)
24006
23463
  return "Error: No feature specified. Create a feature or provide feature param.";
24007
- const hasDiscovery = content.toLowerCase().includes("## discovery");
24008
- if (!hasDiscovery) {
23464
+ const discoveryMatch = content.match(/^##\s+Discovery\s*$/im);
23465
+ if (!discoveryMatch) {
24009
23466
  return `BLOCKED: Discovery section required before planning.
24010
23467
 
24011
23468
  Your plan must include a \`## Discovery\` section documenting:
@@ -24014,6 +23471,19 @@ Your plan must include a \`## Discovery\` section documenting:
24014
23471
  - Key decisions made
24015
23472
 
24016
23473
  Add this section to your plan content and try again.`;
23474
+ }
23475
+ const afterDiscovery = content.slice(discoveryMatch.index + discoveryMatch[0].length);
23476
+ const nextHeading = afterDiscovery.search(/^##\s+/m);
23477
+ const discoveryContent = nextHeading > -1 ? afterDiscovery.slice(0, nextHeading).trim() : afterDiscovery.trim();
23478
+ if (discoveryContent.length < 100) {
23479
+ return `BLOCKED: Discovery section is too thin (${discoveryContent.length} chars, minimum 100).
23480
+
23481
+ A substantive Discovery section should include:
23482
+ - Original request quoted
23483
+ - Interview summary (key decisions)
23484
+ - Research findings with file:line references
23485
+
23486
+ Expand your Discovery section and try again.`;
24017
23487
  }
24018
23488
  captureSession(feature, toolContext);
24019
23489
  const planPath = planService.write(feature, content);
@@ -24087,7 +23557,7 @@ Add this section to your plan content and try again.`;
24087
23557
  return "Error: No feature specified. Create a feature or provide feature param.";
24088
23558
  const folder = taskService.create(feature, name, order);
24089
23559
  return `Manual task created: ${folder}
24090
- Reminder: start work with hive_exec_start to use its worktree, and ensure any subagents work in that worktree too.`;
23560
+ Reminder: start work with hive_worktree_create to use its worktree, and ensure any subagents work in that worktree too.`;
24091
23561
  }
24092
23562
  }),
24093
23563
  hive_task_update: tool({
@@ -24109,7 +23579,7 @@ Reminder: start work with hive_exec_start to use its worktree, and ensure any su
24109
23579
  return `Task "${task}" updated: status=${updated.status}`;
24110
23580
  }
24111
23581
  }),
24112
- hive_exec_start: tool({
23582
+ hive_worktree_create: tool({
24113
23583
  description: "Create worktree and begin work on task. Spawns Forager worker automatically.",
24114
23584
  args: {
24115
23585
  task: tool.schema.string().describe("Task folder name"),
@@ -24234,21 +23704,11 @@ Reminder: start work with hive_exec_start to use its worktree, and ensure any su
24234
23704
  spec: specContent,
24235
23705
  workerPrompt
24236
23706
  });
24237
- const hiveDir = path7.join(directory, ".hive");
23707
+ const hiveDir = path8.join(directory, ".hive");
24238
23708
  const workerPromptPath = writeWorkerPromptFile(feature, task, workerPrompt, hiveDir);
24239
- const relativePromptPath = normalizePath(path7.relative(directory, workerPromptPath));
23709
+ const relativePromptPath = normalizePath(path8.relative(directory, workerPromptPath));
24240
23710
  const PREVIEW_MAX_LENGTH = 200;
24241
23711
  const workerPromptPreview = workerPrompt.length > PREVIEW_MAX_LENGTH ? workerPrompt.slice(0, PREVIEW_MAX_LENGTH) + "..." : workerPrompt;
24242
- const hiveBackgroundInstructions = `## Delegation Required
24243
-
24244
- Call the hive_background_task tool to spawn a Forager (Worker/Coder) worker.
24245
-
24246
- \`backgroundTaskCall\` contains the canonical tool arguments.
24247
-
24248
- - Add \`sync: true\` if you need the result in this session.
24249
- - Otherwise omit \`sync\`. Wait for the completion notification (no polling required). After the <system-reminder> arrives, call \`hive_background_output({ task_id: "<id>", block: false })\` once to fetch the final result.
24250
-
24251
- Troubleshooting: if you see "Unknown parameter: workdir", your hive_background_task tool is not Hive's provider. Ensure agent-hive loads after other background_* tool providers, then re-run hive_exec_start.`;
24252
23712
  const taskToolPrompt = `Follow instructions in @${relativePromptPath}`;
24253
23713
  const taskToolInstructions = `## Delegation Required
24254
23714
 
@@ -24264,39 +23724,22 @@ task({
24264
23724
 
24265
23725
  Use the \`@path\` attachment syntax in the prompt to reference the file. Do not inline the file contents.
24266
23726
 
24267
- Note: delegateMode is set to 'task' in agent_hive.json. To use Hive's background tools instead, set delegateMode to 'hive'.`;
24268
- const delegationInstructions = useHiveBackground ? hiveBackgroundInstructions : taskToolInstructions;
23727
+ `;
24269
23728
  const responseBase = {
24270
23729
  worktreePath: worktree.path,
24271
23730
  branch: worktree.branch,
24272
23731
  mode: "delegate",
24273
- delegateMode,
24274
23732
  agent,
24275
23733
  delegationRequired: true,
24276
23734
  workerPromptPath: relativePromptPath,
24277
23735
  workerPromptPreview,
24278
- ...!useHiveBackground && {
24279
- taskPromptMode: "opencode-at-file"
24280
- },
24281
- ...useHiveBackground && {
24282
- backgroundTaskCall: {
24283
- promptFile: workerPromptPath,
24284
- description: `Hive: ${task}`,
24285
- workdir: worktree.path,
24286
- idempotencyKey,
24287
- feature,
24288
- task,
24289
- attempt
24290
- }
23736
+ taskPromptMode: "opencode-at-file",
23737
+ taskToolCall: {
23738
+ subagent_type: agent,
23739
+ description: `Hive: ${task}`,
23740
+ prompt: taskToolPrompt
24291
23741
  },
24292
- ...!useHiveBackground && {
24293
- taskToolCall: {
24294
- subagent_type: agent,
24295
- description: `Hive: ${task}`,
24296
- prompt: taskToolPrompt
24297
- }
24298
- },
24299
- instructions: delegationInstructions
23742
+ instructions: taskToolInstructions
24300
23743
  };
24301
23744
  const jsonPayload = JSON.stringify(responseBase, null, 2);
24302
23745
  const payloadMeta = calculatePayloadMeta({
@@ -24330,7 +23773,7 @@ Note: delegateMode is set to 'task' in agent_hive.json. To use Hive's background
24330
23773
  }, null, 2);
24331
23774
  }
24332
23775
  }),
24333
- hive_exec_complete: tool({
23776
+ hive_worktree_commit: tool({
24334
23777
  description: "Complete task: commit changes to branch, write report. Supports blocked/failed/partial status for worker communication.",
24335
23778
  args: {
24336
23779
  task: tool.schema.string().describe("Task folder name"),
@@ -24383,7 +23826,7 @@ Re-run with updated summary showing verification results.`;
24383
23826
  summary,
24384
23827
  blocker,
24385
23828
  worktreePath: worktree2?.path,
24386
- message: 'Task blocked. Hive Master will ask user and resume with hive_exec_start(continueFrom: "blocked", decision: answer)'
23829
+ message: 'Task blocked. Hive Master will ask user and resume with hive_worktree_create(continueFrom: "blocked", decision: answer)'
24387
23830
  }, null, 2);
24388
23831
  }
24389
23832
  const commitResult = await worktreeService.commitChanges(feature, task, `hive(${task}): ${summary.slice(0, 50)}`);
@@ -24425,7 +23868,7 @@ Re-run with updated summary showing verification results.`;
24425
23868
  Use hive_merge to integrate changes. Worktree preserved at ${worktree?.path || "unknown"}.`;
24426
23869
  }
24427
23870
  }),
24428
- hive_exec_abort: tool({
23871
+ hive_worktree_discard: tool({
24429
23872
  description: "Abort task: discard changes, reset status",
24430
23873
  args: {
24431
23874
  task: tool.schema.string().describe("Task folder name"),
@@ -24440,102 +23883,6 @@ Use hive_merge to integrate changes. Worktree preserved at ${worktree?.path || "
24440
23883
  return `Task "${task}" aborted. Status reset to pending.`;
24441
23884
  }
24442
23885
  }),
24443
- hive_worker_status: tool({
24444
- description: "Check status of delegated workers. Shows running workers, blockers, and progress.",
24445
- args: {
24446
- task: tool.schema.string().optional().describe("Specific task to check, or omit for all"),
24447
- feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
24448
- },
24449
- async execute({ task: specificTask, feature: explicitFeature }) {
24450
- const feature = resolveFeature(explicitFeature);
24451
- if (!feature)
24452
- return "Error: No feature specified. Create a feature or provide feature param.";
24453
- const STUCK_THRESHOLD = 10 * 60 * 1000;
24454
- const HEARTBEAT_STALE_THRESHOLD = 5 * 60 * 1000;
24455
- const now = Date.now();
24456
- const PREVIEW_MAX_LENGTH = 200;
24457
- const tasks = taskService.list(feature);
24458
- const inProgressTasks = tasks.filter((t) => (t.status === "in_progress" || t.status === "blocked") && (!specificTask || t.folder === specificTask));
24459
- if (inProgressTasks.length === 0) {
24460
- return specificTask ? `No active worker for task "${specificTask}"` : "No active workers.";
24461
- }
24462
- const workers = await Promise.all(inProgressTasks.map(async (t) => {
24463
- const worktree = await worktreeService.get(feature, t.folder);
24464
- const rawStatus = taskService.getRawStatus(feature, t.folder);
24465
- const workerSession = rawStatus?.workerSession;
24466
- const backgroundRecord = backgroundManager.getTaskByHiveTask(feature, t.folder);
24467
- const observation = backgroundRecord ? backgroundManager.getTaskObservation(backgroundRecord.taskId) : null;
24468
- let maybeStuck = false;
24469
- let lastActivityAt = null;
24470
- if (observation?.lastActivityAt) {
24471
- lastActivityAt = new Date(observation.lastActivityAt).getTime();
24472
- } else if (workerSession?.lastHeartbeatAt) {
24473
- lastActivityAt = new Date(workerSession.lastHeartbeatAt).getTime();
24474
- const heartbeatStale = now - lastActivityAt > HEARTBEAT_STALE_THRESHOLD;
24475
- const noRecentMessages = !workerSession.messageCount || workerSession.messageCount === 0;
24476
- maybeStuck = heartbeatStale && t.status === "in_progress";
24477
- } else if (rawStatus?.startedAt) {
24478
- lastActivityAt = new Date(rawStatus.startedAt).getTime();
24479
- maybeStuck = now - lastActivityAt > STUCK_THRESHOLD && t.status === "in_progress";
24480
- }
24481
- if (typeof observation?.maybeStuck === "boolean") {
24482
- maybeStuck = observation.maybeStuck;
24483
- }
24484
- const startedAtIso = backgroundRecord?.startedAt || rawStatus?.startedAt || null;
24485
- const startedAtMs = startedAtIso ? new Date(startedAtIso).getTime() : null;
24486
- const elapsedMs = startedAtMs ? Math.max(0, now - startedAtMs) : 0;
24487
- const lastActivityIso = observation?.lastActivityAt || workerSession?.lastHeartbeatAt || rawStatus?.startedAt || null;
24488
- const lastActivityAgo = lastActivityIso ? formatRelativeTime(lastActivityIso) : "never";
24489
- const messageCount = observation?.messageCount ?? backgroundRecord?.progress?.messageCount ?? workerSession?.messageCount ?? 0;
24490
- const lastMessagePreview = backgroundRecord?.progress?.lastMessage ? backgroundRecord.progress.lastMessage.slice(0, PREVIEW_MAX_LENGTH) : null;
24491
- return {
24492
- task: t.folder,
24493
- name: t.name,
24494
- status: t.status,
24495
- taskId: backgroundRecord?.taskId || workerSession?.taskId || null,
24496
- description: backgroundRecord?.description || null,
24497
- startedAt: startedAtIso,
24498
- workerSession: workerSession || null,
24499
- sessionId: workerSession?.sessionId || null,
24500
- agent: workerSession?.agent || "inline",
24501
- mode: workerSession?.mode || "inline",
24502
- attempt: workerSession?.attempt || 1,
24503
- messageCount: workerSession?.messageCount || 0,
24504
- lastHeartbeatAt: workerSession?.lastHeartbeatAt || null,
24505
- workerId: workerSession?.workerId || null,
24506
- worktreePath: worktree?.path || null,
24507
- branch: worktree?.branch || null,
24508
- maybeStuck,
24509
- activity: {
24510
- elapsedMs,
24511
- elapsedFormatted: formatElapsed(elapsedMs),
24512
- messageCount,
24513
- lastActivityAgo,
24514
- lastMessagePreview,
24515
- maybeStuck
24516
- },
24517
- blocker: rawStatus?.blocker || null,
24518
- summary: t.summary || null
24519
- };
24520
- }));
24521
- const stuckWorkers = workers.filter((worker) => worker.activity?.maybeStuck).length;
24522
- const hint = workers.some((w) => w.status === "blocked") ? 'Use hive_exec_start(task, continueFrom: "blocked", decision: answer) to resume blocked workers' : workers.some((w) => w.maybeStuck) ? "Some workers may be stuck. Use hive_background_output({ task_id }) to check output, or abort with hive_exec_abort." : "Workers in progress. Wait for the completion notification (no polling required). Use hive_worker_status for spot checks; use hive_background_output only if interim output is explicitly needed.";
24523
- const guidance = stuckWorkers > 0 ? `
24524
-
24525
- ⚠️ ${stuckWorkers} worker(s) may be stuck (no activity for 10+ minutes). Consider cancelling or investigating.` : "";
24526
- return JSON.stringify({
24527
- feature,
24528
- delegateMode,
24529
- omoSlimEnabled: isOmoSlimEnabled(),
24530
- backgroundTaskProvider: useHiveBackground ? "hive" : "task",
24531
- workers,
24532
- summary: {
24533
- stuckWorkers
24534
- },
24535
- hint: hint + guidance
24536
- }, null, 2);
24537
- }
24538
- }),
24539
23886
  hive_merge: tool({
24540
23887
  description: "Merge completed task branch into current branch (explicit integration)",
24541
23888
  args: {
@@ -24551,7 +23898,7 @@ Use hive_merge to integrate changes. Worktree preserved at ${worktree?.path || "
24551
23898
  if (!taskInfo)
24552
23899
  return `Error: Task "${task}" not found`;
24553
23900
  if (taskInfo.status !== "done")
24554
- return "Error: Task must be completed before merging. Use hive_exec_complete first.";
23901
+ return "Error: Task must be completed before merging. Use hive_worktree_commit first.";
24555
23902
  const result = await worktreeService.merge(feature, task, strategy);
24556
23903
  if (!result.success) {
24557
23904
  if (result.conflicts && result.conflicts.length > 0) {
@@ -24568,27 +23915,6 @@ Commit: ${result.sha}
24568
23915
  Files changed: ${result.filesChanged?.length || 0}`;
24569
23916
  }
24570
23917
  }),
24571
- hive_worktree_list: tool({
24572
- description: "List all worktrees for current feature",
24573
- args: {
24574
- feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
24575
- },
24576
- async execute({ feature: explicitFeature }) {
24577
- const feature = resolveFeature(explicitFeature);
24578
- if (!feature)
24579
- return "Error: No feature specified. Create a feature or provide feature param.";
24580
- const worktrees = await worktreeService.list(feature);
24581
- if (worktrees.length === 0)
24582
- return "No worktrees found for this feature.";
24583
- const lines = ["| Task | Branch | Has Changes |", "|------|--------|-------------|"];
24584
- for (const wt of worktrees) {
24585
- const hasChanges = await worktreeService.hasUncommittedChanges(wt.feature, wt.step);
24586
- lines.push(`| ${wt.step} | ${wt.branch} | ${hasChanges ? "Yes" : "No"} |`);
24587
- }
24588
- return lines.join(`
24589
- `);
24590
- }
24591
- }),
24592
23918
  hive_context_write: tool({
24593
23919
  description: "Write a context file for the feature. Context files store persistent notes, decisions, and reference material.",
24594
23920
  args: {
@@ -24630,16 +23956,22 @@ Files changed: ${result.filesChanged?.length || 0}`;
24630
23956
  const plan = planService.read(feature);
24631
23957
  const tasks = taskService.list(feature);
24632
23958
  const contextFiles = contextService.list(feature);
24633
- const tasksSummary = tasks.map((t) => {
23959
+ const tasksSummary = await Promise.all(tasks.map(async (t) => {
24634
23960
  const rawStatus = taskService.getRawStatus(feature, t.folder);
23961
+ const worktree = await worktreeService.get(feature, t.folder);
23962
+ const hasChanges = worktree ? await worktreeService.hasUncommittedChanges(worktree.feature, worktree.step) : null;
24635
23963
  return {
24636
23964
  folder: t.folder,
24637
23965
  name: t.name,
24638
23966
  status: t.status,
24639
23967
  origin: t.origin || "plan",
24640
- dependsOn: rawStatus?.dependsOn ?? null
23968
+ dependsOn: rawStatus?.dependsOn ?? null,
23969
+ worktree: worktree ? {
23970
+ branch: worktree.branch,
23971
+ hasChanges
23972
+ } : null
24641
23973
  };
24642
- });
23974
+ }));
24643
23975
  const contextSummary = contextFiles.map((c) => ({
24644
23976
  name: c.name,
24645
23977
  chars: c.content.length,
@@ -24677,7 +24009,7 @@ Files changed: ${result.filesChanged?.length || 0}`;
24677
24009
  return `${runnableTasks.length} tasks are ready to start in parallel: ${runnableTasks.join(", ")}`;
24678
24010
  }
24679
24011
  if (runnableTasks.length === 1) {
24680
- return `Start next task with hive_exec_start: ${runnableTasks[0]}`;
24012
+ return `Start next task with hive_worktree_create: ${runnableTasks[0]}`;
24681
24013
  }
24682
24014
  const pending = tasks2.find((t) => t.status === "pending");
24683
24015
  if (pending) {
@@ -24715,79 +24047,45 @@ Files changed: ${result.filesChanged?.length || 0}`;
24715
24047
  });
24716
24048
  }
24717
24049
  }),
24718
- hive_request_review: tool({
24719
- description: "Request human review of completed task. BLOCKS until human approves or requests changes. Call after completing work, before merging.",
24050
+ hive_agents_md: tool({
24051
+ description: "Initialize or sync AGENTS.md. init: scan codebase and generate (preview only). sync: propose updates from feature contexts. apply: write approved content to disk.",
24720
24052
  args: {
24721
- task: tool.schema.string().describe("Task folder name"),
24722
- summary: tool.schema.string().describe("Summary of what you did for human to review"),
24723
- feature: tool.schema.string().optional().describe("Feature name (defaults to active)")
24053
+ action: tool.schema.enum(["init", "sync", "apply"]).describe("Action to perform"),
24054
+ feature: tool.schema.string().optional().describe("Feature name for sync action"),
24055
+ content: tool.schema.string().optional().describe("Content to write (required for apply action)")
24724
24056
  },
24725
- async execute({ task, summary, feature: explicitFeature }) {
24726
- const feature = resolveFeature(explicitFeature);
24727
- if (!feature)
24728
- return "Error: No feature specified.";
24729
- const taskDir = path7.join(directory, ".hive", "features", feature, "tasks", task);
24730
- if (!fs9.existsSync(taskDir)) {
24731
- return `Error: Task '${task}' not found in feature '${feature}'`;
24732
- }
24733
- const reportPath = path7.join(taskDir, "report.md");
24734
- const existingReport = fs9.existsSync(reportPath) ? fs9.readFileSync(reportPath, "utf-8") : `# Task Report
24735
- `;
24736
- const attemptCount = (existingReport.match(/## Attempt \d+/g) || []).length + 1;
24737
- const timestamp = new Date().toISOString();
24738
- const newContent = existingReport + `
24739
- ## Attempt ${attemptCount}
24740
-
24741
- **Requested**: ${timestamp}
24742
-
24743
- ### Summary
24744
-
24745
- ${summary}
24746
-
24747
- `;
24748
- fs9.writeFileSync(reportPath, newContent);
24749
- const pendingPath = path7.join(taskDir, "PENDING_REVIEW");
24750
- fs9.writeFileSync(pendingPath, JSON.stringify({
24751
- attempt: attemptCount,
24752
- requestedAt: timestamp,
24753
- summary: summary.substring(0, 200) + (summary.length > 200 ? "..." : "")
24754
- }, null, 2));
24755
- const pollInterval = 2000;
24756
- const maxWait = 30 * 60 * 1000;
24757
- const startTime = Date.now();
24758
- while (fs9.existsSync(pendingPath)) {
24759
- if (Date.now() - startTime > maxWait) {
24760
- return "Review timed out after 30 minutes. Human did not respond.";
24057
+ async execute({ action, feature, content }) {
24058
+ if (action === "init") {
24059
+ const result = await agentsMdService.init();
24060
+ if (result.existed) {
24061
+ return `AGENTS.md already exists (${result.content.length} chars). Use 'sync' to propose updates.`;
24761
24062
  }
24762
- await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
24763
- }
24764
- const resultPath = path7.join(taskDir, "REVIEW_RESULT");
24765
- if (!fs9.existsSync(resultPath)) {
24766
- return "Review cancelled (PENDING_REVIEW removed but no REVIEW_RESULT).";
24767
- }
24768
- const result = fs9.readFileSync(resultPath, "utf-8").trim();
24769
- fs9.appendFileSync(reportPath, `### Review Result
24770
-
24771
- ${result}
24772
-
24773
- ---
24774
-
24775
- `);
24776
- if (result.toUpperCase() === "APPROVED") {
24777
- return `✅ APPROVED
24778
-
24779
- Your work has been approved. You may now merge:
24063
+ return `Generated AGENTS.md from codebase scan (${result.content.length} chars):
24780
24064
 
24781
- hive_merge(task="${task}")
24065
+ ${result.content}
24782
24066
 
24783
- After merging, proceed to the next task.`;
24784
- } else {
24785
- return `\uD83D\uDD04 Changes Requested
24067
+ ⚠️ This has NOT been written to disk. Ask the user via question() whether to write it to AGENTS.md.`;
24068
+ }
24069
+ if (action === "sync") {
24070
+ if (!feature)
24071
+ return "Error: feature name required for sync action";
24072
+ const result = await agentsMdService.sync(feature);
24073
+ if (result.proposals.length === 0) {
24074
+ return "No new findings to sync to AGENTS.md.";
24075
+ }
24076
+ return `Proposed AGENTS.md updates from feature "${feature}":
24786
24077
 
24787
- ${result}
24078
+ ${result.diff}
24788
24079
 
24789
- Make the requested changes, then call hive_request_review again.`;
24080
+ ⚠️ These changes have NOT been applied. Ask the user via question() whether to apply them.`;
24081
+ }
24082
+ if (action === "apply") {
24083
+ if (!content)
24084
+ return "Error: content required for apply action. Use init or sync first to get content, then apply with the approved content.";
24085
+ const result = agentsMdService.apply(content);
24086
+ return `AGENTS.md ${result.isNew ? "created" : "updated"} (${result.chars} chars) at ${result.path}`;
24790
24087
  }
24088
+ return "Error: unknown action";
24791
24089
  }
24792
24090
  })
24793
24091
  },
@@ -24802,27 +24100,13 @@ Make the requested changes, then call hive_request_review again.`;
24802
24100
  }
24803
24101
  }
24804
24102
  },
24805
- event: async ({ event }) => {
24806
- if (event.type === "session.idle") {
24807
- const sessionId = event.properties.sessionID;
24808
- if (sessionId) {
24809
- backgroundManager.handleSessionIdle(sessionId);
24810
- }
24811
- }
24812
- if (event.type === "message.updated") {
24813
- const info = event.properties.info;
24814
- const sessionId = info?.sessionID;
24815
- if (sessionId) {
24816
- backgroundManager.handleMessageEvent(sessionId);
24817
- }
24818
- }
24819
- },
24820
24103
  config: async (opencodeConfig) => {
24821
24104
  configService.init();
24822
24105
  const hiveUserConfig = configService.getAgentConfig("hive-master");
24823
24106
  const hiveAutoLoadedSkills = await buildAutoLoadedSkillsContent("hive-master", configService, directory);
24824
24107
  const hiveConfig = {
24825
24108
  model: hiveUserConfig.model,
24109
+ variant: hiveUserConfig.variant,
24826
24110
  temperature: hiveUserConfig.temperature ?? 0.5,
24827
24111
  description: "Hive (Hybrid) - Plans + orchestrates. Detects phase, loads skills on-demand.",
24828
24112
  prompt: QUEEN_BEE_PROMPT + hiveAutoLoadedSkills,
@@ -24830,16 +24114,14 @@ Make the requested changes, then call hive_request_review again.`;
24830
24114
  question: "allow",
24831
24115
  skill: "allow",
24832
24116
  todowrite: "allow",
24833
- todoread: "allow",
24834
- hive_background_task: "allow",
24835
- hive_background_output: "allow",
24836
- hive_background_cancel: "allow"
24117
+ todoread: "allow"
24837
24118
  }
24838
24119
  };
24839
24120
  const architectUserConfig = configService.getAgentConfig("architect-planner");
24840
24121
  const architectAutoLoadedSkills = await buildAutoLoadedSkillsContent("architect-planner", configService, directory);
24841
24122
  const architectConfig = {
24842
24123
  model: architectUserConfig.model,
24124
+ variant: architectUserConfig.variant,
24843
24125
  temperature: architectUserConfig.temperature ?? 0.7,
24844
24126
  description: "Architect (Planner) - Plans features, interviews, writes plans. NEVER executes.",
24845
24127
  prompt: ARCHITECT_BEE_PROMPT + architectAutoLoadedSkills,
@@ -24850,16 +24132,14 @@ Make the requested changes, then call hive_request_review again.`;
24850
24132
  skill: "allow",
24851
24133
  todowrite: "allow",
24852
24134
  todoread: "allow",
24853
- webfetch: "allow",
24854
- hive_background_task: "allow",
24855
- hive_background_output: "allow",
24856
- hive_background_cancel: "allow"
24135
+ webfetch: "allow"
24857
24136
  }
24858
24137
  };
24859
24138
  const swarmUserConfig = configService.getAgentConfig("swarm-orchestrator");
24860
24139
  const swarmAutoLoadedSkills = await buildAutoLoadedSkillsContent("swarm-orchestrator", configService, directory);
24861
24140
  const swarmConfig = {
24862
24141
  model: swarmUserConfig.model,
24142
+ variant: swarmUserConfig.variant,
24863
24143
  temperature: swarmUserConfig.temperature ?? 0.5,
24864
24144
  description: "Swarm (Orchestrator) - Orchestrates execution. Delegates, spawns workers, verifies, merges.",
24865
24145
  prompt: SWARM_BEE_PROMPT + swarmAutoLoadedSkills,
@@ -24867,25 +24147,20 @@ Make the requested changes, then call hive_request_review again.`;
24867
24147
  question: "allow",
24868
24148
  skill: "allow",
24869
24149
  todowrite: "allow",
24870
- todoread: "allow",
24871
- hive_background_task: "allow",
24872
- hive_background_output: "allow",
24873
- hive_background_cancel: "allow"
24150
+ todoread: "allow"
24874
24151
  }
24875
24152
  };
24876
24153
  const scoutUserConfig = configService.getAgentConfig("scout-researcher");
24877
24154
  const scoutAutoLoadedSkills = await buildAutoLoadedSkillsContent("scout-researcher", configService, directory);
24878
24155
  const scoutConfig = {
24879
24156
  model: scoutUserConfig.model,
24157
+ variant: scoutUserConfig.variant,
24880
24158
  temperature: scoutUserConfig.temperature ?? 0.5,
24881
24159
  mode: "subagent",
24882
24160
  description: "Scout (Explorer/Researcher/Retrieval) - Researches codebase + external docs/data.",
24883
24161
  prompt: SCOUT_BEE_PROMPT + scoutAutoLoadedSkills,
24884
24162
  permission: {
24885
24163
  edit: "deny",
24886
- hive_background_task: "deny",
24887
- hive_background_output: "deny",
24888
- hive_background_cancel: "deny",
24889
24164
  task: "deny",
24890
24165
  delegate: "deny",
24891
24166
  skill: "allow",
@@ -24896,14 +24171,12 @@ Make the requested changes, then call hive_request_review again.`;
24896
24171
  const foragerAutoLoadedSkills = await buildAutoLoadedSkillsContent("forager-worker", configService, directory);
24897
24172
  const foragerConfig = {
24898
24173
  model: foragerUserConfig.model,
24174
+ variant: foragerUserConfig.variant,
24899
24175
  temperature: foragerUserConfig.temperature ?? 0.3,
24900
24176
  mode: "subagent",
24901
24177
  description: "Forager (Worker/Coder) - Executes tasks directly in isolated worktrees. Never delegates.",
24902
24178
  prompt: FORAGER_BEE_PROMPT + foragerAutoLoadedSkills,
24903
24179
  permission: {
24904
- hive_background_task: "deny",
24905
- hive_background_output: "deny",
24906
- hive_background_cancel: "deny",
24907
24180
  task: "deny",
24908
24181
  delegate: "deny",
24909
24182
  skill: "allow"
@@ -24913,15 +24186,13 @@ Make the requested changes, then call hive_request_review again.`;
24913
24186
  const hygienicAutoLoadedSkills = await buildAutoLoadedSkillsContent("hygienic-reviewer", configService, directory);
24914
24187
  const hygienicConfig = {
24915
24188
  model: hygienicUserConfig.model,
24189
+ variant: hygienicUserConfig.variant,
24916
24190
  temperature: hygienicUserConfig.temperature ?? 0.3,
24917
24191
  mode: "subagent",
24918
24192
  description: "Hygienic (Consultant/Reviewer/Debugger) - Reviews plan documentation quality. OKAY/REJECT verdict.",
24919
24193
  prompt: HYGIENIC_BEE_PROMPT + hygienicAutoLoadedSkills,
24920
24194
  permission: {
24921
24195
  edit: "deny",
24922
- hive_background_task: "deny",
24923
- hive_background_output: "deny",
24924
- hive_background_cancel: "deny",
24925
24196
  task: "deny",
24926
24197
  delegate: "deny",
24927
24198
  skill: "allow"