@exaudeus/workrail 3.33.0 → 3.34.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli-worktrain.js +167 -8
- package/dist/console-ui/assets/{index-BuJFLLfY.js → index-BVU9OSOb.js} +1 -1
- package/dist/console-ui/index.html +1 -1
- package/dist/daemon/agent-loop.d.ts +1 -0
- package/dist/daemon/agent-loop.js +1 -1
- package/dist/daemon/daemon-events.d.ts +17 -1
- package/dist/daemon/workflow-runner.d.ts +1 -1
- package/dist/daemon/workflow-runner.js +96 -21
- package/dist/manifest.json +45 -69
- package/dist/mcp/handlers/v2-error-mapping.d.ts +3 -0
- package/dist/mcp/handlers/v2-error-mapping.js +2 -0
- package/dist/mcp/handlers/v2-execution/advance.js +25 -0
- package/dist/mcp/handlers/v2-execution/continue-advance.js +7 -0
- package/dist/mcp/transports/http-entry.js +0 -7
- package/dist/mcp/transports/stdio-entry.js +0 -8
- package/dist/mcp-server.d.ts +0 -2
- package/dist/mcp-server.js +1 -42
- package/dist/trigger/polled-event-store.js +8 -6
- package/dist/v2/durable-core/domain/observation-builder.d.ts +3 -0
- package/dist/v2/durable-core/domain/observation-builder.js +2 -2
- package/dist/v2/durable-core/domain/prompt-renderer.d.ts +2 -1
- package/dist/v2/durable-core/domain/prompt-renderer.js +10 -0
- package/dist/v2/usecases/console-service.js +65 -14
- package/dist/v2/usecases/console-types.d.ts +1 -0
- package/docs/design/bridge-removal-pr-a-candidates.md +115 -0
- package/docs/design/bridge-removal-pr-a-design-review.md +79 -0
- package/docs/design/bridge-removal-pr-a-implementation-plan.md +203 -0
- package/docs/discovery/design-candidates.md +180 -0
- package/docs/discovery/design-review-findings.md +110 -0
- package/docs/discovery/wr-discovery-goal-reframing.md +303 -0
- package/docs/ideas/backlog.md +361 -0
- package/package.json +1 -1
- package/workflows/wr.discovery.json +58 -7
- package/dist/mcp/transports/bridge-entry.d.ts +0 -102
- package/dist/mcp/transports/bridge-entry.js +0 -454
- package/dist/mcp/transports/bridge-events.d.ts +0 -55
- package/dist/mcp/transports/bridge-events.js +0 -24
- package/dist/mcp/transports/primary-tombstone.d.ts +0 -21
- package/dist/mcp/transports/primary-tombstone.js +0 -51
package/docs/ideas/backlog.md
CHANGED
|
@@ -4390,3 +4390,364 @@ The existing `DaemonEventEmitter` (written in #498) writes to a separate daily l
|
|
|
4390
4390
|
### FatalToolError: distinguish recoverable from non-recoverable tool failures (follow-up from PR #523)
|
|
4391
4391
|
The blanket try/catch in AgentLoop._executeTools() converts ALL tool throws to isError tool_results. This is correct for Bash/Read/Write (LLM can see and retry), but potentially wrong for continue_workflow failures (LLM retrying with a broken token loops). The discovery agent proposed a FatalToolError subclass: tools throw FatalToolError for non-recoverable errors (session corruption, bad tokens), plain Error for recoverable failures. _executeTools catches plain Error and returns isError; FatalToolError propagates and kills the session. Combined with the DEFAULT_MAX_TURNS cap (PR followup), this provides defense-in-depth.
|
|
4392
4392
|
5. Deprecate `DaemonEventEmitter` once console reads from session events
|
|
4393
|
+
|
|
4394
|
+
---
|
|
4395
|
+
|
|
4396
|
+
### Worktree lifecycle management: automatic cleanup and inventory (Apr 18, 2026)
|
|
4397
|
+
|
|
4398
|
+
**The problem:** every WorkTrain agent that uses `--isolation worktree` leaves a worktree on disk after completion. With 10 concurrent agents running all day, this accumulated to 69 worktrees in `.claude/worktrees/`, triggering hundreds of simultaneous `git status` processes that saturated the CPU.
|
|
4399
|
+
|
|
4400
|
+
**What's needed:**
|
|
4401
|
+
|
|
4402
|
+
1. **Automatic cleanup on session end** -- when a WorkTrain session completes (success or failure), the daemon automatically runs `git worktree remove <path> --force` for the session's worktree. If the branch is already merged to main, also delete the local branch ref.
|
|
4403
|
+
|
|
4404
|
+
2. **Startup pruning** -- `workrail daemon` startup runs `git worktree prune` in each configured workspace before starting the trigger listener.
|
|
4405
|
+
|
|
4406
|
+
3. **`worktrain worktree list`** -- shows all WorkTrain-managed worktrees: path, branch, session ID, age, whether the branch is merged.
|
|
4407
|
+
|
|
4408
|
+
4. **`worktrain worktree clean`** -- removes all worktrees whose branches are merged to main, or older than N days. Dry-run mode by default.
|
|
4409
|
+
|
|
4410
|
+
5. **`worktrain worktree status`** -- summary: how many worktrees, total disk usage, any stale ones.
|
|
4411
|
+
|
|
4412
|
+
6. **Never use main as a worktree** (already in backlog) -- enforced at worktree creation time, not just as a rule.
|
|
4413
|
+
|
|
4414
|
+
**Root cause of the CPU spike:** 69 worktrees × repeated `git status --short` from tools/IDE plugins = hundreds of concurrent git processes. Each `git status` on a large repo with many untracked files is CPU-intensive.
|
|
4415
|
+
|
|
4416
|
+
**Mitigation already in place:** `--isolation worktree` creates branches named `worktree-agent-<id>` -- these are identifiable and bulk-deletable. The daemon's `runStartupRecovery()` could also prune them.
|
|
4417
|
+
|
|
4418
|
+
**Build order:** startup pruning (trivial, high value) → automatic cleanup on session end → `worktrain worktree` CLI commands.
|
|
4419
|
+
|
|
4420
|
+
---
|
|
4421
|
+
|
|
4422
|
+
### Simplify MCP server: remove primary election, bridge, and HTTP serving (architectural cleanup)
|
|
4423
|
+
|
|
4424
|
+
**The core insight:** the bridge/primary-election system exists solely to solve "only one process should serve the console UI on port 3456." Now that `worktrain console` is a standalone file-watching binary (PR #512), that problem is already solved. The entire bridge/election system can be removed.
|
|
4425
|
+
|
|
4426
|
+
**What "allow multiple MCP processes" means in practice:**
|
|
4427
|
+
- Each Claude Code window gets its own MCP server -- no port contention, no primary election, no bridge reconnect cycles
|
|
4428
|
+
- MCP server becomes pure stdio: starts, handles tools, exits. Nothing async needs to write after the pipe closes -- EPIPE is irrelevant.
|
|
4429
|
+
- Session store is append-only JSONL per-session -- multiple processes writing different sessions cannot corrupt each other
|
|
4430
|
+
- `worktrain console` aggregates all sessions from the file store regardless of how many MCP servers ran
|
|
4431
|
+
|
|
4432
|
+
**What to remove:**
|
|
4433
|
+
- `DashboardLock` / `tryBecomePrimary()` / `bindWithPortFallback()` -- the entire primary election system
|
|
4434
|
+
- `bridge-entry.ts` -- the bridge, spawn storm, and reconnect drama are gone
|
|
4435
|
+
- `HttpServer` starting as part of the MCP server -- console owns HTTP, not MCP
|
|
4436
|
+
|
|
4437
|
+
**What remains for the MCP server:** pure stdio MCP protocol + session engine. No HTTP, no port binding, no lock files. Starts instantly, exits cleanly.
|
|
4438
|
+
|
|
4439
|
+
**Why this is safe:**
|
|
4440
|
+
- Tokens are session-scoped UUIDs -- two servers cannot share a session
|
|
4441
|
+
- Append-only JSONL has no exclusive file locks
|
|
4442
|
+
- ~50MB per process × 3 Claude Code windows = 150MB -- acceptable
|
|
4443
|
+
|
|
4444
|
+
**The bridge complexity was always a band-aid.** It was the right solution when the MCP server also owned the console UI. With the standalone console, the band-aid can come off and the system becomes dramatically simpler and more reliable.
|
|
4445
|
+
|
|
4446
|
+
**Build order:** extract `worktrain console` fully (done) → remove HttpServer from MCP startup → remove bridge → remove DashboardLock/primary election → MCP server is pure stdio.
|
|
4447
|
+
|
|
4448
|
+
---
|
|
4449
|
+
|
|
4450
|
+
### Agent-engine communication: first principles design (Apr 18, 2026)
|
|
4451
|
+
|
|
4452
|
+
**The setup for this conversation:**
|
|
4453
|
+
|
|
4454
|
+
Three discovery agents investigated whether the daemon should continue using MCP-style tool calls for workflow control (`continue_workflow`). Their findings:
|
|
4455
|
+
|
|
4456
|
+
- **Discovery 1**: Tool calls are fine; enrich `continue_workflow` with `artifacts` now, explore structured output hybrid later pending Bedrock verification. ~225 tokens/request saved with hybrid.
|
|
4457
|
+
- **Discovery 2**: `complete_step` tool -- daemon owns transitions, continueToken hidden from LLM, notes required at type level. Cleaner DX without paradigm shift.
|
|
4458
|
+
- **Discovery 3**: The field has converged on tool calls. OpenAI Agents SDK, LangGraph, Temporal, Vercel AI SDK all use tool calls for workflow control. WorkRail's `continue_workflow` with HMAC tokens is already field-standard or better.
|
|
4459
|
+
|
|
4460
|
+
**User's response to "the field has converged on tool calls":**
|
|
4461
|
+
|
|
4462
|
+
> "Right, but do we want industry standards? Aren't we trying to build something special? What if there is better?"
|
|
4463
|
+
|
|
4464
|
+
This is the right question. "Field convergence" is a description of where everyone ended up starting from the MCP/function-calling paradigm -- not proof that it's optimal. Every system surveyed treats the workflow engine as external infrastructure the agent calls into. WorkRail is different: **the daemon IS the workflow engine**. The agent loop and the step sequencer run in the same process, sharing the same DI container. Tool calls are a network-origin concept -- they exist because there's an LLM over there and an executor over here. WorkRail doesn't have that constraint.
|
|
4465
|
+
|
|
4466
|
+
---
|
|
4467
|
+
|
|
4468
|
+
#### First-principles alternatives (unexplored territory)
|
|
4469
|
+
|
|
4470
|
+
These were not in any of the discovery agents' outputs -- they emerge from the insight that WorkRail owns both sides of the conversation:
|
|
4471
|
+
|
|
4472
|
+
**1. Structured response parsing (no tool call for workflow control)**
|
|
4473
|
+
The agent outputs a structured response at the end of each turn. The daemon parses it. The LLM never "calls a tool" to advance -- it produces a well-structured output and the daemon acts on it. The continueToken and workflow machinery are completely invisible to the LLM. Example: agent outputs `{"step_complete": true, "notes": "...", "artifacts": [...]}` as its final text, daemon detects this and advances.
|
|
4474
|
+
|
|
4475
|
+
**2. Implicit advancement (criteria-based)**
|
|
4476
|
+
The daemon watches what the agent produces (file writes, bash outcomes, notes) and decides when to advance -- the agent never explicitly signals "I'm done." The workflow step has completion criteria, and the daemon evaluates them against the agent's cumulative output. More like a CI pipeline (tests pass = done) than an API call. The agent just works; the daemon decides when the step is complete.
|
|
4477
|
+
|
|
4478
|
+
**3. Declarative intent + daemon execution**
|
|
4479
|
+
The agent outputs what it *wants* to happen: "I want to commit these files with this message and advance to the next step." The daemon executes. Same as the scripts-over-agent principle applied to the agent's own workflow control -- the agent declares intent, scripts execute. No tool call for the mechanical parts.
|
|
4480
|
+
|
|
4481
|
+
**4. Streaming judgment**
|
|
4482
|
+
The daemon reads the agent's streaming response in real-time, extracts notes and artifacts as they appear, and makes the advance decision before the agent "finishes." No explicit signal from the agent. The daemon monitors and decides.
|
|
4483
|
+
|
|
4484
|
+
**5. Separation of concerns: tools for world, declaration for workflow**
|
|
4485
|
+
Keep tool calls for external actions (Bash, Read, Write) -- these genuinely need interleaved execution and result reasoning. But workflow control (advance, submit artifacts, set context) uses a different mechanism entirely: structured response, implicit detection, or a single lightweight declaration. The protocol distinction: tools are for I/O, declarations are for state.
|
|
4486
|
+
|
|
4487
|
+
---
|
|
4488
|
+
|
|
4489
|
+
#### What makes this hard
|
|
4490
|
+
|
|
4491
|
+
These alternatives trade off in important ways:
|
|
4492
|
+
- **Structured response parsing**: requires reliable structured output from the LLM, which can fail without explicit enforcement
|
|
4493
|
+
- **Implicit advancement**: requires the daemon to correctly evaluate completion criteria -- complex for open-ended steps
|
|
4494
|
+
- **Declarative intent**: still needs some kind of output format; essentially moves the "tool call" into the response text
|
|
4495
|
+
- **Streaming judgment**: hardest to implement correctly; requires the daemon to parse partial responses reliably
|
|
4496
|
+
|
|
4497
|
+
The current tool-call approach works precisely because it's explicit: the agent signals intent exactly once, the daemon acts on it. The alternatives are more elegant but less reliable.
|
|
4498
|
+
|
|
4499
|
+
---
|
|
4500
|
+
|
|
4501
|
+
#### What to actually investigate
|
|
4502
|
+
|
|
4503
|
+
Before committing to any alternative, these questions need answers:
|
|
4504
|
+
|
|
4505
|
+
1. **Does Bedrock support `response_format + tools` simultaneously?** A 10-line test call resolves this. If yes, hybrid structured output is immediately viable for workflow control.
|
|
4506
|
+
2. **What does implicit advancement actually look like for a coding task?** Write out the completion criteria for `coding-task-workflow-agentic` phase-0 (classify). Can a daemon reliably detect "Phase 0 is done" without an explicit signal?
|
|
4507
|
+
3. **What is the actual failure mode of structured response parsing?** How often does Claude 4.6 Sonnet fail to produce valid JSON when asked to end its turn with a structured summary? Under what conditions?
|
|
4508
|
+
4. **What did nexus-core do?** The backlog notes nexus-core as a more advanced system -- how does it handle agent-step transitions?
|
|
4509
|
+
|
|
4510
|
+
These are prototype questions, not design questions. Build the smallest possible test for each before committing to any direction.
|
|
4511
|
+
|
|
4512
|
+
---
|
|
4513
|
+
|
|
4514
|
+
### Bundled trigger templates: zero-config workflow automation via worktrain init (Apr 18, 2026)
|
|
4515
|
+
|
|
4516
|
+
**Problem:** Every user has to write their own triggers.yml manually. Wrong workflow IDs, missing required fields, wrong workspace paths -- all common mistakes (we hit all three today). There's no "just works" path to workflow automation.
|
|
4517
|
+
|
|
4518
|
+
**Solution:** Ship common trigger templates bundled with WorkTrain. `worktrain init` presents a menu and generates a pre-filled triggers.yml.
|
|
4519
|
+
|
|
4520
|
+
**Bundled templates to ship:**
|
|
4521
|
+
|
|
4522
|
+
```yaml
|
|
4523
|
+
# Template: mr-review
|
|
4524
|
+
- id: mr-review
|
|
4525
|
+
workflowId: mr-review-workflow-agentic
|
|
4526
|
+
goal: "Review the PR specified in the webhook payload goal field"
|
|
4527
|
+
concurrencyMode: parallel
|
|
4528
|
+
autoCommit: false
|
|
4529
|
+
agentConfig: { maxSessionMinutes: 30 }
|
|
4530
|
+
|
|
4531
|
+
# Template: coding-task
|
|
4532
|
+
- id: coding-task
|
|
4533
|
+
workflowId: coding-task-workflow-agentic
|
|
4534
|
+
concurrencyMode: parallel
|
|
4535
|
+
autoCommit: false
|
|
4536
|
+
agentConfig: { maxSessionMinutes: 60 }
|
|
4537
|
+
|
|
4538
|
+
# Template: discovery-task
|
|
4539
|
+
- id: discovery-task
|
|
4540
|
+
workflowId: wr.discovery
|
|
4541
|
+
concurrencyMode: parallel
|
|
4542
|
+
autoCommit: false
|
|
4543
|
+
agentConfig: { maxSessionMinutes: 60 }
|
|
4544
|
+
|
|
4545
|
+
# Template: bug-investigation
|
|
4546
|
+
- id: bug-investigation
|
|
4547
|
+
workflowId: bug-investigation.agentic.v2
|
|
4548
|
+
agentConfig: { maxSessionMinutes: 45 }
|
|
4549
|
+
|
|
4550
|
+
# Template: weekly-health-scan (cron, when native cron trigger ships)
|
|
4551
|
+
# - id: weekly-health-scan
|
|
4552
|
+
# type: cron
|
|
4553
|
+
# schedule: "0 9 * * 0"
|
|
4554
|
+
# workflowId: architecture-scalability-audit
|
|
4555
|
+
```
|
|
4556
|
+
|
|
4557
|
+
**`worktrain init` flow:**
|
|
4558
|
+
1. "Which workflows do you want to run automatically?" (checkbox menu)
|
|
4559
|
+
2. For each selected: set `workspacePath` to current directory (overridable)
|
|
4560
|
+
3. Generate `triggers.yml` in the workspace root
|
|
4561
|
+
4. Validate workflow IDs exist before writing (use the startup validator)
|
|
4562
|
+
5. Tell the user how to fire each trigger: `curl -X POST http://localhost:3200/webhook/<id> ...`
|
|
4563
|
+
|
|
4564
|
+
**Why this matters:** The difference between WorkTrain being usable by anyone vs only by engineers who read the source code. A new user should be able to go from `worktrain init` to their first automated workflow in under 5 minutes.
|
|
4565
|
+
|
|
4566
|
+
**Also needed:** `worktrain trigger add <template-name>` to add a single trigger to an existing triggers.yml without re-running init.
|
|
4567
|
+
|
|
4568
|
+
---
|
|
4569
|
+
|
|
4570
|
+
### Coordinator context injection standard: agents start informed, not discovering (Apr 18, 2026)
|
|
4571
|
+
|
|
4572
|
+
**The problem:** subagents spawned by a coordinator are completely blind. They know nothing of prior conversations, existing docs, the pipeline, or what's already been tried. The workflows compensate by spending 3-5 turns on "Phase 0: context gathering" every session -- expensive in tokens, time, and LLM turns -- just to get oriented before work starts.
|
|
4573
|
+
|
|
4574
|
+
**The root cause:** the coordinator spawns agents with task descriptions but not context. "Fix the Windows CI failures" is a task. "The Windows CI failures are in `workflow-runner-bash-tool.test.ts` because `node -e` isn't in PATH on Windows -- the fix is to use `process.execPath` instead of `node`, which is the established pattern in this codebase" is context. The difference is 0 discovery turns vs 5.
|
|
4575
|
+
|
|
4576
|
+
**The standard to establish:**
|
|
4577
|
+
|
|
4578
|
+
Every coordinator-spawned agent gets a pre-packaged context bundle. The coordinator assembles it before calling `worktrain spawn`. The bundle includes:
|
|
4579
|
+
|
|
4580
|
+
1. **Prior session findings** -- what relevant sessions discovered (from session store query)
|
|
4581
|
+
2. **Established patterns** -- the specific invariants and patterns the agent needs (from knowledge graph or AGENTS.md)
|
|
4582
|
+
3. **What NOT to discover** -- explicit list of things already known so the agent doesn't waste turns
|
|
4583
|
+
4. **Failure history** -- what's been tried and didn't work (prevents re-exploring dead ends)
|
|
4584
|
+
|
|
4585
|
+
**Format:** ~2000 tokens max, injected as a `<context>` block before the task description. Structured so the agent can skip Phase 0 context gathering entirely when the bundle is complete.
|
|
4586
|
+
|
|
4587
|
+
**Build order:**
|
|
4588
|
+
1. Write the standard as a prompt template for coordinator scripts (`worktrain spawn` calls)
|
|
4589
|
+
2. The knowledge graph provides the infrastructure for querying relevant context automatically
|
|
4590
|
+
3. Eventually: `worktrain spawn` reads the context bundle from the graph + session store automatically, coordinator doesn't have to assemble it manually
|
|
4591
|
+
|
|
4592
|
+
**Why this is high priority:** every agent spawned today without proper context is burning tokens on discovery that should have been provided upfront. At 10 concurrent agents, that's 10x the waste. With proper context injection, Phase 0 becomes 1 turn instead of 5, and output quality improves because the agent starts with the right mental model.
|
|
4593
|
+
|
|
4594
|
+
---
|
|
4595
|
+
|
|
4596
|
+
### Context budget per spawned agent: capped, structured, queryable (Apr 18, 2026)
|
|
4597
|
+
|
|
4598
|
+
**The companion spec to context injection:**
|
|
4599
|
+
|
|
4600
|
+
Rather than hoping agents discover the right context, the coordinator guarantees a minimum context budget: a pre-packaged bundle of ~2000 tokens that every agent starts with. The knowledge graph is what makes this scalable -- without it, the coordinator has to manually assemble context from files, which is itself expensive.
|
|
4601
|
+
|
|
4602
|
+
**Bundle contents (structured):**
|
|
4603
|
+
- `<relevant_files>` -- paths + key excerpts from files the agent will likely touch (from KG query)
|
|
4604
|
+
- `<prior_sessions>` -- summaries of the last 3 sessions that touched related code (from session store)
|
|
4605
|
+
- `<established_patterns>` -- specific patterns the agent must follow (e.g. "use `tmpPath()` not `/tmp/`")
|
|
4606
|
+
- `<known_facts>` -- things already proven true (e.g. "semantic-release runs automatically after CI, not before")
|
|
4607
|
+
- `<do_not_explore>` -- explicit list of dead ends and already-tried approaches
|
|
4608
|
+
|
|
4609
|
+
**How the knowledge graph enables this:**
|
|
4610
|
+
- `relevant_files`: KG query "what files are related to the goal?" returns the structural subgraph
|
|
4611
|
+
- `prior_sessions`: session store query "what sessions touched these files in the last 7 days?"
|
|
4612
|
+
- `established_patterns`: AGENTS.md + KG pattern nodes
|
|
4613
|
+
- `known_facts` and `do_not_explore`: built by the coordinator from prior session outputs
|
|
4614
|
+
|
|
4615
|
+
**Without the KG (today):** the coordinator manually includes key context in the prompt. Better than nothing, but requires the coordinator to know what's relevant.
|
|
4616
|
+
**With the KG (future):** `worktrain spawn --workflow X --goal "..."` automatically queries the KG and assembles the context bundle. Coordinator just provides the goal.
|
|
4617
|
+
|
|
4618
|
+
---
|
|
4619
|
+
|
|
4620
|
+
### Decouple goal from trigger definition -- late-bound goals for daemon sessions (Apr 18, 2026)
|
|
4621
|
+
|
|
4622
|
+
**The problem:** `goal` is currently required at trigger-definition time (in triggers.yml). For triggers like `mr-review`, the goal is inherently dynamic -- it's the PR title and description, known only when the webhook fires, not when the trigger is configured.
|
|
4623
|
+
|
|
4624
|
+
The current workaround: `goalTemplate: "{{$.goal}}"` with the caller passing `{"goal": "Review PR #123..."}` in the webhook payload. This works but is awkward -- the caller must know the payload field convention, and it's not obvious from the trigger definition.
|
|
4625
|
+
|
|
4626
|
+
**The right model:** separate "which workflow" (trigger definition) from "what to do" (dispatch-time goal).
|
|
4627
|
+
|
|
4628
|
+
```yaml
|
|
4629
|
+
# Trigger definition -- no goal required
|
|
4630
|
+
triggers:
|
|
4631
|
+
- id: mr-review
|
|
4632
|
+
workflowId: mr-review-workflow-agentic
|
|
4633
|
+
workspacePath: ~/git/myproject
|
|
4634
|
+
# No goal here -- goal comes from dispatch context
|
|
4635
|
+
```
|
|
4636
|
+
|
|
4637
|
+
```bash
|
|
4638
|
+
# Dispatch with goal at call time
|
|
4639
|
+
curl -X POST http://localhost:3200/webhook/mr-review \
|
|
4640
|
+
-d '{"goal": "Review PR #123: fix authentication bug"}'
|
|
4641
|
+
|
|
4642
|
+
# Or via worktrain spawn
|
|
4643
|
+
worktrain spawn --trigger mr-review --goal "Review PR #123: fix authentication bug"
|
|
4644
|
+
```
|
|
4645
|
+
|
|
4646
|
+
**Implementation options:**
|
|
4647
|
+
|
|
4648
|
+
1. **goalTemplate with `$.goal` as the default** -- if no `goal` is set in the trigger and no `goalTemplate` is set, default to `goalTemplate: "{{$.goal}}"`. The webhook payload's `goal` field becomes the canonical way to pass a dynamic goal. Zero breaking changes.
|
|
4649
|
+
|
|
4650
|
+
2. **Late-bound goal field on WorkflowTrigger** -- `executeStartWorkflow` accepts `goal` as a separate parameter. The trigger provides everything except the goal; the dispatcher (TriggerRouter) resolves the goal from the webhook payload or a default. This makes the separation explicit at the type level.
|
|
4651
|
+
|
|
4652
|
+
3. **Prompt injection** -- the workflow's first step can read `context.goal` which is injected from the webhook payload. The trigger has a static placeholder; the real goal comes through as a context variable. This is how it currently half-works but without the clean API.
|
|
4653
|
+
|
|
4654
|
+
**Preferred: Option 1 (default goalTemplate)** -- minimal change, backward compatible, works immediately. If `goal` is absent from the trigger and the webhook payload contains `{"goal": "..."}`, use it. Document this as the standard pattern for dynamic-goal triggers.
|
|
4655
|
+
|
|
4656
|
+
**Also needed:** the `worktrain spawn` CLI command should accept `--goal` as a first-class flag (already partially implemented) so coordinator scripts can pass goals without knowing the webhook payload format.
|
|
4657
|
+
|
|
4658
|
+
**Why this matters for WorkTrain being production-ready:** most real-world triggers (PR review, issue investigation, incident response) have dynamic goals that depend on what just happened. Static goals in triggers.yml only work for scheduled/cron tasks. Late-bound goals make the whole trigger system composable with external events.
|
|
4659
|
+
|
|
4660
|
+
---
|
|
4661
|
+
|
|
4662
|
+
### Session identity: a unit of work is one session, not many (Apr 18, 2026)
|
|
4663
|
+
|
|
4664
|
+
**The problem:** WorkTrain creates a separate WorkRail session for every workflow run. A task that involves discovery + design + implementation + review + re-review appears as 5 unrelated sessions in the console. There's no way to know they belong together without reading the goals. The user sees 50 flat sessions instead of 10 units of work.
|
|
4665
|
+
|
|
4666
|
+
**The correct model:** a session is a unit of work, not a workflow run. "Review PR #559" is one session. It might internally run 3 workflow sessions (context gathering, review, re-review) but the user sees one thing with one identity.
|
|
4667
|
+
|
|
4668
|
+
**What's needed:**
|
|
4669
|
+
|
|
4670
|
+
**1. Parent-child session relationships**
|
|
4671
|
+
`session_created` in the session store gets an optional `parentSessionId` field. When a coordinator spawns a child via `worktrain spawn`, the child carries the parent's ID. The session store becomes a tree.
|
|
4672
|
+
|
|
4673
|
+
```typescript
|
|
4674
|
+
// session_created event
|
|
4675
|
+
{
|
|
4676
|
+
kind: 'session_created',
|
|
4677
|
+
sessionId: 'sess_abc123',
|
|
4678
|
+
parentSessionId: 'sess_root456', // NEW -- absent for root sessions
|
|
4679
|
+
workflowId: 'wr.discovery',
|
|
4680
|
+
goal: '...'
|
|
4681
|
+
}
|
|
4682
|
+
```
|
|
4683
|
+
|
|
4684
|
+
**2. Root session as the identity**
|
|
4685
|
+
The root session is what the user sees. It represents the unit of work ("Review PR #559", "Implement GitHub polling adapter"). Child sessions are implementation details -- they may be visible on drill-down but not in the top-level list.
|
|
4686
|
+
|
|
4687
|
+
**3. Console session DAG view**
|
|
4688
|
+
The console shows root sessions, each expandable to show the tree of child sessions:
|
|
4689
|
+
```
|
|
4690
|
+
● Review PR #559 [3 sessions, 22 min]
|
|
4691
|
+
├── wr.discovery (context) [completed, 8 min]
|
|
4692
|
+
├── mr-review-workflow-agentic [completed, 11 min]
|
|
4693
|
+
└── coding-task (fix findings) [running, 3 min...]
|
|
4694
|
+
```
|
|
4695
|
+
|
|
4696
|
+
**4. Session identity propagated through coordinator**
|
|
4697
|
+
`worktrain spawn` accepts `--parent-session <id>` to link child sessions. The coordinator script passes this when spawning each phase of a pipeline. When spawning via the daemon trigger, the trigger's initial session becomes the root.
|
|
4698
|
+
|
|
4699
|
+
**Relationship to coordinator sessions spec:**
|
|
4700
|
+
The coordinator sessions spec (`spawn_session` + `await_sessions` tools) handles the orchestration. This spec handles the identity and visibility. They're complementary: coordinator scripts drive the work, session identity makes the work visible as a coherent unit.
|
|
4701
|
+
|
|
4702
|
+
**Why this matters:**
|
|
4703
|
+
- Today: user sees "what are all these sessions?" -- has to read goals to understand grouping
|
|
4704
|
+
- With this: user sees "here are my 5 units of work today" -- each one tells a coherent story
|
|
4705
|
+
- The console becomes a work log, not a session log
|
|
4706
|
+
|
|
4707
|
+
**Build order:**
|
|
4708
|
+
1. Add `parentSessionId` to `session_created` event schema (small, additive)
|
|
4709
|
+
2. `worktrain spawn --parent-session <id>` flag (wires through TriggerRouter dispatch)
|
|
4710
|
+
3. Console aggregates sessions by root and shows tree on expand
|
|
4711
|
+
4. Dashboard "work sessions" view replaces flat session list as default
|
|
4712
|
+
|
|
4713
|
+
---
|
|
4714
|
+
|
|
4715
|
+
### Trigger-derived tool availability and knowledge configuration (Apr 18, 2026, to investigate)
|
|
4716
|
+
|
|
4717
|
+
**Observation:** the trigger already declares what external system matters. A `gitlab_poll` trigger means the agent will be working on GitLab content. A `jira_poll` trigger means Jira. WorkTrain should use this declaration to automatically configure what tools and knowledge sources the agent gets -- no manual per-trigger MCP configuration.
|
|
4718
|
+
|
|
4719
|
+
**Idea 1: Implicit tool availability from trigger source**
|
|
4720
|
+
If `provider: gitlab_poll` → agent automatically gets GitLab MCP tools.
|
|
4721
|
+
If `provider: github_poll` → agent gets GitHub tools.
|
|
4722
|
+
If `provider: jira_poll` → agent gets Jira tools.
|
|
4723
|
+
The trigger source is a declaration of intent -- WorkTrain infers the tool environment from it. No extra config needed for the common case.
|
|
4724
|
+
|
|
4725
|
+
**Idea 2: Trigger as knowledge configuration**
|
|
4726
|
+
The trigger could declare where the agent gets different kinds of knowledge:
|
|
4727
|
+
|
|
4728
|
+
```yaml
|
|
4729
|
+
- id: jira-bug-fix
|
|
4730
|
+
provider: jira_poll
|
|
4731
|
+
knowledge:
|
|
4732
|
+
general: [glean, confluence] # background org knowledge
|
|
4733
|
+
codebase: [github, local-kg] # structural code knowledge
|
|
4734
|
+
task: [jira-ticket, related-prs] # what this specific task is about
|
|
4735
|
+
style: [team-conventions, agents-md] # how to do the work
|
|
4736
|
+
```
|
|
4737
|
+
|
|
4738
|
+
The daemon assembles a pre-packaged context bundle from these sources before the agent starts. The agent skips Phase 0 discovery entirely for the declared knowledge domains.
|
|
4739
|
+
|
|
4740
|
+
**Why this is interesting:**
|
|
4741
|
+
- Closes the loop between "what triggers the work" and "what context the agent needs"
|
|
4742
|
+
- The trigger author knows better than anyone what knowledge sources are relevant
|
|
4743
|
+
- Eliminates redundant context gathering across sessions for the same trigger type
|
|
4744
|
+
- Natural fit with workspace-scoped MCP config and the knowledge graph
|
|
4745
|
+
|
|
4746
|
+
**What needs investigating:**
|
|
4747
|
+
- Is the trigger → tool mapping always 1:1 (gitlab_poll → gitlab MCP) or does it need explicit override?
|
|
4748
|
+
- What are the right "knowledge categories"? (general, codebase, task, style seem like a reasonable starting set)
|
|
4749
|
+
- How does this interact with the knowledge graph? (local-kg is already planned as a knowledge source)
|
|
4750
|
+
- Can this be inferred automatically or does it always need explicit declaration?
|
|
4751
|
+
- How do you handle a trigger that spans multiple systems (e.g. a Jira ticket about a GitHub PR)?
|
|
4752
|
+
|
|
4753
|
+
**This is a design-first item** -- the ideas are promising but the right shape isn't obvious. Needs a discovery pass before any implementation.
|
package/workflows/wr.discovery.json
CHANGED
|
@@ -1,9 +1,10 @@
|
|
|
1
1
|
{
|
|
2
2
|
"id": "wr.discovery",
|
|
3
3
|
"name": "Discovery Workflow",
|
|
4
|
-
"version": "3.
|
|
4
|
+
"version": "3.2.0",
|
|
5
|
+
"validatedAgainstSpecVersion": 3,
|
|
5
6
|
"description": "Use this to explore and think through a problem end-to-end. Moves between landscape exploration, problem framing, candidate generation, adversarial challenge, and uncertainty resolution.",
|
|
6
|
-
"about": "## Discovery Workflow\n\nThis workflow is for structured thinking through an ambiguous problem, opportunity, or decision
|
|
7
|
+
"about": "## Discovery Workflow\n\nThis workflow is for structured thinking through an ambiguous problem, opportunity, or decision -- the kind where you are not sure of the right answer yet and jumping straight to solutions would be premature.\n\n**What it does:**\nBefore starting any research, the workflow challenges the stated goal: it determines whether you handed it a problem or a solution, surfaces hidden assumptions, and defines what success looks like in concrete observable terms. It then selects one of three emphasis paths based on your actual need: `landscape_first` for understanding the current state and comparing options, `full_spectrum` for important or ambiguous problems where both landscape grounding and reframing are needed, and `design_first` when the dominant risk is solving the wrong problem. The workflow moves through landscape research, stakeholder and problem framing, candidate direction generation, adversarial challenge, and an uncertainty-resolution stage that can close with a recommendation, a targeted research follow-up, or a prototype/test plan. 
A design document is maintained throughout as the human-facing artifact.\n\n**When to use it:**\n- You face a decision, architectural question, or design problem with no obvious right answer\n- You want to explore an opportunity space before committing to a direction\n- You suspect the stated problem might not be the real problem\n- You need a structured recommendation with explicit tradeoffs and alternatives rather than the first plausible answer\n- You want to make sure you are solving the right problem, not just the one you described\n\n**What it produces:**\nA design document covering: the reframed problem (if the original was solution-framed), the selected path and framing, landscape takeaways, chosen direction and why it won, the strongest alternative and why it lost, confidence band, residual risks, and next actions.\n\n**How to get good results:**\nDescribe the problem, opportunity, or decision you want help thinking through -- or describe the solution you are considering, and let the workflow figure out the underlying problem. State what outcome you want (a recommendation, a comparison, a research plan, a prototype direction). The more context you provide upfront about constraints and anti-goals, the sharper the framing will be.",
|
|
7
8
|
"examples": [
|
|
8
9
|
"Decide whether to build a custom notification system or adopt a third-party service",
|
|
9
10
|
"Explore what the right architecture is for moving our monolith to services",
|
|
@@ -53,6 +54,40 @@
|
|
|
53
54
|
}
|
|
54
55
|
],
|
|
55
56
|
"steps": [
|
|
57
|
+
{
|
|
58
|
+
"id": "phase-0-reframe",
|
|
59
|
+
"title": "Phase 0a: Reframe the Goal Before Jumping to Solutions",
|
|
60
|
+
"promptBlocks": {
|
|
61
|
+
"goal": "Challenge the stated goal before we start researching it. Figure out whether I handed you a problem or a solution, surface the assumptions baked into the framing, and define what success actually looks like.",
|
|
62
|
+
"constraints": [
|
|
63
|
+
[
|
|
64
|
+
{
|
|
65
|
+
"kind": "ref",
|
|
66
|
+
"refId": "wr.refs.notes_first_durability"
|
|
67
|
+
}
|
|
68
|
+
],
|
|
69
|
+
"Do not begin landscape research or candidate generation in this step.",
|
|
70
|
+
"If the goal is stated as a solution, find the problem behind it -- do not just accept the solution as given.",
|
|
71
|
+
"Challenge assumptions with specificity: name each assumption, state why it might be wrong, and say what evidence would confirm or refute it.",
|
|
72
|
+
"Define success in terms of outcomes and observable signals, not just delivery of the stated goal."
|
|
73
|
+
],
|
|
74
|
+
"procedure": [
|
|
75
|
+
"Classify the stated goal as a `solution_statement` (names a specific solution or approach) or a `problem_statement` (describes an outcome, pain, or gap without prescribing the fix). Record this as `goalType`.",
|
|
76
|
+
"If `goalType = solution_statement`: identify the underlying problem this solution is trying to solve, name at least one materially different way to solve that problem, and state explicitly whether the stated solution is the best match for the problem or just the most familiar one.",
|
|
77
|
+
"Challenge exactly 3 key assumptions embedded in the stated goal. For each assumption: state the assumption clearly, explain why it might be wrong, and identify what evidence would confirm or refute it. Record these as `challengedAssumptions`.",
|
|
78
|
+
"Define what success looks like before any candidates are generated. Success criteria must be concrete and observable -- not 'the problem is solved' but 'we can measure X, users do Y, the system behaves Z'. Record these as `successCriteria`.",
|
|
79
|
+
"Record a one-sentence `reframedProblem` that captures the real underlying problem, stripped of any solution bias from the original goal.",
|
|
80
|
+
"Set these keys in the next `continue_workflow` call's `context` object: `goalType`, `reframedProblem`, `challengedAssumptions`, `successCriteria`, `goalWasSolutionStatement`."
|
|
81
|
+
],
|
|
82
|
+
"verify": [
|
|
83
|
+
"Each of the 3 challenged assumptions is specific enough that someone could disagree with the challenge.",
|
|
84
|
+
"The success criteria would let a skeptic determine whether the work actually succeeded.",
|
|
85
|
+
"If the goal was a solution-statement, the underlying problem is stated independently of the proposed solution.",
|
|
86
|
+
"The reframed problem is meaningfully different from the original goal wording, or you have explicitly noted why the original framing was already problem-shaped."
|
|
87
|
+
]
|
|
88
|
+
},
|
|
89
|
+
"requireConfirmation": false
|
|
90
|
+
},
|
|
56
91
|
{
|
|
57
92
|
"id": "phase-0-select-path",
|
|
58
93
|
"title": "Phase 0: Understand, Classify, and Recommend a Path",
|
|
@@ -71,13 +106,15 @@
|
|
|
71
106
|
],
|
|
72
107
|
"procedure": [
|
|
73
108
|
"Capture: `problemStatement`, `desiredOutcome`, `coreConstraints`, `antiGoals`, `primaryUncertainty`, `knownApproaches`, `importantStakeholders`, `rigorMode`, `automationLevel`, `pathRecommendation`, `pathRationale`, `designDocPath`.",
|
|
74
|
-
"
|
|
109
|
+
"If `goalWasSolutionStatement = true`, set `problemStatement` from `reframedProblem` rather than from the stated goal. Record the original stated goal as `statedGoal` in the design doc so the distinction is visible.",
|
|
110
|
+
"Choose `landscape_first` when my dominant need is understanding the current landscape or comparing options. Choose `full_spectrum` when both landscape grounding and reframing are needed. Choose `design_first` when the dominant risk is solving the wrong problem or shaping the wrong concept. If `goalWasSolutionStatement = true`, bias toward `design_first` unless the stated solution is clearly the correct framing.",
|
|
75
111
|
"Create or update `designDocPath` with sections for Context / Ask, Path Recommendation, Constraints / Anti-goals, Landscape Packet, Problem Frame Packet, Candidate Directions, Challenge Notes, Resolution Notes, Decision Log, and Final Summary.",
|
|
76
112
|
"Set these keys in the next `continue_workflow` call's `context` object: `problemStatement`, `desiredOutcome`, `coreConstraints`, `antiGoals`, `primaryUncertainty`, `knownApproaches`, `importantStakeholders`, `rigorMode`, `automationLevel`, `pathRecommendation`, `pathRationale`, `designDocPath`.",
|
|
77
113
|
"Also set `goal` in the context object: one sentence describing what you are trying to accomplish. This populates the session title in the Workspace console immediately."
|
|
78
114
|
],
|
|
79
115
|
"verify": [
|
|
80
116
|
"The chosen path is justified against the other two, not just named.",
|
|
117
|
+
"If `goalWasSolutionStatement = true`, the `problemStatement` reflects the reframed problem, not the stated solution.",
|
|
81
118
|
"The design doc exists and the path recommendation is recorded there."
|
|
82
119
|
]
|
|
83
120
|
},
|
|
@@ -284,10 +321,11 @@
|
|
|
284
321
|
"Capture users or stakeholders, jobs or outcomes, pains or tensions, constraints that matter in lived use, success criteria, assumptions, and at least 2 reframes or HMW questions.",
|
|
285
322
|
"If `delegationAvailable = true`, decide whether parallel stakeholder lenses would actually sharpen the result. If yes, run them in parallel. If not, keep going yourself. In either case, you must synthesize the result yourself.",
|
|
286
323
|
"Update `designDocPath` using `problemFrameTemplate`.",
|
|
324
|
+
"Before finishing, name ONE specific concrete condition that would make the current framing wrong -- not a generic caveat, but a specific thing that if discovered to be true would change the path or direction. Record this as `primaryFramingRisk` in the design doc.",
|
|
287
325
|
"Set these keys in the next `continue_workflow` call's `context` object: `problemFrame`, `primaryUsers`, `tensionCount`, `successCriteriaCount`, `framingRiskCount`, `needsChallenge`, `retriageNeeded`."
|
|
288
326
|
],
|
|
289
327
|
"verify": [
|
|
290
|
-
"The framing names what could still be wrong.",
|
|
328
|
+
"The framing names what could still be wrong -- specifically, not generically.",
|
|
291
329
|
"The frame is strong enough to influence candidate generation and later selection."
|
|
292
330
|
]
|
|
293
331
|
},
|
|
@@ -319,10 +357,11 @@
|
|
|
319
357
|
"Capture users or stakeholders, jobs or outcomes, pains or tensions, constraints that matter in lived use, success criteria, assumptions, and at least 2 reframes or HMW questions.",
|
|
320
358
|
"If `delegationAvailable = true`, decide whether parallel stakeholder lenses would actually sharpen the result. If yes, run them in parallel. If not, keep going yourself. In either case, you must synthesize the result yourself.",
|
|
321
359
|
"Update `designDocPath` using `problemFrameTemplate`.",
|
|
360
|
+
"Before finishing, name ONE specific concrete condition that would make the current framing wrong -- not a generic caveat, but a specific thing that if discovered to be true would change the path or direction. Record this as `primaryFramingRisk` in the design doc.",
|
|
322
361
|
"Set these keys in the next `continue_workflow` call's `context` object: `problemFrame`, `primaryUsers`, `tensionCount`, `successCriteriaCount`, `framingRiskCount`, `needsChallenge`, `retriageNeeded`."
|
|
323
362
|
],
|
|
324
363
|
"verify": [
|
|
325
|
-
"The framing names what could still be wrong.",
|
|
364
|
+
"The framing names what could still be wrong -- specifically, not generically.",
|
|
326
365
|
"The framing depth is strong enough to justify a design-first path."
|
|
327
366
|
]
|
|
328
367
|
},
|
|
@@ -335,8 +374,20 @@
|
|
|
335
374
|
"id": "phase-1g-retriage",
|
|
336
375
|
"title": "Phase 1g: Re-Triage After Early Context",
|
|
337
376
|
"runCondition": {
|
|
338
|
-
"
|
|
339
|
-
|
|
377
|
+
"or": [
|
|
378
|
+
{
|
|
379
|
+
"var": "retriageNeeded",
|
|
380
|
+
"equals": true
|
|
381
|
+
},
|
|
382
|
+
{
|
|
383
|
+
"var": "pathRecommendation",
|
|
384
|
+
"equals": "design_first"
|
|
385
|
+
},
|
|
386
|
+
{
|
|
387
|
+
"var": "pathRecommendation",
|
|
388
|
+
"equals": "full_spectrum"
|
|
389
|
+
}
|
|
390
|
+
]
|
|
340
391
|
},
|
|
341
392
|
"promptBlocks": {
|
|
342
393
|
"goal": "Reassess the path now that you have real landscape and framing context instead of just my initial wording.",
|
|
@@ -1,102 +0,0 @@
|
|
|
1
|
-
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';
|
|
2
|
-
export interface BridgeConfig {
|
|
3
|
-
readonly reconnectBaseDelayMs: number;
|
|
4
|
-
readonly reconnectMaxAttempts: number;
|
|
5
|
-
readonly forwardTimeoutMs: number;
|
|
6
|
-
readonly maxRespawnAttempts: number;
|
|
7
|
-
readonly spawnLockStaleMs: number;
|
|
8
|
-
readonly waitForPrimaryPollMs: number;
|
|
9
|
-
}
|
|
10
|
-
export declare const DEFAULT_BRIDGE_CONFIG: BridgeConfig;
|
|
11
|
-
type HttpBridgeTransport = {
|
|
12
|
-
readonly send: (msg: JSONRPCMessage) => Promise<void>;
|
|
13
|
-
readonly close: () => Promise<void>;
|
|
14
|
-
};
|
|
15
|
-
export type ConnectionState = {
|
|
16
|
-
readonly kind: 'connecting';
|
|
17
|
-
} | {
|
|
18
|
-
readonly kind: 'connected';
|
|
19
|
-
readonly transport: HttpBridgeTransport;
|
|
20
|
-
} | {
|
|
21
|
-
readonly kind: 'reconnecting';
|
|
22
|
-
readonly attempt: number;
|
|
23
|
-
readonly maxAttempts: number;
|
|
24
|
-
readonly respawnBudget: number;
|
|
25
|
-
} | {
|
|
26
|
-
readonly kind: 'waiting_for_primary';
|
|
27
|
-
} | {
|
|
28
|
-
readonly kind: 'closed';
|
|
29
|
-
};
|
|
30
|
-
export type ReconnectOutcome = {
|
|
31
|
-
readonly kind: 'reconnected';
|
|
32
|
-
} | {
|
|
33
|
-
readonly kind: 'exhausted';
|
|
34
|
-
} | {
|
|
35
|
-
readonly kind: 'aborted';
|
|
36
|
-
};
|
|
37
|
-
export type SpawnLockResult = {
|
|
38
|
-
readonly kind: 'acquired';
|
|
39
|
-
} | {
|
|
40
|
-
readonly kind: 'skipped';
|
|
41
|
-
readonly reason: string;
|
|
42
|
-
};
|
|
43
|
-
export type FetchLike = (url: string, init?: RequestInit) => Promise<Response>;
|
|
44
|
-
export type SpawnLike = (command: string, args: ReadonlyArray<string>, opts: {
|
|
45
|
-
readonly env: NodeJS.ProcessEnv;
|
|
46
|
-
readonly detached: boolean;
|
|
47
|
-
readonly stdio: 'ignore';
|
|
48
|
-
}) => {
|
|
49
|
-
unref: () => void;
|
|
50
|
-
};
|
|
51
|
-
export type WriteFileSyncLike = (path: string, content: string, opts: {
|
|
52
|
-
flag: 'wx';
|
|
53
|
-
}) => void;
|
|
54
|
-
export type StatSyncLike = (path: string) => {
|
|
55
|
-
mtimeMs: number;
|
|
56
|
-
};
|
|
57
|
-
export type UnlinkSyncLike = (path: string) => void;
|
|
58
|
-
export type HealthResponse = {
|
|
59
|
-
readonly port: number;
|
|
60
|
-
readonly pid: number;
|
|
61
|
-
};
|
|
62
|
-
export declare function detectHealthyPrimary(port: number, opts?: {
|
|
63
|
-
retries?: number;
|
|
64
|
-
baseDelayMs?: number;
|
|
65
|
-
fetch?: FetchLike;
|
|
66
|
-
}): Promise<HealthResponse | null>;
|
|
67
|
-
export declare function spawnLockPath(port: number): string;
|
|
68
|
-
export declare function acquireSpawnLock(port: number, staleMs: number, deps?: {
|
|
69
|
-
readonly writeFileSync?: WriteFileSyncLike;
|
|
70
|
-
readonly statSync?: StatSyncLike;
|
|
71
|
-
readonly unlinkSync?: UnlinkSyncLike;
|
|
72
|
-
}): SpawnLockResult;
|
|
73
|
-
export declare function releaseSpawnLock(port: number, deps?: {
|
|
74
|
-
readonly unlinkSync?: UnlinkSyncLike;
|
|
75
|
-
}): void;
|
|
76
|
-
export declare function spawnPrimary(port: number, deps: {
|
|
77
|
-
spawn: SpawnLike;
|
|
78
|
-
fetch?: FetchLike;
|
|
79
|
-
}): Promise<void>;
|
|
80
|
-
type ReconnectDeps = {
|
|
81
|
-
readonly detect: (attempt: number) => Promise<boolean>;
|
|
82
|
-
readonly config: Pick<BridgeConfig, 'reconnectBaseDelayMs' | 'reconnectMaxAttempts'>;
|
|
83
|
-
readonly signal: AbortSignal;
|
|
84
|
-
};
|
|
85
|
-
export declare function reconnectWithBackoff(deps: ReconnectDeps): Promise<ReconnectOutcome>;
|
|
86
|
-
type OutcomeHandlerDeps = {
|
|
87
|
-
readonly setConnectionState: (state: ConnectionState) => void;
|
|
88
|
-
readonly performShutdown: (reason: string) => void;
|
|
89
|
-
readonly startReconnectLoop: () => void;
|
|
90
|
-
readonly startWaitLoop: () => void;
|
|
91
|
-
readonly triggerSpawn: () => Promise<void>;
|
|
92
|
-
readonly config: Pick<BridgeConfig, 'reconnectMaxAttempts'>;
|
|
93
|
-
};
|
|
94
|
-
export declare function handleReconnectOutcome(outcome: ReconnectOutcome, reconnectingState: Extract<ConnectionState, {
|
|
95
|
-
kind: 'reconnecting';
|
|
96
|
-
}>, deps: OutcomeHandlerDeps): Promise<void>;
|
|
97
|
-
export declare function startBridgeServer(primaryPort: number, config?: BridgeConfig, deps?: {
|
|
98
|
-
spawn?: SpawnLike;
|
|
99
|
-
fetch?: FetchLike;
|
|
100
|
-
originalPrimaryPid?: number;
|
|
101
|
-
}): Promise<void>;
|
|
102
|
-
export {};
|