@lioneltay/worker-manager 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +10 -0
- package/.mcp.json +8 -0
- package/README.md +337 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +120 -0
- package/dist/orchestrator.d.ts +1 -0
- package/dist/orchestrator.js +368 -0
- package/dist/spawn.d.ts +9 -0
- package/dist/spawn.js +67 -0
- package/dist/state.d.ts +18 -0
- package/dist/state.js +109 -0
- package/dist/types.d.ts +25 -0
- package/dist/types.js +1 -0
- package/dist/worker.d.ts +1 -0
- package/dist/worker.js +99 -0
- package/hooks/hooks.json +26 -0
- package/package.json +35 -0
package/.claude-plugin/plugin.json
ADDED
@@ -0,0 +1,10 @@
{
  "name": "worker-manager",
  "version": "0.0.1",
  "description": "Spawn and manage worker agents in isolated worktrees",
  "author": {
    "name": "lioneltay"
  },
  "repository": "https://github.com/lioneltay/agent-forge/tree/main/packages/claude-plugins/worker-manager",
  "license": "MIT"
}
package/.mcp.json
ADDED
package/README.md
ADDED
@@ -0,0 +1,337 @@
# @lioneltay/worker-manager

A Claude Code plugin that spawns autonomous worker agents in isolated git worktrees. Workers run in tmux sessions with their own Claude Code instance and communicate back to the orchestrator via a file-based mail system.

## Installation

Install via the Claude Code plugin marketplace, or manually:

```bash
npm install -g @lioneltay/worker-manager
```

## Quick start

Once the plugin is installed, the orchestrator tools are available in your Claude Code session:

```
> Spawn a worker to refactor the auth module into separate files

# Claude calls start_worker({ title: "refactor-auth", task: "..." })
# Worker is created in a new worktree on branch worker/refactor-auth-a3f1b2c0
# Worker runs autonomously in tmux session worker-a3f1b2c0

> [next prompt — hook fires automatically]
# "Worker refactor-auth (a3f1b2c0) COMPLETED: Refactored auth module into..."
```

## How it works

A single Node.js binary (`dist/index.js`) serves four different roles depending on how it's invoked:

```mermaid
flowchart TD
    Entry["node dist/index.js"]
    Entry --> CheckHook{"--hook flag?"}
    CheckHook -->|Yes| Hook["UserPromptSubmit Hook
    Read & display pending mail"]
    CheckHook -->|No| CheckStop{"--stop-hook flag?"}
    CheckStop -->|Yes| StopHook["Stop Hook
    Block premature stops"]
    CheckStop -->|No| CheckWorker{"WORKER_ID env var?"}
    CheckWorker -->|Yes| Worker["Worker MCP Server
    Tools: complete, ask"]
    CheckWorker -->|No| Orchestrator["Orchestrator MCP Server
    Tools: start_worker, list_workers,
    nudge_worker, stop_worker, read_mail"]
```

The orchestrator runs as an MCP server in the lead agent's Claude Code session. When it spawns a worker, it launches a new Claude Code instance in a tmux session, configured with a worker MCP server that provides `complete` and `ask` tools. Workers communicate back via the filesystem — no HTTP servers, no daemons.
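For concreteness, this is the shape of the per-worker MCP config the orchestrator writes before launching Claude in tmux (shape taken from `dist/spawn.js`; the path and ID values below are illustrative placeholders, not real output):

```ts
// Illustrative per-worker MCP config (shape from dist/spawn.js; values are examples).
const mcpConfig = {
  mcpServers: {
    worker: {
      command: "node",
      args: ["/path/to/dist/index.js"], // the same binary the orchestrator runs
      env: {
        WORKER_ID: "a3f1b2c0",          // switches the binary into worker mode
        WORKER_NAME: "refactor-auth",
        STATE_DIR: "/tmp/worker-manager/<hash>",
        ORCHESTRATOR_ID: "abc12345",    // routes mail to the right mailbox
      },
    },
  },
};
```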

## Tools

### Orchestrator tools

| Tool           | Input                                         | Description                                                                        |
| -------------- | --------------------------------------------- | ---------------------------------------------------------------------------------- |
| `start_worker` | `title`, `task`, `useWorktree`, `baseBranch?` | Spawn a worker agent in an isolated worktree or the current directory.             |
| `list_workers` | —                                             | List all workers with status. Cross-references tmux to detect crashed workers.     |
| `nudge_worker` | `id`, `message`                               | Send a message to a worker's tmux session (answers questions, provides guidance).  |
| `stop_worker`  | `id`                                          | Stop a running worker by killing its tmux session. Worktrees are preserved.        |
| `read_mail`    | —                                             | Read and clear all pending messages from this orchestrator's workers.              |

### Worker tools

| Tool       | Input      | Description                                                                           |
| ---------- | ---------- | ------------------------------------------------------------------------------------- |
| `complete` | `summary`  | Signal task completion. Writes a mail message and updates the registry.              |
| `ask`      | `question` | Ask the orchestrator a question. Sets status to "asking" and writes a mail message.  |

## Worker lifecycle

```mermaid
sequenceDiagram
    participant User
    participant O as Orchestrator
    participant FS as State (filesystem)
    participant T as tmux
    participant W as Worker (Claude Code)
    participant WT as Git Worktree

    User->>O: "spawn a worker to do X"
    O->>WT: Create worktree (new branch)
    O->>FS: Register worker in state.json
    O->>T: Create tmux session
    O->>T: Launch: claude --dangerously-skip-permissions<br/>with worker MCP + task prompt
    T->>W: Claude Code starts

    Note over W: Works autonomously<br/>in isolated worktree

    alt Task completed
        W->>FS: complete(summary)<br/>Write mail + update status
    else Needs clarification
        W->>FS: ask(question)<br/>Write mail + update status
    end

    Note over User,O: On next prompt submission...
    FS-->>O: UserPromptSubmit hook<br/>reads and prints mail

    alt Worker asked a question
        O->>T: nudge_worker(id, message)<br/>tmux send-keys
        T->>W: Message appears as user input
    end

    Note over User,O: When orchestrator tries to stop...
    FS-->>O: Stop hook checks for unread mail<br/>blocks if messages are pending
```

## State management

All state is stored in a temp directory derived from the git root, keeping the repository clean:

```
$TMPDIR/worker-manager/<hash>/
├── state.json                    # Worker registry
├── orchestrators/
│   └── <claude-code-pid>         # Maps PID → orchestrator ID
└── mail/
    ├── <orchestrator-id>/        # Per-orchestrator mailbox
    │   ├── 2025-01-15T...-uuid.json
    │   └── 2025-01-15T...-uuid.json
    └── <orchestrator-id>/
        └── ...
```

The `<hash>` is the first 12 characters of the SHA-256 of the git root path. This means different repositories get isolated state, and worktrees of the same repository share state with the main checkout.
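A minimal sketch of that derivation, mirroring `getStateDir` in the shipped `dist/state.js`:

```ts
import * as crypto from "node:crypto";
import * as os from "node:os";
import * as path from "node:path";

// First 12 hex chars of SHA-256(git root) → per-repository state directory.
function getStateDir(gitRoot: string): string {
  const hash = crypto.createHash("sha256").update(gitRoot).digest("hex").slice(0, 12);
  return path.join(os.tmpdir(), "worker-manager", hash);
}
```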

### Worker registry (`state.json`)

Tracks all workers spawned from this git root:

```json
{
  "workers": {
    "a3f1b2c0": {
      "id": "a3f1b2c0",
      "name": "refactor-auth",
      "task": "Refactor the auth module...",
      "status": "running",
      "branch": "worker/refactor-auth-a3f1b2c0",
      "worktreePath": "/repo/.worktrees/worker--refactor-auth-a3f1b2c0",
      "tmuxSession": "worker-a3f1b2c0",
      "createdAt": "2025-01-15T10:30:00.000Z",
      "useWorktree": true
    }
  }
}
```

Registry writes are atomic — data is written to a temp file first, then renamed into place via `fs.renameSync`.
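A sketch of that write path, based on `writeRegistry` in `dist/state.js`:

```ts
import * as fs from "node:fs";
import { randomUUID } from "node:crypto";

// Write the registry to a unique temp file, then rename it over state.json.
// rename is atomic on the same filesystem, so readers never see a partial file.
function writeRegistryAtomic(file: string, registry: unknown): void {
  const tmpFile = `${file}.${randomUUID()}.tmp`;
  fs.writeFileSync(tmpFile, JSON.stringify(registry, null, 2));
  fs.renameSync(tmpFile, file);
}
```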

### Worker statuses

| Status      | Meaning                                                        |
| ----------- | -------------------------------------------------------------- |
| `running`   | Worker is actively processing its task                         |
| `completed` | Worker called `complete` — task is done                        |
| `asking`    | Worker called `ask` — waiting for orchestrator response        |
| `failed`    | Detected by `list_workers` when tmux session no longer exists  |
| `stopped`   | Worker was explicitly stopped or cleaned up on shutdown        |

### Mail messages

Each message is a separate JSON file named `<timestamp>-<uuid>.json`:

```json
{
  "id": "550e8400-e29b-41d4-a716-446655440000",
  "workerId": "a3f1b2c0",
  "workerName": "refactor-auth",
  "type": "completion",
  "content": "Refactored auth module into login.ts, register.ts, and middleware.ts",
  "timestamp": "2025-01-15T10:35:00.000Z"
}
```

Message types: `completion`, `question`.

Reading mail is destructive — files are deleted after being read. This ensures each message is delivered exactly once.
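The delivery guarantee comes from the read path itself — a sketch mirroring `readAndFlushMail` in `dist/state.js`:

```ts
import * as fs from "node:fs";
import * as path from "node:path";

// Read message files in timestamp order, deleting each one after a successful
// parse so it is never delivered twice. Unreadable files are left in place.
function readAndFlushMail(mailDir: string): unknown[] {
  if (!fs.existsSync(mailDir)) return [];
  const files = fs.readdirSync(mailDir).filter((f) => f.endsWith(".json")).sort();
  const messages: unknown[] = [];
  for (const file of files) {
    const filePath = path.join(mailDir, file);
    try {
      messages.push(JSON.parse(fs.readFileSync(filePath, "utf-8")));
      fs.unlinkSync(filePath);
    } catch {
      // skip corrupted files
    }
  }
  return messages;
}
```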

### Multi-session isolation

Multiple Claude Code sessions in the same directory each get their own mailbox:

```mermaid
flowchart TB
    subgraph "Claude Code Session A (PID 1234)"
        MCP_A["Orchestrator MCP<br/>id: abc12345"]
        Hook_A["Hooks<br/>ppid: 1234"]
    end

    subgraph "Claude Code Session B (PID 5678)"
        MCP_B["Orchestrator MCP<br/>id: def67890"]
        Hook_B["Hooks<br/>ppid: 5678"]
    end

    subgraph "State Directory"
        Mapping["orchestrators/<br/>1234 → abc12345<br/>5678 → def67890"]
        Mail_A["mail/abc12345/<br/>messages..."]
        Mail_B["mail/def67890/<br/>messages..."]
    end

    MCP_A -->|"writes mapping"| Mapping
    MCP_B -->|"writes mapping"| Mapping
    Hook_A -->|"reads ppid 1234<br/>→ abc12345"| Mapping
    Hook_B -->|"reads ppid 5678<br/>→ def67890"| Mapping
    Hook_A -->|"reads only"| Mail_A
    Hook_B -->|"reads only"| Mail_B
```

**How it works:**

1. When the orchestrator MCP server starts, it generates a unique ID and writes a mapping from its parent PID (the Claude Code process) to that ID.
2. Workers receive the orchestrator ID via the `ORCHESTRATOR_ID` env var and write mail to `mail/<orchestratorId>/`.
3. Hooks run as children of the same Claude Code process, so `process.ppid` matches the MCP server's `process.ppid`. They look up the mapping to find the right mailbox.

This ensures each session only reads its own workers' mail, even when multiple sessions share the same git root.
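The mapping lookup is a single file read keyed by the hook's parent PID — a sketch based on `readOrchestratorId` in `dist/state.js`:

```ts
import * as fs from "node:fs";
import * as path from "node:path";

// The orchestrator writes orchestrators/<claude-code-pid> containing its ID;
// a hook spawned by the same Claude Code process reads it back via process.ppid.
function readOrchestratorId(stateDir: string, ppid: number): string | null {
  try {
    return fs
      .readFileSync(path.join(stateDir, "orchestrators", String(ppid)), "utf-8")
      .trim();
  } catch {
    return null; // no orchestrator registered for this session
  }
}
```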

## Hooks

The plugin registers two Claude Code hooks:

### UserPromptSubmit hook

Fires before every prompt. Reads pending mail from this orchestrator's mailbox and prints it to stdout, where Claude Code displays it as context for the next turn.

```
[Worker refactor-auth (a3f1b2c0)] COMPLETED: Refactored auth module into...
```

### Stop hook

Fires when Claude Code is about to stop. Has two modes depending on context:

**Orchestrator mode** (no `WORKER_ID` env var): Checks for unread mail. If mail is pending, blocks the stop with a JSON decision so the orchestrator processes worker results before going idle.

**Worker mode** (`WORKER_ID` env var set): Checks whether the worker called `complete` or `ask`. If not, blocks the stop and reminds the worker to call one of those tools. This prevents workers from silently exiting without reporting results.

```mermaid
flowchart TD
    StopHook["Stop Hook Fires"]
    StopHook --> IsWorker{"WORKER_ID set?"}

    IsWorker -->|No| OrchestratorPath["Orchestrator Mode"]
    OrchestratorPath --> HasMapping{"Orchestrator ID<br/>mapping exists?"}
    HasMapping -->|No| AllowStop0["Allow stop<br/>(no workers spawned)"]
    HasMapping -->|Yes| HasMail{"Pending mail?"}
    HasMail -->|No| AllowStop1["Allow stop"]
    HasMail -->|Yes| BlockOrch["Block stop<br/>Show mail content"]

    IsWorker -->|Yes| WorkerPath["Worker Mode"]
    WorkerPath --> CheckStatus{"Worker status?"}
    CheckStatus -->|"completed / asking"| AllowStop2["Allow stop"]
    CheckStatus -->|running| CheckRetries{"Retries >= 2?"}
    CheckRetries -->|Yes| AllowStop3["Allow stop<br/>(give up)"]
    CheckRetries -->|No| BlockWorker["Block stop<br/>'Call complete or ask'"]
```

The worker stop hook has a retry limit of 2 to prevent infinite loops — if the worker ignores the reminder twice, it's allowed to exit.
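In both modes, blocking works the same way: the hook prints a JSON decision to stdout. A trimmed sketch of the worker-mode branch from `dist/index.js`:

```ts
// Printed by the Stop hook to keep the session running; the reason text is
// surfaced back to the agent as feedback on why the stop was blocked.
const response = {
  decision: "block",
  reason:
    "You haven't called the `complete` or `ask` tool yet. " +
    "Please call `complete` with a summary of what you accomplished, " +
    "or `ask` if you need clarification from the orchestrator.",
};
process.stdout.write(JSON.stringify(response) + "\n");
```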

## Worker spawning

When `start_worker` is called, the following happens:

```mermaid
sequenceDiagram
    participant O as Orchestrator
    participant WT as Worktree Manager
    participant FS as Filesystem
    participant T as tmux

    O->>WT: create("worker/name-id", { newBranch: true })
    WT-->>O: { path: "/repo/.worktrees/worker--name-id" }

    O->>FS: Write worker to state.json
    O->>FS: Write MCP config to $TMPDIR/mcp-config-{id}.json

    Note over FS: MCP config points worker at same<br/>binary with env vars: WORKER_ID,<br/>WORKER_NAME, STATE_DIR, ORCHESTRATOR_ID

    O->>T: tmux new-session -d -s worker-{id}
    O->>T: tmux send-keys:<br/>export WORKER_ID=... STATE_DIR=...<br/>claude --dangerously-skip-permissions<br/>--mcp-config {path} --session-id {uuid}<br/>--append-system-prompt "..." "task"
```

Key details:

- **Same binary, different mode**: The worker's MCP config points at the same `dist/index.js`. The `WORKER_ID` env var causes it to start as a worker MCP server instead of an orchestrator.
- **Environment variables in tmux**: `WORKER_ID` and `STATE_DIR` are exported as shell variables in the tmux session before launching Claude. This allows the Stop hook (which inherits the shell environment) to detect that it's running in a worker context (see the sketch below).
- **`--dangerously-skip-permissions`**: Workers run without permission prompts since they're autonomous.
- **System prompt injection**: Workers are told to call `complete` when done or `ask` if blocked.
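A condensed sketch of the launch step from `dist/spawn.js` (quoting and escaping are simplified here; see the shipped file for the exact shell escaping, and the `<worker instructions>` placeholder stands in for the real system prompt):

```ts
import { execFileSync } from "node:child_process";

// Assemble the shell command that runs inside the tmux session: export the
// worker env vars, then start Claude with the worker MCP config and the task.
function launchInTmux(opts: {
  tmuxSession: string;
  workerId: string;
  stateDir: string;
  mcpConfigPath: string;
  sessionId: string;
  task: string;
}): void {
  const envExport = `export WORKER_ID=${opts.workerId} STATE_DIR=${opts.stateDir}`;
  const claudeCmd = [
    "claude",
    "--session-id", opts.sessionId,
    "--mcp-config", opts.mcpConfigPath,
    "--dangerously-skip-permissions",
    "--append-system-prompt", `'<worker instructions>'`,
    `'${opts.task}'`,
  ].join(" ");
  // -l sends the string literally; a separate "Enter" keypress submits it.
  execFileSync("tmux", ["send-keys", "-t", opts.tmuxSession, "-l", `${envExport} ; ${claudeCmd}`]);
  execFileSync("tmux", ["send-keys", "-t", opts.tmuxSession, "Enter"]);
}
```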

## Cleanup

### Explicit stop

Use `stop_worker` to kill a specific worker's tmux session. The worker's status is updated to `"stopped"` and its worktree is preserved on disk for merging.

### Shutdown cleanup

When the orchestrator MCP server exits (Claude Code session ends, SIGTERM, or SIGINT), all running workers are automatically cleaned up:

1. All workers with `status: "running"` have their tmux sessions killed
2. Their status is updated to `"stopped"` in the registry
3. The orchestrator's PID mapping file is removed

Worktrees are intentionally preserved in both cases — they contain code changes the user may want to merge.

### Worktree vs shared directory

`useWorktree: true` creates an isolated git worktree via `@lioneltay/worktree-manager`. `useWorktree: false` runs the worker in the current directory on the current branch — useful for parallel tasks that don't need branch isolation (e.g., research, testing).
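Illustrative calls for the two modes, in the same style as the quick start above (the second task is a hypothetical example, not taken from this package):

```
# Isolated worktree — changes land on a new worker/* branch
start_worker({ title: "refactor-auth", task: "Refactor the auth module into separate files", useWorktree: true, baseBranch: "main" })

# Shared directory — runs on the current branch (hypothetical research task)
start_worker({ title: "dependency-research", task: "Summarize which dependencies are unused", useWorktree: false })
```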

## Source files

```
src/
├── index.ts          # Entry point — mode detection + hook implementations
├── orchestrator.ts   # Orchestrator MCP server (start_worker, list_workers, etc.)
├── worker.ts         # Worker MCP server (complete, ask)
├── spawn.ts          # Worker spawning logic (tmux + Claude CLI)
├── state.ts          # File-based state (registry, mail, orchestrator mapping)
└── types.ts          # Shared type definitions
```

## Plugin structure

```
.claude-plugin/
  plugin.json    # Plugin manifest
.mcp.json        # MCP server configuration
hooks/
  hooks.json     # Hook definitions (UserPromptSubmit + Stop)
```

## Dependencies

- `@modelcontextprotocol/sdk` — MCP server implementation
- `@lioneltay/worktree-manager` — Git worktree creation
- `zod` — Input validation for MCP tool schemas
- `tmux` — Required system dependency for worker session management
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
@@ -0,0 +1,120 @@
#!/usr/bin/env node
import * as fs from "node:fs";
import * as path from "node:path";
import * as os from "node:os";
import { getStateDir, getGitRoot, readAndFlushMail, readRegistry, readOrchestratorId, } from "./state.js";
import { startOrchestratorServer } from "./orchestrator.js";
import { startWorkerServer } from "./worker.js";
function runHook() {
    try {
        const gitRoot = getGitRoot();
        const stateDir = getStateDir(gitRoot);
        const orchestratorId = readOrchestratorId(stateDir, process.ppid);
        if (!orchestratorId)
            return;
        const messages = readAndFlushMail(stateDir, orchestratorId);
        if (messages.length === 0) {
            return;
        }
        for (const msg of messages) {
            const prefix = msg.type === "completion" ? "COMPLETED" : "QUESTION";
            process.stdout.write(`[Worker ${msg.workerName} (${msg.workerId})] ${prefix}: ${msg.content}\n`);
        }
    }
    catch {
        // Silently ignore errors in hook mode
    }
}
function runStopHook() {
    const workerId = process.env.WORKER_ID;
    const stateDir = process.env.STATE_DIR;
    // Orchestrator mode: check for pending mail
    if (!workerId || !stateDir) {
        try {
            const gitRoot = getGitRoot();
            const orchestratorStateDir = getStateDir(gitRoot);
            const orchestratorId = readOrchestratorId(orchestratorStateDir, process.ppid);
            if (!orchestratorId)
                return;
            const messages = readAndFlushMail(orchestratorStateDir, orchestratorId);
            if (messages.length === 0) {
                return;
            }
            const formatted = messages
                .map((m) => {
                    const prefix = m.type === "completion" ? "COMPLETED" : "QUESTION";
                    return `[Worker ${m.workerName} (${m.workerId})] ${prefix}: ${m.content}`;
                })
                .join("\n");
            const response = {
                decision: "block",
                reason: formatted,
            };
            process.stdout.write(JSON.stringify(response) + "\n");
        }
        catch {
            // Silently ignore errors
        }
        return;
    }
    try {
        const registry = readRegistry(stateDir);
        const worker = registry.workers[workerId];
        if (!worker)
            return;
        // Already completed or asked — let it stop
        if (worker.status === "completed" || worker.status === "asking") {
            return;
        }
        // Check retry count to avoid infinite loops (max 2 attempts)
        const retryFile = path.join(os.tmpdir(), "worker-manager", `stop-retries-${workerId}`);
        let retries = 0;
        try {
            retries = parseInt(fs.readFileSync(retryFile, "utf-8"), 10);
        }
        catch {
            // File doesn't exist yet
        }
        if (retries >= 2) {
            // Give up — clean up retry file
            try {
                fs.unlinkSync(retryFile);
            }
            catch { }
            return;
        }
        // Increment retry count
        const tmpDir = path.join(os.tmpdir(), "worker-manager");
        fs.mkdirSync(tmpDir, { recursive: true });
        fs.writeFileSync(retryFile, String(retries + 1));
        // Block Claude from stopping via JSON decision
        const response = {
            decision: "block",
            reason: "You haven't called the `complete` or `ask` tool yet. " +
                "Please call `complete` with a summary of what you accomplished, " +
                "or `ask` if you need clarification from the orchestrator.",
        };
        process.stdout.write(JSON.stringify(response) + "\n");
    }
    catch {
        // Silently ignore errors
    }
}
if (process.argv.includes("--hook")) {
    runHook();
}
else if (process.argv.includes("--stop-hook")) {
    runStopHook();
}
else if (process.env.WORKER_ID) {
    startWorkerServer().catch((error) => {
        console.error("Worker server fatal error:", error);
        process.exit(1);
    });
}
else {
    startOrchestratorServer().catch((error) => {
        console.error("Orchestrator server fatal error:", error);
        process.exit(1);
    });
}
package/dist/orchestrator.d.ts
ADDED
@@ -0,0 +1 @@
export declare function startOrchestratorServer(): Promise<void>;
package/dist/orchestrator.js
ADDED
@@ -0,0 +1,368 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { z } from "zod";
import { execFileSync } from "node:child_process";
import { randomUUID } from "node:crypto";
import { create } from "@lioneltay/worktree-manager";
import { getGitRoot, getStateDir, readRegistry, writeRegistry, readAndFlushMail, writeOrchestratorMapping, removeOrchestratorMapping, } from "./state.js";
import { spawnWorker } from "./spawn.js";
function generateShortId() {
    return randomUUID().slice(0, 8);
}
function titleToName(title) {
    return title
        .toLowerCase()
        .replace(/[^a-z0-9]+/g, "-")
        .replace(/^-|-$/g, "")
        .slice(0, 30);
}
function tmuxSessionExists(session) {
    try {
        execFileSync("tmux", ["has-session", "-t", session], {
            stdio: "pipe",
        });
        return true;
    }
    catch {
        return false;
    }
}
function killTmuxSession(session) {
    try {
        execFileSync("tmux", ["kill-session", "-t", session], {
            stdio: "pipe",
        });
    }
    catch {
        // Session may already be dead
    }
}
function cleanupWorkers(stateDir) {
    const registry = readRegistry(stateDir);
    let changed = false;
    for (const worker of Object.values(registry.workers)) {
        if (worker.status === "running") {
            killTmuxSession(worker.tmuxSession);
            worker.status = "stopped";
            changed = true;
        }
    }
    if (changed) {
        writeRegistry(stateDir, registry);
    }
    removeOrchestratorMapping(stateDir, process.ppid);
}
function getCurrentBranch() {
    return execFileSync("git", ["rev-parse", "--abbrev-ref", "HEAD"], {
        encoding: "utf-8",
    }).trim();
}
export async function startOrchestratorServer() {
    // Generate a unique ID for this orchestrator session so mail is scoped.
    const orchestratorId = generateShortId();
    const gitRoot = getGitRoot();
    const stateDir = getStateDir(gitRoot);
    // Write mapping so hooks (which share the same parent PID) can find our mailbox.
    writeOrchestratorMapping(stateDir, process.ppid, orchestratorId);
    const server = new McpServer({
        name: "worker-manager",
        version: "0.0.1",
    });
    server.registerTool("start_worker", {
        description: [
            "Spawn an autonomous worker agent to complete a task independently.",
            "",
            "Use useWorktree=true when the worker's changes should land on a separate branch (e.g., a feature that needs its own PR, changes that might conflict with other workers).",
            "Use useWorktree=false to run in the current directory on the current branch (e.g., research, tests, or parallel edits to non-overlapping files).",
            "",
            "Workers send mail when they finish or have questions. Messages are delivered automatically via the prompt hook, or use `read_mail` to check manually.",
        ].join("\n"),
        inputSchema: {
            title: z.string().describe("Short title for the worker task"),
            task: z
                .string()
                .describe("Detailed task description for the worker. Be specific about what to do and what files to modify."),
            baseBranch: z
                .string()
                .optional()
                .describe("Base branch to create the worktree from (defaults to current branch). Only used when useWorktree is true."),
            useWorktree: z
                .boolean()
                .describe("true = isolated git worktree on a new branch. false = current directory, current branch."),
        },
    }, async ({ title, task, baseBranch, useWorktree }) => {
        try {
            const id = generateShortId();
            const name = titleToName(title);
            const tmuxSession = `worker-${id}`;
            let workingDir;
            let branchName;
            if (useWorktree) {
                branchName = `worker/${name}-${id}`;
                const worktree = await create(branchName, {
                    newBranch: true,
                    from: baseBranch,
                });
                workingDir = worktree.path;
            }
            else {
                branchName = getCurrentBranch();
                workingDir = gitRoot;
            }
            const entry = {
                id,
                name,
                task,
                status: "running",
                branch: branchName,
                worktreePath: workingDir,
                tmuxSession,
                createdAt: new Date().toISOString(),
                useWorktree,
            };
            const registry = readRegistry(stateDir);
            registry.workers[id] = entry;
            writeRegistry(stateDir, registry);
            spawnWorker({
                id,
                name,
                task,
                workingDir,
                stateDir,
                tmuxSession,
                orchestratorId,
            });
            const info = [
                `Worker started successfully.`,
                ` ID: ${id}`,
                ` Name: ${name}`,
                ` Branch: ${branchName}`,
                ` Directory: ${workingDir}`,
                ` Tmux: ${tmuxSession}`,
            ];
            if (!useWorktree) {
                info.push(` Mode: shared directory (no worktree)`);
            }
            info.push(``, `The worker will notify you when it completes or has questions.`);
            return {
                content: [{ type: "text", text: info.join("\n") }],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to start worker: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    server.registerTool("list_workers", {
        description: "List all workers with their current status. Detects crashed workers automatically.",
        inputSchema: {},
    }, async () => {
        try {
            const registry = readRegistry(stateDir);
            const workers = Object.values(registry.workers);
            if (workers.length === 0) {
                return {
                    content: [{ type: "text", text: "No workers found." }],
                };
            }
            const enriched = workers.map((w) => {
                const tmuxAlive = w.status === "running" ? tmuxSessionExists(w.tmuxSession) : null;
                const effectiveStatus = w.status === "running" && !tmuxAlive ? "failed" : w.status;
                return { ...w, effectiveStatus };
            });
            return {
                content: [
                    {
                        type: "text",
                        text: JSON.stringify(enriched, null, 2),
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to list workers: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    server.registerTool("nudge_worker", {
        description: "Send a message to a running worker. Use this to answer questions or provide guidance.",
        inputSchema: {
            id: z.string().describe("Worker ID"),
            message: z
                .string()
                .describe("Message to send to the worker's terminal"),
        },
    }, async ({ id, message }) => {
        try {
            const registry = readRegistry(stateDir);
            const worker = registry.workers[id];
            if (!worker) {
                return {
                    content: [
                        {
                            type: "text",
                            text: `Worker '${id}' not found.`,
                        },
                    ],
                    isError: true,
                };
            }
            if (!tmuxSessionExists(worker.tmuxSession)) {
                return {
                    content: [
                        {
                            type: "text",
                            text: `Worker '${id}' tmux session is not running.`,
                        },
                    ],
                    isError: true,
                };
            }
            execFileSync("tmux", [
                "send-keys",
                "-t",
                worker.tmuxSession,
                "-l",
                message,
            ]);
            execFileSync("tmux", ["send-keys", "-t", worker.tmuxSession, "Enter"]);
            return {
                content: [
                    {
                        type: "text",
                        text: `Message sent to worker '${id}' (${worker.name}).`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to nudge worker: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    server.registerTool("stop_worker", {
        description: "Stop a running worker. The worker's code changes and worktree are preserved.",
        inputSchema: {
            id: z.string().describe("Worker ID"),
        },
    }, async ({ id }) => {
        try {
            const registry = readRegistry(stateDir);
            const worker = registry.workers[id];
            if (!worker) {
                return {
                    content: [
                        {
                            type: "text",
                            text: `Worker '${id}' not found.`,
                        },
                    ],
                    isError: true,
                };
            }
            if (worker.status === "stopped") {
                return {
                    content: [
                        {
                            type: "text",
                            text: `Worker '${id}' (${worker.name}) is already stopped.`,
                        },
                    ],
                };
            }
            if (tmuxSessionExists(worker.tmuxSession)) {
                killTmuxSession(worker.tmuxSession);
            }
            registry.workers[id] = { ...worker, status: "stopped" };
            writeRegistry(stateDir, registry);
            return {
                content: [
                    {
                        type: "text",
                        text: `Worker '${id}' (${worker.name}) has been stopped. Worktree preserved at: ${worker.worktreePath}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to stop worker: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    server.registerTool("read_mail", {
        description: "Read and clear all pending messages from workers. Messages are either task completions or questions. Reading is destructive — messages are removed after being read.",
        inputSchema: {},
    }, async () => {
        try {
            const messages = readAndFlushMail(stateDir, orchestratorId);
            if (messages.length === 0) {
                return {
                    content: [
                        { type: "text", text: "No new messages from workers." },
                    ],
                };
            }
            const formatted = messages
                .map((m) => {
                    const prefix = m.type === "completion" ? "COMPLETED" : "QUESTION";
                    return `[${prefix}] Worker ${m.workerName} (${m.workerId}):\n${m.content}`;
                })
                .join("\n\n---\n\n");
            return {
                content: [{ type: "text", text: formatted }],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to read mail: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    const transport = new StdioServerTransport();
    await server.connect(transport);
    transport.onclose = () => {
        cleanupWorkers(stateDir);
    };
    process.on("SIGTERM", () => {
        cleanupWorkers(stateDir);
        process.exit(0);
    });
    process.on("SIGINT", () => {
        cleanupWorkers(stateDir);
        process.exit(0);
    });
}
package/dist/spawn.d.ts
ADDED
package/dist/spawn.js
ADDED
@@ -0,0 +1,67 @@
import { randomUUID } from "node:crypto";
import * as fs from "node:fs";
import * as path from "node:path";
import * as os from "node:os";
import { execFileSync } from "node:child_process";
import { fileURLToPath } from "node:url";
const __filename = fileURLToPath(import.meta.url);
function getBinaryPath() {
    return path.resolve(path.dirname(__filename), "index.js");
}
function escapeShellArg(arg) {
    return arg.replace(/'/g, "'\\''");
}
export function spawnWorker(options) {
    const { id, name, task, workingDir, stateDir, tmuxSession, orchestratorId } = options;
    const sessionId = randomUUID();
    // Write MCP config pointing worker at same binary with env vars
    const binaryPath = getBinaryPath();
    const mcpConfig = {
        mcpServers: {
            worker: {
                command: "node",
                args: [binaryPath],
                env: {
                    WORKER_ID: id,
                    WORKER_NAME: name,
                    STATE_DIR: stateDir,
                    ORCHESTRATOR_ID: orchestratorId,
                },
            },
        },
    };
    const tmpDir = path.join(os.tmpdir(), "worker-manager");
    fs.mkdirSync(tmpDir, { recursive: true });
    const mcpConfigPath = path.join(tmpDir, `mcp-config-${id}.json`);
    fs.writeFileSync(mcpConfigPath, JSON.stringify(mcpConfig));
    // Create tmux session
    execFileSync("tmux", [
        "new-session",
        "-d",
        "-s",
        tmuxSession,
        "-c",
        workingDir,
    ]);
    // Build Claude command
    const systemPrompt = "You are a worker agent. Complete the assigned task thoroughly. " +
        "When done, call the `complete` tool with a summary of what you did. " +
        "If you need clarification from the orchestrator, call the `ask` tool with your question.";
    // Export env vars so the Stop hook can identify this as a worker session
    const envExport = `export WORKER_ID=${escapeShellArg(id)} STATE_DIR=${escapeShellArg(stateDir)}`;
    const claudeCmd = [
        "claude",
        "--session-id",
        sessionId,
        "--mcp-config",
        mcpConfigPath,
        "--dangerously-skip-permissions",
        "--append-system-prompt",
        `'${escapeShellArg(systemPrompt)}'`,
        `'${escapeShellArg(task)}'`,
    ].join(" ");
    const fullCommand = `${envExport} ; ${claudeCmd}`;
    // Send command to tmux session
    execFileSync("tmux", ["send-keys", "-t", tmuxSession, "-l", fullCommand]);
    execFileSync("tmux", ["send-keys", "-t", tmuxSession, "Enter"]);
}
package/dist/state.d.ts
ADDED
@@ -0,0 +1,18 @@
import type { WorkerEntry, WorkerRegistry, MailMessage } from "./types.js";
export declare function getGitRoot(): string;
export declare function getStateDir(gitRoot?: string): string;
export declare function writeOrchestratorMapping(stateDir: string, ppid: number, orchestratorId: string): void;
export declare function readOrchestratorId(stateDir: string, ppid: number): string | null;
export declare function removeOrchestratorMapping(stateDir: string, ppid: number): void;
export declare function readRegistry(stateDir: string): WorkerRegistry;
export declare function writeRegistry(stateDir: string, registry: WorkerRegistry): void;
export declare function updateWorker(stateDir: string, id: string, update: Partial<WorkerEntry>): void;
/**
 * Write a mail message to the orchestrator's scoped mailbox at
 * `mail/<orchestratorId>/`.
 */
export declare function writeMail(stateDir: string, message: MailMessage, orchestratorId: string): void;
/**
 * Read and flush all mail from a specific orchestrator's mailbox.
 */
export declare function readAndFlushMail(stateDir: string, orchestratorId: string): MailMessage[];
package/dist/state.js
ADDED
@@ -0,0 +1,109 @@
import * as fs from "node:fs";
import * as path from "node:path";
import * as crypto from "node:crypto";
import * as os from "node:os";
import { execFileSync } from "node:child_process";
import { randomUUID } from "node:crypto";
export function getGitRoot() {
    return execFileSync("git", ["rev-parse", "--show-toplevel"], {
        encoding: "utf-8",
    }).trim();
}
export function getStateDir(gitRoot) {
    const root = gitRoot ?? getGitRoot();
    const hash = crypto
        .createHash("sha256")
        .update(root)
        .digest("hex")
        .slice(0, 12);
    return path.join(os.tmpdir(), "worker-manager", hash);
}
function ensureStateDir(stateDir) {
    fs.mkdirSync(stateDir, { recursive: true });
}
// --- Orchestrator identity mapping ---
// Maps Claude Code PID → orchestrator ID so hooks can find the right mailbox.
function orchestratorMappingDir(stateDir) {
    return path.join(stateDir, "orchestrators");
}
export function writeOrchestratorMapping(stateDir, ppid, orchestratorId) {
    const dir = orchestratorMappingDir(stateDir);
    fs.mkdirSync(dir, { recursive: true });
    fs.writeFileSync(path.join(dir, String(ppid)), orchestratorId);
}
export function readOrchestratorId(stateDir, ppid) {
    try {
        return fs
            .readFileSync(path.join(orchestratorMappingDir(stateDir), String(ppid)), "utf-8")
            .trim();
    }
    catch {
        return null;
    }
}
export function removeOrchestratorMapping(stateDir, ppid) {
    try {
        fs.unlinkSync(path.join(orchestratorMappingDir(stateDir), String(ppid)));
    }
    catch { }
}
function registryPath(stateDir) {
    return path.join(stateDir, "state.json");
}
export function readRegistry(stateDir) {
    const file = registryPath(stateDir);
    if (!fs.existsSync(file)) {
        return { workers: {} };
    }
    return JSON.parse(fs.readFileSync(file, "utf-8"));
}
export function writeRegistry(stateDir, registry) {
    ensureStateDir(stateDir);
    const file = registryPath(stateDir);
    const tmpFile = `${file}.${randomUUID()}.tmp`;
    fs.writeFileSync(tmpFile, JSON.stringify(registry, null, 2));
    fs.renameSync(tmpFile, file);
}
export function updateWorker(stateDir, id, update) {
    const registry = readRegistry(stateDir);
    const worker = registry.workers[id];
    if (!worker) {
        throw new Error(`Worker '${id}' not found in registry`);
    }
    registry.workers[id] = { ...worker, ...update };
    writeRegistry(stateDir, registry);
}
/**
 * Write a mail message to the orchestrator's scoped mailbox at
 * `mail/<orchestratorId>/`.
 */
export function writeMail(stateDir, message, orchestratorId) {
    const mailDir = path.join(stateDir, "mail", orchestratorId);
    fs.mkdirSync(mailDir, { recursive: true });
    const filename = `${message.timestamp}-${message.id}.json`;
    fs.writeFileSync(path.join(mailDir, filename), JSON.stringify(message, null, 2));
}
/**
 * Read and flush all mail from a specific orchestrator's mailbox.
 */
export function readAndFlushMail(stateDir, orchestratorId) {
    const mailDir = path.join(stateDir, "mail", orchestratorId);
    if (!fs.existsSync(mailDir)) {
        return [];
    }
    const files = fs.readdirSync(mailDir).filter((f) => f.endsWith(".json"));
    files.sort();
    const messages = [];
    for (const file of files) {
        const filePath = path.join(mailDir, file);
        try {
            const content = fs.readFileSync(filePath, "utf-8");
            messages.push(JSON.parse(content));
            fs.unlinkSync(filePath);
        }
        catch {
            // Skip corrupted files
        }
    }
    return messages;
}
package/dist/types.d.ts
ADDED
@@ -0,0 +1,25 @@
export type WorkerStatus = "running" | "completed" | "failed" | "asking" | "stopped";
export type WorkerEntry = {
    id: string;
    name: string;
    task: string;
    status: WorkerStatus;
    branch: string;
    worktreePath: string;
    tmuxSession: string;
    createdAt: string;
    completedAt?: string;
    summary?: string;
    useWorktree: boolean;
};
export type WorkerRegistry = {
    workers: Record<string, WorkerEntry>;
};
export type MailMessage = {
    id: string;
    workerId: string;
    workerName: string;
    type: "completion" | "question";
    content: string;
    timestamp: string;
};
package/dist/types.js
ADDED
@@ -0,0 +1 @@
export {};
package/dist/worker.d.ts
ADDED
@@ -0,0 +1 @@
export declare function startWorkerServer(): Promise<void>;
package/dist/worker.js
ADDED
@@ -0,0 +1,99 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { z } from "zod";
import { randomUUID } from "node:crypto";
import { updateWorker, writeMail } from "./state.js";
export async function startWorkerServer() {
    const workerId = process.env.WORKER_ID;
    const workerName = process.env.WORKER_NAME ?? workerId;
    const stateDir = process.env.STATE_DIR;
    const orchestratorId = process.env.ORCHESTRATOR_ID;
    const server = new McpServer({
        name: `worker-${workerId}`,
        version: "0.0.1",
    });
    server.registerTool("complete", {
        description: "Signal that you have completed your assigned task. Provide a summary of what you did.",
        inputSchema: {
            summary: z
                .string()
                .describe("Summary of what was accomplished and any notable changes"),
        },
    }, async ({ summary }) => {
        try {
            updateWorker(stateDir, workerId, {
                status: "completed",
                completedAt: new Date().toISOString(),
                summary,
            });
            writeMail(stateDir, {
                id: randomUUID(),
                workerId,
                workerName,
                type: "completion",
                content: summary,
                timestamp: new Date().toISOString(),
            }, orchestratorId);
            return {
                content: [
                    {
                        type: "text",
                        text: "Completion recorded. The orchestrator has been notified.",
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to record completion: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    server.registerTool("ask", {
        description: "Ask the orchestrator a question when you need clarification or are blocked.",
        inputSchema: {
            question: z.string().describe("Your question for the orchestrator"),
        },
    }, async ({ question }) => {
        try {
            updateWorker(stateDir, workerId, {
                status: "asking",
            });
            writeMail(stateDir, {
                id: randomUUID(),
                workerId,
                workerName,
                type: "question",
                content: question,
                timestamp: new Date().toISOString(),
            }, orchestratorId);
            return {
                content: [
                    {
                        type: "text",
                        text: "Question sent to orchestrator. Wait for a response via your terminal input.",
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Failed to send question: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    });
    const transport = new StdioServerTransport();
    await server.connect(transport);
}
package/hooks/hooks.json
ADDED
@@ -0,0 +1,26 @@
{
  "hooks": {
    "UserPromptSubmit": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "npx -y @lioneltay/worker-manager --hook",
            "timeout": 5
          }
        ]
      }
    ],
    "Stop": [
      {
        "hooks": [
          {
            "type": "command",
            "command": "npx -y @lioneltay/worker-manager --stop-hook",
            "timeout": 5
          }
        ]
      }
    ]
  }
}
package/package.json
ADDED
@@ -0,0 +1,35 @@
{
  "name": "@lioneltay/worker-manager",
  "version": "0.0.2",
  "description": "Claude Code plugin for spawning and managing worker agents in isolated worktrees",
  "repository": {
    "type": "git",
    "url": "https://github.com/lioneltay/agent-forge",
    "directory": "packages/claude-plugins/worker-manager"
  },
  "type": "module",
  "main": "dist/index.js",
  "bin": {
    "worker-manager": "dist/index.js"
  },
  "files": [
    "dist",
    ".claude-plugin",
    ".mcp.json",
    "hooks",
    "README.md"
  ],
  "scripts": {
    "build": "tsc",
    "dev": "tsc --watch"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.12.1",
    "@lioneltay/worktree-manager": "^0.0.1",
    "zod": "^3.24.2"
  },
  "devDependencies": {
    "@types/node": "^22.13.1",
    "typescript": "^5.7.3"
  }
}