@adamancyzhang/claude-orchestrator 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/README.md +437 -0
  2. package/bin/claude-orchestrator +2 -17
  3. package/dist/cli/commands.d.ts +20 -0
  4. package/dist/cli/commands.js +192 -0
  5. package/dist/cli/commands.js.map +1 -0
  6. package/dist/config.d.ts +15 -0
  7. package/dist/config.js +39 -0
  8. package/dist/config.js.map +1 -0
  9. package/dist/index.d.ts +2 -0
  10. package/dist/index.js +342 -0
  11. package/dist/index.js.map +1 -0
  12. package/dist/models/schemas.d.ts +308 -0
  13. package/dist/models/schemas.js +167 -0
  14. package/dist/models/schemas.js.map +1 -0
  15. package/dist/modules/context-store.d.ts +10 -0
  16. package/dist/modules/context-store.js +25 -0
  17. package/dist/modules/context-store.js.map +1 -0
  18. package/dist/modules/message-router.d.ts +12 -0
  19. package/dist/modules/message-router.js +94 -0
  20. package/dist/modules/message-router.js.map +1 -0
  21. package/dist/modules/registry.d.ts +11 -0
  22. package/dist/modules/registry.js +53 -0
  23. package/dist/modules/registry.js.map +1 -0
  24. package/dist/modules/task-queue.d.ts +10 -0
  25. package/dist/modules/task-queue.js +103 -0
  26. package/dist/modules/task-queue.js.map +1 -0
  27. package/dist/server.d.ts +2 -0
  28. package/dist/server.js +425 -0
  29. package/dist/server.js.map +1 -0
  30. package/dist/utils/output.d.ts +1 -0
  31. package/dist/utils/output.js +10 -0
  32. package/dist/utils/output.js.map +1 -0
  33. package/dist/zk/client.d.ts +54 -0
  34. package/dist/zk/client.js +417 -0
  35. package/dist/zk/client.js.map +1 -0
  36. package/dist/zk/paths.d.ts +16 -0
  37. package/dist/zk/paths.js +40 -0
  38. package/dist/zk/paths.js.map +1 -0
  39. package/dist/zk/watcher.d.ts +11 -0
  40. package/dist/zk/watcher.js +16 -0
  41. package/dist/zk/watcher.js.map +1 -0
  42. package/package.json +23 -6
  43. package/scripts/install.js +0 -123
package/README.md ADDED
@@ -0,0 +1,437 @@
1
+ # Claude Orchestrator
2
+
3
+ <p align="center">
4
+ <strong>Turn Claude Code instances into a multi-agent swarm — coordinated through ZooKeeper.</strong>
5
+ <br/>
6
+ <em><a href="README_zh.md">中文文档</a></em>
7
+ </p>
8
+
9
+ <p align="center">
10
+ <a href="https://www.npmjs.com/package/@adamancyzhang/claude-orchestrator"><img src="https://img.shields.io/npm/v/@adamancyzhang/claude-orchestrator?color=blue" alt="npm"></a>
11
+ <a href="https://github.com/adamancyzhang/claude-orchestrator-server"><img src="https://img.shields.io/github/license/adamancyzhang/claude-orchestrator-server" alt="license"></a>
12
+ <img src="https://img.shields.io/badge/node-18%2B-green" alt="node">
13
+ <img src="https://img.shields.io/badge/typescript-5.6%2B-blue" alt="typescript">
14
+ <img src="https://img.shields.io/badge/ZooKeeper-3.8%2B-orange" alt="zookeeper">
15
+ </p>
16
+
17
+ ---
18
+
19
+ ## What is this?
20
+
21
+ **Claude Orchestrator** lets you run multiple Claude Code instances that talk to each other — assign tasks, send messages, share context, and collaborate on real work. Think of it as giving each Claude Code instance a walkie-talkie and a shared kanban board, then watching them build together.
22
+
23
+ Behind the scenes, ZooKeeper acts as the coordination backbone: ephemeral nodes for instance heartbeat, sequential nodes for FIFO task ordering, and watches for real-time change notification.
24
+
25
+ ```
26
+ ┌──────────────────────────────────────────────────────────┐
27
+ │ Claude Orchestrator │
28
+ │ (MCP Server :3100) │
29
+ │ │
30
+ │ ┌──────────────┐ ┌──────────┐ ┌──────────────┐ │
31
+ │ │ Registry │ │ Tasks │ │ Messages │ │
32
+ │ │ who's here? │ │ FIFO Q │ │ P2P + cast │ │
33
+ │ └──────┬───────┘ └────┬─────┘ └──────┬───────┘ │
34
+ │ └────────────────┼──────────────┘ │
35
+ │ ┌──────┴──────┐ │
36
+ │ │ ZooKeeper │ │
37
+ │ └──────┬──────┘ │
38
+ │ ┌──────┴──────┐ │
39
+ │ │ Context │ │
40
+ │ │ KV Store │ │
41
+ │ └─────────────┘ │
42
+ └──────────────────────────────────────────────────────────┘
43
+ ▲ ▲ ▲
44
+ │ │ │
45
+ ┌────┴────┐ ┌────┴────┐ ┌────┴────┐
46
+ │ Tom │ │ Jerry │ │ Bob │
47
+ │Architect│ │Developer│ │ Tester │
48
+ └─────────┘ └─────────┘ └─────────┘
49
+ ```
50
+
51
+ ---
52
+
53
+ ## Quick Start
54
+
55
+ ### 1. Install the CLI
56
+
57
+ ```bash
58
+ npm install -g @adamancyzhang/claude-orchestrator
59
+ ```
60
+
61
+ ### 2. Start ZooKeeper
62
+
63
+ ```bash
64
+ docker-compose up -d
65
+ ```
66
+
67
+ ### 3. Start the MCP Server
68
+
69
+ ```bash
70
+ # From source
71
+ git clone https://github.com/adamancyzhang/claude-orchestrator-server.git
72
+ cd claude-orchestrator-server
73
+ npm install
74
+ npm run build
75
+ node dist/index.js server
76
+ # → MCP server listening on http://127.0.0.1:3100
77
+ ```
78
+
79
+ ### 4. Configure Claude Code
80
+
81
+ In your project's `.claude/mcp.json` (or `~/.claude/mcp.json`):
82
+
83
+ ```json
84
+ {
85
+ "mcpServers": {
86
+ "orchestrator": {
87
+ "type": "http",
88
+ "url": "http://127.0.0.1:3100/mcp"
89
+ }
90
+ }
91
+ }
92
+ ```
93
+
94
+ ### 5. Register & Go
95
+
96
+ Open Claude Code, then:
97
+
98
+ ```
99
+ I have an MCP tool called orchestrator. Call register_instance
100
+ with name="Tom" and role="architect". Remember my instance_id.
101
+ ```
102
+
103
+ Now open another terminal, start a second Claude Code instance, and register Jerry as a developer. They'll discover each other, pass tasks, and collaborate.
104
+
105
+ ---
106
+
107
+ ## How It Works
108
+
109
+ ### Four Modules, One ZooKeeper
110
+
111
+ | Module | What it does | ZK magic |
112
+ |--------|-------------|----------|
113
+ | **Instance Registry** | Register, heartbeat, discover, unregister | Ephemeral nodes → auto-cleanup on disconnect |
114
+ | **Task Queue** | Push → Claim → Complete | Sequential nodes for FIFO, ephemeral claims for atomic locks |
115
+ | **Message Router** | P2P messages, broadcast, help requests, long-poll | Persistent-sequential nodes, ZK watches for push |
116
+ | **Context Store** | Shared key-value storage, watch for changes | Persistent nodes, cross-instance visibility |
117
+
118
+ ### The MCP Tools (18)
119
+
120
+ Each Claude Code instance calls these tools to participate in the swarm:
121
+
122
+ | # | Tool | What it does |
123
+ |---|------|-------------|
124
+ | 1 | `register_instance` | Join the swarm with a name and role |
125
+ | 2 | `heartbeat` | Stay alive, optionally report what you're working on |
126
+ | 3 | `list_instances` | See who's online right now |
127
+ | 4 | `push_task` | Create a task (optionally assign to someone specific) |
128
+ | 5 | `claim_task` | Grab the next task — atomic, no two instances can claim the same one |
129
+ | 6 | `complete_task` | Mark a task done with results |
130
+ | 7 | `list_tasks` | View tasks by status (pending / claimed / completed) |
131
+ | 8 | `send_message` | DM another instance or broadcast to everyone |
132
+ | 9 | `poll_messages` | Check your inbox |
133
+ | 10 | `wait_for_message` | Long-poll — block until a message arrives |
134
+ | 11 | `dismiss_message` | Delete a message from your inbox |
135
+ | 12 | `request_help` | Broadcast a question to the whole team |
136
+ | 13 | `set_context` | Write a shared key-value entry |
137
+ | 14 | `get_context` | Read a shared key-value entry |
138
+ | 15 | `delete_context` | Remove a shared context key |
139
+ | 16 | `list_context_keys` | List all context keys |
140
+ | 17 | `mark_read` | Mark a specific message as read |
141
+ | 18 | `server_status` | Health check |
142
+
143
+ ### Or Use the CLI Directly
144
+
145
+ If you prefer the terminal over Claude Code:
146
+
147
+ ```bash
148
+ # Register
149
+ claude-orchestrator register --name Alice --role developer
150
+
151
+ # See who's around
152
+ claude-orchestrator list-instances
153
+
154
+ # Push a task
155
+ claude-orchestrator push-task --title "Add rate limiting" --priority 0
156
+
157
+ # Claim the next task
158
+ claude-orchestrator claim-task
159
+
160
+ # Send a message
161
+ claude-orchestrator send-message --to <instance-id> --content "How's PR #42 going?"
162
+
163
+ # Check inbox
164
+ claude-orchestrator poll-messages
165
+
166
+ # Wait for messages (blocks until received or timeout)
167
+ claude-orchestrator wait-for-message --timeout 60
168
+
169
+ # Dismiss a message
170
+ claude-orchestrator dismiss-message --message-id msg-0000000000
171
+
172
+ # Share context
173
+ claude-orchestrator set-context --key "api_version" --value "v2.1"
174
+
175
+ # Read shared context
176
+ claude-orchestrator get-context --key "api_version"
177
+
178
+ # List context keys
179
+ claude-orchestrator list-context-keys
180
+
181
+ # Delete context
182
+ claude-orchestrator delete-context --key "api_version"
183
+
184
+ # Watch context for changes
185
+ claude-orchestrator watch-context --key "jwt_strategy"
186
+
187
+ # Watch for new tasks
188
+ claude-orchestrator watch-tasks
189
+
190
+ # Unregister
191
+ claude-orchestrator unregister
192
+
193
+ # Show config
194
+ claude-orchestrator config
195
+
196
+ # Health check
197
+ claude-orchestrator status
198
+ ```
199
+
200
+ All CLI commands return JSON. Every command supports `--zookeeper` / `-z` (or `ZK_HOSTS` env var) for pointing at a remote ZooKeeper.
201
+
202
+ ---
203
+
204
+ ## Example Session
205
+
206
+ Here's a real flow with two instances — Tom (Architect) and Jerry (Developer):
207
+
208
+ **Tom registers:**
209
+ ```json
210
+ { "id": "a1b2c3d4...", "name": "Tom", "role": "architect", "status": "idle" }
211
+ ```
212
+
213
+ **Jerry registers:**
214
+ ```json
215
+ { "id": "f6e5d4c3...", "name": "Jerry", "role": "developer", "status": "idle" }
216
+ ```
217
+
218
+ **Tom lists instances:**
219
+ ```
220
+ 2 active instances:
221
+ [architect] Tom (a1b2c3d4...) status=idle
222
+ [developer] Jerry (f6e5d4c3...) status=idle
223
+ ```
224
+
225
+ **Tom assigns work:**
226
+ ```
227
+ push_task:
228
+ title: "Implement POST /api/auth/login"
229
+ description: "Email+password login, return JWT. Handle validation and errors."
230
+ priority: HIGH (0)
231
+ assignee: f6e5d4c3... (Jerry)
232
+ ```
233
+
234
+ **Jerry claims it:**
235
+ ```
236
+ claim_task → Got it! task-0000000000
237
+ heartbeat current_task="task-0000000000"
238
+ ```
239
+
240
+ **Jerry gets stuck and asks for help:**
241
+ ```
242
+ request_help:
243
+ question: "What should the JWT expiry be? Access vs refresh token?"
244
+ context: "Express + jsonwebtoken, ~100K DAU"
245
+ ```
246
+
247
+ **Tom checks messages and replies:**
248
+ ```
249
+ poll_messages → 1 new message from Jerry
250
+ send_message to=Jerry: "15min access, 7d refresh. Use Redis blacklist for logout."
251
+ ```
252
+
253
+ **Tom records the decision:**
254
+ ```
255
+ set_context key="jwt_strategy" value="access:15min, refresh:7d, blacklist:redis"
256
+ ```
257
+
258
+ **Jerry finishes:**
259
+ ```
260
+ complete_task task_id="task-0000000000" result="PR #42 — implemented login endpoint with tests"
261
+ ```
262
+
263
+ No polling required for task claiming — the atomic claim mechanism means Jerry always gets the right task. Messages are delivered instantly via ZooKeeper's persistent-sequential nodes.
264
+
265
+ ---
266
+
267
+ ## ZooKeeper Schema
268
+
269
+ ```
270
+ /claude-orchestrator
271
+ ├── instances/
272
+ │ ├── a1b2c3d4... [EPHEMERAL] Tom's registration
273
+ │ └── f6e5d4c3... [EPHEMERAL] Jerry's registration
274
+ ├── tasks/
275
+ │ ├── pending/
276
+ │ │ ├── task-0000000000 [PERSISTENT_SEQUENTIAL]
277
+ │ │ └── task-0000000001 [PERSISTENT_SEQUENTIAL]
278
+ │ ├── claimed/
279
+ │ │ └── f6e5d4c3-task-0000000000 [EPHEMERAL] ← atomic lock!
280
+ │ └── completed/
281
+ │ └── task-0000000000 [PERSISTENT]
282
+ ├── messages/
283
+ │ ├── a1b2c3d4.../
284
+ │ │ └── msg-0000000000 [PERSISTENT_SEQUENTIAL]
285
+ │ └── f6e5d4c3.../
286
+ │ └── msg-0000000000 [PERSISTENT_SEQUENTIAL]
287
+ └── context/
288
+ └── jwt_strategy [PERSISTENT]
289
+ ```
290
+
291
+ **Key insight:** Ephemeral nodes mean crashed instances auto-unregister. Ephemeral claim nodes mean abandoned tasks auto-release. No deadlocks, no orphans. ZooKeeper handles the lifecycle.
292
+
293
+ ---
294
+
295
+ ## Installation & Development
296
+
297
+ ### Prerequisites
298
+
299
+ - Node.js 18+
300
+ - Docker (for ZooKeeper)
301
+ - Claude Code (for the MCP integration)
302
+
303
+ ### From Source
304
+
305
+ ```bash
306
+ git clone https://github.com/adamancyzhang/claude-orchestrator-server.git
307
+ cd claude-orchestrator-server
308
+
309
+ # Install dependencies
310
+ npm install
311
+
312
+ # Start ZooKeeper
313
+ docker-compose up -d
314
+
315
+ # Build TypeScript
316
+ npm run build
317
+
318
+ # Start the server
319
+ node dist/index.js server
320
+
321
+ # Or use the CLI directly
322
+ node dist/index.js status
323
+ ```
324
+
325
+ ### Run Tests
326
+
327
+ ```bash
328
+ npm test
329
+ ```
330
+
331
+ ---
332
+
333
+ ## Skills for Claude Code
334
+
335
+ The repo includes Claude Code skills that make the orchestrator even easier to use:
336
+
337
+ | Skill | What it does |
338
+ |-------|-------------|
339
+ | `claude-orchestrator` | Full CLI reference — all 21 commands with examples |
340
+ | `orchestrator-register` | Guided registration flow |
341
+ | `orchestrator-status` | Dashboard: health, instances, tasks |
342
+ | `orchestrator-communicate` | Message patterns: poll, DM, broadcast |
343
+ | `orchestrator-help` | Help-request workflow |
344
+ | `orchestrator-agent` | Autonomous agent loop: check → claim → work → complete |
345
+
346
+ ---
347
+
348
+ ## Why ZooKeeper?
349
+
350
+ | Concern | ZooKeeper answer |
351
+ |---------|-----------------|
352
+ | Instance lifecycle | Ephemeral nodes → auto-cleanup. No heartbeat polling needed. |
353
+ | Task ordering | Sequential nodes → guaranteed FIFO. No race conditions. |
354
+ | Claim atomicity | `create(path, ephemeral=true)` is atomic at the ZK level. Only one winner. |
355
+ | Change notification | Built-in watches → push, not poll. |
356
+ | Dependencies | One dependency (ZK). No external database needed. |
357
+
358
+ Zero external database. All state lives in ZooKeeper.
359
+
360
+ ---
361
+
362
+ ## Roles
363
+
364
+ | Role | Value | Typical behavior |
365
+ |------|-------|-----------------|
366
+ | Architect | `architect` | Sets standards, designs tasks, reviews results |
367
+ | Developer | `developer` | Claims tasks, writes code, submits PRs |
368
+ | Tester | `tester` | Claims test tasks, E2E verification |
369
+ | General | `general` | Any role |
370
+
371
+ ---
372
+
373
+ ## Configuration Reference
374
+
375
+ | Config | Where | Default |
376
+ |--------|-------|---------|
377
+ | ZK hosts | `-z, --zookeeper` flag or `ZK_HOSTS` env | `127.0.0.1:2181` |
378
+ | Instance ID | `-i, --instance-id` flag or `~/.claude-orchestrator/config.json` | auto-saved after `register` |
379
+ | MCP server host | `--host` flag or `ORCHESTRATOR_HOST` env | `127.0.0.1` |
380
+ | MCP server port | `--port` flag or `ORCHESTRATOR_PORT` env | `3100` |
381
+
382
+ ---
383
+
384
+ ## Project Structure
385
+
386
+ ```
387
+ ├── src/
388
+ │ ├── index.ts # CLI entry point (commander)
389
+ │ ├── server.ts # MCP server — 18 tools, 5 resources, 2 prompts
390
+ │ ├── config.ts # Configuration handling
391
+ │ ├── cli/
392
+ │ │ └── commands.ts # CLI subcommand implementations
393
+ │ ├── zk/
394
+ │ │ ├── client.ts # ZooKeeper connection management
395
+ │ │ ├── paths.ts # ZK path constants
396
+ │ │ └── watcher.ts # ZK watch manager
397
+ │ ├── modules/
398
+ │ │ ├── registry.ts # Instance registry
399
+ │ │ ├── task-queue.ts # Task queue with atomic claim
400
+ │ │ ├── message-router.ts # Message routing + long-poll
401
+ │ │ └── context-store.ts # Shared key-value store
402
+ │ ├── models/
403
+ │ │ └── schemas.ts # Zod schemas and inferred types
404
+ │ └── utils/
405
+ │ └── output.ts # CLI output formatting
406
+ ├── bin/
407
+ │ └── claude-orchestrator # npm CLI entry (Node.js)
408
+ ├── scripts/
409
+ │ ├── start-zk.sh # Docker ZK launcher
410
+ │ ├── start-server.sh # Server launcher
411
+ │ ├── stop-all.sh # Tear down
412
+ │ └── publish.sh # npm publish pipeline
413
+ ├── skills/ # Claude Code skills
414
+ ├── docs/
415
+ │ ├── v0.1.0/ # Archived Python v0.1.0 docs
416
+ │ └── v0.2.0/ # Current TypeScript docs
417
+ │ ├── prd/ # Full spec + architecture
418
+ │ └── operations-guide.md # Step-by-step walkthrough
419
+ ├── tests/
420
+ │ ├── unit/
421
+ │ └── integration/
422
+ ├── docker-compose.yml # ZooKeeper
423
+ ├── package.json # npm package definition
424
+ └── tsconfig.json # TypeScript configuration
425
+ ```
426
+
427
+ ---
428
+
429
+ ## License
430
+
431
+ MIT — use it, fork it, ship it.
432
+
433
+ ---
434
+
435
+ <p align="center">
436
+ <sub>Built with TypeScript, ZooKeeper, and the MCP protocol. Orchestrate responsibly.</sub>
437
+ </p>
@@ -1,20 +1,5 @@
1
1
#!/usr/bin/env node
// Thin launcher: delegate to the compiled CLI entry point.
// Exits non-zero if the dist bundle is missing or fails to load.
import("../dist/index.js").catch((error) => {
  console.error(error);
  process.exit(1);
});
@@ -0,0 +1,20 @@
1
// CLI subcommand declarations (compiled output of src/cli/commands.ts).
// Every command opens a ZooKeeper session at `zkHosts`, performs one
// operation, prints a JSON result to stdout, and resolves with no value.
// Where present, `cliInstanceId` may be undefined; the implementation then
// resolves the id another way — presumably from the locally saved config
// written by `register` (see resolveInstanceId in config.js) — TODO confirm.

// Health check: ZK connectivity plus online-instance count.
export declare function cmdStatus(zkHosts: string): Promise<void>;
// Join the swarm under `name`/`role`; `instanceId` optionally pins the id.
export declare function cmdRegister(zkHosts: string, instanceId: string | undefined, name: string, role: string): Promise<void>;
// Refresh liveness; `currentTask` optionally reports what is being worked on.
export declare function cmdHeartbeat(zkHosts: string, cliInstanceId: string | undefined, currentTask?: string): Promise<void>;
// Print every registered instance.
export declare function cmdListInstances(zkHosts: string): Promise<void>;
// Create a task; `assignee` optionally targets a specific instance.
export declare function cmdPushTask(zkHosts: string, cliInstanceId: string | undefined, title: string, description: string, priority: number, assignee?: string): Promise<void>;
// Atomically claim the next pending task.
export declare function cmdClaimTask(zkHosts: string, cliInstanceId: string | undefined): Promise<void>;
// Mark a claimed task done with a result summary.
export declare function cmdCompleteTask(zkHosts: string, cliInstanceId: string | undefined, taskId: string, result: string): Promise<void>;
// List tasks, optionally filtered by status.
export declare function cmdListTasks(zkHosts: string, statusFilter?: string): Promise<void>;
// DM one instance (`toInstance`) or broadcast to all (`broadcast`).
export declare function cmdSendMessage(zkHosts: string, cliInstanceId: string | undefined, content: string, toInstance?: string, broadcast?: boolean): Promise<void>;
// Print this instance's inbox.
export declare function cmdPollMessages(zkHosts: string, cliInstanceId: string | undefined): Promise<void>;
// Long-poll the inbox; `timeout` bounds the wait.
export declare function cmdWaitForMessage(zkHosts: string, cliInstanceId: string | undefined, timeout: number): Promise<void>;
// Delete one message from the inbox.
export declare function cmdDismissMessage(zkHosts: string, cliInstanceId: string | undefined, messageId: string): Promise<void>;
// Broadcast a help request; `ctx` adds optional background.
export declare function cmdRequestHelp(zkHosts: string, cliInstanceId: string | undefined, question: string, ctx?: string): Promise<void>;
// Shared key-value context: write / read / delete / enumerate.
export declare function cmdSetContext(zkHosts: string, cliInstanceId: string | undefined, key: string, value: string): Promise<void>;
export declare function cmdGetContext(zkHosts: string, key: string): Promise<void>;
export declare function cmdDeleteContext(zkHosts: string, key: string): Promise<void>;
export declare function cmdListContextKeys(zkHosts: string): Promise<void>;
// Blocking watchers: print on change, then exit.
export declare function cmdWatchContext(zkHosts: string, key: string): Promise<void>;
export declare function cmdWatchTasks(zkHosts: string): Promise<void>;
// Remove this instance's registration.
export declare function cmdUnregister(zkHosts: string, cliInstanceId: string | undefined): Promise<void>;
@@ -0,0 +1,192 @@
1
+ import { ZkClient } from "../zk/client.js";
2
+ import { InstanceRegistry } from "../modules/registry.js";
3
+ import { TaskQueue } from "../modules/task-queue.js";
4
+ import { MessageRouter } from "../modules/message-router.js";
5
+ import { ContextStore } from "../modules/context-store.js";
6
+ import { resolveInstanceId, saveInstanceId } from "../config.js";
7
+ import { output } from "../utils/output.js";
8
/**
 * Open a ZooKeeper session, build the module facades, run `fn` with them,
 * and always disconnect afterwards — even when `fn` throws.
 * Resolves with whatever `fn` returns.
 */
async function withZk(hosts, fn) {
    const zk = new ZkClient(hosts);
    await zk.connect();
    const modules = {
        zk,
        registry: new InstanceRegistry(zk),
        taskQueue: new TaskQueue(zk),
        messageRouter: new MessageRouter(zk),
        contextStore: new ContextStore(zk),
    };
    try {
        return await fn(modules);
    }
    finally {
        await zk.disconnect();
    }
}
22
/** Print overall health: ZK connectivity and the number of live instances. */
export async function cmdStatus(zkHosts) {
    await withZk(zkHosts, async ({ zk, registry }) => {
        const isUp = zk.connected;
        const online = await registry.listAll();
        output({
            status: isUp ? "healthy" : "degraded",
            zookeeper: isUp ? "connected" : "disconnected",
            instances_online: online.length,
        });
    });
}
33
/** Register this instance in the swarm and persist its id to local config. */
export async function cmdRegister(zkHosts, instanceId, name, role) {
    await withZk(zkHosts, async ({ registry }) => {
        const created = await registry.register(name, role, instanceId);
        saveInstanceId(created.id);
        output(created);
    });
}
40
/** Refresh this instance's liveness, optionally reporting the current task. */
export async function cmdHeartbeat(zkHosts, cliInstanceId, currentTask) {
    await withZk(zkHosts, async ({ registry }) => {
        const id = resolveInstanceId(cliInstanceId);
        await registry.heartbeat(id, currentTask);
        output({ status: "ok", instance_id: id });
    });
}
47
/** Print every instance currently registered in ZooKeeper. */
export async function cmdListInstances(zkHosts) {
    await withZk(zkHosts, async ({ registry }) => {
        output(await registry.listAll());
    });
}
53
/** Create a task in the pending queue, optionally pre-assigned to an instance. */
export async function cmdPushTask(zkHosts, cliInstanceId, title, description, priority, assignee) {
    await withZk(zkHosts, async ({ taskQueue }) => {
        // Pushing does not require registration; fall back to an empty creator id.
        const creator = cliInstanceId ?? "";
        output(await taskQueue.push(title, description, priority, creator, assignee));
    });
}
60
/** Atomically claim the next pending task, or report that none are available. */
export async function cmdClaimTask(zkHosts, cliInstanceId) {
    await withZk(zkHosts, async ({ taskQueue }) => {
        const id = resolveInstanceId(cliInstanceId);
        // claim() yields the task object or a nullish value when the queue is empty.
        const claimed = await taskQueue.claim(id);
        output(claimed ?? { status: "no_tasks", message: "No pending tasks available." });
    });
}
72
/** Mark a claimed task as completed with a result summary. */
export async function cmdCompleteTask(zkHosts, cliInstanceId, taskId, result) {
    await withZk(zkHosts, async ({ taskQueue }) => {
        const id = resolveInstanceId(cliInstanceId);
        output(await taskQueue.complete(id, taskId, result));
    });
}
79
/** List tasks, optionally narrowed to a single status filter. */
export async function cmdListTasks(zkHosts, statusFilter) {
    await withZk(zkHosts, async ({ taskQueue }) => {
        output(await taskQueue.listTasks(statusFilter));
    });
}
85
/** Send a direct message or a broadcast; prints the recipients reached. */
export async function cmdSendMessage(zkHosts, cliInstanceId, content, toInstance, broadcast = false) {
    await withZk(zkHosts, async ({ registry, messageRouter }) => {
        const id = resolveInstanceId(cliInstanceId);
        const self = await registry.get(id);
        // Fall back to a truncated id when the sender has no registry entry.
        const senderName = self?.name ?? id.slice(0, 8);
        const delivered = await messageRouter.send(id, senderName, content, toInstance, broadcast);
        const recipients = delivered.map((m) => m.to_instance);
        output({ sent_to: recipients, message_count: recipients.length });
    });
}
95
/** Print this instance's inbox. */
export async function cmdPollMessages(zkHosts, cliInstanceId) {
    await withZk(zkHosts, async ({ messageRouter }) => {
        const id = resolveInstanceId(cliInstanceId);
        output(await messageRouter.poll(id));
    });
}
102
/** Long-poll the inbox: block until a message arrives or `timeout` elapses. */
export async function cmdWaitForMessage(zkHosts, cliInstanceId, timeout) {
    await withZk(zkHosts, async ({ messageRouter }) => {
        const id = resolveInstanceId(cliInstanceId);
        const received = await messageRouter.waitForMessage(id, timeout);
        if (received.length > 0) {
            output(received);
        } else {
            output({ status: "timeout", message: "No messages received." });
        }
    });
}
109
/** Delete one message from this instance's inbox. */
export async function cmdDismissMessage(zkHosts, cliInstanceId, messageId) {
    await withZk(zkHosts, async ({ messageRouter }) => {
        const id = resolveInstanceId(cliInstanceId);
        await messageRouter.dismissMessage(id, messageId);
        output({ status: "dismissed", message_id: messageId });
    });
}
116
/** Broadcast a help request (question plus optional context) to the team. */
export async function cmdRequestHelp(zkHosts, cliInstanceId, question, ctx) {
    await withZk(zkHosts, async ({ registry, messageRouter }) => {
        const id = resolveInstanceId(cliInstanceId);
        const self = await registry.get(id);
        // Fall back to a truncated id when the sender has no registry entry.
        const senderName = self?.name ?? id.slice(0, 8);
        const delivered = await messageRouter.requestHelp(id, senderName, question, ctx);
        const recipients = delivered.map((m) => m.to_instance);
        output({ sent_to: recipients, message_count: recipients.length });
    });
}
126
/** Write a shared key-value context entry visible to all instances. */
export async function cmdSetContext(zkHosts, cliInstanceId, key, value) {
    await withZk(zkHosts, async ({ contextStore }) => {
        // Writing context does not require registration; default writer id is empty.
        const writer = cliInstanceId ?? "";
        output(await contextStore.set(key, value, writer));
    });
}
133
/** Read one shared context key; reports not_found when the key is absent. */
export async function cmdGetContext(zkHosts, key) {
    await withZk(zkHosts, async ({ contextStore }) => {
        const value = await contextStore.get(key);
        output(value === null ? { key, value: null, status: "not_found" } : { key, value });
    });
}
144
/** Remove a shared context key. */
export async function cmdDeleteContext(zkHosts, key) {
    await withZk(zkHosts, async ({ contextStore }) => {
        await contextStore.delete(key);
        output({ key, status: "deleted" });
    });
}
150
/** List all shared context keys along with a count. */
export async function cmdListContextKeys(zkHosts) {
    await withZk(zkHosts, async ({ contextStore }) => {
        const keys = await contextStore.listKeys();
        output({ keys, count: keys.length });
    });
}
156
/**
 * Watch one context key: print its current value, then block until the
 * ZK watch reports a change, print the new value, and exit the process.
 */
export async function cmdWatchContext(zkHosts, key) {
    await withZk(zkHosts, async ({ zk }) => {
        const current = await zk.watchContextKey(key, (updated) => {
            output({ key, value: updated?.value ?? null, event: "changed" });
            process.exit(0);
        });
        const hint = `Watching key '${key}' for changes... (Ctrl+C to stop)`;
        output({ key, value: current === null ? null : current.value, message: hint });
        // Block forever; the watch callback (or Ctrl+C) terminates the process.
        await new Promise(() => { });
    });
}
172
/**
 * Watch the pending-task queue: print the current count, then block until
 * the queue changes, print the new task list, and exit the process.
 */
export async function cmdWatchTasks(zkHosts) {
    await withZk(zkHosts, async ({ zk }) => {
        const pending = await zk.watchPendingTasks((updated) => {
            output({ event: "tasks_changed", pending_count: updated.length, tasks: updated });
            process.exit(0);
        });
        output({
            pending_count: pending.length,
            message: "Watching for new tasks... (Ctrl+C to stop)",
        });
        // Keep the process alive until the watch fires or the user interrupts.
        await new Promise(() => { });
    });
}
185
/** Remove this instance's registration from the swarm. */
export async function cmdUnregister(zkHosts, cliInstanceId) {
    await withZk(zkHosts, async ({ registry }) => {
        const id = resolveInstanceId(cliInstanceId);
        await registry.unregister(id);
        output({ status: "unregistered", instance_id: id });
    });
}
192
+ //# sourceMappingURL=commands.js.map