macro-agent 0.1.10 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/CLAUDE.md +97 -0
  2. package/dist/acp/macro-agent.d.ts.map +1 -1
  3. package/dist/acp/macro-agent.js +42 -6
  4. package/dist/acp/macro-agent.js.map +1 -1
  5. package/dist/adapters/tasks-adapter.d.ts.map +1 -1
  6. package/dist/adapters/tasks-adapter.js +3 -0
  7. package/dist/adapters/tasks-adapter.js.map +1 -1
  8. package/dist/adapters/types.d.ts +1 -0
  9. package/dist/adapters/types.d.ts.map +1 -1
  10. package/dist/agent/agent-manager-v2.d.ts.map +1 -1
  11. package/dist/agent/agent-manager-v2.js +74 -11
  12. package/dist/agent/agent-manager-v2.js.map +1 -1
  13. package/dist/agent/agent-store.d.ts +10 -0
  14. package/dist/agent/agent-store.d.ts.map +1 -1
  15. package/dist/agent/agent-store.js +22 -0
  16. package/dist/agent/agent-store.js.map +1 -1
  17. package/dist/boot-v2.d.ts +88 -1
  18. package/dist/boot-v2.d.ts.map +1 -1
  19. package/dist/boot-v2.js +343 -7
  20. package/dist/boot-v2.js.map +1 -1
  21. package/dist/cli/acp.js +4 -0
  22. package/dist/cli/acp.js.map +1 -1
  23. package/dist/lifecycle/cascade.d.ts +25 -2
  24. package/dist/lifecycle/cascade.d.ts.map +1 -1
  25. package/dist/lifecycle/cascade.js +70 -2
  26. package/dist/lifecycle/cascade.js.map +1 -1
  27. package/dist/map/cascade-action-handler.d.ts +24 -0
  28. package/dist/map/cascade-action-handler.d.ts.map +1 -0
  29. package/dist/map/cascade-action-handler.js +170 -0
  30. package/dist/map/cascade-action-handler.js.map +1 -0
  31. package/dist/map/cascade-bridge.d.ts.map +1 -1
  32. package/dist/map/cascade-bridge.js +42 -5
  33. package/dist/map/cascade-bridge.js.map +1 -1
  34. package/dist/map/coordination-handler.d.ts.map +1 -1
  35. package/dist/map/coordination-handler.js +12 -1
  36. package/dist/map/coordination-handler.js.map +1 -1
  37. package/dist/map/server.d.ts.map +1 -1
  38. package/dist/map/server.js +172 -1
  39. package/dist/map/server.js.map +1 -1
  40. package/dist/map/sidecar.d.ts.map +1 -1
  41. package/dist/map/sidecar.js +18 -2
  42. package/dist/map/sidecar.js.map +1 -1
  43. package/dist/map/types.d.ts +2 -0
  44. package/dist/map/types.d.ts.map +1 -1
  45. package/dist/workspace/git-cascade-adapter.d.ts +1 -1
  46. package/dist/workspace/git-cascade-adapter.d.ts.map +1 -1
  47. package/dist/workspace/git-cascade-adapter.js +26 -0
  48. package/dist/workspace/git-cascade-adapter.js.map +1 -1
  49. package/dist/workspace/landing/merge-to-parent.d.ts.map +1 -1
  50. package/dist/workspace/landing/merge-to-parent.js +1 -0
  51. package/dist/workspace/landing/merge-to-parent.js.map +1 -1
  52. package/dist/workspace/recovery/spawn-resolver.d.ts.map +1 -1
  53. package/dist/workspace/recovery/spawn-resolver.js +8 -1
  54. package/dist/workspace/recovery/spawn-resolver.js.map +1 -1
  55. package/dist/workspace/types-v3.d.ts +7 -0
  56. package/dist/workspace/types-v3.d.ts.map +1 -1
  57. package/dist/workspace/types-v3.js.map +1 -1
  58. package/dist/workspace/types.d.ts +17 -0
  59. package/dist/workspace/types.d.ts.map +1 -1
  60. package/dist/workspace/workspace-manager.d.ts +9 -0
  61. package/dist/workspace/workspace-manager.d.ts.map +1 -1
  62. package/dist/workspace/workspace-manager.js +45 -2
  63. package/dist/workspace/workspace-manager.js.map +1 -1
  64. package/docs/design/task-dispatcher.md +880 -0
  65. package/package.json +3 -2
  66. package/src/__tests__/boot-v2.test.ts +435 -0
  67. package/src/__tests__/e2e/acp-over-map.e2e.test.ts +92 -0
  68. package/src/__tests__/e2e/bootstrap.e2e.test.ts +319 -0
  69. package/src/__tests__/e2e/dispatch-coordination.e2e.test.ts +495 -0
  70. package/src/__tests__/e2e/dispatch-live.e2e.test.ts +564 -0
  71. package/src/__tests__/e2e/dispatch-opentasks.e2e.test.ts +496 -0
  72. package/src/__tests__/e2e/dispatch-phase2-live.e2e.test.ts +456 -0
  73. package/src/__tests__/e2e/dispatch-phase2.e2e.test.ts +386 -0
  74. package/src/__tests__/e2e/dispatch.e2e.test.ts +376 -0
  75. package/src/acp/macro-agent.ts +41 -6
  76. package/src/adapters/__tests__/tasks-adapter.test.ts +1 -0
  77. package/src/adapters/tasks-adapter.ts +3 -0
  78. package/src/adapters/types.ts +1 -0
  79. package/src/agent/__tests__/agent-store.test.ts +52 -0
  80. package/src/agent/agent-manager-v2.ts +79 -11
  81. package/src/agent/agent-store.ts +24 -0
  82. package/src/boot-v2.ts +522 -35
  83. package/src/cli/acp.ts +4 -0
  84. package/src/lifecycle/__tests__/cascade-consolidation.test.ts +240 -0
  85. package/src/lifecycle/cascade.ts +77 -2
  86. package/src/map/__tests__/emit-event.test.ts +71 -0
  87. package/src/map/cascade-action-handler.ts +205 -0
  88. package/src/map/cascade-bridge.ts +43 -5
  89. package/src/map/coordination-handler.ts +13 -1
  90. package/src/map/server.ts +178 -1
  91. package/src/map/sidecar.ts +19 -2
  92. package/src/map/types.ts +3 -0
  93. package/src/workspace/__tests__/land-dispatch.test.ts +214 -0
  94. package/src/workspace/git-cascade-adapter.ts +30 -3
  95. package/src/workspace/landing/__tests__/strategies.test.ts +42 -0
  96. package/src/workspace/landing/merge-to-parent.ts +1 -0
  97. package/src/workspace/recovery/spawn-resolver.ts +8 -1
  98. package/src/workspace/types-v3.ts +7 -0
  99. package/src/workspace/types.ts +20 -0
  100. package/src/workspace/workspace-manager.ts +61 -2
  101. package/dist/workspace/dataplane-adapter.d.ts +0 -260
  102. package/dist/workspace/dataplane-adapter.d.ts.map +0 -1
  103. package/dist/workspace/dataplane-adapter.js +0 -416
  104. package/dist/workspace/dataplane-adapter.js.map +0 -1
package/src/boot-v2.ts CHANGED
@@ -22,6 +22,7 @@
22
22
  import * as path from "path";
23
23
  import * as os from "os";
24
24
  import * as fs from "fs";
25
+ import * as crypto from "crypto";
25
26
  import { AgentStore } from "./agent/agent-store.js";
26
27
  import {
27
28
  DefaultInboxAdapter,
@@ -52,7 +53,22 @@ export interface BootV2Config {
52
53
  /** Working directory (default: process.cwd()) */
53
54
  cwd?: string;
54
55
 
55
- /** Base directory for data storage (default: ~/.macro-agent) */
56
+ /**
57
+ * Stable identifier for this macro-agent run. Controls the default on-disk
58
+ * layout at `~/.macro-agent/<instanceId>/` (agents.db, inbox.db, sockets).
59
+ *
60
+ * Precedence when choosing an id:
61
+ * 1. explicit `instanceId` (this field)
62
+ * 2. `map.swarmId` (the MAP identity, when provided)
63
+ * 3. `inst_<sha256(cwd)[:12]>` (stable per-project fallback)
64
+ *
65
+ * Explicit `baseDir` overrides all of the above. Hosts that manage their
66
+ * own storage layout (openswarm spawns hosted swarms with a unique
67
+ * per-spawn data dir) still win by setting `baseDir` directly.
68
+ */
69
+ instanceId?: string;
70
+
71
+ /** Base directory for data storage. Default: `~/.macro-agent/<instanceId>/` */
56
72
  baseDir?: string;
57
73
 
58
74
  /** Default permission mode for spawned agents */
@@ -99,7 +115,13 @@ export interface BootV2Config {
99
115
  };
100
116
 
101
117
  /** MAP server config (accept inbound connections from TUI/clients) */
102
- mapServer?: { enabled?: boolean; port?: number; host?: string; path?: string; name?: string };
118
+ mapServer?: {
119
+ enabled?: boolean;
120
+ port?: number;
121
+ host?: string;
122
+ path?: string;
123
+ name?: string;
124
+ };
103
125
 
104
126
  /** MAP sidecar config (connect to OpenHive hub) */
105
127
  map?: {
@@ -156,7 +178,7 @@ export interface BootV2Config {
156
178
  * }
157
179
  */
158
180
  resolveTaskRef?: (
159
- spawnOptions: import("./agent/types.js").SpawnAgentOptions
181
+ spawnOptions: import("./agent/types.js").SpawnAgentOptions,
160
182
  ) => import("git-cascade/events").TaskRef | undefined;
161
183
 
162
184
  /**
@@ -170,15 +192,15 @@ export interface BootV2Config {
170
192
  /** minimem (agent memory) — registers as MCP server for all agents */
171
193
  minimem?: {
172
194
  enabled?: boolean;
173
- dir?: string; // default: ".swarm/minimem/"
174
- provider?: string; // "auto" | "openai" | "gemini" | "local"
175
- global?: boolean; // also search ~/.minimem
195
+ dir?: string; // default: ".swarm/minimem/"
196
+ provider?: string; // "auto" | "openai" | "gemini" | "local"
197
+ global?: boolean; // also search ~/.minimem
176
198
  };
177
199
 
178
200
  /** skill-tree (per-role skills) — compiles loadouts at team start, injects into prompts */
179
201
  skilltree?: {
180
202
  enabled?: boolean;
181
- basePath?: string; // default: ".swarm/skill-tree/"
203
+ basePath?: string; // default: ".swarm/skill-tree/"
182
204
  defaultProfile?: string;
183
205
  };
184
206
 
@@ -193,6 +215,76 @@ export interface BootV2Config {
193
215
  enabled?: boolean;
194
216
  peerId?: string;
195
217
  };
218
+
219
+ /** Task dispatch config — opt-in autonomous task dispatch mode */
220
+ dispatch?: {
221
+ enabled?: boolean;
222
+ pollIntervalMs?: number;
223
+ maxConcurrent?: number;
224
+ defaultRole?: string;
225
+ tags?: string[];
226
+ maxRetries?: number;
227
+ retryBaseDelayMs?: number;
228
+ retryMaxDelayMs?: number;
229
+ reconcile?: {
230
+ enabled?: boolean;
231
+ intervalMs?: number;
232
+ stallTimeoutMs?: number;
233
+ };
234
+ eligibility?: import("swarm-dispatch").EligibilityConfig;
235
+ /** Dispatch mode: route-only, spawn-only, prefer-route, prefer-spawn. Default: prefer-route when inbox available, spawn-only otherwise. */
236
+ dispatchMode?: import("swarm-dispatch").DispatchMode;
237
+ /** Enable mail-based work routing via agent-inbox (default: true when dispatch enabled). */
238
+ enableMailRouting?: boolean;
239
+ /** Enable roster-based agent discovery for route-first dispatch (default: true when dispatch enabled). */
240
+ enableRoster?: boolean;
241
+ /** Continuation config. */
242
+ continuation?: { delayMs?: number; maxTurns?: number };
243
+ };
244
+
245
+ /**
246
+ * Boot-time agents to spawn after AgentManager is ready.
247
+ *
248
+ * Currently supports `coordinator` — when set, fires a non-blocking
249
+ * `agentManager.spawn({ role: 'coordinator', parent: null, cwd, ... })`
250
+ * during boot so the swarm has a default head manager ready for chat
251
+ * without an explicit spawn call. Pass `true` for defaults (uses the
252
+ * boot config's cwd) or an object for fine control.
253
+ *
254
+ * Also driven by env var `MACRO_BOOTSTRAP_COORDINATOR=true` (with
255
+ * optional `MACRO_BOOTSTRAP_CWD=<path>`) when this field is unset —
256
+ * lets indirect callers (e.g. openswarm host) opt in without modifying
257
+ * the bootConfig pass-through whitelist.
258
+ */
259
+ bootstrap?: {
260
+ coordinator?: boolean | {
261
+ cwd?: string;
262
+ permissionMode?: PermissionMode;
263
+ agentType?: string;
264
+ customPrompt?: string;
265
+ task?: string;
266
+ };
267
+ /**
268
+ * Rehydration policy for agents that existed before this boot. Controls
269
+ * what the boot script does with agents that outlived their previous
270
+ * host process (agent-store is durable; a restart finds agents still
271
+ * marked `state='running'` but without any live ACP session).
272
+ *
273
+ * - `'none'` — skip rehydration entirely. Always fall through to fresh
274
+ * bootstrap spawn (or no spawn if `bootstrap.coordinator` is unset).
275
+ * - `'coordinators'` (default) — revive only root coordinators for
276
+ * this cwd. Matches the common openhive case where the workspace
277
+ * intent is "I want a coordinator here" and workers are ephemeral.
278
+ * - `'all'` — revive every `state='running'` agent at this cwd
279
+ * (coordinators plus workers/integrators/monitors). Parent-first
280
+ * ordering; children are skipped if their parent failed to revive
281
+ * or is `state='stopped'` (deliberately down).
282
+ *
283
+ * Hosted swarms pass `'all'` via `MACRO_BOOTSTRAP_REHYDRATE=all` so a
284
+ * restart restores the full macro-agent team, not just head managers.
285
+ */
286
+ rehydrate?: "none" | "coordinators" | "all";
287
+ };
196
288
  }
197
289
 
198
290
  // =============================================================================
@@ -224,6 +316,9 @@ export interface MacroAgentSystemV2 {
224
316
  /** Control socket path (for MCP subprocess connection) */
225
317
  controlSocketPath: string;
226
318
 
319
+ /** Task dispatcher (if dispatch mode enabled) */
320
+ taskDispatcher?: import("swarm-dispatch").TaskDispatcher;
321
+
227
322
  /** REST API server (if enabled) */
228
323
  apiServer?: ApiServer;
229
324
 
@@ -248,11 +343,63 @@ export interface MacroAgentSystemV2 {
248
343
  // =============================================================================
249
344
 
250
345
  export async function bootV2(
251
- config: BootV2Config = {}
346
+ config: BootV2Config = {},
252
347
  ): Promise<MacroAgentSystemV2> {
253
348
  const cwd = config.cwd ?? process.cwd();
254
- const baseDir =
255
- config.baseDir ?? path.join(os.homedir(), ".macro-agent");
349
+ // Resolve the instance id with three-tier precedence so the on-disk layout
350
+ // stays meaningful across standalone, MAP-connected, and hosted runs:
351
+ //
352
+ // 1. Explicit `instanceId` — caller-chosen, human-readable.
353
+ // 2. `map.swarmId` — the MAP identity when the caller has pre-registered
354
+ // one. This ties macro-agent's local store to its hub identity, so a
355
+ // swarm with swarm_id=X always resumes its own state.
356
+ // 3. A stable hash of the resolved cwd — the last-resort fallback so two
357
+ // processes in different projects never collide on agents.db, inbox.db,
358
+ // or the control socket. Reruns in the same project reuse their store.
359
+ //
360
+ // Hosts that manage their own storage layout (e.g. openswarm spawning
361
+ // per-swarm instances under a unique data dir) still win by passing
362
+ // `baseDir` directly. Legacy `~/.macro-agent/*.db` from pre-instancing
363
+ // versions is left alone — new boots start fresh under their own subdir.
364
+ const instanceId =
365
+ config.instanceId
366
+ ?? config.map?.swarmId
367
+ ?? ("inst_" + crypto.createHash("sha256").update(path.resolve(cwd)).digest("hex").slice(0, 12));
368
+ const baseDir = config.baseDir ?? path.join(os.homedir(), ".macro-agent", instanceId);
369
+
370
+ // Env-var bridge for hosts that pass through bootConfig with a fixed
371
+ // whitelist (e.g. openswarm). Translates MACRO_BOOTSTRAP_COORDINATOR /
372
+ // MACRO_BOOTSTRAP_CWD / MACRO_BOOTSTRAP_REHYDRATE into the structured
373
+ // bootstrap field if not already set programmatically. Programmatic
374
+ // config wins per field.
375
+ if (
376
+ process.env.MACRO_BOOTSTRAP_COORDINATOR === "true" &&
377
+ !config.bootstrap?.coordinator
378
+ ) {
379
+ const envCwd = process.env.MACRO_BOOTSTRAP_CWD;
380
+ config = {
381
+ ...config,
382
+ bootstrap: {
383
+ ...(config.bootstrap ?? {}),
384
+ coordinator: envCwd ? { cwd: envCwd } : true,
385
+ },
386
+ };
387
+ }
388
+ const envRehydrate = process.env.MACRO_BOOTSTRAP_REHYDRATE;
389
+ if (
390
+ (envRehydrate === "none" ||
391
+ envRehydrate === "coordinators" ||
392
+ envRehydrate === "all") &&
393
+ config.bootstrap?.rehydrate === undefined
394
+ ) {
395
+ config = {
396
+ ...config,
397
+ bootstrap: {
398
+ ...(config.bootstrap ?? {}),
399
+ rehydrate: envRehydrate,
400
+ },
401
+ };
402
+ }
256
403
 
257
404
  // Ensure base directory exists
258
405
  fs.mkdirSync(baseDir, { recursive: true });
@@ -263,8 +410,7 @@ export async function bootV2(
263
410
 
264
411
  // 2. Inbox Adapter (embedded agent-inbox, hybrid mode)
265
412
  const inboxSocketPath =
266
- config.inbox?.socketPath ??
267
- path.join(baseDir, "inbox.sock");
413
+ config.inbox?.socketPath ?? path.join(baseDir, "inbox.sock");
268
414
  const inboxSqlitePath = path.join(baseDir, "inbox.db");
269
415
 
270
416
  const inboxAdapter = new DefaultInboxAdapter({
@@ -294,7 +440,7 @@ export async function bootV2(
294
440
  } catch {
295
441
  // opentasks daemon may not be available — non-fatal
296
442
  console.warn(
297
- "[boot-v2] opentasks daemon not available. Task operations will fail until connected."
443
+ "[boot-v2] opentasks daemon not available. Task operations will fail until connected.",
298
444
  );
299
445
  }
300
446
 
@@ -318,14 +464,18 @@ export async function bootV2(
318
464
  controlSocketPath,
319
465
  taskResourceId: config.cascade?.taskResourceId,
320
466
  resolveTaskRef: config.cascade?.resolveTaskRef,
321
- }
467
+ },
322
468
  );
323
469
 
324
470
  // 6. Federation (cross-instance communication)
325
471
  let federationCleanup: (() => void) | null = null;
326
472
  if (config.federation) {
327
473
  const { setupFederation } = await import("./adapters/federation.js");
328
- federationCleanup = setupFederation(agentManager, inboxAdapter, config.federation);
474
+ federationCleanup = setupFederation(
475
+ agentManager,
476
+ inboxAdapter,
477
+ config.federation,
478
+ );
329
479
  }
330
480
 
331
481
  // 7. Trigger System V2
@@ -340,11 +490,178 @@ export async function bootV2(
340
490
  enableHeartbeat: config.trigger?.enableHeartbeat ?? false,
341
491
  heartbeatIntervalMs: config.trigger?.heartbeatIntervalMs,
342
492
  },
343
- }
493
+ },
344
494
  );
345
495
  await triggerSystem.start();
346
496
 
347
- // 7. Control Server (lifecycle RPC for MCP subprocesses)
497
+ // 7a. Task Dispatch (opt-in autonomous task dispatch mode)
498
+ let taskDispatcher: import("swarm-dispatch").TaskDispatcher | null = null;
499
+
500
+ if (config.dispatch?.enabled && tasksAdapter) {
501
+ const { createOrchestrator, createOpenTasksSource, createAgentInboxPort } =
502
+ await import("swarm-dispatch");
503
+ const { getStableInstanceId } = await import("./cli/stable-instance-id.js");
504
+
505
+ const claimantId = `${os.hostname()}:${process.pid}:${getStableInstanceId(cwd)}`;
506
+ const dispatchAgentId = `dispatcher:${claimantId}`;
507
+
508
+ // Adapt opentasks client → DispatchTaskSource
509
+ const opentasksClient = (tasksAdapter as any).client;
510
+ const source = opentasksClient
511
+ ? createOpenTasksSource(opentasksClient)
512
+ : {
513
+ // Fallback adapter used when no direct opentasks client is exposed; delegates to TasksAdapter methods instead
514
+ queryReady: async (opts?: { tags?: string[]; limit?: number }) =>
515
+ tasksAdapter.queryReady(opts),
516
+ claim: async (taskId: string, claimantIdArg: string) => {
517
+ try {
518
+ await tasksAdapter.assignTask(taskId, claimantIdArg);
519
+ return { success: true as const };
520
+ } catch {
521
+ return { success: false as const };
522
+ }
523
+ },
524
+ release: async (taskId: string) => tasksAdapter.unclaimTask(taskId),
525
+ transition: async (
526
+ taskId: string,
527
+ action: "start" | "complete" | "fail",
528
+ ) => tasksAdapter.transitionTask(taskId, action),
529
+ getTask: async (taskId: string) => tasksAdapter.getTask(taskId),
530
+ listInProgress: async () =>
531
+ tasksAdapter.listTasks({ status: "in_progress" }),
532
+ };
533
+
534
+ // Adapt AgentManagerV2 → DispatchAgentRuntime
535
+ const runtime: import("swarm-dispatch").DispatchAgentRuntime = {
536
+ spawn: async (opts: { prompt: string; taskId: string; role: string }) => {
537
+ const spawned = await agentManager.spawn({
538
+ task: opts.prompt,
539
+ task_id: opts.taskId,
540
+ role: opts.role,
541
+ parent: null,
542
+ });
543
+ return { id: spawned.id };
544
+ },
545
+ terminate: async (agentId: string, reason?: string) => {
546
+ await agentManager.terminate(agentId, (reason ?? "cancelled") as any);
547
+ },
548
+ onStopped: (callback: (agentId: string, reason: string) => void) =>
549
+ agentManager.onLifecycleEvent((event) => {
550
+ if (event.type === "stopped") {
551
+ callback(event.agent.id, event.reason);
552
+ }
553
+ }),
554
+ };
555
+
556
+ // Phase 2: Wire MessagePort via agent-inbox for mail-based work routing
557
+ let messagePort: import("swarm-dispatch").MessagePort | undefined;
558
+ if (config.dispatch.enableMailRouting !== false) {
559
+ const inbox = inboxAdapter.getInbox();
560
+ messagePort = createAgentInboxPort(
561
+ inbox.router as any,
562
+ inbox.events as any,
563
+ {
564
+ dispatcherAgentId: dispatchAgentId,
565
+ classifyMessage: (msg: any) => {
566
+ // Classify inbox messages as dispatchable work when they carry
567
+ // the x-dispatch/work schema. Other messages are ignored.
568
+ const content = msg.content as {
569
+ type?: string;
570
+ schema?: string;
571
+ data?: any;
572
+ };
573
+ if (content?.schema !== "x-dispatch/work") return null;
574
+ const data = content.data;
575
+ if (!data?.taskId) return null;
576
+ return {
577
+ messageId: msg.id,
578
+ correlationId: msg.thread_tag ?? msg.id,
579
+ replyTo: msg.sender_id ? { agentId: msg.sender_id } : undefined,
580
+ task: {
581
+ id: data.taskId,
582
+ title: data.title ?? `Delegated: ${data.taskId}`,
583
+ status: "open",
584
+ content: data.prompt ?? data.content,
585
+ tags: data.tags,
586
+ metadata: {
587
+ ...data.metadata,
588
+ role: data.role,
589
+ },
590
+ },
591
+ };
592
+ },
593
+ },
594
+ );
595
+
596
+ // Register the dispatcher as an agent in the inbox so it can receive messages
597
+ await inboxAdapter.registerAgent(dispatchAgentId, {
598
+ role: "dispatcher",
599
+ scope: "default",
600
+ });
601
+ }
602
+
603
+ // Phase 2: Wire AgentRoster via inbox agent listing for route-first dispatch
604
+ let roster: import("swarm-dispatch").AgentRoster | undefined;
605
+ if (config.dispatch.enableRoster !== false) {
606
+ const inbox = inboxAdapter.getInbox();
607
+ roster = {
608
+ async findAvailable(criteria) {
609
+ // List agents from inbox storage, filter by role and idle state
610
+ const agents = inbox.storage.listAgents();
611
+ return agents
612
+ .filter((a: any) => {
613
+ if (a.agentId === dispatchAgentId) return false;
614
+ if (criteria.role && a.role && a.role !== criteria.role)
615
+ return false;
616
+ if (criteria.notBusy && a.status === "busy") return false;
617
+ return true;
618
+ })
619
+ .map((a: any) => ({
620
+ agentId: a.agentId ?? a.agent_id ?? a.id,
621
+ host: a.host,
622
+ }));
623
+ },
624
+ };
625
+ }
626
+
627
+ // Determine dispatch mode
628
+ const hasRouting = !!messagePort && !!roster;
629
+ const dispatchMode =
630
+ config.dispatch.dispatchMode ??
631
+ (hasRouting ? ("prefer-route" as const) : ("spawn-only" as const));
632
+
633
+ taskDispatcher = createOrchestrator(source, runtime, {
634
+ claimantId,
635
+ pollIntervalMs: config.dispatch.pollIntervalMs ?? 15_000,
636
+ defaultRole: config.dispatch.defaultRole ?? "worker",
637
+ concurrency: { global: config.dispatch.maxConcurrent ?? 3 },
638
+ retry: {
639
+ maxRetries: config.dispatch.maxRetries ?? 3,
640
+ baseDelayMs: config.dispatch.retryBaseDelayMs ?? 10_000,
641
+ maxDelayMs: config.dispatch.retryMaxDelayMs ?? 300_000,
642
+ },
643
+ eligibility: config.dispatch.eligibility,
644
+ tags: config.dispatch.tags,
645
+ reconcile: {
646
+ enabled: config.dispatch.reconcile?.enabled ?? true,
647
+ intervalMs: config.dispatch.reconcile?.intervalMs ?? 60_000,
648
+ stallTimeoutMs: config.dispatch.reconcile?.stallTimeoutMs,
649
+ },
650
+ ...(config.dispatch.continuation && {
651
+ continuation: {
652
+ delayMs: config.dispatch.continuation.delayMs ?? 1_000,
653
+ maxTurns: config.dispatch.continuation.maxTurns ?? 20,
654
+ },
655
+ }),
656
+ messagePort,
657
+ roster,
658
+ dispatchMode,
659
+ });
660
+
661
+ await taskDispatcher.start();
662
+ }
663
+
664
+ // 7b. Control Server (lifecycle RPC for MCP subprocesses)
348
665
  const controlServer = new ControlServer(agentManager, {
349
666
  socketPath: controlSocketPath,
350
667
  });
@@ -358,7 +675,9 @@ export async function bootV2(
358
675
 
359
676
  const healthCheckTimer = setInterval(async () => {
360
677
  try {
361
- const unhealthy = controlServer.getUnhealthyAgents(UNHEALTHY_THRESHOLD_MS);
678
+ const unhealthy = controlServer.getUnhealthyAgents(
679
+ UNHEALTHY_THRESHOLD_MS,
680
+ );
362
681
  for (const { agentId, lastSeen } of unhealthy) {
363
682
  const agent = agentStore.getAgent(agentId);
364
683
  if (!agent || agent.state !== "running") continue;
@@ -379,7 +698,7 @@ export async function bootV2(
379
698
  staleSinceMs: Date.now() - lastSeen,
380
699
  },
381
700
  },
382
- { importance: "high", threadTag: `health:${agentId}` }
701
+ { importance: "high", threadTag: `health:${agentId}` },
383
702
  );
384
703
  } catch {
385
704
  // Best effort notification
@@ -421,20 +740,19 @@ export async function bootV2(
421
740
  // 10. ACP WebSocket server (optional)
422
741
  let acpServer: WebSocketACPServer | null = null;
423
742
  if (config.acp?.enabled) {
424
- const { createWebSocketACPServer } = await import("./acp/websocket-server.js");
425
- acpServer = createWebSocketACPServer(
426
- systemRef,
427
- {
428
- port: config.acp.port,
429
- host: config.acp.host,
430
- path: config.acp.path,
431
- },
432
- );
743
+ const { createWebSocketACPServer } =
744
+ await import("./acp/websocket-server.js");
745
+ acpServer = createWebSocketACPServer(systemRef, {
746
+ port: config.acp.port,
747
+ host: config.acp.host,
748
+ path: config.acp.path,
749
+ });
433
750
  await acpServer.start();
434
751
  }
435
752
 
436
753
  // 11. MAP Server (optional — accept inbound connections from TUI/clients)
437
- let mapServerInstance: import("./map/types.js").MAPServerInstance | null = null;
754
+ let mapServerInstance: import("./map/types.js").MAPServerInstance | null =
755
+ null;
438
756
  if (config.mapServer?.enabled) {
439
757
  try {
440
758
  const { createMAPServerInstance } = await import("./map/server.js");
@@ -467,15 +785,20 @@ export async function bootV2(
467
785
 
468
786
  // 12. Swarmkit integrations (minimem, skill-tree, sessionlog)
469
787
  agentManager.setIntegrationConfigs({
470
- minimem: config.minimem?.enabled ? config.minimem as any : undefined,
471
- skilltree: config.skilltree?.enabled ? config.skilltree as any : undefined,
472
- sessionlog: config.sessionlog?.enabled ? config.sessionlog as any : undefined,
788
+ minimem: config.minimem?.enabled ? (config.minimem as any) : undefined,
789
+ skilltree: config.skilltree?.enabled
790
+ ? (config.skilltree as any)
791
+ : undefined,
792
+ sessionlog: config.sessionlog?.enabled
793
+ ? (config.sessionlog as any)
794
+ : undefined,
473
795
  });
474
796
 
475
797
  // 12b. Skill-tree loadout compilation (if enabled)
476
798
  if (config.skilltree?.enabled) {
477
799
  try {
478
- const { compileAllRoleLoadouts } = await import("./integrations/skilltree.js");
800
+ const { compileAllRoleLoadouts } =
801
+ await import("./integrations/skilltree.js");
479
802
  // Gather roles from the role registry
480
803
  const registeredRoles = roleRegistry.listRoles();
481
804
  const roleNames = registeredRoles.map((r) => r.name);
@@ -536,6 +859,18 @@ export async function bootV2(
536
859
  await mapSidecar.start();
537
860
  // Wire sidecar into agent manager for session-end checkpoints
538
861
  agentManager.setSidecar(mapSidecar);
862
+
863
+ // Bridge dispatch events to MAP for observability. Spread the source
864
+ // event first, then namespace the `type` field — otherwise tsc warns
865
+ // about the literal key being overwritten by the spread.
866
+ if (taskDispatcher && mapSidecar.emitEvent) {
867
+ taskDispatcher.onEvent((event) => {
868
+ mapSidecar!.emitEvent!({
869
+ ...event,
870
+ type: `dispatch.${event.type}`,
871
+ });
872
+ });
873
+ }
539
874
  // Attach to shared system ref so ACP/MAP handlers can access it
540
875
  systemRef.mapSidecar = mapSidecar;
541
876
  } catch (err) {
@@ -546,7 +881,156 @@ export async function bootV2(
546
881
  }
547
882
  }
548
883
 
549
- // 13. Return system handle
884
+ // 13. Boot-time agents (opt-in)
885
+ // Fire after all subsystems are wired so the agent's lifecycle events
886
+ // (spawned/started) flow through the lifecycle bridge → MAP hub. Non-
887
+ // blocking: don't gate boot completion on agent process startup, which
888
+ // takes seconds. Failures are logged but do not abort boot.
889
+ //
890
+ // Rehydration on restart: the agent-store persists across process
891
+ // restarts. When we boot into a workspace that already has one or more
892
+ // coordinators (e.g. openhive spawned this swarm previously + auto-
893
+ // revived it), resume THOSE agents instead of spawning a brand-new one.
894
+ // Otherwise the UI shows a different coordinator name after every
895
+ // server restart — the prior conversations still exist on disk but get
896
+ // buried under stale, state='stopped' records that the UI treats as
897
+ // dead.
898
+ if (config.bootstrap?.coordinator) {
899
+ const opts = config.bootstrap.coordinator === true
900
+ ? {}
901
+ : config.bootstrap.coordinator;
902
+ const bootstrapCwd = opts.cwd ?? cwd;
903
+ const policy = config.bootstrap.rehydrate ?? "coordinators";
904
+
905
+ // Build the revival set based on policy:
906
+ // - 'none' → empty set (always fall through to fresh spawn)
907
+ // - 'coordinators' → root coordinators at this cwd, running or stopped
908
+ // - 'all' → every agent at this cwd, running or stopped
909
+ //
910
+ // Both 'running' and 'stopped' count as revival candidates. The
911
+ // hosted-swarm graceful-restart path transitions agents to 'stopped'
912
+ // on shutdown; an abrupt parent crash leaves them as 'running'.
913
+ // Either way, the workspace intent survives the restart and we want
914
+ // the same coordinators + their children back. Agents that a user
915
+ // explicitly terminated are tracked with a distinct `stop_reason`
916
+ // and — since explicit termination clears them from the cascade — do
917
+ // not appear in the listAgents result at a cwd they no longer
918
+ // inhabit. The `state='failed'` set is also excluded.
919
+ let priors: import("./agent/agent-store.js").AgentRecord[] = [];
920
+ if (policy === "coordinators") {
921
+ priors = agentStore
922
+ .listAgents({ parent_id: null, role: "coordinator" })
923
+ .filter(
924
+ (a) =>
925
+ a.cwd === bootstrapCwd &&
926
+ (a.state === "running" || a.state === "stopped"),
927
+ );
928
+ } else if (policy === "all") {
929
+ priors = agentStore
930
+ .listAgents()
931
+ .filter(
932
+ (a) =>
933
+ a.cwd === bootstrapCwd &&
934
+ (a.state === "running" || a.state === "stopped"),
935
+ );
936
+ }
937
+
938
+ const rehydrateOrSpawn = async () => {
939
+ if (priors.length > 0) {
940
+ // Parent-first ordering so a child's `resume()` sees its parent
941
+ // already back (lineage bookkeeping, inbox subscriptions). Depth
942
+ // = lineage.length: roots are 0, direct children of roots are 1.
943
+ const byDepth = new Map<number, typeof priors>();
944
+ for (const p of priors) {
945
+ const d = p.lineage.length;
946
+ if (!byDepth.has(d)) byDepth.set(d, []);
947
+ byDepth.get(d)!.push(p);
948
+ }
949
+ const depths = Array.from(byDepth.keys()).sort((a, b) => a - b);
950
+
951
+ const priorIds = new Set(priors.map((p) => p.id));
952
+ const resumed = new Set<string>();
953
+ const failed = new Set<string>();
954
+
955
+ // Stagger spawns — each resume fires a Claude Code subprocess and
956
+ // we don't want a coordinator + five workers all booting at once.
957
+ const CONCURRENCY = 2;
958
+
959
+ for (const depth of depths) {
960
+ const atDepth = byDepth.get(depth)!;
961
+ const eligible = atDepth.filter((a) => {
962
+ if (!a.parent_id) return true; // roots are always eligible
963
+ // Skip children whose parent isn't being revived at all
964
+ // (deliberately stopped, or out of scope for this policy).
965
+ if (!priorIds.has(a.parent_id)) {
966
+ console.warn(
967
+ `[boot-v2] Skipping ${a.role} ${a.id}: parent ${a.parent_id} not in revival set`,
968
+ );
969
+ return false;
970
+ }
971
+ // Skip children whose parent resume failed.
972
+ if (failed.has(a.parent_id)) {
973
+ console.warn(
974
+ `[boot-v2] Skipping ${a.role} ${a.id}: parent ${a.parent_id} failed to resume`,
975
+ );
976
+ return false;
977
+ }
978
+ return resumed.has(a.parent_id);
979
+ });
980
+
981
+ for (let i = 0; i < eligible.length; i += CONCURRENCY) {
982
+ const batch = eligible.slice(i, i + CONCURRENCY);
983
+ await Promise.all(
984
+ batch.map(async (prior) => {
985
+ try {
986
+ const r = await agentManager.resume(prior.id);
987
+ resumed.add(prior.id);
988
+ console.log(
989
+ `[boot-v2] Rehydrated ${prior.role}: ${(r as any).name ?? r.id} at ${prior.cwd}`,
990
+ );
991
+ } catch (err) {
992
+ const msg = (err as Error).message;
993
+ if (/ALREADY_RUNNING/i.test(msg)) {
994
+ // Rare lifecycle race — treat as success so children
995
+ // aren't held back waiting on a parent that's actually
996
+ // already alive.
997
+ resumed.add(prior.id);
998
+ } else {
999
+ failed.add(prior.id);
1000
+ console.warn(
1001
+ `[boot-v2] Failed to rehydrate ${prior.role} ${prior.id}: ${msg}`,
1002
+ );
1003
+ }
1004
+ }
1005
+ }),
1006
+ );
1007
+ }
1008
+ }
1009
+ return;
1010
+ }
1011
+ // No priors matched the policy → fresh spawn (first boot, or 'none').
1012
+ const spawned = await agentManager.spawn({
1013
+ role: "coordinator",
1014
+ parent: null,
1015
+ cwd: bootstrapCwd,
1016
+ task: opts.task ?? "Default coordinator (auto-spawn on boot)",
1017
+ permissionMode: opts.permissionMode,
1018
+ agentType: opts.agentType,
1019
+ customPrompt: opts.customPrompt,
1020
+ });
1021
+ console.log(
1022
+ `[boot-v2] Bootstrap coordinator spawned: ${(spawned as any).name ?? spawned.id} at ${bootstrapCwd}`,
1023
+ );
1024
+ };
1025
+
1026
+ rehydrateOrSpawn().catch((err: Error) => {
1027
+ console.warn(
1028
+ `[boot-v2] Bootstrap coordinator init failed: ${err.message}`,
1029
+ );
1030
+ });
1031
+ }
1032
+
1033
+ // 14. Return system handle
550
1034
  return {
551
1035
  agentManager,
552
1036
  agentStore,
@@ -556,14 +1040,17 @@ export async function bootV2(
556
1040
  controlServer,
557
1041
  roleRegistry,
558
1042
  controlSocketPath,
1043
+ ...(taskDispatcher ? { taskDispatcher } : {}),
559
1044
  ...(apiServer ? { apiServer } : {}),
560
1045
  ...(acpServer ? { acpServer } : {}),
561
1046
  ...(mapServerInstance ? { mapServerInstance } : {}),
562
1047
  ...(mapSidecar ? { mapSidecar } : {}),
563
- _sessionlogSyncLevel: config.sessionlog?.sync ?? config.map?.trajectorySyncLevel ?? "full",
1048
+ _sessionlogSyncLevel:
1049
+ config.sessionlog?.sync ?? config.map?.trajectorySyncLevel ?? "full",
564
1050
 
565
1051
  async shutdown(): Promise<void> {
566
1052
  clearInterval(healthCheckTimer);
1053
+ if (taskDispatcher) await taskDispatcher.stop();
567
1054
  if (mapSidecar) await mapSidecar.stop();
568
1055
  if (mapServerInstance) await mapServerInstance.stop();
569
1056
  if (federationCleanup) federationCleanup();