@stoneforge/smithy 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +13 -0
- package/README.md +114 -0
- package/dist/api/index.d.ts +7 -0
- package/dist/api/index.d.ts.map +1 -0
- package/dist/api/index.js +7 -0
- package/dist/api/index.js.map +1 -0
- package/dist/api/orchestrator-api.d.ts +153 -0
- package/dist/api/orchestrator-api.d.ts.map +1 -0
- package/dist/api/orchestrator-api.js +374 -0
- package/dist/api/orchestrator-api.js.map +1 -0
- package/dist/bin/sf.d.ts +3 -0
- package/dist/bin/sf.d.ts.map +1 -0
- package/dist/bin/sf.js +10 -0
- package/dist/bin/sf.js.map +1 -0
- package/dist/cli/commands/agent.d.ts +20 -0
- package/dist/cli/commands/agent.d.ts.map +1 -0
- package/dist/cli/commands/agent.js +861 -0
- package/dist/cli/commands/agent.js.map +1 -0
- package/dist/cli/commands/daemon.d.ts +14 -0
- package/dist/cli/commands/daemon.d.ts.map +1 -0
- package/dist/cli/commands/daemon.js +272 -0
- package/dist/cli/commands/daemon.js.map +1 -0
- package/dist/cli/commands/dispatch.d.ts +9 -0
- package/dist/cli/commands/dispatch.d.ts.map +1 -0
- package/dist/cli/commands/dispatch.js +128 -0
- package/dist/cli/commands/dispatch.js.map +1 -0
- package/dist/cli/commands/merge.d.ts +11 -0
- package/dist/cli/commands/merge.d.ts.map +1 -0
- package/dist/cli/commands/merge.js +246 -0
- package/dist/cli/commands/merge.js.map +1 -0
- package/dist/cli/commands/pool.d.ts +21 -0
- package/dist/cli/commands/pool.d.ts.map +1 -0
- package/dist/cli/commands/pool.js +762 -0
- package/dist/cli/commands/pool.js.map +1 -0
- package/dist/cli/commands/serve.d.ts +54 -0
- package/dist/cli/commands/serve.d.ts.map +1 -0
- package/dist/cli/commands/serve.js +57 -0
- package/dist/cli/commands/serve.js.map +1 -0
- package/dist/cli/commands/task.d.ts +36 -0
- package/dist/cli/commands/task.d.ts.map +1 -0
- package/dist/cli/commands/task.js +889 -0
- package/dist/cli/commands/task.js.map +1 -0
- package/dist/cli/commands/test-orchestration.d.ts +32 -0
- package/dist/cli/commands/test-orchestration.d.ts.map +1 -0
- package/dist/cli/commands/test-orchestration.js +392 -0
- package/dist/cli/commands/test-orchestration.js.map +1 -0
- package/dist/cli/index.d.ts +13 -0
- package/dist/cli/index.d.ts.map +1 -0
- package/dist/cli/index.js +15 -0
- package/dist/cli/index.js.map +1 -0
- package/dist/cli/plugin.d.ts +23 -0
- package/dist/cli/plugin.d.ts.map +1 -0
- package/dist/cli/plugin.js +36 -0
- package/dist/cli/plugin.js.map +1 -0
- package/dist/git/index.d.ts +10 -0
- package/dist/git/index.d.ts.map +1 -0
- package/dist/git/index.js +12 -0
- package/dist/git/index.js.map +1 -0
- package/dist/git/merge.d.ts +79 -0
- package/dist/git/merge.d.ts.map +1 -0
- package/dist/git/merge.js +254 -0
- package/dist/git/merge.js.map +1 -0
- package/dist/git/worktree-manager.d.ts +299 -0
- package/dist/git/worktree-manager.d.ts.map +1 -0
- package/dist/git/worktree-manager.js +744 -0
- package/dist/git/worktree-manager.js.map +1 -0
- package/dist/index.d.ts +24 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +31 -0
- package/dist/index.js.map +1 -0
- package/dist/prompts/director.md +272 -0
- package/dist/prompts/index.d.ts +100 -0
- package/dist/prompts/index.d.ts.map +1 -0
- package/dist/prompts/index.js +294 -0
- package/dist/prompts/index.js.map +1 -0
- package/dist/prompts/message-triage.md +50 -0
- package/dist/prompts/persistent-worker.md +240 -0
- package/dist/prompts/steward-base.md +64 -0
- package/dist/prompts/steward-docs.md +118 -0
- package/dist/prompts/steward-health.md +39 -0
- package/dist/prompts/steward-merge.md +168 -0
- package/dist/prompts/steward-ops.md +28 -0
- package/dist/prompts/steward-reminder.md +26 -0
- package/dist/prompts/worker.md +282 -0
- package/dist/providers/claude/headless.d.ts +18 -0
- package/dist/providers/claude/headless.d.ts.map +1 -0
- package/dist/providers/claude/headless.js +307 -0
- package/dist/providers/claude/headless.js.map +1 -0
- package/dist/providers/claude/index.d.ts +24 -0
- package/dist/providers/claude/index.d.ts.map +1 -0
- package/dist/providers/claude/index.js +80 -0
- package/dist/providers/claude/index.js.map +1 -0
- package/dist/providers/claude/interactive.d.ts +21 -0
- package/dist/providers/claude/interactive.d.ts.map +1 -0
- package/dist/providers/claude/interactive.js +142 -0
- package/dist/providers/claude/interactive.js.map +1 -0
- package/dist/providers/codex/event-mapper.d.ts +91 -0
- package/dist/providers/codex/event-mapper.d.ts.map +1 -0
- package/dist/providers/codex/event-mapper.js +299 -0
- package/dist/providers/codex/event-mapper.js.map +1 -0
- package/dist/providers/codex/headless.d.ts +20 -0
- package/dist/providers/codex/headless.d.ts.map +1 -0
- package/dist/providers/codex/headless.js +174 -0
- package/dist/providers/codex/headless.js.map +1 -0
- package/dist/providers/codex/index.d.ts +30 -0
- package/dist/providers/codex/index.d.ts.map +1 -0
- package/dist/providers/codex/index.js +55 -0
- package/dist/providers/codex/index.js.map +1 -0
- package/dist/providers/codex/interactive.d.ts +21 -0
- package/dist/providers/codex/interactive.d.ts.map +1 -0
- package/dist/providers/codex/interactive.js +141 -0
- package/dist/providers/codex/interactive.js.map +1 -0
- package/dist/providers/codex/jsonrpc-client.d.ts +52 -0
- package/dist/providers/codex/jsonrpc-client.d.ts.map +1 -0
- package/dist/providers/codex/jsonrpc-client.js +141 -0
- package/dist/providers/codex/jsonrpc-client.js.map +1 -0
- package/dist/providers/codex/server-manager.d.ts +100 -0
- package/dist/providers/codex/server-manager.d.ts.map +1 -0
- package/dist/providers/codex/server-manager.js +153 -0
- package/dist/providers/codex/server-manager.js.map +1 -0
- package/dist/providers/index.d.ts +15 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +19 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/opencode/async-queue.d.ts +21 -0
- package/dist/providers/opencode/async-queue.d.ts.map +1 -0
- package/dist/providers/opencode/async-queue.js +51 -0
- package/dist/providers/opencode/async-queue.js.map +1 -0
- package/dist/providers/opencode/event-mapper.d.ts +132 -0
- package/dist/providers/opencode/event-mapper.d.ts.map +1 -0
- package/dist/providers/opencode/event-mapper.js +204 -0
- package/dist/providers/opencode/event-mapper.js.map +1 -0
- package/dist/providers/opencode/headless.d.ts +25 -0
- package/dist/providers/opencode/headless.d.ts.map +1 -0
- package/dist/providers/opencode/headless.js +190 -0
- package/dist/providers/opencode/headless.js.map +1 -0
- package/dist/providers/opencode/index.d.ts +33 -0
- package/dist/providers/opencode/index.d.ts.map +1 -0
- package/dist/providers/opencode/index.js +42 -0
- package/dist/providers/opencode/index.js.map +1 -0
- package/dist/providers/opencode/interactive.d.ts +21 -0
- package/dist/providers/opencode/interactive.d.ts.map +1 -0
- package/dist/providers/opencode/interactive.js +135 -0
- package/dist/providers/opencode/interactive.js.map +1 -0
- package/dist/providers/opencode/server-manager.d.ts +145 -0
- package/dist/providers/opencode/server-manager.d.ts.map +1 -0
- package/dist/providers/opencode/server-manager.js +163 -0
- package/dist/providers/opencode/server-manager.js.map +1 -0
- package/dist/providers/registry.d.ts +38 -0
- package/dist/providers/registry.d.ts.map +1 -0
- package/dist/providers/registry.js +82 -0
- package/dist/providers/registry.js.map +1 -0
- package/dist/providers/types.d.ts +144 -0
- package/dist/providers/types.d.ts.map +1 -0
- package/dist/providers/types.js +25 -0
- package/dist/providers/types.js.map +1 -0
- package/dist/runtime/event-utils.d.ts +8 -0
- package/dist/runtime/event-utils.d.ts.map +1 -0
- package/dist/runtime/event-utils.js +23 -0
- package/dist/runtime/event-utils.js.map +1 -0
- package/dist/runtime/handoff.d.ts +195 -0
- package/dist/runtime/handoff.d.ts.map +1 -0
- package/dist/runtime/handoff.js +332 -0
- package/dist/runtime/handoff.js.map +1 -0
- package/dist/runtime/index.d.ts +17 -0
- package/dist/runtime/index.d.ts.map +1 -0
- package/dist/runtime/index.js +60 -0
- package/dist/runtime/index.js.map +1 -0
- package/dist/runtime/message-mapper.d.ts +99 -0
- package/dist/runtime/message-mapper.d.ts.map +1 -0
- package/dist/runtime/message-mapper.js +202 -0
- package/dist/runtime/message-mapper.js.map +1 -0
- package/dist/runtime/predecessor-query.d.ts +212 -0
- package/dist/runtime/predecessor-query.d.ts.map +1 -0
- package/dist/runtime/predecessor-query.js +283 -0
- package/dist/runtime/predecessor-query.js.map +1 -0
- package/dist/runtime/session-manager.d.ts +466 -0
- package/dist/runtime/session-manager.d.ts.map +1 -0
- package/dist/runtime/session-manager.js +986 -0
- package/dist/runtime/session-manager.js.map +1 -0
- package/dist/runtime/spawner.d.ts +407 -0
- package/dist/runtime/spawner.d.ts.map +1 -0
- package/dist/runtime/spawner.js +781 -0
- package/dist/runtime/spawner.js.map +1 -0
- package/dist/server/config.d.ts +22 -0
- package/dist/server/config.d.ts.map +1 -0
- package/dist/server/config.js +59 -0
- package/dist/server/config.js.map +1 -0
- package/dist/server/daemon-state.d.ts +50 -0
- package/dist/server/daemon-state.d.ts.map +1 -0
- package/dist/server/daemon-state.js +100 -0
- package/dist/server/daemon-state.js.map +1 -0
- package/dist/server/events-websocket.d.ts +32 -0
- package/dist/server/events-websocket.d.ts.map +1 -0
- package/dist/server/events-websocket.js +96 -0
- package/dist/server/events-websocket.js.map +1 -0
- package/dist/server/formatters.d.ts +94 -0
- package/dist/server/formatters.d.ts.map +1 -0
- package/dist/server/formatters.js +142 -0
- package/dist/server/formatters.js.map +1 -0
- package/dist/server/index.d.ts +17 -0
- package/dist/server/index.d.ts.map +1 -0
- package/dist/server/index.js +153 -0
- package/dist/server/index.js.map +1 -0
- package/dist/server/lsp-websocket.d.ts +33 -0
- package/dist/server/lsp-websocket.d.ts.map +1 -0
- package/dist/server/lsp-websocket.js +161 -0
- package/dist/server/lsp-websocket.js.map +1 -0
- package/dist/server/routes/agents.d.ts +9 -0
- package/dist/server/routes/agents.d.ts.map +1 -0
- package/dist/server/routes/agents.js +369 -0
- package/dist/server/routes/agents.js.map +1 -0
- package/dist/server/routes/daemon.d.ts +13 -0
- package/dist/server/routes/daemon.d.ts.map +1 -0
- package/dist/server/routes/daemon.js +187 -0
- package/dist/server/routes/daemon.js.map +1 -0
- package/dist/server/routes/events.d.ts +23 -0
- package/dist/server/routes/events.d.ts.map +1 -0
- package/dist/server/routes/events.js +282 -0
- package/dist/server/routes/events.js.map +1 -0
- package/dist/server/routes/extensions.d.ts +9 -0
- package/dist/server/routes/extensions.d.ts.map +1 -0
- package/dist/server/routes/extensions.js +202 -0
- package/dist/server/routes/extensions.js.map +1 -0
- package/dist/server/routes/health.d.ts +7 -0
- package/dist/server/routes/health.d.ts.map +1 -0
- package/dist/server/routes/health.js +33 -0
- package/dist/server/routes/health.js.map +1 -0
- package/dist/server/routes/index.d.ts +21 -0
- package/dist/server/routes/index.d.ts.map +1 -0
- package/dist/server/routes/index.js +21 -0
- package/dist/server/routes/index.js.map +1 -0
- package/dist/server/routes/lsp.d.ts +9 -0
- package/dist/server/routes/lsp.d.ts.map +1 -0
- package/dist/server/routes/lsp.js +50 -0
- package/dist/server/routes/lsp.js.map +1 -0
- package/dist/server/routes/plugins.d.ts +9 -0
- package/dist/server/routes/plugins.d.ts.map +1 -0
- package/dist/server/routes/plugins.js +109 -0
- package/dist/server/routes/plugins.js.map +1 -0
- package/dist/server/routes/pools.d.ts +9 -0
- package/dist/server/routes/pools.d.ts.map +1 -0
- package/dist/server/routes/pools.js +189 -0
- package/dist/server/routes/pools.js.map +1 -0
- package/dist/server/routes/scheduler.d.ts +9 -0
- package/dist/server/routes/scheduler.d.ts.map +1 -0
- package/dist/server/routes/scheduler.js +162 -0
- package/dist/server/routes/scheduler.js.map +1 -0
- package/dist/server/routes/sessions.d.ts +27 -0
- package/dist/server/routes/sessions.d.ts.map +1 -0
- package/dist/server/routes/sessions.js +773 -0
- package/dist/server/routes/sessions.js.map +1 -0
- package/dist/server/routes/tasks.d.ts +9 -0
- package/dist/server/routes/tasks.d.ts.map +1 -0
- package/dist/server/routes/tasks.js +954 -0
- package/dist/server/routes/tasks.js.map +1 -0
- package/dist/server/routes/upload.d.ts +8 -0
- package/dist/server/routes/upload.d.ts.map +1 -0
- package/dist/server/routes/upload.js +40 -0
- package/dist/server/routes/upload.js.map +1 -0
- package/dist/server/routes/workflows.d.ts +9 -0
- package/dist/server/routes/workflows.d.ts.map +1 -0
- package/dist/server/routes/workflows.js +532 -0
- package/dist/server/routes/workflows.js.map +1 -0
- package/dist/server/routes/workspace-files.d.ts +12 -0
- package/dist/server/routes/workspace-files.d.ts.map +1 -0
- package/dist/server/routes/workspace-files.js +520 -0
- package/dist/server/routes/workspace-files.js.map +1 -0
- package/dist/server/routes/worktrees.d.ts +9 -0
- package/dist/server/routes/worktrees.d.ts.map +1 -0
- package/dist/server/routes/worktrees.js +94 -0
- package/dist/server/routes/worktrees.js.map +1 -0
- package/dist/server/server.d.ts +14 -0
- package/dist/server/server.d.ts.map +1 -0
- package/dist/server/server.js +258 -0
- package/dist/server/server.js.map +1 -0
- package/dist/server/services/lsp-manager.d.ts +93 -0
- package/dist/server/services/lsp-manager.d.ts.map +1 -0
- package/dist/server/services/lsp-manager.js +291 -0
- package/dist/server/services/lsp-manager.js.map +1 -0
- package/dist/server/services/session-messages.d.ts +61 -0
- package/dist/server/services/session-messages.d.ts.map +1 -0
- package/dist/server/services/session-messages.js +101 -0
- package/dist/server/services/session-messages.js.map +1 -0
- package/dist/server/services.d.ts +35 -0
- package/dist/server/services.d.ts.map +1 -0
- package/dist/server/services.js +159 -0
- package/dist/server/services.js.map +1 -0
- package/dist/server/static.d.ts +18 -0
- package/dist/server/static.d.ts.map +1 -0
- package/dist/server/static.js +71 -0
- package/dist/server/static.js.map +1 -0
- package/dist/server/types.d.ts +20 -0
- package/dist/server/types.d.ts.map +1 -0
- package/dist/server/types.js +7 -0
- package/dist/server/types.js.map +1 -0
- package/dist/server/websocket.d.ts +16 -0
- package/dist/server/websocket.d.ts.map +1 -0
- package/dist/server/websocket.js +143 -0
- package/dist/server/websocket.js.map +1 -0
- package/dist/services/agent-pool-service.d.ts +181 -0
- package/dist/services/agent-pool-service.d.ts.map +1 -0
- package/dist/services/agent-pool-service.js +590 -0
- package/dist/services/agent-pool-service.js.map +1 -0
- package/dist/services/agent-registry.d.ts +185 -0
- package/dist/services/agent-registry.d.ts.map +1 -0
- package/dist/services/agent-registry.js +432 -0
- package/dist/services/agent-registry.js.map +1 -0
- package/dist/services/dispatch-daemon.d.ts +429 -0
- package/dist/services/dispatch-daemon.d.ts.map +1 -0
- package/dist/services/dispatch-daemon.js +1833 -0
- package/dist/services/dispatch-daemon.js.map +1 -0
- package/dist/services/dispatch-service.d.ts +148 -0
- package/dist/services/dispatch-service.d.ts.map +1 -0
- package/dist/services/dispatch-service.js +170 -0
- package/dist/services/dispatch-service.js.map +1 -0
- package/dist/services/docs-steward-service.d.ts +199 -0
- package/dist/services/docs-steward-service.d.ts.map +1 -0
- package/dist/services/docs-steward-service.js +599 -0
- package/dist/services/docs-steward-service.js.map +1 -0
- package/dist/services/health-steward-service.d.ts +446 -0
- package/dist/services/health-steward-service.d.ts.map +1 -0
- package/dist/services/health-steward-service.js +866 -0
- package/dist/services/health-steward-service.js.map +1 -0
- package/dist/services/index.d.ts +26 -0
- package/dist/services/index.d.ts.map +1 -0
- package/dist/services/index.js +111 -0
- package/dist/services/index.js.map +1 -0
- package/dist/services/merge-request-provider.d.ts +59 -0
- package/dist/services/merge-request-provider.d.ts.map +1 -0
- package/dist/services/merge-request-provider.js +89 -0
- package/dist/services/merge-request-provider.js.map +1 -0
- package/dist/services/merge-steward-service.d.ts +268 -0
- package/dist/services/merge-steward-service.d.ts.map +1 -0
- package/dist/services/merge-steward-service.js +568 -0
- package/dist/services/merge-steward-service.js.map +1 -0
- package/dist/services/plugin-executor.d.ts +247 -0
- package/dist/services/plugin-executor.d.ts.map +1 -0
- package/dist/services/plugin-executor.js +451 -0
- package/dist/services/plugin-executor.js.map +1 -0
- package/dist/services/role-definition-service.d.ts +117 -0
- package/dist/services/role-definition-service.d.ts.map +1 -0
- package/dist/services/role-definition-service.js +289 -0
- package/dist/services/role-definition-service.js.map +1 -0
- package/dist/services/steward-scheduler.d.ts +336 -0
- package/dist/services/steward-scheduler.d.ts.map +1 -0
- package/dist/services/steward-scheduler.js +732 -0
- package/dist/services/steward-scheduler.js.map +1 -0
- package/dist/services/task-assignment-service.d.ts +291 -0
- package/dist/services/task-assignment-service.d.ts.map +1 -0
- package/dist/services/task-assignment-service.js +454 -0
- package/dist/services/task-assignment-service.js.map +1 -0
- package/dist/services/worker-task-service.d.ts +202 -0
- package/dist/services/worker-task-service.d.ts.map +1 -0
- package/dist/services/worker-task-service.js +228 -0
- package/dist/services/worker-task-service.js.map +1 -0
- package/dist/testing/index.d.ts +13 -0
- package/dist/testing/index.d.ts.map +1 -0
- package/dist/testing/index.js +17 -0
- package/dist/testing/index.js.map +1 -0
- package/dist/testing/orchestration-tests.d.ts +62 -0
- package/dist/testing/orchestration-tests.d.ts.map +1 -0
- package/dist/testing/orchestration-tests.js +1115 -0
- package/dist/testing/orchestration-tests.js.map +1 -0
- package/dist/testing/test-context.d.ts +171 -0
- package/dist/testing/test-context.d.ts.map +1 -0
- package/dist/testing/test-context.js +665 -0
- package/dist/testing/test-context.js.map +1 -0
- package/dist/testing/test-prompts.d.ts +46 -0
- package/dist/testing/test-prompts.d.ts.map +1 -0
- package/dist/testing/test-prompts.js +140 -0
- package/dist/testing/test-prompts.js.map +1 -0
- package/dist/testing/test-utils.d.ts +200 -0
- package/dist/testing/test-utils.d.ts.map +1 -0
- package/dist/testing/test-utils.js +378 -0
- package/dist/testing/test-utils.js.map +1 -0
- package/dist/types/agent-pool.d.ts +215 -0
- package/dist/types/agent-pool.d.ts.map +1 -0
- package/dist/types/agent-pool.js +143 -0
- package/dist/types/agent-pool.js.map +1 -0
- package/dist/types/agent.d.ts +265 -0
- package/dist/types/agent.d.ts.map +1 -0
- package/dist/types/agent.js +127 -0
- package/dist/types/agent.js.map +1 -0
- package/dist/types/index.d.ts +11 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +40 -0
- package/dist/types/index.js.map +1 -0
- package/dist/types/message-types.d.ts +294 -0
- package/dist/types/message-types.d.ts.map +1 -0
- package/dist/types/message-types.js +354 -0
- package/dist/types/message-types.js.map +1 -0
- package/dist/types/role-definition.d.ts +272 -0
- package/dist/types/role-definition.d.ts.map +1 -0
- package/dist/types/role-definition.js +144 -0
- package/dist/types/role-definition.js.map +1 -0
- package/dist/types/task-meta.d.ts +248 -0
- package/dist/types/task-meta.d.ts.map +1 -0
- package/dist/types/task-meta.js +213 -0
- package/dist/types/task-meta.js.map +1 -0
- package/package.json +120 -0
- package/web/assets/abap-BrgZPUOV.js +6 -0
- package/web/assets/apex-DyP6w7ZV.js +6 -0
- package/web/assets/azcli-BaLxmfj-.js +6 -0
- package/web/assets/bat-CFOPXBzS.js +6 -0
- package/web/assets/bicep-BfEKNvv3.js +7 -0
- package/web/assets/cameligo-BFG1Mk7z.js +6 -0
- package/web/assets/clojure-DTECt2xU.js +6 -0
- package/web/assets/codicon-DCmgc-ay.ttf +0 -0
- package/web/assets/coffee-CDGzqUPQ.js +6 -0
- package/web/assets/cpp-CLLBncYj.js +6 -0
- package/web/assets/csharp-dUCx_-0o.js +6 -0
- package/web/assets/csp-5Rap-vPy.js +6 -0
- package/web/assets/css-D3h14YRZ.js +8 -0
- package/web/assets/cssMode-DMo-5YLA.js +9 -0
- package/web/assets/cypher-DrQuvNYM.js +6 -0
- package/web/assets/dart-CFKIUWau.js +6 -0
- package/web/assets/dockerfile-Zznr-cwX.js +6 -0
- package/web/assets/ecl-Ce3n6wWz.js +6 -0
- package/web/assets/elixir-deUWdS0T.js +6 -0
- package/web/assets/flow9-i9-g7ZhI.js +6 -0
- package/web/assets/freemarker2-D4qgkQzN.js +8 -0
- package/web/assets/fsharp-CzKuDChf.js +6 -0
- package/web/assets/go-Cphgjts3.js +6 -0
- package/web/assets/graphql-Cg7bfA9N.js +6 -0
- package/web/assets/handlebars-CXFvNjQC.js +6 -0
- package/web/assets/hcl-0cvrggvQ.js +6 -0
- package/web/assets/html-oyuB_D-B.js +6 -0
- package/web/assets/htmlMode-iWuZ24-r.js +9 -0
- package/web/assets/index-DqP-_E4F.css +32 -0
- package/web/assets/index-R1cylSgw.js +1665 -0
- package/web/assets/ini-Drc7WvVn.js +6 -0
- package/web/assets/java-B_fMsGYe.js +6 -0
- package/web/assets/javascript-CRIkN2Pg.js +6 -0
- package/web/assets/jsonMode-DVDkDgex.js +15 -0
- package/web/assets/julia-Bqgm2twL.js +6 -0
- package/web/assets/kotlin-BSkB5QuD.js +6 -0
- package/web/assets/less-BsTHnhdd.js +7 -0
- package/web/assets/lexon-YWi4-JPR.js +6 -0
- package/web/assets/liquid-CSfldbB5.js +6 -0
- package/web/assets/lua-nf6ki56Z.js +6 -0
- package/web/assets/m3-Cpb6xl2v.js +6 -0
- package/web/assets/markdown-DSZPf7rp.js +6 -0
- package/web/assets/mdx-Dd58iymR.js +6 -0
- package/web/assets/mips-B_c3zf-v.js +6 -0
- package/web/assets/monaco-editor-B4lwqA13.js +751 -0
- package/web/assets/monaco-editor-CQpyCxOA.css +1 -0
- package/web/assets/msdax-rUNN04Wq.js +6 -0
- package/web/assets/mysql-DDwshQtU.js +6 -0
- package/web/assets/objective-c-B5zXfXm9.js +6 -0
- package/web/assets/pascal-CXOwvkN_.js +6 -0
- package/web/assets/pascaligo-Bc-ZgV77.js +6 -0
- package/web/assets/perl-CwNk8-XU.js +6 -0
- package/web/assets/pgsql-tGk8EFnU.js +6 -0
- package/web/assets/php-CpIb_Oan.js +6 -0
- package/web/assets/pla-B03wrqEc.js +6 -0
- package/web/assets/postiats-BKlk5iyT.js +6 -0
- package/web/assets/powerquery-Bhzvs7bI.js +6 -0
- package/web/assets/powershell-Dd3NCNK9.js +6 -0
- package/web/assets/protobuf-COyEY5Pt.js +7 -0
- package/web/assets/pug-BaJupSGV.js +6 -0
- package/web/assets/python-XWrMqdhO.js +6 -0
- package/web/assets/qsharp-DXyYeYxl.js +6 -0
- package/web/assets/r-CdQndTaG.js +6 -0
- package/web/assets/razor-DPlhCpIs.js +6 -0
- package/web/assets/redis-CVwtpugi.js +6 -0
- package/web/assets/redshift-25W9uPmb.js +6 -0
- package/web/assets/restructuredtext-DfzH4Xui.js +6 -0
- package/web/assets/router-vendor-DHlGizSU.js +41 -0
- package/web/assets/ruby-Cp1zYvxS.js +6 -0
- package/web/assets/rust-D5C2fndG.js +6 -0
- package/web/assets/sb-CDntyWJ8.js +6 -0
- package/web/assets/scala-BoFRg7Ot.js +6 -0
- package/web/assets/scheme-Bio4gycK.js +6 -0
- package/web/assets/scss-4Ik7cdeQ.js +8 -0
- package/web/assets/shell-CX-rkNHf.js +6 -0
- package/web/assets/solidity-Tw7wswEv.js +6 -0
- package/web/assets/sophia-C5WLch3f.js +6 -0
- package/web/assets/sparql-DHaeiCBh.js +6 -0
- package/web/assets/sql-CCSDG5nI.js +6 -0
- package/web/assets/st-pnP8ivHi.js +6 -0
- package/web/assets/swift-DwJ7jVG9.js +8 -0
- package/web/assets/systemverilog-B9Xyijhd.js +6 -0
- package/web/assets/tcl-DnHyzjbg.js +6 -0
- package/web/assets/tsMode-BbA1Jbf3.js +16 -0
- package/web/assets/twig-CPajHgWi.js +6 -0
- package/web/assets/typescript-DcLHYzvH.js +6 -0
- package/web/assets/typespec-D-MeaMDU.js +6 -0
- package/web/assets/ui-vendor-BSco96uv.js +51 -0
- package/web/assets/utils-vendor-DaJ2Dubl.js +911 -0
- package/web/assets/vb-DgyLZaXg.js +6 -0
- package/web/assets/wgsl-DYQUnd45.js +303 -0
- package/web/assets/xml-xKYS3dO6.js +6 -0
- package/web/assets/yaml-CNmlXqzH.js +6 -0
- package/web/favicon.ico +0 -0
- package/web/index.html +22 -0
- package/web/logo.png +0 -0
|
@@ -0,0 +1,1833 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Dispatch Daemon Service
|
|
3
|
+
*
|
|
4
|
+
* This daemon runs continuous polling loops to coordinate task assignment
|
|
5
|
+
* and message delivery across all agents in the orchestration system.
|
|
6
|
+
*
|
|
7
|
+
* Key features:
|
|
8
|
+
* - Worker availability polling: Assigns unassigned tasks to available workers
|
|
9
|
+
* - Inbox polling: Delivers messages and spawns agents when needed
|
|
10
|
+
* - Steward trigger polling: Activates stewards based on scheduled triggers
|
|
11
|
+
* - Workflow task polling: Assigns workflow tasks to available stewards
|
|
12
|
+
*
|
|
13
|
+
* The daemon implements the dispatch behavior defined in ORCHESTRATION_PLAN.md:
|
|
14
|
+
* - Workers are spawned INSIDE their worktree directory
|
|
15
|
+
* - Handoff branches are reused when present in task metadata
|
|
16
|
+
* - Inbox polling: Routes messages by role (triage for ephemeral, forward for persistent)
|
|
17
|
+
*
|
|
18
|
+
* @module
|
|
19
|
+
*/
|
|
20
|
+
import { EventEmitter } from 'node:events';
|
|
21
|
+
import { InboxStatus, createTimestamp, TaskStatus, asEntityId, asElementId } from '@stoneforge/core';
|
|
22
|
+
import { loadTriagePrompt, loadRolePrompt } from '../prompts/index.js';
|
|
23
|
+
import { getAgentMetadata } from './agent-registry.js';
|
|
24
|
+
import { getOrchestratorTaskMeta, updateOrchestratorTaskMeta, appendTaskSessionHistory, } from '../types/task-meta.js';
|
|
25
|
+
// ============================================================================
|
|
26
|
+
// Constants
|
|
27
|
+
// ============================================================================
|
|
28
|
+
/**
|
|
29
|
+
* Default poll interval in milliseconds for dispatch daemon (5 seconds)
|
|
30
|
+
*/
|
|
31
|
+
export const DISPATCH_DAEMON_DEFAULT_POLL_INTERVAL_MS = 5000;
|
|
32
|
+
/**
|
|
33
|
+
* Minimum poll interval in milliseconds for dispatch daemon (1 second)
|
|
34
|
+
*/
|
|
35
|
+
export const DISPATCH_DAEMON_MIN_POLL_INTERVAL_MS = 1000;
|
|
36
|
+
/**
|
|
37
|
+
* Maximum poll interval in milliseconds for dispatch daemon (1 minute)
|
|
38
|
+
*/
|
|
39
|
+
export const DISPATCH_DAEMON_MAX_POLL_INTERVAL_MS = 60000;
|
|
40
|
+
// ============================================================================
|
|
41
|
+
// Dispatch Daemon Implementation
|
|
42
|
+
// ============================================================================
|
|
43
|
+
/**
|
|
44
|
+
* Implementation of the Dispatch Daemon.
|
|
45
|
+
*/
|
|
46
|
+
export class DispatchDaemonImpl {
|
|
47
|
+
api;
|
|
48
|
+
agentRegistry;
|
|
49
|
+
sessionManager;
|
|
50
|
+
dispatchService;
|
|
51
|
+
worktreeManager;
|
|
52
|
+
taskAssignment;
|
|
53
|
+
stewardScheduler;
|
|
54
|
+
inboxService;
|
|
55
|
+
poolService;
|
|
56
|
+
emitter;
|
|
57
|
+
config;
|
|
58
|
+
running = false;
|
|
59
|
+
polling = false;
|
|
60
|
+
pollIntervalHandle;
|
|
61
|
+
currentPollCycle;
|
|
62
|
+
/**
|
|
63
|
+
* Tracks inbox item IDs that are currently being forwarded to persistent agents.
|
|
64
|
+
* Prevents duplicate message delivery when concurrent pollInboxes() calls
|
|
65
|
+
* race to forward the same unread message before markAsRead() completes.
|
|
66
|
+
*
|
|
67
|
+
* Key: inbox item ID
|
|
68
|
+
* Value: true (item is in-flight, being processed)
|
|
69
|
+
*
|
|
70
|
+
* Items are added before forwarding and removed after markAsRead() completes.
|
|
71
|
+
*/
|
|
72
|
+
forwardingInboxItems = new Set();
|
|
73
|
+
constructor(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService) {
|
|
74
|
+
this.api = api;
|
|
75
|
+
this.agentRegistry = agentRegistry;
|
|
76
|
+
this.sessionManager = sessionManager;
|
|
77
|
+
this.dispatchService = dispatchService;
|
|
78
|
+
this.worktreeManager = worktreeManager;
|
|
79
|
+
this.taskAssignment = taskAssignment;
|
|
80
|
+
this.stewardScheduler = stewardScheduler;
|
|
81
|
+
this.inboxService = inboxService;
|
|
82
|
+
this.poolService = poolService;
|
|
83
|
+
this.emitter = new EventEmitter();
|
|
84
|
+
this.config = this.normalizeConfig(config);
|
|
85
|
+
}
|
|
86
|
+
// ----------------------------------------
|
|
87
|
+
// Lifecycle
|
|
88
|
+
// ----------------------------------------
|
|
89
|
+
async start() {
|
|
90
|
+
if (this.running) {
|
|
91
|
+
return;
|
|
92
|
+
}
|
|
93
|
+
this.running = true;
|
|
94
|
+
// Reconcile stale sessions on startup (M-7)
|
|
95
|
+
try {
|
|
96
|
+
const result = await this.sessionManager.reconcileOnStartup();
|
|
97
|
+
if (result.reconciled > 0) {
|
|
98
|
+
console.log(`[dispatch-daemon] Reconciled ${result.reconciled} stale session(s)`);
|
|
99
|
+
}
|
|
100
|
+
if (result.errors.length > 0) {
|
|
101
|
+
console.warn('[dispatch-daemon] Reconciliation errors:', result.errors);
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
catch (error) {
|
|
105
|
+
console.error('[dispatch-daemon] Failed to reconcile on startup:', error);
|
|
106
|
+
}
|
|
107
|
+
// Recover orphaned task assignments (workers with tasks but no session after restart)
|
|
108
|
+
if (this.config.orphanRecoveryEnabled) {
|
|
109
|
+
try {
|
|
110
|
+
const result = await this.recoverOrphanedAssignments();
|
|
111
|
+
if (result.processed > 0) {
|
|
112
|
+
console.log(`[dispatch-daemon] Startup: recovered ${result.processed} orphaned task assignment(s)`);
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
catch (error) {
|
|
116
|
+
console.error('[dispatch-daemon] Failed to recover orphaned assignments on startup:', error);
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
// Start the main poll loop
|
|
120
|
+
this.pollIntervalHandle = this.createPollInterval();
|
|
121
|
+
// Run an initial poll cycle immediately
|
|
122
|
+
this.currentPollCycle = this.runPollCycle().catch((error) => {
|
|
123
|
+
console.error('[dispatch-daemon] Initial poll cycle error:', error);
|
|
124
|
+
});
|
|
125
|
+
}
|
|
126
|
+
async stop() {
|
|
127
|
+
if (!this.running) {
|
|
128
|
+
return;
|
|
129
|
+
}
|
|
130
|
+
this.running = false;
|
|
131
|
+
if (this.pollIntervalHandle) {
|
|
132
|
+
clearInterval(this.pollIntervalHandle);
|
|
133
|
+
this.pollIntervalHandle = undefined;
|
|
134
|
+
}
|
|
135
|
+
// Wait for in-flight poll cycle to complete (M-8)
|
|
136
|
+
if (this.currentPollCycle) {
|
|
137
|
+
try {
|
|
138
|
+
await Promise.race([
|
|
139
|
+
this.currentPollCycle,
|
|
140
|
+
new Promise((_, reject) => setTimeout(() => reject(new Error('Shutdown timeout')), 30_000)),
|
|
141
|
+
]);
|
|
142
|
+
}
|
|
143
|
+
catch { /* timeout or error — proceed with shutdown */ }
|
|
144
|
+
this.currentPollCycle = undefined;
|
|
145
|
+
}
|
|
146
|
+
}
|
|
147
|
+
isRunning() {
|
|
148
|
+
return this.running;
|
|
149
|
+
}
|
|
150
|
+
// ----------------------------------------
|
|
151
|
+
// Manual Poll Triggers
|
|
152
|
+
// ----------------------------------------
|
|
153
|
+
async pollWorkerAvailability() {
|
|
154
|
+
const startedAt = new Date().toISOString();
|
|
155
|
+
const startTime = Date.now();
|
|
156
|
+
let processed = 0;
|
|
157
|
+
let errors = 0;
|
|
158
|
+
const errorMessages = [];
|
|
159
|
+
this.emitter.emit('poll:start', 'worker-availability');
|
|
160
|
+
try {
|
|
161
|
+
// 1. Get all ephemeral workers
|
|
162
|
+
const workers = await this.agentRegistry.listAgents({
|
|
163
|
+
role: 'worker',
|
|
164
|
+
workerMode: 'ephemeral',
|
|
165
|
+
});
|
|
166
|
+
// 2. Find workers with no active session and no unread non-dispatch messages.
|
|
167
|
+
// pollInboxes() runs first in each cycle, marking dispatch messages as read.
|
|
168
|
+
// Any remaining unread items are non-dispatch messages needing triage —
|
|
169
|
+
// defer task assignment so the next cycle's triage pass can handle them.
|
|
170
|
+
const availableWorkers = [];
|
|
171
|
+
for (const worker of workers) {
|
|
172
|
+
const session = this.sessionManager.getActiveSession(asEntityId(worker.id));
|
|
173
|
+
if (session)
|
|
174
|
+
continue;
|
|
175
|
+
const unreadItems = this.inboxService.getInbox(asEntityId(worker.id), {
|
|
176
|
+
status: InboxStatus.UNREAD,
|
|
177
|
+
limit: 1,
|
|
178
|
+
});
|
|
179
|
+
if (unreadItems.length > 0)
|
|
180
|
+
continue;
|
|
181
|
+
// Defense in depth: Check if worker already has an assigned task
|
|
182
|
+
// (protects against race conditions where session terminated but assignment wasn't cleared)
|
|
183
|
+
const workerTasks = await this.taskAssignment.getAgentTasks(asEntityId(worker.id), {
|
|
184
|
+
taskStatus: [TaskStatus.OPEN, TaskStatus.IN_PROGRESS, TaskStatus.REVIEW],
|
|
185
|
+
});
|
|
186
|
+
if (workerTasks.length > 0) {
|
|
187
|
+
console.log(`[dispatch-daemon] Worker ${worker.name} already has ${workerTasks.length} assigned task(s), skipping`);
|
|
188
|
+
continue;
|
|
189
|
+
}
|
|
190
|
+
availableWorkers.push(worker);
|
|
191
|
+
}
|
|
192
|
+
// 3. For each available worker, try to assign a task
|
|
193
|
+
for (const worker of availableWorkers) {
|
|
194
|
+
try {
|
|
195
|
+
const assigned = await this.assignTaskToWorker(worker);
|
|
196
|
+
if (assigned) {
|
|
197
|
+
processed++;
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
catch (error) {
|
|
201
|
+
errors++;
|
|
202
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
203
|
+
errorMessages.push(`Worker ${worker.name}: ${errorMessage}`);
|
|
204
|
+
console.error(`[dispatch-daemon] Error assigning task to worker ${worker.name}:`, error);
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
catch (error) {
|
|
209
|
+
errors++;
|
|
210
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
211
|
+
errorMessages.push(errorMessage);
|
|
212
|
+
console.error('[dispatch-daemon] Error in pollWorkerAvailability:', error);
|
|
213
|
+
}
|
|
214
|
+
const result = {
|
|
215
|
+
pollType: 'worker-availability',
|
|
216
|
+
startedAt,
|
|
217
|
+
durationMs: Date.now() - startTime,
|
|
218
|
+
processed,
|
|
219
|
+
errors,
|
|
220
|
+
errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
|
|
221
|
+
};
|
|
222
|
+
this.emitter.emit('poll:complete', result);
|
|
223
|
+
return result;
|
|
224
|
+
}
|
|
225
|
+
async pollInboxes() {
|
|
226
|
+
const startedAt = new Date().toISOString();
|
|
227
|
+
const startTime = Date.now();
|
|
228
|
+
let processed = 0;
|
|
229
|
+
let errors = 0;
|
|
230
|
+
const errorMessages = [];
|
|
231
|
+
this.emitter.emit('poll:start', 'inbox');
|
|
232
|
+
// Accumulate deferred items per agent for triage processing
|
|
233
|
+
const deferredItems = new Map();
|
|
234
|
+
try {
|
|
235
|
+
// Get all agents
|
|
236
|
+
const agents = await this.agentRegistry.listAgents();
|
|
237
|
+
for (const agent of agents) {
|
|
238
|
+
try {
|
|
239
|
+
const agentId = asEntityId(agent.id);
|
|
240
|
+
const meta = getAgentMetadata(agent);
|
|
241
|
+
if (!meta)
|
|
242
|
+
continue;
|
|
243
|
+
// Get unread messages for this agent
|
|
244
|
+
const inboxItems = this.inboxService.getInbox(agentId, {
|
|
245
|
+
status: InboxStatus.UNREAD,
|
|
246
|
+
limit: 50, // Process up to 50 messages per agent per cycle
|
|
247
|
+
});
|
|
248
|
+
for (const item of inboxItems) {
|
|
249
|
+
try {
|
|
250
|
+
const messageProcessed = await this.processInboxItem(agent, item, meta);
|
|
251
|
+
if (messageProcessed) {
|
|
252
|
+
processed++;
|
|
253
|
+
}
|
|
254
|
+
else {
|
|
255
|
+
// Item was not processed (deferred for triage)
|
|
256
|
+
// Only ephemeral workers and stewards get triage sessions.
|
|
257
|
+
// Persistent agents (directors, persistent workers) leave messages
|
|
258
|
+
// unread until their session starts — spawning a headless triage
|
|
259
|
+
// session for them would confuse the UI and mark messages as read
|
|
260
|
+
// before the agent can actually process them.
|
|
261
|
+
const isPersistentAgent = meta.agentRole === 'director' ||
|
|
262
|
+
(meta.agentRole === 'worker' && meta.workerMode === 'persistent');
|
|
263
|
+
if (!isPersistentAgent) {
|
|
264
|
+
const activeSession = this.sessionManager.getActiveSession(agentId);
|
|
265
|
+
if (!activeSession) {
|
|
266
|
+
if (!deferredItems.has(agentId)) {
|
|
267
|
+
deferredItems.set(agentId, { agent, items: [] });
|
|
268
|
+
}
|
|
269
|
+
deferredItems.get(agentId).items.push(item);
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
}
|
|
273
|
+
}
|
|
274
|
+
catch (error) {
|
|
275
|
+
errors++;
|
|
276
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
277
|
+
errorMessages.push(`Message ${item.messageId}: ${errorMessage}`);
|
|
278
|
+
}
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
catch (error) {
|
|
282
|
+
errors++;
|
|
283
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
284
|
+
errorMessages.push(`Agent ${agent.name}: ${errorMessage}`);
|
|
285
|
+
}
|
|
286
|
+
}
|
|
287
|
+
// Process triage batches for idle agents with deferred messages
|
|
288
|
+
if (deferredItems.size > 0) {
|
|
289
|
+
const triageResult = await this.processTriageBatch(deferredItems);
|
|
290
|
+
processed += triageResult;
|
|
291
|
+
}
|
|
292
|
+
}
|
|
293
|
+
catch (error) {
|
|
294
|
+
errors++;
|
|
295
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
296
|
+
errorMessages.push(errorMessage);
|
|
297
|
+
console.error('[dispatch-daemon] Error in pollInboxes:', error);
|
|
298
|
+
}
|
|
299
|
+
const result = {
|
|
300
|
+
pollType: 'inbox',
|
|
301
|
+
startedAt,
|
|
302
|
+
durationMs: Date.now() - startTime,
|
|
303
|
+
processed,
|
|
304
|
+
errors,
|
|
305
|
+
errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
|
|
306
|
+
};
|
|
307
|
+
this.emitter.emit('poll:complete', result);
|
|
308
|
+
return result;
|
|
309
|
+
}
|
|
310
|
+
async pollStewardTriggers() {
|
|
311
|
+
const startedAt = new Date().toISOString();
|
|
312
|
+
const startTime = Date.now();
|
|
313
|
+
let processed = 0;
|
|
314
|
+
let errors = 0;
|
|
315
|
+
const errorMessages = [];
|
|
316
|
+
this.emitter.emit('poll:start', 'steward-trigger');
|
|
317
|
+
try {
|
|
318
|
+
// The StewardScheduler handles trigger evaluation internally
|
|
319
|
+
// We just need to check if any stewards need to be triggered
|
|
320
|
+
// This is mainly handled by the scheduler's own polling, but
|
|
321
|
+
// we can use this to ensure the scheduler is running
|
|
322
|
+
if (!this.stewardScheduler.isRunning()) {
|
|
323
|
+
// Start the scheduler if it's not running
|
|
324
|
+
await this.stewardScheduler.start();
|
|
325
|
+
processed++;
|
|
326
|
+
}
|
|
327
|
+
// Get stats to report on activity
|
|
328
|
+
const stats = this.stewardScheduler.getStats();
|
|
329
|
+
processed += stats.runningExecutions;
|
|
330
|
+
}
|
|
331
|
+
catch (error) {
|
|
332
|
+
errors++;
|
|
333
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
334
|
+
errorMessages.push(errorMessage);
|
|
335
|
+
console.error('[dispatch-daemon] Error in pollStewardTriggers:', error);
|
|
336
|
+
}
|
|
337
|
+
const result = {
|
|
338
|
+
pollType: 'steward-trigger',
|
|
339
|
+
startedAt,
|
|
340
|
+
durationMs: Date.now() - startTime,
|
|
341
|
+
processed,
|
|
342
|
+
errors,
|
|
343
|
+
errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
|
|
344
|
+
};
|
|
345
|
+
this.emitter.emit('poll:complete', result);
|
|
346
|
+
return result;
|
|
347
|
+
}
|
|
348
|
+
async pollWorkflowTasks() {
|
|
349
|
+
const startedAt = new Date().toISOString();
|
|
350
|
+
const startTime = Date.now();
|
|
351
|
+
let processed = 0;
|
|
352
|
+
let errors = 0;
|
|
353
|
+
const errorMessages = [];
|
|
354
|
+
this.emitter.emit('poll:start', 'workflow-task');
|
|
355
|
+
try {
|
|
356
|
+
const stewards = await this.agentRegistry.getStewards();
|
|
357
|
+
// Find available stewards (no active session)
|
|
358
|
+
const availableStewards = [];
|
|
359
|
+
for (const steward of stewards) {
|
|
360
|
+
const session = this.sessionManager.getActiveSession(asEntityId(steward.id));
|
|
361
|
+
if (!session) {
|
|
362
|
+
availableStewards.push(steward);
|
|
363
|
+
}
|
|
364
|
+
}
|
|
365
|
+
// Separate merge stewards from other stewards
|
|
366
|
+
const mergeStewards = availableStewards.filter((s) => {
|
|
367
|
+
const meta = getAgentMetadata(s);
|
|
368
|
+
return meta?.stewardFocus === 'merge';
|
|
369
|
+
});
|
|
370
|
+
const otherStewards = availableStewards.filter((s) => {
|
|
371
|
+
const meta = getAgentMetadata(s);
|
|
372
|
+
return meta?.stewardFocus !== 'merge';
|
|
373
|
+
});
|
|
374
|
+
// 1. Handle REVIEW tasks - spawn merge stewards with full context
|
|
375
|
+
// Find tasks in REVIEW status that need merge processing
|
|
376
|
+
const reviewTasks = await this.taskAssignment.listAssignments({
|
|
377
|
+
taskStatus: [TaskStatus.REVIEW],
|
|
378
|
+
mergeStatus: ['pending'],
|
|
379
|
+
});
|
|
380
|
+
// Filter to tasks not already claimed by a steward.
|
|
381
|
+
// We check task.assignee rather than orchestratorMeta.assignedAgent because
|
|
382
|
+
// assignedAgent retains the original worker's ID after completeTask() clears
|
|
383
|
+
// the top-level assignee. A steward claim sets task.assignee to the steward ID
|
|
384
|
+
// (in spawnMergeStewardForTask), so an unset assignee means no steward has it.
|
|
385
|
+
const unclaimedReviewTasks = reviewTasks.filter((ta) => !ta.task.assignee);
|
|
386
|
+
const sortedReviewTasks = [...unclaimedReviewTasks].sort((a, b) => (b.task.priority ?? 0) - (a.task.priority ?? 0));
|
|
387
|
+
for (const steward of mergeStewards) {
|
|
388
|
+
if (sortedReviewTasks.length === 0)
|
|
389
|
+
break;
|
|
390
|
+
const taskAssignment = sortedReviewTasks.shift();
|
|
391
|
+
if (!taskAssignment)
|
|
392
|
+
continue;
|
|
393
|
+
try {
|
|
394
|
+
// Spawn merge steward with full context prompt
|
|
395
|
+
await this.spawnMergeStewardForTask(steward, taskAssignment.task);
|
|
396
|
+
processed++;
|
|
397
|
+
this.emitter.emit('task:dispatched', taskAssignment.taskId, asEntityId(steward.id));
|
|
398
|
+
}
|
|
399
|
+
catch (error) {
|
|
400
|
+
errors++;
|
|
401
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
402
|
+
errorMessages.push(`Merge steward ${steward.name}: ${errorMessage}`);
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
// 2. Handle other workflow tasks (tag-based matching for non-merge stewards)
|
|
406
|
+
for (const steward of otherStewards) {
|
|
407
|
+
try {
|
|
408
|
+
const meta = getAgentMetadata(steward);
|
|
409
|
+
if (!meta)
|
|
410
|
+
continue;
|
|
411
|
+
// Look for unassigned tasks that match this steward's focus
|
|
412
|
+
const focusTag = meta.stewardFocus;
|
|
413
|
+
const unassignedTasks = await this.taskAssignment.getUnassignedTasks({
|
|
414
|
+
taskStatus: [TaskStatus.OPEN, TaskStatus.IN_PROGRESS],
|
|
415
|
+
});
|
|
416
|
+
// Filter tasks that match this steward's focus
|
|
417
|
+
const matchingTasks = unassignedTasks.filter((task) => {
|
|
418
|
+
const tags = task.tags ?? [];
|
|
419
|
+
return tags.includes(focusTag) ||
|
|
420
|
+
tags.includes(`steward-${focusTag}`) ||
|
|
421
|
+
tags.includes('workflow');
|
|
422
|
+
});
|
|
423
|
+
if (matchingTasks.length > 0) {
|
|
424
|
+
// Assign the highest priority task to this steward
|
|
425
|
+
const sortedTasks = [...matchingTasks].sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
|
|
426
|
+
const task = sortedTasks[0];
|
|
427
|
+
const stewardId = asEntityId(steward.id);
|
|
428
|
+
await this.dispatchService.dispatch(task.id, stewardId);
|
|
429
|
+
processed++;
|
|
430
|
+
this.emitter.emit('task:dispatched', task.id, stewardId);
|
|
431
|
+
}
|
|
432
|
+
}
|
|
433
|
+
catch (error) {
|
|
434
|
+
errors++;
|
|
435
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
436
|
+
errorMessages.push(`Steward ${steward.name}: ${errorMessage}`);
|
|
437
|
+
}
|
|
438
|
+
}
|
|
439
|
+
}
|
|
440
|
+
catch (error) {
|
|
441
|
+
errors++;
|
|
442
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
443
|
+
errorMessages.push(errorMessage);
|
|
444
|
+
console.error('[dispatch-daemon] Error in pollWorkflowTasks:', error);
|
|
445
|
+
}
|
|
446
|
+
const result = {
|
|
447
|
+
pollType: 'workflow-task',
|
|
448
|
+
startedAt,
|
|
449
|
+
durationMs: Date.now() - startTime,
|
|
450
|
+
processed,
|
|
451
|
+
errors,
|
|
452
|
+
errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
|
|
453
|
+
};
|
|
454
|
+
this.emitter.emit('poll:complete', result);
|
|
455
|
+
return result;
|
|
456
|
+
}
|
|
457
|
+
async recoverOrphanedAssignments() {
|
|
458
|
+
const startedAt = new Date().toISOString();
|
|
459
|
+
const startTime = Date.now();
|
|
460
|
+
let processed = 0;
|
|
461
|
+
let errors = 0;
|
|
462
|
+
const errorMessages = [];
|
|
463
|
+
this.emitter.emit('poll:start', 'orphan-recovery');
|
|
464
|
+
try {
|
|
465
|
+
// 1. Get all ephemeral workers
|
|
466
|
+
const workers = await this.agentRegistry.listAgents({
|
|
467
|
+
role: 'worker',
|
|
468
|
+
workerMode: 'ephemeral',
|
|
469
|
+
});
|
|
470
|
+
for (const worker of workers) {
|
|
471
|
+
const workerId = asEntityId(worker.id);
|
|
472
|
+
// 2. Skip if worker has an active session
|
|
473
|
+
const session = this.sessionManager.getActiveSession(workerId);
|
|
474
|
+
if (session)
|
|
475
|
+
continue;
|
|
476
|
+
// 3. Check if worker has assigned tasks (OPEN or IN_PROGRESS only, not REVIEW)
|
|
477
|
+
const workerTasks = await this.taskAssignment.getAgentTasks(workerId, {
|
|
478
|
+
taskStatus: [TaskStatus.OPEN, TaskStatus.IN_PROGRESS],
|
|
479
|
+
});
|
|
480
|
+
if (workerTasks.length === 0)
|
|
481
|
+
continue;
|
|
482
|
+
// 4. Recover the first orphaned task
|
|
483
|
+
const taskAssignment = workerTasks[0];
|
|
484
|
+
try {
|
|
485
|
+
await this.recoverOrphanedTask(worker, taskAssignment.task, taskAssignment.orchestratorMeta);
|
|
486
|
+
processed++;
|
|
487
|
+
}
|
|
488
|
+
catch (error) {
|
|
489
|
+
errors++;
|
|
490
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
491
|
+
errorMessages.push(`Worker ${worker.name}: ${errorMessage}`);
|
|
492
|
+
console.error(`[dispatch-daemon] Error recovering orphaned task for worker ${worker.name}:`, error);
|
|
493
|
+
}
|
|
494
|
+
}
|
|
495
|
+
// --- Phase 2: Recover orphaned merge steward assignments ---
|
|
496
|
+
const mergeStewards = await this.agentRegistry.listAgents({
|
|
497
|
+
role: 'steward',
|
|
498
|
+
stewardFocus: 'merge',
|
|
499
|
+
});
|
|
500
|
+
for (const steward of mergeStewards) {
|
|
501
|
+
const stewardId = asEntityId(steward.id);
|
|
502
|
+
// Skip if steward has an active session
|
|
503
|
+
const stewardSession = this.sessionManager.getActiveSession(stewardId);
|
|
504
|
+
if (stewardSession)
|
|
505
|
+
continue;
|
|
506
|
+
// Find REVIEW tasks assigned to this steward that still need processing.
|
|
507
|
+
// Only recover tasks with 'pending' or 'testing' mergeStatus - tasks with
|
|
508
|
+
// 'test_failed', 'conflict', 'failed', or 'merged' have already been processed
|
|
509
|
+
// and should NOT be re-spawned (prevents infinite retry loops on pre-existing failures).
|
|
510
|
+
const stewardTasks = await this.taskAssignment.getAgentTasks(stewardId, {
|
|
511
|
+
taskStatus: [TaskStatus.REVIEW],
|
|
512
|
+
mergeStatus: ['pending', 'testing'],
|
|
513
|
+
});
|
|
514
|
+
if (stewardTasks.length === 0)
|
|
515
|
+
continue;
|
|
516
|
+
const orphanedAssignment = stewardTasks[0];
|
|
517
|
+
try {
|
|
518
|
+
await this.recoverOrphanedStewardTask(steward, orphanedAssignment.task, orphanedAssignment.orchestratorMeta);
|
|
519
|
+
processed++;
|
|
520
|
+
}
|
|
521
|
+
catch (error) {
|
|
522
|
+
errors++;
|
|
523
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
524
|
+
errorMessages.push(`Merge steward ${steward.name}: ${errorMessage}`);
|
|
525
|
+
console.error(`[dispatch-daemon] Error recovering orphaned steward task for ${steward.name}:`, error);
|
|
526
|
+
}
|
|
527
|
+
}
|
|
528
|
+
if (processed > 0) {
|
|
529
|
+
console.log(`[dispatch-daemon] Recovered ${processed} orphaned task assignment(s)`);
|
|
530
|
+
}
|
|
531
|
+
}
|
|
532
|
+
catch (error) {
|
|
533
|
+
errors++;
|
|
534
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
535
|
+
errorMessages.push(errorMessage);
|
|
536
|
+
console.error('[dispatch-daemon] Error in recoverOrphanedAssignments:', error);
|
|
537
|
+
}
|
|
538
|
+
const result = {
|
|
539
|
+
pollType: 'orphan-recovery',
|
|
540
|
+
startedAt,
|
|
541
|
+
durationMs: Date.now() - startTime,
|
|
542
|
+
processed,
|
|
543
|
+
errors,
|
|
544
|
+
errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
|
|
545
|
+
};
|
|
546
|
+
this.emitter.emit('poll:complete', result);
|
|
547
|
+
return result;
|
|
548
|
+
}
|
|
549
|
+
async reconcileClosedUnmergedTasks() {
|
|
550
|
+
const startedAt = new Date().toISOString();
|
|
551
|
+
const startTime = Date.now();
|
|
552
|
+
let processed = 0;
|
|
553
|
+
let errors = 0;
|
|
554
|
+
const errorMessages = [];
|
|
555
|
+
this.emitter.emit('poll:start', 'closed-unmerged-reconciliation');
|
|
556
|
+
try {
|
|
557
|
+
// Find tasks that are CLOSED but have a non-merged mergeStatus
|
|
558
|
+
const stuckTasks = await this.taskAssignment.listAssignments({
|
|
559
|
+
taskStatus: [TaskStatus.CLOSED],
|
|
560
|
+
mergeStatus: ['pending', 'testing', 'merging', 'conflict', 'test_failed', 'failed'],
|
|
561
|
+
});
|
|
562
|
+
const now = Date.now();
|
|
563
|
+
for (const assignment of stuckTasks) {
|
|
564
|
+
try {
|
|
565
|
+
const { task, orchestratorMeta } = assignment;
|
|
566
|
+
// Skip tasks without orchestrator metadata (not managed by orchestrator)
|
|
567
|
+
if (!orchestratorMeta)
|
|
568
|
+
continue;
|
|
569
|
+
// Grace period: skip if closedAt is within the grace period
|
|
570
|
+
if (task.closedAt) {
|
|
571
|
+
const closedAtMs = typeof task.closedAt === 'number'
|
|
572
|
+
? task.closedAt
|
|
573
|
+
: new Date(task.closedAt).getTime();
|
|
574
|
+
if (now - closedAtMs < this.config.closedUnmergedGracePeriodMs) {
|
|
575
|
+
continue;
|
|
576
|
+
}
|
|
577
|
+
}
|
|
578
|
+
// Safety valve: skip if already reconciled 3+ times (prevents infinite loops)
|
|
579
|
+
const currentCount = orchestratorMeta.reconciliationCount ?? 0;
|
|
580
|
+
if (currentCount >= 3) {
|
|
581
|
+
console.warn(`[dispatch-daemon] Task ${task.id} has been reconciled ${currentCount} times, skipping (safety valve)`);
|
|
582
|
+
continue;
|
|
583
|
+
}
|
|
584
|
+
// Move back to REVIEW with incremented reconciliation count.
|
|
585
|
+
// Clear assignee so steward dispatch sees it as unclaimed.
|
|
586
|
+
// Reset mergeStatus to 'pending' for a clean steward pickup.
|
|
587
|
+
await this.api.update(task.id, {
|
|
588
|
+
status: TaskStatus.REVIEW,
|
|
589
|
+
assignee: undefined,
|
|
590
|
+
closedAt: undefined,
|
|
591
|
+
closeReason: undefined,
|
|
592
|
+
metadata: updateOrchestratorTaskMeta(task.metadata, {
|
|
593
|
+
reconciliationCount: currentCount + 1,
|
|
594
|
+
mergeStatus: 'pending',
|
|
595
|
+
}),
|
|
596
|
+
});
|
|
597
|
+
processed++;
|
|
598
|
+
console.log(`[dispatch-daemon] Reconciled closed-but-unmerged task ${task.id} (mergeStatus=${orchestratorMeta.mergeStatus}, attempt=${currentCount + 1})`);
|
|
599
|
+
}
|
|
600
|
+
catch (error) {
|
|
601
|
+
errors++;
|
|
602
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
603
|
+
errorMessages.push(`Task ${assignment.taskId}: ${errorMessage}`);
|
|
604
|
+
console.error(`[dispatch-daemon] Error reconciling task ${assignment.taskId}:`, error);
|
|
605
|
+
}
|
|
606
|
+
}
|
|
607
|
+
if (processed > 0) {
|
|
608
|
+
console.log(`[dispatch-daemon] Reconciled ${processed} closed-but-unmerged task(s)`);
|
|
609
|
+
}
|
|
610
|
+
}
|
|
611
|
+
catch (error) {
|
|
612
|
+
errors++;
|
|
613
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
614
|
+
errorMessages.push(errorMessage);
|
|
615
|
+
console.error('[dispatch-daemon] Error in reconcileClosedUnmergedTasks:', error);
|
|
616
|
+
}
|
|
617
|
+
const result = {
|
|
618
|
+
pollType: 'closed-unmerged-reconciliation',
|
|
619
|
+
startedAt,
|
|
620
|
+
durationMs: Date.now() - startTime,
|
|
621
|
+
processed,
|
|
622
|
+
errors,
|
|
623
|
+
errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
|
|
624
|
+
};
|
|
625
|
+
this.emitter.emit('poll:complete', result);
|
|
626
|
+
return result;
|
|
627
|
+
}
|
|
628
|
+
/**
|
|
629
|
+
* Detects tasks stuck in 'merging' or 'testing' mergeStatus for too long
|
|
630
|
+
* and resets them to 'pending' for a fresh retry.
|
|
631
|
+
*/
|
|
632
|
+
async recoverStuckMergeTasks() {
|
|
633
|
+
const startedAt = new Date().toISOString();
|
|
634
|
+
const startTime = Date.now();
|
|
635
|
+
let processed = 0;
|
|
636
|
+
        let errors = 0;
        const errorMessages = [];
        this.emitter.emit('poll:start', 'stuck-merge-recovery');
        try {
            const stuckTasks = await this.taskAssignment.listAssignments({
                taskStatus: [TaskStatus.REVIEW],
                mergeStatus: ['merging', 'testing'],
            });
            const now = Date.now();
            for (const assignment of stuckTasks) {
                try {
                    const { task, orchestratorMeta } = assignment;
                    if (!orchestratorMeta)
                        continue;
                    // Grace period: skip if updatedAt is within the grace period
                    if (task.updatedAt) {
                        const updatedAtMs = typeof task.updatedAt === 'number'
                            ? task.updatedAt
                            : new Date(task.updatedAt).getTime();
                        if (now - updatedAtMs < this.config.stuckMergeRecoveryGracePeriodMs) {
                            continue;
                        }
                    }
                    // Skip if steward has an active session (merge still in progress)
                    if (orchestratorMeta.assignedAgent) {
                        const activeSession = this.sessionManager.getActiveSession(orchestratorMeta.assignedAgent);
                        if (activeSession)
                            continue;
                    }
                    // Safety valve: skip if already recovered 3+ times
                    const currentCount = orchestratorMeta.stuckMergeRecoveryCount ?? 0;
                    if (currentCount >= 3) {
                        console.warn(`[dispatch-daemon] Task ${task.id} has been recovered from stuck merge ${currentCount} times, skipping (safety valve)`);
                        continue;
                    }
                    // Reset mergeStatus to 'pending' for fresh steward pickup
                    await this.api.update(task.id, {
                        assignee: undefined,
                        metadata: updateOrchestratorTaskMeta(task.metadata, {
                            mergeStatus: 'pending',
                            stuckMergeRecoveryCount: currentCount + 1,
                        }),
                    });
                    // Clean up temp merge worktree if it exists
                    const mergeDirName = `_merge-${task.id.replace(/[^a-zA-Z0-9-]/g, '-')}`;
                    const mergeWorktreePath = `.stoneforge/.worktrees/${mergeDirName}`;
                    try {
                        const exists = await this.worktreeManager.worktreeExists(mergeWorktreePath);
                        if (exists) {
                            await this.worktreeManager.removeWorktree(mergeWorktreePath, { force: true });
                        }
                    }
                    catch {
                        // Ignore worktree cleanup errors
                    }
                    processed++;
                    console.log(`[dispatch-daemon] Recovered stuck merge task ${task.id} (mergeStatus=${orchestratorMeta.mergeStatus}, attempt=${currentCount + 1})`);
                }
                catch (error) {
                    errors++;
                    const errorMessage = error instanceof Error ? error.message : String(error);
                    errorMessages.push(`Task ${assignment.taskId}: ${errorMessage}`);
                    console.error(`[dispatch-daemon] Error recovering stuck merge task ${assignment.taskId}:`, error);
                }
            }
            if (processed > 0) {
                console.log(`[dispatch-daemon] Recovered ${processed} stuck merge task(s)`);
            }
        }
        catch (error) {
            errors++;
            const errorMessage = error instanceof Error ? error.message : String(error);
            errorMessages.push(errorMessage);
            console.error('[dispatch-daemon] Error in recoverStuckMergeTasks:', error);
        }
        const stuckResult = {
            pollType: 'stuck-merge-recovery',
            startedAt,
            durationMs: Date.now() - startTime,
            processed,
            errors,
            errorMessages: errorMessages.length > 0 ? errorMessages : undefined,
        };
        this.emitter.emit('poll:complete', stuckResult);
        return stuckResult;
    }
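
Editor's note: consumers can observe these recovery results through the daemon's event API (the `on()`/`off()` methods just below). A minimal sketch, assuming a `daemon` instance built with the `createDispatchDaemon` factory at the end of this file:

```js
// Sketch only: observe stuck-merge recovery results. The result shape
// mirrors `stuckResult` above; `daemon` is an assumed instance from
// createDispatchDaemon(...) (see the factory at the end of this file).
daemon.on('poll:complete', (result) => {
    if (result.pollType === 'stuck-merge-recovery' && result.processed > 0) {
        console.log(`recovered ${result.processed} stuck merge task(s) in ${result.durationMs}ms`);
        for (const msg of result.errorMessages ?? []) console.warn(msg);
    }
});
```
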
    // ----------------------------------------
    // Configuration
    // ----------------------------------------
    getConfig() {
        return { ...this.config };
    }
    updateConfig(config) {
        const oldPollIntervalMs = this.config.pollIntervalMs;
        this.config = this.normalizeConfig({ ...this.config, ...config });
        if (this.running && this.config.pollIntervalMs !== oldPollIntervalMs) {
            if (this.pollIntervalHandle) {
                clearInterval(this.pollIntervalHandle);
            }
            this.pollIntervalHandle = this.createPollInterval();
        }
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    on(event, listener) {
        this.emitter.on(event, listener);
    }
    off(event, listener) {
        this.emitter.off(event, listener);
    }
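
Editor's note: `updateConfig` merges the partial config, re-normalizes it, and, if the daemon is running and the interval changed, swaps the poll timer. A usage sketch, again assuming a `daemon` instance:

```js
// Sketch: adjust the poll cadence at runtime. The merged config passes
// through normalizeConfig(), and a running daemon recreates its timer only
// when pollIntervalMs actually changed.
daemon.updateConfig({ pollIntervalMs: 5_000, maxSessionDurationMs: 30 * 60_000 });
console.log(daemon.getConfig().pollIntervalMs); // clamped into the allowed range
```
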
    // ----------------------------------------
    // Private Helpers
    // ----------------------------------------
    createPollInterval() {
        return setInterval(async () => {
            if (!this.running)
                return;
            try {
                this.currentPollCycle = this.runPollCycle();
                await this.currentPollCycle;
            }
            catch (error) {
                console.error('[dispatch-daemon] Poll cycle error:', error);
            }
        }, this.config.pollIntervalMs);
    }
    normalizeConfig(config) {
        let pollIntervalMs = config?.pollIntervalMs ?? DISPATCH_DAEMON_DEFAULT_POLL_INTERVAL_MS;
        pollIntervalMs = Math.max(DISPATCH_DAEMON_MIN_POLL_INTERVAL_MS, Math.min(DISPATCH_DAEMON_MAX_POLL_INTERVAL_MS, pollIntervalMs));
        return {
            pollIntervalMs,
            workerAvailabilityPollEnabled: config?.workerAvailabilityPollEnabled ?? true,
            inboxPollEnabled: config?.inboxPollEnabled ?? true,
            stewardTriggerPollEnabled: config?.stewardTriggerPollEnabled ?? true,
            workflowTaskPollEnabled: config?.workflowTaskPollEnabled ?? true,
            orphanRecoveryEnabled: config?.orphanRecoveryEnabled ?? true,
            closedUnmergedReconciliationEnabled: config?.closedUnmergedReconciliationEnabled ?? true,
            closedUnmergedGracePeriodMs: config?.closedUnmergedGracePeriodMs ?? 120_000,
            stuckMergeRecoveryEnabled: config?.stuckMergeRecoveryEnabled ?? true,
            stuckMergeRecoveryGracePeriodMs: config?.stuckMergeRecoveryGracePeriodMs ?? 600_000,
            maxSessionDurationMs: config?.maxSessionDurationMs ?? 0,
            onSessionStarted: config?.onSessionStarted,
            projectRoot: config?.projectRoot ?? process.cwd(),
            directorInboxForwardingEnabled: config?.directorInboxForwardingEnabled ?? true,
            directorInboxIdleThresholdMs: config?.directorInboxIdleThresholdMs ?? 120_000,
        };
    }
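
Editor's note: `normalizeConfig` clamps the poll interval between min/max constants imported elsewhere in the package. The sketch below shows the clamping behavior with placeholder values; the real `DISPATCH_DAEMON_*` constants are defined in the package and may differ.

```js
// Illustration of the clamping in normalizeConfig. The constant values here
// are placeholders chosen only to demonstrate the behavior, not the
// package's actual DISPATCH_DAEMON_* constants.
const MIN_MS = 1_000, MAX_MS = 60_000, DEFAULT_MS = 10_000;
const clampInterval = (ms) => Math.max(MIN_MS, Math.min(MAX_MS, ms ?? DEFAULT_MS));
console.log(clampInterval(250));       // 1000  (raised to the minimum)
console.log(clampInterval(600_000));   // 60000 (capped at the maximum)
console.log(clampInterval(undefined)); // 10000 (default applied first)
```
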
    /**
     * Runs a complete poll cycle for all enabled polling loops.
     */
    async runPollCycle() {
        if (this.polling)
            return;
        this.polling = true;
        try {
            // Recover orphaned assignments first — workers with tasks but no session
            // (e.g. from mid-cycle crashes). Runs before availability polling so
            // orphans are handled before they'd be skipped.
            if (this.config.orphanRecoveryEnabled) {
                await this.recoverOrphanedAssignments();
            }
            // Reap stale sessions before polling for availability
            await this.reapStaleSessions();
            // Run polls sequentially to avoid overwhelming the system.
            // Inbox runs first so triage spawns before task dispatch — idle agents
            // process accumulated non-dispatch messages before picking up new tasks.
            if (this.config.inboxPollEnabled) {
                await this.pollInboxes();
            }
            if (this.config.workerAvailabilityPollEnabled) {
                await this.pollWorkerAvailability();
            }
            if (this.config.stewardTriggerPollEnabled) {
                await this.pollStewardTriggers();
            }
            if (this.config.workflowTaskPollEnabled) {
                await this.pollWorkflowTasks();
            }
            // Reconcile closed-but-unmerged tasks after workflow polling so
            // reconciled tasks get picked up on the next cycle, giving a clean
            // state transition.
            if (this.config.closedUnmergedReconciliationEnabled) {
                await this.reconcileClosedUnmergedTasks();
            }
            // Recover tasks stuck in merging/testing for too long
            if (this.config.stuckMergeRecoveryEnabled) {
                await this.recoverStuckMergeTasks();
            }
        }
        finally {
            this.polling = false;
        }
    }
    /**
     * Terminates sessions that have exceeded the configured max duration.
     * Prevents stuck workers from blocking their slot indefinitely.
     */
    async reapStaleSessions() {
        if (this.config.maxSessionDurationMs <= 0)
            return;
        const running = this.sessionManager.listSessions({ status: 'running' });
        const now = Date.now();
        for (const session of running) {
            const createdAt = typeof session.createdAt === 'number'
                ? session.createdAt
                : new Date(session.createdAt).getTime();
            const age = now - createdAt;
            if (age > this.config.maxSessionDurationMs) {
                try {
                    await this.sessionManager.stopSession(session.id, {
                        graceful: false,
                        reason: `Session exceeded max duration (${Math.round(age / 1000)}s)`,
                    });
                }
                catch (error) {
                    const message = error instanceof Error ? error.message : String(error);
                    if (!message.includes('not found')) {
                        console.warn(`[dispatch-daemon] Failed to reap session ${session.id}:`, error);
                    }
                }
            }
        }
    }
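
Editor's note: `reapStaleSessions` and the grace-period checks above repeatedly normalize timestamps that may arrive as epoch milliseconds or ISO strings. That inline pattern, extracted as a standalone helper for clarity:

```js
// The `number | string` timestamp normalization used inline above,
// as a reusable helper.
function toEpochMs(ts) {
    return typeof ts === 'number' ? ts : new Date(ts).getTime();
}
console.log(Date.now() - toEpochMs(Date.now() - 5_000));              // ~5000
console.log(Number.isFinite(toEpochMs('2024-01-01T00:00:00.000Z'))); // true
```
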
    /**
     * Recovers a single orphaned task by re-spawning a session for the worker.
     * Tries to resume the previous provider session first (preserves context),
     * falls back to a fresh spawn if no sessionId or resume fails.
     */
    async recoverOrphanedTask(worker, task, taskMeta) {
        const workerId = asEntityId(worker.id);
        // 1. Resolve worktree — reuse existing or create new
        let worktreePath = taskMeta?.worktree ?? taskMeta?.handoffWorktree;
        let branch = taskMeta?.branch ?? taskMeta?.handoffBranch;
        if (worktreePath) {
            const exists = await this.worktreeManager.worktreeExists(worktreePath);
            if (!exists) {
                const worktreeResult = await this.createWorktreeForTask(worker, task);
                worktreePath = worktreeResult.path;
                branch = worktreeResult.branch;
                // Update task metadata with new worktree info
                await this.api.update(task.id, {
                    metadata: updateOrchestratorTaskMeta(task.metadata, { worktree: worktreePath, branch }),
                });
            }
        }
        else {
            const worktreeResult = await this.createWorktreeForTask(worker, task);
            worktreePath = worktreeResult.path;
            branch = worktreeResult.branch;
            await this.api.update(task.id, {
                metadata: updateOrchestratorTaskMeta(task.metadata, { worktree: worktreePath, branch }),
            });
        }
        // 2. Try resume first if we have a previous session ID
        const previousSessionId = taskMeta?.sessionId;
        if (previousSessionId) {
            try {
                const { session, events } = await this.sessionManager.resumeSession(workerId, {
                    providerSessionId: previousSessionId,
                    workingDirectory: worktreePath,
                    worktree: worktreePath,
                    checkReadyQueue: false,
                    resumePrompt: [
                        'Your previous session was interrupted by a server restart.',
                        `You are still assigned to task ${task.id}: "${task.title}".`,
                        'Please continue working on this task from where you left off.',
                    ].join('\n'),
                });
                // Record session history entry for recovered worker session
                const resumeHistoryEntry = {
                    sessionId: session.id,
                    providerSessionId: session.providerSessionId,
                    agentId: workerId,
                    agentName: worker.name,
                    agentRole: 'worker',
                    startedAt: createTimestamp(),
                };
                const updatedTask = await this.api.get(task.id);
                if (updatedTask) {
                    const metadataWithHistory = appendTaskSessionHistory(updatedTask.metadata, resumeHistoryEntry);
                    await this.api.update(task.id, { metadata: metadataWithHistory });
                }
                if (this.config.onSessionStarted) {
                    this.config.onSessionStarted(session, events, workerId, `[resumed session for task ${task.id}]`);
                }
                this.emitter.emit('agent:spawned', workerId, worktreePath);
                console.log(`[dispatch-daemon] Resumed session for orphaned task ${task.id} on worker ${worker.name}`);
                return;
            }
            catch (error) {
                console.warn(`[dispatch-daemon] Failed to resume session ${previousSessionId} for worker ${worker.name}, falling back to fresh spawn:`, error);
            }
        }
        // 3. Fall back to fresh spawn
        const initialPrompt = await this.buildTaskPrompt(task, workerId);
        const { session, events } = await this.sessionManager.startSession(workerId, {
            workingDirectory: worktreePath,
            worktree: worktreePath,
            initialPrompt,
        });
        // Record session history entry for fresh spawned worker session
        const freshSpawnHistoryEntry = {
            sessionId: session.id,
            providerSessionId: session.providerSessionId,
            agentId: workerId,
            agentName: worker.name,
            agentRole: 'worker',
            startedAt: createTimestamp(),
        };
        const taskAfterFreshSpawn = await this.api.get(task.id);
        if (taskAfterFreshSpawn) {
            const metadataWithHistory = appendTaskSessionHistory(taskAfterFreshSpawn.metadata, freshSpawnHistoryEntry);
            await this.api.update(task.id, { metadata: metadataWithHistory });
        }
        if (this.config.onSessionStarted) {
            this.config.onSessionStarted(session, events, workerId, initialPrompt);
        }
        this.emitter.emit('agent:spawned', workerId, worktreePath);
        console.log(`[dispatch-daemon] Spawned fresh session for orphaned task ${task.id} on worker ${worker.name}`);
    }
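
Editor's note: the control flow of `recoverOrphanedTask` (and of the steward variant below) is a resume-then-fallback pattern. Distilled as a sketch; `tryResume` and `freshSpawn` are hypothetical stand-ins for the `sessionManager.resumeSession` / `startSession` calls above:

```js
// The resume-then-fallback shape used by the orphan recovery methods.
// `tryResume` and `freshSpawn` are hypothetical stand-ins, not package APIs.
async function recover(previousSessionId, tryResume, freshSpawn) {
    if (previousSessionId) {
        try {
            return await tryResume(previousSessionId); // preserves provider context
        }
        catch (error) {
            console.warn('resume failed, falling back to fresh spawn:', error);
        }
    }
    return freshSpawn(); // context is rebuilt from the task prompt instead
}
```
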
    /**
     * Recovers a single orphaned merge steward task by resuming or re-spawning.
     * Tries to resume the previous provider session first (preserves context),
     * falls back to a fresh spawn via spawnMergeStewardForTask.
     */
    async recoverOrphanedStewardTask(steward, task, taskMeta) {
        const stewardId = asEntityId(steward.id);
        // 1. Resolve worktree — verify it still exists
        let worktreePath = taskMeta?.worktree;
        if (worktreePath) {
            const exists = await this.worktreeManager.worktreeExists(worktreePath);
            if (!exists) {
                console.warn(`[dispatch-daemon] Worktree ${worktreePath} no longer exists for steward task ${task.id}, using project root`);
                worktreePath = undefined;
            }
        }
        const workingDirectory = worktreePath ?? this.config.projectRoot;
        // 2. Try resume first if we have a previous session ID
        const previousSessionId = taskMeta?.sessionId;
        if (previousSessionId) {
            try {
                const { session, events } = await this.sessionManager.resumeSession(stewardId, {
                    providerSessionId: previousSessionId,
                    workingDirectory,
                    worktree: worktreePath,
                    checkReadyQueue: false,
                    resumePrompt: [
                        'Your previous session was interrupted by a server restart.',
                        `You are still assigned to review/merge task ${task.id}: "${task.title}".`,
                        'Please continue the merge review from where you left off.',
                    ].join('\n'),
                });
                // Record session history entry for recovered steward session
                const resumeHistoryEntry = {
                    sessionId: session.id,
                    providerSessionId: session.providerSessionId,
                    agentId: stewardId,
                    agentName: steward.name,
                    agentRole: 'steward',
                    startedAt: createTimestamp(),
                };
                const updatedTask = await this.api.get(task.id);
                if (updatedTask) {
                    const metadataWithHistory = appendTaskSessionHistory(updatedTask.metadata, resumeHistoryEntry);
                    await this.api.update(task.id, { metadata: metadataWithHistory });
                }
                if (this.config.onSessionStarted) {
                    this.config.onSessionStarted(session, events, stewardId, `[resumed steward session for task ${task.id}]`);
                }
                this.emitter.emit('agent:spawned', stewardId, worktreePath);
                console.log(`[dispatch-daemon] Resumed steward session for orphaned task ${task.id} on ${steward.name}`);
                return;
            }
            catch (error) {
                console.warn(`[dispatch-daemon] Failed to resume steward session ${previousSessionId} for ${steward.name}, falling back to fresh spawn:`, error);
            }
        }
        // 3. Fall back to fresh spawn (spawnMergeStewardForTask handles metadata update AND session history)
        await this.spawnMergeStewardForTask(steward, task);
        console.log(`[dispatch-daemon] Spawned fresh steward session for orphaned task ${task.id} on ${steward.name}`);
    }
    /**
     * Assigns the highest priority unassigned task to a worker.
     * Handles handoff branches by reusing existing worktrees.
     * Respects agent pool capacity limits.
     */
    async assignTaskToWorker(worker) {
        // Get ready tasks (already filtered for blocked, draft plans, future-scheduled, etc.)
        // and sorted by effective priority via api.ready()
        const readyTasks = await this.api.ready();
        const unassignedTasks = readyTasks.filter((t) => !t.assignee);
        if (unassignedTasks.length === 0) {
            return false;
        }
        // ready() already sorts by effective priority, take the first
        const task = unassignedTasks[0];
        const workerId = asEntityId(worker.id);
        // Check pool capacity before spawning
        if (this.poolService) {
            const meta = getAgentMetadata(worker);
            if (meta && meta.agentRole === 'worker') {
                const workerMeta = meta;
                const spawnRequest = {
                    role: 'worker',
                    workerMode: workerMeta.workerMode,
                    agentId: workerId,
                };
                const poolCheck = await this.poolService.canSpawn(spawnRequest);
                if (!poolCheck.canSpawn) {
                    console.log(`[dispatch-daemon] Pool capacity reached for worker ${worker.name}: ${poolCheck.reason}`);
                    return false;
                }
            }
        }
        // Check for existing worktree/branch in task metadata
        // Priority: handoff > existing assignment > create new
        const taskMeta = getOrchestratorTaskMeta(task.metadata);
        const handoffBranch = taskMeta?.handoffBranch;
        const handoffWorktree = taskMeta?.handoffWorktree;
        const existingBranch = taskMeta?.branch;
        const existingWorktree = taskMeta?.worktree;
        let worktreePath;
        let branch;
        // Check handoff first (takes priority)
        if (handoffBranch && handoffWorktree) {
            worktreePath = handoffWorktree;
            branch = handoffBranch;
            // Verify the worktree still exists
            const exists = await this.worktreeManager.worktreeExists(worktreePath);
            if (!exists) {
                // Worktree was cleaned up, create a new one
                const worktreeResult = await this.createWorktreeForTask(worker, task);
                worktreePath = worktreeResult.path;
                branch = worktreeResult.branch;
            }
        }
        // Check for existing assignment worktree (from previous attempt)
        else if (existingBranch && existingWorktree) {
            worktreePath = existingWorktree;
            branch = existingBranch;
            // Verify the worktree still exists
            const exists = await this.worktreeManager.worktreeExists(worktreePath);
            if (!exists) {
                // Worktree was cleaned up, create a new one
                const worktreeResult = await this.createWorktreeForTask(worker, task);
                worktreePath = worktreeResult.path;
                branch = worktreeResult.branch;
            }
        }
        // No existing worktree, create a new one
        else {
            const worktreeResult = await this.createWorktreeForTask(worker, task);
            worktreePath = worktreeResult.path;
            branch = worktreeResult.branch;
        }
        // Build initial prompt with task context
        const initialPrompt = await this.buildTaskPrompt(task, workerId);
        // Spawn worker INSIDE the worktree BEFORE dispatching the task.
        // This ensures that if the session fails to start (e.g. provider not
        // available), the task stays unassigned and available for other agents.
        const { session, events } = await this.sessionManager.startSession(workerId, {
            workingDirectory: worktreePath,
            worktree: worktreePath,
            initialPrompt,
        });
        // Session started successfully — now dispatch the task (assigns + sends message)
        const dispatchOptions = {
            branch,
            worktree: worktreePath,
            markAsStarted: true,
            priority: task.priority,
            sessionId: session.providerSessionId ?? session.id,
        };
        await this.dispatchService.dispatch(task.id, workerId, dispatchOptions);
        this.emitter.emit('task:dispatched', task.id, workerId);
        // Record session history entry for this worker session
        // Re-read task to get metadata after dispatch wrote to it
        const updatedTask = await this.api.get(task.id);
        if (updatedTask) {
            const sessionHistoryEntry = {
                sessionId: session.id,
                providerSessionId: session.providerSessionId,
                agentId: workerId,
                agentName: worker.name,
                agentRole: 'worker',
                startedAt: createTimestamp(),
            };
            const metadataWithHistory = appendTaskSessionHistory(updatedTask.metadata, sessionHistoryEntry);
            await this.api.update(task.id, { metadata: metadataWithHistory });
        }
        // Call the onSessionStarted callback if provided (for event saver and initial prompt saving)
        if (this.config.onSessionStarted) {
            this.config.onSessionStarted(session, events, workerId, initialPrompt);
        }
        // Notify pool service that agent was spawned
        if (this.poolService) {
            await this.poolService.onAgentSpawned(workerId);
        }
        this.emitter.emit('agent:spawned', workerId, worktreePath);
        return true;
    }
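
Editor's note: the failure-safety property of `assignTaskToWorker` comes purely from ordering: the session is started before `dispatch()` assigns the task, so a spawn failure throws first and the task remains claimable. A distilled sketch using the same collaborator names as above:

```js
// Ordering sketch distilled from assignTaskToWorker: if startSession()
// throws, dispatch() is never reached, so the task keeps no assignee and
// stays available to other workers on the next poll cycle.
async function spawnThenDispatch(sessionManager, dispatchService, task, workerId, worktreePath, initialPrompt) {
    const { session } = await sessionManager.startSession(workerId, {
        workingDirectory: worktreePath,
        worktree: worktreePath,
        initialPrompt,
    }); // a throw here means the task was never assigned
    await dispatchService.dispatch(task.id, workerId, {
        worktree: worktreePath,
        markAsStarted: true,
        sessionId: session.providerSessionId ?? session.id,
    });
    return session;
}
```
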
    /**
     * Creates a worktree for a task assignment.
     * Includes dependency installation so workers have node_modules available.
     */
    async createWorktreeForTask(worker, task) {
        return this.worktreeManager.createWorktree({
            agentName: worker.name,
            taskId: task.id,
            taskTitle: task.title,
            installDependencies: true,
        });
    }
    /**
     * Builds the initial prompt for a task assignment.
     * Includes the worker role prompt followed by task-specific details.
     * Fetches the description Document content so handoff notes (appended to
     * description) are automatically included.
     */
    async buildTaskPrompt(task, workerId) {
        const parts = [];
        // Load and include the worker role prompt, framed as operating instructions
        // so Claude understands this is its role definition, not file content
        const roleResult = loadRolePrompt('worker', undefined, { projectRoot: this.config.projectRoot, workerMode: 'ephemeral' });
        if (roleResult) {
            parts.push('Please read and internalize the following operating instructions. These define your role and how you should behave:', '', roleResult.prompt, '', '---', '');
        }
        // Get the director ID for context
        const director = await this.agentRegistry.getDirector();
        const directorId = director?.id ?? 'unknown';
        parts.push('## Task Assignment', '', `**Worker ID:** ${workerId}`, `**Director ID:** ${directorId}`, `**Task ID:** ${task.id}`, `**Title:** ${task.title}`);
        if (task.priority !== undefined) {
            parts.push(`**Priority:** ${task.priority}`);
        }
        // Fetch and include the actual description content
        if (task.descriptionRef) {
            try {
                const doc = await this.api.get(asElementId(task.descriptionRef));
                if (doc?.content) {
                    parts.push('', '### Description', doc.content);
                }
            }
            catch {
                parts.push('', `**Description Document:** ${task.descriptionRef}`);
            }
        }
        // Include acceptance criteria if any
        if (task.acceptanceCriteria) {
            parts.push('', '### Acceptance Criteria', task.acceptanceCriteria);
        }
        // Handoff notes are now embedded in the description — no separate section needed
        // Explicit action instructions so the worker knows what to do
        parts.push('', '### Instructions', '1. Read the task title and acceptance criteria carefully to decide the correct action.', '2. If the task asks you to **hand off**, run: `sf task handoff ' + task.id + ' --message "your handoff note"` and stop.', '3. Otherwise, complete the task: make changes, commit, push, then run: `sf task complete ' + task.id + '`.');
        return parts.join('\n');
    }
    /**
     * Builds the initial prompt for a merge steward session.
     * Includes the steward role prompt (steward-merge.md) followed by task context.
     *
     * @param task - The task being reviewed
     * @param stewardId - The steward's entity ID
     * @param stewardFocus - The steward's focus area (merge or health)
     * @param syncResult - Optional result from pre-spawn branch sync
     */
    async buildStewardPrompt(task, stewardId, stewardFocus = 'merge', syncResult) {
        const parts = [];
        // Load and include the steward role prompt
        const roleResult = loadRolePrompt('steward', stewardFocus, { projectRoot: this.config.projectRoot });
        if (roleResult) {
            parts.push('Please read and internalize the following operating instructions. These define your role and how you should behave:', '', roleResult.prompt, '', '---', '');
        }
        // Get orchestrator metadata for PR/branch info
        const taskMeta = task.metadata;
        const orchestratorMeta = taskMeta?.orchestrator;
        const prUrl = orchestratorMeta?.mergeRequestUrl;
        const branch = orchestratorMeta?.branch;
        // Get the director ID for context
        const director = await this.agentRegistry.getDirector();
        const directorId = director?.id ?? 'unknown';
        parts.push('## Merge Request Assignment', '', `**Steward ID:** ${stewardId}`, `**Director ID:** ${directorId}`, `**Task ID:** ${task.id}`, `**Title:** ${task.title}`);
        if (branch) {
            parts.push(`**Branch:** ${branch}`);
        }
        if (prUrl) {
            parts.push(`**PR URL:** ${prUrl}`);
        }
        if (task.priority !== undefined) {
            parts.push(`**Priority:** ${task.priority}`);
        }
        // Include sync status section if sync was attempted
        if (syncResult) {
            parts.push('', '## Sync Status', '');
            parts.push('The branch was synced with master before your review.', '');
            if (syncResult.success) {
                parts.push('**Result**: SUCCESS');
                parts.push('');
                parts.push('Branch is up-to-date with master. `git diff origin/master..HEAD` will show only this task\'s changes.');
            }
            else if (syncResult.conflicts && syncResult.conflicts.length > 0) {
                parts.push('**Result**: CONFLICTS');
                parts.push('');
                parts.push('**Conflicted files**:');
                for (const file of syncResult.conflicts) {
                    parts.push(`- ${file}`);
                }
                parts.push('');
                parts.push('**Your first step is to resolve these conflicts before reviewing.**');
                parts.push('See the conflict resolution guidance in your operating instructions.');
            }
            else {
                parts.push('**Result**: ERROR');
                parts.push('');
                parts.push(`**Error**: ${syncResult.error ?? syncResult.message}`);
                parts.push('');
                parts.push('You may need to manually sync the branch with `sf task sync ' + task.id + '`.');
            }
        }
        // Fetch and include the description content
        if (task.descriptionRef) {
            try {
                const doc = await this.api.get(asElementId(task.descriptionRef));
                if (doc?.content) {
                    parts.push('', '### Task Description', doc.content);
                }
            }
            catch {
                parts.push('', `**Description Document:** ${task.descriptionRef}`);
            }
        }
        // Include acceptance criteria if any
        if (task.acceptanceCriteria) {
            parts.push('', '### Acceptance Criteria', task.acceptanceCriteria);
        }
        return parts.join('\n');
    }
    /**
     * Spawns a merge steward session for a task in REVIEW status.
     * Syncs the branch with master before spawning to ensure clean diffs.
     * Respects agent pool capacity limits.
     */
    async spawnMergeStewardForTask(steward, task) {
        const stewardId = asEntityId(steward.id);
        const meta = getAgentMetadata(steward);
        const stewardFocus = meta?.stewardFocus ?? 'merge';
        // Check pool capacity before spawning
        if (this.poolService && meta) {
            const spawnRequest = {
                role: 'steward',
                stewardFocus: meta.stewardFocus,
                agentId: stewardId,
            };
            const poolCheck = await this.poolService.canSpawn(spawnRequest);
            if (!poolCheck.canSpawn) {
                console.log(`[dispatch-daemon] Pool capacity reached for steward ${steward.name}: ${poolCheck.reason}`);
                return;
            }
        }
        // Get task metadata for worktree path
        const taskMeta = task.metadata;
        const orchestratorMeta = taskMeta?.orchestrator;
        let worktreePath = orchestratorMeta?.worktree;
        // Verify the worktree still exists; create a fresh one if cleaned up (NEVER fall back to project root)
        if (worktreePath) {
            const exists = await this.worktreeManager.worktreeExists(worktreePath);
            if (!exists) {
                console.warn(`[dispatch-daemon] Worktree ${worktreePath} no longer exists for task ${task.id}, creating fresh worktree`);
                const sourceBranch = orchestratorMeta?.branch;
                if (sourceBranch) {
                    try {
                        const result = await this.worktreeManager.createReadOnlyWorktree({
                            agentName: stewardId,
                            purpose: `steward-${task.id}`,
                        });
                        worktreePath = result.path;
                    }
                    catch (e) {
                        console.error(`[dispatch-daemon] Failed to create steward worktree: ${e}`);
                        worktreePath = undefined;
                    }
                }
                else {
                    worktreePath = undefined;
                }
            }
        }
        // Guard: never spawn a steward in the project root — skip if no worktree
        if (!worktreePath) {
            this.emitter.emit('daemon:notification', {
                type: 'warning',
                title: 'Merge steward skipped',
                message: `Cannot spawn merge steward for task ${task.id}: worktree missing and no branch info available to create a new one.`,
            });
            return;
        }
        // Phase 1: Sync branch with master before spawning steward
        // This ensures `git diff origin/master..HEAD` shows only the task's changes
        let syncResult;
        if (worktreePath) {
            console.log(`[dispatch-daemon] Syncing task ${task.id} branch before steward spawn...`);
            syncResult = await this.syncTaskBranch(task);
            // Store sync result in task metadata for audit trail
            await this.api.update(task.id, {
                metadata: updateOrchestratorTaskMeta(task.metadata, {
                    lastSyncResult: {
                        success: syncResult.success,
                        conflicts: syncResult.conflicts,
                        error: syncResult.error,
                        message: syncResult.message,
                        syncedAt: new Date().toISOString(),
                    },
                }),
            });
        }
        // Build the steward prompt with full context including sync result
        const initialPrompt = await this.buildStewardPrompt(task, stewardId, stewardFocus, syncResult);
        const workingDirectory = worktreePath;
        // Start the steward session
        const { session, events } = await this.sessionManager.startSession(stewardId, {
            workingDirectory,
            worktree: worktreePath,
            initialPrompt,
            interactive: false, // Stewards use headless mode
        });
        // Record steward assignment and session history on the task to prevent double-dispatch and enable recovery.
        // Setting task.assignee makes the steward visible in the UI and enables
        // getAgentTasks() lookups for orphan recovery.
        // Re-read task to get latest metadata (after sync result was stored)
        const taskAfterSync = await this.api.get(task.id);
        const sessionHistoryEntry = {
            sessionId: session.id,
            providerSessionId: session.providerSessionId,
            agentId: stewardId,
            agentName: steward.name,
            agentRole: 'steward',
            startedAt: createTimestamp(),
        };
        // First append session history, then apply steward assignment metadata
        const metadataWithHistory = appendTaskSessionHistory(taskAfterSync?.metadata, sessionHistoryEntry);
        const finalMetadata = updateOrchestratorTaskMeta(metadataWithHistory, {
            assignedAgent: stewardId,
            mergeStatus: 'testing',
            sessionId: session.providerSessionId ?? session.id,
        });
        await this.api.update(task.id, {
            assignee: stewardId,
            metadata: finalMetadata,
        });
        // Call the onSessionStarted callback if provided
        if (this.config.onSessionStarted) {
            this.config.onSessionStarted(session, events, stewardId, initialPrompt);
        }
        // Notify pool service that agent was spawned
        if (this.poolService) {
            await this.poolService.onAgentSpawned(stewardId);
        }
        this.emitter.emit('agent:spawned', stewardId, worktreePath);
        console.log(`[dispatch-daemon] Spawned merge steward ${steward.name} for task ${task.id}`);
    }
    /**
     * Processes an inbox item for an agent.
     * Handles dispatch messages and delegates to role-specific processors.
     */
    async processInboxItem(agent, item, meta) {
        const agentId = asEntityId(agent.id);
        const activeSession = this.sessionManager.getActiveSession(agentId);
        // Get the message to check its type
        const message = await this.api.get(asElementId(item.messageId));
        if (!message) {
            // Message not found, mark as read and skip
            this.inboxService.markAsRead(item.id);
            return false;
        }
        const messageMetadata = message.metadata;
        const isDispatchMessage = messageMetadata?.type === 'task-dispatch' ||
            messageMetadata?.type === 'task-assignment' ||
            messageMetadata?.type === 'task-reassignment';
        // Handle based on agent role and session state
        if (meta.agentRole === 'worker' && meta.workerMode === 'ephemeral') {
            return this.processEphemeralWorkerMessage(agent, message, item, activeSession, isDispatchMessage);
        }
        else if (meta.agentRole === 'steward') {
            // Stewards use the same two-path model as ephemeral workers
            return this.processEphemeralWorkerMessage(agent, message, item, activeSession, isDispatchMessage);
        }
        else if (meta.agentRole === 'worker' && meta.workerMode === 'persistent') {
            return this.processPersistentAgentMessage(agent, message, item, activeSession);
        }
        else if (meta.agentRole === 'director') {
            if (this.config.directorInboxForwardingEnabled) {
                // Only forward if the user hasn't typed recently (debounce)
                if (activeSession) {
                    const idleMs = this.sessionManager.getSessionUserIdleMs(agentId);
                    // idleMs is undefined when no user input has been recorded yet — treat as idle
                    if (idleMs !== undefined && idleMs < this.config.directorInboxIdleThresholdMs) {
                        // User is actively typing — leave unread for next poll cycle
                        return false;
                    }
                }
                return this.processPersistentAgentMessage(agent, message, item, activeSession);
            }
            // Default: leave inbox items unread for manual sf inbox checks
            return false;
        }
        return false;
    }
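
Editor's note: the branching in `processInboxItem` is easier to see condensed into a single routing function. The sketch below mirrors the branches above (the director's user-idle debounce is elided for brevity); it is illustrative, not a package API:

```js
// Condensed routing logic of processInboxItem: which handler the daemon
// delegates to, given the agent metadata shape used above.
function routeInboxItem(meta, forwardingEnabled) {
    if (meta.agentRole === 'worker' && meta.workerMode === 'ephemeral') return 'ephemeral';
    if (meta.agentRole === 'steward') return 'ephemeral'; // same two-path model
    if (meta.agentRole === 'worker' && meta.workerMode === 'persistent') return 'persistent';
    if (meta.agentRole === 'director') return forwardingEnabled ? 'persistent' : 'leave-unread';
    return 'leave-unread'; // manual `sf inbox` checks
}
```
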
    /**
     * Two-path model for ephemeral worker messages:
     * 1. Dispatch message → mark as read (task dispatch handled by pollWorkerAvailability)
     * 2. Non-dispatch message → leave unread if active session (don't forward/interrupt),
     *    or accumulate as deferred item for triage if idle
     *
     * Returns true when the item was handled (marked as read); items left
     * unread are accumulated by pollInboxes for triage batching.
     */
    async processEphemeralWorkerMessage(_agent, _message, item, activeSession, isDispatchMessage) {
        if (isDispatchMessage) {
            // Dispatch message → mark as read (spawn handled elsewhere)
            this.inboxService.markAsRead(item.id);
            return true;
        }
        // Non-dispatch message:
        if (activeSession) {
            // Agent is busy → leave message unread for next poll cycle
            // Do NOT forward to active session (keeps task-focused sessions uninterrupted)
            return false;
        }
        // Agent is idle → leave unread, will be picked up by triage batch
        // The caller (pollInboxes) accumulates these for processTriageBatch
        return false;
    }
    /**
     * Process message for persistent workers and directors.
     * - If in session -> forward as user input
     * - Otherwise -> leave for next session
     */
    async processPersistentAgentMessage(agent, message, item, activeSession) {
        const agentId = asEntityId(agent.id);
        if (activeSession) {
            // Guard against duplicate forwarding:
            // If another concurrent pollInboxes() call is already processing this item,
            // skip it to prevent duplicate message delivery. The in-flight call will
            // mark it as read when done.
            if (this.forwardingInboxItems.has(item.id)) {
                return false;
            }
            // Mark as in-flight before the async operation
            this.forwardingInboxItems.add(item.id);
            try {
                // In session -> forward as user input
                const forwardedContent = await this.formatForwardedMessage(message);
                await this.sessionManager.messageSession(activeSession.id, {
                    content: forwardedContent,
                    senderId: message.sender,
                });
                this.inboxService.markAsRead(item.id);
                this.emitter.emit('message:forwarded', message.id, agentId);
                return true;
            }
            finally {
                // Always clean up the in-flight tracking, even on error
                this.forwardingInboxItems.delete(item.id);
            }
        }
        // No session -> leave message unread for next session
        return false;
    }
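
Editor's note: the Set-based in-flight guard in `processPersistentAgentMessage` is a reusable concurrency pattern: concurrent callers skip work that another caller already owns, and the `finally` block guarantees cleanup even when forwarding throws. A self-contained sketch:

```js
// The in-flight guard pattern from processPersistentAgentMessage, distilled.
// `forward` is a hypothetical async handler standing in for the
// messageSession/markAsRead sequence above.
const inFlight = new Set();
async function forwardOnce(id, forward) {
    if (inFlight.has(id)) return false; // another poll cycle owns this item
    inFlight.add(id);
    try {
        await forward(id);
        return true;
    }
    finally {
        inFlight.delete(id); // always runs, even if forward() throws
    }
}
```
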
    /**
     * Processes deferred inbox items for idle agents by spawning triage sessions.
     *
     * Groups items by agentId then channelId. For each agent:
     * - Skips if agent now has an active session (messages stay unread for next cycle)
     * - Spawns triage session for the first channel group only (single-session constraint)
     * - Marks those items as read
     *
     * @returns Number of items processed
     */
    async processTriageBatch(deferredItems) {
        let processed = 0;
        for (const [agentId, { agent, items }] of deferredItems) {
            // Re-check: agent may have had a session started by task dispatch.
            // Known race: between this check and startSession() below, another poll
            // cycle could spawn a session for the same agent. If that happens,
            // startSession() fails, the error is caught, items stay unread, and
            // retry happens next cycle. This is acceptable — not a bug.
            const activeSession = this.sessionManager.getActiveSession(asEntityId(agentId));
            if (activeSession) {
                // Agent is now busy — leave messages unread for next cycle
                continue;
            }
            // Group items by channelId
            const byChannel = new Map();
            for (const item of items) {
                const channelKey = String(item.channelId);
                if (!byChannel.has(channelKey)) {
                    byChannel.set(channelKey, []);
                }
                byChannel.get(channelKey).push(item);
            }
            // Spawn triage for the first channel group only (single-session constraint)
            const [channelId, channelItems] = byChannel.entries().next().value;
            try {
                await this.spawnTriageSession(agent, channelItems, channelId);
                // Count items as processed only after spawn succeeds. Items are
                // marked as read in spawnTriageSession's exit handler after the
                // triage session completes. If the session crashes, items stay
                // unread and retry next cycle.
                processed += channelItems.length;
            }
            catch (error) {
                console.error(`[dispatch-daemon] Failed to spawn triage session for agent ${agent.name}:`, error);
            }
            // Only one triage session per poll cycle per agent — remaining channels
            // will be picked up in subsequent cycles
        }
        return processed;
    }
    /**
     * Spawns a triage session for an agent to process deferred messages.
     *
     * Creates a read-only worktree on the default branch, builds the triage prompt
     * with hydrated message contents, starts a headless session, and registers
     * worktree cleanup on session exit.
     */
    async spawnTriageSession(agent, items, channelId) {
        const agentId = asEntityId(agent.id);
        // Create a read-only worktree (detached HEAD on default branch).
        // The path is deterministic ({agentName}-triage), so a stale worktree
        // from a previous crash would cause WORKTREE_EXISTS. Handle by removing
        // the stale worktree and retrying once.
        let worktreeResult;
        try {
            worktreeResult = await this.worktreeManager.createReadOnlyWorktree({
                agentName: agent.name,
                purpose: 'triage',
            });
        }
        catch (error) {
            const errorCode = error?.code;
            if (errorCode === 'WORKTREE_EXISTS') {
                // Remove stale worktree from a previous crash and retry.
                // Path must match the relative path used by createReadOnlyWorktree.
                try {
                    await this.worktreeManager.removeWorktree(`.stoneforge/.worktrees/${agent.name}-triage`, { force: true });
                }
                catch {
                    // Ignore removal errors
                }
                worktreeResult = await this.worktreeManager.createReadOnlyWorktree({
                    agentName: agent.name,
                    purpose: 'triage',
                });
            }
            else {
                throw error;
            }
        }
        // Fetch messages and build the triage prompt
        // Pair each message with its inbox item ID for the triage prompt
        const triageItems = [];
        for (const item of items) {
            const message = await this.api.get(asElementId(item.messageId));
            if (message) {
                triageItems.push({ message, inboxItemId: item.id });
            }
        }
        // All message fetches failed — nothing to triage; clean up worktree
        if (triageItems.length === 0) {
            try {
                await this.worktreeManager.removeWorktree(worktreeResult.path);
            }
            catch {
                // Ignore cleanup errors
            }
            return;
        }
        const initialPrompt = await this.buildTriagePrompt(agent, triageItems, channelId);
        // Start a headless session in the read-only worktree
        const { session, events } = await this.sessionManager.startSession(agentId, {
            workingDirectory: worktreeResult.path,
            worktree: worktreeResult.path,
            initialPrompt,
            interactive: false,
        });
        // Call the onSessionStarted callback if provided
        if (this.config.onSessionStarted) {
            this.config.onSessionStarted(session, events, agentId, initialPrompt);
        }
        // On session exit: mark triage items as read and clean up worktree.
        // Items stay unread if the session crashes, so they retry next cycle.
        // Use .once() since a session only exits once; bump maxListeners to avoid false warning.
        events.setMaxListeners(events.getMaxListeners() + 1);
        events.once('exit', async () => {
            // Mark triage items as read. Use batch for efficiency.
            // Errors are non-fatal — items stay unread and retry next cycle.
            try {
                this.inboxService.markAsReadBatch(items.map((item) => item.id));
            }
            catch (error) {
                console.warn('[dispatch-daemon] Failed to mark triage items as read:', error);
            }
            try {
                await this.worktreeManager.removeWorktree(worktreeResult.path);
            }
            catch {
                // Ignore cleanup errors — worktree may already be removed
            }
        });
        this.emitter.emit('agent:triage-spawned', agentId, channelId, worktreeResult.path);
    }
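
Editor's note: the WORKTREE_EXISTS handling above is a remove-and-retry-once pattern for deterministic paths left behind by a crash. A generic sketch; `create` and `remove` are hypothetical stand-ins for the worktreeManager calls:

```js
// The remove-and-retry-once shape used for the deterministic triage
// worktree path. A second failure after cleanup propagates to the caller.
async function createWithStaleCleanup(create, remove, stalePath) {
    try {
        return await create();
    }
    catch (error) {
        if (error?.code !== 'WORKTREE_EXISTS') throw error;
        await remove(stalePath).catch(() => { /* ignore removal errors */ });
        return create(); // retry exactly once
    }
}
```
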
    /**
     * Builds the triage prompt by loading the message-triage template and
     * hydrating it with the actual message contents.
     */
    async buildTriagePrompt(agent, triageItems, channelId) {
        // Load the triage prompt template
        const triageResult = loadTriagePrompt({ projectRoot: this.config.projectRoot });
        if (!triageResult) {
            throw new Error('Failed to load message-triage prompt template');
        }
        // Hydrate each message's content
        const formattedMessages = [];
        for (const { message, inboxItemId } of triageItems) {
            const senderId = message.sender ?? 'unknown';
            const timestamp = message.createdAt ?? 'unknown';
            // Fetch content document if contentRef is available
            let content = '[No content available]';
            if (message.contentRef) {
                try {
                    const doc = await this.api.get(asElementId(message.contentRef));
                    if (doc?.content) {
                        content = doc.content;
                    }
                }
                catch (error) {
                    console.warn(`[dispatch-daemon] Failed to fetch content for message ${message.id}:`, error);
                }
            }
            formattedMessages.push(`--- Inbox Item ID: ${inboxItemId} | Message ID: ${message.id} | From: ${senderId} | At: ${timestamp} ---`, content, '');
        }
        // Replace the {{MESSAGES}} placeholder with hydrated content
        const messagesBlock = formattedMessages.join('\n');
        const prompt = triageResult.prompt.replace('{{MESSAGES}}', messagesBlock);
        // Get the director ID for context
        const director = await this.agentRegistry.getDirector();
        const directorId = director?.id ?? 'unknown';
        return `${prompt}\n\n---\n\n**Worker ID:** ${agent.id}\n**Director ID:** ${directorId}\n**Channel:** ${channelId}\n**Agent:** ${agent.name}\n**Message count:** ${triageItems.length}`;
    }
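
Editor's note: one subtlety of the hydration step: `String.prototype.replace` with a string pattern substitutes only the first occurrence, which is sufficient because the template contains a single `{{MESSAGES}}` marker. A standalone demonstration with invented sample data:

```js
// Placeholder hydration as done by buildTriagePrompt; the template and
// message data below are made up for illustration.
const template = 'Triage the following messages:\n\n{{MESSAGES}}';
const block = [
    '--- Inbox Item ID: ii-1 | Message ID: m-1 | From: el-2 | At: 2024-01-01 ---',
    'Please look at the failing CI job.',
    '',
].join('\n');
const prompt = template.replace('{{MESSAGES}}', block); // first occurrence only
```
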
    /**
     * Formats a message for forwarding to an agent session.
     * Fetches document content from contentRef to provide actual message text.
     */
    async formatForwardedMessage(message) {
        let content = '[No content available]';
        if (message.contentRef) {
            try {
                const doc = await this.api.get(asElementId(message.contentRef));
                if (doc?.content) {
                    content = doc.content;
                }
            }
            catch (error) {
                console.warn(`[dispatch-daemon] Failed to fetch content for forwarded message ${message.id}:`, error);
            }
        }
        return content; // No prefix — messageSession() handles the [Message from ...] prefix
    }
    /**
     * Syncs a task's branch with the main branch before steward review.
     *
     * This ensures that when a merge steward reviews a PR, the diff against
     * master only shows the task's actual changes (not other merged work).
     *
     * @param task - The task to sync
     * @returns SyncResult with success/conflicts/error status
     */
    async syncTaskBranch(task) {
        const taskMeta = task.metadata;
        const orchestratorMeta = taskMeta?.orchestrator;
        const worktreePath = orchestratorMeta?.worktree;
        const branch = orchestratorMeta?.branch;
        // Check for worktree path
        if (!worktreePath) {
            return {
                success: false,
                error: 'No worktree path found in task metadata',
                message: 'Task has no worktree path - cannot sync',
            };
        }
        // Verify worktree exists
        const worktreeExists = await this.worktreeManager.worktreeExists(worktreePath);
        if (!worktreeExists) {
            return {
                success: false,
                error: `Worktree does not exist: ${worktreePath}`,
                message: `Worktree not found at ${worktreePath}`,
                worktreePath,
                branch,
            };
        }
        // Import node modules for git operations
        const { execFile } = await import('node:child_process');
        const { promisify } = await import('node:util');
        const path = await import('node:path');
        const execFileAsync = promisify(execFile);
        // Resolve full worktree path
        const workspaceRoot = this.worktreeManager.getWorkspaceRoot();
        const fullWorktreePath = path.isAbsolute(worktreePath)
            ? worktreePath
            : path.join(workspaceRoot, worktreePath);
        // Fetch from origin
        try {
            await execFileAsync('git', ['fetch', 'origin'], {
                cwd: fullWorktreePath,
                encoding: 'utf8',
                timeout: 60_000,
            });
        }
        catch (fetchError) {
            return {
                success: false,
                error: `Failed to fetch from origin: ${fetchError.message}`,
                message: 'Git fetch failed',
                worktreePath,
                branch,
            };
        }
        // Get default branch
        const defaultBranch = await this.worktreeManager.getDefaultBranch();
        const remoteBranch = `origin/${defaultBranch}`;
        // Attempt to merge
        try {
            await execFileAsync('git', ['merge', remoteBranch, '--no-edit'], {
                cwd: fullWorktreePath,
                encoding: 'utf8',
                timeout: 120_000,
            });
            // Merge succeeded
            console.log(`[dispatch-daemon] Synced task ${task.id} branch with ${remoteBranch}`);
            return {
                success: true,
                message: `Branch synced with ${remoteBranch}`,
                worktreePath,
                branch,
            };
        }
        catch (mergeError) {
            // Check for merge conflicts
            try {
                const { stdout: statusOutput } = await execFileAsync('git', ['status', '--porcelain'], {
                    cwd: fullWorktreePath,
                    encoding: 'utf8',
                });
                // Parse conflicted files (UU, AA, DD, AU, UA, DU, UD)
                const conflictPatterns = /^(UU|AA|DD|AU|UA|DU|UD)\s+(.+)$/gm;
                const conflicts = [];
                let match;
                while ((match = conflictPatterns.exec(statusOutput)) !== null) {
                    conflicts.push(match[2]);
                }
                if (conflicts.length > 0) {
                    console.log(`[dispatch-daemon] Merge conflicts detected for task ${task.id}: ${conflicts.join(', ')}`);
                    return {
                        success: false,
                        conflicts,
                        message: `Merge conflicts detected in ${conflicts.length} file(s)`,
                        worktreePath,
                        branch,
                    };
                }
                // Some other merge error
                return {
                    success: false,
                    error: mergeError.message,
                    message: 'Merge failed (not due to conflicts)',
                    worktreePath,
                    branch,
                };
            }
            catch {
                return {
                    success: false,
                    error: mergeError.message,
                    message: 'Merge failed',
                    worktreePath,
                    branch,
                };
            }
        }
    }
}
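
Editor's note: the conflict detection in `syncTaskBranch` matches the two-letter unmerged status codes from `git status --porcelain`. A standalone demonstration with invented sample output:

```js
// Conflict extraction as in syncTaskBranch: collect file paths for the
// unmerged status codes. The status lines below are fabricated samples.
const statusOutput = [
    'UU src/index.ts',
    ' M README.md',
    'AA dist/bundle.js',
].join('\n');
const conflictPatterns = /^(UU|AA|DD|AU|UA|DU|UD)\s+(.+)$/gm;
const conflicts = [];
let match;
while ((match = conflictPatterns.exec(statusOutput)) !== null) {
    conflicts.push(match[2]);
}
console.log(conflicts); // ['src/index.ts', 'dist/bundle.js']
```
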
// ============================================================================
// Factory Function
// ============================================================================
/**
 * Creates a DispatchDaemon instance
 */
export function createDispatchDaemon(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService) {
    return new DispatchDaemonImpl(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService);
}
//# sourceMappingURL=dispatch-daemon.js.map
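
Editor's note: a wiring sketch for the factory. The collaborators are passed positionally per the signature above; every dependency instance here is assumed to be constructed elsewhere in the package, so this is not a self-contained setup:

```js
// Sketch only: all collaborator instances (api, agentRegistry, ...) are
// assumed to come from the rest of @stoneforge/smithy; the config object
// is optional and is normalized/clamped by the daemon.
const daemon = createDispatchDaemon(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, { pollIntervalMs: 10_000, maxSessionDurationMs: 30 * 60_000 }, poolService);
daemon.on('task:dispatched', (taskId, workerId) => {
    console.log(`dispatched ${taskId} -> ${workerId}`);
});
```
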