@elhu/pit 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +380 -0
- package/dist/adapters/claude-code.d.ts +70 -0
- package/dist/adapters/claude-code.d.ts.map +1 -0
- package/dist/adapters/claude-code.js +166 -0
- package/dist/adapters/claude-code.js.map +1 -0
- package/dist/adapters/index.d.ts +16 -0
- package/dist/adapters/index.d.ts.map +1 -0
- package/dist/adapters/index.js +49 -0
- package/dist/adapters/index.js.map +1 -0
- package/dist/adapters/opencode.d.ts +53 -0
- package/dist/adapters/opencode.d.ts.map +1 -0
- package/dist/adapters/opencode.js +120 -0
- package/dist/adapters/opencode.js.map +1 -0
- package/dist/adapters/process-utils.d.ts +29 -0
- package/dist/adapters/process-utils.d.ts.map +1 -0
- package/dist/adapters/process-utils.js +96 -0
- package/dist/adapters/process-utils.js.map +1 -0
- package/dist/adapters/types.d.ts +41 -0
- package/dist/adapters/types.d.ts.map +1 -0
- package/dist/adapters/types.js +6 -0
- package/dist/adapters/types.js.map +1 -0
- package/dist/assets.generated.d.ts +13 -0
- package/dist/assets.generated.d.ts.map +1 -0
- package/dist/assets.generated.js +162 -0
- package/dist/assets.generated.js.map +1 -0
- package/dist/beads.d.ts +85 -0
- package/dist/beads.d.ts.map +1 -0
- package/dist/beads.js +120 -0
- package/dist/beads.js.map +1 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +39 -0
- package/dist/cli.js.map +1 -0
- package/dist/commands/add.d.ts +10 -0
- package/dist/commands/add.d.ts.map +1 -0
- package/dist/commands/add.js +58 -0
- package/dist/commands/add.js.map +1 -0
- package/dist/commands/cleanup.d.ts +13 -0
- package/dist/commands/cleanup.d.ts.map +1 -0
- package/dist/commands/cleanup.js +174 -0
- package/dist/commands/cleanup.js.map +1 -0
- package/dist/commands/daemon.d.ts +3 -0
- package/dist/commands/daemon.d.ts.map +1 -0
- package/dist/commands/daemon.js +162 -0
- package/dist/commands/daemon.js.map +1 -0
- package/dist/commands/init.d.ts +20 -0
- package/dist/commands/init.d.ts.map +1 -0
- package/dist/commands/init.js +125 -0
- package/dist/commands/init.js.map +1 -0
- package/dist/commands/install-keybinding.d.ts +61 -0
- package/dist/commands/install-keybinding.d.ts.map +1 -0
- package/dist/commands/install-keybinding.js +138 -0
- package/dist/commands/install-keybinding.js.map +1 -0
- package/dist/commands/install-status.d.ts +35 -0
- package/dist/commands/install-status.d.ts.map +1 -0
- package/dist/commands/install-status.js +115 -0
- package/dist/commands/install-status.js.map +1 -0
- package/dist/commands/log.d.ts +7 -0
- package/dist/commands/log.d.ts.map +1 -0
- package/dist/commands/log.js +60 -0
- package/dist/commands/log.js.map +1 -0
- package/dist/commands/pause.d.ts +12 -0
- package/dist/commands/pause.d.ts.map +1 -0
- package/dist/commands/pause.js +47 -0
- package/dist/commands/pause.js.map +1 -0
- package/dist/commands/resume.d.ts +12 -0
- package/dist/commands/resume.d.ts.map +1 -0
- package/dist/commands/resume.js +59 -0
- package/dist/commands/resume.js.map +1 -0
- package/dist/commands/shared.d.ts +7 -0
- package/dist/commands/shared.d.ts.map +1 -0
- package/dist/commands/shared.js +56 -0
- package/dist/commands/shared.js.map +1 -0
- package/dist/commands/start.d.ts +12 -0
- package/dist/commands/start.d.ts.map +1 -0
- package/dist/commands/start.js +274 -0
- package/dist/commands/start.js.map +1 -0
- package/dist/commands/status.d.ts +24 -0
- package/dist/commands/status.d.ts.map +1 -0
- package/dist/commands/status.js +101 -0
- package/dist/commands/status.js.map +1 -0
- package/dist/commands/stop.d.ts +11 -0
- package/dist/commands/stop.d.ts.map +1 -0
- package/dist/commands/stop.js +52 -0
- package/dist/commands/stop.js.map +1 -0
- package/dist/commands/teardown.d.ts +15 -0
- package/dist/commands/teardown.d.ts.map +1 -0
- package/dist/commands/teardown.js +72 -0
- package/dist/commands/teardown.js.map +1 -0
- package/dist/config.d.ts +58 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +129 -0
- package/dist/config.js.map +1 -0
- package/dist/daemon/client.d.ts +38 -0
- package/dist/daemon/client.d.ts.map +1 -0
- package/dist/daemon/client.js +254 -0
- package/dist/daemon/client.js.map +1 -0
- package/dist/daemon/context.d.ts +63 -0
- package/dist/daemon/context.d.ts.map +1 -0
- package/dist/daemon/context.js +14 -0
- package/dist/daemon/context.js.map +1 -0
- package/dist/daemon/handlers.d.ts +79 -0
- package/dist/daemon/handlers.d.ts.map +1 -0
- package/dist/daemon/handlers.js +1260 -0
- package/dist/daemon/handlers.js.map +1 -0
- package/dist/daemon/index.d.ts +6 -0
- package/dist/daemon/index.d.ts.map +1 -0
- package/dist/daemon/index.js +7 -0
- package/dist/daemon/index.js.map +1 -0
- package/dist/daemon/lifecycle.d.ts +56 -0
- package/dist/daemon/lifecycle.d.ts.map +1 -0
- package/dist/daemon/lifecycle.js +341 -0
- package/dist/daemon/lifecycle.js.map +1 -0
- package/dist/daemon/protocol.d.ts +174 -0
- package/dist/daemon/protocol.d.ts.map +1 -0
- package/dist/daemon/protocol.js +3 -0
- package/dist/daemon/protocol.js.map +1 -0
- package/dist/daemon/recovery.d.ts +37 -0
- package/dist/daemon/recovery.d.ts.map +1 -0
- package/dist/daemon/recovery.js +197 -0
- package/dist/daemon/recovery.js.map +1 -0
- package/dist/daemon/server.d.ts +31 -0
- package/dist/daemon/server.d.ts.map +1 -0
- package/dist/daemon/server.js +294 -0
- package/dist/daemon/server.js.map +1 -0
- package/dist/daemon/socket.d.ts +18 -0
- package/dist/daemon/socket.d.ts.map +1 -0
- package/dist/daemon/socket.js +36 -0
- package/dist/daemon/socket.js.map +1 -0
- package/dist/daemon/state.d.ts +60 -0
- package/dist/daemon/state.d.ts.map +1 -0
- package/dist/daemon/state.js +156 -0
- package/dist/daemon/state.js.map +1 -0
- package/dist/daemon/systemd.d.ts +19 -0
- package/dist/daemon/systemd.d.ts.map +1 -0
- package/dist/daemon/systemd.js +131 -0
- package/dist/daemon/systemd.js.map +1 -0
- package/dist/hooks/claude-code-hook.d.ts +32 -0
- package/dist/hooks/claude-code-hook.d.ts.map +1 -0
- package/dist/hooks/claude-code-hook.js +112 -0
- package/dist/hooks/claude-code-hook.js.map +1 -0
- package/dist/instructions-template.d.ts +9 -0
- package/dist/instructions-template.d.ts.map +1 -0
- package/dist/instructions-template.js +123 -0
- package/dist/instructions-template.js.map +1 -0
- package/dist/logger.d.ts +25 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +44 -0
- package/dist/logger.js.map +1 -0
- package/dist/loop.d.ts +88 -0
- package/dist/loop.d.ts.map +1 -0
- package/dist/loop.js +161 -0
- package/dist/loop.js.map +1 -0
- package/dist/orchestrator-instructions-template.d.ts +13 -0
- package/dist/orchestrator-instructions-template.d.ts.map +1 -0
- package/dist/orchestrator-instructions-template.js +147 -0
- package/dist/orchestrator-instructions-template.js.map +1 -0
- package/dist/output.d.ts +12 -0
- package/dist/output.d.ts.map +1 -0
- package/dist/output.js +25 -0
- package/dist/output.js.map +1 -0
- package/dist/plugin/pit.js +57 -0
- package/dist/session.d.ts +55 -0
- package/dist/session.d.ts.map +1 -0
- package/dist/session.js +135 -0
- package/dist/session.js.map +1 -0
- package/dist/setup.d.ts +92 -0
- package/dist/setup.d.ts.map +1 -0
- package/dist/setup.js +382 -0
- package/dist/setup.js.map +1 -0
- package/dist/shell-quote.d.ts +16 -0
- package/dist/shell-quote.d.ts.map +1 -0
- package/dist/shell-quote.js +18 -0
- package/dist/shell-quote.js.map +1 -0
- package/dist/signals.d.ts +17 -0
- package/dist/signals.d.ts.map +1 -0
- package/dist/signals.js +26 -0
- package/dist/signals.js.map +1 -0
- package/dist/state-machine.d.ts +74 -0
- package/dist/state-machine.d.ts.map +1 -0
- package/dist/state-machine.js +153 -0
- package/dist/state-machine.js.map +1 -0
- package/dist/tmux.d.ts +101 -0
- package/dist/tmux.d.ts.map +1 -0
- package/dist/tmux.js +208 -0
- package/dist/tmux.js.map +1 -0
- package/dist/worktree.d.ts +33 -0
- package/dist/worktree.d.ts.map +1 -0
- package/dist/worktree.js +116 -0
- package/dist/worktree.js.map +1 -0
- package/package.json +66 -0
|
@@ -0,0 +1,1260 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Orchestrator command handlers for the pit daemon.
|
|
3
|
+
*
|
|
4
|
+
* This module contains the core orchestration pipeline (clearAndReprompt) and
|
|
5
|
+
* all handler functions for daemon protocol methods: start, status, pause,
|
|
6
|
+
* resume, stop, teardown, add, log, agent-idle, agent-permission.
|
|
7
|
+
*
|
|
8
|
+
* Each handler takes a DaemonContext as its first argument instead of closing
|
|
9
|
+
* over module-level variables. The registerOrchestratorHandlers() function
|
|
10
|
+
* registers all handlers on a PitDaemonServer.
|
|
11
|
+
*/
|
|
12
|
+
import * as path from "node:path";
|
|
13
|
+
import * as beads from "../beads.js";
|
|
14
|
+
import { resolveConfig, DEFAULT_CONFIG } from "../config.js";
|
|
15
|
+
import { resolveAdapter } from "../adapters/index.js";
|
|
16
|
+
import { createDaemonState, createDebouncedSaver, saveDaemonState } from "./state.js";
|
|
17
|
+
import { createLogger } from "../logger.js";
|
|
18
|
+
import { startLoop, updateStatusBar } from "../loop.js";
|
|
19
|
+
import { setupEpic, renderPrompt, resolveTemplateContent, DEFAULT_PROMPT, } from "../setup.js";
|
|
20
|
+
import { sendClearCommand, sendKeys, sendPrompt, sendBell, capturePane, killWindow, sessionExists, createSession, windowExists, getWindowList, getFirstWindowName, createWindow, } from "../tmux.js";
|
|
21
|
+
import { removeWorktree, isWorktreeDirty } from "../worktree.js";
|
|
22
|
+
import { cleanup as cleanupDir } from "../signals.js";
|
|
23
|
+
const logger = createLogger();
|
|
24
|
+
// ---------------------------------------------------------------------------
|
|
25
|
+
// Bell helpers
|
|
26
|
+
// ---------------------------------------------------------------------------
|
|
27
|
+
/**
 * Ring the terminal bell on the session's control window (the first window
 * in the tmux session) so the operator is notified, rather than ringing on
 * the agent's epic window.
 *
 * When the first window name cannot be determined (empty session, tmux
 * error, …) the bell falls back to `fallbackWindow`. Every failure is
 * swallowed — ringing the bell is strictly best-effort.
 */
async function ringControlBell(ctx, fallbackWindow) {
    // Prefer the control window; a rejected lookup yields null so we can
    // fall back without throwing.
    const controlWindow = await getFirstWindowName(ctx.tmuxSession).catch(() => null);
    const bellTarget = controlWindow === null || controlWindow === undefined
        ? fallbackWindow
        : controlWindow;
    // Best-effort: swallow any tmux error from the bell itself.
    await sendBell(ctx.tmuxSession, bellTarget).catch(() => undefined);
}
|
|
40
|
+
// ---------------------------------------------------------------------------
|
|
41
|
+
// Shared CLEARING pipeline helper
|
|
42
|
+
// ---------------------------------------------------------------------------
|
|
43
|
+
/**
 * Build the shared clear-and-reprompt pipeline bound to a daemon context.
 *
 * The returned function executes the CLEARING flow
 * (RUNNING/PAUSED → CLEARING → RUNNING or DONE):
 * clear the agent's conversation, wait for the TUI to become ready again,
 * look up the next ready ticket, and either re-prompt the agent or mark the
 * epic DONE.
 *
 * Callers are responsible for ensuring the state machine is in RUNNING or
 * PAUSED before calling (the CLEARING transition itself is validated by the
 * machine).
 *
 * @param ctx - DaemonContext; provides tmuxSession, projectRoot and the
 *   persisted daemonState (clearDelay / prompt-template options).
 * @returns async clearAndReprompt(options) where options carries
 *   { epicId, machine, windowName, adapter, transitionReason, epicState }.
 *   Resolves to { nextTicket } — the next ready ticket, or null when the
 *   epic is complete. Throws on CLEARING pipeline errors — callers should
 *   handle by transitioning to PAUSED with an appropriate reason.
 */
export function makeClearAndReprompt(ctx) {
    return async function clearAndReprompt(options) {
        // 1. Transition → CLEARING (validated by the state machine; throws
        //    on an illegal transition).
        await options.machine.transition("CLEARING", options.transitionReason);
        // 2. Send the adapter-specific clear command to the epic's window.
        await sendClearCommand(ctx.tmuxSession, options.windowName, options.adapter.clearCommand);
        // 3. Wait for TUI readiness (use persisted clearDelay from config,
        //    falling back to the compiled-in default).
        const clearDelay = ctx.daemonState?.options.clearDelay ?? DEFAULT_CONFIG.clearDelay;
        await options.adapter.waitForReady({
            timeoutMs: clearDelay,
        });
        // 4. Small fixed delay to prevent a tmux race condition between the
        //    clear and the next send-keys.
        await new Promise((resolve) => setTimeout(resolve, 200));
        // 5. Ask beads for the next ready ticket in this epic.
        const nextTicket = await beads.ready(options.epicId, { cwd: ctx.projectRoot });
        if (!nextTicket) {
            // No more work — epic complete. Ring the bell on the epic window
            // to notify the operator.
            await options.machine.transition("DONE", "EPIC_COMPLETE");
            await sendBell(ctx.tmuxSession, options.windowName).catch(() => {
                // Ignore errors - bell is best-effort
            });
            return { nextTicket: null };
        }
        // 6. Resolve prompt template (file path > inline template > default)
        //    and send the rendered re-prompt to the agent.
        const resolvedPromptPath = ctx.daemonState?.options.promptTemplate
            ? path.resolve(ctx.daemonState.options.promptTemplate)
            : undefined;
        const promptContent = resolveTemplateContent(resolvedPromptPath, ctx.daemonState?.options.inlinePromptTemplate, DEFAULT_PROMPT);
        await sendPrompt(ctx.tmuxSession, options.windowName, renderPrompt(options.epicId, promptContent));
        // 7. Transition CLEARING → RUNNING.
        await options.machine.transition("RUNNING", "CLEARED");
        // 8. Reset the ticket timer for the new ticket (used by the ticket
        //    timeout watchdog elsewhere in the daemon).
        if (ctx.daemonState?.epics[options.epicId]) {
            ctx.daemonState.epics[options.epicId].ticketStartedAt = new Date().toISOString();
        }
        return { nextTicket };
    };
}
|
|
91
|
+
// ---------------------------------------------------------------------------
|
|
92
|
+
// Handler implementations
|
|
93
|
+
// ---------------------------------------------------------------------------
|
|
94
|
+
/**
 * Handle the `start` protocol method.
 *
 * Resolves configuration (CLI flags > .pit.json > defaults), lazily creates
 * the daemon state on first start, deduplicates the requested epic IDs,
 * ensures the tmux session exists, and then sets up each epic: already
 * tracked epics are re-attached (with an agent liveness check); new epics
 * get a worktree/window via setupEpic() and a monitoring loop via
 * startLoop().
 *
 * @param ctx - DaemonContext (mutated: daemonState, agentInstance,
 *   debouncedSaver, loopHandles).
 * @param params - StartParams; `epics` must be a non-empty array. Optional
 *   overrides: agent, worktreeDir, baseBranch, ticketTimeout, model,
 *   epicModels, promptTemplate, instructionsTemplate.
 * @returns StartResult: { sessionId, epics: per-epic results (ok/error,
 *   reused flag on re-attach, worktreePath, windowName, …) }.
 * @throws Error when `epics` is missing, not an array, or empty.
 */
async function handleStart(ctx, params) {
    const typedParams = params;
    // Validate required params
    if (!typedParams.epics || !Array.isArray(typedParams.epics) || typedParams.epics.length === 0) {
        throw new Error("epics parameter is required and must be a non-empty array");
    }
    // Build CLI overrides from StartParams (only include fields explicitly sent)
    const cliFlags = {};
    if (typedParams.agent !== undefined)
        cliFlags.agent = typedParams.agent;
    if (typedParams.worktreeDir !== undefined)
        cliFlags.worktreeDir = typedParams.worktreeDir;
    if (typedParams.baseBranch !== undefined)
        cliFlags.baseBranch = typedParams.baseBranch;
    // ticketTimeout: undefined means "not sent", null means "explicitly disabled"
    if (typedParams.ticketTimeout !== undefined)
        cliFlags.ticketTimeout = typedParams.ticketTimeout;
    if (typedParams.model !== undefined)
        cliFlags.model = typedParams.model;
    // Three-tier merge: CLI flags > .pit.json > defaults
    const resolved = resolveConfig(cliFlags, ctx.fileConfig);
    const agent = resolved.agent;
    // resolved.model is string | null (null = no model flag)
    const sessionModel = resolved.model ?? undefined;
    const epicModels = typedParams.epicModels ?? {};
    const options = {
        worktreeDir: resolved.worktreeDir,
        baseBranch: resolved.baseBranch,
        promptTemplate: typedParams.promptTemplate,
        instructionsTemplate: typedParams.instructionsTemplate,
        inlinePromptTemplate: resolved.promptTemplate || undefined,
        clearDelay: resolved.clearDelay,
        initDelay: resolved.initDelay,
        ticketTimeout: resolved.ticketTimeout,
    };
    // First start for this daemon: resolve the agent adapter (handles the
    // 'auto' agent value) and create + persist the initial daemon state.
    if (ctx.daemonState === null) {
        const resolvedAgent = await resolveAdapter(agent);
        ctx.agentInstance = resolvedAgent;
        // Create daemon state with persisted options
        ctx.daemonState = createDaemonState({
            sessionId: ctx.sessionId,
            projectRoot: ctx.projectRoot,
            tmuxSession: ctx.tmuxSession,
            agent: resolvedAgent.name,
            options: {
                worktreeDir: options.worktreeDir,
                baseBranch: options.baseBranch,
                clearDelay: options.clearDelay,
                initDelay: options.initDelay,
                ticketTimeout: options.ticketTimeout,
            },
        });
        // Create debounced saver
        ctx.debouncedSaver = createDebouncedSaver(ctx.sessionId);
        // Save initial state
        ctx.debouncedSaver(ctx.daemonState);
    }
    // Deduplicate epic IDs (log a warning for duplicates)
    const seen = new Set();
    const deduplicatedEpics = [];
    for (const epic of typedParams.epics) {
        if (seen.has(epic)) {
            logger.warn(`Duplicate epic ID in start request, ignoring: ${epic}`);
        }
        else {
            seen.add(epic);
            deduplicatedEpics.push(epic);
        }
    }
    // Ensure the tmux session exists (the daemon doesn't own the session
    // lifecycle — it may have been created by a previous run or manually).
    if (!(await sessionExists(ctx.tmuxSession))) {
        await createSession(ctx.tmuxSession);
    }
    // Setup each epic
    const results = [];
    const setupOptions = {
        agent: ctx.agentInstance ? ctx.agentInstance.name : "opencode",
        epics: deduplicatedEpics,
        worktreeDir: options.worktreeDir,
        baseBranch: options.baseBranch,
        clearDelay: options.clearDelay,
        initDelay: options.initDelay,
        tmuxSession: ctx.tmuxSession,
    };
    const sessionLogger = createLogger();
    for (const epic of deduplicatedEpics) {
        // Check if this epic is already tracked — re-attach instead of re-setup
        if (ctx.daemonState && ctx.daemonState.epics[epic]) {
            const existingState = ctx.daemonState.epics[epic];
            // Map internal MachineState to external state
            let externalState;
            switch (existingState.state) {
                case "SETUP":
                case "RUNNING":
                case "CLEARING":
                    externalState = "running";
                    break;
                case "PAUSED":
                    externalState = "paused";
                    break;
                case "DONE":
                    externalState = "done";
                    break;
                default:
                    externalState = "running";
            }
            // Get progress from beads if available
            let progressResult = { done: 0, total: 0 };
            try {
                progressResult = await beads.progress(epic, { cwd: ctx.projectRoot });
            }
            catch {
                // Ignore — progress is best-effort
            }
            // For RUNNING/CLEARING states, verify the agent is still alive.
            // If it has crashed since the last health check, update state to PAUSED
            // so the caller doesn't get stale 'running' state.
            if (existingState.state === "RUNNING" ||
                existingState.state === "CLEARING" ||
                existingState.state === "SETUP") {
                try {
                    const adapter = ctx.agentInstance || (await resolveAdapter("auto"));
                    const agentOk = await adapter.verifyRunning(ctx.tmuxSession, existingState.windowName);
                    if (!agentOk) {
                        logger.warn(`Epic agent not running on re-attach, marking as PAUSED: ${epic}`);
                        // Update daemonState
                        const pauseReason = "agent not running (detected on re-attach)";
                        ctx.daemonState.epics[epic].state = "PAUSED";
                        ctx.daemonState.epics[epic].pauseReason = pauseReason;
                        ctx.debouncedSaver?.(ctx.daemonState);
                        // Transition the loop handle's state machine if available
                        const loopHandle = ctx.loopHandles.get(epic);
                        if (loopHandle) {
                            try {
                                await loopHandle.machine.transition("PAUSED", "MANUAL_PAUSE", { pauseReason });
                            }
                            catch (transitionErr) {
                                logger.warn(`Failed to transition state machine for epic ${epic} on re-attach`, {
                                    error: transitionErr instanceof Error ? transitionErr.message : String(transitionErr),
                                });
                            }
                        }
                        externalState = "paused";
                    }
                }
                catch (verifyErr) {
                    // verifyRunning threw — log and continue without changing state
                    logger.warn(`verifyRunning threw during re-attach for epic ${epic}, skipping state update`, {
                        error: verifyErr instanceof Error ? verifyErr.message : String(verifyErr),
                    });
                }
            }
            // Per-epic model overrides only apply at setup time; warn so the
            // operator knows the flag had no effect on a re-attach.
            if (epicModels[epic]) {
                logger.warn(`--epic-model for ${epic} ignored: epic already running (re-attached)`);
            }
            logger.info(`Epic already tracked, re-attaching: ${epic}`, { state: externalState });
            results.push({
                epic,
                status: "ok",
                reused: true,
                state: externalState,
                progress: progressResult,
                worktreePath: existingState.worktreePath,
                windowName: existingState.windowName,
            });
            continue;
        }
        try {
            // Per-epic model resolution: epicModels[epic] > sessionModel > undefined
            const epicModel = epicModels[epic] ?? sessionModel;
            const epicSetupOptions = { ...setupOptions, model: epicModel };
            // Call setupEpic (creates the worktree + tmux window and launches
            // the agent; returns ok/error per epic rather than throwing).
            const setupResult = await setupEpic(ctx.agentInstance ? ctx.agentInstance : await resolveAdapter("auto"), epic, ctx.sessionId, ctx.tmuxSession, epicSetupOptions, sessionLogger);
            if (setupResult.status === "ok") {
                // Call startLoop to get LoopHandle
                const windowName = `epic-${epic}`;
                const loopHandle = startLoop({
                    epic,
                    tmuxSession: ctx.tmuxSession,
                    windowName,
                    adapter: ctx.agentInstance ? ctx.agentInstance : await resolveAdapter(agent),
                    logger: sessionLogger,
                    // makeStatusBarUpdater is defined elsewhere in this file
                    // (not shown here) — presumably wraps updateStatusBar.
                    onStatusBarUpdate: makeStatusBarUpdater(ctx),
                });
                // Store LoopHandle in daemon memory
                ctx.loopHandles.set(epic, loopHandle);
                // Add epic to DaemonState.epics
                if (ctx.daemonState) {
                    ctx.daemonState.epics[epic] = {
                        epic,
                        state: loopHandle.state,
                        pauseReason: loopHandle.pauseReason,
                        worktreePath: setupResult.worktreePath ? setupResult.worktreePath : "",
                        windowName,
                        currentTicket: null,
                        ticketStartedAt: null,
                    };
                    // Set currentTicket to the first ready ticket for this epic
                    const firstTicket = await beads.ready(epic, { cwd: ctx.projectRoot });
                    if (firstTicket) {
                        ctx.daemonState.epics[epic].currentTicket = firstTicket.id;
                        ctx.daemonState.epics[epic].ticketStartedAt = new Date().toISOString();
                    }
                }
                results.push({
                    epic,
                    status: "ok",
                    worktreePath: setupResult.worktreePath,
                    windowName,
                });
            }
            else {
                results.push({
                    epic,
                    status: "error",
                    error: setupResult.error,
                });
            }
        }
        catch (error) {
            // Setup failures for one epic don't abort the rest.
            results.push({
                epic,
                status: "error",
                error: error instanceof Error ? error.message : String(error),
            });
        }
    }
    // Save state via debounced saver
    if (ctx.debouncedSaver) {
        ctx.debouncedSaver(ctx.daemonState);
    }
    // Return StartResult
    return {
        sessionId: ctx.sessionId,
        epics: results,
    };
}
|
|
333
|
+
/**
 * Handle the `status` protocol method.
 *
 * Reports daemon metadata (version, protocol version, uptime, session and
 * project identifiers) plus one entry per tracked epic with its external
 * state ("running" / "paused" / "done"), beads progress, and pause reason.
 *
 * @param ctx - DaemonContext (daemonStartTime, sessionId, projectRoot,
 *   tmuxSession, daemonState).
 * @returns StatusResult object; `epics` is empty until daemon state exists.
 */
async function handleStatus(ctx) {
    // Daemon version comes from our own package.json; fall back to
    // "unknown" when it cannot be loaded.
    let daemonVersion = "unknown";
    try {
        const pkg = await import("../../package.json", { with: { type: "json" } });
        daemonVersion = pkg.default.version;
    }
    catch {
        // Keep the "unknown" fallback.
    }
    const statusResult = {
        daemonVersion,
        // Protocol version is always '1.0' initially
        protocolVersion: "1.0",
        // Uptime in whole seconds since the daemon process started.
        uptime: Math.floor((Date.now() - ctx.daemonStartTime) / 1000),
        sessionId: ctx.sessionId,
        projectRoot: ctx.projectRoot,
        tmuxSession: ctx.tmuxSession,
        epics: [],
    };
    // Before the first `start`, there is no daemon state and no epics.
    if (!ctx.daemonState) {
        return statusResult;
    }
    // Collapse the internal MachineState into the external three-state view.
    const EXTERNAL_STATE = {
        SETUP: "running",
        RUNNING: "running",
        CLEARING: "running",
        PAUSED: "paused",
        DONE: "done",
    };
    for (const [epicId, epicState] of Object.entries(ctx.daemonState.epics)) {
        // Progress is best-effort: report zeros when beads fails.
        let progressResult;
        try {
            progressResult = await beads.progress(epicId, { cwd: ctx.projectRoot });
        }
        catch {
            progressResult = { done: 0, total: 0 };
        }
        const externalState = Object.hasOwn(EXTERNAL_STATE, epicState.state)
            ? EXTERNAL_STATE[epicState.state]
            : "running"; // Default fallback for unknown machine states
        statusResult.epics.push({
            epic: epicId,
            state: externalState,
            progress: progressResult,
            pauseReason: epicState.pauseReason,
        });
    }
    return statusResult;
}
|
|
396
|
+
/**
 * Handle the `pause` protocol method: manually pause a running epic.
 *
 * @param ctx - DaemonContext (loopHandles, daemonState, debouncedSaver).
 * @param params - PauseParams; requires `epic` (epic ID string).
 * @returns { epic, previousState, newState: "paused" } — previousState is
 *   the internal machine state ("RUNNING" or "CLEARING") before pausing.
 * @throws Error "epic parameter is required" when `epic` is missing;
 *   "EPIC_NOT_FOUND" when no loop handle is tracked for the epic;
 *   "INVALID_STATE" when the epic is not RUNNING or CLEARING.
 */
async function handlePause(ctx, params) {
    const typedParams = params;
    // Validate epic exists
    if (!typedParams.epic) {
        throw new Error("epic parameter is required");
    }
    const loopHandle = ctx.loopHandles.get(typedParams.epic);
    if (!loopHandle) {
        throw new Error("EPIC_NOT_FOUND");
    }
    // Only RUNNING or CLEARING epics can be paused manually.
    const previousState = loopHandle.state;
    if (!["RUNNING", "CLEARING"].includes(previousState)) {
        throw new Error("INVALID_STATE");
    }
    // Drive the state machine; loopHandle reflects the new state afterwards.
    await loopHandle.machine.transition("PAUSED", "MANUAL_PAUSE", { pauseReason: "MANUAL_PAUSE" });
    // Persist the new state via the debounced saver. Guard against a missing
    // epic record: loopHandles and daemonState.epics are normally kept in
    // sync, but if the record is absent we must not crash the pause request
    // with a TypeError after the transition already succeeded.
    if (ctx.debouncedSaver && ctx.daemonState) {
        const epicRecord = ctx.daemonState.epics[typedParams.epic];
        if (epicRecord) {
            epicRecord.state = loopHandle.state;
            epicRecord.pauseReason = loopHandle.pauseReason;
        }
        ctx.debouncedSaver(ctx.daemonState);
    }
    // Return result
    return {
        epic: typedParams.epic,
        previousState,
        newState: "paused",
    };
}
|
|
426
|
+
/**
 * Handle the `resume` protocol method: resume a PAUSED epic.
 *
 * Two modes:
 *  - With `message`: transition to RUNNING, best-effort clear the agent's
 *    stale conversation context, then inject the operator's message into
 *    the TUI and reset the ticket timer.
 *  - Without `message`: run the full clear-and-reprompt pipeline via the
 *    injected `clearAndReprompt` helper (may finish the epic as DONE when
 *    no ticket remains). Resuming a NEEDS_HUMAN_INPUT pause without a
 *    message produces a warning in the result.
 *
 * @param ctx - DaemonContext (loopHandles, daemonState, agentInstance,
 *   debouncedSaver).
 * @param params - ResumeParams; requires `epic`, optional `message`.
 * @param clearAndReprompt - pipeline function built by makeClearAndReprompt.
 * @returns { epic, previousState: "paused", newState: "running",
 *   warning? }.
 * @throws Error "epic parameter is required", "EPIC_NOT_FOUND",
 *   "INVALID_STATE" (epic not PAUSED), or any CLEARING pipeline error.
 */
async function handleResume(ctx, params, clearAndReprompt) {
    const typedParams = params;
    // Validate epic exists
    if (!typedParams.epic) {
        throw new Error("epic parameter is required");
    }
    const loopHandle = ctx.loopHandles.get(typedParams.epic);
    if (!loopHandle) {
        throw new Error("EPIC_NOT_FOUND");
    }
    // Check state is PAUSED, else INVALID_STATE
    const previousState = loopHandle.state;
    if (previousState !== "PAUSED") {
        throw new Error("INVALID_STATE");
    }
    let warning;
    // Handle message vs no message
    if (typedParams.message && typedParams.message.length > 0) {
        // Resume with message: transition to RUNNING, clear stale context, then send message
        await loopHandle.machine.transition("RUNNING", "RESUME_WITH_MESSAGE");
        const epicState = ctx.daemonState?.epics[typedParams.epic];
        const windowName = epicState?.windowName || `epic-${typedParams.epic}`;
        const tmuxSess = ctx.daemonState?.tmuxSession || "pit";
        const adapter = ctx.agentInstance || (await resolveAdapter("auto"));
        // Clear stale conversation context before injecting the message
        try {
            const clearDelay = ctx.daemonState?.options.clearDelay ?? DEFAULT_CONFIG.clearDelay;
            await sendClearCommand(tmuxSess, windowName, adapter.clearCommand);
            await adapter.waitForReady({ timeoutMs: clearDelay });
            // Small delay to prevent tmux race condition
            await new Promise((resolve) => setTimeout(resolve, 200));
        }
        catch (err) {
            logger.warn(`Failed to clear context before sending resume message for epic ${typedParams.epic}`, {
                error: err instanceof Error ? err.message : String(err),
            });
            // Continue anyway — still send the message even if clear failed
        }
        // Send message to TUI (failure is logged, not fatal — the epic is
        // already RUNNING at this point)
        try {
            await sendPrompt(tmuxSess, windowName, typedParams.message);
        }
        catch (err) {
            logger.error(`Failed to send resume message to epic ${typedParams.epic}`, {
                error: err instanceof Error ? err.message : String(err),
            });
        }
        // Reset ticket timer — human intervened, agent gets a fresh window
        if (ctx.daemonState?.epics[typedParams.epic]) {
            ctx.daemonState.epics[typedParams.epic].ticketStartedAt = new Date().toISOString();
        }
    }
    else {
        // Resume without message: handle NEEDS_HUMAN_INPUT case
        if (loopHandle.pauseReason?.includes("NEEDS_HUMAN_INPUT")) {
            warning = "Resuming NEEDS_HUMAN_INPUT without message";
        }
        const epicState = ctx.daemonState?.epics[typedParams.epic];
        if (!epicState) {
            throw new Error("EPIC_NOT_FOUND");
        }
        const adapter = ctx.agentInstance || (await resolveAdapter("auto"));
        // Run CLEARING flow via shared helper
        try {
            const { nextTicket } = await clearAndReprompt({
                epicId: typedParams.epic,
                machine: loopHandle.machine,
                windowName: epicState.windowName,
                adapter,
                transitionReason: "RESUME_NO_MESSAGE",
                epicState,
            });
            // Update currentTicket to the next ready ticket
            if (ctx.daemonState) {
                ctx.daemonState.epics[typedParams.epic].currentTicket = nextTicket?.id ?? null;
            }
        }
        catch (err) {
            logger.error(`Error during CLEARING flow for epic ${typedParams.epic}:`, { err });
            throw err;
        }
    }
    // Save state via debounced saver
    if (ctx.debouncedSaver && ctx.daemonState) {
        ctx.daemonState.epics[typedParams.epic].state = loopHandle.state;
        ctx.daemonState.epics[typedParams.epic].pauseReason = loopHandle.pauseReason;
        ctx.debouncedSaver(ctx.daemonState);
    }
    // Return result. NOTE(review): newState is reported as "running" even
    // when the no-message path ended the epic as DONE — presumably callers
    // re-query status for the authoritative state; confirm.
    const result = {
        epic: typedParams.epic,
        previousState: "paused",
        newState: "running",
    };
    if (warning) {
        result.warning = warning;
    }
    return result;
}
|
|
525
|
+
/**
 * Handle an "agent-idle" RPC request, fired when an epic's agent TUI goes idle.
 *
 * Decides what an idle agent means for the epic's loop:
 *  - not RUNNING          -> ignore the event
 *  - no tracked ticket    -> pause (unexpected idle)
 *  - ticket lookup fails  -> pause (cannot verify progress)
 *  - ticket closed        -> clear context and re-prompt with the next ticket
 *                            (or finish the epic when none remain)
 *  - ticket still open    -> pause (agent stopped without completing work)
 *
 * Every pause path transitions the state machine, rings the control bell, and
 * persists state via the debounced saver.
 *
 * @param {object} ctx - Daemon context (loopHandles, daemonState, projectRoot, …).
 * @param {object} params - Expected shape: { epicId: string }.
 * @param {Function} clearAndReprompt - Shared CLEARING-flow helper bound to ctx.
 * @returns {Promise<{ok: boolean, action: string}>} action is one of
 *   "ignored" | "paused" | "continue" | "done".
 * @throws {Error} "INVALID_PARAMS" when epicId is missing;
 *   "EPIC_NOT_FOUND" when the epic is unknown to the daemon.
 */
async function handleAgentIdle(ctx, params, clearAndReprompt) {
    const typedParams = params;
    // Validate epicId is provided
    if (!typedParams.epicId) {
        throw new Error("INVALID_PARAMS");
    }
    // Validate epic exists in loopHandles
    const loopHandle = ctx.loopHandles.get(typedParams.epicId);
    if (!loopHandle) {
        throw new Error("EPIC_NOT_FOUND");
    }
    // Only process idle events when RUNNING; idle while PAUSED/CLEARING/etc. is expected noise.
    if (loopHandle.state !== "RUNNING") {
        logger.debug(`[${typedParams.epicId}] agent-idle ignored, state=${loopHandle.state}`);
        return { ok: true, action: "ignored" };
    }
    const epicState = ctx.daemonState?.epics[typedParams.epicId];
    if (!epicState) {
        throw new Error("EPIC_NOT_FOUND");
    }
    const currentTicket = epicState.currentTicket;
    // Prefer the injected adapter instance; otherwise auto-detect one.
    const adapter = ctx.agentInstance || (await resolveAdapter("auto"));
    if (!currentTicket) {
        // No tracked ticket — unexpected idle, pause
        logger.warn(`[${typedParams.epicId}] agent-idle with no tracked ticket, pausing`);
        await loopHandle.machine.transition("PAUSED", "UNRECOGNIZED_IDLE", {
            pauseReason: "agent idle, no tracked ticket",
        });
        // Notify the human operator via the tmux bell.
        await ringControlBell(ctx, epicState.windowName);
        if (ctx.daemonState) {
            ctx.daemonState.epics[typedParams.epicId].state = loopHandle.state;
            ctx.daemonState.epics[typedParams.epicId].pauseReason = loopHandle.pauseReason;
            ctx.debouncedSaver?.(ctx.daemonState);
        }
        return { ok: true, action: "paused" };
    }
    // Check if current ticket was closed
    let ticket;
    try {
        ticket = await beads.show(currentTicket, { cwd: ctx.projectRoot });
    }
    catch (err) {
        // beads.show() failed — pause for safety
        const reason = `beads.show() failed: ${String(err)}`;
        logger.error(`[${typedParams.epicId}] ${reason}`);
        await loopHandle.machine.transition("PAUSED", "UNRECOGNIZED_IDLE", { pauseReason: reason });
        await ringControlBell(ctx, epicState.windowName);
        if (ctx.daemonState) {
            ctx.daemonState.epics[typedParams.epicId].state = loopHandle.state;
            ctx.daemonState.epics[typedParams.epicId].pauseReason = loopHandle.pauseReason;
            ctx.debouncedSaver?.(ctx.daemonState);
        }
        return { ok: true, action: "paused" };
    }
    if (ticket && ticket.status === "closed") {
        // Ticket complete — clear and re-prompt (or epic complete if no more work)
        try {
            const { nextTicket } = await clearAndReprompt({
                epicId: typedParams.epicId,
                machine: loopHandle.machine,
                windowName: epicState.windowName,
                adapter,
                transitionReason: "TICKET_COMPLETE",
                epicState,
            });
            // Update currentTicket in daemon state
            if (ctx.daemonState) {
                ctx.daemonState.epics[typedParams.epicId].state = loopHandle.state;
                ctx.daemonState.epics[typedParams.epicId].pauseReason = loopHandle.pauseReason;
                ctx.daemonState.epics[typedParams.epicId].currentTicket = nextTicket?.id ?? null;
                ctx.debouncedSaver?.(ctx.daemonState);
            }
            // "continue" when more work remains; "done" when the epic is finished.
            return {
                ok: true,
                action: nextTicket ? "continue" : "done",
            };
        }
        catch (err) {
            // CLEARING failed — transition to PAUSED
            const reason = `CLEARING failed: ${String(err)}`;
            logger.error(`[${typedParams.epicId}] ${reason}`);
            // State machine may be in CLEARING — transition to PAUSED
            try {
                await loopHandle.machine.transition("PAUSED", "UNRECOGNIZED_IDLE", {
                    pauseReason: reason,
                });
            }
            catch {
                // Transition may have already happened — ignore
            }
            await ringControlBell(ctx, epicState.windowName);
            if (ctx.daemonState) {
                ctx.daemonState.epics[typedParams.epicId].state = loopHandle.state;
                ctx.daemonState.epics[typedParams.epicId].pauseReason = loopHandle.pauseReason;
                ctx.debouncedSaver?.(ctx.daemonState);
            }
            return { ok: true, action: "paused" };
        }
    }
    else {
        // Ticket not closed — agent went idle without completing work
        const reason = ticket
            ? `agent idle, ticket not closed (status: ${ticket.status})`
            : "agent idle, ticket not found";
        logger.warn(`[${typedParams.epicId}] ${reason}`);
        await loopHandle.machine.transition("PAUSED", "UNRECOGNIZED_IDLE", {
            pauseReason: reason,
        });
        await ringControlBell(ctx, epicState.windowName);
        if (ctx.daemonState) {
            ctx.daemonState.epics[typedParams.epicId].state = loopHandle.state;
            ctx.daemonState.epics[typedParams.epicId].pauseReason = loopHandle.pauseReason;
            ctx.debouncedSaver?.(ctx.daemonState);
        }
        return { ok: true, action: "paused" };
    }
}
|
|
642
|
+
/**
 * Handle an "agent-permission" RPC request.
 *
 * Currently informational only: validates that the epic is known to the
 * daemon, logs receipt, and acknowledges. No state transition occurs.
 *
 * @param {object} ctx - Daemon context holding loopHandles.
 * @param {object} params - Expected shape: { epicId: string }.
 * @returns {Promise<{ok: boolean}>} Acknowledgement.
 * @throws {Error} "INVALID_PARAMS" when epicId is missing;
 *   "EPIC_NOT_FOUND" when no loop handle exists for the epic.
 */
async function handleAgentPermission(ctx, params) {
    const { epicId } = params;
    if (!epicId) {
        throw new Error("INVALID_PARAMS");
    }
    const handle = ctx.loopHandles.get(epicId);
    if (!handle) {
        throw new Error("EPIC_NOT_FOUND");
    }
    logger.info(`[${epicId}] agent-permission received`);
    return { ok: true };
}
|
|
660
|
+
/**
 * Handle a "log" RPC request: capture recent output from an epic's tmux pane.
 *
 * Delegates to capturePane() on the epic's window. ANSI escape codes are
 * stripped unless the caller passes raw: true; defaults to the last 50 lines.
 *
 * @param {object} ctx - Daemon context (daemonState with tmuxSession/epics).
 * @param {object} params - { epic: string, lines?: number, raw?: boolean }.
 * @returns {Promise<{output: string, epic: string, lines: number}>}
 * @throws {Error} When the epic param is missing, the epic is unknown, or
 *   the tmux capture fails (wrapped with the original error as `cause`).
 */
async function handleLog(ctx, params) {
    const { epic, lines, raw } = params;
    if (!epic) {
        throw new Error("epic parameter is required");
    }
    const epicState = ctx.daemonState?.epics[epic];
    if (!epicState) {
        throw new Error("EPIC_NOT_FOUND");
    }
    // Fall back to the default session name when daemon state is absent.
    const session = ctx.daemonState?.tmuxSession || "pit";
    try {
        const output = await capturePane(session, epicState.windowName, {
            lines: lines ?? 50,
            stripAnsi: !raw,
        });
        return {
            output,
            epic,
            lines: output.split("\n").length,
        };
    }
    catch (error) {
        // Wrap any tmux failure as an internal error, preserving the cause.
        const detail = error instanceof Error ? error.message : "Unknown error";
        const captureError = new Error(`Failed to capture output: ${detail}`);
        captureError.cause = error;
        throw captureError;
    }
}
|
|
695
|
+
/**
 * Handle a "stop" RPC request.
 *
 * Stops one epic (params.epic) or, when no epic is given, every epic with a
 * live loop handle. Each stop() transitions that loop to DONE. The updated
 * states are then mirrored into daemon state and persisted via the debounced
 * saver when both are available.
 *
 * @param {object} ctx - Daemon context (loopHandles, daemonState, debouncedSaver).
 * @param {object} params - { epic?: string }.
 * @returns {Promise<{success: boolean, stopped: string[]}>} IDs actually stopped.
 */
async function handleStop(ctx, params) {
    const targets = params.epic
        ? [params.epic]
        : [...ctx.loopHandles.keys()];
    const stopped = [];
    for (const epicId of targets) {
        const handle = ctx.loopHandles.get(epicId);
        // Unknown IDs are silently skipped (no error for already-gone epics).
        if (!handle) {
            continue;
        }
        handle.stop();
        stopped.push(epicId);
    }
    // Mirror the post-stop loop states into daemon state and persist.
    if (ctx.debouncedSaver && ctx.daemonState) {
        for (const epicId of stopped) {
            const handle = ctx.loopHandles.get(epicId);
            if (handle && ctx.daemonState.epics[epicId]) {
                ctx.daemonState.epics[epicId].state = handle.state;
            }
        }
        ctx.debouncedSaver(ctx.daemonState);
    }
    return { success: true, stopped };
}
|
|
719
|
+
/**
 * Handle a "teardown" RPC request: dismantle one epic or the whole session.
 *
 * For each targeted epic this stops its loop, kills its tmux window, removes
 * its signal-file directory, optionally removes its git worktree (recording
 * dirty worktrees rather than blocking), and prunes it from loopHandles and
 * daemon state. A full teardown additionally removes the session-level signal
 * directory. Individual cleanup failures are logged and skipped so teardown
 * is best-effort rather than all-or-nothing.
 *
 * @param {object} ctx - Daemon context (loopHandles, daemonState, sessionId, projectRoot).
 * @param {object} params - { epic?: string, keepWorktrees?: boolean, force?: boolean }.
 * @returns {Promise<{success: boolean, cleaned: object}>} Lists of windows,
 *   worktrees, and dirty worktrees cleaned, plus the session dir (if removed).
 * @throws {Error} "EPIC_NOT_FOUND" for an unknown single epic; a descriptive
 *   error when a full teardown is attempted with epics still running and no force.
 */
async function handleTeardown(ctx, params) {
    const typedParams = params;
    const { epic: singleEpic, keepWorktrees = false, force = false } = typedParams;
    // Accumulates everything removed, for reporting back to the caller.
    const cleaned = {
        windows: [],
        worktrees: [],
        dirtyWorktrees: [],
        sessionDir: "",
    };
    // Determine which epics to tear down
    const epicsToTeardown = singleEpic
        ? ctx.loopHandles.has(singleEpic)
            ? [singleEpic]
            : []
        : Array.from(ctx.loopHandles.keys());
    if (singleEpic && !ctx.loopHandles.has(singleEpic)) {
        throw new Error("EPIC_NOT_FOUND");
    }
    // Safety check: for full teardown, refuse if any epic is running unless --force
    if (!singleEpic && !force && ctx.daemonState) {
        const runningEpics = [];
        for (const [epicId, epicState] of Object.entries(ctx.daemonState.epics)) {
            // SETUP and CLEARING are in-flight states, treated the same as RUNNING.
            if (epicState.state === "RUNNING" ||
                epicState.state === "SETUP" ||
                epicState.state === "CLEARING") {
                runningEpics.push(epicId);
            }
        }
        if (runningEpics.length > 0) {
            throw new Error(`Cannot teardown: epics still running: ${runningEpics.join(", ")}. Use --force to override.`);
        }
    }
    for (const epicId of epicsToTeardown) {
        const loopHandle = ctx.loopHandles.get(epicId);
        if (!loopHandle)
            continue;
        // Stop the loop (transitions to DONE)
        loopHandle.stop();
        // Kill tmux window
        try {
            const epicState = ctx.daemonState?.epics[epicId];
            if (epicState) {
                const tmuxSess = ctx.daemonState?.tmuxSession || "pit";
                // For single-epic teardown: if this is the last window, create a
                // placeholder window first so the session is not auto-destroyed by tmux.
                if (singleEpic) {
                    const windows = await getWindowList(tmuxSess);
                    if (windows.length === 1 && windows[0] === epicState.windowName) {
                        await createWindow(tmuxSess, "pit", ctx.projectRoot);
                    }
                }
                await killWindow(tmuxSess, epicState.windowName);
                cleaned.windows.push(epicState.windowName);
            }
        }
        catch (err) {
            // Window may already be gone - log but continue
            logger.warn(`Failed to kill window for epic ${epicId}`, {
                error: err instanceof Error ? err.message : String(err),
            });
        }
        // Remove signal file subdirectory for this epic
        const epicSignalDir = `/tmp/pit/${ctx.sessionId}/${epicId}`;
        try {
            await cleanupDir(epicSignalDir);
        }
        catch (err) {
            logger.warn(`Failed to remove signal dir for epic ${epicId}`, {
                error: err instanceof Error ? err.message : String(err),
            });
        }
        // Remove worktree by default (unless --keep-worktrees)
        if (!keepWorktrees && ctx.daemonState?.epics[epicId]) {
            const epicState = ctx.daemonState.epics[epicId];
            if (epicState.worktreePath && epicState.worktreePath.length > 0) {
                // Check for uncommitted changes before removal; dirty worktrees are
                // reported to the caller but still removed below.
                try {
                    const dirty = await isWorktreeDirty(epicState.worktreePath);
                    if (dirty) {
                        cleaned.dirtyWorktrees.push(epicState.worktreePath);
                    }
                }
                catch (err) {
                    logger.warn(`Failed to check worktree dirty state for epic ${epicId}`, {
                        error: err instanceof Error ? err.message : String(err),
                    });
                }
                try {
                    // Extract epic name from worktree path for removeWorktree call
                    const pathParts = epicState.worktreePath.split("/");
                    const epicName = pathParts[pathParts.length - 1];
                    const worktreeDir = pathParts.slice(0, -1).join("/");
                    await removeWorktree(worktreeDir, epicName);
                    cleaned.worktrees.push(epicState.worktreePath);
                }
                catch (err) {
                    logger.warn(`Failed to remove worktree for epic ${epicId}`, {
                        error: err instanceof Error ? err.message : String(err),
                    });
                }
            }
        }
        // Remove epic from loopHandles and daemon state
        ctx.loopHandles.delete(epicId);
        if (ctx.daemonState) {
            // Rebuild epics without this key (immutably) rather than delete-ing in place.
            const { [epicId]: _removed, ...rest } = ctx.daemonState.epics;
            ctx.daemonState.epics = rest;
        }
    }
    // For full teardown (no singleEpic), clean up the session-level signal directory
    if (!singleEpic) {
        const sessionDir = `/tmp/pit/${ctx.sessionId}`;
        cleaned.sessionDir = sessionDir;
        try {
            await cleanupDir(sessionDir);
        }
        catch (err) {
            logger.warn(`Failed to remove session dir`, {
                error: err instanceof Error ? err.message : String(err),
            });
        }
    }
    // Persist updated state
    if (ctx.debouncedSaver && ctx.daemonState) {
        ctx.debouncedSaver(ctx.daemonState);
    }
    return { success: true, cleaned };
}
|
|
847
|
+
/**
 * Handle an "add" RPC request: add a new epic to an already-running session.
 *
 * Validates preconditions (active session, epic not already present), resolves
 * the agent adapter (inheriting the session default when none is given), runs
 * setupEpic() to create the worktree and tmux window, starts the epic loop,
 * records the epic in daemon state, seeds currentTicket from the first ready
 * beads ticket, and persists state.
 *
 * @param {object} ctx - Daemon context (daemonState, loopHandles, agentInstance, projectRoot).
 * @param {object} params - { epic: string, agent?: string, model?: string }.
 * @returns {Promise<{success: boolean, epic: string, windowName: string,
 *   worktreePath: string, agent: string}>}
 * @throws {Error} On missing params, inactive session, duplicate epic, or any
 *   setup failure (wrapped with the original error as `cause`).
 */
async function handleAdd(ctx, params) {
    const typedParams = params;
    // Validate epic parameter
    if (!typedParams.epic) {
        throw new Error("epic parameter is required");
    }
    // Session must be active (at least one epic was started)
    if (!ctx.daemonState || Object.keys(ctx.daemonState.epics).length === 0) {
        throw new Error("No active session. Use pit start to begin a session.");
    }
    // Epic must not already exist
    if (ctx.daemonState.epics[typedParams.epic]) {
        throw new Error(`Epic already running: ${typedParams.epic}`);
    }
    // Resolve agent type (inherit from session default if omitted)
    const resolvedAgent = typedParams.agent ?? ctx.daemonState.agent;
    const adapter = ctx.agentInstance ? ctx.agentInstance : await resolveAdapter(resolvedAgent);
    // Read model from AddParams — do NOT inherit from session
    const epicModel = typedParams.model ?? undefined;
    // Create setup options based on daemon state (forward clearDelay/initDelay from config)
    const setupOptions = {
        agent: resolvedAgent,
        epics: [typedParams.epic],
        worktreeDir: ctx.daemonState.options.worktreeDir,
        baseBranch: ctx.daemonState.options.baseBranch,
        tmuxSession: ctx.daemonState.tmuxSession,
        promptTemplate: ctx.daemonState.options.promptTemplate,
        instructionsTemplate: ctx.daemonState.options.instructionsTemplate,
        inlinePromptTemplate: ctx.daemonState.options.inlinePromptTemplate,
        clearDelay: ctx.daemonState.options.clearDelay,
        initDelay: ctx.daemonState.options.initDelay,
        model: epicModel,
    };
    const sessionLogger = createLogger();
    try {
        // Call setupEpic to set up the new epic (worktree + tmux window + agent boot)
        const setupResult = await setupEpic(adapter, typedParams.epic, ctx.daemonState.sessionId, ctx.daemonState.tmuxSession, setupOptions, sessionLogger);
        if (setupResult.status !== "ok" || !setupResult.worktreePath) {
            throw new Error(setupResult.error || "Failed to setup epic");
        }
        // Start loop for new epic
        const windowName = `epic-${typedParams.epic}`;
        const loopHandle = startLoop({
            epic: typedParams.epic,
            tmuxSession: ctx.daemonState.tmuxSession,
            windowName,
            adapter,
            logger: sessionLogger,
            onStatusBarUpdate: makeStatusBarUpdater(ctx),
        });
        // Store LoopHandle in daemon memory
        ctx.loopHandles.set(typedParams.epic, loopHandle);
        // Add epic to DaemonState.epics
        ctx.daemonState.epics[typedParams.epic] = {
            epic: typedParams.epic,
            state: loopHandle.state,
            pauseReason: loopHandle.pauseReason,
            worktreePath: setupResult.worktreePath,
            windowName,
            currentTicket: null,
            ticketStartedAt: null,
        };
        // Set currentTicket to the first ready ticket for this epic (also starts
        // the ticket timer used by the timeout enforcement in the health check).
        const firstTicket = await beads.ready(typedParams.epic, { cwd: ctx.projectRoot });
        if (firstTicket) {
            ctx.daemonState.epics[typedParams.epic].currentTicket = firstTicket.id;
            ctx.daemonState.epics[typedParams.epic].ticketStartedAt = new Date().toISOString();
        }
        // Save state via debounced saver
        if (ctx.debouncedSaver) {
            ctx.debouncedSaver(ctx.daemonState);
        }
        return {
            success: true,
            epic: typedParams.epic,
            windowName,
            worktreePath: setupResult.worktreePath,
            agent: resolvedAgent,
        };
    }
    catch (error) {
        // Wrap all failures with epic context, preserving the original as cause.
        const err = new Error(`Failed to add epic ${typedParams.epic}: ${error instanceof Error ? error.message : String(error)}`);
        err.cause = error;
        throw err;
    }
}
|
|
933
|
+
// ---------------------------------------------------------------------------
|
|
934
|
+
// Health handler
|
|
935
|
+
// ---------------------------------------------------------------------------
|
|
936
|
+
/**
|
|
937
|
+
* Handle a "health" RPC request.
|
|
938
|
+
*
|
|
939
|
+
* Checks the tmux session and each epic's window/agent liveness. Returns a
|
|
940
|
+
* structured HealthResponse. Does NOT auto-pause degraded epics — that is
|
|
941
|
+
* the job of the periodic health check (startHealthCheck).
|
|
942
|
+
*/
|
|
943
|
+
/**
 * Handle a "health" RPC request.
 *
 * Verifies that the tmux session exists and, per epic, that its window exists
 * and (for RUNNING epics) that its agent process is alive. Reports a summary
 * without auto-pausing anything — the periodic health check owns remediation.
 *
 * @param {object} ctx - Daemon context (tmuxSession, daemonState, agentInstance, daemonStartTime).
 * @returns {Promise<object>} { success, status, uptime, memoryMB, checks, degradedReasons }
 *   where status is "healthy" | "degraded" | "unhealthy".
 */
export async function handleHealth(ctx) {
    const degradedReasons = [];
    const checks = {
        tmuxSession: await sessionExists(ctx.tmuxSession),
        epicsHealthy: 0,
        epicsDegraded: 0,
    };
    if (!checks.tmuxSession) {
        degradedReasons.push("tmux session not found");
    }
    if (ctx.daemonState) {
        for (const [epicId, epicState] of Object.entries(ctx.daemonState.epics)) {
            const hasWindow = await windowExists(ctx.tmuxSession, epicState.windowName);
            if (!hasWindow) {
                checks.epicsDegraded++;
                degradedReasons.push(`tmux window missing for ${epicId}`);
                continue;
            }
            if (epicState.state === "RUNNING") {
                // Only RUNNING epics require a live agent process.
                const adapter = ctx.agentInstance || (await resolveAdapter("auto"));
                const agentAlive = await adapter.verifyRunning(ctx.tmuxSession, epicState.windowName);
                if (!agentAlive) {
                    checks.epicsDegraded++;
                    degradedReasons.push(`agent not running for ${epicId}`);
                    continue;
                }
            }
            checks.epicsHealthy++;
        }
    }
    // Missing tmux session is fatal; any other reason only degrades.
    const status = !checks.tmuxSession
        ? "unhealthy"
        : degradedReasons.length > 0
            ? "degraded"
            : "healthy";
    return {
        success: true,
        status,
        uptime: Math.floor((Date.now() - ctx.daemonStartTime) / 1000),
        memoryMB: Math.round(process.memoryUsage().heapUsed / 1024 / 1024),
        checks,
        degradedReasons,
    };
}
|
|
993
|
+
// ---------------------------------------------------------------------------
|
|
994
|
+
// Shutdown handler
|
|
995
|
+
// ---------------------------------------------------------------------------
|
|
996
|
+
/**
|
|
997
|
+
* Handle a "shutdown" RPC request.
|
|
998
|
+
*
|
|
999
|
+
* Optionally pauses all running epics and saves state. Returns immediately
|
|
1000
|
+
* so the client receives a response before the process exits. The server
|
|
1001
|
+
* itself calls server.shutdown() after delivering the response.
|
|
1002
|
+
*/
|
|
1003
|
+
/**
 * Handle a "shutdown" RPC request.
 *
 * Optionally pauses all RUNNING epics, then flushes daemon state to disk and
 * returns. The server delivers the response before exiting the process.
 *
 * BUG FIX: the saveDaemonState() flush was previously nested inside the
 * `if (pauseEpics && ctx.daemonState)` branch, so state was never flushed
 * when pauseEpics was false — contradicting the stated intent of guaranteeing
 * a flush before process exit (the inner, now-meaningful `if (ctx.daemonState)`
 * guard was redundant in that position). The flush now runs unconditionally
 * whenever daemon state exists.
 *
 * @param {object} ctx - Daemon context (loopHandles, daemonState).
 * @param {object} params - { pauseEpics?: boolean }.
 * @returns {Promise<{success: boolean, message: string}>}
 */
export async function handleShutdown(ctx, params) {
    const { pauseEpics = false } = params;
    if (pauseEpics && ctx.daemonState) {
        for (const [epicId, epicState] of Object.entries(ctx.daemonState.epics)) {
            const loopHandle = ctx.loopHandles.get(epicId);
            if (loopHandle && epicState.state === "RUNNING") {
                try {
                    await loopHandle.machine.transition("PAUSED", "MANUAL_PAUSE", {
                        pauseReason: "DAEMON_SHUTDOWN",
                    });
                    // Mirror the post-transition loop state into daemon state.
                    epicState.state = loopHandle.state;
                    epicState.pauseReason = loopHandle.pauseReason;
                }
                catch (err) {
                    // Best-effort: a failed pause must not block shutdown.
                    logger.warn(`Failed to pause epic ${epicId} during shutdown`, {
                        error: err instanceof Error ? err.message : String(err),
                    });
                }
            }
        }
    }
    // Use saveDaemonState directly (not the debounced wrapper) to ensure state is flushed
    // to disk before the process exits. The debounced saver has a 500ms delay that races
    // with the subsequent process.exit(0) in the shutdown path.
    if (ctx.daemonState) {
        try {
            await saveDaemonState(ctx.daemonState);
        }
        catch (err) {
            logger.warn("Failed to save daemon state during shutdown", {
                error: err instanceof Error ? err.message : String(err),
            });
        }
    }
    return { success: true, message: "Daemon shutting down" };
}
|
|
1040
|
+
// ---------------------------------------------------------------------------
|
|
1041
|
+
// Periodic health check
|
|
1042
|
+
// ---------------------------------------------------------------------------
|
|
1043
|
+
/**
|
|
1044
|
+
* One iteration of the self-monitoring loop.
|
|
1045
|
+
*
|
|
1046
|
+
* Checks the tmux session and every epic's window/agent liveness. Degrades
|
|
1047
|
+
* to PAUSED any epic whose agent has crashed. Enforces per-epic ticket
|
|
1048
|
+
* timeouts when configured.
|
|
1049
|
+
*/
|
|
1050
|
+
/**
 * One iteration of the self-monitoring loop (internal; scheduled by startHealthCheck).
 *
 * Checks the tmux session and every epic's window/agent liveness. For a
 * RUNNING epic whose agent died, attempts exactly one in-window restart; if
 * that fails, pauses the epic and rings the control bell. Also enforces the
 * per-epic ticket timeout when `options.ticketTimeout` is configured. All
 * failures are logged and never thrown (the outer try/catch guarantees this).
 *
 * @param {object} ctx - Daemon context (tmuxSession, daemonState, loopHandles,
 *   agentInstance, sessionId, debouncedSaver).
 * @returns {Promise<void>}
 */
async function periodicHealthCheck(ctx) {
    try {
        const tmuxOk = await sessionExists(ctx.tmuxSession);
        if (!tmuxOk) {
            // Nothing can be remediated without the session — log and bail.
            logger.error("Daemon health check UNHEALTHY", { reason: "tmux session not found" });
            return;
        }
        const degradedReasons = [];
        if (ctx.daemonState) {
            for (const [epicId, epicState] of Object.entries(ctx.daemonState.epics)) {
                const windowOk = await windowExists(ctx.tmuxSession, epicState.windowName);
                if (!windowOk) {
                    // Missing window: recorded as degraded, but no auto-remediation here.
                    degradedReasons.push(`tmux window missing for ${epicId}`);
                    continue;
                }
                if (epicState.state === "RUNNING") {
                    const adapter = ctx.agentInstance || (await resolveAdapter("auto"));
                    const agentOk = await adapter.verifyRunning(ctx.tmuxSession, epicState.windowName);
                    if (!agentOk) {
                        degradedReasons.push(`agent not running for ${epicId}`);
                        // Attempt one restart before giving up and pausing.
                        let restartSucceeded = false;
                        try {
                            const worktreePath = ctx.daemonState.epics[epicId]?.worktreePath;
                            const windowName = epicState.windowName;
                            const socketPath = `/tmp/pit/${ctx.sessionId}/daemon.sock`;
                            // Environment the restarted agent needs to reach the daemon.
                            const env = {
                                PIT_EPIC: epicId,
                                PIT_SOCKET_PATH: socketPath,
                            };
                            const cmd = adapter.startCommand(worktreePath ?? "", env);
                            logger.info(`[${epicId}] Agent crashed — attempting restart`, { cmd });
                            // Relaunch by typing the start command into the existing window.
                            await sendKeys(ctx.tmuxSession, windowName, cmd);
                            await adapter.waitForReady({ timeoutMs: 10_000 });
                            const agentOkAfterRestart = await adapter.verifyRunning(ctx.tmuxSession, windowName);
                            if (agentOkAfterRestart) {
                                logger.info(`[${epicId}] Agent restart succeeded`);
                                restartSucceeded = true;
                            }
                            else {
                                logger.warn(`[${epicId}] Agent restart failed — agent still not running`);
                            }
                        }
                        catch (restartErr) {
                            logger.warn(`[${epicId}] Exception during agent restart — falling through to pause`, {
                                error: restartErr instanceof Error ? restartErr.message : String(restartErr),
                            });
                        }
                        if (restartSucceeded) {
                            // Restart worked — skip the pause path for this epic.
                            continue;
                        }
                        const loopHandle = ctx.loopHandles.get(epicId);
                        if (loopHandle) {
                            try {
                                await loopHandle.machine.transition("PAUSED", "MANUAL_PAUSE", {
                                    pauseReason: "agent crashed (detected by health check)",
                                });
                                if (ctx.daemonState.epics[epicId]) {
                                    ctx.daemonState.epics[epicId].state = "PAUSED";
                                    ctx.daemonState.epics[epicId].pauseReason =
                                        "agent crashed (detected by health check)";
                                    ctx.debouncedSaver?.(ctx.daemonState);
                                }
                                // Alert the human operator.
                                await ringControlBell(ctx, epicState.windowName);
                            }
                            catch (err) {
                                logger.error(`Failed to auto-pause degraded epic ${epicId}`, {
                                    error: err instanceof Error ? err.message : String(err),
                                });
                            }
                        }
                        continue;
                    }
                }
                // Ticket timeout enforcement (only when a timeout is configured and
                // a ticket timer is running for this epic).
                const ticketTimeoutMinutes = ctx.daemonState.options.ticketTimeout;
                if (ticketTimeoutMinutes !== null &&
                    ticketTimeoutMinutes !== undefined &&
                    epicState.ticketStartedAt) {
                    const elapsed = Date.now() - Date.parse(epicState.ticketStartedAt);
                    const thresholdMs = ticketTimeoutMinutes * 60 * 1000;
                    if (elapsed > thresholdMs) {
                        const loopHandle = ctx.loopHandles.get(epicId);
                        if (loopHandle) {
                            const pauseReason = `TIMEOUT: exceeded ${ticketTimeoutMinutes} minutes`;
                            logger.warn(`[${epicId}] Ticket timeout — pausing epic`, {
                                elapsed: Math.floor(elapsed / 1000),
                                thresholdMs,
                                pauseReason,
                            });
                            try {
                                await loopHandle.machine.transition("PAUSED", "TICKET_TIMEOUT", {
                                    pauseReason,
                                });
                                if (ctx.daemonState.epics[epicId]) {
                                    ctx.daemonState.epics[epicId].state = "PAUSED";
                                    ctx.daemonState.epics[epicId].pauseReason = pauseReason;
                                    ctx.debouncedSaver?.(ctx.daemonState);
                                }
                                await ringControlBell(ctx, epicState.windowName);
                            }
                            catch (err) {
                                logger.error(`Failed to timeout-pause epic ${epicId}`, {
                                    error: err instanceof Error ? err.message : String(err),
                                });
                            }
                        }
                        continue;
                    }
                }
            }
        }
        if (degradedReasons.length > 0) {
            logger.warn("Daemon health check DEGRADED", { reasons: degradedReasons });
        }
    }
    catch (err) {
        // The health check must never throw into the interval scheduler.
        logger.error("Health check failed", { err });
    }
}
|
|
1170
|
+
/**
|
|
1171
|
+
* Start the periodic self-monitoring timer (every 60 seconds).
|
|
1172
|
+
*
|
|
1173
|
+
* Returns the interval handle so the caller can store it in
|
|
1174
|
+
* ctx.healthCheckInterval for later cancellation on shutdown.
|
|
1175
|
+
*/
|
|
1176
|
+
/**
 * Start the periodic self-monitoring timer (every 60 seconds).
 *
 * Each tick runs one periodicHealthCheck iteration; any rejection is caught
 * and logged so the timer keeps firing.
 *
 * @param {object} ctx - Daemon context forwarded to periodicHealthCheck.
 * @returns {NodeJS.Timeout} Interval handle — store it in
 *   ctx.healthCheckInterval so it can be cleared on shutdown.
 */
export function startHealthCheck(ctx) {
    const tick = () => {
        periodicHealthCheck(ctx).catch((err) => {
            logger.error("Unhandled error in periodic health check", { err });
        });
    };
    return setInterval(tick, 60_000);
}
|
|
1183
|
+
// ---------------------------------------------------------------------------
|
|
1184
|
+
// Status bar updater factory
|
|
1185
|
+
// ---------------------------------------------------------------------------
|
|
1186
|
+
/**
|
|
1187
|
+
* Create the onStatusBarUpdate callback for startLoop().
|
|
1188
|
+
*
|
|
1189
|
+
* The callback is called on every state transition. It:
|
|
1190
|
+
* 1. Fetches beads.progress() for the triggering epic (fails gracefully).
|
|
1191
|
+
* 2. Builds a Map<string, StatusBarEpic> from all ctx.loopHandles + ctx.daemonState.
|
|
1192
|
+
* 3. Calls updateStatusBar() with the new rich format.
|
|
1193
|
+
*
|
|
1194
|
+
* Place this in handlers.ts so it can close over DaemonContext which contains
|
|
1195
|
+
* the projectRoot and tmuxSession needed for beads.progress().
|
|
1196
|
+
*/
|
|
1197
|
+
/**
 * Create the onStatusBarUpdate callback for startLoop().
 *
 * The returned async callback runs on every state transition: it fetches
 * beads.progress() for the epic that triggered the transition (best-effort —
 * beads failures yield null progress), assembles a Map of every tracked
 * epic's { state, progress, pauseReason }, and renders it into the tmux
 * status bar via updateStatusBar(). Lives here so it can close over the
 * DaemonContext (projectRoot, tmuxSession, loopHandles).
 *
 * @param {object} ctx - Daemon context.
 * @returns {(triggeringEpic: string) => Promise<void>} Status-bar updater.
 */
export function makeStatusBarUpdater(ctx) {
    return async (triggeringEpic) => {
        // Progress is only fetched for the epic that triggered the update;
        // all others render without progress. Failures degrade to null.
        let freshProgress = null;
        try {
            freshProgress = await beads.progress(triggeringEpic, { cwd: ctx.projectRoot });
        }
        catch {
            // beads unavailable — no progress shown for this update.
        }
        const snapshot = new Map();
        for (const [epicId, handle] of ctx.loopHandles.entries()) {
            snapshot.set(epicId, {
                state: handle.state,
                progress: epicId === triggeringEpic ? freshProgress : null,
                pauseReason: handle.pauseReason,
            });
        }
        // Defensive: with no loop handles there is nothing to render.
        if (snapshot.size === 0) {
            return;
        }
        await updateStatusBar(ctx.tmuxSession, snapshot);
    };
}
|
|
1224
|
+
// ---------------------------------------------------------------------------
|
|
1225
|
+
// Registration
|
|
1226
|
+
// ---------------------------------------------------------------------------
|
|
1227
|
+
/**
 * Wire up all orchestrator command handlers on the given server.
 *
 * A single clearAndReprompt closure (bound to ctx) is shared by the
 * handlers that need it. The health, shutdown, and internal handlers are
 * intentionally NOT registered here — they remain in commands/daemon.ts
 * (shutdown needs access to the server instance and process lifecycle;
 * the health check needs the interval handle).
 */
export function registerOrchestratorHandlers(server, ctx) {
    const clearAndReprompt = makeClearAndReprompt(ctx);
    // Command name -> handler; every handler closes over ctx.
    const commands = {
        "start": (params) => handleStart(ctx, params),
        "status": () => handleStatus(ctx),
        "pause": (params) => handlePause(ctx, params),
        "resume": (params) => handleResume(ctx, params, clearAndReprompt),
        "agent-idle": (params) => handleAgentIdle(ctx, params, clearAndReprompt),
        "agent-permission": (params) => handleAgentPermission(ctx, params),
        "log": (params) => handleLog(ctx, params),
        "stop": (params) => handleStop(ctx, params),
        "teardown": (params) => handleTeardown(ctx, params),
        "add": (params) => handleAdd(ctx, params),
    };
    // Object.entries preserves insertion order for string keys, so the
    // registration order matches the table above.
    for (const [command, handler] of Object.entries(commands)) {
        server.registerHandler(command, handler);
    }
}
|
|
1249
|
+
/**
 * Register every daemon handler on the given server — the orchestrator
 * command handlers plus the health and shutdown lifecycle handlers.
 * Single entry point for commands/daemon.ts, replacing the three
 * separate inline registrations that previously lived there.
 */
export function registerAllHandlers(server, ctx) {
    registerOrchestratorHandlers(server, ctx);
    // Lifecycle handlers registered after the orchestrator set.
    const lifecycle = [
        ["health", () => handleHealth(ctx)],
        ["shutdown", (params) => handleShutdown(ctx, params)],
    ];
    for (const [command, handler] of lifecycle) {
        server.registerHandler(command, handler);
    }
}
|
|
1260
|
+
//# sourceMappingURL=handlers.js.map
|