@agenticmail/claudecode 0.2.4 → 0.2.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-QSR32ZGW.js +1367 -0
- package/dist/cli.js +0 -1
- package/dist/dispatcher-bin.js +1 -7
- package/dist/dispatcher.js +1 -7
- package/dist/http-routes.js +0 -1
- package/dist/index.js +1 -7
- package/dist/install.js +0 -1
- package/dist/status.js +0 -1
- package/dist/uninstall.js +0 -1
- package/package.json +2 -1
- package/dist/chunk-2ESYSVXG.js +0 -48
- package/dist/chunk-B276KPVO.js +0 -14230
- package/dist/chunk-BYXBJQAS.js +0 -0
- package/dist/chunk-DDJNA5HP.js +0 -20313
- package/dist/chunk-RB5MGRT3.js +0 -24666
- package/dist/chunk-V5R2D3QD.js +0 -13484
- package/dist/chunk-XNNC4MIH.js +0 -623
- package/dist/email-worker-template-BOJPKCVB-3QPP3TCG.js +0 -40
- package/dist/imap-flow-DSPQFUHY.js +0 -6
- package/dist/mailparser-TAVZQM56.js +0 -6
- package/dist/spam-filter-L6KNZ7QI-5RMBDUQG.js +0 -14

package/dist/chunk-QSR32ZGW.js
@@ -0,0 +1,1367 @@
import {
  listAccounts,
  renderPersonaBody,
  resolveConfig
} from "./chunk-SBP7MJP2.js";

// src/persona-loader.ts
import { existsSync, readFileSync } from "fs";
import { join } from "path";
function sanitizeSubagentName(name) {
  return name.toLowerCase().replace(/[^a-z0-9._-]+/g, "-").replace(/^-+|-+$/g, "");
}
function stripFrontmatter(raw) {
  const text = raw.replace(/\r\n/g, "\n");
  if (!text.startsWith("---\n")) return text;
  const close = text.indexOf("\n---", 4);
  if (close < 0) return text;
  let cursor = close + 4;
  while (cursor < text.length && (text[cursor] === "\n" || text[cursor] === "\r")) cursor++;
  return text.slice(cursor);
}
function loadPersonaForAgent(opts) {
  const { agent, agentsDir, subagentPrefix, mcpServerName } = opts;
  const basename = sanitizeSubagentName(`${subagentPrefix}${agent.name}`);
  const filePath = join(agentsDir, `${basename}.md`);
  if (existsSync(filePath)) {
    try {
      const raw = readFileSync(filePath, "utf-8");
      const body2 = stripFrontmatter(raw).trim();
      if (body2) return { body: body2, source: "file", filePath };
    } catch {
    }
  }
  const body = renderPersonaBody({ name: basename, agent, mcpServerName });
  return { body, source: "generated" };
}
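// Illustrative usage (not part of this file): the file-first, generated-
// fallback resolution above. The agent shape and agents directory are
// hypothetical values for the sketch:
//
//   const { body, source } = loadPersonaForAgent({
//     agent: { name: "Zephyr" },
//     agentsDir: "/home/me/.claude/agents",
//     subagentPrefix: "agenticmail-",
//     mcpServerName: "agenticmail"
//   });
//   // source === "file" if agenticmail-zephyr.md exists with a non-empty
//   // body after frontmatter stripping; "generated" otherwise.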

// src/dispatcher.ts
import { mkdirSync, createWriteStream, rmSync } from "fs";
import { join as join2 } from "path";
import { homedir } from "os";
import { ThreadCache, AgentMemoryStore, threadIdFor, normalizeSubject } from "@agenticmail/core";
function extractSubject(event) {
  if (typeof event.subject === "string") return event.subject;
  if (event.message && typeof event.message.subject === "string") return event.message.subject;
  return void 0;
}
function extractFrom(event) {
  if (typeof event.from === "string") return event.from;
  if (event.message && Array.isArray(event.message.from)) {
    const first = event.message.from[0];
    if (first?.address) return first.address;
    if (first?.name) return first.name;
  }
  return void 0;
}
function extractWakeAllowlist(event) {
  const raw = event.wakeAllowlist;
  if (raw === void 0) return void 0;
  if (!Array.isArray(raw)) return void 0;
  return raw.map((x) => String(x).trim().toLowerCase()).filter(Boolean);
}
function isAgentOnWakeAllowlist(accountName, list) {
  if (list === void 0) return true;
  if (list.length === 0) return false;
  return list.includes(accountName.trim().toLowerCase());
}
var SEEN_CAP = 1024;
function rememberBounded(set, item) {
  set.add(item);
  if (set.size > SEEN_CAP) {
    const drop = Array.from(set).slice(0, Math.floor(SEEN_CAP / 2));
    for (const x of drop) set.delete(x);
  }
}
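// Illustrative sketch (not part of this file): rememberBounded leans on
// Set's insertion order. When the set exceeds SEEN_CAP, the OLDEST half
// is evicted, so recent UIDs stay deduplicated while memory stays bounded:
//
//   const seen = new Set();
//   for (let uid = 1; uid <= 1025; uid++) rememberBounded(seen, uid);
//   // seen.size === 513: uids 1..512 were dropped, 513..1025 remain.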
var DEFAULT_MAX_CONCURRENT = 50;
var DEFAULT_SYNC_INTERVAL_MS = 3e4;
var DEFAULT_RECONNECT_BASE_MS = 2e3;
var DEFAULT_RECONNECT_MAX_MS = 6e4;
var TASK_MAIL_SUPPRESS_WINDOW_MS = 3e4;
var TASK_NOTIFICATION_SUBJECT_PREFIXES = ["[RPC]", "[Task]", "[Async-RPC]"];
function isTaskNotificationSubject(subject) {
  if (!subject) return false;
  const head = subject.trimStart();
  for (const prefix of TASK_NOTIFICATION_SUBJECT_PREFIXES) {
    if (head.toLowerCase().startsWith(prefix.toLowerCase())) return true;
  }
  return false;
}
var THREAD_CLOSED_MARKERS = ["[FINAL]", "[DONE]", "[CLOSED]", "[WRAP]"];
function isThreadClosedSubject(subject) {
  if (!subject) return false;
  const s = subject.toLowerCase();
  return THREAD_CLOSED_MARKERS.some((m) => s.includes(m.toLowerCase()));
}
function threadIdFromSubject(subject) {
  if (!subject) return "";
  let s = subject.trim();
  while (true) {
    const next = s.replace(/^(re|fwd?|fw)(\[\d+\])?:\s*/i, "");
    if (next === s) break;
    s = next;
  }
  return s.toLowerCase().trim();
}
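// Illustrative sketch (not part of this file): threadIdFromSubject strips
// reply/forward prefixes repeatedly, so nested replies map to one thread key:
//
//   threadIdFromSubject("Re: Fwd: Re[2]: Launch plan")  // => "launch plan"
//   threadIdFromSubject("RE: re: Launch plan")          // => "launch plan"
//   threadIdFromSubject(undefined)                      // => ""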
var DEFAULT_MAX_WAKES_PER_THREAD = 10;
var DEFAULT_WAKE_WINDOW_MS = 24 * 60 * 60 * 1e3;
var DEFAULT_WAKE_COALESCE_MS = 3e4;
async function runWorkerWithCompaction(query, persona, initialPrompt, agent, mcpServerName, mcpCommand, mcpArgs, mcpEnv, log, observer, cwd, maxIterations = 4) {
  let prompt = initialPrompt;
  let lastResult = null;
  const breadcrumbs = [];
  const captureObserver = {
    onMessage(tag, summary) {
      observer.onMessage(tag, summary);
      if (tag === "tool_use") breadcrumbs.push(`\u2713 ${summary}`);
      else if (tag === "tool_result") breadcrumbs.push(` \u2192 ${summary}`);
    }
  };
  for (let iter = 0; iter < maxIterations; iter++) {
    if (iter > 0) {
      log("info", `[dispatcher] compaction iter ${iter + 1}/${maxIterations} for "${agent.name}"`);
    }
    lastResult = await runWorker(
      query,
      persona,
      prompt,
      agent,
      mcpServerName,
      mcpCommand,
      mcpArgs,
      mcpEnv,
      log,
      void 0,
      captureObserver,
      cwd
    );
    if (lastResult.ok) return lastResult;
    if (!isContextOverflowError(lastResult.error)) return lastResult;
    if (iter === maxIterations - 1) {
      return { ok: false, error: `compaction budget exhausted (${maxIterations} iters): ${lastResult.error}` };
    }
    const checkpoint = breadcrumbs.slice(-40).join("\n");
    prompt = [
      initialPrompt,
      "",
      "## Resuming after context reset",
      "",
      "You hit the model context limit on the previous turn. Here is a",
      "breadcrumb of what you already accomplished in that turn \u2014",
      "do NOT redo any of these steps:",
      "",
      checkpoint || "(no breadcrumbs captured)",
      "",
      "Continue from where you left off. If you have already produced",
      "the final deliverable on the previous turn (e.g. submit_result,",
      "reply_email), do nothing this turn and end cleanly."
    ].join("\n");
    log("info", `[dispatcher] context overflow on "${agent.name}" \u2014 compacting (${breadcrumbs.length} breadcrumbs)`);
  }
  return lastResult ?? { ok: false, error: "worker did not run" };
}
function isContextOverflowError(msg) {
  const m = msg.toLowerCase();
  return m.includes("prompt is too long") || m.includes("context_length_exceeded") || m.includes("context length exceeded") || m.includes("max tokens") || m.includes("maximum context") || m.includes("token limit");
}
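// Illustrative sketch (not part of this file): isContextOverflowError is a
// case-insensitive substring heuristic over provider error text:
//
//   isContextOverflowError("API Error: Prompt is too long")    // true
//   isContextOverflowError("openai: context_length_exceeded")  // true
//   isContextOverflowError("ECONNRESET")                       // false
//
// A false negative just means runWorkerWithCompaction returns the failure
// instead of retrying with a compacted, breadcrumb-seeded prompt.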
async function runWorker(query, persona, userPrompt, agent, mcpServerName, mcpCommand, mcpArgs, mcpEnv, log, abortSignal, observer, cwd) {
  const opts = {
    systemPrompt: persona,
    mcpServers: {
      [mcpServerName]: {
        command: mcpCommand,
        args: mcpArgs,
        env: mcpEnv
      }
    },
    // No `allowedTools` restriction.
    //
    // Earlier versions of the dispatcher locked workers to MCP-only tools
    // ("you operate an email account, not a developer environment"). That
    // was the wrong design: AgenticMail agents are real Claude Code
    // subagents running under the host's OAuth, and the work humans
    // delegate to them (write code, run tests, do research, edit files)
    // demands the full native toolset (Read, Write, Edit, Bash, Glob,
    // Grep, WebFetch, WebSearch, NotebookEdit, …). Restricting them
    // turned "Zephyr implements the game" into "Zephyr emails source
    // code as plaintext and the human has to copy-paste it" — which
    // defeats the point of having agents in the first place.
    //
    // Omitting allowedTools lets the SDK fall through to its defaults
    // (all built-in tools + every tool exposed by the MCP servers we
    // declare above). Outbound mail is still guarded by AgenticMail's
    // own outbound guard (HIGH-severity sends held for owner approval)
    // and the worker is sandboxed by Claude Code's permission system
    // just like any other subagent.
    permissionMode: "bypassPermissions",
    abortController: abortSignal ? wrapSignal(abortSignal) : void 0
  };
  if (cwd) opts.cwd = cwd;
  const collectedText = [];
  try {
    for await (const msg of query({ prompt: userPrompt, options: opts })) {
      const m = msg;
      if (m.type === "assistant" && Array.isArray(m.message && m.message.content)) {
        for (const block of m.message.content) {
          const b = block;
          if (b.type === "text" && typeof b.text === "string") {
            collectedText.push(b.text);
            if (observer) observer.onMessage("assistant", b.text.slice(0, 240).replace(/\s+/g, " ").trim());
          } else if (b.type === "tool_use" && typeof b.name === "string") {
            const inputSummary = (() => {
              try {
                return JSON.stringify(b.input).slice(0, 200);
              } catch {
                return "(uninspectable input)";
              }
            })();
            if (observer) observer.onMessage("tool_use", `${b.name} ${inputSummary}`);
          }
        }
      } else if (m.type === "user" && Array.isArray(m.message && m.message.content)) {
        for (const block of m.message.content) {
          const b = block;
          if (b.type === "tool_result") {
            const bodyStr = typeof b.content === "string" ? b.content : Array.isArray(b.content) ? b.content.map((c) => c.text ?? "").join(" ") : "";
            if (observer) observer.onMessage("tool_result", bodyStr.slice(0, 240).replace(/\s+/g, " ").trim());
          }
        }
      }
      if (m.type === "result") {
        const r = m;
        if (typeof r.result === "string") {
          collectedText.push(r.result);
          if (observer) observer.onMessage("result", r.result.slice(0, 240).replace(/\s+/g, " ").trim());
        }
        if (r.usage && observer) {
          const u = r.usage;
          const summary = `in=${u.input_tokens ?? 0} out=${u.output_tokens ?? 0} cacheR=${u.cache_read_input_tokens ?? 0} cacheW=${u.cache_creation_input_tokens ?? 0}${typeof r.total_cost_usd === "number" ? ` cost=$${r.total_cost_usd.toFixed(4)}` : ""}`;
          observer.onMessage("usage", summary);
        }
      }
    }
    const text = collectedText.join("\n").trim();
    log("info", `[dispatcher] worker for "${agent.name}" finished (${text.length} chars output)`);
    return { ok: true, text };
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    log("error", `[dispatcher] worker for "${agent.name}" failed: ${msg}`);
    if (observer) observer.onMessage("error", msg);
    return { ok: false, error: msg };
  }
}
function wrapSignal(signal) {
  const c = new AbortController();
  if (signal.aborted) c.abort();
  else signal.addEventListener("abort", () => c.abort(), { once: true });
  return c;
}
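// Illustrative sketch (not part of this file): wrapSignal adapts an
// AbortSignal into a fresh AbortController (the SDK option above takes a
// controller), propagating aborts that already happened as well as ones
// that fire later:
//
//   const outer = new AbortController();
//   const inner = wrapSignal(outer.signal);
//   outer.abort();
//   // inner.signal.aborted === true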
function newMailPromptForBatch(agent, events) {
  const lines = [];
  const count = events.length;
  lines.push(`You have ${count} new messages on this thread (coalesced \u2014 they arrived in a burst and you are seeing them in one turn).`);
  lines.push("");
  lines.push("### Burst details");
  for (const ev of events) {
    const f = extractFrom(ev) ?? "unknown";
    const s = extractSubject(ev) ?? "(no subject)";
    lines.push(`- UID ${ev.uid ?? "?"} \xB7 ${f} \xB7 "${s}"`);
  }
  lines.push("");
  lines.push(`The LATEST message in the burst is UID ${events[events.length - 1].uid ?? "?"}.`);
  lines.push("Read it first (and any others on the thread you have not yet seen). Then decide:");
  lines.push("- If the burst is multiple replies converging on one ask, respond ONCE on the thread.");
  lines.push("- If the burst is genuinely N independent asks addressed to you, handle them in one reply where possible.");
  lines.push("- If your prior work already addressed the burst, do NOT repeat yourself \u2014 stay silent for this wake.");
  lines.push("");
  lines.push("Reuse the standard thread-aware coordination protocol below; the only difference is the batch shape.");
  lines.push("");
  const latest = events[events.length - 1];
  lines.push(newMailPrompt(agent, latest));
  return lines.join("\n");
}
function newMailPrompt(agent, event) {
  const from = extractFrom(event) ?? "unknown sender";
  const subject = extractSubject(event) ?? "(no subject)";
  const uid = event.uid;
  return [
    `You have new mail.`,
    ``,
    `- From: ${from}`,
    `- Subject: ${subject}`,
    uid ? `- UID: ${uid}` : "",
    ``,
    `## Thread-aware coordination protocol`,
    ``,
    `You are ${agent.name}. Multiple agents may be CC'd on the same thread \u2014`,
    `that is intentional: a thread is the shared workspace, and turn-taking is`,
    `implicit from context (who was addressed last, whose stage of the workflow`,
    `is next, who was @mentioned). Follow these steps in order:`,
    ``,
    `1. **Read this message.** read_email({ uid: ${uid ?? "<uid>"}, _account: "${agent.name}" }).`,
    ``,
    `2. **If this is a reply (Subject starts with "Re:" or an In-Reply-To header is present), load the rest of the thread.**`,
    `   Use search_emails({ subject: "<core subject without Re:>", _account: "${agent.name}" })`,
    `   to surface earlier messages in the thread, then read_email each prior UID.`,
    `   You MUST read the full thread before deciding what to do.`,
    ``,
    `3. **CHECK YOUR PRIOR CONTRIBUTIONS to this thread.** When you searched`,
    `   in step 2, look at how many of the messages were sent BY YOU`,
    `   (from: ${agent.email}). If you have already contributed your work`,
    `   to this thread, **do NOT redo it on a new wake**. Redelivering`,
    `   identical content when a teammate posts an update is the most`,
    `   common multi-agent failure mode \u2014 it triples noise and wastes`,
    `   tokens. Only re-contribute if EITHER:`,
    `     (a) the latest reply contains a NEW specific ask addressed to`,
    `         you by name and you have not yet answered THAT ask, OR`,
    `     (b) a teammate's reply genuinely changes the picture and your`,
    `         prior work needs an explicit revision (not a re-post).`,
    `   Otherwise stay silent.`,
    ``,
    `4. **Identify the participants.** Look at To + CC across the thread. Those`,
    `   are your collaborators. Their names map to AgenticMail agents at`,
    `   <name>@localhost. They will each be woken on every reply-all the same way you were.`,
    ``,
    `5. **Decide: is it MY turn?** Yes if any of:`,
    `   - The latest message addresses you by name ("Vesper, please \u2026", "@${agent.name} \u2026").`,
    `   - The previous-stage handoff is to your role (e.g. designer \u2192 developer, and you are the developer).`,
    `   - You were directly asked a question and nobody has answered yet.`,
    `   No if:`,
    `   - The current ask is targeted at a teammate (their turn, not yours).`,
    `   - **A teammate replied within the last 60 seconds.** They are likely`,
    `     already handling this turn; jumping in creates simultaneous replies`,
    `     and confusion. Assume good faith and stay silent unless their reply`,
    `     was clearly off-target.`,
    `   - You have nothing substantive to add right now.`,
    `   When in doubt, stay silent \u2014 over-replying creates noise. Better to let`,
    `   the right teammate take the turn than to step on theirs.`,
    ``,
    `6. **If it's your turn \u2014 do the actual work, THEN reply-all about it.**`,
    `   You have full native tools: Read, Write, Edit, Bash, Glob, Grep, WebFetch,`,
    `   WebSearch, NotebookEdit, etc. If the task is "implement X", write the file`,
    `   with Write or Edit and verify with Bash \u2014 do NOT paste source code into an`,
    `   email body and call it shipped. The thread is for COORDINATION ("done,`,
    `   see ./foo.py, runs with \`python3 foo.py\`"); the filesystem is for`,
    `   DELIVERABLES. Then:`,
    `   reply_email({ uid: ${uid ?? "<uid>"}, replyAll: true, text: "...", _account: "${agent.name}" })`,
    `   Sign with your name. Be substantive but concise.`,
    ``,
    `   ## Reply addressing \u2014 CRITICAL for wake control`,
    `   reply_email({ replyAll: true }) automatically builds the right shape:`,
    `   the ORIGINAL SENDER ends up on To (so they wake by default),`,
    `   every other participant ends up on Cc (so they see it without`,
    `   waking). DO NOT pass a hand-rolled comma-separated address list`,
    `   via send_email \u2014 that puts every recipient on To and re-wakes`,
    `   the whole thread, defeating the wake gating. Trust replyAll.`,
    ``,
    `   If you want to wake someone OTHER than the original sender`,
    `   (e.g. you are handing off to a different next actor), name them`,
    `   explicitly in the reply body ("Orion \u2014 over to you, please\u2026")`,
    `   AND pass \`wake: ["orion"]\` so the dispatcher gives them a`,
    `   Claude turn instead. Example:`,
    `   reply_email({ uid, replyAll: true, text: "Orion \u2014 your turn \u2026",`,
    `                 wake: ["orion"], _account: "${agent.name}" })`,
    `   If nobody specific is next (the work is complete and you're just`,
    `   signing off), pass \`wake: []\` to deliver silently \u2014 every`,
    `   participant still sees the reply, no Claude turn is spawned.`,
    ``,
    `7. **If you need additional help from a teammate not yet on the thread,**`,
    `   include them by CC'ing in your reply-all \u2014 DO NOT spin up a separate`,
    `   call_agent / message_agent side-channel. The thread is the workspace;`,
    `   everyone stays in context.`,
    ``,
    `8. **If it's NOT your turn,** mark the message read with mark_read and return.`,
    `   Do not reply just to acknowledge. Silence IS a valid contribution.`,
    ``,
    `## How threads end`,
    ``,
    `A thread is done when the host (or any participant) sends a wrap-up`,
    `message with one of these markers in the subject: \`[FINAL]\`, \`[DONE]\`,`,
    `\`[CLOSED]\`, \`[WRAP]\`. The dispatcher will stop waking workers on any`,
    `further replies to that thread. If you are sending a wrap-up yourself`,
    `(because the work is complete and no more contributions are needed),`,
    `include one of those markers in your reply subject.`,
    ``,
    `When you finish, return a one-line summary of what you did:`,
    `  "Contributed: <one-line description>" OR "Stayed silent \u2014 not my turn."`,
    ``,
    `## Fallback for non-thread mail`,
    ``,
    `If this is a fresh standalone email (not part of a thread, only addressed`,
    `to you), handle it directly: answer the question, do the work, reply.`,
    `Spam: trust the auto-filter unless something obviously slipped through.`
  ].filter(Boolean).join("\n");
}
function taskPrompt(agent, event) {
  const taskId = event.taskId ?? "(missing taskId)";
  const taskText = event.task ?? "(no task description)";
  const taskType = event.taskType ?? "generic";
  const from = event.from ?? "unknown";
  return [
    `You have a pending task \u2014 handle it now.`,
    ``,
    `- Task ID: ${taskId}`,
    `- Type: ${taskType}`,
    `- From: ${from}`,
    `- Task: ${taskText}`,
    ``,
    `Workflow:`,
    `  1. Call claim_task({ id: "${taskId}", _account: "${agent.name}" }) to mark yourself as the owner.`,
    `  2. Do the work using whatever pre-loaded or invoke-able MCP tools fit.`,
    `  3. Call submit_result({ id: "${taskId}", result: { ... }, _account: "${agent.name}" }) with structured JSON.`,
    `     The caller is waiting on a synchronous long-poll \u2014 submit_result is what wakes them.`,
    ``,
    `If you cannot complete the task, submit_result with { status: "failed", reason: "..." }. Never leave it unclaimed \u2014 that strands the caller until timeout.`
  ].join("\n");
}
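// Illustrative sketch (not part of this file): what newMailPrompt renders
// for a hypothetical event. Note that .filter(Boolean) drops the missing-UID
// line AND the `` separator entries, so the rendered prompt has no blank lines:
//
//   newMailPrompt({ name: "orion", email: "orion@localhost" },
//                 { uid: 42, from: "vesper@localhost", subject: "Re: Launch plan" });
//   // begins:
//   //   You have new mail.
//   //   - From: vesper@localhost
//   //   - Subject: Re: Launch plan
//   //   - UID: 42
//   //   ## Thread-aware coordination protocol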
var Dispatcher = class {
  cfg;
  maxConcurrent;
  syncIntervalMs;
  reconnectBaseMs;
  reconnectMaxMs;
  query;
  fetchImpl;
  log;
  channels = /* @__PURE__ */ new Map();
  // keyed by account.id
  accountSyncTimer = null;
  systemChannelController = null;
  running = 0;
  waiters = [];
  stopped = false;
  /**
   * Wake-budget store, keyed by `${accountId}::${threadId}`. See the
   * comment block on WakeBudgetEntry for the failure modes this guards.
   * Pruned opportunistically on each lookup — no separate timer.
   */
  wakeBudget = /* @__PURE__ */ new Map();
  maxWakesPerThread;
  wakeWindowMs;
  now;
  /**
   * Layered wake-context system. ThreadCache holds the last K
   * envelopes per thread (built passively on every SSE new-mail
   * event, even when no agent wakes). AgentMemoryStore holds
   * per-(agent, thread) markdown that workers write at end-of-
   * wake via the save_thread_memory MCP tool. Both are read on
   * worker spawn and injected into the wake prompt — see
   * spawnWorker for the rendering.
   */
  threadCache;
  agentMemory;
  /**
   * Coalesced wake queue. Keyed by `${accountId}::${threadId}`,
   * each entry holds the pending events + the timer that will
   * fire the spawn. A new event arriving while the entry exists
   * EXTENDS the timer (debounce, not throttle) and appends to
   * the event list. When the timer fires, a single Claude turn
   * sees the union of new messages and replies once.
   *
   * Why debounce, not throttle: bursts of replies from one
   * sender are typically a single logical handoff, not N
   * separate actions. Throttling would still produce a stale
   * wake after the burst settles; debouncing collapses the
   * whole burst into one wake at the trailing edge.
   */
  wakeCoalesce = /* @__PURE__ */ new Map();
  wakeCoalesceMs;
  /** Wall-clock timestamp the dispatcher started. Surfaced via
   * process-heartbeat so check_activity can show uptime. */
  startedAtMs = Date.now();
  /** Periodic timer that posts a process-heartbeat to the API.
   * Without this, a hung dispatcher looks identical to "no
   * events to wake on" — the host has no liveness signal. */
  processHeartbeatTimer = null;
  constructor(opts = {}) {
    this.cfg = resolveConfig(opts);
    this.maxConcurrent = opts.maxConcurrentWorkers ?? DEFAULT_MAX_CONCURRENT;
    this.syncIntervalMs = opts.accountSyncIntervalMs ?? DEFAULT_SYNC_INTERVAL_MS;
    this.reconnectBaseMs = opts.sseReconnectBaseMs ?? DEFAULT_RECONNECT_BASE_MS;
    this.reconnectMaxMs = opts.sseReconnectMaxMs ?? DEFAULT_RECONNECT_MAX_MS;
    this.query = opts.querySdk ?? defaultQuery();
    this.fetchImpl = opts.fetchImpl ?? globalThis.fetch;
    this.log = opts.log ?? defaultLog;
    this.maxWakesPerThread = opts.maxWakesPerThread ?? DEFAULT_MAX_WAKES_PER_THREAD;
    this.wakeWindowMs = opts.wakeWindowMs ?? DEFAULT_WAKE_WINDOW_MS;
    this.now = opts.nowMs ?? Date.now;
    this.threadCache = new ThreadCache({ cacheDir: opts.threadCacheDir });
    this.agentMemory = new AgentMemoryStore({ memoryDir: opts.agentMemoryDir });
    this.wakeCoalesceMs = opts.wakeCoalesceMs ?? DEFAULT_WAKE_COALESCE_MS;
    if (!this.cfg.masterKey) {
      throw new Error("Dispatcher requires AgenticMail master key. Run `agenticmail setup` first.");
    }
  }
  /**
   * Charge one wake against the (agent, thread) budget. Returns
   * `{ ok: true }` if the wake should proceed, `{ ok: false, ... }`
   * if the circuit breaker is open.
   *
   * Empty threadId means "no thread context" (a fresh standalone email
   * with no Subject — rare); we always allow those since there is no
   * thread to runaway on.
   */
  chargeWake(accountId, threadId) {
    if (!threadId) return { ok: true };
    const key = `${accountId}::${threadId}`;
    const now = this.now();
    let entry = this.wakeBudget.get(key);
    if (entry && now - entry.firstWakeAtMs >= this.wakeWindowMs) {
      entry = void 0;
      this.wakeBudget.delete(key);
    }
    if (!entry) {
      entry = { count: 1, firstWakeAtMs: now };
      this.wakeBudget.set(key, entry);
      this.maybePruneWakeBudget(now);
      return { ok: true, count: 1 };
    }
    if (entry.count >= this.maxWakesPerThread) {
      return {
        ok: false,
        count: entry.count,
        mutedUntilMs: entry.firstWakeAtMs + this.wakeWindowMs
      };
    }
    entry.count++;
    return { ok: true, count: entry.count };
  }
  /**
   * Drop wake-budget entries that have aged out of their window.
   *
   * Called inline from chargeWake, but at most once per ~1024 inserts so
   * the cost stays bounded. We don't need a separate timer because the
   * Map only grows on real wakes (capped by maxWakesPerThread per pair),
   * and the prune is O(n) over the current entries — cheap enough.
   */
  wakeBudgetInsertsSinceLastPrune = 0;
  maybePruneWakeBudget(now) {
    this.wakeBudgetInsertsSinceLastPrune++;
    if (this.wakeBudgetInsertsSinceLastPrune < 1024) return;
    this.wakeBudgetInsertsSinceLastPrune = 0;
    for (const [k, v] of this.wakeBudget) {
      if (now - v.firstWakeAtMs >= this.wakeWindowMs) this.wakeBudget.delete(k);
    }
  }
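  // Illustrative sketch (not part of this file): with the defaults
  // (maxWakesPerThread = 10, wakeWindowMs = 24h), the 11th wake on one
  // (account, thread) pair inside the window trips the circuit breaker:
  //
  //   for (let i = 1; i <= 10; i++) dispatcher.chargeWake("acct-1", "launch plan");
  //   dispatcher.chargeWake("acct-1", "launch plan");
  //   // => { ok: false, count: 10, mutedUntilMs: <firstWakeAtMs + 24h> }
  //   dispatcher.chargeWake("acct-1", "");
  //   // => { ok: true }; an empty threadId is never budgeted.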
  async start() {
    this.log("info", `[dispatcher] starting (maxConcurrent=${this.maxConcurrent}, syncEvery=${this.syncIntervalMs}ms)`);
    this.startedAtMs = Date.now();
    await this.syncAccounts();
    this.accountSyncTimer = setInterval(() => {
      this.syncAccounts().catch((err) => this.log("warn", `[dispatcher] account sync failed: ${err}`));
    }, this.syncIntervalMs);
    this.processHeartbeatTimer = setInterval(() => {
      this.postActivity("/dispatcher/process-heartbeat", {
        startedAtMs: this.startedAtMs,
        uptimeMs: Date.now() - this.startedAtMs,
        channels: this.channels.size,
        coalesceQueueSize: this.wakeCoalesce.size,
        running: this.running,
        maxConcurrent: this.maxConcurrent
      });
    }, 3e4);
    this.processHeartbeatTimer.unref?.();
    this.postActivity("/dispatcher/process-heartbeat", {
      startedAtMs: this.startedAtMs,
      uptimeMs: 0,
      channels: this.channels.size,
      coalesceQueueSize: 0,
      running: 0,
      maxConcurrent: this.maxConcurrent
    });
    void this.runSystemChannel();
  }
  async stop() {
    this.stopped = true;
    if (this.accountSyncTimer) clearInterval(this.accountSyncTimer);
    this.accountSyncTimer = null;
    if (this.processHeartbeatTimer) clearInterval(this.processHeartbeatTimer);
    this.processHeartbeatTimer = null;
    if (this.systemChannelController) {
      try {
        this.systemChannelController.abort();
      } catch {
      }
      this.systemChannelController = null;
    }
    for (const ch of this.channels.values()) {
      ch.stopping = true;
      ch.controller?.abort();
    }
    this.channels.clear();
    for (const entry of this.wakeCoalesce.values()) clearTimeout(entry.timer);
    this.wakeCoalesce.clear();
    this.log("info", "[dispatcher] stopped");
  }
  /** Public for tests — directly hand an event to the routing path. */
  async handleEvent(account, event) {
    if (this.stopped) return;
    if (event.type === "new" && typeof event.uid === "number") {
      const ch = this.channels.get(account.id);
      if (ch?.seenUids.has(event.uid)) return;
      const subject = extractSubject(event);
      const cacheThreadId = threadIdFor({ subject });
      try {
        const fromAddr = extractFrom(event) ?? "(unknown)";
        const previewSource = event.preview ?? event.message?.preview ?? "";
        this.threadCache.pushMessage(cacheThreadId, {
          uid: event.uid,
          from: fromAddr,
          fromAddr,
          subject: subject ?? "(no subject)",
          preview: typeof previewSource === "string" ? previewSource : "",
          date: (/* @__PURE__ */ new Date()).toISOString()
        }, {
          subject: normalizeSubject(subject),
          rootFromAddr: fromAddr
        });
      } catch (err) {
        this.log("warn", `[dispatcher] thread-cache push failed for "${account.name}" uid=${event.uid}: ${err.message}`);
      }
      if (ch && Date.now() < ch.suppressTaskMailUntilMs && isTaskNotificationSubject(subject)) {
        this.log("info", `[dispatcher] suppressed task-notification mail wake for "${account.name}" (uid=${event.uid}, subject="${subject}") \u2014 task event already dispatched`);
        rememberBounded(ch.seenUids, event.uid);
        return;
      }
      if (ch) rememberBounded(ch.seenUids, event.uid);
      if (isThreadClosedSubject(subject)) {
        this.log("info", `[dispatcher] thread closed (subject="${subject ?? ""}") \u2014 skipping wake for "${account.name}" uid=${event.uid}`);
        this.postSkipped(account, event, "thread-closed", `subject contains a thread-close marker: "${subject ?? ""}"`);
        try {
          this.threadCache.delete(cacheThreadId);
        } catch {
        }
        try {
          this.agentMemory.delete(account.id, cacheThreadId);
        } catch {
        }
        return;
      }
      const allowlist = extractWakeAllowlist(event);
      if (!isAgentOnWakeAllowlist(account.name, allowlist)) {
        this.log("info", `[dispatcher] wake allowlist excludes "${account.name}" (list=${JSON.stringify(allowlist)}) \u2014 mail delivered, no Claude turn`);
        this.postSkipped(account, event, "allowlist-excluded", `wake list ${JSON.stringify(allowlist)} did not include "${account.name}"`);
        return;
      }
      const wakeOnCc = account.wakeOnCc !== false;
      if (!wakeOnCc) {
        const wasOnTo = event.wasOnTo === true;
        if (!wasOnTo) {
          this.log("info", `[dispatcher] "${account.name}" has wake_on_cc:false and was not on To \u2014 mail delivered, no Claude turn (uid=${event.uid})`);
          this.postSkipped(account, event, "wake-on-cc", `"${account.name}" has wake_on_cc:false; not on To`);
          return;
        }
      }
      const threadId = threadIdFromSubject(subject);
      await this.scheduleCoalescedWake(account, event, threadId);
      return;
    }
    if (event.type === "task" && typeof event.taskId === "string") {
      if (typeof event.assignee === "string" && event.assignee.toLowerCase() !== account.name.toLowerCase()) return;
      const ch = this.channels.get(account.id);
      if (ch?.seenTaskIds.has(event.taskId)) return;
      if (ch) {
        rememberBounded(ch.seenTaskIds, event.taskId);
        ch.suppressTaskMailUntilMs = Date.now() + TASK_MAIL_SUPPRESS_WINDOW_MS;
      }
      await this.spawnWorker(account, taskPrompt(account, event), {
        kind: "task",
        taskId: event.taskId,
        subject: typeof event.task === "string" ? event.task.slice(0, 120) : void 0,
        from: typeof event.from === "string" ? event.from : void 0
      });
      return;
    }
  }
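  // Illustrative sketch (not part of this file): handleEvent is public for
  // tests, so the gating chain (dedupe, task-mail suppression, thread-close,
  // allowlist, wake_on_cc, coalesced wake) can be driven without a live SSE
  // stream. The account object here is a hypothetical minimal shape:
  //
  //   await dispatcher.handleEvent(
  //     { id: "acct-1", name: "orion", email: "orion@localhost" },
  //     { type: "new", uid: 42, from: "vesper@localhost", subject: "Re: Launch plan" }
  //   );
  //   // If nothing in the chain filters the event out, a coalesced wake is
  //   // scheduled on the "launch plan" thread.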
  /**
   * Should the dispatcher own a wake-channel for this account?
   *
   * We skip the bridge agent (default name "claudecode"). The bridge is
   * the host session's own inbox proxy — when mail lands there, the
   * HOST Claude Code session reads it via MCP (`list_inbox` /
   * `wait_for_email` / `read_email`), NOT via a separately-spawned
   * dispatcher worker. Spawning a worker for the bridge would:
   *   1. Compete with the host (two Claude instances trying to "be"
   *      Claude Code, both potentially replying autonomously).
   *   2. Waste tokens — the host is already aware via its MCP polling.
   *   3. Send the bridge into an autonomous loop if it ever replies-all
   *      (because that mail would wake it again, ad infinitum).
   *
   * Role="bridge" is also skipped for symmetry with selectExposableAgents
   * in install.ts — anything tagged as a bridge is host-managed.
   */
  shouldWatch(account) {
    const bridgeName = this.cfg.bridgeAgentName.toLowerCase();
    if (account.name.toLowerCase() === bridgeName) return false;
    if (account.role === "bridge") return false;
    return true;
  }
  /** Re-fetch /accounts; open SSE for new ones, close for vanished ones. */
  async syncAccounts() {
    let accounts;
    try {
      accounts = await listAccounts(this.cfg.apiUrl, this.cfg.masterKey);
    } catch (err) {
      this.log("warn", `[dispatcher] could not list accounts: ${err.message}`);
      return;
    }
    accounts = accounts.filter((a) => this.shouldWatch(a));
    const liveIds = new Set(accounts.map((a) => a.id));
    for (const [id, ch] of this.channels) {
      if (!liveIds.has(id)) {
        ch.stopping = true;
        ch.controller?.abort();
        this.channels.delete(id);
        this.log("info", `[dispatcher] account "${ch.account.name}" removed \u2014 closed SSE channel`);
      }
    }
    for (const account of accounts) {
      if (this.channels.has(account.id)) {
        this.channels.get(account.id).account = account;
        continue;
      }
      const ch = {
        account,
        controller: null,
        stopping: false,
        backoffMs: this.reconnectBaseMs,
        seenUids: /* @__PURE__ */ new Set(),
        seenTaskIds: /* @__PURE__ */ new Set(),
        suppressTaskMailUntilMs: 0
      };
      this.channels.set(account.id, ch);
      this.log("info", `[dispatcher] opening SSE for "${account.name}" (${account.email})`);
      void this.runChannel(ch);
    }
  }
  /**
   * Subscribe to the API's master-scoped system events SSE.
   *
   * Pushes from /system/events arrive as JSON-per-frame just like the
   * per-account stream:
   *   { type: "connected" }
   *   { type: "account_created", account: { id, name, email, apiKey, ... } }
   *   { type: "account_deleted", accountId, name }
   *
   * On `account_created` we eagerly open a per-account SSE channel using
   * the apiKey carried in the event payload — no extra round trip, the
   * channel is live within milliseconds of the POST /accounts response.
   *
   * Reconnect with the same exponential backoff scheme as per-account
   * channels. If the API is older and doesn't expose /system/events
   * (404), we log once and stop trying — polling-only fallback still
   * works.
   */
  async runSystemChannel() {
    let backoff = this.reconnectBaseMs;
    let giveUp = false;
    while (!this.stopped && !giveUp) {
      this.systemChannelController = new AbortController();
      try {
        const url = `${this.cfg.apiUrl.replace(/\/$/, "")}/api/agenticmail/system/events`;
        const res = await this.fetchImpl(url, {
          headers: {
            "Authorization": `Bearer ${this.cfg.masterKey}`,
            "Accept": "text/event-stream"
          },
          signal: this.systemChannelController.signal
        });
        if (res.status === 404) {
          this.log("warn", "[dispatcher] /system/events not available on this API \u2014 falling back to polling-only account discovery (please upgrade @agenticmail/api to >=0.7.3)");
          giveUp = true;
          break;
        }
        if (!res.ok || !res.body) {
          throw new Error(`system/events HTTP ${res.status}`);
        }
        backoff = this.reconnectBaseMs;
        const reader = res.body.getReader();
        const decoder = new TextDecoder();
        let buffer = "";
        while (!this.stopped) {
          const { value, done } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          let boundary;
          while ((boundary = buffer.indexOf("\n\n")) !== -1) {
            const frame = buffer.slice(0, boundary);
            buffer = buffer.slice(boundary + 2);
            for (const line of frame.split("\n")) {
              if (!line.startsWith("data: ")) continue;
              try {
                const event = JSON.parse(line.slice(6));
                this.handleSystemEvent(event);
              } catch {
              }
            }
          }
        }
      } catch (err) {
        if (this.stopped) break;
        this.log("warn", `[dispatcher] system-events stream error: ${err.message}; reconnecting in ${backoff}ms`);
      }
      if (this.stopped || giveUp) break;
      await sleep(backoff);
      backoff = Math.min(backoff * 2, this.reconnectMaxMs);
    }
  }
  /** Apply an account-lifecycle event from /system/events. */
  handleSystemEvent(event) {
    const type = typeof event.type === "string" ? event.type : "";
    if (type === "account_created" && event.account && typeof event.account === "object") {
      const account = event.account;
      if (!account.id || !account.name || !account.apiKey) {
        this.log("warn", "[dispatcher] account_created event missing required fields; ignoring");
        return;
      }
      if (!this.shouldWatch(account)) {
        this.log("info", `[dispatcher] account_created "${account.name}" \u2014 skipping (bridge/role excluded)`);
        return;
      }
      if (this.channels.has(account.id)) return;
      const ch = {
        account,
        controller: null,
        stopping: false,
        backoffMs: this.reconnectBaseMs,
        seenUids: /* @__PURE__ */ new Set(),
        seenTaskIds: /* @__PURE__ */ new Set(),
        suppressTaskMailUntilMs: 0
      };
      this.channels.set(account.id, ch);
      this.log("info", `[dispatcher] account_created "${account.name}" (${account.email}) \u2014 opening SSE channel immediately`);
      void this.runChannel(ch);
      return;
    }
    if (type === "account_deleted" && typeof event.accountId === "string") {
      const ch = this.channels.get(event.accountId);
      if (!ch) return;
      ch.stopping = true;
      try {
        ch.controller?.abort();
      } catch {
      }
      this.channels.delete(event.accountId);
      this.log("info", `[dispatcher] account_deleted "${ch.account.name}" \u2014 closed SSE channel`);
      return;
    }
  }
  /** Watch one account's SSE stream forever; reconnect with backoff on drop. */
  async runChannel(ch) {
    while (!ch.stopping && !this.stopped) {
      try {
        ch.controller = new AbortController();
        await this.streamOne(ch);
        if (!ch.stopping) {
          this.log("warn", `[dispatcher] SSE for "${ch.account.name}" ended unexpectedly; reconnecting in ${ch.backoffMs}ms`);
        }
      } catch (err) {
        if (ch.stopping) break;
        this.log("warn", `[dispatcher] SSE error for "${ch.account.name}": ${err.message}; reconnecting in ${ch.backoffMs}ms`);
      }
      if (ch.stopping) break;
      await sleep(ch.backoffMs);
      ch.backoffMs = Math.min(ch.backoffMs * 2, this.reconnectMaxMs);
    }
  }
  /** Single SSE attach. Returns when the stream closes for any reason. */
  async streamOne(ch) {
    const url = `${this.cfg.apiUrl.replace(/\/$/, "")}/api/agenticmail/events`;
    const res = await this.fetchImpl(url, {
      headers: {
        "Authorization": `Bearer ${ch.account.apiKey}`,
        "Accept": "text/event-stream"
      },
      signal: ch.controller.signal
    });
    if (!res.ok || !res.body) {
      throw new Error(`SSE handshake HTTP ${res.status}`);
    }
    ch.backoffMs = this.reconnectBaseMs;
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    while (!ch.stopping) {
      const { value, done } = await reader.read();
      if (done) return;
      buffer += decoder.decode(value, { stream: true });
      let boundary;
      while ((boundary = buffer.indexOf("\n\n")) !== -1) {
        const frame = buffer.slice(0, boundary);
        buffer = buffer.slice(boundary + 2);
        for (const line of frame.split("\n")) {
          if (!line.startsWith("data: ")) continue;
          let event;
          try {
            event = JSON.parse(line.slice(6));
          } catch {
            continue;
          }
          this.handleEvent(ch.account, event).catch(
            (err) => this.log("error", `[dispatcher] handleEvent threw for "${ch.account.name}": ${err}`)
          );
        }
      }
    }
  }
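  // Illustrative sketch (not part of this file): both SSE readers above use
  // the same minimal framing. Frames end at a blank line ("\n\n") and payload
  // lines carry a "data: " prefix; the buffer may hold a partial frame across
  // reads, so only complete frames are consumed:
  //
  //   let buffer = 'data: {"type":"connected"}\n\ndata: {"ty';
  //   const boundary = buffer.indexOf("\n\n");  // end of the first frame
  //   const frame = buffer.slice(0, boundary);  // 'data: {"type":"connected"}'
  //   buffer = buffer.slice(boundary + 2);      // partial frame waits for more bytes
  //   JSON.parse(frame.slice(6));               // => { type: "connected" }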
  /**
   * Enqueue (or extend) a wake for `(account, thread)`. First
   * event creates the entry + starts the debounce timer; every
   * subsequent event within the window APPENDS to the event
   * list and EXTENDS the timer to `now + wakeCoalesceMs`.
   *
   * When the timer fires, `fireCoalescedWake` synthesises a
   * single wake prompt covering every event that arrived in
   * the burst and spawns one worker. The wake-budget is
   * charged ONCE for the batch (a burst of 4 replies is one
   * logical handoff, not four).
   *
   * When `wakeCoalesceMs` is 0 (test mode / opt-out), we skip
   * the queue and spawn immediately to keep the pre-0.9.0
   * one-event-per-wake semantics.
   */
  async scheduleCoalescedWake(account, event, threadId) {
    if (this.wakeCoalesceMs <= 0) {
      await this.fireWakeImmediately(account, event, threadId);
      return;
    }
    const key = `${account.id}::${threadId}`;
    const existing = this.wakeCoalesce.get(key);
    if (!existing) {
      const entry = {
        events: [],
        // empty — first event already fired
        account,
        threadId,
        firstScheduledAt: this.now(),
        timer: setTimeout(() => this.fireCoalescedWake(key), this.wakeCoalesceMs)
      };
      entry.timer.unref?.();
      this.wakeCoalesce.set(key, entry);
      await this.fireWakeImmediately(account, event, threadId);
      return;
    }
    clearTimeout(existing.timer);
    existing.events.push(event);
    this.postActivity("/dispatcher/worker-queued", {
      agentName: account.name,
      agentId: account.id,
      threadId,
      queuedCount: existing.events.length,
      fireAtMs: this.now() + this.wakeCoalesceMs,
      reason: "coalescing subsequent burst events"
    });
    existing.timer = setTimeout(() => this.fireCoalescedWake(key), this.wakeCoalesceMs);
    existing.timer.unref?.();
    const elapsedFromFirst = this.now() - existing.firstScheduledAt;
    if (elapsedFromFirst > this.wakeCoalesceMs * 5) {
      clearTimeout(existing.timer);
      this.fireCoalescedWake(key);
    }
  }
  /**
   * Pre-0.9.0 fast path used when coalescing is disabled. Same
   * spawn that scheduleCoalescedWake/fireCoalescedWake would do
   * for a single-event batch.
   */
  async fireWakeImmediately(account, event, threadId) {
    const verdict = this.chargeWake(account.id, threadId);
    if (!verdict.ok) {
      this.log("warn", `[dispatcher] wake-budget exhausted for "${account.name}" on thread "${threadId}" \u2014 dropped uid=${event.uid}`);
      this.postSkipped(account, event, "budget-exhausted", `wake budget exhausted for thread "${threadId}" (count=${verdict.count}, cap=${this.maxWakesPerThread})`);
      return;
    }
    await this.spawnWorker(account, newMailPrompt(account, event), {
      kind: "new-mail",
      uid: event.uid,
      subject: extractSubject(event),
      from: extractFrom(event)
    });
  }
  /**
   * Timer callback for the coalesced wake. Builds a single wake
   * prompt that summarises every event in the batch and fires
   * one worker. Wake budget is charged once for the batch.
   */
  fireCoalescedWake(key) {
    const entry = this.wakeCoalesce.get(key);
    if (!entry) return;
    this.wakeCoalesce.delete(key);
    if (this.stopped) return;
    if (entry.events.length === 0) return;
    const verdict = this.chargeWake(entry.account.id, entry.threadId);
    if (!verdict.ok) {
      this.log("warn", `[dispatcher] wake-budget exhausted for "${entry.account.name}" on thread "${entry.threadId}" \u2014 dropped batch of ${entry.events.length}`);
      return;
    }
    const lastEvent = entry.events[entry.events.length - 1];
    const prompt = entry.events.length === 1 ? newMailPrompt(entry.account, lastEvent) : newMailPromptForBatch(entry.account, entry.events);
    if (entry.events.length > 1) {
      this.log("info", `[dispatcher] coalesced ${entry.events.length} wakes into one Claude turn for "${entry.account.name}" on thread "${entry.threadId}"`);
    }
    void this.spawnWorker(entry.account, prompt, {
      kind: "new-mail",
      uid: lastEvent.uid,
      subject: extractSubject(lastEvent),
      from: extractFrom(lastEvent)
    });
  }
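  // Illustrative sketch (not part of this file): a timeline for one thread
  // with the default wakeCoalesceMs = 30s. The first event fires at once;
  // later burst events debounce on the trailing edge:
  //
  //   t=0s    reply #1 arrives  -> fires immediately; entry created, timer at t=30s
  //   t=10s   reply #2 arrives  -> queued, timer extended to t=40s
  //   t=25s   reply #3 arrives  -> queued, timer extended to t=55s
  //   t=55s   timer fires       -> ONE worker sees replies #2 and #3
  //
  // If extensions kept arriving past firstScheduledAt + 5 * wakeCoalesceMs,
  // the batch is flushed early instead of being extended again, so a steady
  // trickle cannot starve the wake forever.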
  /**
   * Prepend the thread-context block (cache + memory) to the
   * wake prompt for a given account. Returns the prompt
   * unchanged when neither layer has content — the very first
   * wake on a brand-new thread shouldn't show the agent an
   * empty "Thread context" section that screams "you've seen
   * this before" when there's nothing to see.
   *
   * Exposed as a separate method so tests can drive it
   * directly without invoking the SDK.
   */
  composeWakePromptWithContext(account, ctx, prompt) {
    if (ctx.kind !== "new-mail" && ctx.kind !== "task") return prompt;
    const t = threadIdFor({ subject: ctx.subject });
    let cacheBlock = "";
    let memoryBlock = "";
    try {
      const entry = this.threadCache.read(t);
      if (entry) {
        const filtered = ctx.uid ? { ...entry, messages: entry.messages.filter((m) => m.uid !== ctx.uid) } : entry;
        cacheBlock = filtered.messages.length > 0 ? this.threadCache.renderForPrompt(filtered) : "";
      }
    } catch {
    }
    try {
      memoryBlock = this.agentMemory.renderForPrompt(this.agentMemory.read(account.id, t));
    } catch {
    }
    if (!cacheBlock && !memoryBlock) return prompt;
    const sections = [
      "## Thread context",
      "",
      "You have seen this thread before. The two blocks below are",
      "your shortcut to context \u2014 DO NOT re-read every prior message",
      "on this thread. Read only the NEW event at the bottom of this",
      "prompt and decide based on these blocks plus that event.",
      ""
    ];
    if (cacheBlock) {
      sections.push("### Facts (last messages on this thread, newest first)");
      sections.push(cacheBlock);
      sections.push("");
    }
    if (memoryBlock) {
      sections.push("### Your own memory of this thread");
      sections.push(memoryBlock);
      sections.push("");
    }
    sections.push("## NEW event");
    sections.push("");
    sections.push(prompt);
    sections.push("");
    sections.push("---");
    sections.push("At end of turn, call `save_thread_memory` with `threadId`,");
    sections.push("a one-paragraph `summary` of where the thread stands, your");
    sections.push("current `commitments`, any `openQuestions`, your `lastAction`,");
    sections.push("and the newest `lastUid` you have digested. Future wakes on");
    sections.push("this thread will load that memory into context for you.");
    return sections.join("\n");
  }
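  // Illustrative sketch (not part of this file): when either context layer
  // has content, the composed wake prompt is layered like this, with the
  // triggering event always last:
  //
  //   ## Thread context
  //   ### Facts (last messages on this thread, newest first)   <- ThreadCache
  //   ### Your own memory of this thread                       <- AgentMemoryStore
  //   ## NEW event
  //   <the newMailPrompt / taskPrompt body>
  //   ---
  //   <instruction to call save_thread_memory at end of turn>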
1068
|
+
  /** Acquire a concurrency slot, run a worker, release the slot. */
  async spawnWorker(account, prompt, ctx) {
    const releaseAgentLock = await this.acquireAgentSerial(account.id);
    await this.acquireSlot();
    const workerId = `${account.id}:${ctx.kind}:${ctx.uid ?? ctx.taskId ?? ""}:${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
    let workerResult = null;
    this.postActivity("/dispatcher/worker-started", {
      workerId,
      agentName: account.name,
      agentEmail: account.email,
      kind: ctx.kind,
      trigger: { uid: ctx.uid, taskId: ctx.taskId, subject: ctx.subject, from: ctx.from }
    });
    const logsDir = join2(homedir(), ".agenticmail", "worker-logs");
    try {
      mkdirSync(logsDir, { recursive: true });
    } catch {
    }
    const logPath = join2(logsDir, `${sanitizeId(workerId)}.log`);
    let logStream = null;
    try {
      logStream = createWriteStream(logPath, { flags: "a" });
    } catch {
    }
    const writeLog = (line) => {
      try {
        logStream?.write(`[${(/* @__PURE__ */ new Date()).toISOString()}] ${line}
`);
      } catch {
      }
    };
    writeLog(`worker_started agent=${account.name} kind=${ctx.kind}${ctx.uid ? " uid=" + ctx.uid : ""}${ctx.taskId ? " task=" + ctx.taskId : ""}`);
    const cwdDir = join2(homedir(), ".agenticmail", "worker-cwds", sanitizeId(workerId));
    try {
      mkdirSync(cwdDir, { recursive: true });
    } catch {
    }
    let turnCount = 0;
    let lastTool = "";
    let lastUsage;
    const digestedUids = /* @__PURE__ */ new Set();
    const observer = {
      onMessage: (tag, summary) => {
        writeLog(`${tag} ${summary}`);
        if (tag === "tool_use") {
          lastTool = summary.split(" ")[0];
          turnCount++;
          const m = /read_email\b[^}]*"uid"\s*:\s*(\d+)/.exec(summary);
          if (m) {
            const uid = parseInt(m[1], 10);
            if (Number.isFinite(uid) && uid > 0) digestedUids.add(uid);
          }
        }
        if (tag === "usage") lastUsage = summary;
      }
    };
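    // Illustrative: a `tool_use` summary line the regex above would match
    // (the exact formatting of observer summaries is an assumption here):
    //   read_email {"uid": 4711, "folder": "INBOX"}
    // This captures uid=4711 and marks it digested, which feeds the
    // wake-coalescing cleanup in the `finally` block below.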
    const heartbeatHandle = setInterval(() => {
      this.postActivity("/dispatcher/worker-heartbeat", {
        workerId,
        agentName: account.name,
        lastTool: lastTool || void 0,
        turnCount
      });
    }, 3e4);
    heartbeatHandle.unref?.();
    try {
      const { body } = loadPersonaForAgent({
        agent: account,
        agentsDir: this.cfg.agentsDir,
        subagentPrefix: this.cfg.subagentPrefix,
        mcpServerName: this.cfg.mcpServerName
      });
      this.log("info", `[dispatcher] waking "${account.name}" \u2014 ${ctx.kind}${ctx.taskId ? " " + ctx.taskId : ctx.uid ? " uid=" + ctx.uid : ""}`);
      const mcpEnv = await this.buildMcpEnv();
      const composedPrompt = this.composeWakePromptWithContext(account, ctx, prompt);
      workerResult = await runWorkerWithCompaction(
        this.query,
        body,
        composedPrompt,
        account,
        this.cfg.mcpServerName,
        this.cfg.mcpCommand,
        this.cfg.mcpArgs,
        mcpEnv,
        this.log,
        observer,
        cwdDir
      );
    } finally {
      clearInterval(heartbeatHandle);
      this.releaseSlot();
      if (digestedUids.size > 0) {
        const prefix = `${account.id}::`;
        for (const [key, entry] of this.wakeCoalesce.entries()) {
          if (!key.startsWith(prefix)) continue;
          const before = entry.events.length;
          entry.events = entry.events.filter((e) => !(typeof e.uid === "number" && digestedUids.has(e.uid)));
          if (entry.events.length < before) {
            this.log("info", `[dispatcher] dropped ${before - entry.events.length} queued wake(s) for "${account.name}" \u2014 UIDs already digested this turn`);
          }
          if (entry.events.length === 0) {
            try {
              clearTimeout(entry.timer);
            } catch {
            }
            this.wakeCoalesce.delete(key);
          }
        }
        const ch = this.channels.get(account.id);
        if (ch) for (const uid of digestedUids) rememberBounded(ch.seenUids, uid);
      }
      try {
        releaseAgentLock();
      } catch {
      }
      const ok = workerResult?.ok === true;
      const preview = workerResult?.ok ? workerResult.text : workerResult ? workerResult.error : "worker did not start";
      writeLog(`worker_finished ok=${ok} chars=${preview.length}`);
      try {
        logStream?.end();
      } catch {
      }
      try {
        rmSync(cwdDir, { recursive: true, force: true });
      } catch {
      }
      this.postActivity("/dispatcher/worker-finished", {
        workerId,
        agentName: account.name,
        ok,
        turnCount,
        // Context-budget telemetry: the SDK-reported usage line
        // (input/output/cache tokens + cost). Forwarded so
        // check_activity can show real cost per worker and the
        // cache+memory savings vs pre-0.9.0 become measurable.
        usage: lastUsage,
        resultPreview: typeof preview === "string" ? preview.slice(0, 240) : void 0
      });
    }
  }
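  // Lifecycle sketch of spawnWorker above (illustrative ordering):
  //   1. acquireAgentSerial(id): FIFO gate, at most one worker per agent
  //   2. acquireSlot(): global concurrency cap shared by all agents
  //   3. run the worker, posting a heartbeat every 30s (3e4 ms)
  //   4. finally: releaseSlot(), prune coalesced wakes whose UIDs were
  //      already read this turn, then releaseAgentLock()
  // The slot is released before the agent lock, so a queued wake for the
  // same agent can claim a slot as soon as its turn in the chain arrives.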
  /**
   * Fire-and-forget POST to the API's worker-activity endpoints.
   *
   * Failures are swallowed deliberately — the dispatcher must never
   * block worker spawn or interrupt teardown because the API is briefly
   * unreachable. The activity registry is best-effort observability, not
   * load-bearing state.
   */
  postActivity(path, body) {
    const url = `${this.cfg.apiUrl.replace(/\/$/, "")}/api/agenticmail${path}`;
    try {
      const result = this.fetchImpl(url, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${this.cfg.masterKey}`
        },
        body: JSON.stringify(body)
      });
      if (result && typeof result.catch === "function") {
        void result.catch(() => {
        });
      }
    } catch {
    }
  }
  /**
   * Post a "skipped wake" notification with the reason the
   * dispatcher decided not to fire a Claude turn. Surfaced in
   * `check_activity` so the host can see the decision instead
   * of just observing silence ("did my mail land? did the
   * dispatcher skip it? is the dispatcher even alive?").
   *
   * Reasons cover every filter that drops a wake:
   * - thread-closed — subject had [FINAL]/[DONE]/[CLOSED]/[WRAP]
   * - allowlist-excluded — sender's `wake` list did not include the agent
   * - wake-on-cc — agent registered wake_on_cc:false and was on Cc
   * - dedup — duplicate UID seen recently
   * - rpc-suppress — RPC-notification mail right after a task event
   * - budget-exhausted — per-(agent, thread) wake budget hit the cap
   */
  postSkipped(account, event, reason, detail) {
    this.postActivity("/dispatcher/worker-skipped", {
      agentId: account.id,
      agentName: account.name,
      uid: event.uid,
      subject: extractSubject(event),
      from: extractFrom(event),
      reason,
      detail
    });
  }
  /** Build the env block we pass to the worker's MCP server child process. */
  async buildMcpEnv() {
    return {
      AGENTICMAIL_API_URL: this.cfg.apiUrl,
      AGENTICMAIL_MASTER_KEY: this.cfg.masterKey
      // No AGENTICMAIL_API_KEY: workers should ALWAYS pass `_account`
      // explicitly. Omitting the default key forces that discipline at
      // the MCP-server level (any forgotten `_account` becomes a clear
      // error rather than a silent identity drift).
    };
  }
  acquireSlot() {
    if (this.running < this.maxConcurrent) {
      this.running++;
      return Promise.resolve();
    }
    return new Promise((resolve) => {
      this.waiters.push(() => {
        this.running++;
        resolve();
      });
    });
  }
  releaseSlot() {
    this.running--;
    const next = this.waiters.shift();
    if (next) next();
  }
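  // acquireSlot/releaseSlot form a minimal counting semaphore. Sketch of
  // the intended pairing (illustrative; spawnWorker above is the real caller):
  //   await this.acquireSlot();
  //   try { /* run one worker */ } finally { this.releaseSlot(); }
  // Waiters resume strictly FIFO because releaseSlot() shifts from the
  // front of `this.waiters`.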
  /**
   * Per-agent serialization. At most ONE worker runs for any
   * given agent at a time. When a new wake fires for an agent
   * whose worker is still running, the new wake's spawnWorker
   * waits on the prior worker's tail before proceeding.
   *
   * This is the fix for the "dispatcher crashed when sender
   * broadcast to a 5-CC thread" failure mode: under the old
   * design, 5 emails landing for vesper-on-3-different-threads
   * in the same second spawned 5 simultaneous vesper workers,
   * each opening its own IMAP connection, each calling the
   * SDK, racing on the same inbox cache. With this gate they
   * queue tail-to-head and run sequentially.
   *
   * `nextRun` is a chained promise: each new spawn calls
   * `then()` on the previous tail so the order is preserved.
   * When the chain resolves to a no-op (empty queue), the
   * entry is garbage-collected from the map so memory stays
   * bounded at #active-agents.
   */
  agentSerial = /* @__PURE__ */ new Map();
  async acquireAgentSerial(agentId) {
    const prev = this.agentSerial.get(agentId);
    let release;
    const next = new Promise((resolve) => {
      release = resolve;
    });
    this.agentSerial.set(agentId, prev ? prev.then(() => next).catch(() => next) : next);
    if (prev) await prev.catch(() => {
    });
    return () => {
      release();
      if (this.agentSerial.get(agentId) === next) this.agentSerial.delete(agentId);
    };
  }
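  // Usage sketch for the serial gate above (illustrative):
  //   const release = await this.acquireAgentSerial(account.id);
  //   try { /* one worker turn */ } finally { release(); }
  // Two overlapping calls for the same agentId resolve in arrival order:
  // the second call awaits the first call's `next` promise, which only
  // settles once the first caller invokes its release() function.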
};
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
function sanitizeId(id) {
  return id.replace(/[^a-zA-Z0-9._-]/g, "_");
}
function defaultLog(level, msg) {
  const stream = level === "error" ? process.stderr : process.stdout;
  stream.write(`[${(/* @__PURE__ */ new Date()).toISOString()}] [${level}] ${msg}
`);
}
function defaultQuery() {
  return (params) => {
    let inner = null;
    const init = async () => {
      try {
        const mod = await import("@anthropic-ai/claude-agent-sdk");
        return mod.query(params);
      } catch (err) {
        throw new Error(
          `Dispatcher needs @anthropic-ai/claude-agent-sdk installed in the package, but: ${err.message}`
        );
      }
    };
    return {
      [Symbol.asyncIterator]() {
        return {
          async next() {
            if (!inner) inner = await init();
            const it = inner[Symbol.asyncIterator]();
            const self = this;
            self.next = it.next.bind(it);
            return it.next();
          }
        };
      }
    };
  };
}
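// defaultQuery defers importing @anthropic-ai/claude-agent-sdk until the
// first next() call: the wrapper iterator swaps itself out by rebinding
// self.next to the real SDK iterator, so later calls go straight through.
// Illustrative consumption (a sketch; assumes the SDK's query(params)
// signature and that the SDK is installed):
//   for await (const msg of defaultQuery()(params)) { /* handle msg */ }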

export {
  loadPersonaForAgent,
  Dispatcher
};