@adaptic/maestro 1.9.3 → 1.9.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,70 @@
1
+ /**
2
+ * Maestro — Claude CLI binary resolver.
3
+ *
4
+ * Shared utility for every code path that spawns `claude --print`.
5
+ * launchd's bare environment does NOT include ~/.local/bin or homebrew
6
+ * paths, so a plain `spawn('claude', …)` from a daemon-spawned process
7
+ * fails with ENOENT — even though the operator's interactive shell finds
8
+ * it without trouble.
9
+ *
10
+ * Caller sites:
11
+ * - scripts/daemon/dispatcher.mjs (reactive: replies to Slack/Gmail/etc)
12
+ * - scripts/daemon/responder.mjs (quick replies + holding messages)
13
+ * - scripts/daemon/cadence-consumer.mjs (scheduled cadence ticks)
14
+ * - scripts/poller/intra-session-check.mjs
15
+ * - any other module that spawns claude from a non-interactive context
16
+ */
17
+
18
+ import { existsSync } from "node:fs";
19
+ import { join } from "node:path";
20
+ import { homedir } from "node:os";
21
+
22
// Memoized result of the search below — resolved once per process.
let _resolved = null;

/**
 * Return the absolute path to the Claude CLI. Searches, in order:
 *   1. $CLAUDE_BIN env var (if set + executable on disk)
 *   2. ~/.local/bin/claude   (default Claude Code install location)
 *   3. /opt/homebrew/bin/claude (Apple Silicon homebrew)
 *   4. /usr/local/bin/claude (Intel homebrew)
 *   5. /usr/bin/claude
 *
 * Falls back to bare "claude" so the spawn's own error stays informative
 * if nothing on the search path exists.
 *
 * @returns {string} absolute path to the CLI, or "claude" if none found
 */
export function resolveClaudeBin() {
  if (_resolved) return _resolved;

  const searchOrder = [
    process.env.CLAUDE_BIN,
    join(homedir(), ".local/bin/claude"),
    "/opt/homebrew/bin/claude",
    "/usr/local/bin/claude",
    "/usr/bin/claude",
  ];

  const hit = searchOrder.filter(Boolean).find((candidate) => existsSync(candidate));
  _resolved = hit ?? "claude";
  return _resolved;
}
51
+
52
/**
 * Build a PATH suitable for child processes spawned from a daemon.
 * launchd strips PATH down to /usr/bin:/bin; this returns a string that
 * preserves the caller's PATH if set, then appends the locations claude
 * subsessions might need to find node, jq, gh, etc.
 *
 * Entries are de-duplicated (first occurrence wins), so repeated
 * augmentation — e.g. a daemon-spawned child augmenting again — no
 * longer grows PATH without bound.
 *
 * @returns {string} colon-separated PATH string
 */
export function augmentedPath() {
  const extras = [
    `${homedir()}/.local/bin`,
    "/opt/homebrew/bin",
    "/opt/homebrew/sbin",
    "/usr/local/bin",
    "/usr/bin",
    "/bin",
    "/usr/sbin",
    "/sbin",
  ];
  // Split the inherited PATH so duplicates of the appended dirs collapse;
  // filter(Boolean) drops empty segments (unset PATH, "::" artifacts).
  const inherited = (process.env.PATH || "").split(":").filter(Boolean);
  return [...new Set([...inherited, ...extras])].join(":");
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@adaptic/maestro",
3
- "version": "1.9.3",
3
+ "version": "1.9.5",
4
4
  "description": "Maestro — Autonomous AI agent operating system. Deploy AI employees on dedicated Mac minis.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -46,7 +46,7 @@
46
46
  },
47
47
  "always-build-npm": true,
48
48
  "scripts": {
49
- "test": "node --test lib/cadence-bus.test.mjs scripts/cadence/enqueue-cadence-tick.test.mjs scripts/daemon/cadence-consumer.test.mjs scripts/daemon/lib/session-router.test.mjs scripts/local-triggers/generate-plists.test.mjs bin/maestro.test.mjs",
49
+ "test": "node --test lib/cadence-bus.test.mjs scripts/cadence/enqueue-cadence-tick.test.mjs scripts/daemon/cadence-consumer.test.mjs scripts/daemon/dispatcher-cooldown.test.mjs scripts/daemon/lib/session-router.test.mjs scripts/local-triggers/generate-plists.test.mjs bin/maestro.test.mjs",
50
50
  "test:cadence": "node --test lib/cadence-bus.test.mjs scripts/cadence/enqueue-cadence-tick.test.mjs scripts/daemon/cadence-consumer.test.mjs",
51
51
  "test:cli": "node --test bin/maestro.test.mjs",
52
52
  "test:plists": "node --test scripts/local-triggers/generate-plists.test.mjs",
@@ -0,0 +1,562 @@
1
+ #!/usr/bin/env node
2
+ // =============================================================================
3
+ // Agent Daemon — Reactive event-driven message processor
4
+ // =============================================================================
5
+ //
6
+ // Persistent Node.js process that:
7
+ // 1. Polls Slack/Gmail/Calendar every 30s
8
+ // 2. Classifies each item via Haiku API (~0.5-1s)
9
+ // 3. Spawns dedicated claude --print sessions per item (up to 10 parallel)
10
+ // 4. Sweeps backlog queues every 2 min for stale items
11
+ //
12
+ // Replaces: poller + inbox-processor + backlog-executor (3-stage pipeline)
13
+ // Target: CEO DM → response in under 2 minutes
14
+ //
15
+ // Run: node scripts/daemon/<agent>-daemon.mjs (e.g. ravi-daemon.mjs)
16
+ // Install: launchd plist with KeepAlive: true
17
+ // =============================================================================
18
+
19
import { readdirSync, readFileSync, renameSync, mkdirSync, appendFileSync } from "fs";
import { resolve, join } from "path";
import { fileURLToPath } from "node:url";

import { config } from "dotenv";
22
+
23
// Load .env before anything else.
// BUG FIX: `new URL(...).pathname` yields percent-encoded paths (breaks on
// repos under directories with spaces) and leading-slash drive paths on
// Windows; fileURLToPath is the supported file-URL → path conversion.
const AGENT_REPO_DIR = process.env.AGENT_DIR || resolve(fileURLToPath(new URL(".", import.meta.url)), "../..");
config({ path: join(AGENT_REPO_DIR, ".env") });
26
+
27
// Load agent identity (canonical SOT) so filters can match the running
// agent's own name/slack-id rather than a hardcoded one.
let _agent = null;

/**
 * Lazily read config/agent.json once and cache it. On any read/parse
 * failure, fall back to a generic stub identity so the daemon still runs.
 */
function loadAgent() {
  if (_agent) return _agent;
  const sotPath = join(AGENT_REPO_DIR, "config/agent.json");
  try {
    _agent = JSON.parse(readFileSync(sotPath, "utf-8"));
  } catch {
    // Missing or corrupt SOT file — degrade to a placeholder identity.
    _agent = { firstName: "Agent", lastName: "", slackMemberId: "" };
  }
  return _agent;
}
39
+
40
+ import { pollSlack } from "../poller/slack-poller.mjs";
41
+ import { pollGmail } from "../poller/gmail-poller.mjs";
42
+ import { pollCalendar } from "../poller/calendar-poller.mjs";
43
+ import { pollMehranGmail } from "../poller/mehran-gmail-poller.mjs";
44
+ import { isPriorityItem } from "../poller/utils.mjs";
45
+ import { classifyItem, isDirectedAtAgent } from "./classifier.mjs";
46
+ import { dispatch, getStatus, availableSlots, canDispatchBacklog, resetActiveSessions } from "./dispatcher.mjs";
47
+ import { buildPrompt } from "./prompt-builder.mjs";
48
+ import { sendQuickResponse, sendHoldingMessage, isQuickReply } from "./responder.mjs";
49
+ import { recordPoll, recordClassification, recordSession, writeHealthDashboard } from "./health.mjs";
50
+ import { acquireLock, updateLock, scanStaleLocks, acquireThreadLock, claimRequest, hasActiveClaim, sweepStaleItemClaims } from "./session-lock.mjs";
51
+
52
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------

// Interval between intake polls, in ms. Override via DAEMON_POLL_INTERVAL.
const POLL_INTERVAL = parseInt(process.env.DAEMON_POLL_INTERVAL || "60000", 10); // 60s (up from 30s to avoid Slack rate limits)
// Backlog sweep cadence — bumped 2min → 10min in 1.10. The dispatcher
// applies a per-item post-completion cooldown (4h success, 30min failure)
// so we no longer need a tight sweep loop. Operators with high-throughput
// queues can override via DAEMON_BACKLOG_INTERVAL.
const BACKLOG_INTERVAL = parseInt(process.env.DAEMON_BACKLOG_INTERVAL || "600000", 10); // 10 min
// Health-dashboard refresh + stale-claim sweep period, in ms (not configurable).
const HEALTH_INTERVAL = 60000; // 1 min
// Note: dedup is now handled by file-based locks in session-lock.mjs
64
+
65
+ // ---------------------------------------------------------------------------
66
+ // Logging
67
+ // ---------------------------------------------------------------------------
68
+
69
/** Ensure logs/daemon/ exists under the agent repo and return its path. */
function logDir() {
  const daemonLogDir = join(AGENT_REPO_DIR, "logs", "daemon");
  mkdirSync(daemonLogDir, { recursive: true });
  return daemonLogDir;
}
74
+
75
/** Current UTC date as a YYYY-MM-DD string (shards log files by day). */
function today() {
  // ISO-8601 timestamps always start with the 10-character date.
  return new Date().toISOString().slice(0, 10);
}
78
+
79
/** Append one timestamped JSON line to today's `<type>` JSONL log file. */
function logEvent(type, entry) {
  const record = { timestamp: new Date().toISOString(), ...entry };
  const logPath = join(logDir(), `${today()}-${type}.jsonl`);
  appendFileSync(logPath, JSON.stringify(record) + "\n");
}
83
+
84
+ // ---------------------------------------------------------------------------
85
+ // Deduplication — now handled by file-based locks in session-lock.mjs
86
+ // ---------------------------------------------------------------------------
87
+
88
+ // ---------------------------------------------------------------------------
89
+ // Poll Loop
90
+ // ---------------------------------------------------------------------------
91
+
92
/**
 * One poll cycle: pull fresh items from every intake service, dedup them
 * via in-cycle raw_ref tracking plus file-based locks, and hand each
 * surviving item to processItem() sequentially.
 *
 * Service failures are isolated — one poller throwing does not stop the
 * others. Always records the poll in health metrics.
 */
async function poll() {
  const services = [
    { name: "slack", fn: pollSlack },
    { name: "gmail", fn: pollGmail },
    { name: "mehran-gmail", fn: pollMehranGmail },
    { name: "calendar", fn: pollCalendar },
  ];

  let totalItems = 0;

  // Hoisted out of the per-item filter: agent identity is loop-invariant
  // (loadAgent() memoizes, but there is no reason to re-derive these).
  const me = loadAgent();
  const myFirst = (me.firstName || "").toLowerCase();
  const myFull = (me.fullName || "").toLowerCase().replace(/\s+/g, "-");

  for (const svc of services) {
    try {
      const result = await svc.fn();
      // ROBUSTNESS: tolerate a poller returning a partial result object —
      // previously `result.items.filter` / `result.errors.length` threw if
      // either field was missing, killing the rest of this service's cycle.
      const items = result?.items ?? [];
      const errors = result?.errors ?? [];

      // Dedup by raw_ref first: the same Slack message arrives via
      // multiple paths (channel scan, thread scan, events-server JSON)
      // with different item IDs. Using raw_ref as the lock key ensures
      // a single message is only processed once regardless of path.
      const seenRefs = new Set();
      const newItems = items.filter((item) => {
        // Skip the agent's own messages — defensive check in case intake filters miss them
        const sender = (item.sender || "").toLowerCase();
        if (myFirst && (sender === myFirst || sender === myFull)) return false;

        const lockKey = item.raw_ref || item.id || `${svc.name}-${Date.now()}`;
        if (seenRefs.has(lockKey)) return false;
        seenRefs.add(lockKey);
        const lock = acquireLock(lockKey, {
          sender: item.sender || "unknown",
          channel: item.channel || item.channel_id || "unknown",
        });
        return lock.acquired;
      });
      totalItems += newItems.length;

      // Sequential on purpose: keeps session dispatch ordering deterministic.
      for (const item of newItems) {
        await processItem(item, svc.name);
      }

      if (errors.length > 0) {
        console.warn(`[daemon] ${svc.name} errors:`, errors.slice(0, 3));
      }
    } catch (err) {
      console.error(`[daemon] ${svc.name} poll failed:`, err.message);
    }
  }

  recordPoll(totalItems);
  if (totalItems > 0) {
    console.log(`[daemon] Poll found ${totalItems} new items`);
  }
}
147
+
148
+ // ---------------------------------------------------------------------------
149
+ // Process a single item
150
+ // ---------------------------------------------------------------------------
151
+
152
/**
 * Classify a single intake item and route it down the cheapest viable path:
 * ignore, quick reply (direct API post), or full claude session (optionally
 * preceded by a holding message). Applies three gates before spawning
 * anything: directed-message filter, thread/channel dedup, request claim.
 *
 * @param {object} item    Normalized poller item. Mutated: `is_dm` and
 *                         `agent_in_thread` are attached for downstream use.
 * @param {string} service Originating service name ("slack", "gmail", …).
 */
async function processItem(item, service) {
  const itemId = item.raw_ref || item.id || `${service}-${Date.now()}`;

  try {
    // Enrich item with channel-type metadata for directed-message detection
    const channelStr = (item.channel || "").toLowerCase();
    const channelId = item.channel_id || "";
    const isDm = channelStr.startsWith("dm/") || channelId.startsWith("D");
    const myFirstName = loadAgent().firstName || "Agent";
    // NOTE(review): the first name is interpolated into a RegExp unescaped —
    // fine for plain names from config/agent.json; confirm names can never
    // contain regex metacharacters.
    const agentThreadRegex = new RegExp(`^${myFirstName}:`, "m");
    const agentInThread = !!(item.thread_context && agentThreadRegex.test(item.thread_context));
    item.is_dm = isDm;
    item.agent_in_thread = agentInThread;

    // Classify via Haiku API.
    // BUG FIX: `a || b ? "ceo" : "unknown"` parsed as `(a || b) ? … : …`,
    // which mapped ANY truthy sender_privilege (e.g. "member") to "ceo".
    // Parenthesized so an explicit privilege passes through and the
    // from_ceo priority signal is only the fallback.
    const classResult = await classifyItem({
      sender: item.sender || "unknown",
      sender_privilege: item.sender_privilege || (item.priority_signals?.from_ceo ? "ceo" : "unknown"),
      channel: item.channel || "unknown",
      service,
      content: item.content || item.subject || "",
      is_reply: item.is_reply || false,
      thread_context: item.thread_context || null,
      subject: item.subject || "",
      is_dm: isDm,
      is_group: !isDm && service === "slack",
      agent_in_thread: agentInThread,
    });

    recordClassification(true);

    logEvent("classifications", {
      item_id: itemId,
      sender: item.sender,
      service,
      ...classResult,
    });

    console.log(`[daemon] Classified: [${classResult.priority}] ${classResult.summary} → ${classResult.model} (${classResult.action})`);

    // Skip ignored items
    if (classResult.category === "ignore" || classResult.action === "ignore") {
      markProcessed(item, service);
      return;
    }

    // DIRECTED-MESSAGE GATE: In channels and group chats, only respond to
    // messages that are clearly directed at the agent. DMs always pass.
    // This prevents the agent from inserting itself into every conversation.
    //
    // Two-layer check:
    // 1. If LLM says NOT directed → verify with rules (catch missed @mentions, CEO, DMs)
    // 2. If LLM says directed BUT it's a non-DM channel → verify with rules (catch over-eager LLM)
    //
    // (Uses the `isDm` computed above — the previous inner recompute was an
    // exact equivalent of that value and has been removed.)
    if (service === "slack") {
      if (!classResult.directed_at_agent) {
        // LLM says not directed — double-check with rule-based heuristics
        // to catch clear signals the LLM may have missed (DM, CEO, @mention)
        const ruleCheck = isDirectedAtAgent(item);
        if (!ruleCheck) {
          console.log(`[daemon] Directed-message filter: skipping non-directed message from ${item.sender} in ${item.channel}`);
          logEvent("classifications", {
            item_id: itemId,
            sender: item.sender,
            service,
            skipped: true,
            reason: "not_directed_at_agent",
            classifier_directed: false,
            rule_directed: false,
            summary: classResult.summary,
          });
          markProcessed(item, service);
          return;
        }
        // Rule-based says directed — override LLM
        console.log(`[daemon] Directed-message override: LLM said not directed but rule-based detected direction (${item.sender} in ${item.channel})`);
      } else if (!isDm) {
        // LLM says directed in a channel/group — sanity-check with rules.
        // If rule-based also agrees, proceed. If rules say no AND the message
        // doesn't contain the agent's name, the LLM was probably over-eager.
        const ruleCheck = isDirectedAtAgent(item);
        const content = (item.content || "").toLowerCase();
        const me = loadAgent();
        const myFirst = (me.firstName || "").toLowerCase();
        const mySlackPrefix = (me.slackMemberId || "").slice(0, 3); // e.g. "U09"
        const mentionsAgent =
          (myFirst && content.includes(myFirst)) ||
          (mySlackPrefix && content.includes(`<@${mySlackPrefix}`));
        if (!ruleCheck && !mentionsAgent) {
          console.log(`[daemon] Directed-message filter (LLM override): LLM said directed but rules disagree for ${item.sender} in ${item.channel} — skipping`);
          logEvent("classifications", {
            item_id: itemId,
            sender: item.sender,
            service,
            skipped: true,
            reason: "llm_over_eager_directed",
            classifier_directed: true,
            rule_directed: false,
            summary: classResult.summary,
          });
          markProcessed(item, service);
          return;
        }
      }
    }

    // THREAD / CHANNEL DEDUP: Prevent multiple sessions from responding to the
    // same conversation. For thread replies, lock by thread_ts. For non-threaded
    // DMs, lock by channel_id (so rapid-fire DMs don't each spawn a session).
    // This check runs BEFORE quick reply to prevent ALL duplicate responses.
    {
      const channel = item.channel_id || (item.raw_ref ? item.raw_ref.match(/slack:([^:]+):/)?.[1] : null) || item.channel;
      const threadTs = item.thread_id || (isDm ? `dm-channel` : null);
      if (threadTs && channel) {
        const threadCheck = acquireThreadLock(channel, threadTs);
        if (!threadCheck.allowed) {
          console.log(`[daemon] Thread/channel dedup: skipping item from ${item.sender} — ${threadCheck.reason}`);
          logEvent("classifications", {
            item_id: itemId,
            sender: item.sender,
            service,
            skipped: true,
            reason: `thread_dedup: ${threadCheck.reason}`,
          });
          markProcessed(item, service);
          return;
        }
      }
    }

    // REQUEST CLAIMING: For action_required items, claim the request so
    // parallel sessions (from overlapping poll cycles or backlog sweep)
    // don't independently process the same "email me the brief" request.
    // Claim key: action_type + recipient channel + summary (normalized).
    if (classResult.category === "action_required" && (classResult.action === "respond" || classResult.action === "draft" || classResult.action === "research")) {
      const claimAttrs = {
        recipient: item.channel_id || item.channel || item.sender || "unknown",
        subject: classResult.summary || item.subject || "",
        action_type: classResult.action,
      };
      const claim = claimRequest(claimAttrs);
      if (!claim.claimed) {
        console.log(`[daemon] Request claim denied for ${item.sender}: ${claim.reason}`);
        logEvent("classifications", {
          item_id: itemId,
          sender: item.sender,
          service,
          skipped: true,
          reason: `request_claim_denied: ${claim.reason}`,
          summary: classResult.summary,
        });
        markProcessed(item, service);
        return;
      }
    }

    // QUICK REPLY PATH: Direct responses via API + Slack/Gmail posting.
    // No claude --print session needed. ~4-8 seconds total.
    if (isQuickReply(classResult)) {
      console.log(`[daemon] Quick reply path for ${item.sender} (${classResult.model})`);
      const result = await sendQuickResponse(item, classResult);
      if (result.sent) {
        markProcessed(item, service);
        return;
      }
      // If quick reply failed to send or was blocked by validation, fall through to dispatch a full session
      const reason = result.blocked ? `validation blocked: ${result.issues?.map(i => i.rule).join(", ")}` : "send failed";
      console.warn(`[daemon] Quick reply not sent (${reason}), falling through to session dispatch`);
    }

    // COMPLEX WORK PATH: Send immediate holding message, then dispatch full session
    let holdingText = null;
    try {
      if (classResult.action === "respond" || classResult.action === "draft" || classResult.action === "research") {
        const holdResult = await sendHoldingMessage(item, classResult);
        holdingText = holdResult.sent ? holdResult.holdingText : null;
        if (holdResult.sent) {
          updateLock(itemId, { holdingSent: true });
        }
      }
    } catch (err) {
      // Holding message is best-effort; the full session still dispatches.
      console.error(`[daemon] Holding message failed (non-fatal): ${err.message}`);
    }

    // Build prompt with holding message context and dispatch
    const prompt = await buildPrompt(item, classResult, {
      type: "inbox",
      holdingMessage: holdingText,
    });
    dispatch(prompt, item, classResult);
    recordSession(false); // spawned
    markProcessed(item, service);

  } catch (err) {
    console.error(`[daemon] Failed to process item ${itemId}:`, err.message);
    recordClassification(false);
  }
}
351
+
352
+ // ---------------------------------------------------------------------------
353
+ // Mark inbox item as processed
354
+ // ---------------------------------------------------------------------------
355
+
356
/**
 * Mark an inbox item's on-disk file(s) as processed by renaming them to
 * *.processed. The pollers write files to state/inbox/{service}/; items
 * with no backing file (e.g. backlog items) are a silent no-op.
 */
function markProcessed(item, service) {
  try {
    const inboxDir = join(AGENT_REPO_DIR, "state", "inbox", service);
    // BUG FIX: `f.includes(item.raw_ref)` with an undefined raw_ref coerces
    // to the string "undefined", spuriously matching any filename that
    // happens to contain it. Only match on identifiers the item really has.
    const keys = [item.id, item.raw_ref].filter(Boolean);
    if (keys.length === 0) return;
    const files = readdirSync(inboxDir).filter(
      (f) => !f.endsWith(".processed") && keys.some((k) => f.includes(k))
    );
    for (const file of files) {
      const src = join(inboxDir, file);
      const dst = join(inboxDir, file + ".processed");
      renameSync(src, dst);
    }
  } catch {
    // Not all items have inbox files (e.g. backlog items)
  }
}
373
+
374
+ // ---------------------------------------------------------------------------
375
+ // Backlog Sweep
376
+ // ---------------------------------------------------------------------------
377
+
378
/**
 * Periodic backlog sweep: scan state/queues/*.yaml for open/in_progress
 * items and dispatch the highest-priority ones into claude sessions, up to
 * the dispatcher's available capacity (capped at 5 per sweep).
 */
async function sweepBacklog() {
  const slots = availableSlots();
  if (slots <= 0) return; // No capacity

  try {
    const queueDir = join(AGENT_REPO_DIR, "state", "queues");
    const files = readdirSync(queueDir).filter((f) => f.endsWith(".yaml"));
    const actionableItems = [];

    for (const file of files) {
      try {
        const content = readFileSync(join(queueDir, file), "utf-8");
        // Simple YAML parsing — look for items with status: open
        const items = content.split(/^-\s+/m).filter(Boolean);
        for (const itemBlock of items) {
          // Strip comment lines to avoid matching schema templates (e.g. "# title: string")
          const cleanBlock = itemBlock.split("\n").filter((l) => !l.trimStart().startsWith("#")).join("\n");
          // Match both quoted ("title":) and unquoted (title:) YAML keys
          const statusMatch = cleanBlock.match(/"?status"?:\s*["']?(open|in_progress)["']?/);
          const blockedMatch = cleanBlock.match(/"?status"?:\s*["']?blocked["']?/);
          if (statusMatch && !blockedMatch) {
            const titleMatch = cleanBlock.match(/"?title"?:\s*["']?(.+?)["']?\s*\n/);
            const priorityMatch = cleanBlock.match(/"?priority"?:\s*["']?(critical|high|normal|low)["']?/);
            const nextActionMatch = cleanBlock.match(/"?next_action"?:\s*["']?(.+?)["']?\s*\n/);
            // BUG FIX: the hasActiveClaim() gate below keys on qi.id, but no
            // id was ever extracted, so the file-based claim check never ran.
            // Anchored to line start so keys like "thread_id" don't match.
            const idMatch = cleanBlock.match(/^\s*"?id"?:\s*["']?([\w.-]+)/m);
            if (titleMatch && nextActionMatch) {
              actionableItems.push({
                id: idMatch ? idMatch[1] : null,
                title: titleMatch[1],
                priority: priorityMatch?.[1] || "normal",
                next_action: nextActionMatch[1],
                source_file: file,
                raw: cleanBlock.substring(0, 500),
              });
            }
          }
        }
      } catch {
        // Skip unparseable queue files
      }
    }

    // Sort: critical first, then high, then normal.
    // BUG FIX: `priorityOrder[p] || 3` coerced critical's rank 0 to 3,
    // which sorted critical items LAST; `??` only falls back for unknown
    // priorities.
    const priorityOrder = { critical: 0, high: 1, normal: 2, low: 3 };
    actionableItems.sort((a, b) => (priorityOrder[a.priority] ?? 3) - (priorityOrder[b.priority] ?? 3));

    // Filter out items that already have active sessions or exceeded retries
    const dispatchable = actionableItems.filter((qi) => {
      const check = canDispatchBacklog(qi);
      if (!check.allowed) {
        if (check.reason === "max_retries_exceeded") {
          console.log(`[daemon] Backlog skip: "${qi.title}" — retries exhausted`);
        }
        return false;
      }

      // File-based item claim check — survives daemon restart and is visible
      // to concurrent launchd triggers. Complements in-memory activeBacklogKeys.
      // (ib-20260407-001b: concurrent session coordination)
      if (qi.id && hasActiveClaim(qi.id)) {
        console.log(`[daemon] Backlog skip: "${qi.title}" — item claimed by another session`);
        return false;
      }

      return true;
    });

    // Dispatch top items up to available slots
    const toDispatch = dispatchable.slice(0, Math.min(slots, 5));
    let dispatched = 0;

    for (const queueItem of toDispatch) {
      const classResult = {
        priority: queueItem.priority,
        action: "queue",
        model: queueItem.priority === "critical" ? "opus" : "sonnet",
        summary: queueItem.title,
        category: "action_required",
      };

      const prompt = await buildPrompt(null, classResult, {
        type: "backlog",
        queueItem,
      });

      dispatch(prompt, queueItem, classResult, "backlog");
      recordSession(false);
      dispatched++;
    }

    if (dispatched > 0) {
      console.log(`[daemon] Backlog sweep: dispatched ${dispatched} items (${actionableItems.length - dispatchable.length} skipped: in-flight or exhausted)`);
    }
  } catch (err) {
    console.error(`[daemon] Backlog sweep error:`, err.message);
  }
}
473
+
474
+ // ---------------------------------------------------------------------------
475
+ // Main
476
+ // ---------------------------------------------------------------------------
477
+
478
/**
 * Daemon entry point: print the startup banner, honor the emergency-stop
 * file, clear orphaned lock state, run one immediate poll, then install
 * three recurring timers (poll, backlog sweep, health/claims) and the
 * SIGTERM/SIGINT shutdown handlers. Never returns under normal operation —
 * the timers keep the event loop alive.
 */
async function main() {
  console.log("╔══════════════════════════════════════════════════════════╗");
  console.log(`║ ${(loadAgent().firstName || "Agent").padEnd(8)} Daemon — Reactive Event Processor ║`);
  console.log("╠══════════════════════════════════════════════════════════╣");
  console.log(`║ Directory: ${AGENT_REPO_DIR}`);
  console.log(`║ Poll: every ${POLL_INTERVAL / 1000}s`);
  console.log(`║ Backlog: every ${BACKLOG_INTERVAL / 1000}s`);
  console.log(`║ Concurrency: up to ${process.env.DAEMON_MAX_CONCURRENT || 10} parallel sessions`);
  console.log("╚══════════════════════════════════════════════════════════╝");

  // Check for emergency stop — the mere presence of the file aborts startup.
  // (readFileSync doubles as an existence probe; the catch means "absent".)
  try {
    readFileSync(join(AGENT_REPO_DIR, ".emergency-stop"));
    console.error("[daemon] Emergency stop active — exiting");
    process.exit(0);
  } catch {
    // No emergency stop — continue
  }

  // Clear orphaned state from prior daemon instances
  const staleCleared = scanStaleLocks();
  if (staleCleared > 0) {
    console.log(`[daemon] Cleared ${staleCleared} stale session locks`);
  }
  resetActiveSessions();

  // Initial poll immediately
  await poll();

  // Poll loop — emergency stop is re-checked each cycle so operators can
  // halt a running daemon by touching the file, without sending signals.
  setInterval(async () => {
    try {
      // Check emergency stop
      try { readFileSync(join(AGENT_REPO_DIR, ".emergency-stop")); process.exit(0); } catch {}
      await poll();
    } catch (err) {
      console.error("[daemon] Poll loop error:", err.message);
    }
  }, POLL_INTERVAL);

  // Backlog sweep
  setInterval(async () => {
    try {
      await sweepBacklog();
    } catch (err) {
      console.error("[daemon] Backlog sweep error:", err.message);
    }
  }, BACKLOG_INTERVAL);

  // Health dashboard + stale claim sweep
  setInterval(() => {
    try {
      writeHealthDashboard();
      // Sweep stale item claims (ib-20260407-001b: concurrent session coordination)
      const claimsSwept = sweepStaleItemClaims();
      if (claimsSwept > 0) {
        console.log(`[daemon] Swept ${claimsSwept} stale item claims`);
      }
    } catch (err) {
      console.error("[daemon] Health write error:", err.message);
    }
  }, HEALTH_INTERVAL);

  // Graceful shutdown — clean up active.json so next startup doesn't see stale sessions
  process.on("SIGTERM", () => {
    console.log("[daemon] SIGTERM received, shutting down gracefully");
    resetActiveSessions();
    writeHealthDashboard();
    process.exit(0);
  });

  process.on("SIGINT", () => {
    console.log("[daemon] SIGINT received, shutting down");
    resetActiveSessions();
    writeHealthDashboard();
    process.exit(0);
  });

  console.log("[daemon] Running. Ctrl+C to stop.");
}
558
+
559
// Entry point: surface any unhandled startup failure and exit non-zero so
// launchd's KeepAlive restarts the daemon.
const onFatal = (err) => {
  console.error("[daemon] Fatal:", err);
  process.exit(1);
};

main().catch(onFatal);