@adaptic/maestro 1.9.3 → 1.9.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/claude-bin.mjs +70 -0
- package/package.json +1 -1
- package/scripts/daemon/agent-daemon.mjs +558 -0
- package/scripts/daemon/cadence-consumer.mjs +7 -47
- package/scripts/daemon/classifier.mjs +5 -3
- package/scripts/daemon/dispatcher.mjs +6 -2
- package/scripts/daemon/maestro-daemon.mjs +12 -9
- package/scripts/daemon/responder.mjs +5 -2
- package/scripts/daemon/sophie-daemon.mjs +11 -552
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Maestro — Claude CLI binary resolver.
|
|
3
|
+
*
|
|
4
|
+
* Shared utility for every code path that spawns `claude --print`.
|
|
5
|
+
* launchd's bare environment does NOT include ~/.local/bin or homebrew
|
|
6
|
+
* paths, so a plain `spawn('claude', …)` from a daemon-spawned process
|
|
7
|
+
* fails with ENOENT — even though the operator's interactive shell finds
|
|
8
|
+
* it without trouble.
|
|
9
|
+
*
|
|
10
|
+
* Caller sites:
|
|
11
|
+
* - scripts/daemon/dispatcher.mjs (reactive: replies to Slack/Gmail/etc)
|
|
12
|
+
* - scripts/daemon/responder.mjs (quick replies + holding messages)
|
|
13
|
+
* - scripts/daemon/cadence-consumer.mjs (scheduled cadence ticks)
|
|
14
|
+
* - scripts/poller/intra-session-check.mjs
|
|
15
|
+
* - any other module that spawns claude from a non-interactive context
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
import { existsSync } from "node:fs";
|
|
19
|
+
import { join } from "node:path";
|
|
20
|
+
import { homedir } from "node:os";
|
|
21
|
+
|
|
22
|
+
// Memoized resolution result — the search runs at most once per process.
let _resolved = null;

/**
 * Return the absolute path to the Claude CLI. Searches, in order:
 *   1. $CLAUDE_BIN env var (if it points at an existing file)
 *   2. ~/.local/bin/claude    (default Claude Code install location)
 *   3. /opt/homebrew/bin/claude (Apple Silicon homebrew)
 *   4. /usr/local/bin/claude  (Intel homebrew)
 *   5. /usr/bin/claude
 *
 * Falls back to the bare string "claude" so that, when nothing on the search
 * path exists, the eventual spawn's own ENOENT stays informative.
 *
 * @returns {string} absolute path to the binary, or "claude" as a last resort
 */
export function resolveClaudeBin() {
  if (_resolved !== null) return _resolved;

  const searchOrder = [
    process.env.CLAUDE_BIN,
    join(homedir(), ".local/bin/claude"),
    "/opt/homebrew/bin/claude",
    "/usr/local/bin/claude",
    "/usr/bin/claude",
  ];

  const hit = searchOrder.filter(Boolean).find((candidate) => existsSync(candidate));
  _resolved = hit ?? "claude";
  return _resolved;
}
|
|
51
|
+
|
|
52
|
+
/**
 * Build a PATH suitable for child processes spawned from a daemon.
 * launchd strips PATH down to /usr/bin:/bin; the returned string preserves
 * the caller's PATH (when non-empty) and appends the locations claude
 * subsessions might need to find node, jq, gh, etc.
 *
 * @returns {string} colon-joined PATH string with no empty segments
 */
export function augmentedPath() {
  const fallbackDirs = [
    `${homedir()}/.local/bin`,
    "/opt/homebrew/bin",
    "/opt/homebrew/sbin",
    "/usr/local/bin",
    "/usr/bin",
    "/bin",
    "/usr/sbin",
    "/sbin",
  ];
  const segments = [process.env.PATH || "", ...fallbackDirs];
  return segments.filter(Boolean).join(":");
}
|
package/package.json
CHANGED
|
@@ -0,0 +1,558 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
// =============================================================================
|
|
3
|
+
// Agent Daemon — Reactive event-driven message processor
|
|
4
|
+
// =============================================================================
|
|
5
|
+
//
|
|
6
|
+
// Persistent Node.js process that:
|
|
7
|
+
// 1. Polls Slack/Gmail/Calendar every 30s
|
|
8
|
+
// 2. Classifies each item via Haiku API (~0.5-1s)
|
|
9
|
+
// 3. Spawns dedicated claude --print sessions per item (up to 10 parallel)
|
|
10
|
+
// 4. Sweeps backlog queues every 2 min for stale items
|
|
11
|
+
//
|
|
12
|
+
// Replaces: poller + inbox-processor + backlog-executor (3-stage pipeline)
|
|
13
|
+
// Target: CEO DM → response in under 2 minutes
|
|
14
|
+
//
|
|
15
|
+
// Run: node scripts/daemon/<agent>-daemon.mjs (e.g. ravi-daemon.mjs)
|
|
16
|
+
// Install: launchd plist with KeepAlive: true
|
|
17
|
+
// =============================================================================
|
|
18
|
+
|
|
19
|
+
import { config } from "dotenv";
|
|
20
|
+
import { resolve, join } from "path";
|
|
21
|
+
import { readdirSync, readFileSync, renameSync, mkdirSync, appendFileSync } from "fs";
|
|
22
|
+
|
|
23
|
+
// Load .env before anything else
|
|
24
|
+
const AGENT_REPO_DIR = process.env.AGENT_DIR || resolve(new URL(".", import.meta.url).pathname, "../..");
|
|
25
|
+
config({ path: join(AGENT_REPO_DIR, ".env") });
|
|
26
|
+
|
|
27
|
+
// Agent identity cache — populated lazily on first loadAgent() call.
let _agent = null;

/**
 * Load the agent identity from config/agent.json (canonical SOT) so filters
 * can match the running agent's own name/slack-id rather than a hardcoded one.
 * When the file is missing or unparseable, a neutral placeholder identity is
 * cached instead so callers never have to null-check.
 *
 * @returns {{firstName: string, lastName: string, slackMemberId: string}}
 */
function loadAgent() {
  if (_agent) return _agent;

  try {
    const raw = readFileSync(join(AGENT_REPO_DIR, "config/agent.json"), "utf-8");
    _agent = JSON.parse(raw);
  } catch {
    // Missing/corrupt config — fall back to a harmless default identity.
    _agent = { firstName: "Agent", lastName: "", slackMemberId: "" };
  }

  return _agent;
}
|
|
39
|
+
|
|
40
|
+
import { pollSlack } from "../poller/slack-poller.mjs";
|
|
41
|
+
import { pollGmail } from "../poller/gmail-poller.mjs";
|
|
42
|
+
import { pollCalendar } from "../poller/calendar-poller.mjs";
|
|
43
|
+
import { pollMehranGmail } from "../poller/mehran-gmail-poller.mjs";
|
|
44
|
+
import { isPriorityItem } from "../poller/utils.mjs";
|
|
45
|
+
import { classifyItem, isDirectedAtAgent } from "./classifier.mjs";
|
|
46
|
+
import { dispatch, getStatus, availableSlots, canDispatchBacklog, resetActiveSessions } from "./dispatcher.mjs";
|
|
47
|
+
import { buildPrompt } from "./prompt-builder.mjs";
|
|
48
|
+
import { sendQuickResponse, sendHoldingMessage, isQuickReply } from "./responder.mjs";
|
|
49
|
+
import { recordPoll, recordClassification, recordSession, writeHealthDashboard } from "./health.mjs";
|
|
50
|
+
import { acquireLock, updateLock, scanStaleLocks, acquireThreadLock, claimRequest, hasActiveClaim, sweepStaleItemClaims } from "./session-lock.mjs";
|
|
51
|
+
|
|
52
|
+
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------

// All intervals are in milliseconds; the poll and backlog cadences are
// overridable via environment variables, the health cadence is fixed.
const POLL_INTERVAL = parseInt(process.env.DAEMON_POLL_INTERVAL || "60000", 10); // 60s (up from 30s to avoid Slack rate limits)
const BACKLOG_INTERVAL = parseInt(process.env.DAEMON_BACKLOG_INTERVAL || "120000", 10); // 2 min
const HEALTH_INTERVAL = 60000; // 1 min — health dashboard write + stale item-claim sweep
// Note: dedup is now handled by file-based locks in session-lock.mjs
|
|
60
|
+
|
|
61
|
+
// ---------------------------------------------------------------------------
|
|
62
|
+
// Logging
|
|
63
|
+
// ---------------------------------------------------------------------------
|
|
64
|
+
|
|
65
|
+
/**
 * Return the daemon log directory (logs/daemon under the agent repo),
 * creating it on first use so callers can append without checking.
 */
function logDir() {
  const daemonLogs = join(AGENT_REPO_DIR, "logs", "daemon");
  mkdirSync(daemonLogs, { recursive: true });
  return daemonLogs;
}
|
|
70
|
+
|
|
71
|
+
/**
 * Current UTC date as "YYYY-MM-DD" — used to shard daily log files.
 */
function today() {
  // toISOString() is always "YYYY-MM-DDTHH:mm:ss.sssZ"; the date is the first 10 chars.
  return new Date().toISOString().slice(0, 10);
}
|
|
74
|
+
|
|
75
|
+
/**
 * Append one JSONL record to today's per-type daemon log file.
 *
 * @param {string} type - log stream name (e.g. "classifications"); becomes
 *   part of the filename `${YYYY-MM-DD}-${type}.jsonl`
 * @param {object} entry - event payload; an ISO timestamp is prepended
 */
function logEvent(type, entry) {
  const record = { timestamp: new Date().toISOString(), ...entry };
  const file = join(logDir(), `${today()}-${type}.jsonl`);
  appendFileSync(file, JSON.stringify(record) + "\n");
}
|
|
79
|
+
|
|
80
|
+
// ---------------------------------------------------------------------------
|
|
81
|
+
// Deduplication — now handled by file-based locks in session-lock.mjs
|
|
82
|
+
// ---------------------------------------------------------------------------
|
|
83
|
+
|
|
84
|
+
// ---------------------------------------------------------------------------
|
|
85
|
+
// Poll Loop
|
|
86
|
+
// ---------------------------------------------------------------------------
|
|
87
|
+
|
|
88
|
+
/**
 * One poll cycle: fetch new items from every intake service, dedup them via
 * in-batch ref tracking plus cross-process file locks, and hand each
 * surviving item to processItem().
 *
 * A failure thrown by one service's poller is logged and does not prevent the
 * remaining services from being polled.
 */
async function poll() {
  // Intake sources, polled in this order every cycle.
  const services = [
    { name: "slack", fn: pollSlack },
    { name: "gmail", fn: pollGmail },
    { name: "mehran-gmail", fn: pollMehranGmail },
    { name: "calendar", fn: pollCalendar },
  ];

  let totalItems = 0;

  for (const svc of services) {
    try {
      const result = await svc.fn();
      // Dedup by raw_ref first: the same Slack message arrives via
      // multiple paths (channel scan, thread scan, events-server JSON)
      // with different item IDs. Using raw_ref as the lock key ensures
      // a single message is only processed once regardless of path.
      const seenRefs = new Set();
      const newItems = result.items.filter((item) => {
        // Skip the agent's own messages — defensive check in case intake filters miss them
        const sender = (item.sender || "").toLowerCase();
        const me = loadAgent();
        const myFirst = (me.firstName || "").toLowerCase();
        // fullName is compared in slug form ("jane-doe") — presumably how the
        // pollers report the agent's own sender id; TODO confirm against poller output.
        const myFull = (me.fullName || "").toLowerCase().replace(/\s+/g, "-");
        if (myFirst && (sender === myFirst || sender === myFull)) return false;

        // In-memory dedup within this batch, then a file lock visible to
        // other processes; only lock winners survive the filter.
        const lockKey = item.raw_ref || item.id || `${svc.name}-${Date.now()}`;
        if (seenRefs.has(lockKey)) return false;
        seenRefs.add(lockKey);
        const lock = acquireLock(lockKey, {
          sender: item.sender || "unknown",
          channel: item.channel || item.channel_id || "unknown",
        });
        return lock.acquired;
      });
      totalItems += newItems.length;

      // Process sequentially so dispatch order follows intake order.
      for (const item of newItems) {
        await processItem(item, svc.name);
      }

      if (result.errors.length > 0) {
        console.warn(`[daemon] ${svc.name} errors:`, result.errors.slice(0, 3));
      }
    } catch (err) {
      // One failing service must not block the others.
      console.error(`[daemon] ${svc.name} poll failed:`, err.message);
    }
  }

  recordPoll(totalItems);
  if (totalItems > 0) {
    console.log(`[daemon] Poll found ${totalItems} new items`);
  }

}
|
|
143
|
+
|
|
144
|
+
// ---------------------------------------------------------------------------
|
|
145
|
+
// Process a single item
|
|
146
|
+
// ---------------------------------------------------------------------------
|
|
147
|
+
|
|
148
|
+
/**
 * Classify and act on a single intake item.
 *
 * Pipeline — each gate may consume the item (markProcessed) and return early:
 *   1. Enrich with channel metadata (is it a DM? is the agent already in the thread?)
 *   2. Classify via Haiku API
 *   3. Drop "ignore" classifications
 *   4. Directed-message gate (Slack channels/groups only, two-layer LLM+rules check)
 *   5. Thread/channel dedup lock
 *   6. Request claim for action_required items
 *   7. Quick-reply path, else holding message + full session dispatch
 *
 * @param {object} item - intake item produced by one of the pollers
 * @param {string} service - originating service name ("slack", "gmail", ...)
 */
async function processItem(item, service) {
  const itemId = item.raw_ref || item.id || `${service}-${Date.now()}`;

  try {
    // Enrich item with channel-type metadata for directed-message detection.
    // Slack DM channel IDs start with "D"; poller channel names use a "dm/" prefix.
    const channelStr = (item.channel || "").toLowerCase();
    const channelId = item.channel_id || "";
    const isDm = channelStr.startsWith("dm/") || channelId.startsWith("D");
    const myFirstName = loadAgent().firstName || "Agent";
    // NOTE(review): firstName is interpolated unescaped — a name containing
    // regex metacharacters would alter this pattern; confirm names are plain words.
    const agentThreadRegex = new RegExp(`^${myFirstName}:`, "m");
    const agentInThread = !!(item.thread_context && agentThreadRegex.test(item.thread_context));
    item.is_dm = isDm;
    item.agent_in_thread = agentInThread;

    // Classify via Haiku API
    const classResult = await classifyItem({
      sender: item.sender || "unknown",
      // FIX: the ternary must be parenthesized. The previous form parsed as
      // `(item.sender_privilege || item.priority_signals?.from_ceo) ? "ceo" : "unknown"`,
      // which discarded any real sender_privilege value and mislabeled every
      // privileged sender as "ceo".
      sender_privilege: item.sender_privilege || (item.priority_signals?.from_ceo ? "ceo" : "unknown"),
      channel: item.channel || "unknown",
      service,
      content: item.content || item.subject || "",
      is_reply: item.is_reply || false,
      thread_context: item.thread_context || null,
      subject: item.subject || "",
      is_dm: isDm,
      is_group: !isDm && service === "slack",
      agent_in_thread: agentInThread,
    });

    recordClassification(true);

    logEvent("classifications", {
      item_id: itemId,
      sender: item.sender,
      service,
      ...classResult,
    });

    console.log(`[daemon] Classified: [${classResult.priority}] ${classResult.summary} → ${classResult.model} (${classResult.action})`);

    // Skip ignored items
    if (classResult.category === "ignore" || classResult.action === "ignore") {
      markProcessed(item, service);
      return;
    }

    // DIRECTED-MESSAGE GATE: In channels and group chats, only respond to
    // messages that are clearly directed at the agent. DMs always pass.
    // This prevents the agent from inserting itself into every conversation.
    //
    // Two-layer check:
    // 1. If LLM says NOT directed → verify with rules (catch missed @mentions, CEO, DMs)
    // 2. If LLM says directed BUT it's a non-DM channel → verify with rules (catch over-eager LLM)
    if (service === "slack") {
      const isDm = item.is_dm || (item.channel || "").startsWith("dm/") || (item.channel_id || "").startsWith("D");

      if (!classResult.directed_at_agent) {
        // LLM says not directed — double-check with rule-based heuristics
        // to catch clear signals the LLM may have missed (DM, CEO, @mention)
        const ruleCheck = isDirectedAtAgent(item);
        if (!ruleCheck) {
          console.log(`[daemon] Directed-message filter: skipping non-directed message from ${item.sender} in ${item.channel}`);
          logEvent("classifications", {
            item_id: itemId,
            sender: item.sender,
            service,
            skipped: true,
            reason: "not_directed_at_agent",
            classifier_directed: false,
            rule_directed: false,
            summary: classResult.summary,
          });
          markProcessed(item, service);
          return;
        }
        // Rule-based says directed — override LLM
        console.log(`[daemon] Directed-message override: LLM said not directed but rule-based detected direction (${item.sender} in ${item.channel})`);
      } else if (!isDm) {
        // LLM says directed in a channel/group — sanity-check with rules.
        // If rule-based also agrees, proceed. If rules say no AND the message
        // doesn't contain the agent's name, the LLM was probably over-eager.
        const ruleCheck = isDirectedAtAgent(item);
        const content = (item.content || "").toLowerCase();
        const me = loadAgent();
        const myFirst = (me.firstName || "").toLowerCase();
        const mySlackPrefix = (me.slackMemberId || "").slice(0, 3); // e.g. "U09"
        const mentionsAgent =
          (myFirst && content.includes(myFirst)) ||
          (mySlackPrefix && content.includes(`<@${mySlackPrefix}`));
        if (!ruleCheck && !mentionsAgent) {
          console.log(`[daemon] Directed-message filter (LLM override): LLM said directed but rules disagree for ${item.sender} in ${item.channel} — skipping`);
          logEvent("classifications", {
            item_id: itemId,
            sender: item.sender,
            service,
            skipped: true,
            reason: "llm_over_eager_directed",
            classifier_directed: true,
            rule_directed: false,
            summary: classResult.summary,
          });
          markProcessed(item, service);
          return;
        }
      }
    }

    // THREAD / CHANNEL DEDUP: Prevent multiple sessions from responding to the
    // same conversation. For thread replies, lock by thread_ts. For non-threaded
    // DMs, lock by channel_id (so rapid-fire DMs don't each spawn a session).
    // This check runs BEFORE quick reply to prevent ALL duplicate responses.
    {
      const channel = item.channel_id || (item.raw_ref ? item.raw_ref.match(/slack:([^:]+):/)?.[1] : null) || item.channel;
      const threadTs = item.thread_id || (isDm ? `dm-channel` : null);
      if (threadTs && channel) {
        const threadCheck = acquireThreadLock(channel, threadTs);
        if (!threadCheck.allowed) {
          console.log(`[daemon] Thread/channel dedup: skipping item from ${item.sender} — ${threadCheck.reason}`);
          logEvent("classifications", {
            item_id: itemId,
            sender: item.sender,
            service,
            skipped: true,
            reason: `thread_dedup: ${threadCheck.reason}`,
          });
          markProcessed(item, service);
          return;
        }
      }
    }

    // REQUEST CLAIMING: For action_required items, claim the request so
    // parallel sessions (from overlapping poll cycles or backlog sweep)
    // don't independently process the same "email me the brief" request.
    // Claim key: action_type + recipient channel + summary (normalized).
    if (classResult.category === "action_required" && (classResult.action === "respond" || classResult.action === "draft" || classResult.action === "research")) {
      const claimAttrs = {
        recipient: item.channel_id || item.channel || item.sender || "unknown",
        subject: classResult.summary || item.subject || "",
        action_type: classResult.action,
      };
      const claim = claimRequest(claimAttrs);
      if (!claim.claimed) {
        console.log(`[daemon] Request claim denied for ${item.sender}: ${claim.reason}`);
        logEvent("classifications", {
          item_id: itemId,
          sender: item.sender,
          service,
          skipped: true,
          reason: `request_claim_denied: ${claim.reason}`,
          summary: classResult.summary,
        });
        markProcessed(item, service);
        return;
      }
    }

    // QUICK REPLY PATH: Direct responses via API + Slack/Gmail posting.
    // No claude --print session needed. ~4-8 seconds total.
    if (isQuickReply(classResult)) {
      console.log(`[daemon] Quick reply path for ${item.sender} (${classResult.model})`);
      const result = await sendQuickResponse(item, classResult);
      if (result.sent) {
        markProcessed(item, service);
        return;
      }
      // If quick reply failed to send or was blocked by validation, fall through to dispatch a full session
      const reason = result.blocked ? `validation blocked: ${result.issues?.map(i => i.rule).join(", ")}` : "send failed";
      console.warn(`[daemon] Quick reply not sent (${reason}), falling through to session dispatch`);
    }

    // COMPLEX WORK PATH: Send immediate holding message, then dispatch full session
    let holdingText = null;
    try {
      if (classResult.action === "respond" || classResult.action === "draft" || classResult.action === "research") {
        const holdResult = await sendHoldingMessage(item, classResult);
        holdingText = holdResult.sent ? holdResult.holdingText : null;
        if (holdResult.sent) {
          updateLock(itemId, { holdingSent: true });
        }
      }
    } catch (err) {
      // Best-effort: a failed holding message must not block the real response.
      console.error(`[daemon] Holding message failed (non-fatal): ${err.message}`);
    }

    // Build prompt with holding message context and dispatch
    const prompt = await buildPrompt(item, classResult, {
      type: "inbox",
      holdingMessage: holdingText,
    });
    dispatch(prompt, item, classResult);
    recordSession(false); // spawned
    markProcessed(item, service);

  } catch (err) {
    console.error(`[daemon] Failed to process item ${itemId}:`, err.message);
    recordClassification(false);
  }
}
|
|
347
|
+
|
|
348
|
+
// ---------------------------------------------------------------------------
|
|
349
|
+
// Mark inbox item as processed
|
|
350
|
+
// ---------------------------------------------------------------------------
|
|
351
|
+
|
|
352
|
+
/**
 * Mark an inbox item as processed by renaming its poller-written file(s) in
 * state/inbox/{service}/ to a ".processed" suffix. Best-effort: items without
 * inbox files (e.g. backlog items) are silently ignored.
 *
 * @param {object} item - intake item; matched by its `id` and/or `raw_ref`
 * @param {string} service - inbox subdirectory name ("slack", "gmail", ...)
 */
function markProcessed(item, service) {
  try {
    // FIX: only match against refs that are actually set. String#includes
    // coerces undefined to the string "undefined", so the old
    // `f.includes(item.id) || f.includes(item.raw_ref)` could false-match any
    // filename containing "undefined" when a ref was absent.
    const refs = [item.id, item.raw_ref].filter(Boolean);
    if (refs.length === 0) return;

    const inboxDir = join(AGENT_REPO_DIR, "state", "inbox", service);
    const files = readdirSync(inboxDir).filter(
      (f) => !f.endsWith(".processed") && refs.some((ref) => f.includes(ref))
    );
    for (const file of files) {
      renameSync(join(inboxDir, file), join(inboxDir, file + ".processed"));
    }
  } catch {
    // Not all items have inbox files (e.g. backlog items)
  }
}
|
|
369
|
+
|
|
370
|
+
// ---------------------------------------------------------------------------
|
|
371
|
+
// Backlog Sweep
|
|
372
|
+
// ---------------------------------------------------------------------------
|
|
373
|
+
|
|
374
|
+
/**
 * Sweep queue YAML files under state/queues/ for open/in_progress items and
 * dispatch the highest-priority ones into available session slots (hard cap
 * of 5 per sweep). Parsing is intentionally regex-based "good enough" YAML:
 * items are split on top-level "- " markers and matched per field;
 * unparseable files are skipped.
 */
async function sweepBacklog() {
  const slots = availableSlots();
  if (slots <= 0) return; // No capacity — try again next sweep

  try {
    const queueDir = join(AGENT_REPO_DIR, "state", "queues");
    const files = readdirSync(queueDir).filter((f) => f.endsWith(".yaml"));
    const actionableItems = [];

    for (const file of files) {
      try {
        const content = readFileSync(join(queueDir, file), "utf-8");
        // Simple YAML parsing — look for items with status: open
        const items = content.split(/^-\s+/m).filter(Boolean);
        for (const itemBlock of items) {
          // Strip comment lines to avoid matching schema templates (e.g. "# title: string")
          const cleanBlock = itemBlock.split("\n").filter((l) => !l.trimStart().startsWith("#")).join("\n");
          // Match both quoted ("title":) and unquoted (title:) YAML keys
          const statusMatch = cleanBlock.match(/"?status"?:\s*["']?(open|in_progress)["']?/);
          const blockedMatch = cleanBlock.match(/"?status"?:\s*["']?blocked["']?/);
          if (statusMatch && !blockedMatch) {
            const titleMatch = cleanBlock.match(/"?title"?:\s*["']?(.+?)["']?\s*\n/);
            const priorityMatch = cleanBlock.match(/"?priority"?:\s*["']?(critical|high|normal|low)["']?/);
            const nextActionMatch = cleanBlock.match(/"?next_action"?:\s*["']?(.+?)["']?\s*\n/);
            if (titleMatch && nextActionMatch) {
              actionableItems.push({
                title: titleMatch[1],
                priority: priorityMatch?.[1] || "normal",
                next_action: nextActionMatch[1],
                source_file: file,
                raw: cleanBlock.substring(0, 500),
              });
            }
          }
        }
      } catch {
        // Skip unparseable queue files
      }
    }

    // Sort: critical first, then high, then normal, then low.
    // FIX: use ?? instead of || — priorityOrder.critical is 0, which is falsy,
    // so the old `|| 3` silently demoted every critical item to the lowest rank.
    const priorityOrder = { critical: 0, high: 1, normal: 2, low: 3 };
    actionableItems.sort((a, b) => (priorityOrder[a.priority] ?? 3) - (priorityOrder[b.priority] ?? 3));

    // Filter out items that already have active sessions or exceeded retries
    const dispatchable = actionableItems.filter((qi) => {
      const check = canDispatchBacklog(qi);
      if (!check.allowed) {
        if (check.reason === "max_retries_exceeded") {
          console.log(`[daemon] Backlog skip: "${qi.title}" — retries exhausted`);
        }
        return false;
      }

      // File-based item claim check — survives daemon restart and is visible
      // to concurrent launchd triggers. Complements in-memory activeBacklogKeys.
      // (ib-20260407-001b: concurrent session coordination)
      // NOTE(review): the parser above never sets an `id` on queue items, so
      // this guard currently never fires — confirm whether ids should be parsed.
      if (qi.id && hasActiveClaim(qi.id)) {
        console.log(`[daemon] Backlog skip: "${qi.title}" — item claimed by another session`);
        return false;
      }

      return true;
    });

    // Dispatch top items up to available slots
    const toDispatch = dispatchable.slice(0, Math.min(slots, 5));
    let dispatched = 0;

    for (const queueItem of toDispatch) {
      // Synthesize a classification result so backlog items flow through the
      // same dispatch path as classified inbox items.
      const classResult = {
        priority: queueItem.priority,
        action: "queue",
        model: queueItem.priority === "critical" ? "opus" : "sonnet",
        summary: queueItem.title,
        category: "action_required",
      };

      const prompt = await buildPrompt(null, classResult, {
        type: "backlog",
        queueItem,
      });

      dispatch(prompt, queueItem, classResult, "backlog");
      recordSession(false);
      dispatched++;
    }

    if (dispatched > 0) {
      console.log(`[daemon] Backlog sweep: dispatched ${dispatched} items (${actionableItems.length - dispatchable.length} skipped: in-flight or exhausted)`);
    }
  } catch (err) {
    console.error(`[daemon] Backlog sweep error:`, err.message);
  }
}
|
|
469
|
+
|
|
470
|
+
// ---------------------------------------------------------------------------
|
|
471
|
+
// Main
|
|
472
|
+
// ---------------------------------------------------------------------------
|
|
473
|
+
|
|
474
|
+
/**
 * Daemon entry point: prints a startup banner, honors the .emergency-stop
 * marker file, clears orphaned lock/session state from prior instances, runs
 * one immediate poll, then schedules the three recurring loops (poll, backlog
 * sweep, health/claims) plus graceful-shutdown signal handlers. The process
 * stays alive via the active setInterval timers.
 */
async function main() {
  console.log("╔══════════════════════════════════════════════════════════╗");
  console.log(`║ ${(loadAgent().firstName || "Agent").padEnd(8)} Daemon — Reactive Event Processor ║`);
  console.log("╠══════════════════════════════════════════════════════════╣");
  console.log(`║ Directory: ${AGENT_REPO_DIR}`);
  console.log(`║ Poll: every ${POLL_INTERVAL / 1000}s`);
  console.log(`║ Backlog: every ${BACKLOG_INTERVAL / 1000}s`);
  console.log(`║ Concurrency: up to ${process.env.DAEMON_MAX_CONCURRENT || 10} parallel sessions`);
  console.log("╚══════════════════════════════════════════════════════════╝");

  // Check for emergency stop — presence of the marker file aborts startup.
  // (readFileSync throwing ENOENT is the "not stopped" path.)
  try {
    readFileSync(join(AGENT_REPO_DIR, ".emergency-stop"));
    console.error("[daemon] Emergency stop active — exiting");
    process.exit(0);
  } catch {
    // No emergency stop — continue
  }

  // Clear orphaned state from prior daemon instances
  const staleCleared = scanStaleLocks();
  if (staleCleared > 0) {
    console.log(`[daemon] Cleared ${staleCleared} stale session locks`);
  }
  resetActiveSessions();

  // Initial poll immediately
  await poll();

  // Poll loop — also re-checks the emergency-stop marker each tick.
  setInterval(async () => {
    try {
      // Check emergency stop
      try { readFileSync(join(AGENT_REPO_DIR, ".emergency-stop")); process.exit(0); } catch {}
      await poll();
    } catch (err) {
      console.error("[daemon] Poll loop error:", err.message);
    }
  }, POLL_INTERVAL);

  // Backlog sweep
  setInterval(async () => {
    try {
      await sweepBacklog();
    } catch (err) {
      console.error("[daemon] Backlog sweep error:", err.message);
    }
  }, BACKLOG_INTERVAL);

  // Health dashboard + stale claim sweep
  setInterval(() => {
    try {
      writeHealthDashboard();
      // Sweep stale item claims (ib-20260407-001b: concurrent session coordination)
      const claimsSwept = sweepStaleItemClaims();
      if (claimsSwept > 0) {
        console.log(`[daemon] Swept ${claimsSwept} stale item claims`);
      }
    } catch (err) {
      console.error("[daemon] Health write error:", err.message);
    }
  }, HEALTH_INTERVAL);

  // Graceful shutdown — clean up active.json so next startup doesn't see stale sessions
  process.on("SIGTERM", () => {
    console.log("[daemon] SIGTERM received, shutting down gracefully");
    resetActiveSessions();
    writeHealthDashboard();
    process.exit(0);
  });

  process.on("SIGINT", () => {
    console.log("[daemon] SIGINT received, shutting down");
    resetActiveSessions();
    writeHealthDashboard();
    process.exit(0);
  });

  console.log("[daemon] Running. Ctrl+C to stop.");
}

// Top-level bootstrap: any unhandled startup error exits non-zero so launchd
// (KeepAlive: true) restarts the daemon.
main().catch((err) => {
  console.error("[daemon] Fatal:", err);
  process.exit(1);
});
|