@adaptic/maestro 1.10.5 → 1.10.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@adaptic/maestro",
3
- "version": "1.10.5",
3
+ "version": "1.10.6",
4
4
  "description": "Maestro — Autonomous AI agent operating system. Deploy AI employees on dedicated Mac minis.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -46,7 +46,7 @@
46
46
  },
47
47
  "always-build-npm": true,
48
48
  "scripts": {
49
- "test": "node --test lib/cadence-bus.test.mjs scripts/cadence/enqueue-cadence-tick.test.mjs scripts/daemon/cadence-consumer.test.mjs scripts/daemon/dispatcher-cooldown.test.mjs scripts/daemon/lib/session-router.test.mjs scripts/local-triggers/generate-plists.test.mjs scripts/poller/slack-socket-mode.test.mjs bin/maestro.test.mjs",
49
+ "test": "node --test lib/cadence-bus.test.mjs scripts/cadence/enqueue-cadence-tick.test.mjs scripts/daemon/cadence-consumer.test.mjs scripts/daemon/dispatcher-cooldown.test.mjs scripts/daemon/inbox-deferral.test.mjs scripts/daemon/lib/session-router.test.mjs scripts/local-triggers/generate-plists.test.mjs scripts/poller/slack-socket-mode.test.mjs bin/maestro.test.mjs",
50
50
  "test:cadence": "node --test lib/cadence-bus.test.mjs scripts/cadence/enqueue-cadence-tick.test.mjs scripts/daemon/cadence-consumer.test.mjs",
51
51
  "test:cli": "node --test bin/maestro.test.mjs",
52
52
  "test:plists": "node --test scripts/local-triggers/generate-plists.test.mjs",
@@ -47,7 +47,8 @@ import { dispatch, getStatus, availableSlots, canDispatchBacklog, resetActiveSes
47
47
  import { buildPrompt } from "./prompt-builder.mjs";
48
48
  import { sendQuickResponse, sendHoldingMessage, isQuickReply } from "./responder.mjs";
49
49
  import { recordPoll, recordClassification, recordSession, writeHealthDashboard } from "./health.mjs";
50
- import { acquireLock, updateLock, scanStaleLocks, acquireThreadLock, claimRequest, hasActiveClaim, sweepStaleItemClaims } from "./session-lock.mjs";
50
+ import { acquireLock, releaseLock, updateLock, scanStaleLocks, acquireThreadLock, claimRequest, hasActiveClaim, sweepStaleItemClaims } from "./session-lock.mjs";
51
+ import { markDeferred } from "./inbox-deferral.mjs";
51
52
 
52
53
  // ---------------------------------------------------------------------------
53
54
  // Configuration
@@ -266,15 +267,28 @@ async function processItem(item, service) {
266
267
  if (threadTs && channel) {
267
268
  const threadCheck = acquireThreadLock(channel, threadTs);
268
269
  if (!threadCheck.allowed) {
269
- console.log(`[daemon] Thread/channel dedup: skipping item from ${item.sender} ${threadCheck.reason}`);
270
+ // Defer instead of dropping: rename to .deferred so the file
271
+ // is preserved and re-promoted to the live inbox when the
272
+ // active session for this channel releases its lock. The
273
+ // dispatcher calls promoteDeferred() from its session-close
274
+ // path; the next poll cycle then picks the latest deferred
275
+ // item up and dispatches a single session that sees the full
276
+ // thread context (older bursts are bundled).
277
+ //
278
+ // We also release the per-item lock that the poll filter
279
+ // acquired at the top of the cycle — otherwise the next
280
+ // poll's acquireLock would block re-processing of this
281
+ // exact item after we promote it back to live.
282
+ const deferred = markDeferred(item, service, AGENT_REPO_DIR);
283
+ releaseLock(itemId);
284
+ console.log(`[daemon] Thread/channel dedup: deferring item from ${item.sender} — ${threadCheck.reason}${deferred ? "" : " (no inbox file to defer)"}`);
270
285
  logEvent("classifications", {
271
286
  item_id: itemId,
272
287
  sender: item.sender,
273
288
  service,
274
- skipped: true,
289
+ deferred: deferred > 0,
275
290
  reason: `thread_dedup: ${threadCheck.reason}`,
276
291
  });
277
- markProcessed(item, service);
278
292
  return;
279
293
  }
280
294
  }
@@ -6,6 +6,7 @@ import { spawn } from "child_process";
6
6
  import { appendFileSync, mkdirSync, writeFileSync, readFileSync, renameSync } from "fs";
7
7
  import { join, dirname } from "path";
8
8
  import { releaseLock, releaseThreadLock, releaseRequestClaim, claimItem, releaseItemClaim } from "./session-lock.mjs";
9
+ import { promoteDeferred } from "./inbox-deferral.mjs";
9
10
  import { recordSession } from "./health.mjs";
10
11
 
11
12
  const AGENT_REPO_DIR = process.env.AGENT_DIR || join(new URL(".", import.meta.url).pathname, "../..");
@@ -384,10 +385,35 @@ function spawnSession(entry) {
384
385
  const itemId = item.raw_ref || item.id || item.title;
385
386
  if (itemId) releaseLock(itemId);
386
387
 
387
- // Release thread lock so new messages in this thread can be processed
388
- if (item.thread_id) {
388
+ // Release thread/channel lock so new messages in this thread/DM can
389
+ // be processed. We must release unconditionally when a channel is
390
+ // known — the previous `if (item.thread_id)` gate skipped DMs (which
391
+ // always have empty thread_id), so DM-channel locks never cleared
392
+ // until the 60min TTL fired. acquireThreadLock normalises DM
393
+ // channels to `dm-channel` internally; releaseThreadLock mirrors
394
+ // that normalisation, so calling it with an empty thread_id on a
395
+ // DM channel does the right thing.
396
+ {
389
397
  const channel = item.channel_id || (item.raw_ref ? (item.raw_ref.match(/slack:([^:]+):/) || [])[1] : null) || item.channel;
390
- if (channel) releaseThreadLock(channel, item.thread_id);
398
+ if (channel) {
399
+ releaseThreadLock(channel, item.thread_id);
400
+ // Now that the lock is gone, promote any messages that were
401
+ // deferred behind it. Latest-wins: a burst of N messages
402
+ // collapses into ONE re-dispatch carrying the most recent
403
+ // message (its thread_context already includes the earlier
404
+ // ones), so the user gets a single coherent reply rather than
405
+ // N replies serialised over N session-durations.
406
+ const promo = promoteDeferred(channel, AGENT_REPO_DIR);
407
+ if (promo.promoted > 0) {
408
+ logSession({
409
+ event: "deferred_promoted",
410
+ channel,
411
+ promoted: promo.promoted,
412
+ bundled: promo.bundled,
413
+ service: promo.service,
414
+ });
415
+ }
416
+ }
391
417
  }
392
418
 
393
419
  // Release request claim so the same type of request can be processed again
@@ -462,10 +488,24 @@ function spawnSession(entry) {
462
488
  const itemId = item.raw_ref || item.id || item.title;
463
489
  if (itemId) releaseLock(itemId);
464
490
 
465
- // Release thread lock so new messages in this thread can be processed.
466
- if (item.thread_id) {
491
+ // Release thread/channel lock so new messages can be processed.
492
+ // Mirror of close-handler logic — unconditional when channel is
493
+ // known, since releaseThreadLock handles the DM normalization.
494
+ {
467
495
  const channel = item.channel_id || (item.raw_ref ? (item.raw_ref.match(/slack:([^:]+):/) || [])[1] : null) || item.channel;
468
- if (channel) releaseThreadLock(channel, item.thread_id);
496
+ if (channel) {
497
+ releaseThreadLock(channel, item.thread_id);
498
+ const promo = promoteDeferred(channel, AGENT_REPO_DIR);
499
+ if (promo.promoted > 0) {
500
+ logSession({
501
+ event: "deferred_promoted_on_error",
502
+ channel,
503
+ promoted: promo.promoted,
504
+ bundled: promo.bundled,
505
+ service: promo.service,
506
+ });
507
+ }
508
+ }
469
509
  }
470
510
 
471
511
  // Release request claim + emit explicit claim_released event so
@@ -0,0 +1,171 @@
1
+ /**
2
+ * Maestro — Inbox deferral utilities
3
+ *
4
+ * When the dispatcher hits a thread/channel-level dedup lock, the
5
+ * incoming message used to be marked `.processed` and dropped on the
6
+ * floor. That meant rapid-fire DMs (or thread bursts) silently lost
7
+ * every message after the first.
8
+ *
9
+ * This module replaces that with a defer-then-promote pattern:
10
+ *
11
+ * markDeferred(item, service, agentRoot)
12
+ * Renames the inbox file from `<name>` to `<name>.deferred`.
13
+ * The daemon's inbox scanner skips `.deferred` files, so the
14
+ * message is parked but not lost.
15
+ *
16
+ * promoteDeferred(channel, agentRoot)
17
+ * Called from the dispatcher's session-close release path. Finds
18
+ * every `.deferred` file in any service inbox dir whose body
19
+ * references `channel`. If exactly one is found, renames it back
20
+ * to its original name (next poll picks it up). If N>1 are found,
21
+ * keeps only the LATEST (by timestamp), promotes that one, and
22
+ * marks the others `.processed-bundled` for the audit trail —
23
+ * the latest item's `thread_context` already contains the prior
24
+ * messages as conversation history, so Claude sees everything
25
+ * and can compose a single coherent reply.
26
+ *
27
+ * The file format is the YAML emitted by the slack/gmail/calendar
28
+ * pollers (a flat top-level object with quoted scalar fields). We
29
+ * deliberately avoid pulling in a YAML parser — regex extraction of
30
+ * `channel_id:` and `timestamp:` is sufficient for the routing
31
+ * decision and keeps this module dependency-free.
32
+ */
33
+
34
+ import { readdirSync, readFileSync, renameSync, existsSync } from "node:fs";
35
+ import { join } from "node:path";
36
+
37
+ const SERVICE_DIRS = ["slack", "gmail", "calendar", "internal", "sms", "whatsapp"];
38
+
39
+ /**
40
+ * Rename a live inbox file to its `.deferred` suffix so the scanner
41
+ * skips it until the channel/thread lock releases.
42
+ *
43
+ * Mirrors markProcessed's file-matching strategy (id or raw_ref
44
+ * substring) so we hit the same file that markProcessed would have.
45
+ *
46
+ * Idempotent: if the file is already `.deferred` or `.processed`, no-op.
47
+ *
48
+ * @returns {number} number of files renamed (usually 1, occasionally 0
49
+ * for backlog items with no inbox file)
50
+ */
51
+ export function markDeferred(item, service, agentRoot) {
52
+ if (!item || !service || !agentRoot) return 0;
53
+ const inboxDir = join(agentRoot, "state", "inbox", service);
54
+ if (!existsSync(inboxDir)) return 0;
55
+ let renamed = 0;
56
+ try {
57
+ const needle = item.id || item.raw_ref;
58
+ if (!needle) return 0;
59
+ const files = readdirSync(inboxDir).filter(
60
+ (f) => !f.endsWith(".processed") &&
61
+ !f.endsWith(".deferred") &&
62
+ !f.endsWith(".processed-bundled") &&
63
+ (f.includes(item.id || "") || f.includes(item.raw_ref || ""))
64
+ );
65
+ for (const file of files) {
66
+ renameSync(join(inboxDir, file), join(inboxDir, `${file}.deferred`));
67
+ renamed++;
68
+ }
69
+ } catch {
70
+ // Inbox dir may not exist for non-poller-backed items (e.g. backlog).
71
+ }
72
+ return renamed;
73
+ }
74
+
75
/**
 * Pull one single-line scalar out of a poller-written inbox YAML body
 * without taking a YAML-parser dependency. Only `channel_id` (routing)
 * and `timestamp` (latest-wins ordering) are read this way; every
 * poller emits both as one-line, optionally-quoted scalars.
 *
 * @param {string} body - raw file contents
 * @param {string} field - top-level field name to extract
 * @returns {string|null} trimmed value, or null when the field is absent
 */
function readScalar(body, field) {
  // Accepts ` field: "value"` or `field: value` (leading space ok).
  const pattern = new RegExp(`^\\s*${field}\\s*:\\s*"?([^"\\n]+?)"?\\s*$`, "m");
  const hit = pattern.exec(body);
  return hit === null ? null : hit[1].trim();
}

/**
 * Promote every `.deferred` item targeting `channel` back into the
 * live inbox. Bursts are bundled: when several deferred items exist
 * for one channel, only the newest (by lexicographic ISO timestamp)
 * is re-queued; the rest become `.processed-bundled` audit files,
 * since the newest item's thread_context already carries their text.
 *
 * Called from the dispatcher's session-close path AFTER the channel
 * lock has been released, so the next poll cycle can re-acquire it.
 *
 * @param {string} channel - channel_id whose parked items should wake
 * @param {string} agentRoot - agent repo root containing state/inbox
 * @returns {{promoted: number, bundled: number, service: string|null}}
 */
export function promoteDeferred(channel, agentRoot) {
  const summary = { promoted: 0, bundled: 0, service: null };
  if (!channel || !agentRoot) return summary;

  for (const svc of ["slack", "gmail", "calendar", "internal", "sms", "whatsapp"]) {
    const inboxDir = join(agentRoot, "state", "inbox", svc);
    if (!existsSync(inboxDir)) continue;

    let parked;
    try {
      parked = readdirSync(inboxDir).filter((name) => name.endsWith(".deferred"));
    } catch {
      continue;
    }
    if (parked.length === 0) continue;

    // Keep only files whose body routes to this channel. Reading a
    // handful of small files is cheap, and a regex scan keeps the
    // module dependency-free.
    const candidates = [];
    for (const name of parked) {
      let text;
      try {
        text = readFileSync(join(inboxDir, name), "utf-8");
      } catch {
        continue;
      }
      if (readScalar(text, "channel_id") !== channel) continue;
      candidates.push({ name, ts: readScalar(text, "timestamp") || "" });
    }
    if (candidates.length === 0) continue;

    // Latest-wins: ISO timestamps sort lexicographically.
    candidates.sort((a, b) => a.ts.localeCompare(b.ts));
    const newest = candidates.pop(); // remaining entries are the older burst

    // Re-queue the newest item by stripping its .deferred suffix.
    try {
      const liveName = newest.name.replace(/\.deferred$/, "");
      renameSync(join(inboxDir, newest.name), join(inboxDir, liveName));
      summary.promoted++;
      summary.service = svc;
    } catch {
      // Lost a rename race with another process — the file stays
      // .deferred and the next session-close retries.
    }

    // Older bursts are already folded into the newest item's context;
    // keep their files for audit under a distinct suffix.
    for (const stale of candidates) {
      try {
        renameSync(
          join(inboxDir, stale.name),
          join(inboxDir, stale.name.replace(/\.deferred$/, ".processed-bundled"))
        );
        summary.bundled++;
      } catch {
        // Best-effort; a missing file is fine.
      }
    }
  }

  return summary;
}
@@ -0,0 +1,154 @@
1
+ import { test } from "node:test";
2
+ import assert from "node:assert/strict";
3
+ import { mkdtempSync, mkdirSync, writeFileSync, readdirSync, rmSync } from "node:fs";
4
+ import { join } from "node:path";
5
+ import { tmpdir } from "node:os";
6
+
7
+ import { markDeferred, promoteDeferred } from "./inbox-deferral.mjs";
8
+
9
// Create a throwaway agent root containing an empty slack inbox dir.
function makeAgentRoot() {
  const tempRoot = mkdtempSync(join(tmpdir(), "maestro-deferral-"));
  const slackInbox = join(tempRoot, "state", "inbox", "slack");
  mkdirSync(slackInbox, { recursive: true });
  return tempRoot;
}
14
+
15
// Write a minimal poller-style YAML inbox item into the slack inbox.
function writeInboxItem(root, name, { channel_id, timestamp, id = "test-id" }) {
  const yaml =
    `id: "${id}"\n` +
    `service: "slack"\n` +
    `channel_id: "${channel_id}"\n` +
    `timestamp: "${timestamp}"\n` +
    `content: |\n` +
    `  body\n`;
  writeFileSync(join(root, "state", "inbox", "slack", name), yaml);
}
+
28
+ test("markDeferred renames live file to .deferred", () => {
29
+ const root = makeAgentRoot();
30
+ try {
31
+ writeInboxItem(root, "msg-A.yaml", {
32
+ channel_id: "D001",
33
+ timestamp: "2026-05-13T00:00:00Z",
34
+ id: "msg-A",
35
+ });
36
+ const n = markDeferred({ id: "msg-A" }, "slack", root);
37
+ assert.equal(n, 1);
38
+ const files = readdirSync(join(root, "state", "inbox", "slack"));
39
+ assert.deepEqual(files, ["msg-A.yaml.deferred"]);
40
+ } finally {
41
+ rmSync(root, { recursive: true, force: true });
42
+ }
43
+ });
44
+
45
+ test("markDeferred is idempotent — already-deferred files are skipped", () => {
46
+ const root = makeAgentRoot();
47
+ try {
48
+ writeInboxItem(root, "msg-B.yaml.deferred", {
49
+ channel_id: "D001",
50
+ timestamp: "2026-05-13T00:00:00Z",
51
+ id: "msg-B",
52
+ });
53
+ const n = markDeferred({ id: "msg-B" }, "slack", root);
54
+ assert.equal(n, 0);
55
+ } finally {
56
+ rmSync(root, { recursive: true, force: true });
57
+ }
58
+ });
59
+
60
+ test("markDeferred is no-op when item has no matching file (e.g. backlog)", () => {
61
+ const root = makeAgentRoot();
62
+ try {
63
+ const n = markDeferred({ id: "no-such-id" }, "slack", root);
64
+ assert.equal(n, 0);
65
+ } finally {
66
+ rmSync(root, { recursive: true, force: true });
67
+ }
68
+ });
69
+
70
+ test("promoteDeferred with single item: renames back to live", () => {
71
+ const root = makeAgentRoot();
72
+ try {
73
+ writeInboxItem(root, "msg-C.yaml.deferred", {
74
+ channel_id: "D099N1JEA10",
75
+ timestamp: "2026-05-13T00:00:00Z",
76
+ id: "msg-C",
77
+ });
78
+ const r = promoteDeferred("D099N1JEA10", root);
79
+ assert.equal(r.promoted, 1);
80
+ assert.equal(r.bundled, 0);
81
+ assert.equal(r.service, "slack");
82
+ const files = readdirSync(join(root, "state", "inbox", "slack"));
83
+ assert.deepEqual(files, ["msg-C.yaml"]);
84
+ } finally {
85
+ rmSync(root, { recursive: true, force: true });
86
+ }
87
+ });
88
+
89
+ test("promoteDeferred with multiple items: keeps latest, bundles rest", () => {
90
+ const root = makeAgentRoot();
91
+ try {
92
+ writeInboxItem(root, "msg-1.yaml.deferred", {
93
+ channel_id: "D001",
94
+ timestamp: "2026-05-13T00:00:00Z",
95
+ id: "msg-1",
96
+ });
97
+ writeInboxItem(root, "msg-2.yaml.deferred", {
98
+ channel_id: "D001",
99
+ timestamp: "2026-05-13T00:01:00Z",
100
+ id: "msg-2",
101
+ });
102
+ writeInboxItem(root, "msg-3.yaml.deferred", {
103
+ channel_id: "D001",
104
+ timestamp: "2026-05-13T00:02:00Z",
105
+ id: "msg-3",
106
+ });
107
+ const r = promoteDeferred("D001", root);
108
+ assert.equal(r.promoted, 1);
109
+ assert.equal(r.bundled, 2);
110
+ const files = readdirSync(join(root, "state", "inbox", "slack")).sort();
111
+ assert.deepEqual(files, [
112
+ "msg-1.yaml.processed-bundled",
113
+ "msg-2.yaml.processed-bundled",
114
+ "msg-3.yaml", // latest survives as live
115
+ ]);
116
+ } finally {
117
+ rmSync(root, { recursive: true, force: true });
118
+ }
119
+ });
120
+
121
+ test("promoteDeferred ignores items in other channels", () => {
122
+ const root = makeAgentRoot();
123
+ try {
124
+ writeInboxItem(root, "for-A.yaml.deferred", {
125
+ channel_id: "D-A",
126
+ timestamp: "2026-05-13T00:00:00Z",
127
+ id: "for-A",
128
+ });
129
+ writeInboxItem(root, "for-B.yaml.deferred", {
130
+ channel_id: "D-B",
131
+ timestamp: "2026-05-13T00:00:00Z",
132
+ id: "for-B",
133
+ });
134
+ const r = promoteDeferred("D-A", root);
135
+ assert.equal(r.promoted, 1);
136
+ assert.equal(r.bundled, 0);
137
+ const files = readdirSync(join(root, "state", "inbox", "slack")).sort();
138
+ assert.deepEqual(files, ["for-A.yaml", "for-B.yaml.deferred"]);
139
+ } finally {
140
+ rmSync(root, { recursive: true, force: true });
141
+ }
142
+ });
143
+
144
+ test("promoteDeferred no-op when nothing to promote", () => {
145
+ const root = makeAgentRoot();
146
+ try {
147
+ const r = promoteDeferred("D999", root);
148
+ assert.equal(r.promoted, 0);
149
+ assert.equal(r.bundled, 0);
150
+ assert.equal(r.service, null);
151
+ } finally {
152
+ rmSync(root, { recursive: true, force: true });
153
+ }
154
+ });
@@ -17,11 +17,19 @@ export function writeInboxItem(service, item) {
17
17
  const filename = `${ts}-${item.id}.yaml`;
18
18
  const filePath = join(dir, filename);
19
19
 
20
- // Skip if this item was already written or already processed.
21
- // The daemon renames files to .processed after handling them;
22
- // without this check the 30-min thread lookback re-creates the
23
- // file every poll cycle, causing duplicate responses.
24
- if (existsSync(filePath) || existsSync(filePath + ".processed")) {
20
+ // Skip if this item was already written or already processed/deferred.
21
+ // The daemon renames files to .processed after handling, to .deferred
22
+ // when parked behind an active channel lock, and to .processed-bundled
23
+ // when older entries are folded into a newer message during promotion.
24
+ // Without all four checks the 30-min thread lookback re-creates the
25
+ // file every poll cycle, causing duplicate responses or surprise
26
+ // re-promotion of work that was already bundled.
27
+ if (
28
+ existsSync(filePath) ||
29
+ existsSync(filePath + ".processed") ||
30
+ existsSync(filePath + ".deferred") ||
31
+ existsSync(filePath + ".processed-bundled")
32
+ ) {
25
33
  return;
26
34
  }
27
35