@agent-team-foundation/first-tree-hub 0.10.3 → 0.10.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{bootstrap-CBAVWQUT.mjs → bootstrap-jx5nN1qZ.mjs} +1 -0
- package/dist/cli/index.mjs +7 -7
- package/dist/{dist-DUCelK3Z.mjs → dist-CbX9mUVH.mjs} +52 -4
- package/dist/{feishu-Boy3n8CT.mjs → feishu-DvjRZMdZ.mjs} +1 -1
- package/dist/index.mjs +5 -5
- package/dist/{invitation-C_zAhB8x-8Khychlu.mjs → invitation-BljIolbO-DLeHfURd.mjs} +3 -2
- package/dist/{invitation-BTlGMy0o-Coj07kYi.mjs → invitation-D3feYxet-366MNOor.mjs} +2 -2
- package/dist/{saas-connect-3p-vBkuY.mjs → saas-connect-2puW1r3r.mjs} +582 -84
- package/dist/web/assets/index-5SNLeFZA.js +392 -0
- package/dist/web/assets/{index-CHoaSIzI.js → index-BxQQDavm.js} +1 -1
- package/dist/web/assets/index-DKZFiOjh.css +1 -0
- package/dist/web/index.html +2 -2
- package/package.json +1 -1
- package/dist/web/assets/index-CP8uLPyO.css +0 -1
- package/dist/web/assets/index-D7OzKrI2.js +0 -387
@@ -1,8 +1,8 @@
 import { m as __toESM } from "./esm-CYu4tXXn.mjs";
 import { _ as withSpan, a as endWsConnectionSpan, b as require_pino, c as messageAttrs, d as rootLogger$1, g as startWsConnectionSpan, i as currentTraceId, n as applyLoggerConfig, o as getFastifyOtelPlugin, p as setWsConnectionAttrs, r as createLogger$1, t as adapterAttrs, u as observabilityPlugin, v as withWsMessageSpan, y as FIRST_TREE_HUB_ATTR } from "./observability-DPyf745N-BSc8QNcR.mjs";
-import { C as serverConfigSchema, S as resolveConfigReadonly, _ as loadAgents, b as resetConfig, c as saveCredentials, d as DEFAULT_HOME_DIR$1, f as agentConfigSchema, g as initConfig, i as loadCredentials, l as DEFAULT_CONFIG_DIR, m as collectMissingPrompts, n as ensureFreshAccessToken, o as resolveServerUrl, p as clientConfigSchema, s as saveAgentConfig, u as DEFAULT_DATA_DIR$1, v as migrateLegacyHome, w as setConfigValue, x as resetConfigMeta } from "./bootstrap-CBAVWQUT.mjs";
-import { $ as
-import { a as ForbiddenError, c as buildInviteUrl, d as getActiveInvitation, f as invitationRedemptions, g as recordRedemption, i as ConflictError, l as ensureActiveInvitation, m as organizations, n as BadRequestError, o as NotFoundError, p as invitations, r as ClientOrgMismatchError$1, s as UnauthorizedError, t as AppError, u as findActiveByToken, v as users, y as uuidv7 } from "./invitation-C_zAhB8x-8Khychlu.mjs";
+import { C as serverConfigSchema, S as resolveConfigReadonly, _ as loadAgents, b as resetConfig, c as saveCredentials, d as DEFAULT_HOME_DIR$1, f as agentConfigSchema, g as initConfig, i as loadCredentials, l as DEFAULT_CONFIG_DIR, m as collectMissingPrompts, n as ensureFreshAccessToken, o as resolveServerUrl, p as clientConfigSchema, s as saveAgentConfig, u as DEFAULT_DATA_DIR$1, v as migrateLegacyHome, w as setConfigValue, x as resetConfigMeta } from "./bootstrap-jx5nN1qZ.mjs";
+import { $ as refreshTokenSchema, A as createOrgFromMeSchema, B as imageInlineContentSchema, C as clientRegisterSchema, D as createAgentSchema, E as createAdapterMappingSchema, F as dryRunAgentRuntimeConfigSchema, G as isReservedAgentName$1, H as inboxDeliverFrameSchema$1, I as extractMentions, J as loginSchema, K as joinByInvitationSchema, L as githubCallbackQuerySchema, M as createTaskSchema, N as defaultRuntimeConfigPayload, O as createChatSchema, P as delegateFeishuUserSchema, Q as rebindAgentSchema, R as githubDevCallbackQuerySchema, S as clientCapabilitiesSchema$1, St as wsAuthFrameSchema, T as createAdapterConfigSchema, U as inboxPollQuerySchema, V as inboxAckFrameSchema, W as isRedactedEnvValue, X as notificationQuerySchema, Y as messageSourceSchema$1, Z as paginationQuerySchema, _ as adminUpdateTaskSchema, _t as updateClientCapabilitiesSchema, a as AGENT_STATUSES, at as sendToAgentSchema, b as agentRuntimeConfigPayloadSchema$1, bt as updateSystemConfigSchema, ct as sessionEventSchema$1, d as TASK_HEALTH_SIGNALS, dt as switchOrgSchema, et as runtimeStateMessageSchema, f as TASK_STATUSES, ft as taskListQuerySchema, g as adminCreateTaskSchema, gt as updateChatSchema, h as addParticipantSchema, ht as updateAgentSchema, i as AGENT_SOURCES, it as sendMessageSchema, j as createOrganizationSchema, k as createMemberSchema, l as SYSTEM_CONFIG_DEFAULTS, lt as sessionReconcileRequestSchema, m as WS_AUTH_FRAME_TIMEOUT_MS, mt as updateAgentRuntimeConfigSchema, n as AGENT_NAME_REGEX$1, nt as scanMentionTokens, o as AGENT_TYPES, ot as sessionCompletionMessageSchema, p as TASK_TERMINAL_STATUSES, pt as updateAdapterConfigSchema, q as linkTaskChatSchema, r as AGENT_SELECTOR_HEADER$1, rt as selfServiceFeishuBotSchema, s as AGENT_VISIBILITY, st as sessionEventMessageSchema, t as AGENT_BIND_REJECT_REASONS, tt as safeRedirectPath, u as TASK_CREATOR_TYPES, ut as sessionStateMessageSchema, v as agentBindRequestSchema, vt as updateMemberSchema, w as connectTokenExchangeSchema, x as agentTypeSchema$1, xt as updateTaskStatusSchema, y as agentPinnedMessageSchema$1, yt as updateOrganizationSchema, z as githubStartQuerySchema } from "./dist-CbX9mUVH.mjs";
+import { a as ForbiddenError, c as buildInviteUrl, d as getActiveInvitation, f as invitationRedemptions, g as recordRedemption, i as ConflictError, l as ensureActiveInvitation, m as organizations, n as BadRequestError, o as NotFoundError, p as invitations, r as ClientOrgMismatchError$1, s as UnauthorizedError, t as AppError, u as findActiveByToken, v as users, y as uuidv7 } from "./invitation-BljIolbO-DLeHfURd.mjs";
 import { createRequire } from "node:module";
 import { ZodError, z } from "zod";
 import { delimiter, dirname, isAbsolute, join, resolve } from "node:path";
@@ -754,11 +754,21 @@ z.object({
 	lastSeenAt: z.string(),
 	metadata: z.record(z.string(), z.unknown()).nullable()
 });
+/**
+ * Optional opt-in flags the client carries on `client:register` to advertise
+ * which negotiable wire-protocol features it implements. Distinct from
+ * `clientCapabilitiesSchema` (per-runtime-provider availability — different
+ * concept). Older clients omit the field; the server treats every unset flag
+ * as `false` and falls back to the legacy path. See proposal
+ * hub-inbox-ws-data-plane §3.6.
+ */
+const clientWireCapabilitiesSchema = z.object({ wsInboxDeliver: z.boolean().default(false) }).partial();
 z.object({
 	clientId: z.string().min(1).max(100),
 	hostname: z.string().max(100).optional(),
 	os: z.string().max(50).optional(),
-	sdkVersion: z.string().max(50).optional()
+	sdkVersion: z.string().max(50).optional(),
+	wireCapabilities: clientWireCapabilitiesSchema.optional()
 });
 const capabilityStateSchema = z.enum([
 	"ok",
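How the new field behaves on the wire, as a minimal sketch (frame values invented; only the schema shape comes from the hunk above):

```js
import { z } from "zod";

// Same shape as the schema added above: every flag optional, absent reads as off.
const clientWireCapabilitiesSchema = z.object({ wsInboxDeliver: z.boolean().default(false) }).partial();

// A 0.10.4 client opts in on client:register; a 0.10.3 client omits the field.
const upgraded = { clientId: "cli-a1", wireCapabilities: { wsInboxDeliver: true } };
const legacy = { clientId: "cli-b2" };

// The server treats every unset flag as false and keeps the legacy path.
const caps = clientWireCapabilitiesSchema.parse(legacy.wireCapabilities ?? {});
console.log(caps.wsInboxDeliver === true); // false: doorbell + HTTP poll stays on
console.log(clientWireCapabilitiesSchema.parse(upgraded.wireCapabilities).wsInboxDeliver); // true
```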
@@ -968,11 +978,37 @@ z.object({
 	ackedAt: z.string().nullable()
 }).extend({ message: clientMessageSchema });
 z.object({ limit: z.coerce.number().int().min(1).max(50).default(10) });
+/**
+ * server → client: a single inbox entry pushed over the active WS connection,
+ * replacing the legacy `new_message` doorbell + HTTP `/inbox` poll round-trip.
+ *
+ * `entryId` is the server-side `inbox_entries.id` the client must echo back
+ * in `inbox:ack`. `message` is exactly what the legacy poll path returned —
+ * `clientMessageSchema` already carries `precedingMessages`, so the client-
+ * side dispatch logic is reused verbatim (see proposal
+ * hub-inbox-ws-data-plane §3.1).
+ *
+ * `.passthrough()` so a forward-rolling server may extend the frame without
+ * breaking older clients that validate strictly. Older clients drop unknown
+ * fields silently.
+ */
+const inboxDeliverFrameSchema = z.object({
+	type: z.literal("inbox:deliver"),
+	entryId: z.number().int().nonnegative(),
+	inboxId: z.string().min(1),
+	chatId: z.string().nullable(),
+	message: clientMessageSchema
+}).passthrough();
+z.object({
+	type: z.literal("inbox:ack"),
+	entryId: z.number().int().nonnegative()
+});
 z.object({
 	organizationId: z.string(),
 	organizationName: z.string(),
 	organizationDisplayName: z.string(),
-	role: z.string()
+	role: z.string(),
+	expiresAt: z.string().nullable()
 });
 z.object({
 	id: z.string(),
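One delivery round-trip under the schemas above, with invented ids (a sketch, not captured traffic):

```js
// server → client: push the entry; `message` is the same payload the legacy
// HTTP /inbox poll returned, so client-side dispatch code is unchanged.
const deliver = {
  type: "inbox:deliver",
  entryId: 4312,           // inbox_entries.id, invented for the example
  inboxId: "inbox-7f3a",   // invented
  chatId: null,
  message: { /* clientMessageSchema payload, elided */ }
};

// client → server: echo the entryId back once the handler starts processing.
const ack = { type: "inbox:ack", entryId: deliver.entryId };
```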
@@ -1343,6 +1379,13 @@ z.object({
 	token: z.string().min(1)
 });
 /**
+ * Negotiable wire-protocol features the server advertises in its `welcome`
+ * frame. Older clients drop the `capabilities` field silently because the
+ * frame is `.passthrough()`. New clients gate optional code paths on it —
+ * absent ⇒ feature off, never assumed.
+ */
+const serverCapabilitiesSchema = z.object({ wsInboxDeliver: z.boolean().default(false) }).partial();
+/**
 * Advisory frame sent server → client immediately after `auth:ok`. It carries
 * the Command-package version the server was bundled with, so the client can
 * detect version drift on startup and on each reconnect. `.passthrough()` so
@@ -1352,7 +1395,8 @@ z.object({
 const serverWelcomeFrameSchema = z.object({
 	type: z.literal("server:welcome"),
 	serverCommandVersion: z.string().min(1),
-	serverTimeMs: z.number().int().nonnegative()
+	serverTimeMs: z.number().int().nonnegative(),
+	capabilities: serverCapabilitiesSchema.optional()
 }).passthrough();
 /** Declare a config field with a Zod schema and optional metadata. */
 function field(schema, options) {
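The effective state of the feature is the AND of both sides' flags; a minimal client-side sketch (function name and frame values invented, shapes from the hunks above):

```js
// Absent `capabilities` (older server) or an unset flag means "feature off".
function negotiateWsInboxDeliver(welcomeFrame, clientOptIn) {
  return clientOptIn && welcomeFrame.capabilities?.wsInboxDeliver === true;
}

const oldServer = { type: "server:welcome", serverCommandVersion: "0.10.3", serverTimeMs: Date.now() };
const newServer = { ...oldServer, serverCommandVersion: "0.10.4", capabilities: { wsInboxDeliver: true } };
negotiateWsInboxDeliver(oldServer, true);  // false: keep the 5s HTTP poll
negotiateWsInboxDeliver(newServer, true);  // true: rely on inbox:deliver push
negotiateWsInboxDeliver(newServer, false); // false: client kill-switch wins
```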
@@ -1488,6 +1532,7 @@ defineConfig({
 	loginMax: field(z.number().default(5), { env: "FIRST_TREE_HUB_RATE_LIMIT_LOGIN_MAX" }),
 	webhookMax: field(z.number().default(60), { env: "FIRST_TREE_HUB_RATE_LIMIT_WEBHOOK_MAX" })
 }),
+inbox: optional({ maxInFlightPerAgent: field(z.number().int().min(1).max(1024).default(32), { env: "FIRST_TREE_HUB_INBOX_MAX_IN_FLIGHT_PER_AGENT" }) }),
 kael: optional({
 	endpoint: field(z.string(), { env: "KAEL_ENDPOINT" }),
 	apiKey: field(z.string(), {
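The new knob can be set through the env binding above or in the config file; a hypothetical fragment (key path and env name from the diff, the value 64 is an example):

```js
// 1) environment: FIRST_TREE_HUB_INBOX_MAX_IN_FLIGHT_PER_AGENT=64
// 2) config object (validated against the field above: int, 1 to 1024, default 32):
const serverConfig = {
  inbox: { maxInFlightPerAgent: 64 }
};
```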
@@ -1731,6 +1776,17 @@ const RECONNECT_MAX_MS = 3e4;
 const WS_CONNECT_TIMEOUT_MS = 1e4;
 const HEARTBEAT_INTERVAL_MS = 3e4;
 /**
+ * Client-side opt-in for the WS inbox data plane. Gates BOTH the
+ * `wireCapabilities.wsInboxDeliver` flag we declare on `client:register`
+ * AND how we interpret the server's welcome capability — without this AND,
+ * a future client kill-switch could land in a half-state where we tell the
+ * server "no thanks" but still treat welcome's `wsInboxDeliver:true` as
+ * authoritative and stop the 5s HTTP poll, leaving messages stuck if a
+ * NOTIFY ever drops. Hard-coded `true` for now; flip to a config knob if
+ * you need a runtime kill-switch.
+ */
+const WS_INBOX_DELIVER_OPT_IN = true;
+/**
 * Unified-user-token C5: reconnect PROACTIVELY this many ms before the JWT's
 * `exp` claim so the client rotates to a fresh JWT without ever hitting the
 * server-side `auth:expired` push. The provider's next `getAccessToken()` call
@@ -1782,6 +1838,15 @@ var ClientConnection = class extends EventEmitter {
 	/** Count of `server:welcome` frames received; drives `isReconnect` flag. */
 	welcomeFramesReceived = 0;
 	/**
+	 * Whether the most recent `server:welcome` frame advertised
+	 * `capabilities.wsInboxDeliver`. The runtime (AgentSlot) reads this
+	 * (via {@link supportsWsInboxDeliver}) to decide whether to keep the
+	 * legacy 5s HTTP poll or rely entirely on `inbox:deliver` push frames.
+	 * Re-evaluated on every reconnect — the welcome frame is the source of
+	 * truth, never assumed sticky across connections.
+	 */
+	wsInboxDeliverActive = false;
+	/**
 	 * Last handshake error, stashed for the `close` handler to surface a typed
 	 * reason (e.g. {@link ClientOrgMismatchError}) instead of a generic
 	 * "closed before ready" when `connect()` is pending.
@@ -1817,6 +1882,30 @@ var ClientConnection = class extends EventEmitter {
 	get agents() {
 		return this.boundAgents;
 	}
+	/**
+	 * True when the current connection's `server:welcome` advertised
+	 * `capabilities.wsInboxDeliver` — meaning the server will push
+	 * `inbox:deliver` frames and accept `inbox:ack` frames over this WS.
+	 * Resets to false on every reconnect until the new welcome arrives.
+	 */
+	get supportsWsInboxDeliver() {
+		return this.wsInboxDeliverActive;
+	}
+	/**
+	 * Ack a delivered inbox entry over the WS data plane. Replaces the legacy
+	 * `sdk.ack()` HTTP call when the connection has negotiated
+	 * `wsInboxDeliver`. Safe to call when the WS is closed — the frame is
+	 * dropped silently and the entry will time out and re-deliver on
+	 * reconnect, mirroring how the legacy timeout reaper handles HTTP
+	 * ack-loss.
+	 */
+	sendInboxAck(entryId) {
+		if (!this.ws || this.ws.readyState !== WebSocket.OPEN) return;
+		this.ws.send(JSON.stringify({
+			type: "inbox:ack",
+			entryId
+		}));
+	}
 	async connect() {
 		this.closing = false;
 		await this.openWebSocket();
@@ -1968,6 +2057,7 @@ var ClientConnection = class extends EventEmitter {
 		this.clearAuthRefreshTimer();
 		const wasRegistered = this.registered;
 		this.registered = false;
+		this.wsInboxDeliverActive = false;
 		this.rejectAllPendingBinds("WebSocket closed");
 		if (!settled) {
 			this.wsLogger.warn({ code }, "closed before ready");
@@ -1999,7 +2089,8 @@ var ClientConnection = class extends EventEmitter {
 			clientId: this.clientId,
 			hostname: hostname(),
 			os: platform(),
-			sdkVersion: this.sdkVersion
+			sdkVersion: this.sdkVersion,
+			wireCapabilities: { wsInboxDeliver: WS_INBOX_DELIVER_OPT_IN }
 		}));
 		return;
 	}
@@ -2009,6 +2100,7 @@ var ClientConnection = class extends EventEmitter {
 			this.wsLogger.warn({ issues: parsed.error.issues.map((i) => i.message) }, "ignoring malformed server:welcome frame");
 			return;
 		}
+		this.wsInboxDeliverActive = parsed.data.capabilities?.wsInboxDeliver === true && WS_INBOX_DELIVER_OPT_IN;
 		const isReconnect = this.welcomeFramesReceived > 0;
 		this.welcomeFramesReceived++;
 		this.emit("server:welcome", {
@@ -2153,6 +2245,25 @@ var ClientConnection = class extends EventEmitter {
 		else this.emit("agent:message", inboxId, msg);
 		return;
 	}
+	if (type === "inbox:deliver") {
+		const parsed = inboxDeliverFrameSchema.safeParse(msg);
+		if (!parsed.success) {
+			this.wsLogger.warn({
+				issues: parsed.error.issues.map((i) => ({
+					path: i.path.join("."),
+					code: i.code,
+					message: i.message
+				})),
+				frameKeys: Object.keys(msg),
+				messageKeys: msg.message && typeof msg.message === "object" ? Object.keys(msg.message) : null
+			}, "malformed inbox:deliver frame — dropping");
+			return;
+		}
+		const emit = () => this.emit("inbox:deliver", parsed.data.inboxId, parsed.data);
+		if (this.pendingImageWrites.size > 0) Promise.all([...this.pendingImageWrites]).finally(emit);
+		else emit();
+		return;
+	}
 	if (type === "error") {
 		const errorMsg = msg.message;
 		const ref = msg.ref;
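A hypothetical standalone consumer of the new client event; the packaged AgentSlot routes this through SessionManager instead, which acks as the handler starts processing (`connection` and `dispatch` are assumed names):

```js
connection.on("inbox:deliver", (inboxId, frame) => {
  connection.sendInboxAck(frame.entryId); // echo the server-side entry id
  dispatch(frame.message);                // same shape as the legacy poll payload
});
```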
@@ -4878,11 +4989,19 @@ var SessionManager = class {
 		this.lastReportedStates.set(chatId, state);
 		this.config.onStateChange(chatId, state);
 	}
-	/**
+	/**
+	 * ACK an inbox entry — delayed until handler starts processing.
+	 *
+	 * Routes through `config.ackEntry` when set (WS push path) or falls back to
+	 * `sdk.ack` (HTTP poll path). One ack per entry, one channel per slot —
+	 * mixing channels in one slot would leak the server's per-agent in-flight
+	 * counter (proposal hub-inbox-ws-data-plane §3.5).
+	 */
 	async ackEntry(entryId, chatId) {
 		if (entryId === void 0) return;
 		try {
-			await this.config.sdk.ack(entryId);
+			if (this.config.ackEntry) await this.config.ackEntry(entryId);
+			else await this.config.sdk.ack(entryId);
 		} catch {
 			this.config.log.warn({
 				chatId,
@@ -5033,6 +5152,12 @@ var AgentSlot = class {
 	pollingTimer = null;
 	reconcileTimer = null;
 	listeners = [];
+	/**
+	 * The inbox this slot's agent owns — used to filter `inbox:deliver`
+	 * frames addressed to other agents on the same client. Captured at
+	 * `start()` from `sdk.register()`.
+	 */
+	inboxId = null;
 	constructor(config) {
 		this.config = config;
 		this.logger = createLogger("slot").child({
@@ -5075,9 +5200,19 @@ var AgentSlot = class {
 			this.logger.error({ err }, "failed to fetch agent config — bind aborted");
 			throw new Error(`Hub unreachable while loading agent config: ${msg}`);
 		}
+		this.inboxId = agent.inboxId;
 		const onMessage = (agentId) => {
 			if (agentId === this.config.agentId) this.pullAndDispatch();
 		};
+		const onInboxDeliver = (inboxId, frame) => {
+			if (inboxId !== this.inboxId) return;
+			this.dispatchPushedFrame(frame).catch((err) => {
+				this.logger.warn({
+					err,
+					entryId: frame.entryId
+				}, "inbox:deliver dispatch error");
+			});
+		};
 		const onBound = (boundAgent) => {
 			if (boundAgent.agentId === this.config.agentId) {
 				this.fullStateSync();
@@ -5088,11 +5223,15 @@ var AgentSlot = class {
 			if (result.agentId === this.config.agentId && this.sessionManager) this.sessionManager.applyStaleChatIds(result.staleChatIds);
 		};
 		this.clientConnection.on("agent:message", onMessage);
+		this.clientConnection.on("inbox:deliver", onInboxDeliver);
 		this.clientConnection.on("agent:bound", onBound);
 		this.clientConnection.on("session:reconcile:result", onReconcileResult);
 		this.listeners.push({
 			event: "agent:message",
 			fn: onMessage
+		}, {
+			event: "inbox:deliver",
+			fn: onInboxDeliver
 		}, {
 			event: "agent:bound",
 			fn: onBound
@@ -5108,6 +5247,10 @@ var AgentSlot = class {
 				agentId: this.config.agentId
 			})
 		});
+		const ackEntry = this.clientConnection.supportsWsInboxDeliver ? (entryId) => {
+			this.clientConnection.sendInboxAck(entryId);
+			return Promise.resolve();
+		} : void 0;
 		this.sessionManager = new SessionManager({
 			session: this.config.session,
 			concurrency: this.config.concurrency,
@@ -5129,6 +5272,7 @@ var AgentSlot = class {
 			log: this.logger,
 			registryPath,
 			agentConfigCache: this.agentConfigCache,
+			ackEntry,
 			onStateChange: (chatId, state) => this.reportSessionState(chatId, state),
 			onRuntimeStateChange: (state) => this.reportRuntimeState(state),
 			onSessionEvent: (chatId, event) => this.reportSessionEvent(chatId, event),
@@ -5162,6 +5306,7 @@ var AgentSlot = class {
 			this.reconcileTimer = null;
 		}
 		for (const entry of this.listeners) if (entry.event === "agent:message") this.clientConnection.off(entry.event, entry.fn);
+		else if (entry.event === "inbox:deliver") this.clientConnection.off(entry.event, entry.fn);
 		else if (entry.event === "agent:bound") this.clientConnection.off(entry.event, entry.fn);
 		else if (entry.event === "session:reconcile:result") this.clientConnection.off(entry.event, entry.fn);
 		else this.clientConnection.off(entry.event, entry.fn);
@@ -5189,11 +5334,47 @@ var AgentSlot = class {
 		if (runtimeState) this.clientConnection.reportRuntimeState(this.config.agentId, runtimeState);
 	}
 	startPolling() {
+		if (this.clientConnection.supportsWsInboxDeliver) {
+			this.logger.info("WS inbox data plane active — skipping 5s HTTP poll");
+			return;
+		}
 		this.pollingTimer = setInterval(() => {
 			this.pullAndDispatch();
 		}, 5e3);
 		this.pullAndDispatch();
 	}
+	/**
+	 * Translate an `inbox:deliver` push frame into the {@link InboxEntryWithMessage}
+	 * shape `SessionManager.dispatch` expects, then dispatch.
+	 *
+	 * Ack happens INSIDE `dispatch` via the `ackEntry` callback we pinned at
+	 * construction time — for push slots that's `clientConnection.sendInboxAck`,
+	 * for poll slots it stays the legacy `sdk.ack`. Sending an additional ack
+	 * here would double-ack: HTTP first (`delivered → acked`) followed by a
+	 * WS frame the server can no longer match against any `delivered` row,
+	 * which leaks the server's per-agent in-flight counter and stalls push
+	 * after `inboxMaxInFlightPerAgent` messages.
+	 *
+	 * Dispatch errors propagate up; the entry stays `delivered` server-side
+	 * and the 300s timeout reaper rolls it back to `pending` for replay
+	 * (proposal §3.7).
+	 */
+	async dispatchPushedFrame(frame) {
+		if (!this.sessionManager) return;
+		const entry = {
+			id: frame.entryId,
+			inboxId: frame.inboxId,
+			messageId: frame.message.id,
+			chatId: frame.chatId,
+			status: "delivered",
+			retryCount: 0,
+			createdAt: frame.message.createdAt,
+			deliveredAt: (/* @__PURE__ */ new Date()).toISOString(),
+			ackedAt: null,
+			message: frame.message
+		};
+		await this.sessionManager.dispatch(entry);
+	}
 	startReconcileLoop() {
 		const intervalSec = this.config.session.reconcile_interval_seconds ?? 300;
 		this.reconcileTimer = setInterval(() => this.reconcileNow(), intervalSec * 1e3);
@@ -7371,7 +7552,7 @@ async function onboardCreate(args) {
 	}
 	const runtimeAgent = args.type === "human" ? args.assistant : args.id;
 	if (args.feishuBotAppId && args.feishuBotAppSecret) {
-		const { bindFeishuBot } = await import("./feishu-Boy3n8CT.mjs").then((n) => n.r);
+		const { bindFeishuBot } = await import("./feishu-DvjRZMdZ.mjs").then((n) => n.r);
 		const targetAgentUuid = args.type === "human" ? assistantUuid : primary.uuid;
 		if (!targetAgentUuid) print.line(`Warning: Cannot bind Feishu bot — no runtime agent available for "${args.id}".\n`);
 		else {
@@ -8351,7 +8532,7 @@ function createFeedbackHandler(config) {
 	return { handle };
 }
 //#endregion
-//#region ../server/dist/app-
+//#region ../server/dist/app-fbgPnPWI.mjs
 var __defProp = Object.defineProperty;
 var __exportAll = (all, no_symbols) => {
 	let target = {};
@@ -10718,27 +10899,31 @@ function createNotifier(listenClient) {
 		const messageId = payload.slice(sepIdx + 1);
 		const sockets = subscriptions.get(inboxId);
 		if (!sockets) return;
-		const
+		const doorbellFrame = JSON.stringify({
 			type: "new_message",
 			inboxId,
 			messageId
 		});
-		for (const ws of sockets)
+		for (const [ws, pushHandler] of sockets) {
+			if (ws.readyState !== ws.OPEN) continue;
+			if (pushHandler) Promise.resolve(pushHandler(messageId)).catch(() => {});
+			else ws.send(doorbellFrame);
+		}
 	}
 	return {
-		subscribe(inboxId, ws) {
-			let set = subscriptions.get(inboxId);
-			if (!set) {
-				set = /* @__PURE__ */ new Set();
-				subscriptions.set(inboxId, set);
+		subscribe(inboxId, ws, pushHandler) {
+			let map = subscriptions.get(inboxId);
+			if (!map) {
+				map = /* @__PURE__ */ new Map();
+				subscriptions.set(inboxId, map);
 			}
-			set.add(ws);
+			map.set(ws, pushHandler ?? null);
 		},
 		unsubscribe(inboxId, ws) {
-			const set = subscriptions.get(inboxId);
-			if (set) {
-				set.delete(ws);
-				if (set.size === 0) subscriptions.delete(inboxId);
+			const map = subscriptions.get(inboxId);
+			if (map) {
+				map.delete(ws);
+				if (map.size === 0) subscriptions.delete(inboxId);
 			}
 		},
 		async notify(inboxId, messageId) {
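The subscription value now selects a delivery mode per socket; a sketch of the two modes against the `subscribe` signature above (socket names and handler body invented):

```js
// null handler → legacy doorbell: the socket gets new_message and the client
// polls HTTP /inbox as before.
notifier.subscribe(inboxId, legacySocket);

// function handler → WS push: the notifier invokes it instead of the doorbell;
// the handler claims pending rows and sends inbox:deliver frames itself.
notifier.subscribe(inboxId, upgradedSocket, async (messageId) => {
  // e.g. claim entries for (inboxId, messageId) and push each one
});
```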
@@ -10762,11 +10947,11 @@ function createNotifier(listenClient) {
 			} catch {}
 		},
 		async pushFrameToInbox(inboxId, frame) {
-			const set = subscriptions.get(inboxId);
-			if (!set) return 0;
+			const map = subscriptions.get(inboxId);
+			if (!map) return 0;
 			let queued = 0;
 			const pending = [];
-			for (const ws of set) {
+			for (const ws of map.keys()) {
 				if (ws.readyState !== ws.OPEN) continue;
 				pending.push(new Promise((resolve) => {
 					ws.send(frame, (err) => {
@@ -13206,8 +13391,8 @@ async function pollInbox(db, inboxId, limit) {
 	}, () => pollInboxInner(db, inboxId, limit));
 }
 async function pollInboxInner(db, inboxId, limit) {
-	return db.transaction(async (tx) => {
-		const claimed = await tx.execute(sql`
+	return db.transaction(async (tx) => {
+		return bundleDeliveryWithSilentContext(tx, inboxId, await tx.execute(sql`
 			UPDATE inbox_entries
 			SET status = 'delivered', delivered_at = NOW()
 			WHERE id IN (
@@ -13218,53 +13403,132 @@ async function pollInboxInner(db, inboxId, limit) {
 				FOR UPDATE SKIP LOCKED
 			)
 			RETURNING *
-		`);
-		if (claimed.length === 0) return [];
-		claimed.sort((a, b) => a.created_at.localeCompare(b.created_at));
-		const precedingByEntryId = await collectPrecedingContext(tx, inboxId, claimed);
-		const messageIds = claimed.map((e) => e.message_id);
-		const msgs = await tx.select().from(messages).where(inArray(messages.id, messageIds));
-		const msgMap = new Map(msgs.map((m) => [m.id, m]));
-		const payloads = await buildClientMessagePayloadsForInbox(tx, inboxId, claimed.map((entry) => {
-			const msg = msgMap.get(entry.message_id);
-			if (!msg) throw new Error(`Unexpected: message ${entry.message_id} not found`);
-			return {
-				entryChatId: entry.chat_id,
-				precedingMessages: precedingByEntryId.get(entry.id) ?? [],
-				message: {
-					id: msg.id,
-					chatId: msg.chatId,
-					senderId: msg.senderId,
-					format: msg.format,
-					content: msg.content,
-					metadata: msg.metadata,
-					replyToInbox: msg.replyToInbox,
-					replyToChat: msg.replyToChat,
-					inReplyTo: msg.inReplyTo,
-					source: msg.source,
-					createdAt: msg.createdAt.toISOString()
-				}
-			};
-		}));
-		return claimed.map((entry, idx) => {
-			const payload = payloads[idx];
-			if (!payload) throw new Error(`Unexpected: payload for entry ${entry.id} not built`);
-			return {
-				id: entry.id,
-				inboxId: entry.inbox_id,
-				messageId: entry.message_id,
-				chatId: entry.chat_id,
-				status: entry.status,
-				retryCount: entry.retry_count,
-				createdAt: entry.created_at,
-				deliveredAt: entry.delivered_at ?? null,
-				ackedAt: entry.acked_at ?? null,
-				message: payload
-			};
-		});
+		`));
 	});
 }
 /**
+ * Shared payload assembler for already-claimed `inbox_entries` rows.
+ *
+ * Both the HTTP poll path (`pollInbox`) and the WS push path
+ * (`claimAndBuildForPush`) call this with rows they have just `UPDATE`d to
+ * `status='delivered'`. Keeping the silent-context bundling in one place is
+ * the only way to keep the two paths from drifting (proposal
+ * hub-inbox-ws-data-plane §3.2 risk #1).
+ *
+ * Steps:
+ * 1. Sort by `created_at` ASC (PG `RETURNING` does not guarantee order).
+ * 2. For each trigger, collect silent context & bulk-ack stale silent rows.
+ * 3. Fetch the trigger messages.
+ * 4. Build wire payloads via the single dispatcher.
+ *
+ * Returns `[]` if `claimed` is empty.
+ */
+async function bundleDeliveryWithSilentContext(tx, inboxId, claimed) {
+	if (claimed.length === 0) return [];
+	claimed.sort((a, b) => a.created_at.localeCompare(b.created_at));
+	const precedingByEntryId = await collectPrecedingContext(tx, inboxId, claimed);
+	const messageIds = claimed.map((e) => e.message_id);
+	const msgs = await tx.select().from(messages).where(inArray(messages.id, messageIds));
+	const msgMap = new Map(msgs.map((m) => [m.id, m]));
+	const payloads = await buildClientMessagePayloadsForInbox(tx, inboxId, claimed.map((entry) => {
+		const msg = msgMap.get(entry.message_id);
+		if (!msg) throw new Error(`Unexpected: message ${entry.message_id} not found`);
+		return {
+			entryChatId: entry.chat_id,
+			precedingMessages: precedingByEntryId.get(entry.id) ?? [],
+			message: {
+				id: msg.id,
+				chatId: msg.chatId,
+				senderId: msg.senderId,
+				format: msg.format,
+				content: msg.content,
+				metadata: msg.metadata,
+				replyToInbox: msg.replyToInbox,
+				replyToChat: msg.replyToChat,
+				inReplyTo: msg.inReplyTo,
+				source: msg.source,
+				createdAt: msg.createdAt.toISOString()
+			}
+		};
+	}));
+	return claimed.map((entry, idx) => {
+		const payload = payloads[idx];
+		if (!payload) throw new Error(`Unexpected: payload for entry ${entry.id} not built`);
+		return {
+			id: Number(entry.id),
+			inboxId: entry.inbox_id,
+			messageId: entry.message_id,
+			chatId: entry.chat_id,
+			status: entry.status,
+			retryCount: entry.retry_count,
+			createdAt: entry.created_at,
+			deliveredAt: entry.delivered_at ?? null,
+			ackedAt: entry.acked_at ?? null,
+			message: payload
+		};
+	});
+}
+/**
+ * Realistic upper bound on rows a single NOTIFY references. The unique
+ * constraint `(inbox_id, message_id, chat_id)` caps a `(inbox, message)`
+ * pair at one row per chatId; the only way to exceed 1 today is the replyTo
+ * cross-chat path (`message.ts` writes a second row keyed by the original's
+ * `replyToChat`). 8 leaves headroom for any future fan-out variant without
+ * requiring a schema change here.
+ */
+const PUSH_CLAIM_BATCH_LIMIT = 8;
+/**
+ * WS-push path: atomically claim every pending entry the just-fired
+ * `NOTIFY (inboxId:messageId)` references and assemble their wire payloads.
+ *
+ * Returns `[]` if no row matches — benign race with HTTP poll or another
+ * server instance that already claimed the entry. NOTIFY is fire-and-forget
+ * (proposal §3.2).
+ *
+ * Why an array, not a single row: `sendMessage` can write **two** rows for
+ * the same `(inbox, messageId)` pair when the recipient is both a chat
+ * participant and the `replyToInbox` of an earlier message — the unique key
+ * is `(inbox_id, message_id, chat_id)`, so the rows differ by chatId. The
+ * old `LIMIT 1` shape would only push the first; the second sat `pending`
+ * until reconnect. Aligning with `pollInboxInner`'s `LIMIT N` shape closes
+ * that gap and keeps push/poll behaviour interchangeable.
+ */
+async function claimAndBuildForPush(db, inboxId, messageId) {
+	return withSpan("inbox.deliver.push", {
+		"inbox.id": inboxId,
+		"message.id": messageId
+	}, () => db.transaction(async (tx) => {
+		return bundleDeliveryWithSilentContext(tx, inboxId, await tx.execute(sql`
+			UPDATE inbox_entries
+			SET status = 'delivered', delivered_at = NOW()
+			WHERE id IN (
+				SELECT id FROM inbox_entries
+				WHERE inbox_id = ${inboxId}
+				AND message_id = ${messageId}
+				AND status = 'pending'
+				AND notify = true
+				ORDER BY created_at
+				LIMIT ${PUSH_CLAIM_BATCH_LIMIT}
+				FOR UPDATE SKIP LOCKED
+			)
+			RETURNING *
+		`));
+	}));
+}
+/**
+ * WS-push backlog path: on agent rebind (or once an in-flight slot frees up
+ * after an ack), drain up to `limit` pending `notify=true` entries oldest-
+ * first and assemble wire payloads. Identical claim shape to the HTTP poll
+ * path — they are intentionally interchangeable so a hot-path bug fixed in
+ * one shows up in the other (proposal §3.3 / §3.5).
+ */
+async function claimBacklogForPush(db, inboxId, limit) {
+	return withSpan("inbox.deliver.backlog", {
+		"inbox.id": inboxId,
+		"inbox.backlog.limit": limit
+	}, () => pollInboxInner(db, inboxId, limit));
+}
+/**
 * Per claimed trigger: SELECT silent (notify=false) pending rows in the same
 * chat that occurred between the previous trigger in this batch (or beginning
 * of time) and this trigger, capped by `PRECEDING_CONTEXT_MAX_ENTRIES` and
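Why the push and poll claimers can race safely: both use the same `UPDATE … FOR UPDATE SKIP LOCKED` shape, so each pending row is won by exactly one transaction and the loser sees zero rows. A generic node-postgres sketch of the pattern (not the package's drizzle code; table and columns as in the SQL above):

```js
import pg from "pg";

const pool = new pg.Pool(); // connection settings come from PG* env vars

async function claimPending(inboxId, limit) {
  const { rows } = await pool.query(
    `UPDATE inbox_entries
        SET status = 'delivered', delivered_at = NOW()
      WHERE id IN (
        SELECT id FROM inbox_entries
         WHERE inbox_id = $1 AND status = 'pending'
         ORDER BY created_at
         LIMIT $2
           FOR UPDATE SKIP LOCKED)
      RETURNING *`,
    [inboxId, limit]
  );
  return rows; // [] when a concurrent claimer already won the rows (benign)
}
```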
@@ -13340,6 +13604,26 @@ async function ackEntry$2(db, entryId, inboxId) {
 		return entry;
 	});
 }
+/**
+ * Ack a delivered entry from the WS data plane, scoped to the inboxes the
+ * connected socket has bound. Returns the acked row on success, `null` if no
+ * row matches — a benign outcome the caller should ignore (the entry may
+ * have already been acked, timed out, or never belonged to this socket).
+ *
+ * Distinct from {@link ackEntry} so the WS path can ack without trusting an
+ * `inboxId` from the wire — only entries whose `inboxId` is in `inboxIds`
+ * are eligible. Empty `inboxIds` short-circuits to `null`.
+ */
+async function ackEntryByIdForBoundAgents(db, entryId, inboxIds) {
+	if (inboxIds.length === 0) return null;
+	return withSpan("inbox.ack.ws", { [FIRST_TREE_HUB_ATTR.INBOX_ENTRY_ID]: String(entryId) }, async () => {
+		const [entry] = await db.update(inboxEntries).set({
+			status: "acked",
+			ackedAt: /* @__PURE__ */ new Date()
+		}).where(and(eq(inboxEntries.id, entryId), inArray(inboxEntries.inboxId, inboxIds), eq(inboxEntries.status, "delivered"))).returning();
+		return entry ?? null;
+	});
+}
 async function renewEntry(db, entryId, inboxId) {
 	const [entry] = await db.update(inboxEntries).set({ deliveredAt: /* @__PURE__ */ new Date() }).where(and(eq(inboxEntries.id, entryId), eq(inboxEntries.inboxId, inboxId), eq(inboxEntries.status, "delivered"))).returning();
 	if (!entry) throw new NotFoundError("Inbox entry not found or not in delivered status");
@@ -13588,6 +13872,27 @@ async function agentTaskRoutes(app) {
 		return getTaskHealth(app.db, request.params.taskId, identity.organizationId);
 	});
 }
+/**
+ * Default per-agent in-flight cap when `server.inbox.maxInFlightPerAgent` is
+ * unset. Mirrors the schema default so a hub running without an explicit
+ * `inbox` block still gets reasonable backpressure once `wsDataPlane` is
+ * flipped on. See proposal hub-inbox-ws-data-plane §3.5.
+ */
+const DEFAULT_INBOX_MAX_IN_FLIGHT_PER_AGENT = 32;
+/**
+ * Hard cap on entries scanned in a single backlog drain so a recovering
+ * client doesn't trigger an arbitrarily large transaction or burst of
+ * frames. Anything beyond this stays `pending` and gets picked up by
+ * subsequent post-ack drains. Same constant covers both the agent:bound
+ * recovery path and the post-ack top-up.
+ *
+ * Lower than proposal §3.3's 500 on purpose: the actual limit per drain is
+ * `min(remainingInFlightBudget, INBOX_BACKLOG_BATCH_LIMIT)`, so with a
+ * default cap of 32 the drain SQL never asks for more than ~32 anyway.
+ * Subsequent NOTIFYs and post-ack top-ups continue draining without a
+ * single-transaction megabatch.
+ */
+const INBOX_BACKLOG_BATCH_LIMIT = 50;
 const wsMessageSchema = z.object({
 	type: z.string(),
 	agentId: z.string().optional(),
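A worked instance of the drain-limit arithmetic in the comment above (numbers assumed):

```js
const cap = 32;                    // DEFAULT_INBOX_MAX_IN_FLIGHT_PER_AGENT
const inFlight = 7;                // frames currently awaiting inbox:ack (example)
const slotsFree = cap - inFlight;  // 25
const limit = Math.min(slotsFree, 50); // min(25, INBOX_BACKLOG_BATCH_LIMIT) = 25
```

A recovering agent with a deep backlog therefore drains in several small claims rather than one 500-row transaction.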
@@ -13632,6 +13937,7 @@ function sendRejected(socket, ref, reason) {
 function clientWsRoutes(notifier, instanceId) {
 	return async (app) => {
 		const jwtSecretBytes = new TextEncoder().encode(app.config.secrets.jwtSecret);
+		const inboxMaxInFlightPerAgent = app.config.inbox?.maxInFlightPerAgent ?? DEFAULT_INBOX_MAX_IN_FLIGHT_PER_AGENT;
 		app.get("/client", {
 			websocket: true,
 			config: { otel: false }
@@ -13641,6 +13947,157 @@ function clientWsRoutes(notifier, instanceId) {
 		let clientId = null;
 		let authExpiryTimer = null;
 		const boundAgents = /* @__PURE__ */ new Map();
+		/**
+		 * Whether the connected client opted into the WS inbox data plane via
+		 * `client:register.wireCapabilities.wsInboxDeliver`. Set per-socket
+		 * because client SDKs are upgraded independently — an old client
+		 * connecting to a new server must keep receiving the legacy
+		 * `new_message` doorbell + HTTP poll path (proposal §3.6).
+		 */
+		let clientWantsWsInboxDeliver = false;
+		/**
+		 * Per-agent in-flight `inbox:deliver` counter for backpressure. Lives on
+		 * the socket — when the WS closes it goes with it; that's intentional,
+		 * because re-counting on a fresh connection would bias the cap against
+		 * a healthy reconnect (proposal §3.5).
+		 */
+		const inboxInFlight = /* @__PURE__ */ new Map();
+		function pushUseWsDataPlane() {
+			return clientWantsWsInboxDeliver;
+		}
+		/**
+		 * Returns `false` when the socket has already moved out of `OPEN` —
+		 * the only failure mode the caller can observe synchronously.
+		 *
+		 * Note: `ws.send` is fire-and-forget; a buffered frame that fails
+		 * to actually flush (TCP slow-close, internal queue full) does NOT
+		 * surface here. That class of loss is recovered by the 300s timeout
+		 * reaper rolling the entry back to `pending` (§3.7). If you ever
+		 * need flush-level confirmation, switch to the `ws.send(frame, cb)`
+		 * callback form (see `notifier.ts pushFrameToInbox`).
+		 */
+		function sendInboxDeliverFrame(entry) {
+			if (socket.readyState !== socket.OPEN) return false;
+			const frame = {
+				type: "inbox:deliver",
+				entryId: entry.id,
+				inboxId: entry.inboxId,
+				chatId: entry.chatId,
+				message: entry.message
+			};
+			const validated = inboxDeliverFrameSchema$1.safeParse(frame);
+			if (!validated.success) app.log.error({
+				entryId: entry.id,
+				inboxId: entry.inboxId,
+				issues: validated.error.issues.map((i) => ({
+					path: i.path.join("."),
+					code: i.code,
+					message: i.message
+				}))
+			}, "inbox:deliver frame failed self-validation — wire shape drift");
+			socket.send(JSON.stringify(frame));
+			return true;
+		}
+		/**
+		 * Build the per-socket push handler bound to a specific agent. Closes
+		 * over `agentId`, `inboxId`, the socket, and the in-flight counter.
+		 *
+		 * Backpressure: when the agent is at-cap we drop the NOTIFY (entry
+		 * stays `pending` server-side) and a debug log records the drop so
+		 * staging can correlate "messages slow" reports against cap hits.
+		 * The dropped row is replayed by `drainBacklogForAgent` once an ack
+		 * frees a slot, or by the next NOTIFY when we're back below cap (§3.5).
+		 *
+		 * Multi-row claims: a single `(inboxId, messageId)` pair can map to
+		 * more than one `inbox_entries` row (replyTo cross-chat case writes
+		 * a second row with a different chatId). We push every row claimed
+		 * by this NOTIFY in one go — see `claimAndBuildForPush`.
+		 *
+		 * The cap is intentionally **soft**: claim happens after the gate
+		 * check, so an N>1 claim can nudge in-flight slightly past
+		 * `inboxMaxInFlightPerAgent`. N is bounded by the
+		 * `(inbox_id, message_id, chat_id)` unique constraint (≤2 today),
+		 * so worst-case overshoot is small and the memory headroom in §3.5's
+		 * 64MB estimate covers it.
+		 */
+		function makeInboxPushHandler(agentId, inboxId) {
+			return async (messageId) => {
+				const current = inboxInFlight.get(agentId) ?? 0;
+				if (current >= inboxMaxInFlightPerAgent) {
+					app.log.debug({
+						agentId,
+						inboxId,
+						messageId,
+						inFlightCount: current,
+						cap: inboxMaxInFlightPerAgent
+					}, "inbox push: at cap, dropping NOTIFY (will replay via post-ack drain)");
+					return;
+				}
+				let entries;
+				try {
+					entries = await claimAndBuildForPush(app.db, inboxId, messageId);
+				} catch (err) {
+					app.log.error({
+						err,
+						inboxId,
+						messageId,
+						agentId
+					}, "claimAndBuildForPush failed");
+					return;
+				}
+				if (entries.length === 0) return;
+				for (const entry of entries) {
+					inboxInFlight.set(agentId, (inboxInFlight.get(agentId) ?? 0) + 1);
+					if (!sendInboxDeliverFrame(entry)) {
+						inboxInFlight.set(agentId, Math.max(0, (inboxInFlight.get(agentId) ?? 1) - 1));
+						return;
+					}
+				}
+			};
+		}
+		/**
+		 * Drain up to `INBOX_BACKLOG_BATCH_LIMIT` pending entries for an agent
+		 * over the current WS, capped by the remaining in-flight budget so a
+		 * full drain stays within the per-agent backpressure cap (§3.3, §3.5).
+		 *
+		 * Used in two places:
+		 * 1. Right after `agent:bound` — covers reconnects where NOTIFYs
+		 *    were dropped while the socket was offline.
+		 * 2. Right after an `inbox:ack` — top up the in-flight slot just
+		 *    freed, in case the previous NOTIFY was dropped at-cap.
+		 *
+		 * The cap is **soft**: this function reads `slotsFree` once before the
+		 * `claimBacklogForPush` round-trip, and a NOTIFY-driven push handler
+		 * may increment the counter concurrently. In the worst case in-flight
+		 * temporarily exceeds the cap by the number of concurrent pushes. With
+		 * the default cap of 32 and N ≤ 2 per push handler invocation, the
+		 * memory headroom in §3.5's 64MB estimate covers this.
+		 */
+		async function drainBacklogForAgent(agentId, inboxId) {
+			if (socket.readyState !== socket.OPEN) return;
+			const slotsFree = inboxMaxInFlightPerAgent - (inboxInFlight.get(agentId) ?? 0);
+			if (slotsFree <= 0) return;
+			const limit = Math.min(slotsFree, INBOX_BACKLOG_BATCH_LIMIT);
+			let entries;
+			try {
+				entries = await claimBacklogForPush(app.db, inboxId, limit);
+			} catch (err) {
+				app.log.error({
+					err,
+					agentId,
+					inboxId,
+					limit
+				}, "claimBacklogForPush failed");
+				return;
+			}
+			for (const entry of entries) {
+				inboxInFlight.set(agentId, (inboxInFlight.get(agentId) ?? 0) + 1);
+				if (!sendInboxDeliverFrame(entry)) {
+					inboxInFlight.set(agentId, Math.max(0, (inboxInFlight.get(agentId) ?? 1) - 1));
+					return;
+				}
+			}
+		}
 		const sessionOpQueues = /* @__PURE__ */ new Map();
 		function chainSessionOp(agentId, chatId, op) {
 			const key = `${agentId}:${chatId}`;
@@ -13745,7 +14202,8 @@ function clientWsRoutes(notifier, instanceId) {
 			socket.send(JSON.stringify({
 				type: "server:welcome",
 				serverCommandVersion: app.commandVersion,
-				serverTimeMs: Date.now()
+				serverTimeMs: Date.now(),
+				capabilities: { wsInboxDeliver: true }
 			}));
 		} catch (err) {
 			const message = err instanceof Error ? err.message : "auth failure";
@@ -13762,6 +14220,7 @@ function clientWsRoutes(notifier, instanceId) {
 		try {
 			if (type === "client:register") {
 				const data = clientRegisterSchema.parse(msg);
+				clientWantsWsInboxDeliver = data.wireCapabilities?.wsInboxDeliver === true;
 				try {
 					await registerClient(app.db, {
 						clientId: data.clientId,
@@ -13885,7 +14344,9 @@ function clientWsRoutes(notifier, instanceId) {
 				agentId: agent.id,
 				inboxId: agent.inboxId
 			});
-			notifier.subscribe(agent.inboxId, socket);
+			const wsPushActive = pushUseWsDataPlane();
+			if (wsPushActive) notifier.subscribe(agent.inboxId, socket, makeInboxPushHandler(agent.id, agent.inboxId));
+			else notifier.subscribe(agent.inboxId, socket);
 			socket.send(JSON.stringify({
 				type: "agent:bound",
 				ref,
@@ -13893,6 +14354,12 @@ function clientWsRoutes(notifier, instanceId) {
 				displayName: agent.displayName,
 				agentType: agent.type
 			}));
+			if (wsPushActive) drainBacklogForAgent(agent.id, agent.inboxId).catch((err) => {
+				app.log.error({
+					err,
+					agentId: agent.id
+				}, "post-bind backlog drain crashed");
+			});
 		} else if (type === "agent:unbind") {
 			const agentId = parsed.data.agentId;
 			if (!agentId || !boundAgents.has(agentId)) {
@@ -13907,6 +14374,7 @@ function clientWsRoutes(notifier, instanceId) {
 			await unbindAgent(app.db, agentId);
 			unbindAgentFromClient(agentId);
 			boundAgents.delete(agentId);
+			inboxInFlight.delete(agentId);
 			socket.send(JSON.stringify({
 				type: "agent:unbound",
 				agentId
@@ -14008,6 +14476,35 @@ function clientWsRoutes(notifier, instanceId) {
 			}
 			const payload = sessionCompletionMessageSchema.parse(msg);
 			if (shouldNotify(agentId, `session_completed:${payload.chatId}`)) notifyAgentEvent(app.db, agentId, "session_completed", "low", payload.chatId).catch(() => {});
+		} else if (type === "inbox:ack") {
+			const payloadResult = inboxAckFrameSchema.safeParse(msg);
+			if (!payloadResult.success) {
+				socket.send(JSON.stringify({
+					type: "error",
+					message: "Malformed inbox:ack frame"
+				}));
+				return;
+			}
+			const { entryId } = payloadResult.data;
+			try {
+				const ackedEntry = await ackEntryByIdForBoundAgents(app.db, entryId, [...boundAgents.values()].map((a) => a.inboxId));
+				if (!ackedEntry) return;
+				const owner = [...boundAgents.values()].find((a) => a.inboxId === ackedEntry.inboxId);
+				if (owner) {
+					inboxInFlight.set(owner.agentId, Math.max(0, (inboxInFlight.get(owner.agentId) ?? 1) - 1));
+					drainBacklogForAgent(owner.agentId, owner.inboxId).catch((err) => {
+						app.log.error({
+							err,
+							agentId: owner.agentId
+						}, "post-ack backlog drain crashed");
+					});
+				}
+			} catch (err) {
+				app.log.error({
+					err,
+					entryId
+				}, "inbox:ack handling failed");
+			}
 		} else if (type === "heartbeat") {
 			if (clientId) {
 				await heartbeatClient(app.db, clientId);
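Putting the hunks together, the negotiated flow reads as follows (a summary, not a wire capture):

```js
// 1. client → server  client:register { wireCapabilities: { wsInboxDeliver: true } }
// 2. server → client  server:welcome  { capabilities: { wsInboxDeliver: true } }
// 3. message lands    NOTIFY fires; makeInboxPushHandler claims pending rows
//                     and sends inbox:deliver frames (per-agent cap applies)
// 4. client → server  inbox:ack { entryId } as the handler starts processing
// 5. server           decrements in-flight and drainBacklogForAgent tops up
//                     the freed slot from any backlog
// On loss at any step the entry stays `pending`/`delivered` server-side and the
// 300s timeout reaper re-delivers, matching the legacy HTTP poll guarantees.
```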
@@ -14396,19 +14893,20 @@ function sanitizeAgentName(login) {
 	return login.toLowerCase().replace(/[^a-z0-9_-]/g, "-").replace(/^-+|-+$/g, "").slice(0, 60) || "user";
 }
 /**
-* Create a fresh
-*
+ * Create a fresh default team org for a brand-new user, plus the matching
+ * admin membership + 1:1 human agent. Slug strategy:
 *
-* - First try: `${login}
+ * - First try: `${login}` (lowercased, sanitized)
 * - On collision: append a 4-char hex disambiguator
 *
-*
-*
-*
+ * Display name is the user's GitHub real name (or login as fallback). No
+ * "Personal Team" suffix — the user might invite teammates later, and we
+ * don't want a label that reads like a private sandbox to be the team name
+ * other members see. Users rename freely via Settings.
 */
 async function createPersonalTeam(db, input) {
-	const baseSlug = sanitizeOrgSlug(
-	const displayName =
+	const baseSlug = sanitizeOrgSlug(input.loginSeed);
+	const displayName = input.userDisplayName;
 	const orgId = uuidv7();
 	return {
 		organizationId: orgId,
@@ -15098,7 +15596,7 @@ async function inferWizardStep(app, m) {
 * landing page.
 */
 async function publicInvitePreviewRoute(app) {
-	const { previewInvitation } = await import("./invitation-BTlGMy0o-Coj07kYi.mjs");
+	const { previewInvitation } = await import("./invitation-D3feYxet-366MNOor.mjs");
 	app.get("/:token/preview", async (request, reply) => {
 		if (!request.params.token) throw new UnauthorizedError("Token required");
 		const preview = await previewInvitation(app.db, request.params.token);
@@ -15128,7 +15626,7 @@ async function adminInvitationRoutes(app) {
 	const m = requireMember(request);
 	if (m.role !== "admin") throw new ForbiddenError("Admin role required");
 	if (request.params.id !== m.organizationId) throw new ForbiddenError("Cannot rotate invitations for another organization");
-	const { rotateInvitation } = await import("./invitation-BTlGMy0o-Coj07kYi.mjs");
+	const { rotateInvitation } = await import("./invitation-D3feYxet-366MNOor.mjs");
 	const inv = await rotateInvitation(app.db, m.organizationId, m.userId);
 	return {
 		id: inv.id,