@agent-team-foundation/first-tree-hub 0.11.0 → 0.11.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{bootstrap-DUeYbwm-.mjs → bootstrap-TJRy0B9m.mjs} +10 -2
- package/dist/chunk-BSw8zbkd.mjs +37 -0
- package/dist/cli/index.mjs +94 -35
- package/dist/client-BCaK653p-CZjDNcdM.mjs +516 -0
- package/dist/client-m1OM4Iag-HKWgB3Yk.mjs +4 -0
- package/dist/dist-BLY7Bu-l.mjs +430 -0
- package/dist/{dist-BoHl9HwW.mjs → dist-BkvrONSQ.mjs} +2 -22
- package/dist/drizzle/0031_drop_system_configs.sql +11 -0
- package/dist/drizzle/meta/_journal.json +7 -0
- package/dist/errors-BmyRwN0Y-CIZZ_sDc.mjs +92 -0
- package/dist/{esm-CYu4tXXn.mjs → esm-iadMkGbV.mjs} +2 -37
- package/dist/execAsync-CCyouKZM.mjs +10 -0
- package/dist/{execAsync-XMc-nFn-.mjs → execAsync-pImxPKN5.mjs} +1 -1
- package/dist/{feishu-Dxk6ArOK.mjs → feishu-AEMHwT6L.mjs} +2 -2
- package/dist/from-CaD373S1.mjs +3840 -0
- package/dist/{getMachineId-bsd-D0w3uAZa.mjs → getMachineId-bsd-DR4-Dysy.mjs} +3 -2
- package/dist/getMachineId-bsd-DjLgZlll.mjs +27 -0
- package/dist/getMachineId-darwin-B6WCAhc4.mjs +24 -0
- package/dist/{getMachineId-darwin-DOoYFb2_.mjs → getMachineId-darwin-CaD2juTg.mjs} +3 -2
- package/dist/getMachineId-linux-BeWHG1gK.mjs +20 -0
- package/dist/{getMachineId-linux-MlY63Zsw.mjs → getMachineId-linux-Dk3gWdQK.mjs} +2 -1
- package/dist/getMachineId-unsupported-BMJQItvF.mjs +15 -0
- package/dist/{getMachineId-unsupported-BS652RIy.mjs → getMachineId-unsupported-Bgz_Je1J.mjs} +2 -1
- package/dist/getMachineId-win-CdgcrzCW.mjs +26 -0
- package/dist/{getMachineId-win-B6hY8edq.mjs → getMachineId-win-vJ6VfDRI.mjs} +3 -2
- package/dist/index.mjs +9 -6
- package/dist/invitation-DWlyNb8x-DZTW9I26.mjs +4 -0
- package/dist/{invitation-B1pjAyOz-BaCA9PII.mjs → invitation-Dnn5gGGX-Ce7zbZpn.mjs} +4 -90
- package/dist/multipart-parser-BIksYTkk.mjs +294 -0
- package/dist/observability-C3nY6Jcz-Dpsi3eFk.mjs +96006 -0
- package/dist/observability-Co8OO0og.mjs +5 -0
- package/dist/{saas-connect-DLSyrQcC.mjs → saas-connect-Bd0g0v_b.mjs} +6442 -6923
- package/dist/src-DNBS5Yjj.mjs +735 -0
- package/dist/src-uVZSbShB.mjs +1176 -0
- package/dist/web/assets/{index-DDqPt6PI.css → index-7RvlJjJ9.css} +1 -1
- package/dist/web/assets/{index-COflQOwF.js → index-Dbwa40_B.js} +1 -1
- package/dist/web/assets/index-cpdSFHAJ.js +383 -0
- package/dist/web/index.html +2 -2
- package/package.json +1 -1
- package/dist/invitation-CBnQyB7o-TmnIj3kx.mjs +0 -3
- package/dist/observability-C08jUFsJ.mjs +0 -4
- package/dist/observability-DPyf745N-BSc8QNcR.mjs +0 -33897
- package/dist/web/assets/index-BxGzfDTS.js +0 -383
|
@@ -0,0 +1,516 @@
|
|
|
1
|
+
import { C as clientCapabilitiesSchema } from "./dist-BkvrONSQ.mjs";
|
|
2
|
+
import { a as ConflictError, i as ClientUserMismatchError, l as organizations, n as BadRequestError, s as NotFoundError, u as users } from "./errors-BmyRwN0Y-CIZZ_sDc.mjs";
|
|
3
|
+
import { and, eq, inArray, ne, sql } from "drizzle-orm";
|
|
4
|
+
import { index, integer, jsonb, pgTable, text, timestamp, unique } from "drizzle-orm/pg-core";
|
|
5
|
+
//#region ../server/dist/client-BCaK653p.mjs
|
|
6
|
+
/**
 * Client connections. A client is a single SDK process (AgentRuntime) that may
 * host multiple agents. From the unified-user-token milestone on, a client is
 * owned by a user — Rule R-RUN requires `clients.user_id == jwt.userId` for
 * every `agent:bind` request. `user_id` is nullable only to accommodate legacy
 * rows created before JWT-on-handshake; the WS handshake claims the row on
 * first re-register under an authenticated JWT (see `client:register` M13).
 *
 * A client is also bound to exactly one organization for its lifetime. The
 * `organization_id` column is populated on first registration from the
 * authenticated JWT's org claim and never changes thereafter. Re-registering
 * the same clientId under a JWT for a different org is rejected with
 * `CLIENT_ORG_MISMATCH` — the CLI responds by abandoning the local clientId
 * and registering a new one instead (see docs/multi-tenancy-hardening-design.md).
 */
const clients = pgTable("clients", {
  id: text("id").primaryKey(),
  // Owner; nullable for legacy rows (see above). Cleared when the user is deleted.
  userId: text("user_id").references(() => users.id, { onDelete: "set null" }),
  organizationId: text("organization_id").notNull().references(() => organizations.id),
  // "connected" | "disconnected" as written by registerClient / disconnectClient below.
  status: text("status").notNull().default("disconnected"),
  sdkVersion: text("sdk_version"),
  hostname: text("hostname"),
  os: text("os"),
  // Server instance that last saw this client; used by cleanupStaleClients.
  instanceId: text("instance_id"),
  connectedAt: timestamp("connected_at", { withTimezone: true }),
  // Slid forward on register/heartbeat/disconnect; feeds deriveAuthState.
  lastSeenAt: timestamp("last_seen_at", { withTimezone: true }).notNull().defaultNow(),
  // Free-form; `metadata.capabilities` is managed by updateClientCapabilities.
  metadata: jsonb("metadata").$type()
}, (table) => [index("idx_clients_user").on(table.userId), index("idx_clients_org").on(table.organizationId)]);
|
|
34
|
+
/** Agent registration. Each agent owns a unique inboxId for message delivery. */
const agents = pgTable("agents", {
  uuid: text("uuid").primaryKey(),
  name: text("name"),
  organizationId: text("organization_id").notNull().references(() => organizations.id),
  type: text("type").notNull(),
  displayName: text("display_name").notNull(),
  delegateMention: text("delegate_mention"),
  // Unique delivery address for this agent's messages.
  inboxId: text("inbox_id").unique().notNull(),
  // Soft-delete is modeled as status = "deleted" (see ne(status, "deleted") filters below).
  status: text("status").notNull().default("active"),
  source: text("source"),
  visibility: text("visibility").notNull().default("private"),
  metadata: jsonb("metadata").$type().notNull().default({}),
  // References members.id (see claimClient's join); not a users FK.
  managerId: text("manager_id").notNull(),
  // Pin to a client; RESTRICT blocks deleting a client while agents are pinned
  // (retireClient converts that into a ConflictError before the FK can fire).
  clientId: text("client_id").references(() => clients.id, { onDelete: "restrict" }),
  runtimeProvider: text("runtime_provider").notNull().default("claude-code"),
  createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
  updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow()
}, (table) => [
  index("idx_agents_org").on(table.organizationId),
  index("idx_agents_manager").on(table.managerId),
  index("idx_agents_visibility_org").on(table.organizationId, table.visibility),
  index("idx_agents_client").on(table.clientId),
  unique("uq_agents_org_name").on(table.organizationId, table.name)
]);
|
|
59
|
+
/** Organization membership. Links a user to an org with a role and a 1:1 human agent. */
const members = pgTable("members", {
  id: text("id").primaryKey(),
  userId: text("user_id").notNull().references(() => users.id),
  organizationId: text("organization_id").notNull().references(() => organizations.id),
  // The member's dedicated human agent — unique, so the mapping is 1:1.
  agentId: text("agent_id").unique().notNull().references(() => agents.uuid),
  role: text("role").notNull(),
  status: text("status").notNull().default("active"),
  createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow()
}, (table) => [
  // A user can belong to an org at most once.
  unique("uq_members_user_org").on(table.userId, table.organizationId),
  index("idx_members_user").on(table.userId),
  index("idx_members_org").on(table.organizationId)
]);
|
|
73
|
+
/** Agent presence and runtime state. Tracked via WebSocket connections; stale entries are cleaned up using server_instances heartbeat. */
const agentPresence = pgTable("agent_presence", {
  // One presence row per agent; removed automatically when the agent row goes.
  agentId: text("agent_id").primaryKey().references(() => agents.uuid, { onDelete: "cascade" }),
  // "online" | "offline" per the write paths in this module.
  status: text("status").notNull().default("offline"),
  // Server instance holding the WS connection; used by cleanupStalePresence.
  instanceId: text("instance_id"),
  connectedAt: timestamp("connected_at", { withTimezone: true }),
  // Per-agent liveness; slid by touchAgent, checked by markStaleAgents.
  lastSeenAt: timestamp("last_seen_at", { withTimezone: true }).notNull().defaultNow(),
  // Client the agent is currently bound through; nulled on unbind/disconnect.
  clientId: text("client_id").references(() => clients.id, { onDelete: "set null" }),
  runtimeType: text("runtime_type"),
  runtimeVersion: text("runtime_version"),
  // Client-reported state (e.g. "idle" at bind); cleared by runtimeFieldsReset.
  runtimeState: text("runtime_state"),
  activeSessions: integer("active_sessions"),
  totalSessions: integer("total_sessions"),
  runtimeUpdatedAt: timestamp("runtime_updated_at", { withTimezone: true })
});
|
|
88
|
+
/** Server instance heartbeat. Used to detect crashed instances and clean up associated agent_presence records. */
const serverInstances = pgTable("server_instances", {
  instanceId: text("instance_id").primaryKey(),
  // Upserted by heartbeatInstance; staleness cutoff applied by the cleanup sweeps.
  lastHeartbeat: timestamp("last_heartbeat", { withTimezone: true }).notNull().defaultNow()
});
|
|
93
|
+
/**
 * Build the partial-update object that clears all client-reported runtime
 * columns and stamps both bookkeeping timestamps with the supplied instant.
 * Shared by every "agent went offline / got unbound" write path.
 */
function runtimeFieldsReset(now) {
  const clearedRuntimeColumns = {
    runtimeState: null,
    activeSessions: null,
    totalSessions: null
  };
  return { ...clearedRuntimeColumns, runtimeUpdatedAt: now, lastSeenAt: now };
}
|
|
103
|
+
/** Force a single agent's presence row to "offline" and detach it from its server instance. */
async function setOffline(db, agentId) {
  const at = new Date();
  const offlinePatch = {
    status: "offline",
    instanceId: null,
    ...runtimeFieldsReset(at)
  };
  await db.update(agentPresence).set(offlinePatch).where(eq(agentPresence.agentId, agentId));
}
|
|
111
|
+
/** Fetch one agent's presence row, or null when the agent has never appeared. */
async function getPresence(db, agentId) {
  const rows = await db
    .select()
    .from(agentPresence)
    .where(eq(agentPresence.agentId, agentId))
    .limit(1);
  return rows[0] ?? null;
}
|
|
115
|
+
/** Count presence rows currently marked "online"; 0 when none exist. */
async function getOnlineCount(db) {
  const rows = await db
    .select({ count: sql`count(*)::int` })
    .from(agentPresence)
    .where(eq(agentPresence.status, "online"));
  const first = rows[0];
  return first?.count ?? 0;
}
|
|
119
|
+
/**
 * Upsert an agent's presence row to "online" when a client binds the agent.
 * Keyed on agent_id; both insert and update branches share the same `now`
 * for connectedAt / lastSeenAt / runtimeUpdatedAt and start runtimeState at
 * "idle". The update branch additionally nulls the session counters so stale
 * values from a previous bind do not survive a rebind.
 */
async function bindAgent(db, agentId, data) {
	const now = /* @__PURE__ */ new Date();
	await db.insert(agentPresence).values({
		agentId,
		status: "online",
		instanceId: data.instanceId,
		clientId: data.clientId,
		runtimeType: data.runtimeType,
		// Optional in the payload; stored as explicit NULL when absent.
		runtimeVersion: data.runtimeVersion ?? null,
		runtimeState: "idle",
		connectedAt: now,
		lastSeenAt: now,
		runtimeUpdatedAt: now
	}).onConflictDoUpdate({
		target: agentPresence.agentId,
		set: {
			status: "online",
			instanceId: data.instanceId,
			clientId: data.clientId,
			runtimeType: data.runtimeType,
			runtimeVersion: data.runtimeVersion ?? null,
			runtimeState: "idle",
			// Reset counters: a fresh bind invalidates any previously reported sessions.
			activeSessions: null,
			totalSessions: null,
			connectedAt: now,
			lastSeenAt: now,
			runtimeUpdatedAt: now
		}
	});
}
|
|
149
|
+
/** Detach an agent from its client: presence goes offline and client_id is cleared. */
async function unbindAgent(db, agentId) {
  const at = new Date();
  await db
    .update(agentPresence)
    .set({ status: "offline", clientId: null, ...runtimeFieldsReset(at) })
    .where(eq(agentPresence.agentId, agentId));
}
|
|
157
|
+
/** Persist a client-reported runtime state for one agent.
 *
 * When an org-scoped notifier is supplied, a PG NOTIFY is emitted on the
 * `runtime_state_changes` channel so the pulse aggregator (and future
 * admin-side consumers) can observe the transition. The notify is
 * fire-and-forget — a failure must never fail the state write — matching
 * notifier semantics elsewhere in this module. */
async function setRuntimeState(db, agentId, runtimeState, options) {
  const at = new Date();
  await db
    .update(agentPresence)
    .set({ runtimeState, runtimeUpdatedAt: at, lastSeenAt: at })
    .where(eq(agentPresence.agentId, agentId));
  const notifier = options?.notifier;
  if (notifier && options.organizationId) {
    // Deliberately not awaited in effect: rejection is swallowed.
    notifier.notifyRuntimeStateChange(agentId, runtimeState, options.organizationId).catch(() => {});
  }
}
|
|
172
|
+
/** Touch agent last_seen_at on heartbeat (per-agent liveness). */
async function touchAgent(db, agentId) {
  const seenAt = new Date();
  await db
    .update(agentPresence)
    .set({ lastSeenAt: seenAt })
    .where(eq(agentPresence.agentId, agentId));
}
|
|
176
|
+
/** Upsert this server instance's heartbeat row with the current time. */
async function heartbeatInstance(db, instanceId) {
  await db
    .insert(serverInstances)
    .values({ instanceId, lastHeartbeat: new Date() })
    .onConflictDoUpdate({
      target: serverInstances.instanceId,
      set: { lastHeartbeat: new Date() }
    });
}
|
|
185
|
+
/**
 * M1: Mark agents as offline whose last_seen_at is older than staleSeconds.
 * Unlike cleanupStalePresence (which checks instance liveness), this checks
 * per-agent heartbeat liveness — detecting agents that stopped heartbeating
 * while the client process may still be alive.
 *
 * Returns the list of agent IDs that were marked stale (for notification in Step 6).
 */
async function markStaleAgents(db, staleSeconds = 60) {
	// NOTE(review): instance_id is intentionally left intact here (contrast
	// cleanupStalePresence) — presumably because the server instance itself is
	// still alive; confirm against the M1 design notes.
	return (await db.execute(sql`
    UPDATE agent_presence SET
      status = 'offline',
      client_id = NULL,
      runtime_state = NULL,
      active_sessions = NULL,
      total_sessions = NULL,
      runtime_updated_at = NOW()
    WHERE status = 'online'
      AND last_seen_at < NOW() - make_interval(secs => ${staleSeconds})
    RETURNING agent_id
  `)).map((r) => r.agent_id);
}
|
|
207
|
+
/**
 * Sweep: mark agents offline whose owning server instance stopped sending
 * heartbeats for longer than staleSeconds. Clears instance_id and all
 * runtime-reported columns. Returns the number of rows flipped (unlike
 * markStaleAgents, which returns the affected agent ids).
 */
async function cleanupStalePresence(db, staleSeconds = 60) {
	return (await db.execute(sql`
    UPDATE agent_presence SET status = 'offline', instance_id = NULL,
      runtime_state = NULL,
      active_sessions = NULL, total_sessions = NULL,
      runtime_updated_at = NOW()
    WHERE instance_id IN (
      SELECT instance_id FROM server_instances
      WHERE last_heartbeat < NOW() - make_interval(secs => ${staleSeconds})
    )
    AND status = 'online'
    RETURNING agent_id
  `)).length;
}
|
|
221
|
+
/**
 * Assert the caller can act on this client. Throws 404 for both "not found"
 * and "not yours" to prevent UUID enumeration. The client is owned by exactly
 * one user; cross-user admin access is no longer supported by this code path
 * (see decouple-client-from-identity-design §4.10.5 option A). Cross-user
 * ownership transfer goes through `claimClient` in PR-B.
 */
async function assertClientOwner(db, clientId, scope) {
  const rows = await db
    .select({ id: clients.id, userId: clients.userId })
    .from(clients)
    .where(eq(clients.id, clientId))
    .limit(1);
  const found = rows[0];
  const ownedByCaller = found !== undefined && found.userId === scope.userId;
  // Same error for "missing" and "owned by someone else" — no enumeration oracle.
  if (!ownedByCaller) throw new NotFoundError(`Client "${clientId}" not found`);
}
|
|
235
|
+
/**
 * Upsert the clients row for a given `client_id` under an authenticated user.
 *
 * Claim semantics (decouple-client-from-identity §4.1.1):
 * - New client_id → INSERT with the authenticated user_id. `organization_id`
 *   is written as a placeholder (NOT NULL legacy column; no longer consumed
 *   by any read path) sourced from the caller-supplied JWT default org.
 * - Existing row with the same user_id → refresh runtime columns.
 *   `organization_id` is **not** updated on conflict, so the placeholder set
 *   at first insert sticks for the row's lifetime.
 * - Existing row with a different user_id → raises
 *   {@link ClientUserMismatchError} (WS close 4403). The CLI guides the
 *   operator through `first-tree-hub client claim --confirm` to take
 *   ownership, which unpins the previous owner's agents from the machine.
 */
async function registerClient(db, data) {
	const now = /* @__PURE__ */ new Date();
	// Pre-read only id/userId to detect a foreign owner before touching the row.
	const [existing] = await db.select({
		id: clients.id,
		userId: clients.userId
	}).from(clients).where(eq(clients.id, data.clientId)).limit(1);
	// Legacy rows with a NULL userId are claimable here; only a *different*
	// non-null owner triggers the mismatch (WS close 4403).
	if (existing?.userId && existing.userId !== data.userId) throw new ClientUserMismatchError(`Client "${data.clientId}" is owned by a different user. Run \`first-tree-hub client claim --confirm\` to transfer ownership.`);
	await db.insert(clients).values({
		id: data.clientId,
		userId: data.userId,
		// Placeholder on first insert; never refreshed on conflict (see doc above).
		organizationId: data.organizationId,
		status: "connected",
		instanceId: data.instanceId,
		hostname: data.hostname ?? null,
		os: data.os ?? null,
		sdkVersion: data.sdkVersion ?? null,
		connectedAt: now,
		lastSeenAt: now
	}).onConflictDoUpdate({
		target: clients.id,
		set: {
			// Claims legacy NULL-owner rows under the authenticated user.
			userId: data.userId,
			status: "connected",
			instanceId: data.instanceId,
			hostname: data.hostname ?? null,
			os: data.os ?? null,
			sdkVersion: data.sdkVersion ?? null,
			connectedAt: now,
			lastSeenAt: now
		}
	});
}
|
|
282
|
+
/**
 * Transfer ownership of a client row to a new user, unpinning any agents
 * whose manager belonged to the previous owner. Atomic: caller is guaranteed
 * either a fully-applied ownership flip + bulk unpin, or no change. Idempotent
 * when `newUserId` already owns the row.
 *
 * Manager → user resolution goes through the members JOIN (the agents table
 * carries only `manager_id`); cross-org agents under the same previous owner
 * are unpinned together (decouple-client-from-identity §4.4).
 *
 * Caller is responsible for the caller-side authorization (the new owner must
 * be the authenticated request's user). The structured log
 * `event: client.owner_transfer` is emitted by the caller after the
 * transaction commits, using the returned `previousUserId` /
 * `unpinnedAgentIds`.
 */
async function claimClient(db, clientId, newUserId) {
	return db.transaction(async (tx) => {
		// Row-lock the client so concurrent claims/retires serialize on it.
		const [locked] = await tx.execute(sql`SELECT id, user_id FROM clients WHERE id = ${clientId} FOR UPDATE`);
		if (!locked) throw new NotFoundError(`Client "${clientId}" not found`);
		const previousUserId = locked.user_id;
		// Idempotent fast path: the requested owner already holds the row.
		if (previousUserId === newUserId) return {
			previousUserId,
			unpinnedAgentIds: []
		};
		let unpinnedAgentIds = [];
		if (previousUserId !== null) {
			// Resolve the previous owner's agents via members (agents only carry manager_id).
			unpinnedAgentIds = (await tx.select({ uuid: agents.uuid }).from(agents).innerJoin(members, eq(agents.managerId, members.id)).where(and(eq(agents.clientId, clientId), eq(members.userId, previousUserId)))).map((r) => r.uuid);
			if (unpinnedAgentIds.length > 0) {
				const now = /* @__PURE__ */ new Date();
				// Unpin agents and force their presence offline inside the same txn.
				await tx.update(agents).set({
					clientId: null,
					updatedAt: now
				}).where(inArray(agents.uuid, unpinnedAgentIds));
				await tx.update(agentPresence).set({
					status: "offline",
					clientId: null,
					...runtimeFieldsReset(now)
				}).where(inArray(agentPresence.agentId, unpinnedAgentIds));
			}
		}
		// Ownership flip happens last, after the unpin writes are staged.
		await tx.update(clients).set({ userId: newUserId }).where(eq(clients.id, clientId));
		return {
			previousUserId,
			unpinnedAgentIds
		};
	});
}
|
|
330
|
+
/** Mark a client disconnected and force every agent bound through it offline. */
async function disconnectClient(db, clientId) {
  const at = new Date();
  // Presence first: everything still attached to this client goes offline.
  await db
    .update(agentPresence)
    .set({ status: "offline", clientId: null, ...runtimeFieldsReset(at) })
    .where(eq(agentPresence.clientId, clientId));
  // Then flip the client row itself, recording the final last-seen time.
  await db
    .update(clients)
    .set({ status: "disconnected", lastSeenAt: at })
    .where(eq(clients.id, clientId));
}
|
|
342
|
+
/** Slide the client's last_seen_at forward on heartbeat. */
async function heartbeatClient(db, clientId) {
  const seenAt = new Date();
  await db.update(clients).set({ lastSeenAt: seenAt }).where(eq(clients.id, clientId));
}
|
|
345
|
+
/** Fetch a single client row by id, or null when absent. */
async function getClient(db, clientId) {
  const rows = await db
    .select()
    .from(clients)
    .where(eq(clients.id, clientId))
    .limit(1);
  return rows[0] ?? null;
}
|
|
349
|
+
/**
 * List the active agents currently pinned to a client. Used by the WS
 * registration handshake to backfill `agent:pinned` notifications missed while
 * the client was offline — without it, an admin who pinned an agent during a
 * client outage would still need a manual `first-tree-hub agent add`.
 *
 * Excludes soft-deleted agents (status = "deleted"). Human agents are
 * naturally excluded by the `clientId` filter — they never carry a clientId.
 */
async function listActiveAgentsPinnedToClient(db, clientId) {
  const pinnedAndNotDeleted = and(eq(agents.clientId, clientId), ne(agents.status, "deleted"));
  return db
    .select({
      uuid: agents.uuid,
      name: agents.name,
      displayName: agents.displayName,
      type: agents.type,
      runtimeProvider: agents.runtimeProvider
    })
    .from(agents)
    .where(pinnedAndNotDeleted);
}
|
|
367
|
+
/**
 * Member-scoped: every active agent pinned to a client owned by this user.
 * Used by client startup to reconcile its local YAML against the authoritative
 * `agents.runtime_provider`. Cross-org by design — a client is owned by a
 * user, not an org (decouple-client-from-identity §4.1).
 */
async function listMyPinnedAgents(db, scope) {
  const joined = await db
    .select({
      agentId: agents.uuid,
      clientId: agents.clientId,
      runtimeProvider: agents.runtimeProvider
    })
    .from(agents)
    .innerJoin(clients, eq(agents.clientId, clients.id))
    .where(and(eq(clients.userId, scope.userId), ne(agents.status, "deleted")));
  // The inner join already implies a non-null clientId; the filter is kept as a
  // defensive narrowing before the rows are copied into plain result objects.
  return joined
    .filter((r) => r.clientId !== null)
    .map((r) => ({
      agentId: r.agentId,
      clientId: r.clientId,
      runtimeProvider: r.runtimeProvider
    }));
}
|
|
384
|
+
/**
 * Replace this client's capabilities snapshot. Capabilities live under
 * `clients.metadata.capabilities` (Option C — no dedicated column); other
 * `metadata` subkeys are preserved on merge.
 *
 * Caller is expected to have already passed `assertClientOwner`.
 *
 * NOTE(review): the read-merge-write below is not transactional — two
 * concurrent updates to different `metadata` subkeys could lose one write.
 * Presumably acceptable because only the owning client writes its own row;
 * confirm before exposing additional metadata writers.
 */
async function updateClientCapabilities(db, clientId, capabilities) {
	// Validate before touching the DB; a malformed payload is a 400, not a 500.
	const parsed = clientCapabilitiesSchema.safeParse(capabilities);
	if (!parsed.success) throw new BadRequestError(`Invalid capabilities payload: ${parsed.error.message}`);
	const [client] = await db.select({ metadata: clients.metadata }).from(clients).where(eq(clients.id, clientId)).limit(1);
	if (!client) throw new NotFoundError(`Client "${clientId}" not found`);
	// Shallow merge: only the `capabilities` subkey is replaced.
	const merged = {
		...client.metadata ?? {},
		capabilities: parsed.data
	};
	await db.update(clients).set({ metadata: merged }).where(eq(clients.id, clientId));
}
|
|
402
|
+
/**
 * Admin-only cross-user listing: every client owned by an active member of
 * `orgId`. Joining `clients → members.user_id` instead of `clients.organization_id`
 * keeps the read path consistent with the rule that connection has no
 * runtime relationship to organization (decouple-client-from-identity §A).
 *
 * The caller must verify admin role realtime via `requireMemberInOrg` before
 * invoking this function — the service does not re-check, so it is
 * unsafe to expose without that gate.
 *
 * Returns the rows decorated with `agentCount` (see attachAgentCounts).
 */
async function listClientsForOrgAdmin(db, orgId) {
	return attachAgentCounts(db, await db.select({
		id: clients.id,
		userId: clients.userId,
		organizationId: clients.organizationId,
		status: clients.status,
		sdkVersion: clients.sdkVersion,
		hostname: clients.hostname,
		os: clients.os,
		instanceId: clients.instanceId,
		connectedAt: clients.connectedAt,
		lastSeenAt: clients.lastSeenAt,
		metadata: clients.metadata
	}).from(clients).innerJoin(members, eq(members.userId, clients.userId)).where(and(eq(members.organizationId, orgId), eq(members.status, "active"))));
}
|
|
427
|
+
/**
 * Infer whether the client's locally-cached refresh token can plausibly
 * still mint access tokens. Used by the Web admin dashboard to render an
 * "AUTH EXPIRED" pill on rows whose offline duration has exceeded the
 * server's configured refresh-token TTL.
 *
 * Uses `lastSeenAt` (not `connectedAt`) because a healthy long-lived
 * client slides the refresh token continuously, so the absolute connect
 * time is no proxy for liveness. `lastSeenAt` is updated on register,
 * heartbeat, and the final disconnect — it lower-bounds the issue time
 * of the refresh token the client most likely still holds.
 *
 * Pure function, no DB access; the column-less design means there's no
 * server-side revocation path yet — every "expired" decision is purely
 * time-based. If we ever want admin-driven revocation, add a column
 * back and OR its value into this function.
 *
 * @returns {"ok" | "expired"}
 */
function deriveAuthState(row, refreshTokenExpirySeconds) {
  // A connected client is, by definition, still authenticating successfully.
  if (row.status !== "disconnected") return "ok";
  const offlineMs = Date.now() - row.lastSeenAt.getTime();
  const ttlMs = refreshTokenExpirySeconds * 1e3;
  return offlineMs > ttlMs ? "expired" : "ok";
}
|
|
450
|
+
/**
 * Decorate client rows with the number of non-deleted agents pinned to each.
 * Runs one grouped count over the agents table and maps it back onto the
 * rows; clients with no pinned agents get agentCount = 0.
 */
async function attachAgentCounts(db, rows) {
	const counts = await db.select({
		clientId: agents.clientId,
		count: sql`count(*)::int`
	}).from(agents).where(and(sql`${agents.clientId} IS NOT NULL`, ne(agents.status, "deleted"))).groupBy(agents.clientId);
	// clientId → count lookup for the merge below.
	const countMap = new Map(counts.map((c) => [c.clientId, c.count]));
	return rows.map((row) => ({
		...row,
		agentCount: countMap.get(row.id) ?? 0
	}));
}
|
|
461
|
+
/**
 * Retire a client row. Refuses while any non-deleted agent is still pinned to
 * it — per proposal M12, the operator must delete the agents first
 * (no reassign in this milestone). Throws {@link ConflictError} with the
 * pinned agent list so the UI can show the exact names.
 *
 * Runs in a single transaction with `SELECT … FOR UPDATE` on the client row
 * so a concurrent `createAgent(clientId=X)` cannot land between the pinned
 * check and the DELETE — otherwise the agents.client_id RESTRICT FK would
 * surface as a raw PG 23503 instead of the ConflictError the caller expects.
 */
async function retireClient(db, clientId) {
	await db.transaction(async (tx) => {
		// Lock the row first; everything below is consistent under that lock.
		const [locked] = await tx.execute(sql`SELECT id FROM clients WHERE id = ${clientId} FOR UPDATE`);
		if (!locked) throw new NotFoundError(`Client "${clientId}" not found`);
		const pinned = await tx.select({
			uuid: agents.uuid,
			name: agents.name
		}).from(agents).where(and(eq(agents.clientId, clientId), ne(agents.status, "deleted")));
		if (pinned.length > 0) {
			// Surface names where available so the UI can list exactly what blocks retirement.
			const names = pinned.map((a) => a.name ?? a.uuid).join(", ");
			throw new ConflictError(`Cannot retire client "${clientId}" — ${pinned.length} agent(s) still pinned (${names}). Delete the pinned agents first (no reassign is available in this milestone).`);
		}
		// Soft-deleted agents may still reference the client; detach them so the
		// RESTRICT FK does not block the DELETE below.
		await tx.update(agents).set({ clientId: null }).where(and(eq(agents.clientId, clientId), eq(agents.status, "deleted")));
		await tx.delete(clients).where(eq(clients.id, clientId));
	});
}
|
|
488
|
+
/**
 * System-scope sweep: mark clients as disconnected when their last-seen
 * server instance stopped sending heartbeats. Runs globally across all orgs
 * by design — it is invoked only by internal timers, never from a
 * user-scoped request, so the per-org filter the read paths enforce does not
 * apply. Org isolation on the data these clients belong to is still
 * enforced at the read paths (see `assertClientOwner` / `listClients`).
 *
 * Returns the number of clients flipped to "disconnected".
 */
async function cleanupStaleClients(db, staleSeconds = 60) {
	const result = await db.execute(sql`
    UPDATE clients SET status = 'disconnected'
    WHERE instance_id IN (
      SELECT instance_id FROM server_instances
      WHERE last_heartbeat < NOW() - make_interval(secs => ${staleSeconds})
    )
    AND status = 'connected'
    RETURNING id
  `);
	if (result.length > 0) {
		// Cascade: any presence rows still bound through the stale clients go offline.
		const staleIds = result.map((r) => r.id);
		await db.update(agentPresence).set({
			status: "offline",
			...runtimeFieldsReset(/* @__PURE__ */ new Date())
		}).where(inArray(agentPresence.clientId, staleIds));
	}
	return result.length;
}
|
|
515
|
+
//#endregion
|
|
516
|
+
export { serverInstances as C, unbindAgent as D, touchAgent as E, updateClientCapabilities as O, retireClient as S, setRuntimeState as T, listClientsForOrgAdmin as _, claimClient as a, members as b, clients as c, getClient as d, getOnlineCount as f, listActiveAgentsPinnedToClient as g, heartbeatInstance as h, bindAgent as i, deriveAuthState as l, heartbeatClient as m, agents as n, cleanupStaleClients as o, getPresence as p, assertClientOwner as r, cleanupStalePresence as s, agentPresence as t, disconnectClient as u, listMyPinnedAgents as v, setOffline as w, registerClient as x, markStaleAgents as y };
|