@naisys/hub 3.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,279 @@
1
+ import { AgentConfigFileSchema, calculatePeriodBoundaries, } from "@naisys/common";
2
+ import { CostWriteRequestSchema, HubEvents, } from "@naisys/hub-protocol";
3
// How often the periodic spend-limit sweep runs (10 seconds).
const SPEND_LIMIT_CHECK_INTERVAL_MS = 10_000;
/**
 * Handles cost_write events from NAISYS instances (fire-and-forget) and
 * enforces global and per-agent spend limits.
 *
 * Responsibilities:
 *  - Persist each cost entry and roll session totals forward.
 *  - Push rolled-up cost deltas to supervisor connections (COST_PUSH).
 *  - Periodically suspend/resume agents via COST_CONTROL when spend limits
 *    are exceeded / reset.
 *  - Decrement and report per-user budget_left in the cost_write ack.
 *
 * @param naisysServer - hub socket server (registerEvent / sendMessage / broadcastToSupervisors)
 * @param deps - object providing `hubDb` (Prisma-style client)
 * @param logService - hub logger
 * @param heartbeatService - source of active user IDs and agent→host mapping
 * @param configService - source of the hub-level spend limit config
 * @returns service handle: { cleanup, checkSpendLimits, isUserSpendSuspended }
 */
export function createHubCostService(naisysServer, { hubDb }, logService, heartbeatService, configService) {
    // Track which users have been suspended due to spend limit overrun.
    // In-memory only: suspension state does not survive a hub restart
    // (the periodic check re-derives it from cost totals).
    const suspendedByGlobal = new Set();
    const suspendedByAgent = new Set();
    naisysServer.registerEvent(HubEvents.COST_WRITE, async (hostId, data, ack) => {
        try {
            const parsed = CostWriteRequestSchema.parse(data);
            // Roll up cost deltas by user/run/session for supervisor push,
            // and per-user totals for budget_left decrement
            const costPushMap = new Map();
            const userCostTotals = new Map();
            // Entries are written sequentially (not in parallel) so the DB
            // writes for one entry complete before the next starts.
            for (const entry of parsed.entries) {
                await hubDb.costs.create({
                    data: {
                        user_id: entry.userId,
                        run_id: entry.runId,
                        session_id: entry.sessionId,
                        host_id: hostId,
                        source: entry.source,
                        model: entry.model,
                        cost: entry.cost,
                        input_tokens: entry.inputTokens,
                        output_tokens: entry.outputTokens,
                        cache_write_tokens: entry.cacheWriteTokens,
                        cache_read_tokens: entry.cacheReadTokens,
                    },
                });
                // Update run_session total_cost
                await hubDb.run_session.updateMany({
                    where: {
                        user_id: entry.userId,
                        run_id: entry.runId,
                        session_id: entry.sessionId,
                    },
                    data: {
                        total_cost: { increment: entry.cost },
                    },
                });
                // Accumulate the supervisor-facing delta per user/run/session.
                const key = `${entry.userId}:${entry.runId}:${entry.sessionId}`;
                const existing = costPushMap.get(key);
                if (existing) {
                    existing.costDelta += entry.cost;
                }
                else {
                    costPushMap.set(key, {
                        userId: entry.userId,
                        runId: entry.runId,
                        sessionId: entry.sessionId,
                        costDelta: entry.cost,
                    });
                }
                userCostTotals.set(entry.userId, (userCostTotals.get(entry.userId) ?? 0) + entry.cost);
            }
            // Push rolled-up cost deltas to supervisor connections
            if (costPushMap.size > 0) {
                naisysServer.broadcastToSupervisors(HubEvents.COST_PUSH, {
                    entries: Array.from(costPushMap.values()),
                });
            }
            // Re-send cost_control to any suspended users still writing costs
            // (covers instances that missed or ignored the original suspend).
            for (const userId of userCostTotals.keys()) {
                if (suspendedByGlobal.has(userId) || suspendedByAgent.has(userId)) {
                    sendCostControl(userId, false, "Spend limit exceeded");
                }
            }
            // Decrement budget_left and return updated values in the ack so the
            // writing instance sees its remaining budget immediately.
            const budgets = await Promise.all(Array.from(userCostTotals.entries()).map(([userId, batchCost]) => decrementBudgetLeft(hubDb, userId, batchCost)));
            ack({ budgets });
        }
        catch (error) {
            // Fire-and-forget semantics: log and ack with an empty budget list
            // rather than propagating the error to the instance.
            logService.error(`[Hub:Costs] Error processing cost_write from host ${hostId}: ${error}`);
            ack({ budgets: [] });
        }
    });
    // Periodic spend limit checking; errors are caught so one failed sweep
    // never kills the interval.
    const spendLimitCheckInterval = setInterval(() => void checkSpendLimits().catch((error) => {
        logService.error(`[Hub:Costs] Error in spend limit check: ${error}`);
    }), SPEND_LIMIT_CHECK_INTERVAL_MS);
    /**
     * Evaluate global and per-agent spend limits for all relevant users.
     * Checks active users, all currently-suspended users (so they can be
     * resumed when the period resets), and any caller-supplied candidates.
     *
     * @param candidateUserIds - optional iterable of extra user IDs to check
     */
    async function checkSpendLimits(candidateUserIds) {
        const activeUserIds = heartbeatService.getActiveUserIds();
        const usersToCheck = new Set(activeUserIds);
        for (const userId of suspendedByGlobal)
            usersToCheck.add(userId);
        for (const userId of suspendedByAgent)
            usersToCheck.add(userId);
        if (candidateUserIds) {
            for (const userId of candidateUserIds)
                usersToCheck.add(userId);
        }
        if (usersToCheck.size === 0)
            return;
        const config = configService.getConfig().config;
        const spendLimitDollars = config?.spendLimitDollars;
        const spendLimitHours = config?.spendLimitHours;
        // Query user configs (needed by both global and per-agent checks)
        const users = await hubDb.users.findMany({
            where: { id: { in: Array.from(usersToCheck) } },
            select: {
                id: true,
                config: true,
                user_notifications: {
                    select: { spend_limit_reset_at: true },
                },
            },
        });
        // Identify which users have per-agent spend limits (exempt from global)
        // NOTE(review): user.config is JSON.parse'd here and again in the
        // per-agent loop below — could be parsed once and reused.
        const usersWithAgentLimit = new Set();
        for (const user of users) {
            try {
                const parsed = AgentConfigFileSchema.safeParse(JSON.parse(user.config));
                if (parsed.success && parsed.data.spendLimitDollars !== undefined) {
                    usersWithAgentLimit.add(user.id);
                }
            }
            catch {
                /* ignore parse errors */
            }
        }
        // 1. Global spend limit — only applies to agents WITHOUT a per-agent limit
        if (spendLimitDollars !== undefined) {
            await checkGlobalSpendLimit(hubDb, usersToCheck, spendLimitDollars, spendLimitHours, usersWithAgentLimit);
        }
        // 2. Per-agent spend limit checks
        for (const user of users) {
            try {
                const parsed = AgentConfigFileSchema.safeParse(JSON.parse(user.config));
                if (!parsed.success)
                    continue;
                // Shadows the hub-level `config` above intentionally: from here
                // on, `config` is this agent's parsed config file.
                const config = parsed.data;
                if (config.spendLimitDollars === undefined)
                    continue;
                await checkAgentSpendLimit(hubDb, user.id, config.spendLimitDollars, config.spendLimitHours, user.user_notifications?.spend_limit_reset_at ?? undefined);
            }
            catch (userError) {
                // One user's failure must not block the remaining checks.
                logService.error(`[Hub:Costs] Error checking spend limit for user ${user.id}: ${userError}`);
            }
        }
    }
    /**
     * Finds the total cost over the period, if this period is used up, we wait until the next period to resume.
     * We don't use a sliding window as that would cause the LLM to get stuck in a cycle of sending off a query,
     * only for the window to close again, and the LLM cache to *expire* creating constant cache misses.
     *
     * @param hubDb - DB client
     * @param spendLimitHours - period length; undefined means all-time sum
     * @param userIdFilter - restrict to one user; omitted for the global sum
     *   (the global total therefore includes costs from ALL users)
     * @param spendLimitResetAt - manual reset timestamp; wins over the period
     *   start when it is later, so a manual reset shrinks the window
     * @returns summed cost (0 when no rows match)
     */
    async function queryCostSum(hubDb, spendLimitHours, userIdFilter, spendLimitResetAt) {
        const where = {};
        if (userIdFilter) {
            where.user_id = userIdFilter;
        }
        let effectiveStart;
        if (spendLimitHours !== undefined) {
            const { periodStart } = calculatePeriodBoundaries(spendLimitHours);
            effectiveStart = periodStart;
        }
        if (spendLimitResetAt &&
            (!effectiveStart || spendLimitResetAt > effectiveStart)) {
            effectiveStart = spendLimitResetAt;
        }
        if (effectiveStart) {
            where.created_at = { gte: effectiveStart };
        }
        const result = await hubDb.costs.aggregate({
            where,
            _sum: { cost: true },
        });
        return result._sum.cost ?? 0;
    }
    /**
     * Send a COST_CONTROL message (enable/disable LLM spend) for a user to
     * every host the agent is currently running on. No-op if the agent is
     * not running anywhere.
     */
    function sendCostControl(userId, enabled, reason) {
        const hostIds = heartbeatService.findHostsForAgent(userId);
        for (const hostId of hostIds) {
            naisysServer.sendMessage(hostId, HubEvents.COST_CONTROL, {
                userId,
                enabled,
                reason,
            });
        }
    }
    /** Persist (or clear, with null) the user's suspension reason for display. */
    async function setCostSuspendedReason(hubDb, userId, reason) {
        await hubDb.user_notifications.updateMany({
            where: { user_id: userId },
            data: { cost_suspended_reason: reason },
        });
    }
    /** Check the global spend limit — only applies to agents without a per-agent limit */
    async function checkGlobalSpendLimit(hubDb, usersToCheck, spendLimit, spendLimitHours, usersWithAgentLimit) {
        // No user filter: the global total sums every user's costs in the
        // period, including agents that have their own per-agent limit.
        const totalCost = await queryCostSum(hubDb, spendLimitHours);
        const isOverLimit = totalCost >= spendLimit;
        // Resume helper: only clears the persisted reason when the user is not
        // also suspended by a per-agent limit.
        async function resumeFromGlobal(userId, reason) {
            logService.log(`[Hub:Costs] Resuming user ${userId} (global limit): ${reason}`);
            sendCostControl(userId, true, reason);
            suspendedByGlobal.delete(userId);
            if (!suspendedByAgent.has(userId)) {
                await setCostSuspendedReason(hubDb, userId, null);
            }
        }
        for (const userId of usersToCheck) {
            // Agents with their own spend limit are exempt from the global limit
            if (usersWithAgentLimit.has(userId)) {
                if (suspendedByGlobal.has(userId)) {
                    await resumeFromGlobal(userId, "Agent has per-agent spend limit");
                }
                continue;
            }
            const wasSuspended = suspendedByGlobal.has(userId);
            if (isOverLimit && !wasSuspended) {
                const reason = `Global spend limit of $${spendLimit} reached (total: $${totalCost.toFixed(2)})`;
                logService.log(`[Hub:Costs] Suspending user ${userId} (global limit): ${reason}`);
                sendCostControl(userId, false, reason);
                suspendedByGlobal.add(userId);
                await setCostSuspendedReason(hubDb, userId, reason);
            }
            else if (!isOverLimit && wasSuspended) {
                await resumeFromGlobal(userId, `Global spend limit period reset (total: $${totalCost.toFixed(2)}, limit: $${spendLimit})`);
            }
        }
    }
    /** Check a per-agent spend limit */
    async function checkAgentSpendLimit(hubDb, userId, spendLimit, spendLimitHours, spendLimitResetAt) {
        const periodCost = await queryCostSum(hubDb, spendLimitHours, userId, spendLimitResetAt);
        const isOverLimit = periodCost >= spendLimit;
        const wasSuspended = suspendedByAgent.has(userId);
        // Persist budget_left for supervisor display
        const budgetLeft = Math.max(0, spendLimit - periodCost);
        await hubDb.user_notifications.updateMany({
            where: { user_id: userId },
            data: { budget_left: budgetLeft },
        });
        if (isOverLimit && !wasSuspended) {
            const reason = `Spend limit of $${spendLimit} reached (current: $${periodCost.toFixed(2)})`;
            logService.log(`[Hub:Costs] Suspending user ${userId}: ${reason}`);
            sendCostControl(userId, false, reason);
            suspendedByAgent.add(userId);
            await setCostSuspendedReason(hubDb, userId, reason);
        }
        else if (!isOverLimit && wasSuspended) {
            const reason = `Spend limit period reset (current: $${periodCost.toFixed(2)}, limit: $${spendLimit})`;
            logService.log(`[Hub:Costs] Resuming user ${userId}: ${reason}`);
            sendCostControl(userId, true, reason);
            suspendedByAgent.delete(userId);
            await setCostSuspendedReason(hubDb, userId, null);
        }
    }
    /**
     * Decrement budget_left by the batch cost and return the updated value.
     * Returns { userId, budgetLeft: null } when the user has no budget row,
     * budget_left is unset, or any DB error occurs (best-effort by design).
     * NOTE(review): this is a non-atomic read-modify-write; concurrent
     * cost_write batches for the same user could race — confirm acceptable.
     */
    async function decrementBudgetLeft(hubDb, userId, batchCost) {
        try {
            const notification = await hubDb.user_notifications.findUnique({
                where: { user_id: userId },
                select: { budget_left: true },
            });
            // == null matches both null and undefined.
            if (notification?.budget_left == null) {
                return { userId, budgetLeft: null };
            }
            const budgetLeft = Math.max(0, Number(notification.budget_left) - batchCost);
            await hubDb.user_notifications.update({
                where: { user_id: userId },
                data: { budget_left: budgetLeft },
            });
            return { userId, budgetLeft };
        }
        catch {
            return { userId, budgetLeft: null };
        }
    }
    /** True if the user is currently suspended by either the global or a per-agent limit. */
    function isUserSpendSuspended(userId) {
        return suspendedByGlobal.has(userId) || suspendedByAgent.has(userId);
    }
    /** Stop the periodic spend-limit sweep (event handlers stay registered). */
    function cleanup() {
        clearInterval(spendLimitCheckInterval);
    }
    return {
        cleanup,
        checkSpendLimits,
        isUserSpendSuspended,
    };
}
279
+ //# sourceMappingURL=hubCostService.js.map
@@ -0,0 +1,132 @@
1
+ import { HeartbeatSchema, HUB_HEARTBEAT_INTERVAL_MS, HubEvents, } from "@naisys/hub-protocol";
2
+ /** Tracks NAISYS instance heartbeats and pushes aggregate active user status to all instances */
3
/** Tracks NAISYS instance heartbeats and pushes aggregate active user status to all instances */
export function createHubHeartbeatService(naisysServer, { hubDb }, logService) {
    // Per-host list of active agent user IDs, as reported by heartbeats or
    // explicit start/stop calls. Values are mutated in place.
    const hostActiveAgents = new Map();
    // Per-agent notification watermarks (latestLogId, latestMailId, latestChatId).
    const agentNotifications = new Map();
    // JSON snapshot of the last status payload broadcast; suppresses no-op pushes.
    let lastPushedJson = "";
    // Non-null while inside the 500ms throttle window.
    let throttleTimer = null;

    /** Update a single notification field for an agent */
    function updateAgentNotification(userId, field, value) {
        const record = agentNotifications.get(userId) ??
            { latestLogId: 0, latestMailId: 0, latestChatId: 0 };
        record[field] = value;
        agentNotifications.set(userId, record);
    }

    /** Push aggregate agent status to all connected NAISYS instances */
    function pushAgentsStatus() {
        const payload = {
            hostActiveAgents: Object.fromEntries(hostActiveAgents),
            agentNotifications: Object.fromEntries(agentNotifications),
        };
        const serialized = JSON.stringify(payload);
        // Skip the broadcast entirely when nothing has changed.
        if (serialized !== lastPushedJson) {
            lastPushedJson = serialized;
            naisysServer.broadcastToAll(HubEvents.AGENTS_STATUS, payload);
        }
    }

    /** Throttled push for agent start/stop changes — at most once per 500ms */
    function throttledPushAgentsStatus() {
        if (throttleTimer !== null)
            return;
        // Leading-edge throttle: the first call pushes immediately; calls during
        // the window are dropped (the periodic interval below catches up).
        pushAgentsStatus();
        throttleTimer = setTimeout(() => {
            throttleTimer = null;
        }, 500);
    }

    // Handle heartbeat from NAISYS instances
    naisysServer.registerEvent(HubEvents.HEARTBEAT, async (hostId, data) => {
        const heartbeat = HeartbeatSchema.parse(data);
        // Update in-memory per-host active agent IDs
        hostActiveAgents.set(hostId, heartbeat.activeUserIds);
        try {
            const now = new Date().toISOString();
            // Update host last_active
            await hubDb.hosts.updateMany({
                where: { id: hostId },
                data: { last_active: now },
            });
            // Update user_notifications.last_active for each active user
            if (heartbeat.activeUserIds.length > 0) {
                await hubDb.user_notifications.updateMany({
                    where: { user_id: { in: heartbeat.activeUserIds } },
                    data: { last_active: now, latest_host_id: hostId },
                });
            }
        }
        catch (error) {
            logService.error(`[Hub:Heartbeat] Error updating heartbeat for host ${hostId}: ${error}`);
        }
    });

    // Clean up tracking when a host disconnects
    naisysServer.registerEvent(HubEvents.CLIENT_DISCONNECTED, (hostId) => {
        hostActiveAgents.delete(hostId);
        throttledPushAgentsStatus();
    });

    // Periodically push aggregate active user status to all NAISYS instances
    const pushInterval = setInterval(pushAgentsStatus, HUB_HEARTBEAT_INTERVAL_MS);

    /** Number of agents currently active on the given host (0 if unknown). */
    function getHostActiveAgentCount(hostId) {
        const userIds = hostActiveAgents.get(hostId);
        return userIds ? userIds.length : 0;
    }

    /** Find which hosts a given agent is currently running on */
    function findHostsForAgent(userId) {
        return Array.from(hostActiveAgents.entries())
            .filter(([, userIds]) => userIds.includes(userId))
            .map(([hostId]) => hostId);
    }

    /** Add a userId to a host's active list after a successful start */
    function addStartedAgent(hostId, userId) {
        const current = hostActiveAgents.get(hostId);
        if (!current) {
            hostActiveAgents.set(hostId, [userId]);
        }
        else if (!current.includes(userId)) {
            current.push(userId);
        }
        throttledPushAgentsStatus();
    }

    /** Remove a userId from a host's active list after a successful stop */
    function removeStoppedAgent(hostId, userId) {
        const current = hostActiveAgents.get(hostId);
        const position = current ? current.indexOf(userId) : -1;
        if (position !== -1) {
            current.splice(position, 1);
        }
        throttledPushAgentsStatus();
    }

    /** Get all active user IDs across all connected hosts */
    function getActiveUserIds() {
        const combined = new Set();
        for (const userIds of hostActiveAgents.values()) {
            userIds.forEach((id) => combined.add(id));
        }
        return combined;
    }

    /** Stop the periodic status push (event handlers stay registered). */
    function cleanup() {
        clearInterval(pushInterval);
    }

    return {
        cleanup,
        getActiveUserIds,
        getHostActiveAgentCount,
        findHostsForAgent,
        addStartedAgent,
        removeStoppedAgent,
        updateAgentNotification,
        throttledPushAgentsStatus,
    };
}
132
+ //# sourceMappingURL=hubHeartbeatService.js.map
@@ -0,0 +1,35 @@
1
+ import { HubEvents } from "@naisys/hub-protocol";
2
+ /** Pushes the host list to all connections when connected hosts change */
3
/** Pushes the host list to all connections when connected hosts change */
export function createHubHostService(naisysServer, hostRegistrar, logService) {
    // JSON of the last payload broadcast to everyone; "" forces the next send.
    let lastBroadcastJson = "";

    /**
     * Build the current host list (flagging which hosts are online) and
     * distribute it: directly to a newly connecting client if given, and to
     * everyone when the list differs from the last broadcast.
     */
    function broadcastHostList(newConnection) {
        const onlineHostIds = new Set();
        for (const client of naisysServer.getConnectedClients()) {
            onlineHostIds.add(client.getHostId());
        }
        const hosts = hostRegistrar.getAllHosts().map((host) => ({
            ...host,
            online: onlineHostIds.has(host.hostId),
        }));
        const payload = { hosts };
        const serialized = JSON.stringify(payload);
        // Send to the newly connecting client directly
        newConnection?.sendMessage(HubEvents.HOSTS_UPDATED, payload);
        // Broadcast to all connections only if the list changed
        // (new connection may get a harmless duplicate — HOSTS_UPDATED is idempotent)
        if (serialized === lastBroadcastJson) {
            return;
        }
        lastBroadcastJson = serialized;
        logService.log(`[Hub:Hosts] Broadcasting host list (${hosts.length} hosts)`);
        naisysServer.broadcastToAll(HubEvents.HOSTS_UPDATED, payload);
    }

    naisysServer.registerEvent(HubEvents.CLIENT_CONNECTED, (_hostId, connection) => {
        broadcastHostList(connection);
    });
    naisysServer.registerEvent(HubEvents.HOSTS_CHANGED, async () => {
        logService.log("[Hub:Hosts] Received HOSTS_CHANGED, refreshing cache...");
        await hostRegistrar.refreshHosts();
        lastBroadcastJson = ""; // Force broadcast
        broadcastHostList();
    });
}
35
+ //# sourceMappingURL=hubHostService.js.map
@@ -0,0 +1,121 @@
1
+ import { HubEvents, LogWriteRequestSchema, } from "@naisys/hub-protocol";
2
+ /** Handles log_write events from NAISYS instances (fire-and-forget) */
3
/**
 * Handles log_write events from NAISYS instances (fire-and-forget).
 *
 * For each batch of log entries it: persists the entry, updates the
 * run_session and user_notifications rollups, resolves attachment metadata,
 * and finally pushes the full batch to supervisor connections (LOG_PUSH).
 * Errors are logged, never surfaced to the writing instance (no ack).
 *
 * @param naisysServer - hub socket server (registerEvent / broadcastToSupervisors)
 * @param deps - object providing `hubDb` (Prisma-style client)
 * @param logService - hub logger
 * @param heartbeatService - used to push notification watermarks to instances
 */
export function createHubLogService(naisysServer, { hubDb }, logService, heartbeatService) {
    // Track last pushed log ID per session for gap detection
    // (in-memory only; the first entry after a hub restart has previousId null).
    const lastPushedLogId = new Map();
    naisysServer.registerEvent(HubEvents.LOG_WRITE, async (hostId, data) => {
        try {
            const parsed = LogWriteRequestSchema.parse(data);
            // Collect push entries and session deltas
            const pushEntries = [];
            const sessionUpdates = new Map();
            // Entries are processed sequentially so DB-assigned IDs and the
            // per-session previousId chain stay in order.
            for (const entry of parsed.entries) {
                const now = new Date().toISOString();
                const lineCount = entry.message.split("\n").length;
                const log = await hubDb.context_log.create({
                    data: {
                        user_id: entry.userId,
                        run_id: entry.runId,
                        session_id: entry.sessionId,
                        host_id: hostId,
                        role: entry.role,
                        source: entry.source ?? null,
                        type: entry.type ?? null,
                        message: entry.message,
                        created_at: entry.createdAt,
                        attachment_id: entry.attachmentId ?? null,
                    },
                });
                // Update session table with total lines and last active
                await hubDb.run_session.updateMany({
                    where: {
                        user_id: entry.userId,
                        run_id: entry.runId,
                        session_id: entry.sessionId,
                    },
                    data: {
                        last_active: now,
                        latest_log_id: log.id,
                        total_lines: {
                            increment: lineCount,
                        },
                    },
                });
                // Update user_notifications with latest_log_id and last_active
                await hubDb.user_notifications.updateMany({
                    where: {
                        user_id: entry.userId,
                    },
                    data: {
                        latest_log_id: log.id,
                        last_active: now,
                    },
                });
                // Push notification ID update via heartbeat
                heartbeatService.updateAgentNotification(entry.userId, "latestLogId", log.id);
                // Look up attachment metadata if present; all three stay
                // undefined when there is no attachment or the row is missing.
                let attachmentPublicId;
                let attachmentFilename;
                let attachmentFileSize;
                if (entry.attachmentId) {
                    const att = await hubDb.attachments.findUnique({
                        where: { id: entry.attachmentId },
                        select: { public_id: true, filename: true, file_size: true },
                    });
                    if (att) {
                        attachmentPublicId = att.public_id;
                        attachmentFilename = att.filename;
                        attachmentFileSize = att.file_size;
                    }
                }
                // Collect push entry with DB-assigned ID
                const sessionKey = `${entry.userId}-${entry.runId}-${entry.sessionId}`;
                const previousId = lastPushedLogId.get(sessionKey) ?? null;
                pushEntries.push({
                    id: log.id,
                    previousId,
                    userId: entry.userId,
                    runId: entry.runId,
                    sessionId: entry.sessionId,
                    role: entry.role,
                    source: entry.source,
                    type: entry.type,
                    message: entry.message,
                    createdAt: entry.createdAt,
                    // Supervisors receive the attachment's public_id, not the
                    // internal DB id that arrived in the request.
                    attachmentId: attachmentPublicId,
                    attachmentFilename,
                    attachmentFileSize,
                });
                lastPushedLogId.set(sessionKey, log.id);
                // Track session delta (accumulate totalLinesDelta, keep latest logId)
                const existing = sessionUpdates.get(sessionKey);
                if (existing) {
                    existing.latestLogId = Math.max(existing.latestLogId, log.id);
                    existing.lastActive = now;
                    existing.totalLinesDelta += lineCount;
                }
                else {
                    sessionUpdates.set(sessionKey, {
                        userId: entry.userId,
                        runId: entry.runId,
                        sessionId: entry.sessionId,
                        lastActive: now,
                        latestLogId: log.id,
                        totalLinesDelta: lineCount,
                    });
                }
            }
            // Push full log data to supervisor connections
            // NOTE(review): unlike the cost service, this broadcasts even when
            // the batch is empty — confirm whether that is intentional.
            naisysServer.broadcastToSupervisors(HubEvents.LOG_PUSH, {
                entries: pushEntries,
                sessionUpdates: Array.from(sessionUpdates.values()),
            });
            // Trigger throttled push after all entries processed
            heartbeatService.throttledPushAgentsStatus();
        }
        catch (error) {
            // Fire-and-forget: a failed batch is logged and dropped.
            logService.error(`[Hub:Logs] Error processing log_write from host ${hostId}: ${error}`);
        }
    });
}
121
+ //# sourceMappingURL=hubLogService.js.map