@vuer-ai/vuer-rtc-server 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env +1 -0
- package/S3_COMPRESSION_GUIDE.md +233 -0
- package/dist/archive/ArchivalService.d.ts +117 -0
- package/dist/archive/ArchivalService.d.ts.map +1 -0
- package/dist/archive/ArchivalService.js +181 -0
- package/dist/archive/ArchivalService.js.map +1 -0
- package/dist/broker/InMemoryBroker.d.ts +2 -0
- package/dist/broker/InMemoryBroker.d.ts.map +1 -1
- package/dist/broker/InMemoryBroker.js +4 -0
- package/dist/broker/InMemoryBroker.js.map +1 -1
- package/dist/compression/CompressionUtils.d.ts +57 -0
- package/dist/compression/CompressionUtils.d.ts.map +1 -0
- package/dist/compression/CompressionUtils.js +90 -0
- package/dist/compression/CompressionUtils.js.map +1 -0
- package/dist/compression/index.d.ts +7 -0
- package/dist/compression/index.d.ts.map +1 -0
- package/dist/compression/index.js +7 -0
- package/dist/compression/index.js.map +1 -0
- package/dist/journal/CoalescingService.d.ts +63 -0
- package/dist/journal/CoalescingService.d.ts.map +1 -0
- package/dist/journal/CoalescingService.js +507 -0
- package/dist/journal/CoalescingService.js.map +1 -0
- package/dist/journal/JournalRLE.d.ts +81 -0
- package/dist/journal/JournalRLE.d.ts.map +1 -0
- package/dist/journal/JournalRLE.js +199 -0
- package/dist/journal/JournalRLE.js.map +1 -0
- package/dist/journal/JournalService.d.ts +7 -3
- package/dist/journal/JournalService.d.ts.map +1 -1
- package/dist/journal/JournalService.js +152 -12
- package/dist/journal/JournalService.js.map +1 -1
- package/dist/journal/RLECompression.d.ts +73 -0
- package/dist/journal/RLECompression.d.ts.map +1 -0
- package/dist/journal/RLECompression.js +152 -0
- package/dist/journal/RLECompression.js.map +1 -0
- package/dist/journal/rle-demo.d.ts +8 -0
- package/dist/journal/rle-demo.d.ts.map +1 -0
- package/dist/journal/rle-demo.js +159 -0
- package/dist/journal/rle-demo.js.map +1 -0
- package/dist/persistence/S3ColdStorage.d.ts +62 -0
- package/dist/persistence/S3ColdStorage.d.ts.map +1 -0
- package/dist/persistence/S3ColdStorage.js +88 -0
- package/dist/persistence/S3ColdStorage.js.map +1 -0
- package/dist/persistence/S3ColdStorageIntegration.d.ts +78 -0
- package/dist/persistence/S3ColdStorageIntegration.d.ts.map +1 -0
- package/dist/persistence/S3ColdStorageIntegration.js +93 -0
- package/dist/persistence/S3ColdStorageIntegration.js.map +1 -0
- package/dist/serve.d.ts +2 -0
- package/dist/serve.d.ts.map +1 -1
- package/dist/serve.js +623 -15
- package/dist/serve.js.map +1 -1
- package/docs/RLE_COMPRESSION.md +397 -0
- package/examples/compression-example.ts +259 -0
- package/package.json +14 -14
- package/src/archive/ArchivalService.ts +250 -0
- package/src/broker/InMemoryBroker.ts +5 -0
- package/src/compression/CompressionUtils.ts +113 -0
- package/src/compression/index.ts +14 -0
- package/src/journal/COALESCING.md +267 -0
- package/src/journal/CoalescingService.ts +626 -0
- package/src/journal/JournalRLE.ts +265 -0
- package/src/journal/JournalService.ts +163 -11
- package/src/journal/RLECompression.ts +210 -0
- package/src/journal/rle-demo.ts +193 -0
- package/src/serve.ts +702 -15
- package/tests/benchmark/journal-optimization-benchmark.test.ts +482 -0
- package/tests/compression/compression.test.ts +343 -0
- package/tests/integration/repositories.test.ts +89 -0
- package/tests/journal/compaction-load-bug.test.ts +409 -0
- package/tests/journal/compaction.test.ts +42 -2
- package/tests/journal/journal-rle.test.ts +511 -0
- package/tests/journal/lww-ordering-bug.test.ts +248 -0
- package/tests/journal/multi-session-coalescing.test.ts +871 -0
- package/tests/journal/rle-compression.test.ts +526 -0
- package/tests/journal/text-coalescing.test.ts +210 -0
- package/tests/unit/s3-compression.test.ts +257 -0
- package/PHASE1_SUMMARY.md +0 -94
package/src/serve.ts
CHANGED
@@ -13,6 +13,8 @@
  * GET /api/documents/:id/sessions
  * GET /api/rooms/:roomId/state
  * DELETE /api/rooms/:roomId
+ * POST /api/documents/:id/compact — trigger journal compaction
+ * POST /api/documents/:id/coalesce — operation coalescing (placeholder)
  */
 
 import { createServer, type IncomingMessage, type ServerResponse } from 'http';
@@ -21,12 +23,16 @@ import { InMemoryBroker } from './broker/index.js';
 import { RTCServer } from './transport/index.js';
 import { createPrismaClient } from './persistence/PrismaClient.js';
 import { JournalService } from './journal/index.js';
+import { SessionRepository } from './persistence/SessionRepository.js';
+import { CoalescingService } from './journal/CoalescingService.js';
 
 const PORT = Number(process.env.PORT) || 8080;
 const prisma = createPrismaClient();
 
 const broker = new InMemoryBroker();
 const journalService = new JournalService(prisma);
+const sessionRepo = new SessionRepository(prisma);
+const coalescingService = new CoalescingService(prisma);
 
 // Wire the broker's member clocks into the journal service so compaction
 // only folds entries that all connected clients have acknowledged.
@@ -42,6 +48,99 @@ journalService.setMemberClockProvider(async (docId: string) => {
 
 journalService.startCompactionLoop();
 
+// ── Session TTL cleanup loop ──
+// Automatically removes disconnected sessions after TTL expires.
+const SESSION_TTL_MS = 2 * 60 * 1000; // 2 minutes
+const SESSION_CLEANUP_INTERVAL_MS = 30_000; // Run every 30 seconds
+const SESSION_DB_RETENTION_MS = SESSION_TTL_MS; // Match broker TTL for demo visibility
+let sessionCleanupTimer: NodeJS.Timeout | null = null;
+
+async function cleanupStaleSessions() {
+  try {
+    const now = Date.now();
+    const roomIds = broker.getAllRoomIds();
+    let totalRemoved = 0;
+
+    // Clean up stale sessions from broker memory
+    for (const roomId of roomIds) {
+      const members = await broker.getMembers(roomId);
+      for (const [sessionId, member] of members) {
+        if (!member.connected && (now - member.lastSeen) > SESSION_TTL_MS) {
+          broker.removeMember(roomId, sessionId);
+          totalRemoved++;
+        }
+      }
+    }
+
+    // Clean up stale sessions from MongoDB
+    // Keep disconnected sessions for auditing period, then delete
+    const deleteOlderThan = new Date(now - SESSION_DB_RETENTION_MS);
+    const deletedCount = await sessionRepo.deleteDisconnected(deleteOlderThan);
+
+    if (totalRemoved > 0 || deletedCount > 0) {
+      console.log(`[session-ttl] cleaned up ${totalRemoved} stale sessions from broker, ${deletedCount} old sessions from DB`);
+    }
+  } catch (err) {
+    console.error('[session-ttl] cleanup failed:', err);
+  }
+}
+
+function startSessionCleanupLoop() {
+  sessionCleanupTimer = setInterval(cleanupStaleSessions, SESSION_CLEANUP_INTERVAL_MS);
+}
+
+function stopSessionCleanupLoop() {
+  if (sessionCleanupTimer) {
+    clearInterval(sessionCleanupTimer);
+    sessionCleanupTimer = null;
+  }
+}
+
+startSessionCleanupLoop();
+
+// ── Session heartbeat sync ──
+// Periodically sync session state from broker to database.
+// This updates lastSeen timestamps and vector clocks for connected sessions.
+const SESSION_SYNC_INTERVAL_MS = 30_000; // 30 seconds
+let sessionSyncTimer: NodeJS.Timeout | null = null;
+
+async function syncSessionHeartbeats() {
+  try {
+    // Iterate through all tracked sessions
+    for (const [clientId, dbSessionId] of sessionIdMap.entries()) {
+      // Find the room/document for this session
+      for (const [roomId, docPromise] of roomDocPromises.entries()) {
+        const docId = await docPromise;
+        const members = await broker.getMembers(roomId);
+        const member = members.get(clientId);
+
+        if (member && member.connected) {
+          // Update last seen and clock value
+          const clockValue = Object.values(member.vectorClock).reduce((sum, v) => sum + v, 0);
+          await sessionRepo.updateLastSeen(dbSessionId);
+          // Could also update clock value here if needed
+          break; // Found the member, no need to check other rooms
+        }
+      }
+    }
+  } catch (err) {
+    console.error('[session-sync] heartbeat sync failed:', err);
+  }
+}
+
+function startSessionHeartbeatSync() {
+  sessionSyncTimer = setInterval(syncSessionHeartbeats, SESSION_SYNC_INTERVAL_MS);
+}
+
+function stopSessionHeartbeatSync() {
+  if (sessionSyncTimer) {
+    clearInterval(sessionSyncTimer);
+    sessionSyncTimer = null;
+  }
+}
+
+startSessionHeartbeatSync();
+
 // ── Room → Document mapping ──
 // RTCServer uses roomId strings, but MongoDB documents have ObjectId IDs.
 // This adapter auto-creates a Document per room and maps between them.
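The two loops above are plain `setInterval` timers. With a 2-minute TTL swept every 30 seconds, a disconnected session is evicted somewhere between two and two-and-a-half minutes after it was last seen; note also that `syncSessionHeartbeats` scans every room for each tracked session, which is fine at demo scale but grows as sessions × rooms. A minimal sketch of the staleness predicate, using a hypothetical `Member` shape with only the two fields the sweep reads:

```ts
// Hypothetical Member shape; the real type lives in the package's broker module.
interface Member {
  connected: boolean;
  lastSeen: number; // epoch ms
}

const SESSION_TTL_MS = 2 * 60 * 1000;

// Mirrors the eviction condition in cleanupStaleSessions(): only
// disconnected members past the TTL are removed.
function isStale(member: Member, now: number): boolean {
  return !member.connected && now - member.lastSeen > SESSION_TTL_MS;
}

const now = Date.now();
console.log(isStale({ connected: false, lastSeen: now - 3 * 60_000 }, now));  // true:  3 min > TTL
console.log(isStale({ connected: false, lastSeen: now - 60_000 }, now));      // false: 1 min < TTL
console.log(isStale({ connected: true,  lastSeen: now - 10 * 60_000 }, now)); // false: still connected
```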
@@ -49,6 +148,10 @@ journalService.startCompactionLoop();
 // Promise-based dedup: concurrent calls for the same roomId share one promise.
 const roomDocPromises = new Map<string, Promise<string>>();
 
+// Session ID mapping: clientId (from query param) -> DB session ObjectId
+// This allows us to track which database session corresponds to each WebSocket connection
+const sessionIdMap = new Map<string, string>();
+
 function ensureRoomDocument(roomId: string): Promise<string> {
   if (!roomDocPromises.has(roomId)) {
     roomDocPromises.set(roomId, (async () => {
@@ -82,8 +185,9 @@ function json(res: ServerResponse, data: unknown, status = 200) {
 
 function cors(res: ServerResponse) {
   res.setHeader('Access-Control-Allow-Origin', '*');
-  res.setHeader('Access-Control-Allow-Methods', 'GET, DELETE, OPTIONS');
-  res.setHeader('Access-Control-Allow-Headers', 'Content-Type');
+  res.setHeader('Access-Control-Allow-Methods', 'GET, POST, DELETE, OPTIONS, PUT, PATCH');
+  res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-Requested-With, Accept');
+  res.setHeader('Access-Control-Max-Age', '86400'); // 24 hours
 }
 
 // ── REST API router ──
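The widened CORS headers matter for the new POST endpoints: a cross-origin POST with a JSON body triggers a preflight OPTIONS request, and `Access-Control-Max-Age: 86400` lets the browser cache the preflight verdict for up to a day (browsers may cap this lower). A browser-side sketch; the host, port, and document id are placeholders, and `setThresholdMs` is one of the config keys the coalesce handler accepts:

```ts
// Cross-origin call to the new coalesce endpoint. The JSON content type
// forces a preflight, which cors() now answers for POST.
const res = await fetch('http://localhost:8080/api/documents/<docId>/coalesce', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ setThresholdMs: 500 }),
});
console.log(await res.json());
```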
@@ -113,6 +217,183 @@ async function handleApi(req: IncomingMessage, res: ServerResponse): Promise<void>
       return;
     }
 
+    // GET /api/stats/sizes — detailed size breakdown
+    if (path === '/api/stats/sizes') {
+      // Use MongoDB aggregation to compute sizes directly in database
+      // This avoids loading all records into memory
+
+      // Compute document state sizes using aggregation
+      const docSizesPipeline: any = [
+        {
+          $project: {
+            _id: 1,
+            name: 1,
+            stateSize: {
+              $cond: [
+                { $ne: ['$currentState', null] },
+                { $strLenBytes: { $toString: '$currentState' } },
+                0
+              ]
+            }
+          }
+        }
+      ];
+
+      const docSizesResult = await (db as any).document.aggregateRaw({
+        pipeline: docSizesPipeline
+      });
+
+      // Aggregate journal batch sizes by document
+      const journalPipeline: any = [
+        {
+          $project: {
+            documentId: 1,
+            opSize: {
+              $cond: [
+                { $ne: ['$operations', null] },
+                { $strLenBytes: { $toString: '$operations' } },
+                0
+              ]
+            }
+          }
+        },
+        {
+          $group: {
+            _id: '$documentId',
+            count: { $sum: 1 },
+            totalSize: { $sum: '$opSize' }
+          }
+        }
+      ];
+
+      const journalSizesResult = await (db as any).journalBatch.aggregateRaw({
+        pipeline: journalPipeline
+      });
+
+      // Aggregate operation sizes by document
+      const opPipeline: any = [
+        {
+          $project: {
+            documentId: 1,
+            dataSize: {
+              $cond: [
+                { $ne: ['$data', null] },
+                { $strLenBytes: { $toString: '$data' } },
+                0
+              ]
+            },
+            clockSize: {
+              $cond: [
+                { $ne: ['$vectorClock', null] },
+                { $strLenBytes: { $toString: '$vectorClock' } },
+                0
+              ]
+            }
+          }
+        },
+        {
+          $group: {
+            _id: '$documentId',
+            count: { $sum: 1 },
+            totalSize: { $sum: { $add: ['$dataSize', '$clockSize'] } }
+          }
+        }
+      ];
+
+      const opSizesResult = await (db as any).operation.aggregateRaw({
+        pipeline: opPipeline
+      });
+
+      // Aggregate session sizes by document
+      const sessionPipeline: any = [
+        {
+          $project: {
+            documentId: 1,
+            presenceSize: {
+              $cond: [
+                { $ne: ['$presence', null] },
+                { $strLenBytes: { $toString: '$presence' } },
+                0
+              ]
+            }
+          }
+        },
+        {
+          $group: {
+            _id: '$documentId',
+            count: { $sum: 1 },
+            totalSize: { $sum: '$presenceSize' }
+          }
+        }
+      ];
+
+      const sessionSizesResult = await (db as any).session.aggregateRaw({
+        pipeline: sessionPipeline
+      });
+
+      // Convert aggregation results to lookup maps
+      const journalSizesByDoc: Record<string, { count: number; totalSize: number }> = {};
+      for (const item of journalSizesResult) {
+        journalSizesByDoc[item._id] = { count: item.count, totalSize: item.totalSize };
+      }
+
+      const opSizesByDoc: Record<string, { count: number; totalSize: number }> = {};
+      for (const item of opSizesResult) {
+        opSizesByDoc[item._id] = { count: item.count, totalSize: item.totalSize };
+      }
+
+      const sessionSizesByDoc: Record<string, { count: number; totalSize: number }> = {};
+      for (const item of sessionSizesResult) {
+        sessionSizesByDoc[item._id] = { count: item.count, totalSize: item.totalSize };
+      }
+
+      // Build per-document summary
+      const documents = docSizesResult.map((d: any) => {
+        const id = d._id;
+        const stateSize = d.stateSize ?? 0;
+        const journalSize = journalSizesByDoc[id]?.totalSize ?? 0;
+        const operationsSize = opSizesByDoc[id]?.totalSize ?? 0;
+        const sessionsSize = sessionSizesByDoc[id]?.totalSize ?? 0;
+
+        return {
+          id,
+          name: d.name,
+          stateSize,
+          journalBatches: journalSizesByDoc[id]?.count ?? 0,
+          journalSize,
+          operations: opSizesByDoc[id]?.count ?? 0,
+          operationsSize,
+          sessions: sessionSizesByDoc[id]?.count ?? 0,
+          sessionsSize,
+          totalSize: stateSize + journalSize + operationsSize + sessionsSize,
+        };
+      });
+
+      // Compute aggregate totals
+      const totalStateSize = docSizesResult.reduce((sum: number, d: any) => sum + (d.stateSize ?? 0), 0);
+      const totalJournalSize = Object.values(journalSizesByDoc).reduce((sum, j) => sum + j.totalSize, 0);
+      const totalOpSize = Object.values(opSizesByDoc).reduce((sum, o) => sum + o.totalSize, 0);
+      const totalSessionSize = Object.values(sessionSizesByDoc).reduce((sum, s) => sum + s.totalSize, 0);
+      const totalSize = totalStateSize + totalJournalSize + totalOpSize + totalSessionSize;
+
+      const journalBatchCount = Object.values(journalSizesByDoc).reduce((sum, j) => sum + j.count, 0);
+      const operationsCount = Object.values(opSizesByDoc).reduce((sum, o) => sum + o.count, 0);
+      const sessionsCount = Object.values(sessionSizesByDoc).reduce((sum, s) => sum + s.count, 0);
+
+      json(res, {
+        totalSize,
+        totalStateSize,
+        totalJournalSize,
+        totalOpSize,
+        totalSessionSize,
+        journalBatchCount,
+        operationsCount,
+        sessionsCount,
+        documents,
+      });
+      return;
+    }
+
     // GET /api/documents
     if (path === '/api/documents') {
       const docs = await db.document.findMany({
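`GET /api/stats/sizes` pushes the byte counting into MongoDB via `$strLenBytes` over stringified fields, so nothing is loaded into Node memory; the numbers measure stringified values, which approximates rather than equals on-disk BSON size. A consumer-side sketch, with the response shape taken verbatim from the `json(...)` payload above (host and port are placeholders):

```ts
// Field names match the handler's json(...) payload.
interface SizeStats {
  totalSize: number;
  totalStateSize: number;
  totalJournalSize: number;
  totalOpSize: number;
  totalSessionSize: number;
  journalBatchCount: number;
  operationsCount: number;
  sessionsCount: number;
  documents: Array<{
    id: string;
    name: string;
    stateSize: number;
    journalBatches: number;
    journalSize: number;
    operations: number;
    operationsSize: number;
    sessions: number;
    sessionsSize: number;
    totalSize: number;
  }>;
}

const stats: SizeStats =
  await (await fetch('http://localhost:8080/api/stats/sizes')).json();
console.log(`${stats.documents.length} docs, ${stats.totalSize} bytes tracked`);
```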
@@ -125,7 +406,7 @@ async function handleApi(req: IncomingMessage, res: ServerResponse): Promise<void>
 
     // GET /api/documents/:id[/sub]
     const docMatch = path.match(/^\/api\/documents\/([^/]+)(\/.*)?$/);
-    if (docMatch) {
+    if (docMatch && req.method === 'GET') {
      const docId = docMatch[1];
      const sub = docMatch[2] ?? '';
 
@@ -137,28 +418,108 @@ async function handleApi(req: IncomingMessage, res: ServerResponse): Promise<void>
      }
 
      if (sub === '/journal') {
-
-
-
-
-
+        // Parse query params: order (asc/desc), limit, offset
+        const order = url.searchParams.get('order') === 'asc' ? 'asc' : 'desc';
+        const limit = Math.min(Math.max(1, Number(url.searchParams.get('limit')) || 100), 1000);
+        const offset = Math.max(0, Number(url.searchParams.get('offset')) || 0);
+
+        const [batches, total] = await Promise.all([
+          db.journalBatch.findMany({
+            where: { documentId: docId },
+            orderBy: { persistedAt: order },
+            take: limit,
+            skip: offset,
+          }),
+          db.journalBatch.count({ where: { documentId: docId } }),
+        ]);
+        json(res, { batches, total, limit, offset, order });
        return;
      }
 
      if (sub === '/operations') {
-
+        // Operations are embedded in JournalBatch.operations[], not in a separate collection
+        // Extract and flatten all operations from journal batches
+        const order = url.searchParams.get('order') === 'asc' ? 'asc' : 'desc';
+        const limit = Math.min(parseInt(url.searchParams.get('limit') ?? '100', 10), 1000);
+        const offset = parseInt(url.searchParams.get('offset') ?? '0', 10);
+
+        const batches = await db.journalBatch.findMany({
          where: { documentId: docId },
-          orderBy: { lamportTime:
+          orderBy: { lamportTime: order },
        });
-
+
+        // Flatten operations from all batches
+        const allOps: any[] = [];
+        for (const batch of batches) {
+          const ops = Array.isArray(batch.operations) ? batch.operations : [];
+          for (const op of ops) {
+            allOps.push({
+              ...(op as object),
+              sessionId: batch.sessionId,
+              batchId: batch.batchId,
+              lamportTime: batch.lamportTime,
+              persistedAt: batch.persistedAt,
+            });
+          }
+        }
+
+        // Sort by lamport time
+        if (order === 'desc') {
+          allOps.sort((a, b) => (b.lamportTime ?? 0) - (a.lamportTime ?? 0));
+        } else {
+          allOps.sort((a, b) => (a.lamportTime ?? 0) - (b.lamportTime ?? 0));
+        }
+
+        const total = allOps.length;
+        const paginatedOps = allOps.slice(offset, offset + limit);
+
+        json(res, { operations: paginatedOps, total, limit, offset, order });
        return;
      }
 
      if (sub === '/sessions') {
-
+        // Sessions are derived from JournalBatch sessionIds since Session collection isn't used
+        const batches = await db.journalBatch.findMany({
          where: { documentId: docId },
-          orderBy: {
+          orderBy: { persistedAt: 'desc' },
        });
+
+        // Aggregate session info from batches
+        const sessionMap = new Map<string, {
+          sessionId: string;
+          batchCount: number;
+          operationCount: number;
+          firstSeen: Date;
+          lastSeen: Date;
+          lastLamportTime: number;
+        }>();
+
+        for (const batch of batches) {
+          const sid = batch.sessionId;
+          const ops = Array.isArray(batch.operations) ? batch.operations : [];
+          const existing = sessionMap.get(sid);
+          if (existing) {
+            existing.batchCount++;
+            existing.operationCount += ops.length;
+            if (batch.persistedAt < existing.firstSeen) existing.firstSeen = batch.persistedAt;
+            if (batch.persistedAt > existing.lastSeen) existing.lastSeen = batch.persistedAt;
+            existing.lastLamportTime = Math.max(existing.lastLamportTime, batch.lamportTime ?? 0);
+          } else {
+            sessionMap.set(sid, {
+              sessionId: sid,
+              batchCount: 1,
+              operationCount: ops.length,
+              firstSeen: batch.persistedAt,
+              lastSeen: batch.persistedAt,
+              lastLamportTime: batch.lamportTime ?? 0,
+            });
+          }
+        }
+
+        const sessions = Array.from(sessionMap.values()).sort((a, b) =>
+          b.lastSeen.getTime() - a.lastSeen.getTime()
+        );
+
        json(res, sessions);
        return;
      }
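All three sub-resources now paginate the same way: `order` (`asc`/`desc`, default `desc`), `limit` (`/journal` clamps it to 1–1000, default 100), and `offset`. Note that `/operations` fetches every batch and flattens in memory before slicing, so pagination there is over the flattened array rather than a database cursor. A client-side sketch that walks a document's journal page by page (host and docId are placeholders):

```ts
// Drain /journal in ascending order, 100 batches at a time.
const base = 'http://localhost:8080/api/documents/<docId>/journal';
const limit = 100;
let offset = 0;
for (;;) {
  const page = await (await fetch(`${base}?order=asc&limit=${limit}&offset=${offset}`)).json();
  console.log(`got ${page.batches.length} of ${page.total} batches`);
  offset += page.batches.length;
  if (page.batches.length === 0 || offset >= page.total) break;
}
```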
@@ -198,6 +559,301 @@ async function handleApi(req: IncomingMessage, res: ServerResponse): Promise<void>
      return;
    }
 
+    // POST /api/documents/:id/compact — manually trigger compaction
+    const compactMatch = path.match(/^\/api\/documents\/([^/]+)\/compact$/);
+    if (compactMatch && req.method === 'POST') {
+      const docId = compactMatch[1];
+
+      try {
+        // Get before stats (count ops from journal batches, not empty Operation collection)
+        const beforeBatches = await db.journalBatch.findMany({ where: { documentId: docId } });
+        const beforeOpsCount = beforeBatches.reduce((sum, b) => {
+          const ops = Array.isArray(b.operations) ? b.operations : [];
+          return sum + ops.length;
+        }, 0);
+
+        console.log(`[compact] Starting compaction for doc ${docId}: ${beforeBatches.length} batches, ${beforeOpsCount} ops`);
+
+        // Run compaction (no watermark = compact everything)
+        await journalService.compact(docId);
+
+        // Get after stats
+        const afterBatches = await db.journalBatch.findMany({ where: { documentId: docId } });
+        const afterOpsCount = afterBatches.reduce((sum, b) => {
+          const ops = Array.isArray(b.operations) ? b.operations : [];
+          return sum + ops.length;
+        }, 0);
+
+        console.log(`[compact] Completed compaction for doc ${docId}: ${afterBatches.length} batches, ${afterOpsCount} ops`);
+
+        json(res, {
+          ok: true,
+          before: { journalBatches: beforeBatches.length, operations: beforeOpsCount },
+          after: { journalBatches: afterBatches.length, operations: afterOpsCount },
+          reduction: {
+            journalBatches: beforeBatches.length - afterBatches.length,
+            operations: beforeOpsCount - afterOpsCount,
+          },
+        });
+      } catch (e: any) {
+        console.error(`[compact] Error compacting doc ${docId}:`, e);
+        json(res, { ok: false, error: e.message, stack: e.stack }, 500);
+      }
+      return;
+    }
+
+    // POST /api/documents/:id/coalesce — operation coalescing
+    const coalesceMatch = path.match(/^\/api\/documents\/([^/]+)\/coalesce$/);
+    if (coalesceMatch && req.method === 'POST') {
+      const docId = coalesceMatch[1];
+
+      try {
+        // Parse optional config from request body
+        let config: { setThresholdMs?: number; enableTextCoalesce?: boolean; enableSetCoalesce?: boolean; enableVectorCoalesce?: boolean } = {};
+        if (req.headers['content-length'] && parseInt(req.headers['content-length']) > 0) {
+          const body = await new Promise<string>((resolve) => {
+            let data = '';
+            req.on('data', chunk => data += chunk);
+            req.on('end', () => resolve(data));
+          });
+          if (body) {
+            try {
+              config = JSON.parse(body);
+            } catch {
+              // Ignore parse errors, use defaults
+            }
+          }
+        }
+
+        const result = await coalescingService.coalesce(docId, config);
+        json(res, result);
+      } catch (e: any) {
+        json(res, { ok: false, error: e.message }, 500);
+      }
+      return;
+    }
+
+    // DELETE /api/documents/:id — permanently delete a document and all its data
+    const docDeleteMatch = path.match(/^\/api\/documents\/([^/]+)$/);
+    if (docDeleteMatch && req.method === 'DELETE') {
+      const docId = docDeleteMatch[1];
+
+      try {
+        // Delete in order: sessions -> journal batches -> operations -> document
+        const sessionsDeleted = await db.session.deleteMany({ where: { documentId: docId } });
+        const batchesDeleted = await db.journalBatch.deleteMany({ where: { documentId: docId } });
+        const opsDeleted = await db.operation.deleteMany({ where: { documentId: docId } });
+        const doc = await db.document.delete({ where: { id: docId } });
+
+        // Also clear any in-memory state for the room
+        const roomId = doc.name;
+        rtcServer.clearRoom(roomId);
+        await broker.clearRoom(roomId);
+        roomDocPromises.delete(roomId);
+
+        json(res, {
+          ok: true,
+          deleted: {
+            document: doc.name,
+            sessions: sessionsDeleted.count,
+            journalBatches: batchesDeleted.count,
+            operations: opsDeleted.count,
+          },
+        });
+      } catch (e: any) {
+        json(res, { ok: false, error: e.message }, 404);
+      }
+      return;
+    }
+
+    // POST /api/rooms/:id/cleanup-sessions — remove stale disconnected sessions
+    const cleanupMatch = path.match(/^\/api\/rooms\/([^/]+)\/cleanup-sessions$/);
+    if (cleanupMatch && req.method === 'POST') {
+      const roomId = cleanupMatch[1];
+      const maxAge = 60_000; // Remove sessions disconnected > 1 minute ago
+      const now = Date.now();
+
+      const members = await broker.getMembers(roomId);
+      let removed = 0;
+      for (const [sessionId, member] of members) {
+        if (!member.connected && (now - member.lastSeen) > maxAge) {
+          broker.removeMember(roomId, sessionId);
+          removed++;
+        }
+      }
+
+      json(res, { ok: true, removed, remaining: members.size - removed });
+      return;
+    }
+
+    // POST /api/rooms/:id/leave — immediate session cleanup (for beforeunload beacon)
+    const leaveMatch = path.match(/^\/api\/rooms\/([^/]+)\/leave$/);
+    if (leaveMatch && req.method === 'POST') {
+      const roomId = leaveMatch[1];
+
+      // Parse body for sessionId
+      let body = '';
+      for await (const chunk of req) {
+        body += chunk;
+      }
+
+      let sessionId: string | undefined;
+      try {
+        const data = JSON.parse(body);
+        sessionId = data.sessionId;
+      } catch {
+        // Ignore parse errors - beacon may send empty body
+      }
+
+      if (sessionId) {
+        // Mark as disconnected and remove immediately
+        const members = await broker.getMembers(roomId);
+        if (members.has(sessionId)) {
+          broker.removeMember(roomId, sessionId);
+          json(res, { ok: true, removed: true, sessionId });
+          return;
+        }
+      }
+
+      json(res, { ok: true, removed: false });
+      return;
+    }
+
+    // GET /api/documents/:id/metrics — performance metrics inspection
+    const metricsMatch = path.match(/^\/api\/documents\/([^/]+)\/metrics$/);
+    if (metricsMatch) {
+      const docId = metricsMatch[1];
+
+      // Import compression utilities
+      const { getCompressionStats } = await import('./compression/CompressionUtils.js');
+      const { encodeRLE } = await import('./journal/RLECompression.js');
+
+      // Fetch document and journal batches
+      const doc = await db.document.findUnique({ where: { id: docId } });
+      if (!doc) { json(res, { error: 'Not found' }, 404); return; }
+
+      const batches = await db.journalBatch.findMany({
+        where: { documentId: docId },
+        orderBy: { persistedAt: 'asc' },
+      });
+
+      // Calculate storage metrics
+      const rawSize = JSON.stringify(batches.map(b => b.operations)).length;
+
+      // Convert batches to CRDTMessage format for RLE encoding
+      const allMessages: any[] = batches.map(batch => ({
+        sessionId: batch.sessionId,
+        lamportTime: batch.lamportTime ?? 0,
+        timestamp: new Date(batch.persistedAt).getTime(),
+        ops: Array.isArray(batch.operations) ? batch.operations : [],
+      }));
+
+      const rleEncoded = encodeRLE(allMessages);
+      const rleSize = JSON.stringify(rleEncoded).length;
+
+      // Calculate gzip compression
+      const gzipStats = getCompressionStats(batches.map(b => b.operations));
+      const gzipSize = gzipStats.compressedSize;
+      const compressionRatio = rawSize > 0 ? (rawSize - gzipSize) / rawSize : 0;
+
+      // Calculate batching metrics
+      const uniqueSessions = new Set(allMessages.map(m => m.sessionId)).size;
+      const totalOps = allMessages.reduce((sum, m) => sum + (m.ops?.length ?? 1), 0);
+      const avgOpsPerBatch = batches.length > 0 ? totalOps / batches.length : 0;
+      const batchingEfficiency = allMessages.length > 0 ? batches.length / allMessages.length : 0;
+
+      // Calculate sync metrics
+      const snapshotSize = JSON.stringify(doc.currentState ?? {}).length;
+      const journalSize = rawSize;
+      const totalTransferSize = snapshotSize + journalSize;
+      const estimatedSyncMs = {
+        mobile1Mbps: Math.round((totalTransferSize * 8) / (1024 * 1024) * 1000),
+        desktop10Mbps: Math.round((totalTransferSize * 8) / (10 * 1024 * 1024) * 1000),
+      };
+
+      // Calculate compaction metrics
+      const journalLength = batches.length;
+      const threshold = 500; // COMPACTION_THRESHOLD_ENTRIES from JournalService
+      const lastCompactedBatch = batches.length > 0 ? batches[0] : null;
+      const lastCompactedAt = lastCompactedBatch?.persistedAt ?? doc.updatedAt;
+
+      // Estimate compaction candidates (entries older than 1 hour)
+      const oneHourAgo = new Date(Date.now() - 3600000);
+      const compactionCandidates = batches.filter(
+        b => new Date(b.persistedAt) < oneHourAgo
+      ).length;
+
+      // Flatten all operations from journal batches for metrics
+      const flatOps: any[] = [];
+      for (const batch of batches) {
+        const ops = Array.isArray(batch.operations) ? batch.operations : [];
+        for (const op of ops) {
+          flatOps.push({
+            ...(op as object),
+            lamportTime: batch.lamportTime,
+            vectorClock: batch.vectorClock,
+          });
+        }
+      }
+
+      // Calculate operation type distribution from journal ops
+      const typeDistribution: Record<string, number> = {};
+      for (const op of flatOps) {
+        const otype = op.otype ?? op.type ?? 'unknown';
+        typeDistribution[otype] = (typeDistribution[otype] ?? 0) + 1;
+      }
+
+      // Calculate lamport range from batches
+      const lamportTimes = batches.map(b => b.lamportTime ?? 0);
+      const lamportRange = lamportTimes.length > 0
+        ? [Math.min(...lamportTimes), Math.max(...lamportTimes)]
+        : [0, 0];
+
+      // Calculate vector clock complexity from batches
+      const uniqueSessionsInClocks = new Set<string>();
+      for (const batch of batches) {
+        const clock = batch.vectorClock as Record<string, number> | null;
+        if (clock && typeof clock === 'object') {
+          Object.keys(clock).forEach(sessionId => uniqueSessionsInClocks.add(sessionId));
+        }
+      }
+      const vectorClockComplexity = uniqueSessionsInClocks.size;
+
+      json(res, {
+        storage: {
+          rawSize,
+          rleSize,
+          gzipSize,
+          compressionRatio,
+          totalBatches: batches.length,
+          totalOperations: flatOps.length,
+        },
+        coalescing: {
+          uniqueSessions,
+          avgOpsPerBatch,
+          batchingEfficiency,
+        },
+        sync: {
+          snapshotSize,
+          journalSize,
+          totalTransferSize,
+          estimatedSyncMs,
+        },
+        compaction: {
+          journalLength,
+          threshold,
+          lastCompactedAt,
+          compactionCandidates,
+        },
+        operations: {
+          typeDistribution,
+          lamportRange,
+          vectorClockComplexity,
+        },
+      });
+      return;
+    }
+
    json(res, { error: 'Not found' }, 404);
  } catch (err: any) {
    console.error('[api]', err);
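Two of these endpoints deserve a note. The compact endpoint returns before/after batch and operation counts, so it doubles as a manual smoke test for the compaction logic. The `/leave` endpoint exists because `beforeunload` handlers cannot reliably await a fetch; `navigator.sendBeacon` queues the request so it survives page teardown. A browser-side sketch (`roomId` and `sessionId` stand for whatever the client used when opening its WebSocket; host and port are placeholders):

```ts
// Fire-and-forget session cleanup on tab close. sendBeacon POSTs the body
// and returns immediately; the server parses { sessionId } out of it.
declare const roomId: string;    // the room this client joined
declare const sessionId: string; // the client's WebSocket session id

window.addEventListener('beforeunload', () => {
  navigator.sendBeacon(
    `http://localhost:8080/api/rooms/${roomId}/leave`,
    JSON.stringify({ sessionId })
  );
});
```

Without the beacon, the session simply waits for the 2-minute TTL sweep introduced earlier in this file.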
@@ -243,10 +899,39 @@ wss.on('connection', (ws, req) => {
  // The journalAdapter handles room creation lazily inside processMessage.
  rtcServer.handleConnection(ws, roomId, sessionId);
 
-  //
+  // Create session record in database after ensuring document exists
  ensureRoomDocument(roomId)
-    .then(() =>
+    .then(async (docId) => {
+      // Create session in database
+      try {
+        const dbSession = await sessionRepo.create({
+          documentId: docId,
+          userId: url.searchParams.get('userId') ?? 'anonymous',
+          clientId: sessionId,
+        });
+        sessionIdMap.set(sessionId, dbSession.id);
+        // console.log(`[session] created session ${dbSession.id} for client ${sessionId} in doc ${docId}`);
+      } catch (err) {
+        console.error(`[session] failed to create session for ${sessionId}:`, err);
+      }
+
+      // Send stored state
+      return rtcServer.handleStateTransfer(ws, roomId);
+    })
    .catch((err) => console.error(`[state-transfer] room="${roomId}":`, err));
+
+  // Handle disconnect — mark session as disconnected
+  ws.on('close', () => {
+    const dbSessionId = sessionIdMap.get(sessionId);
+    if (dbSessionId) {
+      sessionRepo.disconnect(dbSessionId)
+        .then(() => {
+          // console.log(`[session] disconnected session ${dbSessionId} for client ${sessionId}`);
+          sessionIdMap.delete(sessionId);
+        })
+        .catch((err) => console.error(`[session] failed to disconnect session ${dbSessionId}:`, err));
+    }
+  });
 });
 
 server.listen(PORT, () => {
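Session rows are keyed off the WebSocket's query parameters: the handler reads `userId` (defaulting to `'anonymous'`) and records the connection's `sessionId` as `clientId`. A minimal Node client sketch using the `ws` package; the exact URL shape (room id in the path, `sessionId` as a query parameter) is an assumption based on the variables the handler uses:

```ts
import WebSocket from 'ws';
import { randomUUID } from 'node:crypto';

// Assumed URL shape: room in the path, identity in the query string.
const sessionId = randomUUID();
const ws = new WebSocket(
  `ws://localhost:8080/my-room?sessionId=${sessionId}&userId=alice`
);

ws.on('open', () => console.log('joined as', sessionId));
ws.on('close', () => {
  // Server side: sessionRepo.disconnect(...) marks the row disconnected,
  // and the TTL sweep deletes it roughly two minutes later.
  console.log('left room');
});
```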
@@ -257,6 +942,8 @@ server.listen(PORT, () => {
 
 function shutdown() {
   journalService.stopCompactionLoop();
+  stopSessionHeartbeatSync();
+  stopSessionCleanupLoop();
   server.close(() => {
     prisma.$disconnect().catch(() => {});
   });