@syncular/server-hono 0.0.1-100

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/dist/api-key-auth.d.ts +49 -0
  2. package/dist/api-key-auth.d.ts.map +1 -0
  3. package/dist/api-key-auth.js +110 -0
  4. package/dist/api-key-auth.js.map +1 -0
  5. package/dist/blobs.d.ts +69 -0
  6. package/dist/blobs.d.ts.map +1 -0
  7. package/dist/blobs.js +383 -0
  8. package/dist/blobs.js.map +1 -0
  9. package/dist/console/index.d.ts +8 -0
  10. package/dist/console/index.d.ts.map +1 -0
  11. package/dist/console/index.js +7 -0
  12. package/dist/console/index.js.map +1 -0
  13. package/dist/console/routes.d.ts +106 -0
  14. package/dist/console/routes.d.ts.map +1 -0
  15. package/dist/console/routes.js +1612 -0
  16. package/dist/console/routes.js.map +1 -0
  17. package/dist/console/schemas.d.ts +308 -0
  18. package/dist/console/schemas.d.ts.map +1 -0
  19. package/dist/console/schemas.js +201 -0
  20. package/dist/console/schemas.js.map +1 -0
  21. package/dist/create-server.d.ts +80 -0
  22. package/dist/create-server.d.ts.map +1 -0
  23. package/dist/create-server.js +100 -0
  24. package/dist/create-server.js.map +1 -0
  25. package/dist/index.d.ts +16 -0
  26. package/dist/index.d.ts.map +1 -0
  27. package/dist/index.js +25 -0
  28. package/dist/index.js.map +1 -0
  29. package/dist/openapi.d.ts +45 -0
  30. package/dist/openapi.d.ts.map +1 -0
  31. package/dist/openapi.js +59 -0
  32. package/dist/openapi.js.map +1 -0
  33. package/dist/proxy/connection-manager.d.ts +78 -0
  34. package/dist/proxy/connection-manager.d.ts.map +1 -0
  35. package/dist/proxy/connection-manager.js +251 -0
  36. package/dist/proxy/connection-manager.js.map +1 -0
  37. package/dist/proxy/index.d.ts +8 -0
  38. package/dist/proxy/index.d.ts.map +1 -0
  39. package/dist/proxy/index.js +8 -0
  40. package/dist/proxy/index.js.map +1 -0
  41. package/dist/proxy/routes.d.ts +74 -0
  42. package/dist/proxy/routes.d.ts.map +1 -0
  43. package/dist/proxy/routes.js +147 -0
  44. package/dist/proxy/routes.js.map +1 -0
  45. package/dist/rate-limit.d.ts +101 -0
  46. package/dist/rate-limit.d.ts.map +1 -0
  47. package/dist/rate-limit.js +186 -0
  48. package/dist/rate-limit.js.map +1 -0
  49. package/dist/routes.d.ts +126 -0
  50. package/dist/routes.d.ts.map +1 -0
  51. package/dist/routes.js +884 -0
  52. package/dist/routes.js.map +1 -0
  53. package/dist/ws.d.ts +230 -0
  54. package/dist/ws.d.ts.map +1 -0
  55. package/dist/ws.js +601 -0
  56. package/dist/ws.js.map +1 -0
  57. package/package.json +73 -0
  58. package/src/__tests__/create-server.test.ts +187 -0
  59. package/src/__tests__/pull-chunk-storage.test.ts +572 -0
  60. package/src/__tests__/rate-limit.test.ts +78 -0
  61. package/src/__tests__/realtime-bridge.test.ts +131 -0
  62. package/src/__tests__/sync-rate-limit-routing.test.ts +181 -0
  63. package/src/__tests__/ws-connection-manager.test.ts +176 -0
  64. package/src/api-key-auth.ts +179 -0
  65. package/src/blobs.ts +534 -0
  66. package/src/console/index.ts +17 -0
  67. package/src/console/routes.ts +2155 -0
  68. package/src/console/schemas.ts +299 -0
  69. package/src/create-server.ts +186 -0
  70. package/src/index.ts +42 -0
  71. package/src/openapi.ts +74 -0
  72. package/src/proxy/connection-manager.ts +340 -0
  73. package/src/proxy/index.ts +8 -0
  74. package/src/proxy/routes.ts +223 -0
  75. package/src/rate-limit.ts +321 -0
  76. package/src/routes.ts +1305 -0
  77. package/src/ws.ts +789 -0
package/src/routes.ts ADDED
@@ -0,0 +1,1305 @@
1
+ /**
2
+ * @syncular/server-hono - Sync routes for Hono
3
+ *
4
+ * Provides:
5
+ * - POST / (combined push + pull in one round-trip)
6
+ * - GET /snapshot-chunks/:chunkId (download encoded snapshot chunks)
7
+ * - GET /realtime (optional WebSocket "wake up" notifications)
8
+ */
9
+
10
+ import {
11
+ captureSyncException,
12
+ createSyncTimer,
13
+ ErrorResponseSchema,
14
+ logSyncEvent,
15
+ SyncCombinedRequestSchema,
16
+ SyncCombinedResponseSchema,
17
+ SyncPushRequestSchema,
18
+ } from '@syncular/core';
19
+ import type {
20
+ ServerSyncDialect,
21
+ ServerTableHandler,
22
+ SnapshotChunkStorage,
23
+ SyncCoreDb,
24
+ SyncRealtimeBroadcaster,
25
+ SyncRealtimeEvent,
26
+ } from '@syncular/server';
27
+ import {
28
+ type CompactOptions,
29
+ InvalidSubscriptionScopeError,
30
+ type PruneOptions,
31
+ type PullResult,
32
+ pull,
33
+ pushCommit,
34
+ readSnapshotChunk,
35
+ recordClientCursor,
36
+ TableRegistry,
37
+ } from '@syncular/server';
38
+ import type { Context, MiddlewareHandler } from 'hono';
39
+ import { Hono } from 'hono';
40
+
41
+ import type { UpgradeWebSocket } from 'hono/ws';
42
+ import { describeRoute, resolver, validator as zValidator } from 'hono-openapi';
43
+ import {
44
+ type Kysely,
45
+ type SelectQueryBuilder,
46
+ type SqlBool,
47
+ sql,
48
+ } from 'kysely';
49
+ import { z } from 'zod';
50
+ import {
51
+ createRateLimiter,
52
+ DEFAULT_SYNC_RATE_LIMITS,
53
+ type SyncRateLimitConfig,
54
+ } from './rate-limit';
55
+ import {
56
+ createWebSocketConnection,
57
+ type WebSocketConnection,
58
+ WebSocketConnectionManager,
59
+ } from './ws';
60
+
61
+ /**
62
+ * WeakMaps for storing Hono-instance-specific data without augmenting the type.
63
+ */
64
+ const wsConnectionManagerMap = new WeakMap<Hono, WebSocketConnectionManager>();
65
+ const realtimeUnsubscribeMap = new WeakMap<Hono, () => void>();
66
+
67
/**
 * Result of a successful `authenticate` callback.
 */
export interface SyncAuthResult {
  /** Identifier of the authenticated actor; used for push/pull attribution and logging. */
  actorId: string;
  /** Optional partition for the actor; routes fall back to `'default'` when omitted. */
  partitionId?: string;
}
71
+
72
/**
 * WebSocket configuration for realtime sync.
 *
 * Note: this endpoint is only a "wake up" mechanism; clients must still pull.
 */
export interface SyncWebSocketConfig {
  /**
   * Enable the `GET /realtime` WebSocket endpoint.
   * When true, `upgradeWebSocket` must also be provided (route creation throws otherwise).
   */
  enabled?: boolean;
  /**
   * Runtime-provided WebSocket upgrader (e.g. from `hono/bun`'s `createBunWebSocket()`).
   */
  upgradeWebSocket?: UpgradeWebSocket;
  /** Heartbeat interval for the connection manager. Default: 30_000 ms. */
  heartbeatIntervalMs?: number;
  /**
   * Maximum number of concurrent WebSocket connections across the entire process.
   * Default: 5000
   */
  maxConnectionsTotal?: number;
  /**
   * Maximum number of concurrent WebSocket connections per clientId.
   * Default: 3
   */
  maxConnectionsPerClient?: number;
}
95
+
96
/**
 * Tunables for the sync routes: per-request payload limits (enforced in the
 * combined `POST /` handler), optional rate limiting, optional WebSocket
 * realtime, and optional maintenance (prune/compact) settings.
 */
export interface SyncRoutesConfigWithRateLimit {
  /**
   * Max commits per pull request.
   * Default: 100
   */
  maxPullLimitCommits?: number;
  /**
   * Max subscriptions per pull request.
   * Default: 200
   */
  maxSubscriptionsPerPull?: number;
  /**
   * Max snapshot rows per snapshot page.
   * Default: 5000
   */
  maxPullLimitSnapshotRows?: number;
  /**
   * Max snapshot pages per subscription per pull response.
   * Default: 10
   */
  maxPullMaxSnapshotPages?: number;
  /**
   * Max operations per pushed commit.
   * Default: 200
   */
  maxOperationsPerPush?: number;
  /**
   * Rate limiting configuration.
   * Set to false to disable all rate limiting.
   */
  rateLimit?: SyncRateLimitConfig | false;
  /**
   * WebSocket realtime configuration.
   */
  websocket?: SyncWebSocketConfig;

  /**
   * Optional pruning configuration. When enabled, the server periodically prunes
   * old commit history based on active client cursors.
   */
  prune?: {
    /** Minimum time between prune runs. Default: 5 minutes. */
    minIntervalMs?: number;
    /** Pruning watermark options. */
    options?: PruneOptions;
  };

  /**
   * Optional compaction configuration. When enabled, the server periodically
   * compacts older change history to reduce storage.
   */
  compact?: {
    /** Minimum time between compaction runs. Default: 30 minutes. */
    minIntervalMs?: number;
    /** Compaction options. */
    options?: CompactOptions;
  };

  /**
   * Optional multi-instance realtime broadcaster.
   * When provided, instances publish/subscribe commit wakeups via the broadcaster.
   */
  realtime?: {
    broadcaster: SyncRealtimeBroadcaster;
    /** Optional stable instance id (useful in tests). */
    instanceId?: string;
  };
}
164
+
165
/**
 * Options for `createSyncRoutes`.
 */
export interface CreateSyncRoutesOptions<DB extends SyncCoreDb = SyncCoreDb> {
  /** Kysely database handle used for push/pull, cursors, and request-event recording. */
  db: Kysely<DB>;
  /** Server-side SQL dialect adapter (e.g. used for array-to-DB value conversion). */
  dialect: ServerSyncDialect;
  /** Table handlers; each is registered into a `TableRegistry` at route creation. */
  handlers: ServerTableHandler<DB>[];
  /**
   * Resolves the request to an authenticated actor.
   * Return `null` to reject the request; routes respond 401 UNAUTHENTICATED.
   * The result is cached per request context.
   */
  authenticate: (c: Context) => Promise<SyncAuthResult | null>;
  /** Optional tuning knobs; see `SyncRoutesConfigWithRateLimit` for defaults. */
  sync?: SyncRoutesConfigWithRateLimit;
  /**
   * Optional pre-built connection manager; only used when `sync.websocket.enabled`
   * is set, in which case it replaces the internally constructed one.
   */
  wsConnectionManager?: WebSocketConnectionManager;
  /**
   * Optional snapshot chunk storage adapter.
   * When provided, stores snapshot chunk bodies in external storage
   * (S3, R2, etc.) instead of inline in the database.
   */
  chunkStorage?: SnapshotChunkStorage;
}
179
+
180
+ // ============================================================================
181
+ // Route Schemas
182
+ // ============================================================================
183
+
184
+ const snapshotChunkParamsSchema = z.object({
185
+ chunkId: z.string().min(1),
186
+ });
187
+
188
+ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
189
+ options: CreateSyncRoutesOptions<DB>
190
+ ): Hono {
191
+ const routes = new Hono();
192
+ routes.onError((error, c) => {
193
+ captureSyncException(error, {
194
+ event: 'sync.route.unhandled',
195
+ method: c.req.method,
196
+ path: c.req.path,
197
+ });
198
+ return c.text('Internal Server Error', 500);
199
+ });
200
+ const handlerRegistry = new TableRegistry<DB>();
201
+ for (const handler of options.handlers) {
202
+ handlerRegistry.register(handler);
203
+ }
204
+ const config = options.sync ?? {};
205
+ const maxPullLimitCommits = config.maxPullLimitCommits ?? 100;
206
+ const maxSubscriptionsPerPull = config.maxSubscriptionsPerPull ?? 200;
207
+ const maxPullLimitSnapshotRows = config.maxPullLimitSnapshotRows ?? 5000;
208
+ const maxPullMaxSnapshotPages = config.maxPullMaxSnapshotPages ?? 10;
209
+ const maxOperationsPerPush = config.maxOperationsPerPush ?? 200;
210
+
211
+ // -------------------------------------------------------------------------
212
+ // Optional WebSocket manager (scope-key based wake-ups)
213
+ // -------------------------------------------------------------------------
214
+
215
+ const websocketConfig = config.websocket;
216
+ if (websocketConfig?.enabled && !websocketConfig.upgradeWebSocket) {
217
+ throw new Error(
218
+ 'sync.websocket.enabled requires sync.websocket.upgradeWebSocket'
219
+ );
220
+ }
221
+
222
+ const wsConnectionManager = websocketConfig?.enabled
223
+ ? (options.wsConnectionManager ??
224
+ new WebSocketConnectionManager({
225
+ heartbeatIntervalMs: websocketConfig.heartbeatIntervalMs ?? 30_000,
226
+ }))
227
+ : null;
228
+
229
+ if (wsConnectionManager) {
230
+ wsConnectionManagerMap.set(routes, wsConnectionManager);
231
+ }
232
+
233
+ // -------------------------------------------------------------------------
234
+ // Multi-instance realtime broadcaster (optional)
235
+ // -------------------------------------------------------------------------
236
+
237
+ const realtimeBroadcaster = config.realtime?.broadcaster ?? null;
238
+ const instanceId =
239
+ config.realtime?.instanceId ??
240
+ (typeof crypto !== 'undefined' && 'randomUUID' in crypto
241
+ ? crypto.randomUUID()
242
+ : `${Date.now()}-${Math.random().toString(16).slice(2)}`);
243
+ const loggedAsyncFailureKeys = new Set<string>();
244
+ const logAsyncFailureOnce = (
245
+ key: string,
246
+ event: {
247
+ event: string;
248
+ error: string;
249
+ [key: string]: unknown;
250
+ }
251
+ ) => {
252
+ if (loggedAsyncFailureKeys.has(key)) return;
253
+ loggedAsyncFailureKeys.add(key);
254
+ logSyncEvent(event);
255
+ };
256
+
257
+ if (wsConnectionManager && realtimeBroadcaster) {
258
+ const unsubscribe = realtimeBroadcaster.subscribe(
259
+ (event: SyncRealtimeEvent) => {
260
+ void handleRealtimeEvent(event).catch((error) => {
261
+ logAsyncFailureOnce('sync.realtime.broadcast_delivery_failed', {
262
+ event: 'sync.realtime.broadcast_delivery_failed',
263
+ error: error instanceof Error ? error.message : String(error),
264
+ sourceEventType: event.type,
265
+ });
266
+ });
267
+ }
268
+ );
269
+
270
+ realtimeUnsubscribeMap.set(routes, unsubscribe);
271
+ }
272
+
273
+ // -------------------------------------------------------------------------
274
+ // Request event recording (for console inspector)
275
+ // -------------------------------------------------------------------------
276
+
277
+ type RequestEvent = {
278
+ eventType: 'push' | 'pull';
279
+ actorId: string;
280
+ clientId: string;
281
+ transportPath: 'direct' | 'relay';
282
+ statusCode: number;
283
+ outcome: string;
284
+ durationMs: number;
285
+ commitSeq?: number | null;
286
+ operationCount?: number | null;
287
+ rowCount?: number | null;
288
+ tables?: string[];
289
+ errorMessage?: string | null;
290
+ };
291
+
292
+ const recordRequestEvent = async (event: RequestEvent) => {
293
+ const tablesValue = options.dialect.arrayToDb(event.tables ?? []);
294
+ await sql`
295
+ INSERT INTO sync_request_events (
296
+ event_type, actor_id, client_id, status_code, outcome,
297
+ duration_ms, commit_seq, operation_count, row_count,
298
+ tables, error_message, transport_path
299
+ ) VALUES (
300
+ ${event.eventType}, ${event.actorId}, ${event.clientId},
301
+ ${event.statusCode}, ${event.outcome}, ${event.durationMs},
302
+ ${event.commitSeq ?? null}, ${event.operationCount ?? null},
303
+ ${event.rowCount ?? null}, ${tablesValue}, ${event.errorMessage ?? null},
304
+ ${event.transportPath}
305
+ )
306
+ `.execute(options.db);
307
+ };
308
+
309
+ const recordRequestEventInBackground = (event: RequestEvent): void => {
310
+ void recordRequestEvent(event).catch((error) => {
311
+ logAsyncFailureOnce('sync.request_event_record_failed', {
312
+ event: 'sync.request_event_record_failed',
313
+ userId: event.actorId,
314
+ clientId: event.clientId,
315
+ requestEventType: event.eventType,
316
+ error: error instanceof Error ? error.message : String(error),
317
+ });
318
+ });
319
+ };
320
+
321
+ const authCache = new WeakMap<Context, Promise<SyncAuthResult | null>>();
322
+ const getAuth = (c: Context): Promise<SyncAuthResult | null> => {
323
+ const cached = authCache.get(c);
324
+ if (cached) return cached;
325
+ const pending = options.authenticate(c);
326
+ authCache.set(c, pending);
327
+ return pending;
328
+ };
329
+
330
+ // -------------------------------------------------------------------------
331
+ // Rate limiting (optional)
332
+ // -------------------------------------------------------------------------
333
+
334
+ const rateLimitConfig = config.rateLimit;
335
+ if (rateLimitConfig !== false) {
336
+ const pullRateLimit =
337
+ rateLimitConfig?.pull ?? DEFAULT_SYNC_RATE_LIMITS.pull;
338
+ const pushRateLimit =
339
+ rateLimitConfig?.push ?? DEFAULT_SYNC_RATE_LIMITS.push;
340
+
341
+ const createAuthBasedRateLimiter = (
342
+ limitConfig: Omit<SyncRateLimitConfig['pull'], never> | false | undefined
343
+ ) => {
344
+ if (limitConfig === false || !limitConfig) return null;
345
+ return createRateLimiter({
346
+ ...limitConfig,
347
+ keyGenerator: async (c) => {
348
+ const auth = await getAuth(c);
349
+ return auth?.actorId ?? null;
350
+ },
351
+ });
352
+ };
353
+
354
+ const pullLimiter = createAuthBasedRateLimiter(pullRateLimit);
355
+ const pushLimiter = createAuthBasedRateLimiter(pushRateLimit);
356
+
357
+ const syncRateLimiter: MiddlewareHandler = async (c, next) => {
358
+ if (!pullLimiter && !pushLimiter) return next();
359
+
360
+ let shouldApplyPull = pullLimiter !== null;
361
+ let shouldApplyPush = pushLimiter !== null;
362
+
363
+ if (pullLimiter && pushLimiter && c.req.method === 'POST') {
364
+ try {
365
+ const parsed = await c.req.raw.clone().json();
366
+ if (parsed !== null && typeof parsed === 'object') {
367
+ shouldApplyPull = Reflect.get(parsed, 'pull') !== undefined;
368
+ shouldApplyPush = Reflect.get(parsed, 'push') !== undefined;
369
+ }
370
+ } catch {
371
+ // Keep default behavior and apply both limiters when payload parsing fails.
372
+ }
373
+ }
374
+
375
+ if (pullLimiter && shouldApplyPull && pushLimiter && shouldApplyPush) {
376
+ return pullLimiter(c, async () => {
377
+ const pushResult = await pushLimiter(c, next);
378
+ if (pushResult instanceof Response) {
379
+ c.res = pushResult;
380
+ }
381
+ });
382
+ }
383
+ if (pullLimiter && shouldApplyPull) {
384
+ return pullLimiter(c, next);
385
+ }
386
+ if (pushLimiter && shouldApplyPush) {
387
+ return pushLimiter(c, next);
388
+ }
389
+
390
+ return next();
391
+ };
392
+
393
+ routes.use('/', syncRateLimiter);
394
+ }
395
+
396
+ // -------------------------------------------------------------------------
397
+ // GET /health
398
+ // -------------------------------------------------------------------------
399
+
400
+ routes.get('/health', (c) => {
401
+ return c.json({
402
+ status: 'healthy',
403
+ timestamp: new Date().toISOString(),
404
+ });
405
+ });
406
+
407
+ // -------------------------------------------------------------------------
408
+ // POST / (combined push + pull in one round-trip)
409
+ // -------------------------------------------------------------------------
410
+
411
+ routes.post(
412
+ '/',
413
+ describeRoute({
414
+ tags: ['sync'],
415
+ summary: 'Combined push and pull',
416
+ description:
417
+ 'Perform push and/or pull in a single request to reduce round-trips',
418
+ responses: {
419
+ 200: {
420
+ description: 'Combined sync response',
421
+ content: {
422
+ 'application/json': {
423
+ schema: resolver(SyncCombinedResponseSchema),
424
+ },
425
+ },
426
+ },
427
+ 400: {
428
+ description: 'Invalid request',
429
+ content: {
430
+ 'application/json': { schema: resolver(ErrorResponseSchema) },
431
+ },
432
+ },
433
+ 401: {
434
+ description: 'Unauthenticated',
435
+ content: {
436
+ 'application/json': { schema: resolver(ErrorResponseSchema) },
437
+ },
438
+ },
439
+ },
440
+ }),
441
+ zValidator('json', SyncCombinedRequestSchema),
442
+ async (c) => {
443
+ const auth = await getAuth(c);
444
+ if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
445
+ const partitionId = auth.partitionId ?? 'default';
446
+
447
+ const body = c.req.valid('json');
448
+ const clientId = body.clientId;
449
+
450
+ let pushResponse:
451
+ | undefined
452
+ | Awaited<ReturnType<typeof pushCommit>>['response'];
453
+ let pullResponse: undefined | PullResult['response'];
454
+
455
+ // --- Push phase ---
456
+ if (body.push) {
457
+ const pushOps = body.push.operations ?? [];
458
+ if (pushOps.length > maxOperationsPerPush) {
459
+ return c.json(
460
+ {
461
+ error: 'TOO_MANY_OPERATIONS',
462
+ message: `Maximum ${maxOperationsPerPush} operations per push`,
463
+ },
464
+ 400
465
+ );
466
+ }
467
+
468
+ const timer = createSyncTimer();
469
+
470
+ const pushed = await pushCommit({
471
+ db: options.db,
472
+ dialect: options.dialect,
473
+ shapes: handlerRegistry,
474
+ actorId: auth.actorId,
475
+ partitionId,
476
+ request: {
477
+ clientId,
478
+ clientCommitId: body.push.clientCommitId,
479
+ operations: body.push.operations,
480
+ schemaVersion: body.push.schemaVersion,
481
+ },
482
+ });
483
+
484
+ const pushDurationMs = timer();
485
+
486
+ logSyncEvent({
487
+ event: 'sync.push',
488
+ userId: auth.actorId,
489
+ durationMs: pushDurationMs,
490
+ operationCount: pushOps.length,
491
+ status: pushed.response.status,
492
+ commitSeq: pushed.response.commitSeq,
493
+ });
494
+
495
+ recordRequestEventInBackground({
496
+ eventType: 'push',
497
+ actorId: auth.actorId,
498
+ clientId,
499
+ transportPath: readTransportPath(c),
500
+ statusCode: 200,
501
+ outcome: pushed.response.status,
502
+ durationMs: pushDurationMs,
503
+ commitSeq: pushed.response.commitSeq,
504
+ operationCount: pushOps.length,
505
+ tables: pushed.affectedTables,
506
+ });
507
+
508
+ // WS notifications
509
+ if (
510
+ wsConnectionManager &&
511
+ pushed.response.ok === true &&
512
+ pushed.response.status === 'applied' &&
513
+ typeof pushed.response.commitSeq === 'number'
514
+ ) {
515
+ const scopeKeys = applyPartitionToScopeKeys(
516
+ partitionId,
517
+ pushed.scopeKeys
518
+ );
519
+ if (scopeKeys.length > 0) {
520
+ wsConnectionManager.notifyScopeKeys(
521
+ scopeKeys,
522
+ pushed.response.commitSeq,
523
+ {
524
+ excludeClientIds: [clientId],
525
+ changes: pushed.emittedChanges,
526
+ }
527
+ );
528
+
529
+ if (realtimeBroadcaster) {
530
+ realtimeBroadcaster
531
+ .publish({
532
+ type: 'commit',
533
+ commitSeq: pushed.response.commitSeq,
534
+ partitionId,
535
+ scopeKeys,
536
+ sourceInstanceId: instanceId,
537
+ })
538
+ .catch((error) => {
539
+ logAsyncFailureOnce(
540
+ 'sync.realtime.broadcast_publish_failed',
541
+ {
542
+ event: 'sync.realtime.broadcast_publish_failed',
543
+ userId: auth.actorId,
544
+ clientId,
545
+ error:
546
+ error instanceof Error ? error.message : String(error),
547
+ }
548
+ );
549
+ });
550
+ }
551
+ }
552
+ }
553
+
554
+ pushResponse = pushed.response;
555
+ }
556
+
557
+ // --- Pull phase ---
558
+ if (body.pull) {
559
+ if (body.pull.subscriptions.length > maxSubscriptionsPerPull) {
560
+ return c.json(
561
+ {
562
+ error: 'INVALID_REQUEST',
563
+ message: `Too many subscriptions (max ${maxSubscriptionsPerPull})`,
564
+ },
565
+ 400
566
+ );
567
+ }
568
+
569
+ const seenSubscriptionIds = new Set<string>();
570
+ for (const sub of body.pull.subscriptions) {
571
+ const id = sub.id;
572
+ if (seenSubscriptionIds.has(id)) {
573
+ return c.json(
574
+ {
575
+ error: 'INVALID_REQUEST',
576
+ message: `Duplicate subscription id: ${id}`,
577
+ },
578
+ 400
579
+ );
580
+ }
581
+ seenSubscriptionIds.add(id);
582
+ }
583
+
584
+ const request = {
585
+ clientId,
586
+ limitCommits: clampInt(
587
+ body.pull.limitCommits ?? 50,
588
+ 1,
589
+ maxPullLimitCommits
590
+ ),
591
+ limitSnapshotRows: clampInt(
592
+ body.pull.limitSnapshotRows ?? 1000,
593
+ 1,
594
+ maxPullLimitSnapshotRows
595
+ ),
596
+ maxSnapshotPages: clampInt(
597
+ body.pull.maxSnapshotPages ?? 1,
598
+ 1,
599
+ maxPullMaxSnapshotPages
600
+ ),
601
+ dedupeRows: body.pull.dedupeRows === true,
602
+ subscriptions: body.pull.subscriptions.map((sub) => ({
603
+ id: sub.id,
604
+ shape: sub.shape,
605
+ scopes: (sub.scopes ?? {}) as Record<string, string | string[]>,
606
+ params: sub.params as Record<string, unknown>,
607
+ cursor: Math.max(-1, sub.cursor),
608
+ bootstrapState: sub.bootstrapState ?? null,
609
+ })),
610
+ };
611
+
612
+ const timer = createSyncTimer();
613
+
614
+ let pullResult: PullResult;
615
+ try {
616
+ pullResult = await pull({
617
+ db: options.db,
618
+ dialect: options.dialect,
619
+ shapes: handlerRegistry,
620
+ actorId: auth.actorId,
621
+ partitionId,
622
+ request,
623
+ chunkStorage: options.chunkStorage,
624
+ });
625
+ } catch (err) {
626
+ if (err instanceof InvalidSubscriptionScopeError) {
627
+ return c.json(
628
+ { error: 'INVALID_SUBSCRIPTION', message: err.message },
629
+ 400
630
+ );
631
+ }
632
+ throw err;
633
+ }
634
+
635
+ // Fire-and-forget bookkeeping
636
+ void recordClientCursor(options.db, options.dialect, {
637
+ partitionId,
638
+ clientId,
639
+ actorId: auth.actorId,
640
+ cursor: pullResult.clientCursor,
641
+ effectiveScopes: pullResult.effectiveScopes,
642
+ }).catch((error) => {
643
+ logAsyncFailureOnce('sync.client_cursor_record_failed', {
644
+ event: 'sync.client_cursor_record_failed',
645
+ userId: auth.actorId,
646
+ clientId,
647
+ error: error instanceof Error ? error.message : String(error),
648
+ });
649
+ });
650
+
651
+ wsConnectionManager?.updateClientScopeKeys(
652
+ clientId,
653
+ applyPartitionToScopeKeys(
654
+ partitionId,
655
+ scopeValuesToScopeKeys(pullResult.effectiveScopes)
656
+ )
657
+ );
658
+
659
+ const pullDurationMs = timer();
660
+
661
+ logSyncEvent({
662
+ event: 'sync.pull',
663
+ userId: auth.actorId,
664
+ durationMs: pullDurationMs,
665
+ subscriptionCount: pullResult.response.subscriptions.length,
666
+ clientCursor: pullResult.clientCursor,
667
+ });
668
+
669
+ recordRequestEventInBackground({
670
+ eventType: 'pull',
671
+ actorId: auth.actorId,
672
+ clientId,
673
+ transportPath: readTransportPath(c),
674
+ statusCode: 200,
675
+ outcome: 'applied',
676
+ durationMs: pullDurationMs,
677
+ });
678
+
679
+ pullResponse = pullResult.response;
680
+ }
681
+
682
+ return c.json(
683
+ {
684
+ ok: true as const,
685
+ ...(pushResponse ? { push: pushResponse } : {}),
686
+ ...(pullResponse ? { pull: pullResponse } : {}),
687
+ },
688
+ 200
689
+ );
690
+ }
691
+ );
692
+
693
+ // -------------------------------------------------------------------------
694
+ // GET /snapshot-chunks/:chunkId
695
+ // -------------------------------------------------------------------------
696
+
697
+ routes.get(
698
+ '/snapshot-chunks/:chunkId',
699
+ describeRoute({
700
+ tags: ['sync'],
701
+ summary: 'Download snapshot chunk',
702
+ description: 'Download an encoded bootstrap snapshot chunk',
703
+ responses: {
704
+ 200: {
705
+ description: 'Snapshot chunk data (gzip-compressed framed JSON rows)',
706
+ content: {
707
+ 'application/octet-stream': {
708
+ schema: resolver(z.string()),
709
+ },
710
+ },
711
+ },
712
+ 304: {
713
+ description: 'Not modified (cached)',
714
+ },
715
+ 401: {
716
+ description: 'Unauthenticated',
717
+ content: {
718
+ 'application/json': { schema: resolver(ErrorResponseSchema) },
719
+ },
720
+ },
721
+ 403: {
722
+ description: 'Forbidden',
723
+ content: {
724
+ 'application/json': { schema: resolver(ErrorResponseSchema) },
725
+ },
726
+ },
727
+ 404: {
728
+ description: 'Not found',
729
+ content: {
730
+ 'application/json': { schema: resolver(ErrorResponseSchema) },
731
+ },
732
+ },
733
+ },
734
+ }),
735
+ zValidator('param', snapshotChunkParamsSchema),
736
+ async (c) => {
737
+ const auth = await getAuth(c);
738
+ if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
739
+ const partitionId = auth.partitionId ?? 'default';
740
+
741
+ const { chunkId } = c.req.valid('param');
742
+
743
+ const chunk = await readSnapshotChunk(options.db, chunkId, {
744
+ chunkStorage: options.chunkStorage,
745
+ });
746
+ if (!chunk) return c.json({ error: 'NOT_FOUND' }, 404);
747
+ if (chunk.partitionId !== partitionId) {
748
+ return c.json({ error: 'FORBIDDEN' }, 403);
749
+ }
750
+
751
+ const nowIso = new Date().toISOString();
752
+ if (chunk.expiresAt <= nowIso) {
753
+ return c.json({ error: 'NOT_FOUND' }, 404);
754
+ }
755
+
756
+ // Note: Snapshot chunks are created during authorized pull requests
757
+ // and have opaque IDs that expire. Additional authorization is handled
758
+ // at the pull layer via shape-level resolveScopes.
759
+
760
+ const etag = `"sha256:${chunk.sha256}"`;
761
+ const ifNoneMatch = c.req.header('if-none-match');
762
+ if (ifNoneMatch && ifNoneMatch === etag) {
763
+ return new Response(null, {
764
+ status: 304,
765
+ headers: {
766
+ ETag: etag,
767
+ 'Cache-Control': 'private, max-age=0',
768
+ Vary: 'Authorization',
769
+ },
770
+ });
771
+ }
772
+
773
+ return new Response(chunk.body as BodyInit, {
774
+ status: 200,
775
+ headers: {
776
+ 'Content-Type': 'application/octet-stream',
777
+ 'Content-Encoding': 'gzip',
778
+ 'Content-Length': String(chunk.byteLength),
779
+ ETag: etag,
780
+ 'Cache-Control': 'private, max-age=0',
781
+ Vary: 'Authorization',
782
+ 'X-Sync-Chunk-Id': chunk.chunkId,
783
+ 'X-Sync-Chunk-Sha256': chunk.sha256,
784
+ 'X-Sync-Chunk-Encoding': chunk.encoding,
785
+ 'X-Sync-Chunk-Compression': chunk.compression,
786
+ },
787
+ });
788
+ }
789
+ );
790
+
791
+ // -------------------------------------------------------------------------
792
+ // GET /realtime (optional WebSocket wake-ups)
793
+ // -------------------------------------------------------------------------
794
+
795
+ if (wsConnectionManager && websocketConfig?.enabled) {
796
+ routes.get('/realtime', async (c) => {
797
+ const auth = await getAuth(c);
798
+ if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
799
+ const partitionId = auth.partitionId ?? 'default';
800
+
801
+ const clientId = c.req.query('clientId');
802
+ if (!clientId || typeof clientId !== 'string') {
803
+ return c.json(
804
+ {
805
+ error: 'INVALID_REQUEST',
806
+ message: 'clientId query param is required',
807
+ },
808
+ 400
809
+ );
810
+ }
811
+ const realtimeTransportPath = readTransportPath(
812
+ c,
813
+ c.req.query('transportPath')
814
+ );
815
+
816
+ // Load last-known effective scopes for this client (best-effort).
817
+ // Keeps /realtime lightweight and avoids sending large subscription payloads over the URL.
818
+ let initialScopeKeys: string[] = [];
819
+ try {
820
+ const cursorsQ = options.db.selectFrom(
821
+ 'sync_client_cursors'
822
+ ) as SelectQueryBuilder<
823
+ DB,
824
+ 'sync_client_cursors',
825
+ // biome-ignore lint/complexity/noBannedTypes: Kysely uses `{}` as the initial "no selected columns yet" marker.
826
+ {}
827
+ >;
828
+
829
+ const row = await cursorsQ
830
+ .selectAll()
831
+ .where(sql<SqlBool>`partition_id = ${partitionId}`)
832
+ .where(sql<SqlBool>`client_id = ${clientId}`)
833
+ .executeTakeFirst();
834
+
835
+ if (row && row.actor_id !== auth.actorId) {
836
+ return c.json({ error: 'FORBIDDEN' }, 403);
837
+ }
838
+
839
+ const raw = row?.effective_scopes;
840
+ let parsed: unknown = raw;
841
+ if (typeof raw === 'string') {
842
+ try {
843
+ parsed = JSON.parse(raw);
844
+ } catch {
845
+ parsed = null;
846
+ }
847
+ }
848
+
849
+ initialScopeKeys = applyPartitionToScopeKeys(
850
+ partitionId,
851
+ scopeValuesToScopeKeys(parsed)
852
+ );
853
+ } catch {
854
+ // ignore; realtime is best-effort
855
+ }
856
+
857
+ const maxConnectionsTotal = websocketConfig.maxConnectionsTotal ?? 5000;
858
+ const maxConnectionsPerClient =
859
+ websocketConfig.maxConnectionsPerClient ?? 3;
860
+
861
+ if (
862
+ maxConnectionsTotal > 0 &&
863
+ wsConnectionManager.getTotalConnections() >= maxConnectionsTotal
864
+ ) {
865
+ logSyncEvent({
866
+ event: 'sync.realtime.rejected',
867
+ userId: auth.actorId,
868
+ reason: 'max_total',
869
+ });
870
+ return c.json({ error: 'WEBSOCKET_CONNECTION_LIMIT_TOTAL' }, 429);
871
+ }
872
+
873
+ if (
874
+ maxConnectionsPerClient > 0 &&
875
+ wsConnectionManager.getConnectionCount(clientId) >=
876
+ maxConnectionsPerClient
877
+ ) {
878
+ logSyncEvent({
879
+ event: 'sync.realtime.rejected',
880
+ userId: auth.actorId,
881
+ reason: 'max_per_client',
882
+ });
883
+ return c.json({ error: 'WEBSOCKET_CONNECTION_LIMIT_CLIENT' }, 429);
884
+ }
885
+
886
+ logSyncEvent({ event: 'sync.realtime.connect', userId: auth.actorId });
887
+
888
+ let unregister: (() => void) | null = null;
889
+ let connRef: ReturnType<typeof createWebSocketConnection> | null = null;
890
+
891
+ const upgradeWebSocket = websocketConfig.upgradeWebSocket;
892
+ if (!upgradeWebSocket) {
893
+ return c.json({ error: 'WEBSOCKET_NOT_CONFIGURED' }, 500);
894
+ }
895
+
896
+ return upgradeWebSocket(c, {
897
+ onOpen(_evt, ws) {
898
+ const conn = createWebSocketConnection(ws, {
899
+ actorId: auth.actorId,
900
+ clientId,
901
+ transportPath: realtimeTransportPath,
902
+ });
903
+ connRef = conn;
904
+
905
+ unregister = wsConnectionManager.register(conn, initialScopeKeys);
906
+ conn.sendHeartbeat();
907
+ },
908
+ onClose(_evt, _ws) {
909
+ unregister?.();
910
+ unregister = null;
911
+ connRef = null;
912
+ logSyncEvent({
913
+ event: 'sync.realtime.disconnect',
914
+ userId: auth.actorId,
915
+ });
916
+ },
917
+ onError(_evt, _ws) {
918
+ unregister?.();
919
+ unregister = null;
920
+ connRef = null;
921
+ logSyncEvent({
922
+ event: 'sync.realtime.disconnect',
923
+ userId: auth.actorId,
924
+ });
925
+ },
926
+ onMessage(evt, _ws) {
927
+ if (!connRef) return;
928
+ try {
929
+ const raw =
930
+ typeof evt.data === 'string' ? evt.data : String(evt.data);
931
+ const msg = JSON.parse(raw);
932
+ if (!msg || typeof msg !== 'object') return;
933
+
934
+ if (msg.type === 'push') {
935
+ void handleWsPush(
936
+ msg,
937
+ connRef,
938
+ auth.actorId,
939
+ partitionId,
940
+ clientId
941
+ );
942
+ return;
943
+ }
944
+
945
+ if (msg.type !== 'presence' || !msg.scopeKey) return;
946
+
947
+ const scopeKey = normalizeScopeKeyForPartition(
948
+ partitionId,
949
+ String(msg.scopeKey)
950
+ );
951
+ if (!scopeKey) return;
952
+
953
+ switch (msg.action) {
954
+ case 'join':
955
+ if (
956
+ !wsConnectionManager.joinPresence(
957
+ clientId,
958
+ scopeKey,
959
+ msg.metadata
960
+ )
961
+ ) {
962
+ logSyncEvent({
963
+ event: 'sync.realtime.presence.rejected',
964
+ userId: auth.actorId,
965
+ reason: 'scope_not_authorized',
966
+ scopeKey,
967
+ });
968
+ return;
969
+ }
970
+ // Send presence snapshot back to the joining client
971
+ {
972
+ const entries = wsConnectionManager.getPresence(scopeKey);
973
+ connRef.sendPresence({
974
+ action: 'snapshot',
975
+ scopeKey,
976
+ entries,
977
+ });
978
+ }
979
+ break;
980
+ case 'leave':
981
+ wsConnectionManager.leavePresence(clientId, scopeKey);
982
+ break;
983
+ case 'update':
984
+ if (
985
+ !wsConnectionManager.updatePresenceMetadata(
986
+ clientId,
987
+ scopeKey,
988
+ msg.metadata ?? {}
989
+ ) &&
990
+ !wsConnectionManager.isClientSubscribedToScopeKey(
991
+ clientId,
992
+ scopeKey
993
+ )
994
+ ) {
995
+ logSyncEvent({
996
+ event: 'sync.realtime.presence.rejected',
997
+ userId: auth.actorId,
998
+ reason: 'scope_not_authorized',
999
+ scopeKey,
1000
+ });
1001
+ }
1002
+ break;
1003
+ }
1004
+ } catch {
1005
+ // Ignore malformed messages
1006
+ }
1007
+ },
1008
+ });
1009
+ });
1010
+ }
1011
+
1012
+ return routes;
1013
+
1014
+ async function handleRealtimeEvent(event: SyncRealtimeEvent): Promise<void> {
1015
+ if (!wsConnectionManager) return;
1016
+ if (event.type !== 'commit') return;
1017
+ if (event.sourceInstanceId && event.sourceInstanceId === instanceId) return;
1018
+
1019
+ const commitSeq = event.commitSeq;
1020
+ const partitionId = event.partitionId ?? 'default';
1021
+ const scopeKeys =
1022
+ event.scopeKeys && event.scopeKeys.length > 0
1023
+ ? event.scopeKeys
1024
+ : await readCommitScopeKeys(options.db, commitSeq, partitionId);
1025
+
1026
+ if (scopeKeys.length === 0) return;
1027
+ wsConnectionManager.notifyScopeKeys(scopeKeys, commitSeq);
1028
+ }
1029
+
1030
  /**
   * Handle a `push` message received over an authenticated WebSocket.
   *
   * Validates the payload against SyncPushRequestSchema (minus `clientId`,
   * which is taken from the connection, not the message), enforces the
   * per-push operation limit, applies the commit via `pushCommit`, emits
   * log/metrics events, fans the resulting change out to other local WS
   * clients and (when configured) to other server instances via the
   * realtime broadcaster, and finally replies to the sender with a push
   * response correlated by `requestId`.
   *
   * Messages without a string `requestId` are dropped silently — there is
   * no way to correlate a response back to the sender.
   */
  async function handleWsPush(
    msg: Record<string, unknown>,
    conn: WebSocketConnection,
    actorId: string,
    partitionId: string,
    clientId: string
  ): Promise<void> {
    const requestId = typeof msg.requestId === 'string' ? msg.requestId : '';
    if (!requestId) return;

    try {
      // Validate the push payload. `clientId` is omitted from the schema
      // because it comes from the connection, not the wire message.
      const parsed = SyncPushRequestSchema.omit({ clientId: true }).safeParse(
        msg
      );
      if (!parsed.success) {
        conn.sendPushResponse({
          requestId,
          ok: false,
          status: 'rejected',
          results: [
            { opIndex: 0, status: 'error', error: 'Invalid push payload' },
          ],
        });
        return;
      }

      const pushOps = parsed.data.operations ?? [];
      // Enforce the same operation cap the HTTP push endpoint presumably
      // uses (shared `maxOperationsPerPush` closure value).
      if (pushOps.length > maxOperationsPerPush) {
        conn.sendPushResponse({
          requestId,
          ok: false,
          status: 'rejected',
          results: [
            {
              opIndex: 0,
              status: 'error',
              error: `Maximum ${maxOperationsPerPush} operations per push`,
            },
          ],
        });
        return;
      }

      const timer = createSyncTimer();

      // Apply the commit. `pushed.response` carries the per-operation
      // results; `pushed.scopeKeys` the scopes affected by the commit.
      const pushed = await pushCommit({
        db: options.db,
        dialect: options.dialect,
        shapes: handlerRegistry,
        actorId,
        partitionId,
        request: {
          clientId,
          clientCommitId: parsed.data.clientCommitId,
          operations: parsed.data.operations,
          schemaVersion: parsed.data.schemaVersion,
        },
      });

      const pushDurationMs = timer();

      logSyncEvent({
        event: 'sync.push',
        userId: actorId,
        durationMs: pushDurationMs,
        operationCount: pushOps.length,
        status: pushed.response.status,
        commitSeq: pushed.response.commitSeq,
      });

      // Fire-and-forget request accounting (does not block the response).
      recordRequestEventInBackground({
        eventType: 'push',
        actorId,
        clientId,
        transportPath: conn.transportPath,
        statusCode: 200,
        outcome: pushed.response.status,
        durationMs: pushDurationMs,
        commitSeq: pushed.response.commitSeq,
        operationCount: pushOps.length,
        tables: pushed.affectedTables,
      });

      // WS notifications to other clients — only for commits that were
      // actually applied and received a numeric commit sequence.
      if (
        wsConnectionManager &&
        pushed.response.ok === true &&
        pushed.response.status === 'applied' &&
        typeof pushed.response.commitSeq === 'number'
      ) {
        const scopeKeys = applyPartitionToScopeKeys(
          partitionId,
          pushed.scopeKeys
        );
        if (scopeKeys.length > 0) {
          // Notify local subscribers, excluding the pushing client itself.
          wsConnectionManager.notifyScopeKeys(
            scopeKeys,
            pushed.response.commitSeq,
            {
              excludeClientIds: [clientId],
              changes: pushed.emittedChanges,
            }
          );

          // Propagate to other server instances. Best-effort: failures are
          // logged (once per failure kind) but never fail the push.
          if (realtimeBroadcaster) {
            realtimeBroadcaster
              .publish({
                type: 'commit',
                commitSeq: pushed.response.commitSeq,
                partitionId,
                scopeKeys,
                sourceInstanceId: instanceId,
              })
              .catch((error) => {
                logAsyncFailureOnce('sync.realtime.broadcast_publish_failed', {
                  event: 'sync.realtime.broadcast_publish_failed',
                  userId: actorId,
                  clientId,
                  error: error instanceof Error ? error.message : String(error),
                });
              });
          }
        }
      }

      // Reply to the pushing client with the commit outcome.
      conn.sendPushResponse({
        requestId,
        ok: pushed.response.ok,
        status: pushed.response.status,
        commitSeq: pushed.response.commitSeq,
        results: pushed.response.results,
      });
    } catch (err) {
      captureSyncException(err, {
        event: 'sync.realtime.push_failed',
        requestId,
        clientId,
        actorId,
        partitionId,
      });
      // Surface the failure to the client as a rejected push; the raw
      // Error message is forwarded, anything else becomes a generic 500.
      const message =
        err instanceof Error ? err.message : 'Internal server error';
      conn.sendPushResponse({
        requestId,
        ok: false,
        status: 'rejected',
        results: [{ opIndex: 0, status: 'error', error: message }],
      });
    }
  }
1181
+ }
1182
+
1183
+ export function getSyncWebSocketConnectionManager(
1184
+ routes: Hono
1185
+ ): WebSocketConnectionManager | undefined {
1186
+ return wsConnectionManagerMap.get(routes);
1187
+ }
1188
+
1189
+ export function getSyncRealtimeUnsubscribe(
1190
+ routes: Hono
1191
+ ): (() => void) | undefined {
1192
+ return realtimeUnsubscribeMap.get(routes);
1193
+ }
1194
+
1195
+ function clampInt(value: number, min: number, max: number): number {
1196
+ return Math.max(min, Math.min(max, value));
1197
+ }
1198
+
1199
+ function readTransportPath(
1200
+ c: Context,
1201
+ queryValue?: string | null
1202
+ ): 'direct' | 'relay' {
1203
+ if (queryValue === 'relay' || queryValue === 'direct') {
1204
+ return queryValue;
1205
+ }
1206
+
1207
+ const headerValue = c.req.header('x-syncular-transport-path');
1208
+ if (headerValue === 'relay' || headerValue === 'direct') {
1209
+ return headerValue;
1210
+ }
1211
+
1212
+ return 'direct';
1213
+ }
1214
+
1215
+ function scopeValuesToScopeKeys(scopes: unknown): string[] {
1216
+ if (!scopes || typeof scopes !== 'object') return [];
1217
+ const scopeKeys = new Set<string>();
1218
+
1219
+ for (const [key, value] of Object.entries(scopes)) {
1220
+ if (!value) continue;
1221
+ const prefix = key.replace(/_id$/, '');
1222
+
1223
+ if (Array.isArray(value)) {
1224
+ for (const v of value) {
1225
+ if (typeof v !== 'string') continue;
1226
+ if (!v) continue;
1227
+ scopeKeys.add(`${prefix}:${v}`);
1228
+ }
1229
+ continue;
1230
+ }
1231
+
1232
+ if (typeof value === 'string') {
1233
+ if (!value) continue;
1234
+ scopeKeys.add(`${prefix}:${value}`);
1235
+ continue;
1236
+ }
1237
+
1238
+ // Best-effort: stringify scalars.
1239
+ if (typeof value === 'number' || typeof value === 'bigint') {
1240
+ scopeKeys.add(`${prefix}:${String(value)}`);
1241
+ }
1242
+ }
1243
+
1244
+ return Array.from(scopeKeys);
1245
+ }
1246
+
1247
+ function partitionScopeKey(partitionId: string, scopeKey: string): string {
1248
+ return `${partitionId}::${scopeKey}`;
1249
+ }
1250
+
1251
+ function applyPartitionToScopeKeys(
1252
+ partitionId: string,
1253
+ scopeKeys: readonly string[]
1254
+ ): string[] {
1255
+ const prefixed = new Set<string>();
1256
+ for (const scopeKey of scopeKeys) {
1257
+ if (!scopeKey) continue;
1258
+ if (scopeKey.startsWith(`${partitionId}::`)) {
1259
+ prefixed.add(scopeKey);
1260
+ continue;
1261
+ }
1262
+ prefixed.add(partitionScopeKey(partitionId, scopeKey));
1263
+ }
1264
+ return Array.from(prefixed);
1265
+ }
1266
+
1267
+ function normalizeScopeKeyForPartition(
1268
+ partitionId: string,
1269
+ scopeKey: string
1270
+ ): string {
1271
+ if (scopeKey.startsWith(`${partitionId}::`)) return scopeKey;
1272
+ if (scopeKey.includes('::')) return '';
1273
+ return partitionScopeKey(partitionId, scopeKey);
1274
+ }
1275
+
1276
/**
 * Derive the partition-prefixed scope keys affected by a stored commit.
 *
 * Reads the `scopes` column of every `sync_changes` row for the given
 * commit/partition, converts each scopes object to flat scope-key strings
 * via `scopeValuesToScopeKeys`, applies the partition prefix, and returns
 * the deduplicated union.
 *
 * @param db - Kysely handle for a database containing the sync tables.
 * @param commitSeq - Commit sequence number to look up.
 * @param partitionId - Partition the commit belongs to.
 * @returns Unique, partition-prefixed scope keys (empty if no rows match).
 */
async function readCommitScopeKeys<DB extends SyncCoreDb>(
  db: Kysely<DB>,
  commitSeq: number,
  partitionId: string
): Promise<string[]> {
  // Read scopes from the JSONB column and convert to scope strings
  const rowsResult = await sql<{ scopes: unknown }>`
    select scopes
    from ${sql.table('sync_changes')}
    where commit_seq = ${commitSeq}
    and partition_id = ${partitionId}
  `.execute(db);
  const rows = rowsResult.rows;

  const scopeKeys = new Set<string>();

  for (const row of rows) {
    // Some drivers return JSON columns as text; parse in that case.
    // NOTE(review): assumes stringified scopes are valid JSON — a malformed
    // row would make JSON.parse throw out of this function.
    const scopes =
      typeof row.scopes === 'string' ? JSON.parse(row.scopes) : row.scopes;

    for (const k of applyPartitionToScopeKeys(
      partitionId,
      scopeValuesToScopeKeys(scopes)
    )) {
      scopeKeys.add(k);
    }
  }

  return Array.from(scopeKeys);
}