@syncular/server-hono 0.0.1 → 0.0.2-127

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +23 -0
  2. package/dist/api-key-auth.js +1 -1
  3. package/dist/blobs.d.ts.map +1 -1
  4. package/dist/blobs.js +31 -8
  5. package/dist/blobs.js.map +1 -1
  6. package/dist/console/index.d.ts +1 -1
  7. package/dist/console/index.d.ts.map +1 -1
  8. package/dist/console/index.js +1 -1
  9. package/dist/console/index.js.map +1 -1
  10. package/dist/console/routes.d.ts +1 -2
  11. package/dist/console/routes.d.ts.map +1 -1
  12. package/dist/console/routes.js +65 -2
  13. package/dist/console/routes.js.map +1 -1
  14. package/dist/console/schemas.d.ts +138 -496
  15. package/dist/console/schemas.d.ts.map +1 -1
  16. package/dist/console/schemas.js +3 -9
  17. package/dist/console/schemas.js.map +1 -1
  18. package/dist/create-server.d.ts +3 -1
  19. package/dist/create-server.d.ts.map +1 -1
  20. package/dist/create-server.js +4 -3
  21. package/dist/create-server.js.map +1 -1
  22. package/dist/index.d.ts +3 -3
  23. package/dist/index.d.ts.map +1 -1
  24. package/dist/index.js +9 -9
  25. package/dist/index.js.map +1 -1
  26. package/dist/proxy/connection-manager.d.ts +1 -1
  27. package/dist/proxy/connection-manager.d.ts.map +1 -1
  28. package/dist/proxy/connection-manager.js +1 -1
  29. package/dist/proxy/connection-manager.js.map +1 -1
  30. package/dist/proxy/index.js +2 -2
  31. package/dist/proxy/routes.d.ts +2 -2
  32. package/dist/proxy/routes.d.ts.map +1 -1
  33. package/dist/proxy/routes.js +3 -3
  34. package/dist/proxy/routes.js.map +1 -1
  35. package/dist/routes.d.ts +2 -2
  36. package/dist/routes.d.ts.map +1 -1
  37. package/dist/routes.js +447 -260
  38. package/dist/routes.js.map +1 -1
  39. package/dist/ws.d.ts +40 -3
  40. package/dist/ws.d.ts.map +1 -1
  41. package/dist/ws.js +51 -6
  42. package/dist/ws.js.map +1 -1
  43. package/package.json +32 -9
  44. package/src/__tests__/pull-chunk-storage.test.ts +415 -27
  45. package/src/__tests__/realtime-bridge.test.ts +3 -1
  46. package/src/__tests__/sync-rate-limit-routing.test.ts +181 -0
  47. package/src/blobs.ts +31 -8
  48. package/src/console/index.ts +1 -0
  49. package/src/console/routes.ts +78 -25
  50. package/src/console/schemas.ts +0 -31
  51. package/src/create-server.ts +6 -0
  52. package/src/index.ts +12 -3
  53. package/src/proxy/connection-manager.ts +2 -2
  54. package/src/proxy/routes.ts +3 -3
  55. package/src/routes.ts +570 -327
  56. package/src/ws.ts +76 -13
package/src/routes.ts CHANGED
@@ -2,20 +2,19 @@
2
2
  * @syncular/server-hono - Sync routes for Hono
3
3
  *
4
4
  * Provides:
5
- * - POST /pull (commit stream + optional bootstrap snapshots)
6
- * - POST /push (commit ingestion)
5
+ * - POST / (combined push + pull in one round-trip)
7
6
  * - GET /snapshot-chunks/:chunkId (download encoded snapshot chunks)
8
7
  * - GET /realtime (optional WebSocket "wake up" notifications)
9
8
  */
10
9
 
11
10
  import {
11
+ captureSyncException,
12
12
  createSyncTimer,
13
13
  ErrorResponseSchema,
14
14
  logSyncEvent,
15
- SyncPullRequestSchema,
16
- SyncPullResponseSchema,
15
+ SyncCombinedRequestSchema,
16
+ SyncCombinedResponseSchema,
17
17
  SyncPushRequestSchema,
18
- SyncPushResponseSchema,
19
18
  } from '@syncular/core';
20
19
  import type {
21
20
  ServerSyncDialect,
@@ -28,8 +27,6 @@ import type {
28
27
  import {
29
28
  type CompactOptions,
30
29
  InvalidSubscriptionScopeError,
31
- maybeCompactChanges,
32
- maybePruneSync,
33
30
  type PruneOptions,
34
31
  type PullResult,
35
32
  pull,
@@ -38,8 +35,9 @@ import {
38
35
  recordClientCursor,
39
36
  TableRegistry,
40
37
  } from '@syncular/server';
41
- import type { Context } from 'hono';
38
+ import type { Context, MiddlewareHandler } from 'hono';
42
39
  import { Hono } from 'hono';
40
+
43
41
  import type { UpgradeWebSocket } from 'hono/ws';
44
42
  import { describeRoute, resolver, validator as zValidator } from 'hono-openapi';
45
43
  import {
@@ -54,7 +52,11 @@ import {
54
52
  DEFAULT_SYNC_RATE_LIMITS,
55
53
  type SyncRateLimitConfig,
56
54
  } from './rate-limit';
57
- import { createWebSocketConnection, WebSocketConnectionManager } from './ws';
55
+ import {
56
+ createWebSocketConnection,
57
+ type WebSocketConnection,
58
+ WebSocketConnectionManager,
59
+ } from './ws';
58
60
 
59
61
  /**
60
62
  * WeakMaps for storing Hono-instance-specific data without augmenting the type.
@@ -64,6 +66,7 @@ const realtimeUnsubscribeMap = new WeakMap<Hono, () => void>();
64
66
 
65
67
  export interface SyncAuthResult {
66
68
  actorId: string;
69
+ partitionId?: string;
67
70
  }
68
71
 
69
72
  /**
@@ -186,6 +189,14 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
186
189
  options: CreateSyncRoutesOptions<DB>
187
190
  ): Hono {
188
191
  const routes = new Hono();
192
+ routes.onError((error, c) => {
193
+ captureSyncException(error, {
194
+ event: 'sync.route.unhandled',
195
+ method: c.req.method,
196
+ path: c.req.path,
197
+ });
198
+ return c.text('Internal Server Error', 500);
199
+ });
189
200
  const handlerRegistry = new TableRegistry<DB>();
190
201
  for (const handler of options.handlers) {
191
202
  handlerRegistry.register(handler);
@@ -229,11 +240,30 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
229
240
  (typeof crypto !== 'undefined' && 'randomUUID' in crypto
230
241
  ? crypto.randomUUID()
231
242
  : `${Date.now()}-${Math.random().toString(16).slice(2)}`);
243
+ const loggedAsyncFailureKeys = new Set<string>();
244
+ const logAsyncFailureOnce = (
245
+ key: string,
246
+ event: {
247
+ event: string;
248
+ error: string;
249
+ [key: string]: unknown;
250
+ }
251
+ ) => {
252
+ if (loggedAsyncFailureKeys.has(key)) return;
253
+ loggedAsyncFailureKeys.add(key);
254
+ logSyncEvent(event);
255
+ };
232
256
 
233
257
  if (wsConnectionManager && realtimeBroadcaster) {
234
258
  const unsubscribe = realtimeBroadcaster.subscribe(
235
259
  (event: SyncRealtimeEvent) => {
236
- void handleRealtimeEvent(event).catch(() => {});
260
+ void handleRealtimeEvent(event).catch((error) => {
261
+ logAsyncFailureOnce('sync.realtime.broadcast_delivery_failed', {
262
+ event: 'sync.realtime.broadcast_delivery_failed',
263
+ error: error instanceof Error ? error.message : String(error),
264
+ sourceEventType: event.type,
265
+ });
266
+ });
237
267
  }
238
268
  );
239
269
 
@@ -244,7 +274,7 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
244
274
  // Request event recording (for console inspector)
245
275
  // -------------------------------------------------------------------------
246
276
 
247
- const recordRequestEvent = async (event: {
277
+ type RequestEvent = {
248
278
  eventType: 'push' | 'pull';
249
279
  actorId: string;
250
280
  clientId: string;
@@ -257,25 +287,44 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
257
287
  rowCount?: number | null;
258
288
  tables?: string[];
259
289
  errorMessage?: string | null;
260
- }) => {
261
- try {
262
- const tablesValue = options.dialect.arrayToDb(event.tables ?? []);
263
- await sql`
264
- INSERT INTO sync_request_events (
265
- event_type, actor_id, client_id, status_code, outcome,
266
- duration_ms, commit_seq, operation_count, row_count,
267
- tables, error_message, transport_path
268
- ) VALUES (
269
- ${event.eventType}, ${event.actorId}, ${event.clientId},
270
- ${event.statusCode}, ${event.outcome}, ${event.durationMs},
271
- ${event.commitSeq ?? null}, ${event.operationCount ?? null},
272
- ${event.rowCount ?? null}, ${tablesValue}, ${event.errorMessage ?? null},
273
- ${event.transportPath}
274
- )
275
- `.execute(options.db);
276
- } catch {
277
- // Silently ignore - event recording should not block sync
278
- }
290
+ };
291
+
292
+ const recordRequestEvent = async (event: RequestEvent) => {
293
+ const tablesValue = options.dialect.arrayToDb(event.tables ?? []);
294
+ await sql`
295
+ INSERT INTO sync_request_events (
296
+ event_type, actor_id, client_id, status_code, outcome,
297
+ duration_ms, commit_seq, operation_count, row_count,
298
+ tables, error_message, transport_path
299
+ ) VALUES (
300
+ ${event.eventType}, ${event.actorId}, ${event.clientId},
301
+ ${event.statusCode}, ${event.outcome}, ${event.durationMs},
302
+ ${event.commitSeq ?? null}, ${event.operationCount ?? null},
303
+ ${event.rowCount ?? null}, ${tablesValue}, ${event.errorMessage ?? null},
304
+ ${event.transportPath}
305
+ )
306
+ `.execute(options.db);
307
+ };
308
+
309
+ const recordRequestEventInBackground = (event: RequestEvent): void => {
310
+ void recordRequestEvent(event).catch((error) => {
311
+ logAsyncFailureOnce('sync.request_event_record_failed', {
312
+ event: 'sync.request_event_record_failed',
313
+ userId: event.actorId,
314
+ clientId: event.clientId,
315
+ requestEventType: event.eventType,
316
+ error: error instanceof Error ? error.message : String(error),
317
+ });
318
+ });
319
+ };
320
+
321
+ const authCache = new WeakMap<Context, Promise<SyncAuthResult | null>>();
322
+ const getAuth = (c: Context): Promise<SyncAuthResult | null> => {
323
+ const cached = authCache.get(c);
324
+ if (cached) return cached;
325
+ const pending = options.authenticate(c);
326
+ authCache.set(c, pending);
327
+ return pending;
279
328
  };
280
329
 
281
330
  // -------------------------------------------------------------------------
@@ -296,17 +345,52 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
296
345
  return createRateLimiter({
297
346
  ...limitConfig,
298
347
  keyGenerator: async (c) => {
299
- const auth = await options.authenticate(c);
348
+ const auth = await getAuth(c);
300
349
  return auth?.actorId ?? null;
301
350
  },
302
351
  });
303
352
  };
304
353
 
305
354
  const pullLimiter = createAuthBasedRateLimiter(pullRateLimit);
306
- if (pullLimiter) routes.use('/pull', pullLimiter);
307
-
308
355
  const pushLimiter = createAuthBasedRateLimiter(pushRateLimit);
309
- if (pushLimiter) routes.use('/push', pushLimiter);
356
+
357
+ const syncRateLimiter: MiddlewareHandler = async (c, next) => {
358
+ if (!pullLimiter && !pushLimiter) return next();
359
+
360
+ let shouldApplyPull = pullLimiter !== null;
361
+ let shouldApplyPush = pushLimiter !== null;
362
+
363
+ if (pullLimiter && pushLimiter && c.req.method === 'POST') {
364
+ try {
365
+ const parsed = await c.req.raw.clone().json();
366
+ if (parsed !== null && typeof parsed === 'object') {
367
+ shouldApplyPull = Reflect.get(parsed, 'pull') !== undefined;
368
+ shouldApplyPush = Reflect.get(parsed, 'push') !== undefined;
369
+ }
370
+ } catch {
371
+ // Keep default behavior and apply both limiters when payload parsing fails.
372
+ }
373
+ }
374
+
375
+ if (pullLimiter && shouldApplyPull && pushLimiter && shouldApplyPush) {
376
+ return pullLimiter(c, async () => {
377
+ const pushResult = await pushLimiter(c, next);
378
+ if (pushResult instanceof Response) {
379
+ c.res = pushResult;
380
+ }
381
+ });
382
+ }
383
+ if (pullLimiter && shouldApplyPull) {
384
+ return pullLimiter(c, next);
385
+ }
386
+ if (pushLimiter && shouldApplyPush) {
387
+ return pushLimiter(c, next);
388
+ }
389
+
390
+ return next();
391
+ };
392
+
393
+ routes.use('/', syncRateLimiter);
310
394
  }
311
395
 
312
396
  // -------------------------------------------------------------------------
@@ -321,21 +405,23 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
321
405
  });
322
406
 
323
407
  // -------------------------------------------------------------------------
324
- // POST /pull
408
+ // POST / (combined push + pull in one round-trip)
325
409
  // -------------------------------------------------------------------------
326
410
 
327
411
  routes.post(
328
- '/pull',
412
+ '/',
329
413
  describeRoute({
330
414
  tags: ['sync'],
331
- summary: 'Pull commits and snapshots',
415
+ summary: 'Combined push and pull',
332
416
  description:
333
- 'Pull commits and optional bootstrap snapshots for subscriptions',
417
+ 'Perform push and/or pull in a single request to reduce round-trips',
334
418
  responses: {
335
419
  200: {
336
- description: 'Successful pull response',
420
+ description: 'Combined sync response',
337
421
  content: {
338
- 'application/json': { schema: resolver(SyncPullResponseSchema) },
422
+ 'application/json': {
423
+ schema: resolver(SyncCombinedResponseSchema),
424
+ },
339
425
  },
340
426
  },
341
427
  400: {
@@ -352,190 +438,255 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
352
438
  },
353
439
  },
354
440
  }),
355
- zValidator('json', SyncPullRequestSchema),
441
+ zValidator('json', SyncCombinedRequestSchema),
356
442
  async (c) => {
357
- const auth = await options.authenticate(c);
443
+ const auth = await getAuth(c);
358
444
  if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
445
+ const partitionId = auth.partitionId ?? 'default';
359
446
 
360
447
  const body = c.req.valid('json');
448
+ const clientId = body.clientId;
361
449
 
362
- const timer = createSyncTimer();
450
+ let pushResponse:
451
+ | undefined
452
+ | Awaited<ReturnType<typeof pushCommit>>['response'];
453
+ let pullResponse: undefined | PullResult['response'];
363
454
 
364
- if (body.subscriptions.length > maxSubscriptionsPerPull) {
365
- return c.json(
366
- {
367
- error: 'INVALID_REQUEST',
368
- message: `Too many subscriptions (max ${maxSubscriptionsPerPull})`,
369
- },
370
- 400
371
- );
372
- }
373
-
374
- // Guardrail: unique subscription ids in a single request.
375
- const seenSubscriptionIds = new Set<string>();
376
- for (const sub of body.subscriptions) {
377
- const id = sub.id;
378
- if (seenSubscriptionIds.has(id)) {
455
+ // --- Push phase ---
456
+ if (body.push) {
457
+ const pushOps = body.push.operations ?? [];
458
+ if (pushOps.length > maxOperationsPerPush) {
379
459
  return c.json(
380
460
  {
381
- error: 'INVALID_REQUEST',
382
- message: `Duplicate subscription id: ${id}`,
461
+ error: 'TOO_MANY_OPERATIONS',
462
+ message: `Maximum ${maxOperationsPerPush} operations per push`,
383
463
  },
384
464
  400
385
465
  );
386
466
  }
387
- seenSubscriptionIds.add(id);
388
- }
389
467
 
390
- const request = {
391
- clientId: body.clientId,
392
- limitCommits: clampInt(body.limitCommits ?? 50, 1, maxPullLimitCommits),
393
- limitSnapshotRows: clampInt(
394
- body.limitSnapshotRows ?? 1000,
395
- 1,
396
- maxPullLimitSnapshotRows
397
- ),
398
- maxSnapshotPages: clampInt(
399
- body.maxSnapshotPages ?? 1,
400
- 1,
401
- maxPullMaxSnapshotPages
402
- ),
403
- dedupeRows: body.dedupeRows === true,
404
- subscriptions: body.subscriptions.map((sub) => ({
405
- id: sub.id,
406
- shape: sub.shape,
407
- scopes: (sub.scopes ?? {}) as Record<string, string | string[]>,
408
- params: sub.params as Record<string, unknown>,
409
- cursor: Math.max(-1, sub.cursor),
410
- bootstrapState: sub.bootstrapState ?? null,
411
- })),
412
- };
413
-
414
- let pullResult: PullResult;
415
- try {
416
- pullResult = await pull({
468
+ const timer = createSyncTimer();
469
+
470
+ const pushed = await pushCommit({
417
471
  db: options.db,
418
472
  dialect: options.dialect,
419
- shapes: handlerRegistry,
473
+ handlers: handlerRegistry,
420
474
  actorId: auth.actorId,
421
- request,
422
- chunkStorage: options.chunkStorage,
475
+ partitionId,
476
+ request: {
477
+ clientId,
478
+ clientCommitId: body.push.clientCommitId,
479
+ operations: body.push.operations,
480
+ schemaVersion: body.push.schemaVersion,
481
+ },
423
482
  });
424
- } catch (err) {
425
- if (err instanceof InvalidSubscriptionScopeError) {
426
- return c.json(
427
- { error: 'INVALID_SUBSCRIPTION', message: err.message },
428
- 400
483
+
484
+ const pushDurationMs = timer();
485
+
486
+ logSyncEvent({
487
+ event: 'sync.push',
488
+ userId: auth.actorId,
489
+ durationMs: pushDurationMs,
490
+ operationCount: pushOps.length,
491
+ status: pushed.response.status,
492
+ commitSeq: pushed.response.commitSeq,
493
+ });
494
+
495
+ recordRequestEventInBackground({
496
+ eventType: 'push',
497
+ actorId: auth.actorId,
498
+ clientId,
499
+ transportPath: readTransportPath(c),
500
+ statusCode: 200,
501
+ outcome: pushed.response.status,
502
+ durationMs: pushDurationMs,
503
+ commitSeq: pushed.response.commitSeq,
504
+ operationCount: pushOps.length,
505
+ tables: pushed.affectedTables,
506
+ });
507
+
508
+ // WS notifications
509
+ if (
510
+ wsConnectionManager &&
511
+ pushed.response.ok === true &&
512
+ pushed.response.status === 'applied' &&
513
+ typeof pushed.response.commitSeq === 'number'
514
+ ) {
515
+ const scopeKeys = applyPartitionToScopeKeys(
516
+ partitionId,
517
+ pushed.scopeKeys
429
518
  );
519
+ if (scopeKeys.length > 0) {
520
+ wsConnectionManager.notifyScopeKeys(
521
+ scopeKeys,
522
+ pushed.response.commitSeq,
523
+ {
524
+ excludeClientIds: [clientId],
525
+ changes: pushed.emittedChanges,
526
+ }
527
+ );
528
+
529
+ if (realtimeBroadcaster) {
530
+ realtimeBroadcaster
531
+ .publish({
532
+ type: 'commit',
533
+ commitSeq: pushed.response.commitSeq,
534
+ partitionId,
535
+ scopeKeys,
536
+ sourceInstanceId: instanceId,
537
+ })
538
+ .catch((error) => {
539
+ logAsyncFailureOnce(
540
+ 'sync.realtime.broadcast_publish_failed',
541
+ {
542
+ event: 'sync.realtime.broadcast_publish_failed',
543
+ userId: auth.actorId,
544
+ clientId,
545
+ error:
546
+ error instanceof Error ? error.message : String(error),
547
+ }
548
+ );
549
+ });
550
+ }
551
+ }
430
552
  }
431
- throw err;
553
+
554
+ pushResponse = pushed.response;
432
555
  }
433
556
 
434
- await recordClientCursor(options.db, options.dialect, {
435
- clientId: request.clientId,
436
- actorId: auth.actorId,
437
- cursor: pullResult.clientCursor,
438
- effectiveScopes: pullResult.effectiveScopes,
439
- });
557
+ // --- Pull phase ---
558
+ if (body.pull) {
559
+ if (body.pull.subscriptions.length > maxSubscriptionsPerPull) {
560
+ return c.json(
561
+ {
562
+ error: 'INVALID_REQUEST',
563
+ message: `Too many subscriptions (max ${maxSubscriptionsPerPull})`,
564
+ },
565
+ 400
566
+ );
567
+ }
440
568
 
441
- // Update WebSocket manager with effective scopes for this client.
442
- // Realtime wake-ups are best-effort; correctness always comes from pull+cursors.
443
- wsConnectionManager?.updateClientScopeKeys(
444
- request.clientId,
445
- scopeValuesToScopeKeys(pullResult.effectiveScopes)
446
- );
569
+ const seenSubscriptionIds = new Set<string>();
570
+ for (const sub of body.pull.subscriptions) {
571
+ const id = sub.id;
572
+ if (seenSubscriptionIds.has(id)) {
573
+ return c.json(
574
+ {
575
+ error: 'INVALID_REQUEST',
576
+ message: `Duplicate subscription id: ${id}`,
577
+ },
578
+ 400
579
+ );
580
+ }
581
+ seenSubscriptionIds.add(id);
582
+ }
447
583
 
448
- const pruneCfg = config.prune;
449
- if (pruneCfg) {
450
- const deletedCommits = await maybePruneSync(options.db, {
451
- minIntervalMs: pruneCfg.minIntervalMs ?? 5 * 60 * 1000,
452
- options: pruneCfg.options,
453
- });
454
- if (deletedCommits > 0) {
455
- logSyncEvent({
456
- event: 'sync.prune',
457
- userId: auth.actorId,
458
- deletedCommits,
584
+ const request = {
585
+ clientId,
586
+ limitCommits: clampInt(
587
+ body.pull.limitCommits ?? 50,
588
+ 1,
589
+ maxPullLimitCommits
590
+ ),
591
+ limitSnapshotRows: clampInt(
592
+ body.pull.limitSnapshotRows ?? 1000,
593
+ 1,
594
+ maxPullLimitSnapshotRows
595
+ ),
596
+ maxSnapshotPages: clampInt(
597
+ body.pull.maxSnapshotPages ?? 1,
598
+ 1,
599
+ maxPullMaxSnapshotPages
600
+ ),
601
+ dedupeRows: body.pull.dedupeRows === true,
602
+ subscriptions: body.pull.subscriptions.map((sub) => ({
603
+ id: sub.id,
604
+ table: sub.table,
605
+ scopes: (sub.scopes ?? {}) as Record<string, string | string[]>,
606
+ params: sub.params as Record<string, unknown>,
607
+ cursor: Math.max(-1, sub.cursor),
608
+ bootstrapState: sub.bootstrapState ?? null,
609
+ })),
610
+ };
611
+
612
+ const timer = createSyncTimer();
613
+
614
+ let pullResult: PullResult;
615
+ try {
616
+ pullResult = await pull({
617
+ db: options.db,
618
+ dialect: options.dialect,
619
+ handlers: handlerRegistry,
620
+ actorId: auth.actorId,
621
+ partitionId,
622
+ request,
623
+ chunkStorage: options.chunkStorage,
459
624
  });
625
+ } catch (err) {
626
+ if (err instanceof InvalidSubscriptionScopeError) {
627
+ return c.json(
628
+ { error: 'INVALID_SUBSCRIPTION', message: err.message },
629
+ 400
630
+ );
631
+ }
632
+ throw err;
460
633
  }
461
- }
462
634
 
463
- const compactCfg = config.compact;
464
- if (compactCfg) {
465
- const deletedChanges = await maybeCompactChanges(options.db, {
466
- dialect: options.dialect,
467
- minIntervalMs: compactCfg.minIntervalMs ?? 30 * 60 * 1000,
468
- options: {
469
- fullHistoryHours: compactCfg.options?.fullHistoryHours ?? 24 * 7,
470
- },
471
- });
472
- if (deletedChanges > 0) {
473
- logSyncEvent({
474
- event: 'sync.compact',
635
+ // Fire-and-forget bookkeeping
636
+ void recordClientCursor(options.db, options.dialect, {
637
+ partitionId,
638
+ clientId,
639
+ actorId: auth.actorId,
640
+ cursor: pullResult.clientCursor,
641
+ effectiveScopes: pullResult.effectiveScopes,
642
+ }).catch((error) => {
643
+ logAsyncFailureOnce('sync.client_cursor_record_failed', {
644
+ event: 'sync.client_cursor_record_failed',
475
645
  userId: auth.actorId,
476
- deletedChanges,
646
+ clientId,
647
+ error: error instanceof Error ? error.message : String(error),
477
648
  });
478
- }
479
- }
649
+ });
480
650
 
481
- const rowCount = pullResult.response.subscriptions.reduce(
482
- (sum: number, s) => {
483
- if (s.bootstrap) {
484
- return (
485
- sum +
486
- (s.snapshots ?? []).reduce(
487
- (ss: number, snap) => ss + (snap.rows?.length ?? 0),
488
- 0
489
- )
490
- );
491
- }
492
- return (
493
- sum +
494
- s.commits.reduce(
495
- (cs: number, commit) => cs + commit.changes.length,
496
- 0
497
- )
498
- );
499
- },
500
- 0
501
- );
651
+ wsConnectionManager?.updateClientScopeKeys(
652
+ clientId,
653
+ applyPartitionToScopeKeys(
654
+ partitionId,
655
+ scopeValuesToScopeKeys(pullResult.effectiveScopes)
656
+ )
657
+ );
502
658
 
503
- const bootstrapCount = pullResult.response.subscriptions.filter(
504
- (s) => s.bootstrap
505
- ).length;
506
- const activeCount = pullResult.response.subscriptions.filter(
507
- (s) => s.status === 'active'
508
- ).length;
659
+ const pullDurationMs = timer();
509
660
 
510
- const pullDurationMs = timer();
661
+ logSyncEvent({
662
+ event: 'sync.pull',
663
+ userId: auth.actorId,
664
+ durationMs: pullDurationMs,
665
+ subscriptionCount: pullResult.response.subscriptions.length,
666
+ clientCursor: pullResult.clientCursor,
667
+ });
511
668
 
512
- logSyncEvent({
513
- event: 'sync.pull',
514
- userId: auth.actorId,
515
- durationMs: pullDurationMs,
516
- rowCount,
517
- subscriptionCount: pullResult.response.subscriptions.length,
518
- activeSubscriptionCount: activeCount,
519
- bootstrapCount,
520
- effectiveTableCount: Object.keys(pullResult.effectiveScopes).length,
521
- clientCursor: pullResult.clientCursor,
522
- });
669
+ recordRequestEventInBackground({
670
+ eventType: 'pull',
671
+ actorId: auth.actorId,
672
+ clientId,
673
+ transportPath: readTransportPath(c),
674
+ statusCode: 200,
675
+ outcome: 'applied',
676
+ durationMs: pullDurationMs,
677
+ });
523
678
 
524
- // Record event for console inspector (non-blocking)
525
- recordRequestEvent({
526
- eventType: 'pull',
527
- actorId: auth.actorId,
528
- clientId: request.clientId,
529
- transportPath: readTransportPath(c),
530
- statusCode: 200,
531
- outcome:
532
- bootstrapCount > 0 ? 'applied' : rowCount > 0 ? 'applied' : 'cached',
533
- durationMs: pullDurationMs,
534
- rowCount,
535
- tables: Object.keys(pullResult.effectiveScopes),
536
- });
679
+ pullResponse = pullResult.response;
680
+ }
537
681
 
538
- return c.json(pullResult.response, 200);
682
+ return c.json(
683
+ {
684
+ ok: true as const,
685
+ ...(pushResponse ? { push: pushResponse } : {}),
686
+ ...(pullResponse ? { pull: pullResponse } : {}),
687
+ },
688
+ 200
689
+ );
539
690
  }
540
691
  );
541
692
 
@@ -551,7 +702,12 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
551
702
  description: 'Download an encoded bootstrap snapshot chunk',
552
703
  responses: {
553
704
  200: {
554
- description: 'Snapshot chunk data (gzip-compressed NDJSON)',
705
+ description: 'Snapshot chunk data (gzip-compressed framed JSON rows)',
706
+ content: {
707
+ 'application/octet-stream': {
708
+ schema: resolver(z.string()),
709
+ },
710
+ },
555
711
  },
556
712
  304: {
557
713
  description: 'Not modified (cached)',
@@ -578,8 +734,9 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
578
734
  }),
579
735
  zValidator('param', snapshotChunkParamsSchema),
580
736
  async (c) => {
581
- const auth = await options.authenticate(c);
737
+ const auth = await getAuth(c);
582
738
  if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
739
+ const partitionId = auth.partitionId ?? 'default';
583
740
 
584
741
  const { chunkId } = c.req.valid('param');
585
742
 
@@ -587,6 +744,9 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
587
744
  chunkStorage: options.chunkStorage,
588
745
  });
589
746
  if (!chunk) return c.json({ error: 'NOT_FOUND' }, 404);
747
+ if (chunk.partitionId !== partitionId) {
748
+ return c.json({ error: 'FORBIDDEN' }, 403);
749
+ }
590
750
 
591
751
  const nowIso = new Date().toISOString();
592
752
  if (chunk.expiresAt <= nowIso) {
@@ -595,7 +755,7 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
595
755
 
596
756
  // Note: Snapshot chunks are created during authorized pull requests
597
757
  // and have opaque IDs that expire. Additional authorization is handled
598
- // at the pull layer via shape-level resolveScopes.
758
+ // at the pull layer via table-level resolveScopes.
599
759
 
600
760
  const etag = `"sha256:${chunk.sha256}"`;
601
761
  const ifNoneMatch = c.req.header('if-none-match');
@@ -613,9 +773,9 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
613
773
  return new Response(chunk.body as BodyInit, {
614
774
  status: 200,
615
775
  headers: {
616
- 'Content-Type': 'application/x-ndjson; charset=utf-8',
776
+ 'Content-Type': 'application/octet-stream',
617
777
  'Content-Encoding': 'gzip',
618
- 'Content-Length': String(chunk.body.length),
778
+ 'Content-Length': String(chunk.byteLength),
619
779
  ETag: etag,
620
780
  'Cache-Control': 'private, max-age=0',
621
781
  Vary: 'Authorization',
@@ -628,134 +788,15 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
628
788
  }
629
789
  );
630
790
 
631
- // -------------------------------------------------------------------------
632
- // POST /push
633
- // -------------------------------------------------------------------------
634
-
635
- routes.post(
636
- '/push',
637
- describeRoute({
638
- tags: ['sync'],
639
- summary: 'Push a commit',
640
- description: 'Push a client commit with operations to the server',
641
- responses: {
642
- 200: {
643
- description: 'Successful push response',
644
- content: {
645
- 'application/json': { schema: resolver(SyncPushResponseSchema) },
646
- },
647
- },
648
- 400: {
649
- description: 'Invalid request',
650
- content: {
651
- 'application/json': { schema: resolver(ErrorResponseSchema) },
652
- },
653
- },
654
- 401: {
655
- description: 'Unauthenticated',
656
- content: {
657
- 'application/json': { schema: resolver(ErrorResponseSchema) },
658
- },
659
- },
660
- },
661
- }),
662
- zValidator('json', SyncPushRequestSchema),
663
- async (c) => {
664
- const auth = await options.authenticate(c);
665
- if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
666
-
667
- const body = c.req.valid('json');
668
-
669
- if (body.operations.length > maxOperationsPerPush) {
670
- return c.json(
671
- {
672
- error: 'TOO_MANY_OPERATIONS',
673
- message: `Maximum ${maxOperationsPerPush} operations per push`,
674
- },
675
- 400
676
- );
677
- }
678
-
679
- const timer = createSyncTimer();
680
-
681
- const pushed = await pushCommit({
682
- db: options.db,
683
- dialect: options.dialect,
684
- shapes: handlerRegistry,
685
- actorId: auth.actorId,
686
- request: body,
687
- });
688
-
689
- const pushDurationMs = timer();
690
-
691
- logSyncEvent({
692
- event: 'sync.push',
693
- userId: auth.actorId,
694
- durationMs: pushDurationMs,
695
- operationCount: body.operations.length,
696
- status: pushed.response.status,
697
- commitSeq: pushed.response.commitSeq,
698
- });
699
-
700
- // Record event for console inspector (non-blocking)
701
- recordRequestEvent({
702
- eventType: 'push',
703
- actorId: auth.actorId,
704
- clientId: body.clientId,
705
- transportPath: readTransportPath(c),
706
- statusCode: 200,
707
- outcome: pushed.response.status,
708
- durationMs: pushDurationMs,
709
- commitSeq: pushed.response.commitSeq,
710
- operationCount: body.operations.length,
711
- tables: pushed.affectedTables,
712
- });
713
-
714
- if (
715
- wsConnectionManager &&
716
- pushed.response.ok === true &&
717
- pushed.response.status === 'applied' &&
718
- typeof pushed.response.commitSeq === 'number'
719
- ) {
720
- const scopeKeys = await readCommitScopeKeys(
721
- options.db,
722
- pushed.response.commitSeq
723
- );
724
-
725
- if (scopeKeys.length > 0) {
726
- wsConnectionManager.notifyScopeKeys(
727
- scopeKeys,
728
- pushed.response.commitSeq,
729
- {
730
- excludeClientIds: [body.clientId],
731
- }
732
- );
733
-
734
- if (realtimeBroadcaster) {
735
- realtimeBroadcaster
736
- .publish({
737
- type: 'commit',
738
- commitSeq: pushed.response.commitSeq,
739
- scopeKeys,
740
- sourceInstanceId: instanceId,
741
- })
742
- .catch(() => {});
743
- }
744
- }
745
- }
746
-
747
- return c.json(pushed.response, 200);
748
- }
749
- );
750
-
751
791
  // -------------------------------------------------------------------------
752
792
  // GET /realtime (optional WebSocket wake-ups)
753
793
  // -------------------------------------------------------------------------
754
794
 
755
795
  if (wsConnectionManager && websocketConfig?.enabled) {
756
796
  routes.get('/realtime', async (c) => {
757
- const auth = await options.authenticate(c);
797
+ const auth = await getAuth(c);
758
798
  if (!auth) return c.json({ error: 'UNAUTHENTICATED' }, 401);
799
+ const partitionId = auth.partitionId ?? 'default';
759
800
 
760
801
  const clientId = c.req.query('clientId');
761
802
  if (!clientId || typeof clientId !== 'string') {
@@ -787,6 +828,7 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
787
828
 
788
829
  const row = await cursorsQ
789
830
  .selectAll()
831
+ .where(sql<SqlBool>`partition_id = ${partitionId}`)
790
832
  .where(sql<SqlBool>`client_id = ${clientId}`)
791
833
  .executeTakeFirst();
792
834
 
@@ -804,7 +846,10 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
804
846
  }
805
847
  }
806
848
 
807
- initialScopeKeys = scopeValuesToScopeKeys(parsed);
849
+ initialScopeKeys = applyPartitionToScopeKeys(
850
+ partitionId,
851
+ scopeValuesToScopeKeys(parsed)
852
+ );
808
853
  } catch {
809
854
  // ignore; realtime is best-effort
810
855
  }
@@ -884,15 +929,26 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
884
929
  const raw =
885
930
  typeof evt.data === 'string' ? evt.data : String(evt.data);
886
931
  const msg = JSON.parse(raw);
887
- if (
888
- !msg ||
889
- typeof msg !== 'object' ||
890
- msg.type !== 'presence' ||
891
- !msg.scopeKey
892
- )
932
+ if (!msg || typeof msg !== 'object') return;
933
+
934
+ if (msg.type === 'push') {
935
+ void handleWsPush(
936
+ msg,
937
+ connRef,
938
+ auth.actorId,
939
+ partitionId,
940
+ clientId
941
+ );
893
942
  return;
943
+ }
894
944
 
895
- const scopeKey = String(msg.scopeKey);
945
+ if (msg.type !== 'presence' || !msg.scopeKey) return;
946
+
947
+ const scopeKey = normalizeScopeKeyForPartition(
948
+ partitionId,
949
+ String(msg.scopeKey)
950
+ );
951
+ if (!scopeKey) return;
896
952
 
897
953
  switch (msg.action) {
898
954
  case 'join':
@@ -961,14 +1017,167 @@ export function createSyncRoutes<DB extends SyncCoreDb = SyncCoreDb>(
961
1017
  if (event.sourceInstanceId && event.sourceInstanceId === instanceId) return;
962
1018
 
963
1019
  const commitSeq = event.commitSeq;
1020
+ const partitionId = event.partitionId ?? 'default';
964
1021
  const scopeKeys =
965
1022
  event.scopeKeys && event.scopeKeys.length > 0
966
1023
  ? event.scopeKeys
967
- : await readCommitScopeKeys(options.db, commitSeq);
1024
+ : await readCommitScopeKeys(options.db, commitSeq, partitionId);
968
1025
 
969
1026
  if (scopeKeys.length === 0) return;
970
1027
  wsConnectionManager.notifyScopeKeys(scopeKeys, commitSeq);
971
1028
  }
1029
+
1030
+ async function handleWsPush(
1031
+ msg: Record<string, unknown>,
1032
+ conn: WebSocketConnection,
1033
+ actorId: string,
1034
+ partitionId: string,
1035
+ clientId: string
1036
+ ): Promise<void> {
1037
+ const requestId = typeof msg.requestId === 'string' ? msg.requestId : '';
1038
+ if (!requestId) return;
1039
+
1040
+ try {
1041
+ // Validate the push payload
1042
+ const parsed = SyncPushRequestSchema.omit({ clientId: true }).safeParse(
1043
+ msg
1044
+ );
1045
+ if (!parsed.success) {
1046
+ conn.sendPushResponse({
1047
+ requestId,
1048
+ ok: false,
1049
+ status: 'rejected',
1050
+ results: [
1051
+ { opIndex: 0, status: 'error', error: 'Invalid push payload' },
1052
+ ],
1053
+ });
1054
+ return;
1055
+ }
1056
+
1057
+ const pushOps = parsed.data.operations ?? [];
1058
+ if (pushOps.length > maxOperationsPerPush) {
1059
+ conn.sendPushResponse({
1060
+ requestId,
1061
+ ok: false,
1062
+ status: 'rejected',
1063
+ results: [
1064
+ {
1065
+ opIndex: 0,
1066
+ status: 'error',
1067
+ error: `Maximum ${maxOperationsPerPush} operations per push`,
1068
+ },
1069
+ ],
1070
+ });
1071
+ return;
1072
+ }
1073
+
1074
+ const timer = createSyncTimer();
1075
+
1076
+ const pushed = await pushCommit({
1077
+ db: options.db,
1078
+ dialect: options.dialect,
1079
+ handlers: handlerRegistry,
1080
+ actorId,
1081
+ partitionId,
1082
+ request: {
1083
+ clientId,
1084
+ clientCommitId: parsed.data.clientCommitId,
1085
+ operations: parsed.data.operations,
1086
+ schemaVersion: parsed.data.schemaVersion,
1087
+ },
1088
+ });
1089
+
1090
+ const pushDurationMs = timer();
1091
+
1092
+ logSyncEvent({
1093
+ event: 'sync.push',
1094
+ userId: actorId,
1095
+ durationMs: pushDurationMs,
1096
+ operationCount: pushOps.length,
1097
+ status: pushed.response.status,
1098
+ commitSeq: pushed.response.commitSeq,
1099
+ });
1100
+
1101
+ recordRequestEventInBackground({
1102
+ eventType: 'push',
1103
+ actorId,
1104
+ clientId,
1105
+ transportPath: conn.transportPath,
1106
+ statusCode: 200,
1107
+ outcome: pushed.response.status,
1108
+ durationMs: pushDurationMs,
1109
+ commitSeq: pushed.response.commitSeq,
1110
+ operationCount: pushOps.length,
1111
+ tables: pushed.affectedTables,
1112
+ });
1113
+
1114
+ // WS notifications to other clients
1115
+ if (
1116
+ wsConnectionManager &&
1117
+ pushed.response.ok === true &&
1118
+ pushed.response.status === 'applied' &&
1119
+ typeof pushed.response.commitSeq === 'number'
1120
+ ) {
1121
+ const scopeKeys = applyPartitionToScopeKeys(
1122
+ partitionId,
1123
+ pushed.scopeKeys
1124
+ );
1125
+ if (scopeKeys.length > 0) {
1126
+ wsConnectionManager.notifyScopeKeys(
1127
+ scopeKeys,
1128
+ pushed.response.commitSeq,
1129
+ {
1130
+ excludeClientIds: [clientId],
1131
+ changes: pushed.emittedChanges,
1132
+ }
1133
+ );
1134
+
1135
+ if (realtimeBroadcaster) {
1136
+ realtimeBroadcaster
1137
+ .publish({
1138
+ type: 'commit',
1139
+ commitSeq: pushed.response.commitSeq,
1140
+ partitionId,
1141
+ scopeKeys,
1142
+ sourceInstanceId: instanceId,
1143
+ })
1144
+ .catch((error) => {
1145
+ logAsyncFailureOnce('sync.realtime.broadcast_publish_failed', {
1146
+ event: 'sync.realtime.broadcast_publish_failed',
1147
+ userId: actorId,
1148
+ clientId,
1149
+ error: error instanceof Error ? error.message : String(error),
1150
+ });
1151
+ });
1152
+ }
1153
+ }
1154
+ }
1155
+
1156
+ conn.sendPushResponse({
1157
+ requestId,
1158
+ ok: pushed.response.ok,
1159
+ status: pushed.response.status,
1160
+ commitSeq: pushed.response.commitSeq,
1161
+ results: pushed.response.results,
1162
+ });
1163
+ } catch (err) {
1164
+ captureSyncException(err, {
1165
+ event: 'sync.realtime.push_failed',
1166
+ requestId,
1167
+ clientId,
1168
+ actorId,
1169
+ partitionId,
1170
+ });
1171
+ const message =
1172
+ err instanceof Error ? err.message : 'Internal server error';
1173
+ conn.sendPushResponse({
1174
+ requestId,
1175
+ ok: false,
1176
+ status: 'rejected',
1177
+ results: [{ opIndex: 0, status: 'error', error: message }],
1178
+ });
1179
+ }
1180
+ }
972
1181
  }
973
1182
 
974
1183
  export function getSyncWebSocketConnectionManager(
@@ -1035,15 +1244,46 @@ function scopeValuesToScopeKeys(scopes: unknown): string[] {
1035
1244
  return Array.from(scopeKeys);
1036
1245
  }
1037
1246
 
1247
+ function partitionScopeKey(partitionId: string, scopeKey: string): string {
1248
+ return `${partitionId}::${scopeKey}`;
1249
+ }
1250
+
1251
+ function applyPartitionToScopeKeys(
1252
+ partitionId: string,
1253
+ scopeKeys: readonly string[]
1254
+ ): string[] {
1255
+ const prefixed = new Set<string>();
1256
+ for (const scopeKey of scopeKeys) {
1257
+ if (!scopeKey) continue;
1258
+ if (scopeKey.startsWith(`${partitionId}::`)) {
1259
+ prefixed.add(scopeKey);
1260
+ continue;
1261
+ }
1262
+ prefixed.add(partitionScopeKey(partitionId, scopeKey));
1263
+ }
1264
+ return Array.from(prefixed);
1265
+ }
1266
+
1267
+ function normalizeScopeKeyForPartition(
1268
+ partitionId: string,
1269
+ scopeKey: string
1270
+ ): string {
1271
+ if (scopeKey.startsWith(`${partitionId}::`)) return scopeKey;
1272
+ if (scopeKey.includes('::')) return '';
1273
+ return partitionScopeKey(partitionId, scopeKey);
1274
+ }
1275
+
1038
1276
  async function readCommitScopeKeys<DB extends SyncCoreDb>(
1039
1277
  db: Kysely<DB>,
1040
- commitSeq: number
1278
+ commitSeq: number,
1279
+ partitionId: string
1041
1280
  ): Promise<string[]> {
1042
1281
  // Read scopes from the JSONB column and convert to scope strings
1043
1282
  const rowsResult = await sql<{ scopes: unknown }>`
1044
1283
  select scopes
1045
1284
  from ${sql.table('sync_changes')}
1046
1285
  where commit_seq = ${commitSeq}
1286
+ and partition_id = ${partitionId}
1047
1287
  `.execute(db);
1048
1288
  const rows = rowsResult.rows;
1049
1289
 
@@ -1053,7 +1293,10 @@ async function readCommitScopeKeys<DB extends SyncCoreDb>(
1053
1293
  const scopes =
1054
1294
  typeof row.scopes === 'string' ? JSON.parse(row.scopes) : row.scopes;
1055
1295
 
1056
- for (const k of scopeValuesToScopeKeys(scopes)) {
1296
+ for (const k of applyPartitionToScopeKeys(
1297
+ partitionId,
1298
+ scopeValuesToScopeKeys(scopes)
1299
+ )) {
1057
1300
  scopeKeys.add(k);
1058
1301
  }
1059
1302
  }