@syncular/server-hono 0.0.1 → 0.0.2-126

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. package/README.md +23 -0
  2. package/dist/api-key-auth.js +1 -1
  3. package/dist/blobs.d.ts.map +1 -1
  4. package/dist/blobs.js +31 -8
  5. package/dist/blobs.js.map +1 -1
  6. package/dist/console/index.d.ts +1 -1
  7. package/dist/console/index.d.ts.map +1 -1
  8. package/dist/console/index.js +1 -1
  9. package/dist/console/index.js.map +1 -1
  10. package/dist/console/routes.d.ts +1 -2
  11. package/dist/console/routes.d.ts.map +1 -1
  12. package/dist/console/routes.js +65 -2
  13. package/dist/console/routes.js.map +1 -1
  14. package/dist/console/schemas.d.ts +138 -496
  15. package/dist/console/schemas.d.ts.map +1 -1
  16. package/dist/console/schemas.js +3 -9
  17. package/dist/console/schemas.js.map +1 -1
  18. package/dist/create-server.d.ts +3 -1
  19. package/dist/create-server.d.ts.map +1 -1
  20. package/dist/create-server.js +4 -3
  21. package/dist/create-server.js.map +1 -1
  22. package/dist/index.d.ts +3 -3
  23. package/dist/index.d.ts.map +1 -1
  24. package/dist/index.js +9 -9
  25. package/dist/index.js.map +1 -1
  26. package/dist/proxy/connection-manager.d.ts +1 -1
  27. package/dist/proxy/connection-manager.d.ts.map +1 -1
  28. package/dist/proxy/connection-manager.js +1 -1
  29. package/dist/proxy/connection-manager.js.map +1 -1
  30. package/dist/proxy/index.js +2 -2
  31. package/dist/proxy/routes.d.ts +2 -2
  32. package/dist/proxy/routes.d.ts.map +1 -1
  33. package/dist/proxy/routes.js +3 -3
  34. package/dist/proxy/routes.js.map +1 -1
  35. package/dist/routes.d.ts +2 -2
  36. package/dist/routes.d.ts.map +1 -1
  37. package/dist/routes.js +447 -260
  38. package/dist/routes.js.map +1 -1
  39. package/dist/ws.d.ts +40 -3
  40. package/dist/ws.d.ts.map +1 -1
  41. package/dist/ws.js +51 -6
  42. package/dist/ws.js.map +1 -1
  43. package/package.json +32 -9
  44. package/src/__tests__/pull-chunk-storage.test.ts +415 -27
  45. package/src/__tests__/realtime-bridge.test.ts +3 -1
  46. package/src/__tests__/sync-rate-limit-routing.test.ts +181 -0
  47. package/src/blobs.ts +31 -8
  48. package/src/console/index.ts +1 -0
  49. package/src/console/routes.ts +78 -25
  50. package/src/console/schemas.ts +0 -31
  51. package/src/create-server.ts +6 -0
  52. package/src/index.ts +12 -3
  53. package/src/proxy/connection-manager.ts +2 -2
  54. package/src/proxy/routes.ts +3 -3
  55. package/src/routes.ts +570 -327
  56. package/src/ws.ts +76 -13
package/dist/routes.js CHANGED
@@ -2,19 +2,18 @@
2
2
  * @syncular/server-hono - Sync routes for Hono
3
3
  *
4
4
  * Provides:
5
- * - POST /pull (commit stream + optional bootstrap snapshots)
6
- * - POST /push (commit ingestion)
5
+ * - POST / (combined push + pull in one round-trip)
7
6
  * - GET /snapshot-chunks/:chunkId (download encoded snapshot chunks)
8
7
  * - GET /realtime (optional WebSocket "wake up" notifications)
9
8
  */
10
- import { createSyncTimer, ErrorResponseSchema, logSyncEvent, SyncPullRequestSchema, SyncPullResponseSchema, SyncPushRequestSchema, SyncPushResponseSchema, } from '@syncular/core';
11
- import { InvalidSubscriptionScopeError, maybeCompactChanges, maybePruneSync, pull, pushCommit, readSnapshotChunk, recordClientCursor, TableRegistry, } from '@syncular/server';
9
+ import { captureSyncException, createSyncTimer, ErrorResponseSchema, logSyncEvent, SyncCombinedRequestSchema, SyncCombinedResponseSchema, SyncPushRequestSchema, } from '@syncular/core';
10
+ import { InvalidSubscriptionScopeError, pull, pushCommit, readSnapshotChunk, recordClientCursor, TableRegistry, } from '@syncular/server';
12
11
  import { Hono } from 'hono';
13
12
  import { describeRoute, resolver, validator as zValidator } from 'hono-openapi';
14
13
  import { sql, } from 'kysely';
15
14
  import { z } from 'zod';
16
- import { createRateLimiter, DEFAULT_SYNC_RATE_LIMITS, } from './rate-limit';
17
- import { createWebSocketConnection, WebSocketConnectionManager } from './ws';
15
+ import { createRateLimiter, DEFAULT_SYNC_RATE_LIMITS, } from './rate-limit.js';
16
+ import { createWebSocketConnection, WebSocketConnectionManager, } from './ws.js';
18
17
  /**
19
18
  * WeakMaps for storing Hono-instance-specific data without augmenting the type.
20
19
  */
@@ -28,6 +27,14 @@ const snapshotChunkParamsSchema = z.object({
28
27
  });
29
28
  export function createSyncRoutes(options) {
30
29
  const routes = new Hono();
30
+ routes.onError((error, c) => {
31
+ captureSyncException(error, {
32
+ event: 'sync.route.unhandled',
33
+ method: c.req.method,
34
+ path: c.req.path,
35
+ });
36
+ return c.text('Internal Server Error', 500);
37
+ });
31
38
  const handlerRegistry = new TableRegistry();
32
39
  for (const handler of options.handlers) {
33
40
  handlerRegistry.register(handler);
@@ -62,35 +69,60 @@ export function createSyncRoutes(options) {
62
69
  (typeof crypto !== 'undefined' && 'randomUUID' in crypto
63
70
  ? crypto.randomUUID()
64
71
  : `${Date.now()}-${Math.random().toString(16).slice(2)}`);
72
+ const loggedAsyncFailureKeys = new Set();
73
+ const logAsyncFailureOnce = (key, event) => {
74
+ if (loggedAsyncFailureKeys.has(key))
75
+ return;
76
+ loggedAsyncFailureKeys.add(key);
77
+ logSyncEvent(event);
78
+ };
65
79
  if (wsConnectionManager && realtimeBroadcaster) {
66
80
  const unsubscribe = realtimeBroadcaster.subscribe((event) => {
67
- void handleRealtimeEvent(event).catch(() => { });
81
+ void handleRealtimeEvent(event).catch((error) => {
82
+ logAsyncFailureOnce('sync.realtime.broadcast_delivery_failed', {
83
+ event: 'sync.realtime.broadcast_delivery_failed',
84
+ error: error instanceof Error ? error.message : String(error),
85
+ sourceEventType: event.type,
86
+ });
87
+ });
68
88
  });
69
89
  realtimeUnsubscribeMap.set(routes, unsubscribe);
70
90
  }
71
- // -------------------------------------------------------------------------
72
- // Request event recording (for console inspector)
73
- // -------------------------------------------------------------------------
74
91
  const recordRequestEvent = async (event) => {
75
- try {
76
- const tablesValue = options.dialect.arrayToDb(event.tables ?? []);
77
- await sql `
78
- INSERT INTO sync_request_events (
79
- event_type, actor_id, client_id, status_code, outcome,
80
- duration_ms, commit_seq, operation_count, row_count,
81
- tables, error_message, transport_path
82
- ) VALUES (
83
- ${event.eventType}, ${event.actorId}, ${event.clientId},
84
- ${event.statusCode}, ${event.outcome}, ${event.durationMs},
85
- ${event.commitSeq ?? null}, ${event.operationCount ?? null},
86
- ${event.rowCount ?? null}, ${tablesValue}, ${event.errorMessage ?? null},
87
- ${event.transportPath}
88
- )
89
- `.execute(options.db);
90
- }
91
- catch {
92
- // Silently ignore - event recording should not block sync
93
- }
92
+ const tablesValue = options.dialect.arrayToDb(event.tables ?? []);
93
+ await sql `
94
+ INSERT INTO sync_request_events (
95
+ event_type, actor_id, client_id, status_code, outcome,
96
+ duration_ms, commit_seq, operation_count, row_count,
97
+ tables, error_message, transport_path
98
+ ) VALUES (
99
+ ${event.eventType}, ${event.actorId}, ${event.clientId},
100
+ ${event.statusCode}, ${event.outcome}, ${event.durationMs},
101
+ ${event.commitSeq ?? null}, ${event.operationCount ?? null},
102
+ ${event.rowCount ?? null}, ${tablesValue}, ${event.errorMessage ?? null},
103
+ ${event.transportPath}
104
+ )
105
+ `.execute(options.db);
106
+ };
107
+ const recordRequestEventInBackground = (event) => {
108
+ void recordRequestEvent(event).catch((error) => {
109
+ logAsyncFailureOnce('sync.request_event_record_failed', {
110
+ event: 'sync.request_event_record_failed',
111
+ userId: event.actorId,
112
+ clientId: event.clientId,
113
+ requestEventType: event.eventType,
114
+ error: error instanceof Error ? error.message : String(error),
115
+ });
116
+ });
117
+ };
118
+ const authCache = new WeakMap();
119
+ const getAuth = (c) => {
120
+ const cached = authCache.get(c);
121
+ if (cached)
122
+ return cached;
123
+ const pending = options.authenticate(c);
124
+ authCache.set(c, pending);
125
+ return pending;
94
126
  };
95
127
  // -------------------------------------------------------------------------
96
128
  // Rate limiting (optional)
@@ -105,17 +137,47 @@ export function createSyncRoutes(options) {
105
137
  return createRateLimiter({
106
138
  ...limitConfig,
107
139
  keyGenerator: async (c) => {
108
- const auth = await options.authenticate(c);
140
+ const auth = await getAuth(c);
109
141
  return auth?.actorId ?? null;
110
142
  },
111
143
  });
112
144
  };
113
145
  const pullLimiter = createAuthBasedRateLimiter(pullRateLimit);
114
- if (pullLimiter)
115
- routes.use('/pull', pullLimiter);
116
146
  const pushLimiter = createAuthBasedRateLimiter(pushRateLimit);
117
- if (pushLimiter)
118
- routes.use('/push', pushLimiter);
147
+ const syncRateLimiter = async (c, next) => {
148
+ if (!pullLimiter && !pushLimiter)
149
+ return next();
150
+ let shouldApplyPull = pullLimiter !== null;
151
+ let shouldApplyPush = pushLimiter !== null;
152
+ if (pullLimiter && pushLimiter && c.req.method === 'POST') {
153
+ try {
154
+ const parsed = await c.req.raw.clone().json();
155
+ if (parsed !== null && typeof parsed === 'object') {
156
+ shouldApplyPull = Reflect.get(parsed, 'pull') !== undefined;
157
+ shouldApplyPush = Reflect.get(parsed, 'push') !== undefined;
158
+ }
159
+ }
160
+ catch {
161
+ // Keep default behavior and apply both limiters when payload parsing fails.
162
+ }
163
+ }
164
+ if (pullLimiter && shouldApplyPull && pushLimiter && shouldApplyPush) {
165
+ return pullLimiter(c, async () => {
166
+ const pushResult = await pushLimiter(c, next);
167
+ if (pushResult instanceof Response) {
168
+ c.res = pushResult;
169
+ }
170
+ });
171
+ }
172
+ if (pullLimiter && shouldApplyPull) {
173
+ return pullLimiter(c, next);
174
+ }
175
+ if (pushLimiter && shouldApplyPush) {
176
+ return pushLimiter(c, next);
177
+ }
178
+ return next();
179
+ };
180
+ routes.use('/', syncRateLimiter);
119
181
  }
120
182
  // -------------------------------------------------------------------------
121
183
  // GET /health
@@ -127,17 +189,19 @@ export function createSyncRoutes(options) {
127
189
  });
128
190
  });
129
191
  // -------------------------------------------------------------------------
130
- // POST /pull
192
+ // POST / (combined push + pull in one round-trip)
131
193
  // -------------------------------------------------------------------------
132
- routes.post('/pull', describeRoute({
194
+ routes.post('/', describeRoute({
133
195
  tags: ['sync'],
134
- summary: 'Pull commits and snapshots',
135
- description: 'Pull commits and optional bootstrap snapshots for subscriptions',
196
+ summary: 'Combined push and pull',
197
+ description: 'Perform push and/or pull in a single request to reduce round-trips',
136
198
  responses: {
137
199
  200: {
138
- description: 'Successful pull response',
200
+ description: 'Combined sync response',
139
201
  content: {
140
- 'application/json': { schema: resolver(SyncPullResponseSchema) },
202
+ 'application/json': {
203
+ schema: resolver(SyncCombinedResponseSchema),
204
+ },
141
205
  },
142
206
  },
143
207
  400: {
@@ -153,137 +217,185 @@ export function createSyncRoutes(options) {
153
217
  },
154
218
  },
155
219
  },
156
- }), zValidator('json', SyncPullRequestSchema), async (c) => {
157
- const auth = await options.authenticate(c);
220
+ }), zValidator('json', SyncCombinedRequestSchema), async (c) => {
221
+ const auth = await getAuth(c);
158
222
  if (!auth)
159
223
  return c.json({ error: 'UNAUTHENTICATED' }, 401);
224
+ const partitionId = auth.partitionId ?? 'default';
160
225
  const body = c.req.valid('json');
161
- const timer = createSyncTimer();
162
- if (body.subscriptions.length > maxSubscriptionsPerPull) {
163
- return c.json({
164
- error: 'INVALID_REQUEST',
165
- message: `Too many subscriptions (max ${maxSubscriptionsPerPull})`,
166
- }, 400);
167
- }
168
- // Guardrail: unique subscription ids in a single request.
169
- const seenSubscriptionIds = new Set();
170
- for (const sub of body.subscriptions) {
171
- const id = sub.id;
172
- if (seenSubscriptionIds.has(id)) {
226
+ const clientId = body.clientId;
227
+ let pushResponse;
228
+ let pullResponse;
229
+ // --- Push phase ---
230
+ if (body.push) {
231
+ const pushOps = body.push.operations ?? [];
232
+ if (pushOps.length > maxOperationsPerPush) {
173
233
  return c.json({
174
- error: 'INVALID_REQUEST',
175
- message: `Duplicate subscription id: ${id}`,
234
+ error: 'TOO_MANY_OPERATIONS',
235
+ message: `Maximum ${maxOperationsPerPush} operations per push`,
176
236
  }, 400);
177
237
  }
178
- seenSubscriptionIds.add(id);
179
- }
180
- const request = {
181
- clientId: body.clientId,
182
- limitCommits: clampInt(body.limitCommits ?? 50, 1, maxPullLimitCommits),
183
- limitSnapshotRows: clampInt(body.limitSnapshotRows ?? 1000, 1, maxPullLimitSnapshotRows),
184
- maxSnapshotPages: clampInt(body.maxSnapshotPages ?? 1, 1, maxPullMaxSnapshotPages),
185
- dedupeRows: body.dedupeRows === true,
186
- subscriptions: body.subscriptions.map((sub) => ({
187
- id: sub.id,
188
- shape: sub.shape,
189
- scopes: (sub.scopes ?? {}),
190
- params: sub.params,
191
- cursor: Math.max(-1, sub.cursor),
192
- bootstrapState: sub.bootstrapState ?? null,
193
- })),
194
- };
195
- let pullResult;
196
- try {
197
- pullResult = await pull({
238
+ const timer = createSyncTimer();
239
+ const pushed = await pushCommit({
198
240
  db: options.db,
199
241
  dialect: options.dialect,
200
- shapes: handlerRegistry,
242
+ handlers: handlerRegistry,
201
243
  actorId: auth.actorId,
202
- request,
203
- chunkStorage: options.chunkStorage,
244
+ partitionId,
245
+ request: {
246
+ clientId,
247
+ clientCommitId: body.push.clientCommitId,
248
+ operations: body.push.operations,
249
+ schemaVersion: body.push.schemaVersion,
250
+ },
204
251
  });
205
- }
206
- catch (err) {
207
- if (err instanceof InvalidSubscriptionScopeError) {
208
- return c.json({ error: 'INVALID_SUBSCRIPTION', message: err.message }, 400);
252
+ const pushDurationMs = timer();
253
+ logSyncEvent({
254
+ event: 'sync.push',
255
+ userId: auth.actorId,
256
+ durationMs: pushDurationMs,
257
+ operationCount: pushOps.length,
258
+ status: pushed.response.status,
259
+ commitSeq: pushed.response.commitSeq,
260
+ });
261
+ recordRequestEventInBackground({
262
+ eventType: 'push',
263
+ actorId: auth.actorId,
264
+ clientId,
265
+ transportPath: readTransportPath(c),
266
+ statusCode: 200,
267
+ outcome: pushed.response.status,
268
+ durationMs: pushDurationMs,
269
+ commitSeq: pushed.response.commitSeq,
270
+ operationCount: pushOps.length,
271
+ tables: pushed.affectedTables,
272
+ });
273
+ // WS notifications
274
+ if (wsConnectionManager &&
275
+ pushed.response.ok === true &&
276
+ pushed.response.status === 'applied' &&
277
+ typeof pushed.response.commitSeq === 'number') {
278
+ const scopeKeys = applyPartitionToScopeKeys(partitionId, pushed.scopeKeys);
279
+ if (scopeKeys.length > 0) {
280
+ wsConnectionManager.notifyScopeKeys(scopeKeys, pushed.response.commitSeq, {
281
+ excludeClientIds: [clientId],
282
+ changes: pushed.emittedChanges,
283
+ });
284
+ if (realtimeBroadcaster) {
285
+ realtimeBroadcaster
286
+ .publish({
287
+ type: 'commit',
288
+ commitSeq: pushed.response.commitSeq,
289
+ partitionId,
290
+ scopeKeys,
291
+ sourceInstanceId: instanceId,
292
+ })
293
+ .catch((error) => {
294
+ logAsyncFailureOnce('sync.realtime.broadcast_publish_failed', {
295
+ event: 'sync.realtime.broadcast_publish_failed',
296
+ userId: auth.actorId,
297
+ clientId,
298
+ error: error instanceof Error ? error.message : String(error),
299
+ });
300
+ });
301
+ }
302
+ }
209
303
  }
210
- throw err;
304
+ pushResponse = pushed.response;
211
305
  }
212
- await recordClientCursor(options.db, options.dialect, {
213
- clientId: request.clientId,
214
- actorId: auth.actorId,
215
- cursor: pullResult.clientCursor,
216
- effectiveScopes: pullResult.effectiveScopes,
217
- });
218
- // Update WebSocket manager with effective scopes for this client.
219
- // Realtime wake-ups are best-effort; correctness always comes from pull+cursors.
220
- wsConnectionManager?.updateClientScopeKeys(request.clientId, scopeValuesToScopeKeys(pullResult.effectiveScopes));
221
- const pruneCfg = config.prune;
222
- if (pruneCfg) {
223
- const deletedCommits = await maybePruneSync(options.db, {
224
- minIntervalMs: pruneCfg.minIntervalMs ?? 5 * 60 * 1000,
225
- options: pruneCfg.options,
226
- });
227
- if (deletedCommits > 0) {
228
- logSyncEvent({
229
- event: 'sync.prune',
230
- userId: auth.actorId,
231
- deletedCommits,
306
+ // --- Pull phase ---
307
+ if (body.pull) {
308
+ if (body.pull.subscriptions.length > maxSubscriptionsPerPull) {
309
+ return c.json({
310
+ error: 'INVALID_REQUEST',
311
+ message: `Too many subscriptions (max ${maxSubscriptionsPerPull})`,
312
+ }, 400);
313
+ }
314
+ const seenSubscriptionIds = new Set();
315
+ for (const sub of body.pull.subscriptions) {
316
+ const id = sub.id;
317
+ if (seenSubscriptionIds.has(id)) {
318
+ return c.json({
319
+ error: 'INVALID_REQUEST',
320
+ message: `Duplicate subscription id: ${id}`,
321
+ }, 400);
322
+ }
323
+ seenSubscriptionIds.add(id);
324
+ }
325
+ const request = {
326
+ clientId,
327
+ limitCommits: clampInt(body.pull.limitCommits ?? 50, 1, maxPullLimitCommits),
328
+ limitSnapshotRows: clampInt(body.pull.limitSnapshotRows ?? 1000, 1, maxPullLimitSnapshotRows),
329
+ maxSnapshotPages: clampInt(body.pull.maxSnapshotPages ?? 1, 1, maxPullMaxSnapshotPages),
330
+ dedupeRows: body.pull.dedupeRows === true,
331
+ subscriptions: body.pull.subscriptions.map((sub) => ({
332
+ id: sub.id,
333
+ table: sub.table,
334
+ scopes: (sub.scopes ?? {}),
335
+ params: sub.params,
336
+ cursor: Math.max(-1, sub.cursor),
337
+ bootstrapState: sub.bootstrapState ?? null,
338
+ })),
339
+ };
340
+ const timer = createSyncTimer();
341
+ let pullResult;
342
+ try {
343
+ pullResult = await pull({
344
+ db: options.db,
345
+ dialect: options.dialect,
346
+ handlers: handlerRegistry,
347
+ actorId: auth.actorId,
348
+ partitionId,
349
+ request,
350
+ chunkStorage: options.chunkStorage,
232
351
  });
233
352
  }
234
- }
235
- const compactCfg = config.compact;
236
- if (compactCfg) {
237
- const deletedChanges = await maybeCompactChanges(options.db, {
238
- dialect: options.dialect,
239
- minIntervalMs: compactCfg.minIntervalMs ?? 30 * 60 * 1000,
240
- options: {
241
- fullHistoryHours: compactCfg.options?.fullHistoryHours ?? 24 * 7,
242
- },
243
- });
244
- if (deletedChanges > 0) {
245
- logSyncEvent({
246
- event: 'sync.compact',
353
+ catch (err) {
354
+ if (err instanceof InvalidSubscriptionScopeError) {
355
+ return c.json({ error: 'INVALID_SUBSCRIPTION', message: err.message }, 400);
356
+ }
357
+ throw err;
358
+ }
359
+ // Fire-and-forget bookkeeping
360
+ void recordClientCursor(options.db, options.dialect, {
361
+ partitionId,
362
+ clientId,
363
+ actorId: auth.actorId,
364
+ cursor: pullResult.clientCursor,
365
+ effectiveScopes: pullResult.effectiveScopes,
366
+ }).catch((error) => {
367
+ logAsyncFailureOnce('sync.client_cursor_record_failed', {
368
+ event: 'sync.client_cursor_record_failed',
247
369
  userId: auth.actorId,
248
- deletedChanges,
370
+ clientId,
371
+ error: error instanceof Error ? error.message : String(error),
249
372
  });
250
- }
373
+ });
374
+ wsConnectionManager?.updateClientScopeKeys(clientId, applyPartitionToScopeKeys(partitionId, scopeValuesToScopeKeys(pullResult.effectiveScopes)));
375
+ const pullDurationMs = timer();
376
+ logSyncEvent({
377
+ event: 'sync.pull',
378
+ userId: auth.actorId,
379
+ durationMs: pullDurationMs,
380
+ subscriptionCount: pullResult.response.subscriptions.length,
381
+ clientCursor: pullResult.clientCursor,
382
+ });
383
+ recordRequestEventInBackground({
384
+ eventType: 'pull',
385
+ actorId: auth.actorId,
386
+ clientId,
387
+ transportPath: readTransportPath(c),
388
+ statusCode: 200,
389
+ outcome: 'applied',
390
+ durationMs: pullDurationMs,
391
+ });
392
+ pullResponse = pullResult.response;
251
393
  }
252
- const rowCount = pullResult.response.subscriptions.reduce((sum, s) => {
253
- if (s.bootstrap) {
254
- return (sum +
255
- (s.snapshots ?? []).reduce((ss, snap) => ss + (snap.rows?.length ?? 0), 0));
256
- }
257
- return (sum +
258
- s.commits.reduce((cs, commit) => cs + commit.changes.length, 0));
259
- }, 0);
260
- const bootstrapCount = pullResult.response.subscriptions.filter((s) => s.bootstrap).length;
261
- const activeCount = pullResult.response.subscriptions.filter((s) => s.status === 'active').length;
262
- const pullDurationMs = timer();
263
- logSyncEvent({
264
- event: 'sync.pull',
265
- userId: auth.actorId,
266
- durationMs: pullDurationMs,
267
- rowCount,
268
- subscriptionCount: pullResult.response.subscriptions.length,
269
- activeSubscriptionCount: activeCount,
270
- bootstrapCount,
271
- effectiveTableCount: Object.keys(pullResult.effectiveScopes).length,
272
- clientCursor: pullResult.clientCursor,
273
- });
274
- // Record event for console inspector (non-blocking)
275
- recordRequestEvent({
276
- eventType: 'pull',
277
- actorId: auth.actorId,
278
- clientId: request.clientId,
279
- transportPath: readTransportPath(c),
280
- statusCode: 200,
281
- outcome: bootstrapCount > 0 ? 'applied' : rowCount > 0 ? 'applied' : 'cached',
282
- durationMs: pullDurationMs,
283
- rowCount,
284
- tables: Object.keys(pullResult.effectiveScopes),
285
- });
286
- return c.json(pullResult.response, 200);
394
+ return c.json({
395
+ ok: true,
396
+ ...(pushResponse ? { push: pushResponse } : {}),
397
+ ...(pullResponse ? { pull: pullResponse } : {}),
398
+ }, 200);
287
399
  });
288
400
  // -------------------------------------------------------------------------
289
401
  // GET /snapshot-chunks/:chunkId
@@ -294,7 +406,12 @@ export function createSyncRoutes(options) {
294
406
  description: 'Download an encoded bootstrap snapshot chunk',
295
407
  responses: {
296
408
  200: {
297
- description: 'Snapshot chunk data (gzip-compressed NDJSON)',
409
+ description: 'Snapshot chunk data (gzip-compressed framed JSON rows)',
410
+ content: {
411
+ 'application/octet-stream': {
412
+ schema: resolver(z.string()),
413
+ },
414
+ },
298
415
  },
299
416
  304: {
300
417
  description: 'Not modified (cached)',
@@ -319,22 +436,26 @@ export function createSyncRoutes(options) {
319
436
  },
320
437
  },
321
438
  }), zValidator('param', snapshotChunkParamsSchema), async (c) => {
322
- const auth = await options.authenticate(c);
439
+ const auth = await getAuth(c);
323
440
  if (!auth)
324
441
  return c.json({ error: 'UNAUTHENTICATED' }, 401);
442
+ const partitionId = auth.partitionId ?? 'default';
325
443
  const { chunkId } = c.req.valid('param');
326
444
  const chunk = await readSnapshotChunk(options.db, chunkId, {
327
445
  chunkStorage: options.chunkStorage,
328
446
  });
329
447
  if (!chunk)
330
448
  return c.json({ error: 'NOT_FOUND' }, 404);
449
+ if (chunk.partitionId !== partitionId) {
450
+ return c.json({ error: 'FORBIDDEN' }, 403);
451
+ }
331
452
  const nowIso = new Date().toISOString();
332
453
  if (chunk.expiresAt <= nowIso) {
333
454
  return c.json({ error: 'NOT_FOUND' }, 404);
334
455
  }
335
456
  // Note: Snapshot chunks are created during authorized pull requests
336
457
  // and have opaque IDs that expire. Additional authorization is handled
337
- // at the pull layer via shape-level resolveScopes.
458
+ // at the pull layer via table-level resolveScopes.
338
459
  const etag = `"sha256:${chunk.sha256}"`;
339
460
  const ifNoneMatch = c.req.header('if-none-match');
340
461
  if (ifNoneMatch && ifNoneMatch === etag) {
@@ -350,9 +471,9 @@ export function createSyncRoutes(options) {
350
471
  return new Response(chunk.body, {
351
472
  status: 200,
352
473
  headers: {
353
- 'Content-Type': 'application/x-ndjson; charset=utf-8',
474
+ 'Content-Type': 'application/octet-stream',
354
475
  'Content-Encoding': 'gzip',
355
- 'Content-Length': String(chunk.body.length),
476
+ 'Content-Length': String(chunk.byteLength),
356
477
  ETag: etag,
357
478
  'Cache-Control': 'private, max-age=0',
358
479
  Vary: 'Authorization',
@@ -364,104 +485,14 @@ export function createSyncRoutes(options) {
364
485
  });
365
486
  });
366
487
  // -------------------------------------------------------------------------
367
- // POST /push
368
- // -------------------------------------------------------------------------
369
- routes.post('/push', describeRoute({
370
- tags: ['sync'],
371
- summary: 'Push a commit',
372
- description: 'Push a client commit with operations to the server',
373
- responses: {
374
- 200: {
375
- description: 'Successful push response',
376
- content: {
377
- 'application/json': { schema: resolver(SyncPushResponseSchema) },
378
- },
379
- },
380
- 400: {
381
- description: 'Invalid request',
382
- content: {
383
- 'application/json': { schema: resolver(ErrorResponseSchema) },
384
- },
385
- },
386
- 401: {
387
- description: 'Unauthenticated',
388
- content: {
389
- 'application/json': { schema: resolver(ErrorResponseSchema) },
390
- },
391
- },
392
- },
393
- }), zValidator('json', SyncPushRequestSchema), async (c) => {
394
- const auth = await options.authenticate(c);
395
- if (!auth)
396
- return c.json({ error: 'UNAUTHENTICATED' }, 401);
397
- const body = c.req.valid('json');
398
- if (body.operations.length > maxOperationsPerPush) {
399
- return c.json({
400
- error: 'TOO_MANY_OPERATIONS',
401
- message: `Maximum ${maxOperationsPerPush} operations per push`,
402
- }, 400);
403
- }
404
- const timer = createSyncTimer();
405
- const pushed = await pushCommit({
406
- db: options.db,
407
- dialect: options.dialect,
408
- shapes: handlerRegistry,
409
- actorId: auth.actorId,
410
- request: body,
411
- });
412
- const pushDurationMs = timer();
413
- logSyncEvent({
414
- event: 'sync.push',
415
- userId: auth.actorId,
416
- durationMs: pushDurationMs,
417
- operationCount: body.operations.length,
418
- status: pushed.response.status,
419
- commitSeq: pushed.response.commitSeq,
420
- });
421
- // Record event for console inspector (non-blocking)
422
- recordRequestEvent({
423
- eventType: 'push',
424
- actorId: auth.actorId,
425
- clientId: body.clientId,
426
- transportPath: readTransportPath(c),
427
- statusCode: 200,
428
- outcome: pushed.response.status,
429
- durationMs: pushDurationMs,
430
- commitSeq: pushed.response.commitSeq,
431
- operationCount: body.operations.length,
432
- tables: pushed.affectedTables,
433
- });
434
- if (wsConnectionManager &&
435
- pushed.response.ok === true &&
436
- pushed.response.status === 'applied' &&
437
- typeof pushed.response.commitSeq === 'number') {
438
- const scopeKeys = await readCommitScopeKeys(options.db, pushed.response.commitSeq);
439
- if (scopeKeys.length > 0) {
440
- wsConnectionManager.notifyScopeKeys(scopeKeys, pushed.response.commitSeq, {
441
- excludeClientIds: [body.clientId],
442
- });
443
- if (realtimeBroadcaster) {
444
- realtimeBroadcaster
445
- .publish({
446
- type: 'commit',
447
- commitSeq: pushed.response.commitSeq,
448
- scopeKeys,
449
- sourceInstanceId: instanceId,
450
- })
451
- .catch(() => { });
452
- }
453
- }
454
- }
455
- return c.json(pushed.response, 200);
456
- });
457
- // -------------------------------------------------------------------------
458
488
  // GET /realtime (optional WebSocket wake-ups)
459
489
  // -------------------------------------------------------------------------
460
490
  if (wsConnectionManager && websocketConfig?.enabled) {
461
491
  routes.get('/realtime', async (c) => {
462
- const auth = await options.authenticate(c);
492
+ const auth = await getAuth(c);
463
493
  if (!auth)
464
494
  return c.json({ error: 'UNAUTHENTICATED' }, 401);
495
+ const partitionId = auth.partitionId ?? 'default';
465
496
  const clientId = c.req.query('clientId');
466
497
  if (!clientId || typeof clientId !== 'string') {
467
498
  return c.json({
@@ -477,6 +508,7 @@ export function createSyncRoutes(options) {
477
508
  const cursorsQ = options.db.selectFrom('sync_client_cursors');
478
509
  const row = await cursorsQ
479
510
  .selectAll()
511
+ .where(sql `partition_id = ${partitionId}`)
480
512
  .where(sql `client_id = ${clientId}`)
481
513
  .executeTakeFirst();
482
514
  if (row && row.actor_id !== auth.actorId) {
@@ -492,7 +524,7 @@ export function createSyncRoutes(options) {
492
524
  parsed = null;
493
525
  }
494
526
  }
495
- initialScopeKeys = scopeValuesToScopeKeys(parsed);
527
+ initialScopeKeys = applyPartitionToScopeKeys(partitionId, scopeValuesToScopeKeys(parsed));
496
528
  }
497
529
  catch {
498
530
  // ignore; realtime is best-effort
@@ -560,12 +592,17 @@ export function createSyncRoutes(options) {
560
592
  try {
561
593
  const raw = typeof evt.data === 'string' ? evt.data : String(evt.data);
562
594
  const msg = JSON.parse(raw);
563
- if (!msg ||
564
- typeof msg !== 'object' ||
565
- msg.type !== 'presence' ||
566
- !msg.scopeKey)
595
+ if (!msg || typeof msg !== 'object')
596
+ return;
597
+ if (msg.type === 'push') {
598
+ void handleWsPush(msg, connRef, auth.actorId, partitionId, clientId);
599
+ return;
600
+ }
601
+ if (msg.type !== 'presence' || !msg.scopeKey)
602
+ return;
603
+ const scopeKey = normalizeScopeKeyForPartition(partitionId, String(msg.scopeKey));
604
+ if (!scopeKey)
567
605
  return;
568
- const scopeKey = String(msg.scopeKey);
569
606
  switch (msg.action) {
570
607
  case 'join':
571
608
  if (!wsConnectionManager.joinPresence(clientId, scopeKey, msg.metadata)) {
@@ -619,13 +656,139 @@ export function createSyncRoutes(options) {
619
656
  if (event.sourceInstanceId && event.sourceInstanceId === instanceId)
620
657
  return;
621
658
  const commitSeq = event.commitSeq;
659
+ const partitionId = event.partitionId ?? 'default';
622
660
  const scopeKeys = event.scopeKeys && event.scopeKeys.length > 0
623
661
  ? event.scopeKeys
624
- : await readCommitScopeKeys(options.db, commitSeq);
662
+ : await readCommitScopeKeys(options.db, commitSeq, partitionId);
625
663
  if (scopeKeys.length === 0)
626
664
  return;
627
665
  wsConnectionManager.notifyScopeKeys(scopeKeys, commitSeq);
628
666
  }
667
/**
 * Handle a `push` message received over the realtime WebSocket transport.
 *
 * Validates the payload, applies the commit via `pushCommit`, emits
 * logging/metrics, fans out change notifications to other connected
 * clients, and always answers the caller with a push response keyed by
 * `requestId` (both on success and on failure).
 *
 * @param msg         Parsed JSON message from the socket; expected to carry
 *                    `requestId`, `clientCommitId`, `operations`, `schemaVersion`.
 * @param conn        Connection wrapper exposing `sendPushResponse` and
 *                    `transportPath`.
 * @param actorId     Authenticated actor id for this connection.
 * @param partitionId Partition the connection is bound to.
 * @param clientId    Sync client id supplied at connection time.
 */
async function handleWsPush(msg, conn, actorId, partitionId, clientId) {
    // Without a requestId the client cannot correlate a response, so there
    // is nothing useful to send back — drop the message silently.
    const requestId = typeof msg.requestId === 'string' ? msg.requestId : '';
    if (!requestId)
        return;
    try {
        // Validate the push payload. `clientId` is omitted from the schema
        // because it comes from the connection, not the message body.
        const parsed = SyncPushRequestSchema.omit({ clientId: true }).safeParse(msg);
        if (!parsed.success) {
            conn.sendPushResponse({
                requestId,
                ok: false,
                status: 'rejected',
                results: [
                    { opIndex: 0, status: 'error', error: 'Invalid push payload' },
                ],
            });
            return;
        }
        const pushOps = parsed.data.operations ?? [];
        // Enforce the same per-push operation cap as the HTTP push route.
        if (pushOps.length > maxOperationsPerPush) {
            conn.sendPushResponse({
                requestId,
                ok: false,
                status: 'rejected',
                results: [
                    {
                        opIndex: 0,
                        status: 'error',
                        error: `Maximum ${maxOperationsPerPush} operations per push`,
                    },
                ],
            });
            return;
        }
        const timer = createSyncTimer();
        const pushed = await pushCommit({
            db: options.db,
            dialect: options.dialect,
            handlers: handlerRegistry,
            actorId,
            partitionId,
            request: {
                clientId,
                clientCommitId: parsed.data.clientCommitId,
                operations: parsed.data.operations,
                schemaVersion: parsed.data.schemaVersion,
            },
        });
        const pushDurationMs = timer();
        logSyncEvent({
            event: 'sync.push',
            userId: actorId,
            durationMs: pushDurationMs,
            operationCount: pushOps.length,
            status: pushed.response.status,
            commitSeq: pushed.response.commitSeq,
        });
        // Fire-and-forget request accounting (helper name implies background
        // execution; no await here by design).
        recordRequestEventInBackground({
            eventType: 'push',
            actorId,
            clientId,
            transportPath: conn.transportPath,
            statusCode: 200,
            outcome: pushed.response.status,
            durationMs: pushDurationMs,
            commitSeq: pushed.response.commitSeq,
            operationCount: pushOps.length,
            tables: pushed.affectedTables,
        });
        // WS notifications to other clients — only for commits that were
        // actually applied and produced a numeric commitSeq.
        if (wsConnectionManager &&
            pushed.response.ok === true &&
            pushed.response.status === 'applied' &&
            typeof pushed.response.commitSeq === 'number') {
            // Scope keys must be partition-qualified before fan-out so that
            // subscribers in other partitions are never notified.
            const scopeKeys = applyPartitionToScopeKeys(partitionId, pushed.scopeKeys);
            if (scopeKeys.length > 0) {
                // Exclude the pushing client — it already knows about its own commit.
                wsConnectionManager.notifyScopeKeys(scopeKeys, pushed.response.commitSeq, {
                    excludeClientIds: [clientId],
                    changes: pushed.emittedChanges,
                });
                if (realtimeBroadcaster) {
                    // Cross-instance broadcast; best-effort, failures are
                    // logged (once per failure key) rather than surfaced.
                    realtimeBroadcaster
                        .publish({
                        type: 'commit',
                        commitSeq: pushed.response.commitSeq,
                        partitionId,
                        scopeKeys,
                        sourceInstanceId: instanceId,
                    })
                        .catch((error) => {
                        logAsyncFailureOnce('sync.realtime.broadcast_publish_failed', {
                            event: 'sync.realtime.broadcast_publish_failed',
                            userId: actorId,
                            clientId,
                            error: error instanceof Error ? error.message : String(error),
                        });
                    });
                }
            }
        }
        // Echo the commit outcome back to the pushing client.
        conn.sendPushResponse({
            requestId,
            ok: pushed.response.ok,
            status: pushed.response.status,
            commitSeq: pushed.response.commitSeq,
            results: pushed.response.results,
        });
    }
    catch (err) {
        captureSyncException(err, {
            event: 'sync.realtime.push_failed',
            requestId,
            clientId,
            actorId,
            partitionId,
        });
        // Still answer the client so its pending request does not hang.
        const message = err instanceof Error ? err.message : 'Internal server error';
        conn.sendPushResponse({
            requestId,
            ok: false,
            status: 'rejected',
            results: [{ opIndex: 0, status: 'error', error: message }],
        });
    }
}
629
792
  }
630
793
  export function getSyncWebSocketConnectionManager(routes) {
631
794
  return wsConnectionManagerMap.get(routes);
@@ -677,18 +840,42 @@ function scopeValuesToScopeKeys(scopes) {
677
840
  }
678
841
  return Array.from(scopeKeys);
679
842
  }
680
- async function readCommitScopeKeys(db, commitSeq) {
843
/**
 * Build the partition-qualified form of a scope key.
 *
 * @param {string} partitionId - Partition identifier used as the prefix.
 * @param {string} scopeKey - Bare (unqualified) scope key.
 * @returns {string} `"<partitionId>::<scopeKey>"`.
 */
function partitionScopeKey(partitionId, scopeKey) {
    return partitionId + '::' + scopeKey;
}
846
/**
 * Qualify a list of scope keys with a partition prefix, deduplicating the
 * result. Keys that already carry this partition's prefix are kept as-is;
 * empty/falsy keys are dropped.
 *
 * @param {string} partitionId - Partition identifier to prefix with.
 * @param {string[]} scopeKeys - Scope keys, possibly already qualified.
 * @returns {string[]} Unique partition-qualified scope keys, in first-seen order.
 */
function applyPartitionToScopeKeys(partitionId, scopeKeys) {
    const prefix = `${partitionId}::`;
    const qualified = new Set();
    for (const key of scopeKeys) {
        if (!key)
            continue;
        qualified.add(key.startsWith(prefix) ? key : prefix + key);
    }
    return [...qualified];
}
859
/**
 * Normalize a client-supplied scope key into this partition's namespace.
 *
 * A key already prefixed with this partition passes through unchanged; a
 * bare key gets the partition prefix. A key qualified for a *different*
 * partition (contains `::` but not our prefix) is rejected by returning
 * the empty string.
 *
 * @param {string} partitionId - Partition the connection belongs to.
 * @param {string} scopeKey - Raw scope key from the client.
 * @returns {string} Partition-qualified key, or `''` when rejected.
 */
function normalizeScopeKeyForPartition(partitionId, scopeKey) {
    const prefix = `${partitionId}::`;
    if (scopeKey.startsWith(prefix))
        return scopeKey;
    return scopeKey.includes('::') ? '' : prefix + scopeKey;
}
866
+ async function readCommitScopeKeys(db, commitSeq, partitionId) {
681
867
  // Read scopes from the JSONB column and convert to scope strings
682
868
  const rowsResult = await sql `
683
869
  select scopes
684
870
  from ${sql.table('sync_changes')}
685
871
  where commit_seq = ${commitSeq}
872
+ and partition_id = ${partitionId}
686
873
  `.execute(db);
687
874
  const rows = rowsResult.rows;
688
875
  const scopeKeys = new Set();
689
876
  for (const row of rows) {
690
877
  const scopes = typeof row.scopes === 'string' ? JSON.parse(row.scopes) : row.scopes;
691
- for (const k of scopeValuesToScopeKeys(scopes)) {
878
+ for (const k of applyPartitionToScopeKeys(partitionId, scopeValuesToScopeKeys(scopes))) {
692
879
  scopeKeys.add(k);
693
880
  }
694
881
  }