@syncular/server-dialect-postgres 0.0.1-60

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,837 @@
+ /**
+  * @syncular/server-dialect-postgres - PostgreSQL Server Sync Dialect
+  *
+  * Driver-agnostic PostgreSQL dialect for sync. Works with any Postgres-compatible
+  * Kysely dialect (pg, pglite, neon, etc.).
+  *
+  * Tables:
+  * - sync_commits: commit log (idempotency + ordering)
+  * - sync_table_commits: commit routing index (fast pull by table)
+  * - sync_changes: change log (JSONB scopes for filtering)
+  * - sync_client_cursors: per-client cursor tracking (pruning/observability)
+  */
+
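+ // Illustrative only (hypothetical values): pushing one commit that inserts two
+ // rows into a "todos" table lands as roughly:
+ //   sync_commits:       1 row  (commit_seq=42, change_count=2, affected_tables={todos})
+ //   sync_table_commits: 1 row  (partition_id='default', "table"='todos', commit_seq=42)
+ //   sync_changes:       2 rows (op='insert', scopes such as {"user_id": "u1"})
+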
+ import type { ScopeValues, StoredScopes, SyncOp } from '@syncular/core';
+ import type { DbExecutor, ServerSyncDialect } from '@syncular/server';
+ import type {
+   SyncChangeRow,
+   SyncCommitRow,
+   SyncCoreDb,
+ } from '@syncular/server/schema';
+ import type { Kysely, Transaction } from 'kysely';
+ import { sql } from 'kysely';
+
+ function coerceNumber(value: unknown): number | null {
+   if (value === null || value === undefined) return null;
+   if (typeof value === 'number') return Number.isFinite(value) ? value : null;
+   if (typeof value === 'bigint')
+     return Number.isFinite(Number(value)) ? Number(value) : null;
+   if (typeof value === 'string') {
+     const n = Number(value);
+     return Number.isFinite(n) ? n : null;
+   }
+   return null;
+ }
+
+ function coerceIsoString(value: unknown): string {
+   if (typeof value === 'string') return value;
+   if (value instanceof Date) return value.toISOString();
+   return String(value);
+ }
+
+ function parseScopes(value: unknown): StoredScopes {
+   if (value === null || value === undefined) return {};
+   if (typeof value === 'object' && !Array.isArray(value)) {
+     const result: StoredScopes = {};
+     for (const [k, v] of Object.entries(value as Record<string, unknown>)) {
+       if (typeof v === 'string') {
+         result[k] = v;
+       }
+     }
+     return result;
+   }
+   return {};
+ }
+
+ export class PostgresServerSyncDialect implements ServerSyncDialect {
+   readonly name = 'postgres' as const;
+   readonly supportsForUpdate = true;
+   readonly supportsSavepoints = true;
+
+   // ===========================================================================
+   // Schema Setup
+   // ===========================================================================
+
+   async ensureSyncSchema<DB extends SyncCoreDb>(db: Kysely<DB>): Promise<void> {
+     await db.schema
+       .createTable('sync_commits')
+       .ifNotExists()
+       .addColumn('commit_seq', 'bigserial', (col) => col.primaryKey())
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('actor_id', 'text', (col) => col.notNull())
+       .addColumn('client_id', 'text', (col) => col.notNull())
+       .addColumn('client_commit_id', 'text', (col) => col.notNull())
+       .addColumn('created_at', 'timestamptz', (col) =>
+         col.notNull().defaultTo(sql`now()`)
+       )
+       .addColumn('meta', 'jsonb')
+       .addColumn('result_json', 'jsonb')
+       .addColumn('change_count', 'integer', (col) => col.notNull().defaultTo(0))
+       .addColumn('affected_tables', sql`text[]`, (col) =>
+         col.notNull().defaultTo(sql`ARRAY[]::text[]`)
+       )
+       .execute();
+
+     // Ensure new columns exist for dev environments that already created the table.
+     await sql`ALTER TABLE sync_commits
+       ADD COLUMN IF NOT EXISTS change_count integer NOT NULL DEFAULT 0`.execute(db);
+     await sql`ALTER TABLE sync_commits
+       ADD COLUMN IF NOT EXISTS affected_tables text[] NOT NULL DEFAULT ARRAY[]::text[]`.execute(db);
+     await sql`ALTER TABLE sync_commits
+       ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+
+     // Recreate the idempotency index so it includes partition_id.
+     await sql`DROP INDEX IF EXISTS idx_sync_commits_client_commit`.execute(db);
+     await db.schema
+       .createIndex('idx_sync_commits_client_commit')
+       .ifNotExists()
+       .on('sync_commits')
+       .columns(['partition_id', 'client_id', 'client_commit_id'])
+       .unique()
+       .execute();
+
+     // Table-based commit routing index
+     await db.schema
+       .createTable('sync_table_commits')
+       .ifNotExists()
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('table', 'text', (col) => col.notNull())
+       .addColumn('commit_seq', 'bigint', (col) =>
+         col.notNull().references('sync_commits.commit_seq').onDelete('cascade')
+       )
+       .addPrimaryKeyConstraint('sync_table_commits_pk', [
+         'partition_id',
+         'table',
+         'commit_seq',
+       ])
+       .execute();
+
+     await sql`ALTER TABLE sync_table_commits
+       ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+
+     await db.schema
+       .createIndex('idx_sync_table_commits_commit_seq')
+       .ifNotExists()
+       .on('sync_table_commits')
+       .columns(['partition_id', 'commit_seq'])
+       .execute();
+
+     // Changes table with JSONB scopes
+     await db.schema
+       .createTable('sync_changes')
+       .ifNotExists()
+       .addColumn('change_id', 'bigserial', (col) => col.primaryKey())
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('commit_seq', 'bigint', (col) =>
+         col.notNull().references('sync_commits.commit_seq').onDelete('cascade')
+       )
+       .addColumn('table', 'text', (col) => col.notNull())
+       .addColumn('row_id', 'text', (col) => col.notNull())
+       .addColumn('op', 'text', (col) => col.notNull())
+       .addColumn('row_json', 'jsonb')
+       .addColumn('row_version', 'bigint')
+       .addColumn('scopes', 'jsonb', (col) => col.notNull())
+       .execute();
+
+     await sql`ALTER TABLE sync_changes
+       ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+
+     await db.schema
+       .createIndex('idx_sync_changes_commit_seq')
+       .ifNotExists()
+       .on('sync_changes')
+       .columns(['partition_id', 'commit_seq'])
+       .execute();
+
+     await db.schema
+       .createIndex('idx_sync_changes_table')
+       .ifNotExists()
+       .on('sync_changes')
+       .columns(['partition_id', 'table'])
+       .execute();
+
+     await this.ensureIndex(
+       db,
+       'idx_sync_changes_scopes',
+       'CREATE INDEX idx_sync_changes_scopes ON sync_changes USING GIN (scopes)'
+     );
+
+     await db.schema
+       .createTable('sync_client_cursors')
+       .ifNotExists()
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('client_id', 'text', (col) => col.notNull())
+       .addColumn('actor_id', 'text', (col) => col.notNull())
+       .addColumn('cursor', 'bigint', (col) => col.notNull().defaultTo(0))
+       .addColumn('effective_scopes', 'jsonb', (col) =>
+         col.notNull().defaultTo(sql`'{}'::jsonb`)
+       )
+       .addColumn('updated_at', 'timestamptz', (col) =>
+         col.notNull().defaultTo(sql`now()`)
+       )
+       .addPrimaryKeyConstraint('sync_client_cursors_pk', [
+         'partition_id',
+         'client_id',
+       ])
+       .execute();
+
+     await sql`ALTER TABLE sync_client_cursors
+       ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+
+     await db.schema
+       .createIndex('idx_sync_client_cursors_updated_at')
+       .ifNotExists()
+       .on('sync_client_cursors')
+       .columns(['updated_at'])
+       .execute();
+
+     await db.schema
+       .createTable('sync_snapshot_chunks')
+       .ifNotExists()
+       .addColumn('chunk_id', 'text', (col) => col.primaryKey())
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('scope_key', 'text', (col) => col.notNull())
+       .addColumn('scope', 'text', (col) => col.notNull())
+       .addColumn('as_of_commit_seq', 'bigint', (col) => col.notNull())
+       .addColumn('row_cursor', 'text', (col) => col.notNull().defaultTo(''))
+       .addColumn('row_limit', 'integer', (col) => col.notNull())
+       .addColumn('encoding', 'text', (col) => col.notNull())
+       .addColumn('compression', 'text', (col) => col.notNull())
+       .addColumn('sha256', 'text', (col) => col.notNull())
+       .addColumn('byte_length', 'integer', (col) => col.notNull())
+       .addColumn('blob_hash', 'text', (col) => col.notNull().defaultTo(''))
+       .addColumn('body', 'bytea') // Deprecated: use blob storage
+       .addColumn('created_at', 'timestamptz', (col) =>
+         col.notNull().defaultTo(sql`now()`)
+       )
+       .addColumn('expires_at', 'timestamptz', (col) => col.notNull())
+       .execute();
+
+     await sql`ALTER TABLE sync_snapshot_chunks
+       ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+
+     await db.schema
+       .createIndex('idx_sync_snapshot_chunks_expires_at')
+       .ifNotExists()
+       .on('sync_snapshot_chunks')
+       .columns(['expires_at'])
+       .execute();
+
+     await db.schema
+       .createIndex('idx_sync_snapshot_chunks_page_key')
+       .ifNotExists()
+       .on('sync_snapshot_chunks')
+       .columns([
+         'partition_id',
+         'scope_key',
+         'scope',
+         'as_of_commit_seq',
+         'row_cursor',
+         'row_limit',
+         'encoding',
+         'compression',
+       ])
+       .unique()
+       .execute();
+   }
+
+   // ===========================================================================
+   // Transaction Control
+   // ===========================================================================
+
+   async executeInTransaction<DB extends SyncCoreDb, T>(
+     db: Kysely<DB>,
+     fn: (executor: DbExecutor<DB>) => Promise<T>
+   ): Promise<T> {
+     return db.transaction().execute(fn);
+   }
+
+   async setRepeatableRead<DB extends SyncCoreDb>(
+     trx: DbExecutor<DB>
+   ): Promise<void> {
+     await sql`SET TRANSACTION ISOLATION LEVEL REPEATABLE READ`.execute(trx);
+   }
+
+   // ===========================================================================
+   // Commit/Change Log Queries
+   // ===========================================================================
+
+   async readMaxCommitSeq<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     options?: { partitionId?: string }
+   ): Promise<number> {
+     const partitionId = options?.partitionId ?? 'default';
+     const res = await sql<{ max_seq: unknown }>`
+       SELECT max(commit_seq) as max_seq
+       FROM sync_commits
+       WHERE partition_id = ${partitionId}
+     `.execute(db);
+
+     return coerceNumber(res.rows[0]?.max_seq) ?? 0;
+   }
+
+   async readMinCommitSeq<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     options?: { partitionId?: string }
+   ): Promise<number> {
+     const partitionId = options?.partitionId ?? 'default';
+     const res = await sql<{ min_seq: unknown }>`
+       SELECT min(commit_seq) as min_seq
+       FROM sync_commits
+       WHERE partition_id = ${partitionId}
+     `.execute(db);
+
+     return coerceNumber(res.rows[0]?.min_seq) ?? 0;
+   }
+
+   async readCommitSeqsForPull<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       cursor: number;
+       limitCommits: number;
+       tables: string[];
+       partitionId?: string;
+     }
+   ): Promise<number[]> {
+     const partitionId = args.partitionId ?? 'default';
+     if (args.tables.length === 0) return [];
+
+     if (args.tables.length === 1) {
+       const res = await sql<{ commit_seq: unknown }>`
+         SELECT commit_seq
+         FROM sync_table_commits
+         WHERE partition_id = ${partitionId}
+           AND "table" = ${args.tables[0]}
+           AND commit_seq > ${args.cursor}
+         ORDER BY commit_seq ASC
+         LIMIT ${args.limitCommits}
+       `.execute(db);
+
+       return res.rows
+         .map((r) => coerceNumber(r.commit_seq))
+         .filter(
+           (n): n is number =>
+             typeof n === 'number' && Number.isFinite(n) && n > args.cursor
+         );
+     }
+
+     const res = await sql<{ commit_seq: unknown }>`
+       SELECT DISTINCT commit_seq
+       FROM sync_table_commits
+       WHERE partition_id = ${partitionId}
+         AND "table" = ANY(${args.tables}::text[])
+         AND commit_seq > ${args.cursor}
+       ORDER BY commit_seq ASC
+       LIMIT ${args.limitCommits}
+     `.execute(db);
+
+     return res.rows
+       .map((r) => coerceNumber(r.commit_seq))
+       .filter(
+         (n): n is number =>
+           typeof n === 'number' && Number.isFinite(n) && n > args.cursor
+       );
+   }
+
+   async readCommits<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     commitSeqs: number[],
+     options?: { partitionId?: string }
+   ): Promise<SyncCommitRow[]> {
+     const partitionId = options?.partitionId ?? 'default';
+     if (commitSeqs.length === 0) return [];
+
+     const res = await sql<{
+       commit_seq: unknown;
+       actor_id: string;
+       created_at: unknown;
+       result_json: unknown | null;
+     }>`
+       SELECT commit_seq, actor_id, created_at, result_json
+       FROM sync_commits
+       WHERE commit_seq = ANY(${commitSeqs}::bigint[])
+         AND partition_id = ${partitionId}
+       ORDER BY commit_seq ASC
+     `.execute(db);
+
+     return res.rows.map((row) => ({
+       commit_seq: coerceNumber(row.commit_seq) ?? 0,
+       actor_id: row.actor_id,
+       created_at: coerceIsoString(row.created_at),
+       result_json: row.result_json ?? null,
+     }));
+   }
+
+   async readChangesForCommits<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       commitSeqs: number[];
+       table: string;
+       scopes: ScopeValues;
+       partitionId?: string;
+     }
+   ): Promise<SyncChangeRow[]> {
+     const partitionId = args.partitionId ?? 'default';
+     if (args.commitSeqs.length === 0) return [];
+
+     // Build scope-filter conditions against the JSONB column: for each scope
+     // key, match one value with scopes->>'key' = 'value', or any of an array
+     // of values with scopes->>'key' = ANY(...).
+     const scopeConditions: ReturnType<typeof sql>[] = [];
+     for (const [key, value] of Object.entries(args.scopes)) {
+       if (Array.isArray(value)) {
+         scopeConditions.push(sql`scopes->>${key} = ANY(${value}::text[])`);
+       } else {
+         scopeConditions.push(sql`scopes->>${key} = ${value}`);
+       }
+     }
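+
+     // Illustrative (hypothetical values): scopes = { user_id: 'u1', org_id: ['o1', 'o2'] }
+     // compiles to: scopes->>'user_id' = $1 AND scopes->>'org_id' = ANY($2::text[])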
+     const scopeFilter =
+       scopeConditions.length > 0
+         ? sql.join(scopeConditions, sql` AND `)
+         : sql`TRUE`;
+
+     // One query covers both the scoped and unscoped cases; the ORDER BY keeps
+     // results deterministic either way.
+     const res = await sql<{
+       commit_seq: unknown;
+       table: string;
+       row_id: string;
+       op: string;
+       row_json: unknown | null;
+       row_version: unknown | null;
+       scopes: unknown;
+     }>`
+       SELECT commit_seq, "table", row_id, op, row_json, row_version, scopes
+       FROM sync_changes
+       WHERE commit_seq = ANY(${args.commitSeqs}::bigint[])
+         AND partition_id = ${partitionId}
+         AND "table" = ${args.table}
+         AND (${scopeFilter})
+       ORDER BY commit_seq ASC, change_id ASC
+     `.execute(db);
+
+     return res.rows.map((row) => ({
+       commit_seq: coerceNumber(row.commit_seq) ?? 0,
+       table: row.table,
+       row_id: row.row_id,
+       op: row.op as SyncOp,
+       row_json: row.row_json ?? null,
+       row_version: coerceNumber(row.row_version),
+       scopes: parseScopes(row.scopes),
+     }));
+   }
+
+   async readIncrementalPullRows<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       table: string;
+       scopes: ScopeValues;
+       cursor: number;
+       limitCommits: number;
+       partitionId?: string;
+     }
+   ): Promise<
+     Array<{
+       commit_seq: number;
+       actor_id: string;
+       created_at: string;
+       change_id: number;
+       table: string;
+       row_id: string;
+       op: SyncOp;
+       row_json: unknown | null;
+       row_version: number | null;
+       scopes: StoredScopes;
+     }>
+   > {
+     const partitionId = args.partitionId ?? 'default';
+     const limitCommits = Math.max(1, Math.min(500, args.limitCommits));
+
+     // Build scope filter conditions
+     const scopeConditions: ReturnType<typeof sql>[] = [];
+     for (const [key, value] of Object.entries(args.scopes)) {
+       if (Array.isArray(value)) {
+         scopeConditions.push(sql`c.scopes->>${key} = ANY(${value}::text[])`);
+       } else {
+         scopeConditions.push(sql`c.scopes->>${key} = ${value}`);
+       }
+     }
+
+     const scopeFilter =
+       scopeConditions.length > 0
+         ? sql.join(scopeConditions, sql` AND `)
+         : sql`TRUE`;
+
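+     // Two-phase read: the CTE selects the next batch of commit_seqs touching
+     // this table and scope set, then the outer query joins back for the full
+     // rows, so LIMIT bounds commits rather than individual changes.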
+     const res = await sql<{
+       commit_seq: unknown;
+       actor_id: string;
+       created_at: unknown;
+       change_id: unknown;
+       table: string;
+       row_id: string;
+       op: string;
+       row_json: unknown | null;
+       row_version: unknown | null;
+       scopes: unknown;
+     }>`
+       WITH commit_seqs AS (
+         SELECT DISTINCT tc.commit_seq
+         FROM sync_table_commits tc
+         JOIN sync_commits cm ON cm.commit_seq = tc.commit_seq
+         WHERE tc.partition_id = ${partitionId}
+           AND tc."table" = ${args.table}
+           AND cm.partition_id = ${partitionId}
+           AND tc.commit_seq > ${args.cursor}
+           AND EXISTS (
+             SELECT 1
+             FROM sync_changes c
+             WHERE c.commit_seq = tc.commit_seq
+               AND c.partition_id = ${partitionId}
+               AND c."table" = ${args.table}
+               AND (${scopeFilter})
+           )
+         ORDER BY tc.commit_seq ASC
+         LIMIT ${limitCommits}
+       )
+       SELECT
+         cm.commit_seq,
+         cm.actor_id,
+         cm.created_at,
+         c.change_id,
+         c."table",
+         c.row_id,
+         c.op,
+         c.row_json,
+         c.row_version,
+         c.scopes
+       FROM commit_seqs cs
+       JOIN sync_commits cm ON cm.commit_seq = cs.commit_seq
+       JOIN sync_changes c ON c.commit_seq = cs.commit_seq
+       WHERE cm.partition_id = ${partitionId}
+         AND c.partition_id = ${partitionId}
+         AND c."table" = ${args.table}
+         AND (${scopeFilter})
+       ORDER BY cm.commit_seq ASC, c.change_id ASC
+     `.execute(db);
+
+     return res.rows.map((row) => ({
+       commit_seq: coerceNumber(row.commit_seq) ?? 0,
+       actor_id: row.actor_id,
+       created_at: coerceIsoString(row.created_at),
+       change_id: coerceNumber(row.change_id) ?? 0,
+       table: row.table,
+       row_id: row.row_id,
+       op: row.op as SyncOp,
+       row_json: row.row_json ?? null,
+       row_version: coerceNumber(row.row_version),
+       scopes: parseScopes(row.scopes),
+     }));
+   }
+
+   async *streamIncrementalPullRows<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       table: string;
+       scopes: ScopeValues;
+       cursor: number;
+       limitCommits: number;
+       batchSize?: number;
+       partitionId?: string;
+     }
+   ): AsyncGenerator<{
+     commit_seq: number;
+     actor_id: string;
+     created_at: string;
+     change_id: number;
+     table: string;
+     row_id: string;
+     op: SyncOp;
+     row_json: unknown | null;
+     row_version: number | null;
+     scopes: StoredScopes;
+   }> {
+     // PostgreSQL: batched reads (pg-query-stream would allow true streaming).
+     const batchSize = Math.min(100, args.batchSize ?? 100);
+     let commitsProcessed = 0;
+     let cursor = args.cursor;
+
+     while (commitsProcessed < args.limitCommits) {
+       const batchLimit = Math.min(batchSize, args.limitCommits - commitsProcessed);
+       const batch = await this.readIncrementalPullRows(db, {
+         ...args,
+         limitCommits: batchLimit,
+         cursor,
+       });
+
+       if (batch.length === 0) break;
+
+       // Advance the cursor to the highest commit_seq actually seen; commit
+       // sequences are not contiguous, so arithmetic on the cursor would skip
+       // or repeat commits.
+       const seenCommits = new Set<number>();
+       for (const row of batch) {
+         seenCommits.add(row.commit_seq);
+         if (row.commit_seq > cursor) cursor = row.commit_seq;
+         yield row;
+       }
+
+       commitsProcessed += seenCommits.size;
+       // Fewer commits than requested means the log is exhausted for now.
+       if (seenCommits.size < batchLimit) break;
+     }
+   }
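+
+   // Illustrative usage (hypothetical call site, assuming a surrounding
+   // transaction `trx`):
+   //   for await (const row of dialect.streamIncrementalPullRows(trx, {
+   //     table: 'todos', scopes: { user_id: 'u1' }, cursor: 0, limitCommits: 200,
+   //   })) { /* serialize into the pull response */ }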
+
+   async compactChanges<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: { fullHistoryHours: number }
+   ): Promise<number> {
+     const cutoffIso = new Date(
+       Date.now() - args.fullHistoryHours * 60 * 60 * 1000
+     ).toISOString();
+
+     const res = await sql`
+       WITH ranked AS (
+         SELECT
+           c.change_id,
+           row_number() OVER (
+             PARTITION BY c.partition_id, c."table", c.row_id, c.scopes
+             ORDER BY c.commit_seq DESC, c.change_id DESC
+           ) AS rn
+         FROM sync_changes c
+         JOIN sync_commits cm ON cm.commit_seq = c.commit_seq
+         WHERE cm.created_at < ${cutoffIso}
+       )
+       DELETE FROM sync_changes
+       WHERE change_id IN (SELECT change_id FROM ranked WHERE rn > 1)
+     `.execute(db);
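+     // Illustrative (hypothetical values): if a row has qualifying changes at
+     // commits 10 and 17 with identical scopes, the CTE ranks commit 17 as
+     // rn=1 (kept) and commit 10 as rn=2 (deleted).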
+
+     const deletedChanges = Number(res.numAffectedRows ?? 0);
+
+     // Remove routing index entries that no longer have any remaining changes
+     await sql`
+       DELETE FROM sync_table_commits tc
+       USING sync_commits cm
+       WHERE cm.commit_seq = tc.commit_seq
+         AND cm.partition_id = tc.partition_id
+         AND cm.created_at < ${cutoffIso}
+         AND NOT EXISTS (
+           SELECT 1
+           FROM sync_changes c
+           WHERE c.commit_seq = tc.commit_seq
+             AND c.partition_id = tc.partition_id
+             AND c."table" = tc."table"
+         )
+     `.execute(db);
+
+     return deletedChanges;
+   }
+
+   // ===========================================================================
+   // Client Cursor Recording
+   // ===========================================================================
+
+   async recordClientCursor<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       partitionId?: string;
+       clientId: string;
+       actorId: string;
+       cursor: number;
+       effectiveScopes: ScopeValues;
+     }
+   ): Promise<void> {
+     const partitionId = args.partitionId ?? 'default';
+     const now = new Date().toISOString();
+     const scopesJson = JSON.stringify(args.effectiveScopes);
+
+     await sql`
+       INSERT INTO sync_client_cursors (partition_id, client_id, actor_id, cursor, effective_scopes, updated_at)
+       VALUES (${partitionId}, ${args.clientId}, ${args.actorId}, ${args.cursor}, ${scopesJson}::jsonb, ${now})
+       ON CONFLICT(partition_id, client_id) DO UPDATE SET
+         actor_id = ${args.actorId},
+         cursor = ${args.cursor},
+         effective_scopes = ${scopesJson}::jsonb,
+         updated_at = ${now}
+     `.execute(db);
+   }
+
+   // ===========================================================================
+   // Scope Conversion Helpers
+   // ===========================================================================
+
+   scopesToDb(scopes: StoredScopes): StoredScopes {
+     return scopes;
+   }
+
+   dbToScopes(value: unknown): StoredScopes {
+     return parseScopes(value);
+   }
+
+   dbToArray(value: unknown): string[] {
+     if (Array.isArray(value)) {
+       return value.filter((k: unknown): k is string => typeof k === 'string');
+     }
+     return [];
+   }
+
+   arrayToDb(values: string[]): string[] {
+     return values.filter((v) => v.length > 0);
+   }
+
+   async readAffectedTablesFromChanges<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     commitSeq: number,
+     options?: { partitionId?: string }
+   ): Promise<string[]> {
+     const partitionId = options?.partitionId ?? 'default';
+     const res = await sql<{ table: string }>`
+       SELECT DISTINCT "table"
+       FROM sync_changes
+       WHERE commit_seq = ${commitSeq}
+         AND partition_id = ${partitionId}
+     `.execute(db);
+
+     return res.rows
+       .map((r) => r.table)
+       .filter((t): t is string => typeof t === 'string' && t.length > 0);
+   }
+
+   // ===========================================================================
+   // Console Schema (Request Events)
+   // ===========================================================================
+
+   async ensureConsoleSchema<DB extends SyncCoreDb>(
+     db: Kysely<DB>
+   ): Promise<void> {
+     await sql`
+       CREATE TABLE IF NOT EXISTS sync_request_events (
+         event_id BIGSERIAL PRIMARY KEY,
+         event_type TEXT NOT NULL,
+         actor_id TEXT NOT NULL,
+         client_id TEXT NOT NULL,
+         transport_path TEXT NOT NULL DEFAULT 'direct',
+         status_code INTEGER NOT NULL,
+         outcome TEXT NOT NULL,
+         duration_ms INTEGER NOT NULL,
+         commit_seq BIGINT,
+         operation_count INTEGER,
+         row_count INTEGER,
+         tables TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
+         error_message TEXT,
+         created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+       )
+     `.execute(db);
+     await sql`
+       ALTER TABLE sync_request_events
+       ADD COLUMN IF NOT EXISTS transport_path TEXT NOT NULL DEFAULT 'direct'
+     `.execute(db);
+
+     await this.ensureIndex(
+       db,
+       'idx_sync_request_events_created_at',
+       'CREATE INDEX idx_sync_request_events_created_at ON sync_request_events(created_at DESC)'
+     );
+     await this.ensureIndex(
+       db,
+       'idx_sync_request_events_event_type',
+       'CREATE INDEX idx_sync_request_events_event_type ON sync_request_events(event_type)'
+     );
+     await this.ensureIndex(
+       db,
+       'idx_sync_request_events_client_id',
+       'CREATE INDEX idx_sync_request_events_client_id ON sync_request_events(client_id)'
+     );
+
+     // API Keys table
+     await sql`
+       CREATE TABLE IF NOT EXISTS sync_api_keys (
+         key_id TEXT PRIMARY KEY,
+         key_hash TEXT NOT NULL,
+         key_prefix TEXT NOT NULL,
+         name TEXT NOT NULL,
+         key_type TEXT NOT NULL,
+         scope_keys TEXT[] DEFAULT ARRAY[]::TEXT[],
+         actor_id TEXT,
+         created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+         expires_at TIMESTAMPTZ,
+         last_used_at TIMESTAMPTZ,
+         revoked_at TIMESTAMPTZ
+       )
+     `.execute(db);
+
+     await this.ensureIndex(
+       db,
+       'idx_sync_api_keys_key_hash',
+       'CREATE INDEX idx_sync_api_keys_key_hash ON sync_api_keys(key_hash)'
+     );
+     await this.ensureIndex(
+       db,
+       'idx_sync_api_keys_key_type',
+       'CREATE INDEX idx_sync_api_keys_key_type ON sync_api_keys(key_type)'
+     );
+   }
+
+   // ===========================================================================
+   // Private Helpers
+   // ===========================================================================
+
+   // Runs a raw CREATE INDEX statement only when pg_indexes shows the index is
+   // missing; used where raw SQL is preferred over the schema builder (GIN,
+   // DESC ordering).
+   private async ensureIndex<DB extends SyncCoreDb>(
+     db: Kysely<DB>,
+     indexName: string,
+     createSql: string
+   ): Promise<void> {
+     const exists = await sql<{ ok: 1 }>`
+       SELECT 1 as ok
+       FROM pg_indexes
+       WHERE schemaname = 'public'
+         AND indexname = ${indexName}
+       LIMIT 1
+     `.execute(db);
+
+     if (exists.rows.length > 0) return;
+     await sql.raw(createSql).execute(db);
+   }
+ }
+
+ export function createPostgresServerDialect(): PostgresServerSyncDialect {
+   return new PostgresServerSyncDialect();
+ }
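
Usage, as a minimal sketch (hypothetical wiring; the pool setup is an assumption — only createPostgresServerDialect, ensureSyncSchema, and readMaxCommitSeq come from this package, the rest is standard kysely/pg API):

  import { Pool } from 'pg';
  import { Kysely, PostgresDialect } from 'kysely';
  import type { SyncCoreDb } from '@syncular/server/schema';
  import { createPostgresServerDialect } from '@syncular/server-dialect-postgres';

  const db = new Kysely<SyncCoreDb>({
    dialect: new PostgresDialect({
      pool: new Pool({ connectionString: process.env.DATABASE_URL }),
    }),
  });

  const dialect = createPostgresServerDialect();
  await dialect.ensureSyncSchema(db); // idempotent; safe to run at every startup
  const head = await dialect.readMaxCommitSeq(db); // 0 when the commit log is empty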