@syncular/server-dialect-sqlite 0.0.1-60

package/src/index.ts ADDED
@@ -0,0 +1,928 @@
+ /**
+  * @syncular/server-dialect-sqlite - SQLite Server Sync Dialect
+  *
+  * SQLite adaptation of the commit-log based sync system.
+  * Works with any SQLite-compatible Kysely dialect (bun:sqlite, wa-sqlite, better-sqlite3, etc.).
+  *
+  * Key differences from Postgres:
+  * - No bigserial → INTEGER PRIMARY KEY AUTOINCREMENT
+  * - No JSONB → JSON stored as TEXT (with json_extract for filtering)
+  * - No array && overlap → JSON object key matching
+  * - No timestamptz → TEXT with ISO format
+  * - No GIN index → regular index + manual filtering
+  * - REPEATABLE READ → no-op (SQLite uses serializable by default)
+  */
+
+ import type { ScopeValues, StoredScopes, SyncOp } from '@syncular/core';
+ import type { DbExecutor, ServerSyncDialect } from '@syncular/server';
+ import type {
+   SyncChangeRow,
+   SyncCommitRow,
+   SyncCoreDb,
+ } from '@syncular/server/schema';
+ import type { Kysely, Transaction } from 'kysely';
+ import { sql } from 'kysely';
+
+ function coerceNumber(value: unknown): number | null {
+   if (value === null || value === undefined) return null;
+   if (typeof value === 'number') return Number.isFinite(value) ? value : null;
+   if (typeof value === 'bigint')
+     return Number.isFinite(Number(value)) ? Number(value) : null;
+   if (typeof value === 'string') {
+     const n = Number(value);
+     return Number.isFinite(n) ? n : null;
+   }
+   return null;
+ }
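+ // Different drivers surface SQLite integers differently (number, bigint, or
+ // even string), hence the defensive coercion above. For example:
+ //   coerceNumber(42) === 42; coerceNumber(42n) === 42;
+ //   coerceNumber('42') === 42; coerceNumber('abc') === null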
+
+ function coerceIsoString(value: unknown): string {
+   if (typeof value === 'string') return value;
+   if (value instanceof Date) return value.toISOString();
+   return String(value);
+ }
+
+ function parseJsonValue(value: unknown): unknown {
+   if (value === null || value === undefined) return null;
+   if (typeof value === 'object') return value;
+   if (typeof value === 'string') {
+     try {
+       return JSON.parse(value);
+     } catch {
+       return value;
+     }
+   }
+   return value;
+ }
+
+ function parseScopes(value: unknown): StoredScopes {
+   if (value === null || value === undefined) return {};
+   if (typeof value === 'object' && !Array.isArray(value)) {
+     const result: StoredScopes = {};
+     for (const [k, v] of Object.entries(value as Record<string, unknown>)) {
+       if (typeof v === 'string') {
+         result[k] = v;
+       }
+     }
+     return result;
+   }
+   if (typeof value === 'string') {
+     try {
+       const parsed = JSON.parse(value);
+       if (
+         typeof parsed === 'object' &&
+         parsed !== null &&
+         !Array.isArray(parsed)
+       ) {
+         const result: StoredScopes = {};
+         for (const [k, v] of Object.entries(
+           parsed as Record<string, unknown>
+         )) {
+           if (typeof v === 'string') {
+             result[k] = v;
+           }
+         }
+         return result;
+       }
+     } catch {
+       // ignore
+     }
+   }
+   return {};
+ }
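+ // For example, parseScopes('{"user":"u1","count":3}') yields { user: 'u1' }
+ // (non-string values are dropped); arrays and malformed JSON yield {}.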
+
+ function toStringArray(value: unknown): string[] {
+   if (Array.isArray(value)) {
+     return value.filter((k: unknown): k is string => typeof k === 'string');
+   }
+   if (typeof value === 'string') {
+     try {
+       const parsed = JSON.parse(value);
+       if (Array.isArray(parsed)) {
+         return parsed.filter(
+           (k: unknown): k is string => typeof k === 'string'
+         );
+       }
+     } catch {
+       // ignore
+     }
+   }
+   return [];
+ }
+
+ /**
+  * Check whether stored scopes match the requested scope values.
+  * Array values use OR semantics; a stored scope that lacks a requested
+  * key does not match.
+  */
+ function scopesMatch(stored: StoredScopes, requested: ScopeValues): boolean {
+   for (const [key, value] of Object.entries(requested)) {
+     const storedValue = stored[key];
+     if (storedValue === undefined) return false;
+     if (Array.isArray(value)) {
+       if (!value.includes(storedValue)) return false;
+     } else {
+       if (storedValue !== value) return false;
+     }
+   }
+   return true;
+ }
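+ // For example:
+ //   scopesMatch({ team: 't1' }, { team: ['t1', 't2'] }) === true  // OR over arrays
+ //   scopesMatch({ team: 't1' }, { team: 't2' }) === false
+ //   scopesMatch({}, { team: 't1' }) === false                     // missing stored key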
+
+ async function ensurePartitionColumn<DB extends SyncCoreDb>(
+   db: Kysely<DB>,
+   table: string
+ ): Promise<void> {
+   try {
+     await sql.raw(
+       `ALTER TABLE ${table} ADD COLUMN partition_id TEXT NOT NULL DEFAULT 'default'`
+     ).execute(db);
+   } catch {
+     // Ignore when column already exists (or table is immutable in the current backend).
+   }
+ }
+
+ export class SqliteServerSyncDialect implements ServerSyncDialect {
+   readonly name = 'sqlite' as const;
+   readonly supportsForUpdate = false;
+   readonly supportsSavepoints: boolean;
+   private readonly _supportsTransactions: boolean;
+
+   constructor(options?: { supportsTransactions?: boolean }) {
+     this._supportsTransactions = options?.supportsTransactions ?? true;
+     this.supportsSavepoints = this._supportsTransactions;
+   }
+
+   // ===========================================================================
+   // Schema Setup
+   // ===========================================================================
+
+   async ensureSyncSchema<DB extends SyncCoreDb>(db: Kysely<DB>): Promise<void> {
+     await sql`PRAGMA foreign_keys = ON`.execute(db);
+
+     const nowIso = sql`(strftime('%Y-%m-%dT%H:%M:%fZ','now'))`;
+
+     // sync_commits table
+     await db.schema
+       .createTable('sync_commits')
+       .ifNotExists()
+       .addColumn('commit_seq', 'integer', (col) =>
+         col.primaryKey().autoIncrement()
+       )
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('actor_id', 'text', (col) => col.notNull())
+       .addColumn('client_id', 'text', (col) => col.notNull())
+       .addColumn('client_commit_id', 'text', (col) => col.notNull())
+       .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+       .addColumn('meta', 'json')
+       .addColumn('result_json', 'json')
+       .addColumn('change_count', 'integer', (col) => col.notNull().defaultTo(0))
+       .addColumn('affected_tables', 'text', (col) =>
+         col.notNull().defaultTo('[]')
+       )
+       .execute();
+     await ensurePartitionColumn(db, 'sync_commits');
+
+     await sql`DROP INDEX IF EXISTS idx_sync_commits_client_commit`.execute(db);
+     await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_commits_client_commit
+       ON sync_commits(partition_id, client_id, client_commit_id)`.execute(db);
+
+     // sync_table_commits table (index of which commits affect which tables)
+     await db.schema
+       .createTable('sync_table_commits')
+       .ifNotExists()
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('table', 'text', (col) => col.notNull())
+       .addColumn('commit_seq', 'integer', (col) =>
+         col.notNull().references('sync_commits.commit_seq').onDelete('cascade')
+       )
+       .addPrimaryKeyConstraint('sync_table_commits_pk', [
+         'partition_id',
+         'table',
+         'commit_seq',
+       ])
+       .execute();
+     await ensurePartitionColumn(db, 'sync_table_commits');
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_table_commits_commit_seq
+       ON sync_table_commits(partition_id, commit_seq)`.execute(db);
+
+     // sync_changes table - uses JSON for scopes
+     await db.schema
+       .createTable('sync_changes')
+       .ifNotExists()
+       .addColumn('change_id', 'integer', (col) =>
+         col.primaryKey().autoIncrement()
+       )
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('commit_seq', 'integer', (col) =>
+         col.notNull().references('sync_commits.commit_seq').onDelete('cascade')
+       )
+       .addColumn('table', 'text', (col) => col.notNull())
+       .addColumn('row_id', 'text', (col) => col.notNull())
+       .addColumn('op', 'text', (col) => col.notNull())
+       .addColumn('row_json', 'json')
+       .addColumn('row_version', 'integer')
+       .addColumn('scopes', 'json', (col) => col.notNull())
+       .execute();
+     await ensurePartitionColumn(db, 'sync_changes');
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_changes_commit_seq
+       ON sync_changes(partition_id, commit_seq)`.execute(db);
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_changes_table
+       ON sync_changes(partition_id, "table")`.execute(db);
+
+     // sync_client_cursors table
+     await db.schema
+       .createTable('sync_client_cursors')
+       .ifNotExists()
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('client_id', 'text', (col) => col.notNull())
+       .addColumn('actor_id', 'text', (col) => col.notNull())
+       .addColumn('cursor', 'integer', (col) => col.notNull().defaultTo(0))
+       .addColumn('effective_scopes', 'json', (col) =>
+         col.notNull().defaultTo('{}')
+       )
+       .addColumn('updated_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+       .addPrimaryKeyConstraint('sync_client_cursors_pk', [
+         'partition_id',
+         'client_id',
+       ])
+       .execute();
+     await ensurePartitionColumn(db, 'sync_client_cursors');
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_client_cursors_updated_at
+       ON sync_client_cursors(updated_at)`.execute(db);
+
+     // sync_snapshot_chunks table
+     await db.schema
+       .createTable('sync_snapshot_chunks')
+       .ifNotExists()
+       .addColumn('chunk_id', 'text', (col) => col.primaryKey())
+       .addColumn('partition_id', 'text', (col) =>
+         col.notNull().defaultTo('default')
+       )
+       .addColumn('scope_key', 'text', (col) => col.notNull())
+       .addColumn('scope', 'text', (col) => col.notNull())
+       .addColumn('as_of_commit_seq', 'integer', (col) => col.notNull())
+       .addColumn('row_cursor', 'text', (col) => col.notNull().defaultTo(''))
+       .addColumn('row_limit', 'integer', (col) => col.notNull())
+       .addColumn('encoding', 'text', (col) => col.notNull())
+       .addColumn('compression', 'text', (col) => col.notNull())
+       .addColumn('sha256', 'text', (col) => col.notNull())
+       .addColumn('byte_length', 'integer', (col) => col.notNull())
+       .addColumn('blob_hash', 'text', (col) => col.notNull().defaultTo(''))
+       .addColumn('body', 'blob') // Deprecated: use blob storage
+       .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+       .addColumn('expires_at', 'text', (col) => col.notNull())
+       .execute();
+     await ensurePartitionColumn(db, 'sync_snapshot_chunks');
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_snapshot_chunks_expires_at
+       ON sync_snapshot_chunks(expires_at)`.execute(db);
+
+     await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_snapshot_chunks_page_key
+       ON sync_snapshot_chunks(partition_id, scope_key, scope, as_of_commit_seq, row_cursor, row_limit, encoding, compression)`.execute(
+       db
+     );
+
+     // Cleanup orphaned rows
+     await sql`
+       DELETE FROM sync_table_commits
+       WHERE commit_seq NOT IN (SELECT commit_seq FROM sync_commits)
+     `.execute(db);
+     await sql`
+       DELETE FROM sync_changes
+       WHERE commit_seq NOT IN (SELECT commit_seq FROM sync_commits)
+     `.execute(db);
+   }
+
+   // ===========================================================================
+   // Transaction Control
+   // ===========================================================================
+
+   async executeInTransaction<DB extends SyncCoreDb, T>(
+     db: Kysely<DB>,
+     fn: (executor: DbExecutor<DB>) => Promise<T>
+   ): Promise<T> {
+     if (this._supportsTransactions) {
+       return db.transaction().execute(fn);
+     }
+     return fn(db);
+   }
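+   // Note: with { supportsTransactions: false } (e.g. a backend that cannot
+   // issue BEGIN/COMMIT), `fn` runs directly against `db` with no atomicity
+   // guarantee; only disable transactions when the driver requires it.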
+
+   async setRepeatableRead<DB extends SyncCoreDb>(
+     _trx: DbExecutor<DB>
+   ): Promise<void> {
+     // No-op: SQLite transactions are serializable by default, so there is
+     // no isolation level to raise.
+   }
+
+   // ===========================================================================
+   // Commit/Change Log Queries
+   // ===========================================================================
+
+   async readMaxCommitSeq<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     options?: { partitionId?: string }
+   ): Promise<number> {
+     const partitionId = options?.partitionId ?? 'default';
+     const res = await sql<{ max_seq: unknown }>`
+       SELECT max(commit_seq) as max_seq
+       FROM sync_commits
+       WHERE partition_id = ${partitionId}
+     `.execute(db);
+
+     return coerceNumber(res.rows[0]?.max_seq) ?? 0;
+   }
+
+   async readMinCommitSeq<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     options?: { partitionId?: string }
+   ): Promise<number> {
+     const partitionId = options?.partitionId ?? 'default';
+     const res = await sql<{ min_seq: unknown }>`
+       SELECT min(commit_seq) as min_seq
+       FROM sync_commits
+       WHERE partition_id = ${partitionId}
+     `.execute(db);
+
+     return coerceNumber(res.rows[0]?.min_seq) ?? 0;
+   }
+
+   async readCommitSeqsForPull<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       cursor: number;
+       limitCommits: number;
+       tables: string[];
+       partitionId?: string;
+     }
+   ): Promise<number[]> {
+     const partitionId = args.partitionId ?? 'default';
+     if (args.tables.length === 0) return [];
+
+     const tablesIn = sql.join(
+       args.tables.map((t) => sql`${t}`),
+       sql`, `
+     );
+
+     const res = await sql<{ commit_seq: unknown }>`
+       SELECT DISTINCT commit_seq
+       FROM sync_table_commits
+       WHERE partition_id = ${partitionId}
+         AND "table" IN (${tablesIn})
+         AND commit_seq > ${args.cursor}
+       ORDER BY commit_seq ASC
+       LIMIT ${args.limitCommits}
+     `.execute(db);
+
+     return res.rows
+       .map((r) => coerceNumber(r.commit_seq))
+       .filter(
+         (n): n is number =>
+           typeof n === 'number' && Number.isFinite(n) && n > args.cursor
+       );
+   }
+
+   async readCommits<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     commitSeqs: number[],
+     options?: { partitionId?: string }
+   ): Promise<SyncCommitRow[]> {
+     const partitionId = options?.partitionId ?? 'default';
+     if (commitSeqs.length === 0) return [];
+
+     const commitSeqsIn = sql.join(
+       commitSeqs.map((seq) => sql`${seq}`),
+       sql`, `
+     );
+
+     const res = await sql<{
+       commit_seq: unknown;
+       actor_id: string;
+       created_at: unknown;
+       result_json: unknown | null;
+     }>`
+       SELECT commit_seq, actor_id, created_at, result_json
+       FROM sync_commits
+       WHERE commit_seq IN (${commitSeqsIn})
+         AND partition_id = ${partitionId}
+       ORDER BY commit_seq ASC
+     `.execute(db);
+
+     return res.rows.map((row) => ({
+       commit_seq: coerceNumber(row.commit_seq) ?? 0,
+       actor_id: row.actor_id,
+       created_at: coerceIsoString(row.created_at),
+       result_json: row.result_json ?? null,
+     }));
+   }
+
+   async readChangesForCommits<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       commitSeqs: number[];
+       table: string;
+       scopes: ScopeValues;
+       partitionId?: string;
+     }
+   ): Promise<SyncChangeRow[]> {
+     const partitionId = args.partitionId ?? 'default';
+     if (args.commitSeqs.length === 0) return [];
+
+     const commitSeqsIn = sql.join(
+       args.commitSeqs.map((seq) => sql`${seq}`),
+       sql`, `
+     );
+
+     // Fetch all changes for the table and commit sequences
+     const res = await sql<{
+       commit_seq: unknown;
+       table: string;
+       row_id: string;
+       op: string;
+       row_json: unknown | null;
+       row_version: unknown | null;
+       scopes: unknown;
+     }>`
+       SELECT commit_seq, "table", row_id, op, row_json, row_version, scopes
+       FROM sync_changes
+       WHERE commit_seq IN (${commitSeqsIn})
+         AND partition_id = ${partitionId}
+         AND "table" = ${args.table}
+       ORDER BY commit_seq ASC, change_id ASC
+     `.execute(db);
+
+     // Filter by scopes (manual, since SQLite JSON operators are limited)
+     return res.rows
+       .filter((row) => {
+         const storedScopes = parseScopes(row.scopes);
+         return scopesMatch(storedScopes, args.scopes);
+       })
+       .map((row) => ({
+         commit_seq: coerceNumber(row.commit_seq) ?? 0,
+         table: row.table,
+         row_id: row.row_id,
+         op: row.op as SyncOp,
+         row_json: parseJsonValue(row.row_json),
+         row_version: coerceNumber(row.row_version),
+         scopes: parseScopes(row.scopes),
+       }));
+   }
+
+   async readIncrementalPullRows<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       table: string;
+       scopes: ScopeValues;
+       cursor: number;
+       limitCommits: number;
+       partitionId?: string;
+     }
+   ): Promise<
+     Array<{
+       commit_seq: number;
+       actor_id: string;
+       created_at: string;
+       change_id: number;
+       table: string;
+       row_id: string;
+       op: SyncOp;
+       row_json: unknown | null;
+       row_version: number | null;
+       scopes: StoredScopes;
+     }>
+   > {
+     const partitionId = args.partitionId ?? 'default';
+     const limitCommits = Math.max(1, Math.min(500, args.limitCommits));
+
+     // Get commit_seqs for this table
+     const commitSeqsRes = await sql<{ commit_seq: unknown }>`
+       SELECT commit_seq
+       FROM sync_table_commits
+       WHERE partition_id = ${partitionId}
+         AND "table" = ${args.table}
+         AND commit_seq > ${args.cursor}
+         AND EXISTS (
+           SELECT 1
+           FROM sync_commits cm
+           WHERE cm.commit_seq = sync_table_commits.commit_seq
+             AND cm.partition_id = ${partitionId}
+         )
+       ORDER BY commit_seq ASC
+       LIMIT ${limitCommits}
+     `.execute(db);
+
+     const commitSeqs = commitSeqsRes.rows
+       .map((r) => coerceNumber(r.commit_seq))
+       .filter((n): n is number => n !== null);
+
+     if (commitSeqs.length === 0) return [];
+
+     const commitSeqsIn = sql.join(
+       commitSeqs.map((seq) => sql`${seq}`),
+       sql`, `
+     );
+
+     // Get commits and changes for these commit_seqs
+     const changesRes = await sql<{
+       commit_seq: unknown;
+       actor_id: string;
+       created_at: unknown;
+       change_id: unknown;
+       table: string;
+       row_id: string;
+       op: string;
+       row_json: unknown | null;
+       row_version: unknown | null;
+       scopes: unknown;
+     }>`
+       SELECT
+         cm.commit_seq,
+         cm.actor_id,
+         cm.created_at,
+         c.change_id,
+         c."table",
+         c.row_id,
+         c.op,
+         c.row_json,
+         c.row_version,
+         c.scopes
+       FROM sync_commits cm
+       JOIN sync_changes c ON c.commit_seq = cm.commit_seq
+       WHERE cm.commit_seq IN (${commitSeqsIn})
+         AND cm.partition_id = ${partitionId}
+         AND c.partition_id = ${partitionId}
+         AND c."table" = ${args.table}
+       ORDER BY cm.commit_seq ASC, c.change_id ASC
+     `.execute(db);
+
+     // Filter by scopes and transform
+     return changesRes.rows
+       .filter((row) => {
+         const storedScopes = parseScopes(row.scopes);
+         return scopesMatch(storedScopes, args.scopes);
+       })
+       .map((row) => ({
+         commit_seq: coerceNumber(row.commit_seq) ?? 0,
+         actor_id: row.actor_id,
+         created_at: coerceIsoString(row.created_at),
+         change_id: coerceNumber(row.change_id) ?? 0,
+         table: row.table,
+         row_id: row.row_id,
+         op: row.op as SyncOp,
+         row_json: parseJsonValue(row.row_json),
+         row_version: coerceNumber(row.row_version),
+         scopes: parseScopes(row.scopes),
+       }));
+   }
+
+   /**
+    * Streaming version of incremental pull for large result sets.
+    * Yields changes one at a time instead of loading all into memory.
+    */
+   async *streamIncrementalPullRows<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       table: string;
+       scopes: ScopeValues;
+       cursor: number;
+       limitCommits: number;
+       partitionId?: string;
+     }
+   ): AsyncGenerator<{
+     commit_seq: number;
+     actor_id: string;
+     created_at: string;
+     change_id: number;
+     table: string;
+     row_id: string;
+     op: SyncOp;
+     row_json: unknown | null;
+     row_version: number | null;
+     scopes: StoredScopes;
+   }> {
+     const partitionId = args.partitionId ?? 'default';
+     const limitCommits = Math.max(1, Math.min(500, args.limitCommits));
+
+     // Get commit_seqs for this table
+     const commitSeqsRes = await sql<{ commit_seq: unknown }>`
+       SELECT commit_seq
+       FROM sync_table_commits
+       WHERE partition_id = ${partitionId}
+         AND "table" = ${args.table}
+         AND commit_seq > ${args.cursor}
+         AND EXISTS (
+           SELECT 1
+           FROM sync_commits cm
+           WHERE cm.commit_seq = sync_table_commits.commit_seq
+             AND cm.partition_id = ${partitionId}
+         )
+       ORDER BY commit_seq ASC
+       LIMIT ${limitCommits}
+     `.execute(db);
+
+     const commitSeqs = commitSeqsRes.rows
+       .map((r) => coerceNumber(r.commit_seq))
+       .filter((n): n is number => n !== null);
+
+     if (commitSeqs.length === 0) return;
+
+     // Process in smaller batches to avoid memory issues
+     const batchSize = 100;
+     for (let i = 0; i < commitSeqs.length; i += batchSize) {
+       const batch = commitSeqs.slice(i, i + batchSize);
+       const commitSeqsIn = sql.join(
+         batch.map((seq) => sql`${seq}`),
+         sql`, `
+       );
+
+       const changesRes = await sql<{
+         commit_seq: unknown;
+         actor_id: string;
+         created_at: unknown;
+         change_id: unknown;
+         table: string;
+         row_id: string;
+         op: string;
+         row_json: unknown | null;
+         row_version: unknown | null;
+         scopes: unknown;
+       }>`
+         SELECT
+           cm.commit_seq,
+           cm.actor_id,
+           cm.created_at,
+           c.change_id,
+           c."table",
+           c.row_id,
+           c.op,
+           c.row_json,
+           c.row_version,
+           c.scopes
+         FROM sync_commits cm
+         JOIN sync_changes c ON c.commit_seq = cm.commit_seq
+         WHERE cm.commit_seq IN (${commitSeqsIn})
+           AND cm.partition_id = ${partitionId}
+           AND c.partition_id = ${partitionId}
+           AND c."table" = ${args.table}
+         ORDER BY cm.commit_seq ASC, c.change_id ASC
+       `.execute(db);
+
+       // Filter and yield each row
+       for (const row of changesRes.rows) {
+         const storedScopes = parseScopes(row.scopes);
+         if (scopesMatch(storedScopes, args.scopes)) {
+           yield {
+             commit_seq: coerceNumber(row.commit_seq) ?? 0,
+             actor_id: row.actor_id,
+             created_at: coerceIsoString(row.created_at),
+             change_id: coerceNumber(row.change_id) ?? 0,
+             table: row.table,
+             row_id: row.row_id,
+             op: row.op as SyncOp,
+             row_json: parseJsonValue(row.row_json),
+             row_version: coerceNumber(row.row_version),
+             scopes: storedScopes,
+           };
+         }
+       }
+     }
+   }
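+   // Consumption sketch (table and scope names are illustrative): the
+   // generator is driven with `for await`, buffering at most one batch of
+   // commits at a time:
+   //
+   //   for await (const change of dialect.streamIncrementalPullRows(db, {
+   //     table: 'todos', scopes: { user: 'u1' }, cursor: 0, limitCommits: 200,
+   //   })) {
+   //     // process one change at a time
+   //   }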
+
+   async compactChanges<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: { fullHistoryHours: number }
+   ): Promise<number> {
+     const cutoffIso = new Date(
+       Date.now() - args.fullHistoryHours * 60 * 60 * 1000
+     ).toISOString();
+
+     // Find all old changes
+     const oldChanges = await sql<{
+       change_id: unknown;
+       partition_id: string;
+       commit_seq: unknown;
+       table: string;
+       row_id: string;
+       scopes: unknown;
+     }>`
+       SELECT c.change_id, c.partition_id, c.commit_seq, c."table", c.row_id, c.scopes
+       FROM sync_changes c
+       JOIN sync_commits cm ON cm.commit_seq = c.commit_seq
+       WHERE cm.created_at < ${cutoffIso}
+     `.execute(db);
+
+     // Group by (partition_id, table, row_id, scopes)
+     const groups = new Map<
+       string,
+       Array<{ change_id: number; commit_seq: number }>
+     >();
+
+     for (const row of oldChanges.rows) {
+       const scopesStr = JSON.stringify(parseScopes(row.scopes));
+       const key = `${row.partition_id}|${row.table}|${row.row_id}|${scopesStr}`;
+       if (!groups.has(key)) {
+         groups.set(key, []);
+       }
+       groups.get(key)!.push({
+         change_id: coerceNumber(row.change_id) ?? 0,
+         commit_seq: coerceNumber(row.commit_seq) ?? 0,
+       });
+     }
+
+     // Find change_ids to delete (all but the one with highest commit_seq)
+     const toDelete: number[] = [];
+     for (const changes of groups.values()) {
+       if (changes.length <= 1) continue;
+
+       changes.sort((a, b) => {
+         if (a.commit_seq !== b.commit_seq) return b.commit_seq - a.commit_seq;
+         return b.change_id - a.change_id;
+       });
+
+       for (let i = 1; i < changes.length; i++) {
+         toDelete.push(changes[i]!.change_id);
+       }
+     }
+
+     if (toDelete.length === 0) return 0;
+
+     // Delete in batches
+     const batchSize = 500;
+     let deleted = 0;
+
+     for (let i = 0; i < toDelete.length; i += batchSize) {
+       const batch = toDelete.slice(i, i + batchSize);
+       const batchIn = sql.join(
+         batch.map((id) => sql`${id}`),
+         sql`, `
+       );
+
+       const res = await sql`
+         DELETE FROM sync_changes
+         WHERE change_id IN (${batchIn})
+       `.execute(db);
+
+       deleted += Number(res.numAffectedRows ?? 0);
+     }
+
+     // Remove routing index entries that no longer have any remaining changes
+     await sql`
+       DELETE FROM sync_table_commits
+       WHERE commit_seq IN (
+         SELECT commit_seq
+         FROM sync_commits
+         WHERE created_at < ${cutoffIso}
+           AND partition_id = sync_table_commits.partition_id
+       )
+       AND NOT EXISTS (
+         SELECT 1
+         FROM sync_changes c
+         WHERE c.commit_seq = sync_table_commits.commit_seq
+           AND c.partition_id = sync_table_commits.partition_id
+           AND c."table" = sync_table_commits."table"
+       )
+     `.execute(db);
+
+     return deleted;
+   }
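+   // Worked example: if a row 'r1' in table 'todos' (same partition and
+   // scopes) has pre-cutoff changes at commit_seqs 3, 7, and 9, compaction
+   // keeps the change at commit_seq 9 and deletes the changes at 3 and 7.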
+
+   // ===========================================================================
+   // Client Cursor Recording
+   // ===========================================================================
+
+   async recordClientCursor<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     args: {
+       partitionId?: string;
+       clientId: string;
+       actorId: string;
+       cursor: number;
+       effectiveScopes: ScopeValues;
+     }
+   ): Promise<void> {
+     const partitionId = args.partitionId ?? 'default';
+     const now = new Date().toISOString();
+     const scopesJson = JSON.stringify(args.effectiveScopes);
+
+     await sql`
+       INSERT INTO sync_client_cursors (partition_id, client_id, actor_id, cursor, effective_scopes, updated_at)
+       VALUES (${partitionId}, ${args.clientId}, ${args.actorId}, ${args.cursor}, ${scopesJson}, ${now})
+       ON CONFLICT(partition_id, client_id) DO UPDATE SET
+         actor_id = ${args.actorId},
+         cursor = ${args.cursor},
+         effective_scopes = ${scopesJson},
+         updated_at = ${now}
+     `.execute(db);
+   }
+
+   // ===========================================================================
+   // Scope Conversion Helpers
+   // ===========================================================================
+
+   scopesToDb(scopes: StoredScopes): string {
+     return JSON.stringify(scopes);
+   }
+
+   dbToScopes(value: unknown): StoredScopes {
+     return parseScopes(value);
+   }
+
+   dbToArray(value: unknown): string[] {
+     return toStringArray(value);
+   }
+
+   arrayToDb(values: string[]): string {
+     return JSON.stringify(values.filter((v) => v.length > 0));
+   }
+
+   async readAffectedTablesFromChanges<DB extends SyncCoreDb>(
+     db: Kysely<DB> | Transaction<DB>,
+     commitSeq: number,
+     options?: { partitionId?: string }
+   ): Promise<string[]> {
+     const partitionId = options?.partitionId ?? 'default';
+     const res = await sql<{ table: string }>`
+       SELECT DISTINCT "table"
+       FROM sync_changes
+       WHERE commit_seq = ${commitSeq}
+         AND partition_id = ${partitionId}
+     `.execute(db);
+
+     return res.rows
+       .map((r) => r.table)
+       .filter((t): t is string => typeof t === 'string' && t.length > 0);
+   }
+
+   // ===========================================================================
+   // Console Schema (Request Events)
+   // ===========================================================================
+
+   async ensureConsoleSchema<DB extends SyncCoreDb>(
+     db: Kysely<DB>
+   ): Promise<void> {
+     const nowIso = sql`(strftime('%Y-%m-%dT%H:%M:%fZ','now'))`;
+
+     await db.schema
+       .createTable('sync_request_events')
+       .ifNotExists()
+       .addColumn('event_id', 'integer', (col) =>
+         col.primaryKey().autoIncrement()
+       )
+       .addColumn('event_type', 'text', (col) => col.notNull())
+       .addColumn('actor_id', 'text', (col) => col.notNull())
+       .addColumn('client_id', 'text', (col) => col.notNull())
+       .addColumn('status_code', 'integer', (col) => col.notNull())
+       .addColumn('outcome', 'text', (col) => col.notNull())
+       .addColumn('duration_ms', 'integer', (col) => col.notNull())
+       .addColumn('commit_seq', 'integer')
+       .addColumn('operation_count', 'integer')
+       .addColumn('row_count', 'integer')
+       .addColumn('tables', 'text', (col) => col.notNull().defaultTo('[]'))
+       .addColumn('error_message', 'text')
+       .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+       .execute();
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_created_at
+       ON sync_request_events(created_at DESC)`.execute(db);
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_event_type
+       ON sync_request_events(event_type)`.execute(db);
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_client_id
+       ON sync_request_events(client_id)`.execute(db);
+
+     // API Keys table
+     await db.schema
+       .createTable('sync_api_keys')
+       .ifNotExists()
+       .addColumn('key_id', 'text', (col) => col.primaryKey())
+       .addColumn('key_hash', 'text', (col) => col.notNull())
+       .addColumn('key_prefix', 'text', (col) => col.notNull())
+       .addColumn('name', 'text', (col) => col.notNull())
+       .addColumn('key_type', 'text', (col) => col.notNull())
+       .addColumn('scope_keys', 'text', (col) => col.defaultTo('[]'))
+       .addColumn('actor_id', 'text')
+       .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+       .addColumn('expires_at', 'text')
+       .addColumn('last_used_at', 'text')
+       .addColumn('revoked_at', 'text')
+       .execute();
+
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_api_keys_key_hash
+       ON sync_api_keys(key_hash)`.execute(db);
+     await sql`CREATE INDEX IF NOT EXISTS idx_sync_api_keys_key_type
+       ON sync_api_keys(key_type)`.execute(db);
+   }
+ }
+
+ export function createSqliteServerDialect(options?: {
+   supportsTransactions?: boolean;
+ }): SqliteServerSyncDialect {
+   return new SqliteServerSyncDialect(options);
+ }
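+
+ // Usage sketch, assuming a better-sqlite3 backed Kysely instance; the import
+ // path for this package and the database filename are illustrative:
+ //
+ //   import Database from 'better-sqlite3';
+ //   import { Kysely, SqliteDialect } from 'kysely';
+ //   import type { SyncCoreDb } from '@syncular/server/schema';
+ //   import { createSqliteServerDialect } from '@syncular/server-dialect-sqlite';
+ //
+ //   const db = new Kysely<SyncCoreDb>({
+ //     dialect: new SqliteDialect({ database: new Database('sync.db') }),
+ //   });
+ //   const dialect = createSqliteServerDialect();
+ //   await dialect.ensureSyncSchema(db);
+ //   await dialect.ensureConsoleSchema(db);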