@syncular/server-dialect-sqlite 0.0.1-60

package/dist/index.js ADDED
@@ -0,0 +1,655 @@
+ /**
+  * @syncular/server-dialect-sqlite - SQLite Server Sync Dialect
+  *
+  * SQLite adaptation of the commit-log based sync system.
+  * Works with any SQLite-compatible Kysely dialect (bun:sqlite, wa-sqlite, better-sqlite3, etc.).
+  *
+  * Key differences from Postgres:
+  * - No bigserial → INTEGER PRIMARY KEY AUTOINCREMENT
+  * - No JSONB → JSON stored as TEXT (with json_extract for filtering)
+  * - No array && overlap → JSON object key matching
+  * - No timestamptz → TEXT with ISO format
+  * - No GIN index → regular index + manual filtering
+  * - REPEATABLE READ → no-op (SQLite uses serializable by default)
+  */
+ import { sql } from 'kysely';
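+ // A note on the "manual filtering" mentioned above: scope predicates that the
+ // Postgres dialect can push into SQL are applied in JS here, after each query
+ // returns. A minimal sketch of the recurring pattern (using helpers defined below):
+ //
+ //   const rows = (await sql`SELECT scopes FROM sync_changes`.execute(db)).rows;
+ //   const visible = rows.filter((r) => scopesMatch(parseScopes(r.scopes), { org: 'a' }));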
+ function coerceNumber(value) {
+     if (value === null || value === undefined)
+         return null;
+     if (typeof value === 'number')
+         return Number.isFinite(value) ? value : null;
+     if (typeof value === 'bigint')
+         return Number.isFinite(Number(value)) ? Number(value) : null;
+     if (typeof value === 'string') {
+         const n = Number(value);
+         return Number.isFinite(n) ? n : null;
+     }
+     return null;
+ }
+ function coerceIsoString(value) {
+     if (typeof value === 'string')
+         return value;
+     if (value instanceof Date)
+         return value.toISOString();
+     return String(value);
+ }
+ function parseJsonValue(value) {
+     if (value === null || value === undefined)
+         return null;
+     if (typeof value === 'object')
+         return value;
+     if (typeof value === 'string') {
+         try {
+             return JSON.parse(value);
+         }
+         catch {
+             return value;
+         }
+     }
+     return value;
+ }
+ function parseScopes(value) {
+     if (value === null || value === undefined)
+         return {};
+     if (typeof value === 'object' && !Array.isArray(value)) {
+         const result = {};
+         for (const [k, v] of Object.entries(value)) {
+             if (typeof v === 'string') {
+                 result[k] = v;
+             }
+         }
+         return result;
+     }
+     if (typeof value === 'string') {
+         try {
+             const parsed = JSON.parse(value);
+             if (typeof parsed === 'object' &&
+                 parsed !== null &&
+                 !Array.isArray(parsed)) {
+                 const result = {};
+                 for (const [k, v] of Object.entries(parsed)) {
+                     if (typeof v === 'string') {
+                         result[k] = v;
+                     }
+                 }
+                 return result;
+             }
+         }
+         catch {
+             // ignore
+         }
+     }
+     return {};
+ }
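+ // For illustration: parseScopes accepts either a JSON string or an already-parsed
+ // object, keeps only string-valued keys, and falls back to {} on anything else.
+ //
+ //   parseScopes('{"org":"a","n":1}')   // → { org: 'a' }  ('n' dropped: not a string)
+ //   parseScopes('not json')            // → {}
+ //   parseScopes({ org: 'a' })          // → { org: 'a' }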
+ function toStringArray(value) {
+     if (Array.isArray(value)) {
+         return value.filter((k) => typeof k === 'string');
+     }
+     if (typeof value === 'string') {
+         try {
+             const parsed = JSON.parse(value);
+             if (Array.isArray(parsed)) {
+                 return parsed.filter((k) => typeof k === 'string');
+             }
+         }
+         catch {
+             // ignore
+         }
+     }
+     return [];
+ }
+ /**
+  * Check whether stored scopes match the requested scope values.
+  * Array values use OR semantics (any element may match). Keys absent from the
+  * request act as wildcards; a requested key missing from the stored scopes
+  * fails the match.
+  */
+ function scopesMatch(stored, requested) {
+     for (const [key, value] of Object.entries(requested)) {
+         const storedValue = stored[key];
+         if (storedValue === undefined)
+             return false;
+         if (Array.isArray(value)) {
+             if (!value.includes(storedValue))
+                 return false;
+         }
+         else {
+             if (storedValue !== value)
+                 return false;
+         }
+     }
+     return true;
+ }
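+ // A few concrete cases of the semantics above (illustrative):
+ //
+ //   scopesMatch({ org: 'a', team: 'x' }, { org: ['a', 'b'] })  // true  (OR over the array)
+ //   scopesMatch({ org: 'a' }, { org: 'a', team: 'x' })         // false (requested key absent)
+ //   scopesMatch({ org: 'a', team: 'x' }, {})                   // true  (empty request matches all)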
+ async function ensurePartitionColumn(db, table) {
+     try {
+         await sql.raw(`ALTER TABLE ${table} ADD COLUMN partition_id TEXT NOT NULL DEFAULT 'default'`).execute(db);
+     }
+     catch {
+         // Ignore when column already exists (or table is immutable in the current backend).
+     }
+ }
+ export class SqliteServerSyncDialect {
+     name = 'sqlite';
+     supportsForUpdate = false;
+     supportsSavepoints;
+     _supportsTransactions;
+     constructor(options) {
+         this._supportsTransactions = options?.supportsTransactions ?? true;
+         this.supportsSavepoints = this._supportsTransactions;
+     }
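+     // The only constructor option is supportsTransactions. A sketch of when to
+     // disable it (e.g. a backend that cannot open interactive transactions):
+     //
+     //   const dialect = new SqliteServerSyncDialect({ supportsTransactions: false });
+     //   // executeInTransaction() then runs its callback directly against db.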
+     // ===========================================================================
+     // Schema Setup
+     // ===========================================================================
+     async ensureSyncSchema(db) {
+         await sql`PRAGMA foreign_keys = ON`.execute(db);
+         const nowIso = sql`(strftime('%Y-%m-%dT%H:%M:%fZ','now'))`;
+         // sync_commits table
+         await db.schema
+             .createTable('sync_commits')
+             .ifNotExists()
+             .addColumn('commit_seq', 'integer', (col) => col.primaryKey().autoIncrement())
+             .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+             .addColumn('actor_id', 'text', (col) => col.notNull())
+             .addColumn('client_id', 'text', (col) => col.notNull())
+             .addColumn('client_commit_id', 'text', (col) => col.notNull())
+             .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+             .addColumn('meta', 'json')
+             .addColumn('result_json', 'json')
+             .addColumn('change_count', 'integer', (col) => col.notNull().defaultTo(0))
+             .addColumn('affected_tables', 'text', (col) => col.notNull().defaultTo('[]'))
+             .execute();
+         await ensurePartitionColumn(db, 'sync_commits');
+         await sql`DROP INDEX IF EXISTS idx_sync_commits_client_commit`.execute(db);
+         await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_commits_client_commit
+             ON sync_commits(partition_id, client_id, client_commit_id)`.execute(db);
+         // sync_table_commits table (index of which commits affect which tables)
+         await db.schema
+             .createTable('sync_table_commits')
+             .ifNotExists()
+             .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+             .addColumn('table', 'text', (col) => col.notNull())
+             .addColumn('commit_seq', 'integer', (col) => col.notNull().references('sync_commits.commit_seq').onDelete('cascade'))
+             .addPrimaryKeyConstraint('sync_table_commits_pk', [
+                 'partition_id',
+                 'table',
+                 'commit_seq',
+             ])
+             .execute();
+         await ensurePartitionColumn(db, 'sync_table_commits');
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_table_commits_commit_seq
+             ON sync_table_commits(partition_id, commit_seq)`.execute(db);
+         // sync_changes table - uses JSON for scopes
+         await db.schema
+             .createTable('sync_changes')
+             .ifNotExists()
+             .addColumn('change_id', 'integer', (col) => col.primaryKey().autoIncrement())
+             .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+             .addColumn('commit_seq', 'integer', (col) => col.notNull().references('sync_commits.commit_seq').onDelete('cascade'))
+             .addColumn('table', 'text', (col) => col.notNull())
+             .addColumn('row_id', 'text', (col) => col.notNull())
+             .addColumn('op', 'text', (col) => col.notNull())
+             .addColumn('row_json', 'json')
+             .addColumn('row_version', 'integer')
+             .addColumn('scopes', 'json', (col) => col.notNull())
+             .execute();
+         await ensurePartitionColumn(db, 'sync_changes');
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_changes_commit_seq
+             ON sync_changes(partition_id, commit_seq)`.execute(db);
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_changes_table
+             ON sync_changes(partition_id, "table")`.execute(db);
+         // sync_client_cursors table
+         await db.schema
+             .createTable('sync_client_cursors')
+             .ifNotExists()
+             .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+             .addColumn('client_id', 'text', (col) => col.notNull())
+             .addColumn('actor_id', 'text', (col) => col.notNull())
+             .addColumn('cursor', 'integer', (col) => col.notNull().defaultTo(0))
+             .addColumn('effective_scopes', 'json', (col) => col.notNull().defaultTo('{}'))
+             .addColumn('updated_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+             .addPrimaryKeyConstraint('sync_client_cursors_pk', [
+                 'partition_id',
+                 'client_id',
+             ])
+             .execute();
+         await ensurePartitionColumn(db, 'sync_client_cursors');
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_client_cursors_updated_at
+             ON sync_client_cursors(updated_at)`.execute(db);
+         // sync_snapshot_chunks table
+         await db.schema
+             .createTable('sync_snapshot_chunks')
+             .ifNotExists()
+             .addColumn('chunk_id', 'text', (col) => col.primaryKey())
+             .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+             .addColumn('scope_key', 'text', (col) => col.notNull())
+             .addColumn('scope', 'text', (col) => col.notNull())
+             .addColumn('as_of_commit_seq', 'integer', (col) => col.notNull())
+             .addColumn('row_cursor', 'text', (col) => col.notNull().defaultTo(''))
+             .addColumn('row_limit', 'integer', (col) => col.notNull())
+             .addColumn('encoding', 'text', (col) => col.notNull())
+             .addColumn('compression', 'text', (col) => col.notNull())
+             .addColumn('sha256', 'text', (col) => col.notNull())
+             .addColumn('byte_length', 'integer', (col) => col.notNull())
+             .addColumn('blob_hash', 'text', (col) => col.notNull().defaultTo(''))
+             .addColumn('body', 'blob') // Deprecated: use blob storage
+             .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+             .addColumn('expires_at', 'text', (col) => col.notNull())
+             .execute();
+         await ensurePartitionColumn(db, 'sync_snapshot_chunks');
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_snapshot_chunks_expires_at
+             ON sync_snapshot_chunks(expires_at)`.execute(db);
+         await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_snapshot_chunks_page_key
+             ON sync_snapshot_chunks(partition_id, scope_key, scope, as_of_commit_seq, row_cursor, row_limit, encoding, compression)`.execute(db);
+         // Cleanup orphaned rows
+         await sql`
+             DELETE FROM sync_table_commits
+             WHERE commit_seq NOT IN (SELECT commit_seq FROM sync_commits)
+         `.execute(db);
+         await sql`
+             DELETE FROM sync_changes
+             WHERE commit_seq NOT IN (SELECT commit_seq FROM sync_commits)
+         `.execute(db);
+     }
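+     // To make the schema concrete, a single commit touching one row might be
+     // stored roughly as follows (illustrative values only):
+     //
+     //   sync_commits:       { commit_seq: 42, partition_id: 'default', actor_id: 'u1',
+     //                         client_id: 'c1', client_commit_id: 'uuid-1', change_count: 1, ... }
+     //   sync_table_commits: { partition_id: 'default', table: 'todos', commit_seq: 42 }
+     //   sync_changes:       { change_id: 7, commit_seq: 42, table: 'todos', row_id: 'r1',
+     //                         op: '…', row_json: '{"id":"r1",...}', scopes: '{"user_id":"u1"}' }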
+     // ===========================================================================
+     // Transaction Control
+     // ===========================================================================
+     async executeInTransaction(db, fn) {
+         if (this._supportsTransactions) {
+             return db.transaction().execute(fn);
+         }
+         return fn(db);
+     }
+     async setRepeatableRead(_trx) {
+         // SQLite transactions are serializable by default, so no isolation level
+         // needs to be set here.
+     }
+     // ===========================================================================
+     // Commit/Change Log Queries
+     // ===========================================================================
+     async readMaxCommitSeq(db, options) {
+         const partitionId = options?.partitionId ?? 'default';
+         const res = await sql`
+             SELECT max(commit_seq) as max_seq
+             FROM sync_commits
+             WHERE partition_id = ${partitionId}
+         `.execute(db);
+         return coerceNumber(res.rows[0]?.max_seq) ?? 0;
+     }
+     async readMinCommitSeq(db, options) {
+         const partitionId = options?.partitionId ?? 'default';
+         const res = await sql`
+             SELECT min(commit_seq) as min_seq
+             FROM sync_commits
+             WHERE partition_id = ${partitionId}
+         `.execute(db);
+         return coerceNumber(res.rows[0]?.min_seq) ?? 0;
+     }
+     async readCommitSeqsForPull(db, args) {
+         const partitionId = args.partitionId ?? 'default';
+         if (args.tables.length === 0)
+             return [];
+         const tablesIn = sql.join(args.tables.map((t) => sql`${t}`), sql`, `);
+         const res = await sql`
+             SELECT DISTINCT commit_seq
+             FROM sync_table_commits
+             WHERE partition_id = ${partitionId}
+                 AND "table" IN (${tablesIn})
+                 AND commit_seq > ${args.cursor}
+             ORDER BY commit_seq ASC
+             LIMIT ${args.limitCommits}
+         `.execute(db);
+         return res.rows
+             .map((r) => coerceNumber(r.commit_seq))
+             .filter((n) => typeof n === 'number' && Number.isFinite(n) && n > args.cursor);
+     }
+     async readCommits(db, commitSeqs, options) {
+         const partitionId = options?.partitionId ?? 'default';
+         if (commitSeqs.length === 0)
+             return [];
+         const commitSeqsIn = sql.join(commitSeqs.map((seq) => sql`${seq}`), sql`, `);
+         const res = await sql`
+             SELECT commit_seq, actor_id, created_at, result_json
+             FROM sync_commits
+             WHERE commit_seq IN (${commitSeqsIn})
+                 AND partition_id = ${partitionId}
+             ORDER BY commit_seq ASC
+         `.execute(db);
+         return res.rows.map((row) => ({
+             commit_seq: coerceNumber(row.commit_seq) ?? 0,
+             actor_id: row.actor_id,
+             created_at: coerceIsoString(row.created_at),
+             result_json: row.result_json ?? null,
+         }));
+     }
+     async readChangesForCommits(db, args) {
+         const partitionId = args.partitionId ?? 'default';
+         if (args.commitSeqs.length === 0)
+             return [];
+         const commitSeqsIn = sql.join(args.commitSeqs.map((seq) => sql`${seq}`), sql`, `);
+         // Fetch all changes for the table and commit sequences
+         const res = await sql`
+             SELECT commit_seq, "table", row_id, op, row_json, row_version, scopes
+             FROM sync_changes
+             WHERE commit_seq IN (${commitSeqsIn})
+                 AND partition_id = ${partitionId}
+                 AND "table" = ${args.table}
+             ORDER BY commit_seq ASC, change_id ASC
+         `.execute(db);
+         // Filter by scopes (manual, since SQLite JSON operators are limited)
+         return res.rows
+             .filter((row) => {
+                 const storedScopes = parseScopes(row.scopes);
+                 return scopesMatch(storedScopes, args.scopes);
+             })
+             .map((row) => ({
+                 commit_seq: coerceNumber(row.commit_seq) ?? 0,
+                 table: row.table,
+                 row_id: row.row_id,
+                 op: row.op,
+                 row_json: parseJsonValue(row.row_json),
+                 row_version: coerceNumber(row.row_version),
+                 scopes: parseScopes(row.scopes),
+             }));
+     }
+     async readIncrementalPullRows(db, args) {
+         const partitionId = args.partitionId ?? 'default';
+         const limitCommits = Math.max(1, Math.min(500, args.limitCommits));
+         // Get commit_seqs for this table
+         const commitSeqsRes = await sql`
+             SELECT commit_seq
+             FROM sync_table_commits
+             WHERE partition_id = ${partitionId}
+                 AND "table" = ${args.table}
+                 AND commit_seq > ${args.cursor}
+                 AND EXISTS (
+                     SELECT 1
+                     FROM sync_commits cm
+                     WHERE cm.commit_seq = sync_table_commits.commit_seq
+                         AND cm.partition_id = ${partitionId}
+                 )
+             ORDER BY commit_seq ASC
+             LIMIT ${limitCommits}
+         `.execute(db);
+         const commitSeqs = commitSeqsRes.rows
+             .map((r) => coerceNumber(r.commit_seq))
+             .filter((n) => n !== null);
+         if (commitSeqs.length === 0)
+             return [];
+         const commitSeqsIn = sql.join(commitSeqs.map((seq) => sql`${seq}`), sql`, `);
+         // Get commits and changes for these commit_seqs
+         const changesRes = await sql`
+             SELECT
+                 cm.commit_seq,
+                 cm.actor_id,
+                 cm.created_at,
+                 c.change_id,
+                 c."table",
+                 c.row_id,
+                 c.op,
+                 c.row_json,
+                 c.row_version,
+                 c.scopes
+             FROM sync_commits cm
+             JOIN sync_changes c ON c.commit_seq = cm.commit_seq
+             WHERE cm.commit_seq IN (${commitSeqsIn})
+                 AND cm.partition_id = ${partitionId}
+                 AND c.partition_id = ${partitionId}
+                 AND c."table" = ${args.table}
+             ORDER BY cm.commit_seq ASC, c.change_id ASC
+         `.execute(db);
+         // Filter by scopes and transform
+         return changesRes.rows
+             .filter((row) => {
+                 const storedScopes = parseScopes(row.scopes);
+                 return scopesMatch(storedScopes, args.scopes);
+             })
+             .map((row) => ({
+                 commit_seq: coerceNumber(row.commit_seq) ?? 0,
+                 actor_id: row.actor_id,
+                 created_at: coerceIsoString(row.created_at),
+                 change_id: coerceNumber(row.change_id) ?? 0,
+                 table: row.table,
+                 row_id: row.row_id,
+                 op: row.op,
+                 row_json: parseJsonValue(row.row_json),
+                 row_version: coerceNumber(row.row_version),
+                 scopes: parseScopes(row.scopes),
+             }));
+     }
+     /**
+      * Streaming version of incremental pull for large result sets.
+      * Yields changes one at a time instead of loading all into memory.
+      */
+     async *streamIncrementalPullRows(db, args) {
+         const partitionId = args.partitionId ?? 'default';
+         const limitCommits = Math.max(1, Math.min(500, args.limitCommits));
+         // Get commit_seqs for this table
+         const commitSeqsRes = await sql`
+             SELECT commit_seq
+             FROM sync_table_commits
+             WHERE partition_id = ${partitionId}
+                 AND "table" = ${args.table}
+                 AND commit_seq > ${args.cursor}
+                 AND EXISTS (
+                     SELECT 1
+                     FROM sync_commits cm
+                     WHERE cm.commit_seq = sync_table_commits.commit_seq
+                         AND cm.partition_id = ${partitionId}
+                 )
+             ORDER BY commit_seq ASC
+             LIMIT ${limitCommits}
+         `.execute(db);
+         const commitSeqs = commitSeqsRes.rows
+             .map((r) => coerceNumber(r.commit_seq))
+             .filter((n) => n !== null);
+         if (commitSeqs.length === 0)
+             return;
+         // Process in smaller batches to avoid memory issues
+         const batchSize = 100;
+         for (let i = 0; i < commitSeqs.length; i += batchSize) {
+             const batch = commitSeqs.slice(i, i + batchSize);
+             const commitSeqsIn = sql.join(batch.map((seq) => sql`${seq}`), sql`, `);
+             const changesRes = await sql`
+                 SELECT
+                     cm.commit_seq,
+                     cm.actor_id,
+                     cm.created_at,
+                     c.change_id,
+                     c."table",
+                     c.row_id,
+                     c.op,
+                     c.row_json,
+                     c.row_version,
+                     c.scopes
+                 FROM sync_commits cm
+                 JOIN sync_changes c ON c.commit_seq = cm.commit_seq
+                 WHERE cm.commit_seq IN (${commitSeqsIn})
+                     AND cm.partition_id = ${partitionId}
+                     AND c.partition_id = ${partitionId}
+                     AND c."table" = ${args.table}
+                 ORDER BY cm.commit_seq ASC, c.change_id ASC
+             `.execute(db);
+             // Filter and yield each row
+             for (const row of changesRes.rows) {
+                 const storedScopes = parseScopes(row.scopes);
+                 if (scopesMatch(storedScopes, args.scopes)) {
+                     yield {
+                         commit_seq: coerceNumber(row.commit_seq) ?? 0,
+                         actor_id: row.actor_id,
+                         created_at: coerceIsoString(row.created_at),
+                         change_id: coerceNumber(row.change_id) ?? 0,
+                         table: row.table,
+                         row_id: row.row_id,
+                         op: row.op,
+                         row_json: parseJsonValue(row.row_json),
+                         row_version: coerceNumber(row.row_version),
+                         scopes: storedScopes,
+                     };
+                 }
+             }
+         }
+     }
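+     // Consuming the stream (a sketch; the argument names follow the method above,
+     // the table and scope values are placeholders):
+     //
+     //   for await (const change of dialect.streamIncrementalPullRows(db, {
+     //       table: 'todos', cursor: 0, limitCommits: 100, scopes: { user_id: 'u1' },
+     //   })) {
+     //       // handle one change at a time without buffering the full result set
+     //   }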
+     async compactChanges(db, args) {
+         const cutoffIso = new Date(Date.now() - args.fullHistoryHours * 60 * 60 * 1000).toISOString();
+         // Find all old changes
+         const oldChanges = await sql`
+             SELECT c.change_id, c.partition_id, c.commit_seq, c."table", c.row_id, c.scopes
+             FROM sync_changes c
+             JOIN sync_commits cm ON cm.commit_seq = c.commit_seq
+             WHERE cm.created_at < ${cutoffIso}
+         `.execute(db);
+         // Group by (partition_id, table, row_id, scopes)
+         const groups = new Map();
+         for (const row of oldChanges.rows) {
+             const scopesStr = JSON.stringify(parseScopes(row.scopes));
+             const key = `${row.partition_id}|${row.table}|${row.row_id}|${scopesStr}`;
+             if (!groups.has(key)) {
+                 groups.set(key, []);
+             }
+             groups.get(key).push({
+                 change_id: coerceNumber(row.change_id) ?? 0,
+                 commit_seq: coerceNumber(row.commit_seq) ?? 0,
+             });
+         }
+         // Find change_ids to delete (all but the one with highest commit_seq)
+         const toDelete = [];
+         for (const changes of groups.values()) {
+             if (changes.length <= 1)
+                 continue;
+             changes.sort((a, b) => {
+                 if (a.commit_seq !== b.commit_seq)
+                     return b.commit_seq - a.commit_seq;
+                 return b.change_id - a.change_id;
+             });
+             for (let i = 1; i < changes.length; i++) {
+                 toDelete.push(changes[i].change_id);
+             }
+         }
+         if (toDelete.length === 0)
+             return 0;
+         // Delete in batches
+         const batchSize = 500;
+         let deleted = 0;
+         for (let i = 0; i < toDelete.length; i += batchSize) {
+             const batch = toDelete.slice(i, i + batchSize);
+             const batchIn = sql.join(batch.map((id) => sql`${id}`), sql`, `);
+             const res = await sql`
+                 DELETE FROM sync_changes
+                 WHERE change_id IN (${batchIn})
+             `.execute(db);
+             deleted += Number(res.numAffectedRows ?? 0);
+         }
+         // Remove routing index entries that no longer have any remaining changes
+         await sql`
+             DELETE FROM sync_table_commits
+             WHERE commit_seq IN (
+                 SELECT commit_seq
+                 FROM sync_commits
+                 WHERE created_at < ${cutoffIso}
+                     AND partition_id = sync_table_commits.partition_id
+             )
+             AND NOT EXISTS (
+                 SELECT 1
+                 FROM sync_changes c
+                 WHERE c.commit_seq = sync_table_commits.commit_seq
+                     AND c.partition_id = sync_table_commits.partition_id
+                     AND c."table" = sync_table_commits."table"
+             )
+         `.execute(db);
+         return deleted;
+     }
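+     // Compaction in miniature (illustrative): if one row (same partition, table,
+     // row_id, and scopes) has changes older than the cutoff at commit_seqs 10, 12,
+     // and 15, only the change from commit 15 survives; the other two change_ids
+     // are deleted in batches of up to 500.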
+     // ===========================================================================
+     // Client Cursor Recording
+     // ===========================================================================
+     async recordClientCursor(db, args) {
+         const partitionId = args.partitionId ?? 'default';
+         const now = new Date().toISOString();
+         const scopesJson = JSON.stringify(args.effectiveScopes);
+         await sql`
+             INSERT INTO sync_client_cursors (partition_id, client_id, actor_id, cursor, effective_scopes, updated_at)
+             VALUES (${partitionId}, ${args.clientId}, ${args.actorId}, ${args.cursor}, ${scopesJson}, ${now})
+             ON CONFLICT(partition_id, client_id) DO UPDATE SET
+                 actor_id = ${args.actorId},
+                 cursor = ${args.cursor},
+                 effective_scopes = ${scopesJson},
+                 updated_at = ${now}
+         `.execute(db);
+     }
+     // ===========================================================================
+     // Scope Conversion Helpers
+     // ===========================================================================
+     scopesToDb(scopes) {
+         return JSON.stringify(scopes);
+     }
+     dbToScopes(value) {
+         return parseScopes(value);
+     }
+     dbToArray(value) {
+         return toStringArray(value);
+     }
+     arrayToDb(values) {
+         return JSON.stringify(values.filter((v) => v.length > 0));
+     }
+     async readAffectedTablesFromChanges(db, commitSeq, options) {
+         const partitionId = options?.partitionId ?? 'default';
+         const res = await sql`
+             SELECT DISTINCT "table"
+             FROM sync_changes
+             WHERE commit_seq = ${commitSeq}
+                 AND partition_id = ${partitionId}
+         `.execute(db);
+         return res.rows
+             .map((r) => r.table)
+             .filter((t) => typeof t === 'string' && t.length > 0);
+     }
+     // ===========================================================================
+     // Console Schema (Request Events)
+     // ===========================================================================
+     async ensureConsoleSchema(db) {
+         const nowIso = sql`(strftime('%Y-%m-%dT%H:%M:%fZ','now'))`;
+         await db.schema
+             .createTable('sync_request_events')
+             .ifNotExists()
+             .addColumn('event_id', 'integer', (col) => col.primaryKey().autoIncrement())
+             .addColumn('event_type', 'text', (col) => col.notNull())
+             .addColumn('actor_id', 'text', (col) => col.notNull())
+             .addColumn('client_id', 'text', (col) => col.notNull())
+             .addColumn('status_code', 'integer', (col) => col.notNull())
+             .addColumn('outcome', 'text', (col) => col.notNull())
+             .addColumn('duration_ms', 'integer', (col) => col.notNull())
+             .addColumn('commit_seq', 'integer')
+             .addColumn('operation_count', 'integer')
+             .addColumn('row_count', 'integer')
+             .addColumn('tables', 'text', (col) => col.notNull().defaultTo('[]'))
+             .addColumn('error_message', 'text')
+             .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+             .execute();
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_created_at
+             ON sync_request_events(created_at DESC)`.execute(db);
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_event_type
+             ON sync_request_events(event_type)`.execute(db);
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_client_id
+             ON sync_request_events(client_id)`.execute(db);
+         // API Keys table
+         await db.schema
+             .createTable('sync_api_keys')
+             .ifNotExists()
+             .addColumn('key_id', 'text', (col) => col.primaryKey())
+             .addColumn('key_hash', 'text', (col) => col.notNull())
+             .addColumn('key_prefix', 'text', (col) => col.notNull())
+             .addColumn('name', 'text', (col) => col.notNull())
+             .addColumn('key_type', 'text', (col) => col.notNull())
+             .addColumn('scope_keys', 'text', (col) => col.defaultTo('[]'))
+             .addColumn('actor_id', 'text')
+             .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
+             .addColumn('expires_at', 'text')
+             .addColumn('last_used_at', 'text')
+             .addColumn('revoked_at', 'text')
+             .execute();
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_api_keys_key_hash
+             ON sync_api_keys(key_hash)`.execute(db);
+         await sql`CREATE INDEX IF NOT EXISTS idx_sync_api_keys_key_type
+             ON sync_api_keys(key_type)`.execute(db);
+     }
+ }
+ export function createSqliteServerDialect(options) {
+     return new SqliteServerSyncDialect(options);
+ }
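+ // End-to-end usage sketch (illustrative; assumes a Kysely instance backed by
+ // better-sqlite3, though any SQLite-compatible Kysely dialect works the same way):
+ //
+ //   import { Kysely, SqliteDialect } from 'kysely';
+ //   import Database from 'better-sqlite3';
+ //
+ //   const db = new Kysely({ dialect: new SqliteDialect({ database: new Database('app.db') }) });
+ //   const dialect = createSqliteServerDialect();
+ //   await dialect.ensureSyncSchema(db);
+ //   const latest = await dialect.readMaxCommitSeq(db);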
+ //# sourceMappingURL=index.js.map