@syncular/server-dialect-sqlite 0.0.1-100
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +51 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +501 -0
- package/dist/index.js.map +1 -0
- package/package.json +57 -0
- package/src/index.test.ts +149 -0
- package/src/index.ts +676 -0
package/src/index.ts
ADDED
|
@@ -0,0 +1,676 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @syncular/server-dialect-sqlite - SQLite Server Sync Dialect
|
|
3
|
+
*
|
|
4
|
+
* SQLite adaptation of the commit-log based sync system.
|
|
5
|
+
* Works with any SQLite-compatible Kysely dialect (bun:sqlite, wa-sqlite, better-sqlite3, etc.).
|
|
6
|
+
*
|
|
7
|
+
* Key differences from Postgres:
|
|
8
|
+
* - No bigserial → INTEGER PRIMARY KEY AUTOINCREMENT
|
|
9
|
+
* - No JSONB → JSON stored as TEXT (with json_extract for filtering)
|
|
10
|
+
* - No array && overlap → JSON object key matching
|
|
11
|
+
* - No timestamptz → TEXT with ISO format
|
|
12
|
+
* - No GIN index → regular index + manual filtering
|
|
13
|
+
* - REPEATABLE READ → no-op (SQLite uses serializable by default)
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import type { ScopeValues, StoredScopes, SyncOp } from '@syncular/core';
|
|
17
|
+
import type { DbExecutor } from '@syncular/server';
|
|
18
|
+
import {
|
|
19
|
+
BaseServerSyncDialect,
|
|
20
|
+
coerceIsoString,
|
|
21
|
+
coerceNumber,
|
|
22
|
+
type IncrementalPullRow,
|
|
23
|
+
type IncrementalPullRowsArgs,
|
|
24
|
+
parseScopes,
|
|
25
|
+
} from '@syncular/server';
|
|
26
|
+
import type { SyncChangeRow, SyncCoreDb } from '@syncular/server/schema';
|
|
27
|
+
import type { Kysely, RawBuilder, Transaction } from 'kysely';
|
|
28
|
+
import { sql } from 'kysely';
|
|
29
|
+
|
|
30
|
+
function isActiveTransaction<DB extends SyncCoreDb>(
|
|
31
|
+
db: Kysely<DB>
|
|
32
|
+
): db is Kysely<DB> & Transaction<DB> {
|
|
33
|
+
return (db as { isTransaction?: boolean }).isTransaction === true;
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
function createSavepointName(): string {
|
|
37
|
+
const randomPart = Math.floor(Math.random() * 1_000_000_000).toString(36);
|
|
38
|
+
return `syncular_sp_${Date.now().toString(36)}_${randomPart}`;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
function parseJsonValue(value: unknown): unknown {
|
|
42
|
+
if (value === null || value === undefined) return null;
|
|
43
|
+
if (typeof value === 'object') return value;
|
|
44
|
+
if (typeof value === 'string') {
|
|
45
|
+
try {
|
|
46
|
+
return JSON.parse(value);
|
|
47
|
+
} catch {
|
|
48
|
+
return value;
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
return value;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
function toStringArray(value: unknown): string[] {
|
|
55
|
+
if (Array.isArray(value)) {
|
|
56
|
+
return value.filter((k: unknown): k is string => typeof k === 'string');
|
|
57
|
+
}
|
|
58
|
+
if (typeof value === 'string') {
|
|
59
|
+
try {
|
|
60
|
+
const parsed = JSON.parse(value);
|
|
61
|
+
if (Array.isArray(parsed)) {
|
|
62
|
+
return parsed.filter(
|
|
63
|
+
(k: unknown): k is string => typeof k === 'string'
|
|
64
|
+
);
|
|
65
|
+
}
|
|
66
|
+
} catch {
|
|
67
|
+
// ignore
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
return [];
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Check if stored scopes match the requested scope values.
|
|
75
|
+
* Uses OR semantics for arrays and treats missing keys as wildcards.
|
|
76
|
+
*/
|
|
77
|
+
function scopesMatch(stored: StoredScopes, requested: ScopeValues): boolean {
|
|
78
|
+
for (const [key, value] of Object.entries(requested)) {
|
|
79
|
+
const storedValue = stored[key];
|
|
80
|
+
if (storedValue === undefined) return false;
|
|
81
|
+
if (Array.isArray(value)) {
|
|
82
|
+
if (!value.includes(storedValue)) return false;
|
|
83
|
+
} else {
|
|
84
|
+
if (storedValue !== value) return false;
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
return true;
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
async function ensurePartitionColumn<DB extends SyncCoreDb>(
|
|
91
|
+
db: Kysely<DB>,
|
|
92
|
+
table: string
|
|
93
|
+
): Promise<void> {
|
|
94
|
+
try {
|
|
95
|
+
await sql
|
|
96
|
+
.raw(
|
|
97
|
+
`ALTER TABLE ${table} ADD COLUMN partition_id TEXT NOT NULL DEFAULT 'default'`
|
|
98
|
+
)
|
|
99
|
+
.execute(db);
|
|
100
|
+
} catch {
|
|
101
|
+
// Ignore when column already exists (or table is immutable in the current backend).
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
async function ensureTransportPathColumn<DB extends SyncCoreDb>(
|
|
106
|
+
db: Kysely<DB>
|
|
107
|
+
): Promise<void> {
|
|
108
|
+
try {
|
|
109
|
+
await sql
|
|
110
|
+
.raw(
|
|
111
|
+
"ALTER TABLE sync_request_events ADD COLUMN transport_path TEXT NOT NULL DEFAULT 'direct'"
|
|
112
|
+
)
|
|
113
|
+
.execute(db);
|
|
114
|
+
} catch {
|
|
115
|
+
// Ignore when column already exists (or table is immutable in the current backend).
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
/**
 * SQLite implementation of the server sync dialect.
 *
 * Scopes are stored as JSON text and filtered in application code (see
 * `scopesMatch`) because SQLite lacks Postgres' JSONB/GIN operators. All
 * timestamps are ISO-8601 TEXT produced by `strftime`.
 */
export class SqliteServerSyncDialect extends BaseServerSyncDialect {
  readonly name = 'sqlite' as const;
  // SQLite has no SELECT ... FOR UPDATE row locking.
  readonly supportsForUpdate = false;
  readonly supportsSavepoints: boolean;
  // When false, executeInTransaction degrades to running fn directly
  // (no BEGIN/SAVEPOINT statements are issued).
  private readonly _supportsTransactions: boolean;

  constructor(options?: { supportsTransactions?: boolean }) {
    super();
    this._supportsTransactions = options?.supportsTransactions ?? true;
    // Savepoint support tracks transaction support: both need the backend
    // to accept transaction-control statements.
    this.supportsSavepoints = this._supportsTransactions;
  }

  // ===========================================================================
  // SQL Fragment Hooks
  // ===========================================================================

  // Renders an `IN (v1, v2, ...)` fragment for numeric values.
  // NOTE(review): an empty `values` array renders `IN ()`, which is a SQLite
  // syntax error — presumably the base class never passes an empty list;
  // confirm against BaseServerSyncDialect callers.
  protected buildNumberListFilter(values: number[]): RawBuilder<unknown> {
    const list = sql.join(
      values.map((v) => sql`${v}`),
      sql`, `
    );
    return sql`IN (${list})`;
  }

  // Renders an `IN ('a', 'b', ...)` fragment for string values.
  // Same empty-list caveat as buildNumberListFilter above.
  protected buildStringListFilter(values: string[]): RawBuilder<unknown> {
    const list = sql.join(
      values.map((v) => sql`${v}`),
      sql`, `
    );
    return sql`IN (${list})`;
  }

  // ===========================================================================
  // Schema Setup
  // ===========================================================================

  /**
   * Create (if missing) all core sync tables and indexes, apply best-effort
   * column migrations, and clean up rows orphaned by prior schema versions.
   * Idempotent: every statement uses IF NOT EXISTS or swallows "already
   * exists" errors.
   */
  async ensureSyncSchema<DB extends SyncCoreDb>(db: Kysely<DB>): Promise<void> {
    await sql`PRAGMA foreign_keys = ON`.execute(db);

    // Millisecond-precision ISO-8601 UTC default for TEXT timestamp columns.
    const nowIso = sql`(strftime('%Y-%m-%dT%H:%M:%fZ','now'))`;

    // sync_commits table
    await db.schema
      .createTable('sync_commits')
      .ifNotExists()
      .addColumn('commit_seq', 'integer', (col) =>
        col.primaryKey().autoIncrement()
      )
      .addColumn('partition_id', 'text', (col) =>
        col.notNull().defaultTo('default')
      )
      .addColumn('actor_id', 'text', (col) => col.notNull())
      .addColumn('client_id', 'text', (col) => col.notNull())
      .addColumn('client_commit_id', 'text', (col) => col.notNull())
      .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
      .addColumn('meta', 'json')
      .addColumn('result_json', 'json')
      .addColumn('change_count', 'integer', (col) => col.notNull().defaultTo(0))
      .addColumn('affected_tables', 'text', (col) =>
        col.notNull().defaultTo('[]')
      )
      .execute();
    await ensurePartitionColumn(db, 'sync_commits');

    // Drop and recreate so the unique index always includes partition_id
    // (older versions indexed only client_id + client_commit_id).
    await sql`DROP INDEX IF EXISTS idx_sync_commits_client_commit`.execute(db);
    await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_commits_client_commit
      ON sync_commits(partition_id, client_id, client_commit_id)`.execute(db);

    // sync_table_commits table (index of which commits affect which tables)
    await db.schema
      .createTable('sync_table_commits')
      .ifNotExists()
      .addColumn('partition_id', 'text', (col) =>
        col.notNull().defaultTo('default')
      )
      .addColumn('table', 'text', (col) => col.notNull())
      .addColumn('commit_seq', 'integer', (col) =>
        col.notNull().references('sync_commits.commit_seq').onDelete('cascade')
      )
      .addPrimaryKeyConstraint('sync_table_commits_pk', [
        'partition_id',
        'table',
        'commit_seq',
      ])
      .execute();
    await ensurePartitionColumn(db, 'sync_table_commits');

    // Ensure unique index matches ON CONFLICT clause in push.ts
    // (needed when migrating from old schema where PK was only (table, commit_seq))
    await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_table_commits_partition_pk
      ON sync_table_commits(partition_id, "table", commit_seq)`.execute(db);

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_table_commits_commit_seq
      ON sync_table_commits(partition_id, commit_seq)`.execute(db);

    // sync_changes table - uses JSON for scopes
    await db.schema
      .createTable('sync_changes')
      .ifNotExists()
      .addColumn('change_id', 'integer', (col) =>
        col.primaryKey().autoIncrement()
      )
      .addColumn('partition_id', 'text', (col) =>
        col.notNull().defaultTo('default')
      )
      .addColumn('commit_seq', 'integer', (col) =>
        col.notNull().references('sync_commits.commit_seq').onDelete('cascade')
      )
      .addColumn('table', 'text', (col) => col.notNull())
      .addColumn('row_id', 'text', (col) => col.notNull())
      .addColumn('op', 'text', (col) => col.notNull())
      .addColumn('row_json', 'json')
      .addColumn('row_version', 'integer')
      .addColumn('scopes', 'json', (col) => col.notNull())
      .execute();
    await ensurePartitionColumn(db, 'sync_changes');

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_changes_commit_seq
      ON sync_changes(partition_id, commit_seq)`.execute(db);

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_changes_table
      ON sync_changes(partition_id, "table")`.execute(db);

    // sync_client_cursors table
    await db.schema
      .createTable('sync_client_cursors')
      .ifNotExists()
      .addColumn('partition_id', 'text', (col) =>
        col.notNull().defaultTo('default')
      )
      .addColumn('client_id', 'text', (col) => col.notNull())
      .addColumn('actor_id', 'text', (col) => col.notNull())
      .addColumn('cursor', 'integer', (col) => col.notNull().defaultTo(0))
      .addColumn('effective_scopes', 'json', (col) =>
        col.notNull().defaultTo('{}')
      )
      .addColumn('updated_at', 'text', (col) => col.notNull().defaultTo(nowIso))
      .addPrimaryKeyConstraint('sync_client_cursors_pk', [
        'partition_id',
        'client_id',
      ])
      .execute();
    await ensurePartitionColumn(db, 'sync_client_cursors');

    // Ensure unique index matches ON CONFLICT clause in recordClientCursor
    // (needed when migrating from old schema where PK was only (client_id))
    await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_client_cursors_partition_pk
      ON sync_client_cursors(partition_id, client_id)`.execute(db);

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_client_cursors_updated_at
      ON sync_client_cursors(updated_at)`.execute(db);

    // sync_snapshot_chunks table
    await db.schema
      .createTable('sync_snapshot_chunks')
      .ifNotExists()
      .addColumn('chunk_id', 'text', (col) => col.primaryKey())
      .addColumn('partition_id', 'text', (col) =>
        col.notNull().defaultTo('default')
      )
      .addColumn('scope_key', 'text', (col) => col.notNull())
      .addColumn('scope', 'text', (col) => col.notNull())
      .addColumn('as_of_commit_seq', 'integer', (col) => col.notNull())
      .addColumn('row_cursor', 'text', (col) => col.notNull().defaultTo(''))
      .addColumn('row_limit', 'integer', (col) => col.notNull())
      .addColumn('encoding', 'text', (col) => col.notNull())
      .addColumn('compression', 'text', (col) => col.notNull())
      .addColumn('sha256', 'text', (col) => col.notNull())
      .addColumn('byte_length', 'integer', (col) => col.notNull())
      .addColumn('blob_hash', 'text', (col) => col.notNull().defaultTo(''))
      .addColumn('body', 'blob') // Deprecated: use blob storage
      .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
      .addColumn('expires_at', 'text', (col) => col.notNull())
      .execute();
    await ensurePartitionColumn(db, 'sync_snapshot_chunks');

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_snapshot_chunks_expires_at
      ON sync_snapshot_chunks(expires_at)`.execute(db);

    await sql`CREATE UNIQUE INDEX IF NOT EXISTS idx_sync_snapshot_chunks_page_key
      ON sync_snapshot_chunks(partition_id, scope_key, scope, as_of_commit_seq, row_cursor, row_limit, encoding, compression)`.execute(
      db
    );

    // Cleanup orphaned rows
    await sql`
      DELETE FROM sync_table_commits
      WHERE commit_seq NOT IN (SELECT commit_seq FROM sync_commits)
    `.execute(db);
    await sql`
      DELETE FROM sync_changes
      WHERE commit_seq NOT IN (SELECT commit_seq FROM sync_commits)
    `.execute(db);
  }

  // ===========================================================================
  // Transaction Control
  // ===========================================================================

  /**
   * Run `fn` atomically. If `db` is already a transaction, nest via a
   * randomly named SAVEPOINT (rolled back and released on error); otherwise
   * open a fresh transaction. When transactions are disabled, `fn` runs
   * directly with no atomicity guarantee.
   */
  async executeInTransaction<DB extends SyncCoreDb, T>(
    db: Kysely<DB>,
    fn: (executor: DbExecutor<DB>) => Promise<T>
  ): Promise<T> {
    if (isActiveTransaction(db)) {
      if (!this._supportsTransactions) {
        return fn(db);
      }
      const savepoint = createSavepointName();
      await sql.raw(`SAVEPOINT ${savepoint}`).execute(db);
      try {
        const result = await fn(db);
        await sql.raw(`RELEASE SAVEPOINT ${savepoint}`).execute(db);
        return result;
      } catch (error) {
        // ROLLBACK TO undoes the work but keeps the savepoint on the stack,
        // so it still needs an explicit RELEASE afterwards.
        await sql.raw(`ROLLBACK TO SAVEPOINT ${savepoint}`).execute(db);
        await sql.raw(`RELEASE SAVEPOINT ${savepoint}`).execute(db);
        throw error;
      }
    }
    if (this._supportsTransactions) {
      return db.transaction().execute(fn);
    }
    return fn(db);
  }

  /** No-op: SQLite already provides serializable-equivalent isolation. */
  async setRepeatableRead<DB extends SyncCoreDb>(
    _trx: DbExecutor<DB>
  ): Promise<void> {
    // SQLite uses serializable isolation by default in WAL mode.
  }

  // ===========================================================================
  // Commit/Change Log Queries (dialect-specific)
  // ===========================================================================

  /**
   * Load the change rows for the given commits and table, restricted to
   * changes whose stored scopes match `args.scopes`. Scope filtering happens
   * in JS after the query because SQLite's JSON operators cannot express it.
   */
  async readChangesForCommits<DB extends SyncCoreDb>(
    db: DbExecutor<DB>,
    args: {
      commitSeqs: number[];
      table: string;
      scopes: ScopeValues;
      partitionId?: string;
    }
  ): Promise<SyncChangeRow[]> {
    const partitionId = args.partitionId ?? 'default';
    if (args.commitSeqs.length === 0) return [];

    const commitSeqsIn = sql.join(
      args.commitSeqs.map((seq) => sql`${seq}`),
      sql`, `
    );

    // Fetch all changes for the table and commit sequences
    const res = await sql<{
      commit_seq: unknown;
      table: string;
      row_id: string;
      op: string;
      row_json: unknown | null;
      row_version: unknown | null;
      scopes: unknown;
    }>`
      SELECT commit_seq, "table", row_id, op, row_json, row_version, scopes
      FROM sync_changes
      WHERE commit_seq IN (${commitSeqsIn})
        AND partition_id = ${partitionId}
        AND "table" = ${args.table}
      ORDER BY commit_seq ASC, change_id ASC
    `.execute(db);

    // Filter by scopes (manual, since SQLite JSON operators are limited)
    return res.rows
      .filter((row) => {
        const storedScopes = parseScopes(row.scopes);
        return scopesMatch(storedScopes, args.scopes);
      })
      .map((row) => ({
        commit_seq: coerceNumber(row.commit_seq) ?? 0,
        table: row.table,
        row_id: row.row_id,
        op: row.op as SyncOp,
        row_json: parseJsonValue(row.row_json),
        row_version: coerceNumber(row.row_version),
        scopes: parseScopes(row.scopes),
      }));
  }

  /**
   * Fetch one batch of incremental-pull rows: first the next (up to
   * `limitCommits`, clamped to 1..500) commit_seqs for the table past the
   * cursor, then all their changes joined with commit metadata, scope-filtered
   * in JS.
   */
  protected override async readIncrementalPullRowsBatch<DB extends SyncCoreDb>(
    db: DbExecutor<DB>,
    args: Omit<IncrementalPullRowsArgs, 'batchSize'>
  ): Promise<IncrementalPullRow[]> {
    const partitionId = args.partitionId ?? 'default';
    const limitCommits = Math.max(1, Math.min(500, args.limitCommits));

    // Get commit_seqs for this table
    const commitSeqsRes = await sql<{ commit_seq: unknown }>`
      SELECT commit_seq
      FROM sync_table_commits
      WHERE partition_id = ${partitionId}
        AND "table" = ${args.table}
        AND commit_seq > ${args.cursor}
        AND EXISTS (
          SELECT 1
          FROM sync_commits cm
          WHERE cm.commit_seq = sync_table_commits.commit_seq
            AND cm.partition_id = ${partitionId}
        )
      ORDER BY commit_seq ASC
      LIMIT ${limitCommits}
    `.execute(db);

    const commitSeqs = commitSeqsRes.rows
      .map((r) => coerceNumber(r.commit_seq))
      .filter((n): n is number => n !== null);

    if (commitSeqs.length === 0) return [];

    const commitSeqsIn = sql.join(
      commitSeqs.map((seq) => sql`${seq}`),
      sql`, `
    );

    // Get commits and changes for these commit_seqs
    const changesRes = await sql<{
      commit_seq: unknown;
      actor_id: string;
      created_at: unknown;
      change_id: unknown;
      table: string;
      row_id: string;
      op: string;
      row_json: unknown | null;
      row_version: unknown | null;
      scopes: unknown;
    }>`
      SELECT
        cm.commit_seq,
        cm.actor_id,
        cm.created_at,
        c.change_id,
        c."table",
        c.row_id,
        c.op,
        c.row_json,
        c.row_version,
        c.scopes
      FROM sync_commits cm
      JOIN sync_changes c ON c.commit_seq = cm.commit_seq
      WHERE cm.commit_seq IN (${commitSeqsIn})
        AND cm.partition_id = ${partitionId}
        AND c.partition_id = ${partitionId}
        AND c."table" = ${args.table}
      ORDER BY cm.commit_seq ASC, c.change_id ASC
    `.execute(db);

    // Filter by scopes and transform
    return changesRes.rows
      .filter((row) => {
        const storedScopes = parseScopes(row.scopes);
        return scopesMatch(storedScopes, args.scopes);
      })
      .map((row) => ({
        commit_seq: coerceNumber(row.commit_seq) ?? 0,
        actor_id: row.actor_id,
        created_at: coerceIsoString(row.created_at),
        change_id: coerceNumber(row.change_id) ?? 0,
        table: row.table,
        row_id: row.row_id,
        op: row.op as SyncOp,
        row_json: parseJsonValue(row.row_json),
        row_version: coerceNumber(row.row_version),
        scopes: parseScopes(row.scopes),
      }));
  }

  /**
   * Compact the change log: for rows older than `fullHistoryHours`, keep only
   * the newest change per (partition, table, row, scopes) group, delete the
   * rest in batches, then prune sync_table_commits entries whose commits no
   * longer have any changes for that table.
   *
   * @returns number of deleted sync_changes rows.
   */
  async compactChanges<DB extends SyncCoreDb>(
    db: DbExecutor<DB>,
    args: { fullHistoryHours: number }
  ): Promise<number> {
    const cutoffIso = new Date(
      Date.now() - args.fullHistoryHours * 60 * 60 * 1000
    ).toISOString();

    // Find all old changes
    // (no partition predicate on the join: commit_seq is a globally unique PK)
    const oldChanges = await sql<{
      change_id: unknown;
      partition_id: string;
      commit_seq: unknown;
      table: string;
      row_id: string;
      scopes: unknown;
    }>`
      SELECT c.change_id, c.partition_id, c.commit_seq, c."table", c.row_id, c.scopes
      FROM sync_changes c
      JOIN sync_commits cm ON cm.commit_seq = c.commit_seq
      WHERE cm.created_at < ${cutoffIso}
    `.execute(db);

    // Group by (partition_id, table, row_id, scopes)
    const groups = new Map<
      string,
      Array<{ change_id: number; commit_seq: number }>
    >();

    for (const row of oldChanges.rows) {
      // Normalize scopes through parseScopes so key equality is stable
      // regardless of JSON text formatting in the DB.
      const scopesStr = JSON.stringify(parseScopes(row.scopes));
      const key = `${row.partition_id}|${row.table}|${row.row_id}|${scopesStr}`;
      if (!groups.has(key)) {
        groups.set(key, []);
      }
      groups.get(key)!.push({
        change_id: coerceNumber(row.change_id) ?? 0,
        commit_seq: coerceNumber(row.commit_seq) ?? 0,
      });
    }

    // Find change_ids to delete (all but the one with highest commit_seq)
    const toDelete: number[] = [];
    for (const changes of groups.values()) {
      if (changes.length <= 1) continue;

      // Newest first; change_id breaks ties within the same commit.
      changes.sort((a, b) => {
        if (a.commit_seq !== b.commit_seq) return b.commit_seq - a.commit_seq;
        return b.change_id - a.change_id;
      });

      for (let i = 1; i < changes.length; i++) {
        toDelete.push(changes[i]!.change_id);
      }
    }

    if (toDelete.length === 0) return 0;

    // Delete in batches
    const deleteBatchSize = 500;
    let deleted = 0;

    for (let i = 0; i < toDelete.length; i += deleteBatchSize) {
      const batch = toDelete.slice(i, i + deleteBatchSize);
      const batchIn = sql.join(
        batch.map((id) => sql`${id}`),
        sql`, `
      );

      const res = await sql`
        DELETE FROM sync_changes
        WHERE change_id IN (${batchIn})
      `.execute(db);

      deleted += Number(res.numAffectedRows ?? 0);
    }

    // Remove routing index entries that no longer have any remaining changes
    await sql`
      DELETE FROM sync_table_commits
      WHERE commit_seq IN (
        SELECT commit_seq
        FROM sync_commits
        WHERE created_at < ${cutoffIso}
          AND partition_id = sync_table_commits.partition_id
      )
      AND NOT EXISTS (
        SELECT 1
        FROM sync_changes c
        WHERE c.commit_seq = sync_table_commits.commit_seq
          AND c.partition_id = sync_table_commits.partition_id
          AND c."table" = sync_table_commits."table"
      )
    `.execute(db);

    return deleted;
  }

  // ===========================================================================
  // Scope Conversion Helpers
  // ===========================================================================

  /** Serialize scopes as JSON text for the TEXT-backed `scopes` column. */
  scopesToDb(scopes: StoredScopes): string {
    return JSON.stringify(scopes);
  }

  /** Decode a DB value (array or JSON text) into a string array. */
  dbToArray(value: unknown): string[] {
    return toStringArray(value);
  }

  /** Serialize a string array as JSON text, dropping empty strings. */
  arrayToDb(values: string[]): string {
    return JSON.stringify(values.filter((v) => v.length > 0));
  }

  // ===========================================================================
  // Console Schema (Request Events)
  // ===========================================================================

  /**
   * Create (if missing) the console tables — request-event log and API
   * keys — plus their indexes, and apply the transport_path migration.
   * Idempotent, like ensureSyncSchema.
   */
  async ensureConsoleSchema<DB extends SyncCoreDb>(
    db: Kysely<DB>
  ): Promise<void> {
    const nowIso = sql`(strftime('%Y-%m-%dT%H:%M:%fZ','now'))`;

    await db.schema
      .createTable('sync_request_events')
      .ifNotExists()
      .addColumn('event_id', 'integer', (col) =>
        col.primaryKey().autoIncrement()
      )
      .addColumn('event_type', 'text', (col) => col.notNull())
      .addColumn('actor_id', 'text', (col) => col.notNull())
      .addColumn('client_id', 'text', (col) => col.notNull())
      .addColumn('status_code', 'integer', (col) => col.notNull())
      .addColumn('outcome', 'text', (col) => col.notNull())
      .addColumn('duration_ms', 'integer', (col) => col.notNull())
      .addColumn('commit_seq', 'integer')
      .addColumn('operation_count', 'integer')
      .addColumn('row_count', 'integer')
      .addColumn('tables', 'text', (col) => col.notNull().defaultTo('[]'))
      .addColumn('error_message', 'text')
      .addColumn('transport_path', 'text', (col) =>
        col.notNull().defaultTo('direct')
      )
      .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
      .execute();
    await ensureTransportPathColumn(db);

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_created_at
      ON sync_request_events(created_at DESC)`.execute(db);
    await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_event_type
      ON sync_request_events(event_type)`.execute(db);
    await sql`CREATE INDEX IF NOT EXISTS idx_sync_request_events_client_id
      ON sync_request_events(client_id)`.execute(db);

    // API Keys table
    await db.schema
      .createTable('sync_api_keys')
      .ifNotExists()
      .addColumn('key_id', 'text', (col) => col.primaryKey())
      .addColumn('key_hash', 'text', (col) => col.notNull())
      .addColumn('key_prefix', 'text', (col) => col.notNull())
      .addColumn('name', 'text', (col) => col.notNull())
      .addColumn('key_type', 'text', (col) => col.notNull())
      .addColumn('scope_keys', 'text', (col) => col.defaultTo('[]'))
      .addColumn('actor_id', 'text')
      .addColumn('created_at', 'text', (col) => col.notNull().defaultTo(nowIso))
      .addColumn('expires_at', 'text')
      .addColumn('last_used_at', 'text')
      .addColumn('revoked_at', 'text')
      .execute();

    await sql`CREATE INDEX IF NOT EXISTS idx_sync_api_keys_key_hash
      ON sync_api_keys(key_hash)`.execute(db);
    await sql`CREATE INDEX IF NOT EXISTS idx_sync_api_keys_key_type
      ON sync_api_keys(key_type)`.execute(db);
  }
}
|
|
671
|
+
|
|
672
|
+
export function createSqliteServerDialect(options?: {
|
|
673
|
+
supportsTransactions?: boolean;
|
|
674
|
+
}): SqliteServerSyncDialect {
|
|
675
|
+
return new SqliteServerSyncDialect(options);
|
|
676
|
+
}
|