lakesync 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. package/README.md +74 -0
  2. package/dist/adapter.d.ts +369 -0
  3. package/dist/adapter.js +39 -0
  4. package/dist/adapter.js.map +1 -0
  5. package/dist/analyst.d.ts +268 -0
  6. package/dist/analyst.js +495 -0
  7. package/dist/analyst.js.map +1 -0
  8. package/dist/auth-CAVutXzx.d.ts +30 -0
  9. package/dist/base-poller-Qo_SmCZs.d.ts +82 -0
  10. package/dist/catalogue.d.ts +65 -0
  11. package/dist/catalogue.js +17 -0
  12. package/dist/catalogue.js.map +1 -0
  13. package/dist/chunk-4ARO6KTJ.js +257 -0
  14. package/dist/chunk-4ARO6KTJ.js.map +1 -0
  15. package/dist/chunk-5YOFCJQ7.js +1115 -0
  16. package/dist/chunk-5YOFCJQ7.js.map +1 -0
  17. package/dist/chunk-7D4SUZUM.js +38 -0
  18. package/dist/chunk-7D4SUZUM.js.map +1 -0
  19. package/dist/chunk-BNJOGBYK.js +335 -0
  20. package/dist/chunk-BNJOGBYK.js.map +1 -0
  21. package/dist/chunk-ICNT7I3K.js +1180 -0
  22. package/dist/chunk-ICNT7I3K.js.map +1 -0
  23. package/dist/chunk-P5DRFKIT.js +413 -0
  24. package/dist/chunk-P5DRFKIT.js.map +1 -0
  25. package/dist/chunk-X3RO5SYJ.js +880 -0
  26. package/dist/chunk-X3RO5SYJ.js.map +1 -0
  27. package/dist/client.d.ts +428 -0
  28. package/dist/client.js +2048 -0
  29. package/dist/client.js.map +1 -0
  30. package/dist/compactor.d.ts +342 -0
  31. package/dist/compactor.js +793 -0
  32. package/dist/compactor.js.map +1 -0
  33. package/dist/coordinator-CxckTzYW.d.ts +396 -0
  34. package/dist/db-types-BR6Kt4uf.d.ts +29 -0
  35. package/dist/gateway-D5SaaMvT.d.ts +337 -0
  36. package/dist/gateway-server.d.ts +306 -0
  37. package/dist/gateway-server.js +4663 -0
  38. package/dist/gateway-server.js.map +1 -0
  39. package/dist/gateway.d.ts +196 -0
  40. package/dist/gateway.js +79 -0
  41. package/dist/gateway.js.map +1 -0
  42. package/dist/hlc-DiD8QNG3.d.ts +70 -0
  43. package/dist/index.d.ts +245 -0
  44. package/dist/index.js +102 -0
  45. package/dist/index.js.map +1 -0
  46. package/dist/json-dYtqiL0F.d.ts +18 -0
  47. package/dist/nessie-client-DrNikVXy.d.ts +160 -0
  48. package/dist/parquet.d.ts +78 -0
  49. package/dist/parquet.js +15 -0
  50. package/dist/parquet.js.map +1 -0
  51. package/dist/proto.d.ts +434 -0
  52. package/dist/proto.js +67 -0
  53. package/dist/proto.js.map +1 -0
  54. package/dist/react.d.ts +147 -0
  55. package/dist/react.js +224 -0
  56. package/dist/react.js.map +1 -0
  57. package/dist/resolver-C3Wphi6O.d.ts +10 -0
  58. package/dist/result-CojzlFE2.d.ts +64 -0
  59. package/dist/src-QU2YLPZY.js +383 -0
  60. package/dist/src-QU2YLPZY.js.map +1 -0
  61. package/dist/src-WYBF5LOI.js +102 -0
  62. package/dist/src-WYBF5LOI.js.map +1 -0
  63. package/dist/src-WZNPHANQ.js +426 -0
  64. package/dist/src-WZNPHANQ.js.map +1 -0
  65. package/dist/types-Bs-QyOe-.d.ts +143 -0
  66. package/dist/types-DAQL_vU_.d.ts +118 -0
  67. package/dist/types-DSC_EiwR.d.ts +45 -0
  68. package/dist/types-V_jVu2sA.d.ts +73 -0
  69. package/package.json +119 -0
package/dist/client.js ADDED
@@ -0,0 +1,2048 @@
1
+ import {
2
+ TAG_BROADCAST,
3
+ TAG_SYNC_PULL,
4
+ TAG_SYNC_PUSH,
5
+ decodeBroadcastFrame,
6
+ decodeSyncResponse,
7
+ encodeSyncPull,
8
+ encodeSyncPush
9
+ } from "./chunk-BNJOGBYK.js";
10
+ import {
11
+ Err,
12
+ HLC,
13
+ LWWResolver,
14
+ LakeSyncError,
15
+ Ok,
16
+ SchemaError,
17
+ assertValidIdentifier,
18
+ bigintReplacer,
19
+ bigintReviver,
20
+ extractDelta,
21
+ isActionError,
22
+ quoteIdentifier,
23
+ toError,
24
+ unwrapOrThrow
25
+ } from "./chunk-ICNT7I3K.js";
26
+ import "./chunk-7D4SUZUM.js";
27
+
28
+ // ../client/src/db/idb-persistence.ts
29
+ import { openDB } from "idb";
30
var IDB_DB_NAME = "lakesync-snapshots";
var IDB_DB_VERSION = 1;
var STORE_NAME = "snapshots";
// Cache the open *promise* rather than the resolved handle. The original
// cached the awaited handle, so two concurrent first calls both saw `null`
// and each issued its own openDB() — a needless duplicate open. Sharing the
// promise makes the open happen exactly once.
var cachedDbPromise = null;
/**
 * Lazily open (and memoise) the snapshot IndexedDB database.
 *
 * @returns the shared IDBPDatabase promise for the snapshot store
 */
async function getDb() {
  if (!cachedDbPromise) {
    cachedDbPromise = openDB(IDB_DB_NAME, IDB_DB_VERSION, {
      upgrade(db) {
        if (!db.objectStoreNames.contains(STORE_NAME)) {
          db.createObjectStore(STORE_NAME);
        }
      }
    });
    // If opening fails, drop the cached rejection so a later call can retry
    // instead of failing forever with the stale error.
    cachedDbPromise.catch(() => {
      cachedDbPromise = null;
    });
  }
  return cachedDbPromise;
}
/**
 * Load a previously saved database snapshot.
 *
 * @param dbName - logical database name used as the store key
 * @returns the stored bytes, or null when absent or not a Uint8Array
 */
async function loadSnapshot(dbName) {
  const idb = await getDb();
  const data = await idb.get(STORE_NAME, dbName);
  if (data instanceof Uint8Array) return data;
  return null;
}
/**
 * Persist a database snapshot, overwriting any previous one for `dbName`.
 *
 * @param dbName - logical database name used as the store key
 * @param data - exported database bytes
 */
async function saveSnapshot(dbName, data) {
  const idb = await getDb();
  await idb.put(STORE_NAME, data, dbName);
}
/**
 * Remove the stored snapshot for `dbName`, if any.
 */
async function deleteSnapshot(dbName) {
  const idb = await getDb();
  await idb.delete(STORE_NAME, dbName);
}
59
+
60
+ // ../client/src/db/local-db.ts
61
+ import initSqlJs from "sql.js";
62
+
63
+ // ../client/src/db/types.ts
64
// Error type for local-database failures. Tags LakeSyncError with the
// "DB_ERROR" code so callers can distinguish storage faults from other
// sync-layer errors.
var DbError = class extends LakeSyncError {
  /**
   * @param message - human-readable description of the failure
   * @param cause - optional underlying error
   */
  constructor(message, cause) {
    super(message, "DB_ERROR", cause);
  }
};
69
+
70
+ // ../client/src/db/local-db.ts
71
/**
 * Convert sql.js result sets (parallel `columns`/`values` arrays) into an
 * array of plain row objects keyed by column name.
 *
 * Only the first result set is mapped; an empty or missing result set
 * yields an empty array. Columns whose name is `undefined` are skipped.
 */
function mapResultRows(results) {
  const firstSet = results[0];
  if (results.length === 0 || !firstSet) {
    return [];
  }
  const { columns, values } = firstSet;
  return values.map((row) => {
    const mapped = {};
    columns.forEach((columnName, idx) => {
      if (columnName !== void 0) {
        mapped[columnName] = row[idx];
      }
    });
    return mapped;
  });
}
87
/**
 * Run a synchronous database operation and capture any throw as a Result.
 *
 * @param label - message used for the DbError when `fn` throws
 * @param fn - operation to execute
 * @returns Ok(fn()) on success, Err(DbError) wrapping the thrown value otherwise
 */
function wrapDbError(label, fn) {
  try {
    const value = fn();
    return Ok(value);
  } catch (caught) {
    return Err(new DbError(label, toError(caught)));
  }
}
94
// Local SQLite database backed by sql.js (WASM, in-memory), optionally
// persisted as a snapshot in IndexedDB. All public methods return Result
// values (Ok/Err) rather than throwing.
var LocalDB = class _LocalDB {
  // sql.js Database handle
  #db;
  // configuration object; at minimum provides `name` and optional `backend`
  #config;
  // resolved backend: "idb" (snapshot persistence) or "memory"
  #backend;
  constructor(db, config, backend) {
    this.#db = db;
    this.#config = config;
    this.#backend = backend;
  }
  /** The database name from configuration */
  get name() {
    return this.#config.name;
  }
  /** The resolved storage backend for this instance */
  get backend() {
    return this.#backend;
  }
  /**
   * Open a new LocalDB instance.
   *
   * Initialises the sql.js WASM engine and creates a database. When the
   * backend is `"idb"`, any existing snapshot is loaded from IndexedDB.
   * If no backend is specified, auto-detects based on `indexedDB` availability.
   */
  static async open(config) {
    try {
      const backend = resolveBackend(config.backend);
      const SQL = await initSqlJs();
      let data = null;
      if (backend === "idb") {
        data = await loadSnapshot(config.name);
      }
      // Restore from the snapshot bytes when present, otherwise start empty.
      const db = data ? new SQL.Database(data) : new SQL.Database();
      return Ok(new _LocalDB(db, config, backend));
    } catch (err) {
      return Err(new DbError(`Failed to open database "${config.name}"`, toError(err)));
    }
  }
  /**
   * Execute a SQL statement (INSERT, UPDATE, DELETE, CREATE, etc.).
   *
   * Returns `Ok(void)` on success, or `Err(DbError)` on failure.
   */
  async exec(sql, params) {
    return wrapDbError(`Failed to execute SQL: ${sql}`, () => {
      this.#db.run(sql, params);
    });
  }
  /**
   * Query the database and return typed rows as an array of objects.
   *
   * Each row is mapped from sql.js column-array format into a keyed object.
   */
  async query(sql, params) {
    return wrapDbError(`Failed to query SQL: ${sql}`, () => {
      const results = this.#db.exec(sql, params);
      return mapResultRows(results);
    });
  }
  /**
   * Execute a function within a database transaction.
   *
   * Begins a transaction, executes the callback with a `Transaction` object,
   * commits on success, or rolls back if the callback throws.
   *
   * NOTE: the callback is invoked synchronously — COMMIT runs immediately
   * after `fn(tx)` returns, so an async callback would commit before its
   * awaited work completes.
   */
  async transaction(fn) {
    const tx = this.#createTransaction();
    const beginResult = wrapDbError("Failed to begin transaction", () => {
      this.#db.run("BEGIN");
    });
    if (!beginResult.ok) return beginResult;
    try {
      const result = fn(tx);
      this.#db.run("COMMIT");
      return Ok(result);
    } catch (err) {
      try {
        this.#db.run("ROLLBACK");
      } catch (_rollbackErr) {
        // Rollback failure is deliberately swallowed: the original callback
        // error below is the one callers need to see.
      }
      return Err(new DbError("Transaction failed", toError(err)));
    }
  }
  /**
   * Export the current database state and persist it to IndexedDB.
   *
   * No-op when the backend is `"memory"`.
   */
  async save() {
    if (this.#backend !== "idb") {
      return Ok(void 0);
    }
    try {
      const data = this.#db.export();
      await saveSnapshot(this.#config.name, data);
      return Ok(void 0);
    } catch (err) {
      return Err(
        new DbError(`Failed to save database "${this.#config.name}" to IndexedDB`, toError(err))
      );
    }
  }
  /**
   * Close the database and release resources.
   *
   * When the backend is `"idb"`, the database snapshot is persisted
   * to IndexedDB before closing.
   */
  async close() {
    if (this.#backend === "idb") {
      // Best-effort: a failed save() returns Err but close proceeds anyway.
      await this.save();
    }
    this.#db.close();
  }
  // Build the Transaction facade handed to transaction() callbacks. Its
  // exec/query mirror the public methods but are synchronous and report
  // errors as Results; callers typically unwrapOrThrow them so a failure
  // aborts (and rolls back) the enclosing transaction.
  #createTransaction() {
    const db = this.#db;
    return {
      exec(sql, params) {
        return wrapDbError(`Transaction exec failed: ${sql}`, () => {
          db.run(sql, params);
        });
      },
      query(sql, params) {
        return wrapDbError(`Transaction query failed: ${sql}`, () => {
          const results = db.exec(sql, params);
          return mapResultRows(results);
        });
      }
    };
  }
};
225
/**
 * Resolve the storage backend to use.
 *
 * An explicit `"memory"` or `"idb"` is honoured as-is; anything else
 * auto-detects, preferring IndexedDB persistence when the host provides it.
 */
function resolveBackend(configured) {
  switch (configured) {
    case "memory":
      return "memory";
    case "idb":
      return "idb";
    default:
      return typeof indexedDB === "undefined" ? "memory" : "idb";
  }
}
231
+
232
+ // ../client/src/db/schema-registry.ts
233
// Maps lakesync logical column types to SQLite column type names used in
// CREATE TABLE / ALTER TABLE statements. Note booleans map to INTEGER and
// json/null map to TEXT — presumably serialised by the caller; confirm
// against the delta application layer.
var COLUMN_TYPE_MAP = {
  string: "TEXT",
  number: "REAL",
  boolean: "INTEGER",
  json: "TEXT",
  null: "TEXT"
};
240
// Create the per-database metadata table (idempotent). Each synced table
// gets one row recording its registered schema as JSON plus a version
// counter that migrateSchema() bumps on every successful migration.
async function ensureMetaTable(db) {
  return db.exec(`
    CREATE TABLE IF NOT EXISTS _lakesync_meta (
      table_name TEXT PRIMARY KEY,
      schema_version INTEGER NOT NULL DEFAULT 1,
      schema_json TEXT NOT NULL,
      updated_at TEXT NOT NULL
    )
  `);
}
250
/**
 * Register (or re-register) a table schema.
 *
 * Validates the table and column identifiers, upserts the schema JSON into
 * `_lakesync_meta`, and creates the data table if it does not already exist.
 * Every data table carries a `_rowId TEXT PRIMARY KEY` column for sync
 * identity. Returns the transaction Result; identifier or meta-table
 * failures are returned early as Err.
 */
async function registerSchema(db, schema) {
  const tableCheck = assertValidIdentifier(schema.table);
  if (!tableCheck.ok) return tableCheck;
  for (const col of schema.columns) {
    const colCheck = assertValidIdentifier(col.name);
    if (!colCheck.ok) return colCheck;
  }
  const metaResult = await ensureMetaTable(db);
  if (!metaResult.ok) return metaResult;
  return db.transaction((tx) => {
    const now = (/* @__PURE__ */ new Date()).toISOString();
    const schemaJson = JSON.stringify(schema);
    // unwrapOrThrow converts a failed Result into a throw, which makes the
    // enclosing transaction() roll back.
    unwrapOrThrow(
      tx.exec(
        `INSERT INTO _lakesync_meta (table_name, schema_version, schema_json, updated_at)
         VALUES (?, 1, ?, ?)
         ON CONFLICT(table_name) DO UPDATE SET
           schema_json = excluded.schema_json,
           updated_at = excluded.updated_at`,
        [schema.table, schemaJson, now]
      )
    );
    // Identifiers were validated above, so quoting + interpolation is safe.
    const quotedTable = quoteIdentifier(schema.table);
    const columnDefs = schema.columns.map((col) => `${quoteIdentifier(col.name)} ${COLUMN_TYPE_MAP[col.type]}`).join(", ");
    const createSql = columnDefs ? `CREATE TABLE IF NOT EXISTS ${quotedTable} (_rowId TEXT PRIMARY KEY, ${columnDefs})` : `CREATE TABLE IF NOT EXISTS ${quotedTable} (_rowId TEXT PRIMARY KEY)`;
    unwrapOrThrow(tx.exec(createSql));
  });
}
278
/**
 * Fetch the registered schema for `table`.
 *
 * @returns Ok(schema) when registered, Ok(null) when the table has no
 *          registered schema, or Err(DbError) on query/parse failure.
 */
async function getSchema(db, table) {
  const metaResult = await ensureMetaTable(db);
  if (!metaResult.ok) return metaResult;
  const queryResult = await db.query(
    "SELECT schema_json FROM _lakesync_meta WHERE table_name = ?",
    [table]
  );
  if (!queryResult.ok) return queryResult;
  const firstRow = queryResult.value[0];
  if (!firstRow) {
    // No row means the table was never registered — not an error.
    return Ok(null);
  }
  try {
    const schema = JSON.parse(firstRow.schema_json);
    return Ok(schema);
  } catch (err) {
    // Consistency fix: use toError() like the rest of this module
    // (wrapDbError, LocalDB.open) so non-Error throwables are still
    // captured as the DbError cause instead of being dropped.
    return Err(
      new DbError(`Failed to parse schema JSON for table "${table}"`, toError(err))
    );
  }
}
302
/**
 * Migrate a table from `oldSchema` to `newSchema`.
 *
 * Only additive migrations are supported: new columns may be added, but
 * columns cannot be removed and existing column types cannot change. On
 * success the added columns are ALTER TABLEd in and the `_lakesync_meta`
 * row is updated with the new JSON and an incremented schema_version.
 *
 * Returns Err(SchemaError) for disallowed migrations, Err for identifier or
 * database failures, otherwise the transaction Result.
 */
async function migrateSchema(db, oldSchema, newSchema) {
  if (oldSchema.table !== newSchema.table) {
    return Err(
      new SchemaError(
        `Table name mismatch: old schema references "${oldSchema.table}" but new schema references "${newSchema.table}"`
      )
    );
  }
  const tableName = newSchema.table;
  const tableCheck = assertValidIdentifier(tableName);
  if (!tableCheck.ok) return tableCheck;
  for (const col of newSchema.columns) {
    const colCheck = assertValidIdentifier(col.name);
    if (!colCheck.ok) return colCheck;
  }
  // Index both column sets by name for O(1) lookups below.
  const oldColumnMap = /* @__PURE__ */ new Map();
  for (const col of oldSchema.columns) {
    oldColumnMap.set(col.name, col.type);
  }
  const newColumnMap = /* @__PURE__ */ new Map();
  for (const col of newSchema.columns) {
    newColumnMap.set(col.name, col.type);
  }
  // Reject column removal (non-additive).
  for (const col of oldSchema.columns) {
    if (!newColumnMap.has(col.name)) {
      return Err(
        new SchemaError(
          `Cannot remove column "${col.name}" from table "${tableName}". Only additive migrations are supported.`
        )
      );
    }
  }
  // Reject type changes on columns present in both schemas.
  for (const col of newSchema.columns) {
    const oldType = oldColumnMap.get(col.name);
    if (oldType !== void 0 && oldType !== col.type) {
      return Err(
        new SchemaError(
          `Cannot change type of column "${col.name}" in table "${tableName}" from "${oldType}" to "${col.type}". Type changes are not supported.`
        )
      );
    }
  }
  const addedColumns = newSchema.columns.filter((col) => !oldColumnMap.has(col.name));
  const metaResult = await ensureMetaTable(db);
  if (!metaResult.ok) return metaResult;
  return db.transaction((tx) => {
    const quotedTable = quoteIdentifier(tableName);
    // ALTER TABLE once per added column; unwrapOrThrow aborts (and rolls
    // back) the transaction on the first failure.
    for (const col of addedColumns) {
      unwrapOrThrow(
        tx.exec(
          `ALTER TABLE ${quotedTable} ADD COLUMN ${quoteIdentifier(col.name)} ${COLUMN_TYPE_MAP[col.type]}`
        )
      );
    }
    const now = (/* @__PURE__ */ new Date()).toISOString();
    const schemaJson = JSON.stringify(newSchema);
    unwrapOrThrow(
      tx.exec(
        `UPDATE _lakesync_meta
         SET schema_json = ?,
             schema_version = schema_version + 1,
             updated_at = ?
         WHERE table_name = ?`,
        [schemaJson, now, tableName]
      )
    );
  });
}
370
+
371
+ // ../client/src/queue/idb-action-queue.ts
372
+ import { openDB as openDB2 } from "idb";
373
var DB_NAME = "lakesync-action-queue";
var DB_VERSION = 1;
var STORE_NAME2 = "entries";
// The `hlc` field is stored as a decimal string and revived with BigInt()
// on read, so queue entries round-trip through IndexedDB cleanly.
/** Replace an action's hlc with its string form for storage. */
function serialiseAction(action) {
  return Object.assign({}, action, { hlc: String(action.hlc) });
}
/** Revive a stored action's hlc back into a BigInt. */
function deserialiseAction(serialised) {
  return Object.assign({}, serialised, { hlc: BigInt(serialised.hlc) });
}
/** Serialise a queue entry by serialising its nested action. */
function serialiseEntry(entry) {
  return Object.assign({}, entry, { action: serialiseAction(entry.action) });
}
/** Deserialise a stored queue entry, reviving its nested action. */
function deserialiseEntry(serialised) {
  return Object.assign({}, serialised, { action: deserialiseAction(serialised.action) });
}
388
/**
 * Run an async IndexedDB operation and capture any throw as a Result.
 *
 * @param operation - short description interpolated into the error message
 * @param fn - async operation to execute
 * @returns Ok(await fn()) on success, Err(LakeSyncError "QUEUE_ERROR") otherwise
 */
async function wrapIdbOp(operation, fn) {
  try {
    const value = await fn();
    return Ok(value);
  } catch (caught) {
    const detail = caught instanceof Error ? caught.message : String(caught);
    return Err(new LakeSyncError(`Failed to ${operation}: ${detail}`, "QUEUE_ERROR"));
  }
}
396
// Durable action queue persisted in IndexedDB. Entries move through
// "pending" -> "sending" and are deleted on ack; nack returns them to
// "pending" with exponential backoff. All methods return Results via
// wrapIdbOp.
var IDBActionQueue = class {
  // Promise for the open IDB database (opened eagerly in the constructor).
  dbPromise;
  // Monotonic suffix that keeps ids unique within one page session even
  // when two pushes share the same Date.now() millisecond.
  counter = 0;
  /**
   * Create a new IDB-backed action queue.
   *
   * @param dbName - Optional database name. Defaults to `'lakesync-action-queue'`.
   */
  constructor(dbName = DB_NAME) {
    this.dbPromise = openDB2(dbName, DB_VERSION, {
      upgrade(db) {
        const store = db.createObjectStore(STORE_NAME2, { keyPath: "id" });
        store.createIndex("status", "status");
        store.createIndex("createdAt", "createdAt");
      }
    });
  }
  /** Add an action to the queue. */
  async push(action) {
    return wrapIdbOp("push to action queue", async () => {
      const db = await this.dbPromise;
      const entry = {
        id: `idb-action-${Date.now()}-${++this.counter}`,
        action,
        status: "pending",
        createdAt: Date.now(),
        retryCount: 0
      };
      await db.put(STORE_NAME2, serialiseEntry(entry));
      return entry;
    });
  }
  /** Peek at pending entries (ordered by createdAt). */
  async peek(limit) {
    return wrapIdbOp("peek action queue", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME2, "readonly");
      // Walk the createdAt index so results come back oldest-first;
      // status and retryAfter are filtered client-side.
      const index = tx.objectStore(STORE_NAME2).index("createdAt");
      const results = [];
      let cursor = await index.openCursor();
      while (cursor && results.length < limit) {
        const serialised = cursor.value;
        if (serialised.status === "pending") {
          const entry = deserialiseEntry(serialised);
          // Skip entries whose backoff window has not elapsed yet.
          if (entry.retryAfter === void 0 || entry.retryAfter <= Date.now()) {
            results.push(entry);
          }
        }
        cursor = await cursor.continue();
      }
      return results;
    });
  }
  /** Mark entries as currently being sent. */
  async markSending(ids) {
    return wrapIdbOp("mark sending", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME2, "readwrite");
      const store = tx.objectStore(STORE_NAME2);
      for (const id of ids) {
        const serialised = await store.get(id);
        // Only pending entries transition; missing or already-sending
        // entries are left untouched.
        if (serialised?.status === "pending") {
          serialised.status = "sending";
          await store.put(serialised);
        }
      }
      await tx.done;
    });
  }
  /** Acknowledge successful delivery (removes entries). */
  async ack(ids) {
    return wrapIdbOp("ack", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME2, "readwrite");
      for (const id of ids) {
        await tx.objectStore(STORE_NAME2).delete(id);
      }
      await tx.done;
    });
  }
  /** Negative acknowledge — reset to pending with incremented retryCount and exponential backoff. */
  async nack(ids) {
    return wrapIdbOp("nack", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME2, "readwrite");
      const store = tx.objectStore(STORE_NAME2);
      for (const id of ids) {
        const serialised = await store.get(id);
        if (serialised) {
          serialised.status = "pending";
          serialised.retryCount++;
          // Backoff doubles per retry (2s, 4s, 8s, ...) capped at 30s.
          const backoffMs = Math.min(1e3 * 2 ** serialised.retryCount, 3e4);
          serialised.retryAfter = Date.now() + backoffMs;
          await store.put(serialised);
        }
      }
      await tx.done;
    });
  }
  /** Get the number of pending + sending entries. */
  async depth() {
    return wrapIdbOp("get depth", async () => {
      const db = await this.dbPromise;
      const all = await db.getAll(STORE_NAME2);
      return all.filter((e) => e.status === "pending" || e.status === "sending").length;
    });
  }
  /** Remove all entries. */
  async clear() {
    return wrapIdbOp("clear action queue", async () => {
      const db = await this.dbPromise;
      await db.clear(STORE_NAME2);
    });
  }
};
511
+
512
+ // ../client/src/queue/idb-queue.ts
513
+ import { openDB as openDB3 } from "idb";
514
var DB_NAME2 = "lakesync-queue";
var DB_VERSION2 = 1;
var STORE_NAME3 = "entries";
// Deltas carry a BigInt `hlc`; it is written to storage as a decimal
// string and revived with BigInt() on the way back out.
/** Replace a delta's hlc with its string form for storage. */
function serialiseDelta(delta) {
  return Object.assign({}, delta, { hlc: String(delta.hlc) });
}
/** Revive a stored delta's hlc back into a BigInt. */
function deserialiseDelta(serialised) {
  return Object.assign({}, serialised, { hlc: BigInt(serialised.hlc) });
}
/** Serialise a queue entry by serialising its nested delta. */
function serialiseEntry2(entry) {
  return Object.assign({}, entry, { delta: serialiseDelta(entry.delta) });
}
/** Deserialise a stored queue entry, reviving its nested delta. */
function deserialiseEntry2(serialised) {
  return Object.assign({}, serialised, { delta: deserialiseDelta(serialised.delta) });
}
529
/**
 * Run an async IndexedDB operation and capture any throw as a Result.
 * (Duplicate of wrapIdbOp produced by per-module bundling.)
 *
 * @param operation - short description interpolated into the error message
 * @param fn - async operation to execute
 * @returns Ok(await fn()) on success, Err(LakeSyncError "QUEUE_ERROR") otherwise
 */
async function wrapIdbOp2(operation, fn) {
  try {
    const value = await fn();
    return Ok(value);
  } catch (caught) {
    const detail = caught instanceof Error ? caught.message : String(caught);
    return Err(new LakeSyncError(`Failed to ${operation}: ${detail}`, "QUEUE_ERROR"));
  }
}
537
// Durable sync-delta queue persisted in IndexedDB. Entries move through
// "pending" -> "sending" and are deleted on ack; nack returns them to
// "pending" with exponential backoff. All methods return Results via
// wrapIdbOp2.
var IDBQueue = class {
  // Promise for the open IDB database (opened eagerly in the constructor).
  dbPromise;
  // Monotonic suffix keeping ids unique within one page session even when
  // two pushes share the same Date.now() millisecond.
  counter = 0;
  /**
   * Create a new IDB-backed sync queue.
   *
   * @param dbName - Optional database name. Defaults to `'lakesync-queue'`.
   * Useful for tests or running multiple independent queues.
   */
  constructor(dbName = DB_NAME2) {
    this.dbPromise = openDB3(dbName, DB_VERSION2, {
      upgrade(db) {
        const store = db.createObjectStore(STORE_NAME3, { keyPath: "id" });
        store.createIndex("status", "status");
        store.createIndex("createdAt", "createdAt");
      }
    });
  }
  /** Add a delta to the queue */
  async push(delta) {
    return wrapIdbOp2("push to queue", async () => {
      const db = await this.dbPromise;
      const entry = {
        id: `idb-${Date.now()}-${++this.counter}`,
        delta,
        status: "pending",
        createdAt: Date.now(),
        retryCount: 0
      };
      await db.put(STORE_NAME3, serialiseEntry2(entry));
      return entry;
    });
  }
  /** Peek at pending entries (ordered by createdAt) */
  async peek(limit) {
    return wrapIdbOp2("peek queue", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME3, "readonly");
      // Walk the createdAt index so results come back oldest-first;
      // status and retryAfter are filtered client-side.
      const index = tx.objectStore(STORE_NAME3).index("createdAt");
      const results = [];
      let cursor = await index.openCursor();
      while (cursor && results.length < limit) {
        const serialised = cursor.value;
        if (serialised.status === "pending") {
          const entry = deserialiseEntry2(serialised);
          // Skip entries whose backoff window has not elapsed yet.
          if (entry.retryAfter === void 0 || entry.retryAfter <= Date.now()) {
            results.push(entry);
          }
        }
        cursor = await cursor.continue();
      }
      return results;
    });
  }
  /** Mark entries as currently being sent */
  async markSending(ids) {
    return wrapIdbOp2("mark sending", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME3, "readwrite");
      const store = tx.objectStore(STORE_NAME3);
      for (const id of ids) {
        const serialised = await store.get(id);
        // Only pending entries transition; missing or already-sending
        // entries are left untouched.
        if (serialised?.status === "pending") {
          serialised.status = "sending";
          await store.put(serialised);
        }
      }
      await tx.done;
    });
  }
  /** Acknowledge successful delivery (removes entries) */
  async ack(ids) {
    return wrapIdbOp2("ack", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME3, "readwrite");
      for (const id of ids) {
        await tx.objectStore(STORE_NAME3).delete(id);
      }
      await tx.done;
    });
  }
  /** Negative acknowledge — reset to pending with incremented retryCount and exponential backoff */
  async nack(ids) {
    return wrapIdbOp2("nack", async () => {
      const db = await this.dbPromise;
      const tx = db.transaction(STORE_NAME3, "readwrite");
      const store = tx.objectStore(STORE_NAME3);
      for (const id of ids) {
        const serialised = await store.get(id);
        if (serialised) {
          serialised.status = "pending";
          serialised.retryCount++;
          // Backoff doubles per retry (2s, 4s, 8s, ...) capped at 30s.
          const backoffMs = Math.min(1e3 * 2 ** serialised.retryCount, 3e4);
          serialised.retryAfter = Date.now() + backoffMs;
          await store.put(serialised);
        }
      }
      await tx.done;
    });
  }
  /** Get the number of pending + sending entries */
  async depth() {
    return wrapIdbOp2("get depth", async () => {
      const db = await this.dbPromise;
      const all = await db.getAll(STORE_NAME3);
      // NOTE(review): this filters `status !== "acked"`, but acked entries
      // are deleted in ack(), so in practice this equals the explicit
      // pending+sending count used by IDBActionQueue.depth(). Consider
      // aligning the two implementations.
      return all.filter((e) => e.status !== "acked").length;
    });
  }
  /** Remove all entries */
  async clear() {
    return wrapIdbOp2("clear queue", async () => {
      const db = await this.dbPromise;
      await db.clear(STORE_NAME3);
    });
  }
};
653
+
654
+ // ../client/src/queue/memory-action-queue.ts
655
// In-memory action queue with the same contract as IDBActionQueue:
// entries live in a Map keyed by id and do not survive a reload.
var MemoryActionQueue = class {
  entries = /* @__PURE__ */ new Map();
  counter = 0;
  /** Add an action to the queue. */
  async push(action) {
    this.counter += 1;
    const queued = {
      id: `mem-action-${this.counter}`,
      action,
      status: "pending",
      createdAt: Date.now(),
      retryCount: 0
    };
    this.entries.set(queued.id, queued);
    return Ok(queued);
  }
  /** Peek at pending entries (ordered by createdAt), skipping entries with future retryAfter. */
  async peek(limit) {
    const now = Date.now();
    const ready = [];
    for (const candidate of this.entries.values()) {
      if (candidate.status !== "pending") continue;
      if (candidate.retryAfter !== void 0 && candidate.retryAfter > now) continue;
      ready.push(candidate);
    }
    ready.sort((first, second) => first.createdAt - second.createdAt);
    return Ok(ready.slice(0, limit));
  }
  /** Mark entries as currently being sent. */
  async markSending(ids) {
    for (const id of ids) {
      const found = this.entries.get(id);
      if (found !== void 0 && found.status === "pending") {
        found.status = "sending";
      }
    }
    return Ok(void 0);
  }
  /** Acknowledge successful delivery (removes entries). */
  async ack(ids) {
    ids.forEach((id) => this.entries.delete(id));
    return Ok(void 0);
  }
  /** Negative acknowledge — reset to pending with incremented retryCount and exponential backoff. */
  async nack(ids) {
    for (const id of ids) {
      const found = this.entries.get(id);
      if (!found) continue;
      found.status = "pending";
      found.retryCount += 1;
      // Backoff doubles per retry (2s, 4s, 8s, ...) capped at 30s.
      const backoffMs = Math.min(1e3 * 2 ** found.retryCount, 3e4);
      found.retryAfter = Date.now() + backoffMs;
    }
    return Ok(void 0);
  }
  /** Get the number of pending + sending entries. */
  async depth() {
    let total = 0;
    for (const candidate of this.entries.values()) {
      if (candidate.status === "pending" || candidate.status === "sending") {
        total += 1;
      }
    }
    return Ok(total);
  }
  /** Remove all entries. */
  async clear() {
    this.entries.clear();
    return Ok(void 0);
  }
};
719
+
720
+ // ../client/src/queue/memory-queue.ts
721
// In-memory sync-delta queue with the same contract as IDBQueue:
// entries live in a Map keyed by id and do not survive a reload.
var MemoryQueue = class {
  entries = /* @__PURE__ */ new Map();
  counter = 0;
  /** Add a delta to the queue */
  async push(delta) {
    const entry = {
      id: `mem-${++this.counter}`,
      delta,
      status: "pending",
      createdAt: Date.now(),
      retryCount: 0
    };
    this.entries.set(entry.id, entry);
    return Ok(entry);
  }
  /** Peek at pending entries (ordered by createdAt), skipping entries with future retryAfter */
  async peek(limit) {
    const now = Date.now();
    const pending = [...this.entries.values()].filter((e) => e.status === "pending" && (e.retryAfter === void 0 || e.retryAfter <= now)).sort((a, b) => a.createdAt - b.createdAt).slice(0, limit);
    return Ok(pending);
  }
  /** Mark entries as currently being sent */
  async markSending(ids) {
    for (const id of ids) {
      const entry = this.entries.get(id);
      // Only pending entries transition to sending.
      if (entry?.status === "pending") {
        entry.status = "sending";
      }
    }
    return Ok(void 0);
  }
  /** Acknowledge successful delivery (removes entries) */
  async ack(ids) {
    for (const id of ids) {
      this.entries.delete(id);
    }
    return Ok(void 0);
  }
  /** Negative acknowledge — reset to pending with incremented retryCount and exponential backoff */
  async nack(ids) {
    for (const id of ids) {
      const entry = this.entries.get(id);
      if (entry) {
        entry.status = "pending";
        entry.retryCount++;
        // Backoff doubles per retry (2s, 4s, 8s, ...) capped at 30s.
        const backoffMs = Math.min(1e3 * 2 ** entry.retryCount, 3e4);
        entry.retryAfter = Date.now() + backoffMs;
      }
    }
    return Ok(void 0);
  }
  /** Get the number of pending + sending entries */
  async depth() {
    // Consistency fix: count statuses explicitly (matching this method's
    // doc comment and MemoryActionQueue.depth) instead of `!== "acked"`.
    // Acked entries are deleted in ack(), so the result is identical, but
    // the explicit form cannot silently over-count if new statuses appear.
    const count = [...this.entries.values()].filter(
      (e) => e.status === "pending" || e.status === "sending"
    ).length;
    return Ok(count);
  }
  /** Remove all entries */
  async clear() {
    this.entries.clear();
    return Ok(void 0);
  }
};
783
+
784
+ // ../client/src/sync/applier.ts
785
/**
 * Apply a batch of remote deltas to the local database inside a single
 * SQL transaction, resolving conflicts against locally queued deltas and
 * advancing the per-table sync cursor to the highest HLC seen.
 *
 * Uses manual BEGIN/COMMIT/ROLLBACK via db.exec rather than
 * db.transaction() because the work between BEGIN and COMMIT awaits
 * queue operations, and db.transaction()'s callback is synchronous.
 *
 * @returns Ok(appliedCount) — the number of deltas actually written — or
 *          Err(LakeSyncError "APPLY_ERROR") after rolling back.
 */
async function applyRemoteDeltas(db, deltas, resolver, pendingQueue) {
  if (deltas.length === 0) {
    return Ok(0);
  }
  // _sync_cursor records, per table, the HLC of the newest applied delta.
  const cursorTableResult = await db.exec(`
    CREATE TABLE IF NOT EXISTS _sync_cursor (
      table_name TEXT PRIMARY KEY,
      last_synced_hlc TEXT NOT NULL
    )
  `);
  if (!cursorTableResult.ok) {
    return Err(
      new LakeSyncError(
        "Failed to create _sync_cursor table",
        "APPLY_ERROR",
        cursorTableResult.error
      )
    );
  }
  const beginResult = await db.exec("BEGIN");
  if (!beginResult.ok) {
    return Err(
      new LakeSyncError(
        "Failed to begin transaction for remote delta application",
        "APPLY_ERROR",
        beginResult.error
      )
    );
  }
  // Snapshot the entire pending queue so each remote delta can be checked
  // for a conflicting local write on the same (table, rowId).
  const peekResult = await pendingQueue.peek(Number.MAX_SAFE_INTEGER);
  if (!peekResult.ok) {
    await db.exec("ROLLBACK");
    return Err(
      new LakeSyncError(
        "Failed to peek pending queue for conflict detection",
        "APPLY_ERROR",
        peekResult.error
      )
    );
  }
  const pendingMap = /* @__PURE__ */ new Map();
  for (const entry of peekResult.value) {
    // Later entries for the same key overwrite earlier ones, so the map
    // holds the most recently queued local delta per (table, rowId).
    pendingMap.set(`${entry.delta.table}:${entry.delta.rowId}`, entry);
  }
  let appliedCount = 0;
  const maxHlcPerTable = /* @__PURE__ */ new Map();
  for (const remoteDelta of deltas) {
    const result = await applyOneDelta(db, remoteDelta, resolver, pendingQueue, pendingMap);
    if (!result.ok) {
      await db.exec("ROLLBACK");
      return result;
    }
    if (result.value) {
      appliedCount++;
    }
    // The cursor advances past every processed delta, applied or not —
    // a delta losing conflict resolution still moves the cursor.
    const currentMax = maxHlcPerTable.get(remoteDelta.table);
    if (currentMax === void 0 || HLC.compare(remoteDelta.hlc, currentMax) > 0) {
      maxHlcPerTable.set(remoteDelta.table, remoteDelta.hlc);
    }
  }
  for (const [tableName, hlc] of maxHlcPerTable) {
    const cursorResult = await db.exec(
      "INSERT OR REPLACE INTO _sync_cursor (table_name, last_synced_hlc) VALUES (?, ?)",
      [tableName, hlc.toString()]
    );
    if (!cursorResult.ok) {
      await db.exec("ROLLBACK");
      return Err(
        new LakeSyncError(
          `Failed to update sync cursor for table "${tableName}"`,
          "APPLY_ERROR",
          cursorResult.error
        )
      );
    }
  }
  const commitResult = await db.exec("COMMIT");
  if (!commitResult.ok) {
    await db.exec("ROLLBACK");
    return Err(
      new LakeSyncError(
        "Failed to commit transaction for remote delta application",
        "APPLY_ERROR",
        commitResult.error
      )
    );
  }
  return Ok(appliedCount);
}
874
/**
 * Apply a single remote delta, resolving any conflict with a locally
 * queued delta for the same (table, rowId).
 *
 * - No local conflict: the remote delta is applied directly.
 * - Conflict, remote wins (the resolver returned the remote delta, judged
 *   by matching clientId and hlc): the resolved delta is applied and the
 *   losing local queue entry is acked away.
 * - Conflict, local wins: nothing is written — the remote delta is dropped
 *   and the local entry stays queued for a later push.
 *
 * @returns Ok(true) when a delta was written, Ok(false) when the local
 *          side won, or Err(LakeSyncError "APPLY_ERROR").
 */
async function applyOneDelta(db, remoteDelta, resolver, pendingQueue, pendingMap) {
  const conflictingEntry = pendingMap.get(`${remoteDelta.table}:${remoteDelta.rowId}`);
  if (conflictingEntry) {
    const localDelta = conflictingEntry.delta;
    const resolveResult = resolver.resolve(localDelta, remoteDelta);
    if (!resolveResult.ok) {
      return Err(
        new LakeSyncError(
          `Conflict resolution failed for row "${remoteDelta.rowId}" in table "${remoteDelta.table}"`,
          "APPLY_ERROR",
          resolveResult.error
        )
      );
    }
    const resolved = resolveResult.value;
    // Identify the winner by (clientId, hlc) identity; hlc is compared
    // with === — presumably a BigInt, for which === is value equality.
    const remoteWon = resolved.clientId === remoteDelta.clientId && resolved.hlc === remoteDelta.hlc;
    if (remoteWon) {
      const applyResult2 = await applySqlDelta(db, resolved);
      if (!applyResult2.ok) {
        return applyResult2;
      }
      // The local delta lost: remove it so it is never pushed upstream.
      const ackResult = await pendingQueue.ack([conflictingEntry.id]);
      if (!ackResult.ok) {
        return Err(
          new LakeSyncError(
            `Failed to ack local queue entry "${conflictingEntry.id}" after remote win`,
            "APPLY_ERROR",
            ackResult.error
          )
        );
      }
      return Ok(true);
    }
    return Ok(false);
  }
  const applyResult = await applySqlDelta(db, remoteDelta);
  if (!applyResult.ok) {
    return applyResult;
  }
  return Ok(true);
}
915
// Validate every identifier a delta will interpolate into SQL (the table
// name first, then each column name, in order). Identifier failures are
// wrapped as APPLY_ERROR so callers see a uniform error code.
function validateDeltaIdentifiers(delta) {
  const identifiers = [delta.table, ...delta.columns.map((c) => c.column)];
  for (const identifier of identifiers) {
    const check = assertValidIdentifier(identifier);
    if (!check.ok) {
      return Err(new LakeSyncError(check.error.message, "APPLY_ERROR"));
    }
  }
  return Ok(void 0);
}
928
// Apply one delta (INSERT / UPDATE / DELETE) to the local SQLite database.
//
// Identifiers are validated before being interpolated into the statement;
// all row values are bound as parameters. Returns Ok(true) on success
// (including the no-op UPDATE with zero columns), or Err with APPLY_ERROR.
//
// FIX: the original switch had no default branch, so an unrecognised
// delta.op returned `undefined` and callers reading `.ok` crashed with a
// TypeError instead of receiving a proper error Result.
async function applySqlDelta(db, delta) {
  const identifierCheck = validateDeltaIdentifiers(delta);
  if (!identifierCheck.ok) return identifierCheck;
  const quotedTable = quoteIdentifier(delta.table);
  // Uniform wrapper for a failed db.exec; messages match the op being applied.
  const wrapExecError = (op, cause) =>
    Err(
      new LakeSyncError(
        `Failed to apply ${op} for row "${delta.rowId}" in table "${delta.table}"`,
        "APPLY_ERROR",
        cause
      )
    );
  switch (delta.op) {
    case "INSERT": {
      const colNames = delta.columns.map((c) => quoteIdentifier(c.column));
      const allColumns = ["_rowId", ...colNames];
      const placeholders = allColumns.map(() => "?").join(", ");
      const values = [delta.rowId, ...delta.columns.map((c) => c.value)];
      const sql = `INSERT INTO ${quotedTable} (${allColumns.join(", ")}) VALUES (${placeholders})`;
      const result = await db.exec(sql, values);
      if (!result.ok) return wrapExecError("INSERT", result.error);
      return Ok(true);
    }
    case "UPDATE": {
      // An UPDATE delta with no columns is a successful no-op.
      if (delta.columns.length === 0) {
        return Ok(true);
      }
      const setClauses = delta.columns.map((c) => `${quoteIdentifier(c.column)} = ?`).join(", ");
      const values = [...delta.columns.map((c) => c.value), delta.rowId];
      const sql = `UPDATE ${quotedTable} SET ${setClauses} WHERE _rowId = ?`;
      const result = await db.exec(sql, values);
      if (!result.ok) return wrapExecError("UPDATE", result.error);
      return Ok(true);
    }
    case "DELETE": {
      const sql = `DELETE FROM ${quotedTable} WHERE _rowId = ?`;
      const result = await db.exec(sql, [delta.rowId]);
      if (!result.ok) return wrapExecError("DELETE", result.error);
      return Ok(true);
    }
    default:
      // Defensive: reachable only if a malformed delta arrives at runtime
      // (the TypeScript source's exhaustive switch cannot express this).
      return Err(
        new LakeSyncError(
          `Unknown delta op "${delta.op}" for row "${delta.rowId}" in table "${delta.table}"`,
          "APPLY_ERROR"
        )
      );
  }
}
986
+
987
+ // ../client/src/sync/tracker.ts
988
// Return a shallow copy of `row` with the internal `_rowId` key stripped,
// leaving every other key/value pair untouched.
function rowWithoutId(row) {
  return Object.fromEntries(Object.entries(row).filter(([key]) => key !== "_rowId"));
}
997
var SyncTracker = class {
  constructor(db, queue, hlc, clientId) {
    this.db = db;
    this.queue = queue;
    this.hlc = hlc;
    this.clientId = clientId;
  }
  // Per-table schema cache: repeated writes skip re-reading schema metadata.
  schemaCache = /* @__PURE__ */ new Map();
  // Fetch the schema for `table`, memoising successful lookups (including
  // lookups that found no schema).
  async getCachedSchema(table) {
    if (this.schemaCache.has(table)) {
      const cached = this.schemaCache.get(table);
      return Ok(cached ?? void 0);
    }
    const fetched = await getSchema(this.db, table);
    if (!fetched.ok) {
      return fetched;
    }
    this.schemaCache.set(table, fetched.value);
    return Ok(fetched.value ?? void 0);
  }
  /**
   * Insert a new row into the specified table.
   *
   * Writes the row to SQLite, then extracts an INSERT delta and pushes it
   * to the outbound queue.
   *
   * @param table - The target table name
   * @param rowId - The unique row identifier
   * @param data - Column name/value pairs for the new row
   * @returns Ok on success, or Err with a LakeSyncError on failure
   */
  async insert(table, rowId, data) {
    const tableOk = assertValidIdentifier(table);
    if (!tableOk.ok) return tableOk;
    for (const name of Object.keys(data)) {
      const colOk = assertValidIdentifier(name);
      if (!colOk.ok) return colOk;
    }
    const schemaResult = await this.getCachedSchema(table);
    if (!schemaResult.ok) return schemaResult;
    const columns = Object.keys(data);
    const allColumns = ["_rowId", ...columns.map((c) => quoteIdentifier(c))];
    const placeholders = allColumns.map(() => "?").join(", ");
    const sql = `INSERT INTO ${quoteIdentifier(table)} (${allColumns.join(", ")}) VALUES (${placeholders})`;
    const execResult = await this.db.exec(sql, [rowId, ...columns.map((col) => data[col])]);
    if (!execResult.ok) return execResult;
    // Stamp the delta after the local write succeeds.
    const delta = await extractDelta(null, data, {
      table,
      rowId,
      clientId: this.clientId,
      hlc: this.hlc.now(),
      schema: schemaResult.value
    });
    if (delta) {
      const pushResult = await this.queue.push(delta);
      if (!pushResult.ok) return pushResult;
    }
    return Ok(void 0);
  }
  /**
   * Update an existing row in the specified table.
   *
   * Reads the current row state, applies the partial update in SQLite, and
   * pushes an UPDATE delta containing only the changed columns.
   *
   * @param table - The target table name
   * @param rowId - The unique row identifier
   * @param data - Column name/value pairs to update (partial)
   * @returns Ok on success, Err if the row is not found or on failure
   */
  async update(table, rowId, data) {
    const tableOk = assertValidIdentifier(table);
    if (!tableOk.ok) return tableOk;
    for (const name of Object.keys(data)) {
      const colOk = assertValidIdentifier(name);
      if (!colOk.ok) return colOk;
    }
    const schemaResult = await this.getCachedSchema(table);
    if (!schemaResult.ok) return schemaResult;
    const queryResult = await this.db.query(
      `SELECT * FROM ${quoteIdentifier(table)} WHERE _rowId = ?`,
      [rowId]
    );
    if (!queryResult.ok) return queryResult;
    const existing = queryResult.value[0];
    if (!existing) {
      return Err(
        new LakeSyncError(`Row "${rowId}" not found in table "${table}"`, "ROW_NOT_FOUND")
      );
    }
    const before = rowWithoutId(existing);
    const columns = Object.keys(data);
    const setClauses = columns.map((col) => `${quoteIdentifier(col)} = ?`).join(", ");
    const sql = `UPDATE ${quoteIdentifier(table)} SET ${setClauses} WHERE _rowId = ?`;
    const execResult = await this.db.exec(sql, [...columns.map((col) => data[col]), rowId]);
    if (!execResult.ok) return execResult;
    // Delta captures only the difference between the old row and the merge.
    const delta = await extractDelta(before, { ...before, ...data }, {
      table,
      rowId,
      clientId: this.clientId,
      hlc: this.hlc.now(),
      schema: schemaResult.value
    });
    if (delta) {
      const pushResult = await this.queue.push(delta);
      if (!pushResult.ok) return pushResult;
    }
    return Ok(void 0);
  }
  /**
   * Delete a row from the specified table.
   *
   * Reads the current row state (needed for delta extraction), removes the
   * row from SQLite, and pushes a DELETE delta to the queue.
   *
   * @param table - The target table name
   * @param rowId - The unique row identifier
   * @returns Ok on success, Err if the row is not found or on failure
   */
  async delete(table, rowId) {
    const tableOk = assertValidIdentifier(table);
    if (!tableOk.ok) return tableOk;
    const schemaResult = await this.getCachedSchema(table);
    if (!schemaResult.ok) return schemaResult;
    const queryResult = await this.db.query(
      `SELECT * FROM ${quoteIdentifier(table)} WHERE _rowId = ?`,
      [rowId]
    );
    if (!queryResult.ok) return queryResult;
    const existing = queryResult.value[0];
    if (!existing) {
      return Err(
        new LakeSyncError(`Row "${rowId}" not found in table "${table}"`, "ROW_NOT_FOUND")
      );
    }
    const before = rowWithoutId(existing);
    const execResult = await this.db.exec(
      `DELETE FROM ${quoteIdentifier(table)} WHERE _rowId = ?`,
      [rowId]
    );
    if (!execResult.ok) return execResult;
    const delta = await extractDelta(before, null, {
      table,
      rowId,
      clientId: this.clientId,
      hlc: this.hlc.now(),
      schema: schemaResult.value
    });
    if (delta) {
      const pushResult = await this.queue.push(delta);
      if (!pushResult.ok) return pushResult;
    }
    return Ok(void 0);
  }
  /**
   * Query the local database.
   *
   * Pass-through to the underlying LocalDB query method.
   *
   * @param sql - The SQL query to execute
   * @param params - Optional bind parameters
   * @returns The query results as typed rows, or a DbError on failure
   */
  async query(sql, params) {
    return this.db.query(sql, params);
  }
};
1173
+
1174
+ // ../client/src/sync/coordinator.ts
1175
// Default polling cadence (ms) when the transport has no realtime channel.
var AUTO_SYNC_INTERVAL_MS = 10000;
// Slower heartbeat cadence (ms) used when the transport supports realtime pushes.
var REALTIME_HEARTBEAT_MS = 60000;
1177
var SyncCoordinator = class {
  tracker;
  queue;
  hlc;
  transport;
  db;
  resolver = new LWWResolver();
  _clientId;
  maxRetries;
  syncMode;
  autoSyncIntervalMs;
  realtimeHeartbeatMs;
  // Cursor of the newest server HLC applied locally; zero means "never synced".
  lastSyncedHlc = HLC.encode(0, 0);
  _lastSyncTime = null;
  syncIntervalId = null;
  visibilityHandler = null;
  // Re-entrancy guard so overlapping timers/visibility events cannot run
  // two sync cycles at once.
  syncing = false;
  actionQueue;
  maxActionRetries;
  listeners = {
    onChange: [],
    onSyncComplete: [],
    onError: [],
    onActionComplete: []
  };
  constructor(db, transport, config) {
    this.db = db;
    this.transport = transport;
    this.hlc = config?.hlc ?? new HLC();
    this.queue = config?.queue ?? new IDBQueue();
    this._clientId = config?.clientId ?? `client-${crypto.randomUUID()}`;
    this.maxRetries = config?.maxRetries ?? 10;
    this.syncMode = config?.syncMode ?? "full";
    this.autoSyncIntervalMs = config?.autoSyncIntervalMs ?? AUTO_SYNC_INTERVAL_MS;
    this.realtimeHeartbeatMs = config?.realtimeHeartbeatMs ?? REALTIME_HEARTBEAT_MS;
    this.actionQueue = config?.actionQueue ?? null;
    this.maxActionRetries = config?.maxActionRetries ?? 5;
    this.tracker = new SyncTracker(db, this.queue, this.hlc, this._clientId);
    if (this.transport.onBroadcast) {
      this.transport.onBroadcast((deltas, serverHlc) => {
        void this.handleBroadcast(deltas, serverHlc);
      });
    }
  }
  /** Register an event listener */
  on(event, listener) {
    this.listeners[event].push(listener);
  }
  /** Remove an event listener */
  off(event, listener) {
    const arr = this.listeners[event];
    const idx = arr.indexOf(listener);
    if (idx !== -1) arr.splice(idx, 1);
  }
  emit(event, ...args) {
    for (const fn of this.listeners[event]) {
      try {
        fn(...args);
      } catch {
        // Listener exceptions are deliberately isolated from the sync loop.
      }
    }
  }
  /** Push pending deltas to the gateway via the transport */
  async pushToGateway() {
    const peekResult = await this.queue.peek(100);
    if (!peekResult.ok || peekResult.value.length === 0) return;
    // Entries that exhausted their retries are dropped (dead-lettered).
    const deadLettered = peekResult.value.filter((e) => e.retryCount >= this.maxRetries);
    const entries = peekResult.value.filter((e) => e.retryCount < this.maxRetries);
    if (deadLettered.length > 0) {
      console.warn(
        `[SyncCoordinator] Dead-lettering ${deadLettered.length} entries after ${this.maxRetries} retries`
      );
      await this.queue.ack(deadLettered.map((e) => e.id));
      this.emit(
        "onError",
        new Error(`Dead-lettered ${deadLettered.length} entries after ${this.maxRetries} retries`)
      );
    }
    if (entries.length === 0) return;
    const ids = entries.map((e) => e.id);
    await this.queue.markSending(ids);
    const pushResult = await this.transport.push({
      clientId: this._clientId,
      deltas: entries.map((e) => e.delta),
      lastSeenHlc: this.hlc.now()
    });
    if (pushResult.ok) {
      await this.queue.ack(ids);
      this.lastSyncedHlc = pushResult.value.serverHlc;
      this._lastSyncTime = /* @__PURE__ */ new Date();
    } else {
      // Failed batch goes back to pending with an incremented retry count.
      await this.queue.nack(ids);
    }
  }
  /**
   * Pull deltas from a named adapter source.
   *
   * Convenience wrapper around {@link pullFromGateway} that passes the
   * `source` field through to the gateway, triggering an adapter-sourced
   * pull instead of a buffer pull.
   */
  async pullFrom(source) {
    return this.pullFromGateway(source);
  }
  /** Pull remote deltas from the gateway and apply them. Returns applied count. */
  async pullFromGateway(source) {
    const pullResult = await this.transport.pull({
      clientId: this._clientId,
      sinceHlc: this.lastSyncedHlc,
      maxDeltas: 1e3,
      source
    });
    if (!pullResult.ok || pullResult.value.deltas.length === 0) return 0;
    const { deltas, serverHlc } = pullResult.value;
    const applyResult = await applyRemoteDeltas(this.db, deltas, this.resolver, this.queue);
    if (applyResult.ok) {
      this.lastSyncedHlc = serverHlc;
      this._lastSyncTime = /* @__PURE__ */ new Date();
      if (applyResult.value > 0) {
        this.emit("onChange", applyResult.value);
      }
      return applyResult.value;
    }
    return 0;
  }
  /**
   * Handle a server-initiated broadcast of deltas.
   *
   * Applies the deltas using the same conflict resolution and idempotency
   * logic as a regular pull. Advances `lastSyncedHlc` and emits `onChange`.
   */
  async handleBroadcast(deltas, serverHlc) {
    if (deltas.length === 0) return;
    try {
      const applyResult = await applyRemoteDeltas(this.db, deltas, this.resolver, this.queue);
      if (applyResult.ok) {
        // Broadcasts may arrive out of order; never move the cursor backwards.
        if (HLC.compare(serverHlc, this.lastSyncedHlc) > 0) {
          this.lastSyncedHlc = serverHlc;
        }
        this._lastSyncTime = /* @__PURE__ */ new Date();
        if (applyResult.value > 0) {
          this.emit("onChange", applyResult.value);
        }
      }
    } catch (err) {
      this.emit("onError", err instanceof Error ? err : new Error(String(err)));
    }
  }
  /** Get the queue depth */
  async queueDepth() {
    const result = await this.queue.depth();
    return result.ok ? result.value : 0;
  }
  /** Get the client identifier */
  get clientId() {
    return this._clientId;
  }
  /** Get the last successful sync time, or null if never synced */
  get lastSyncTime() {
    return this._lastSyncTime;
  }
  /**
   * Start auto-sync: periodic interval + visibility change handler.
   * Synchronises (push + pull) on tab focus and on a timer whose cadence
   * depends on whether the transport supports realtime delivery.
   */
  startAutoSync() {
    this.transport.connect?.();
    const intervalMs = this.transport.supportsRealtime ? this.realtimeHeartbeatMs : this.autoSyncIntervalMs;
    this.syncIntervalId = setInterval(() => {
      void this.syncOnce();
    }, intervalMs);
    this.setupVisibilitySync();
  }
  /** Register a visibility change listener to sync on tab focus. */
  setupVisibilitySync() {
    this.visibilityHandler = () => {
      if (typeof document !== "undefined" && document.visibilityState === "visible") {
        void this.syncOnce();
      }
    };
    if (typeof document !== "undefined") {
      document.addEventListener("visibilitychange", this.visibilityHandler);
    }
  }
  /**
   * Perform initial sync via checkpoint download.
   *
   * Called on first sync when `lastSyncedHlc` is zero. Downloads the
   * server's checkpoint, applies the deltas locally, and advances the sync
   * cursor to the snapshot's HLC. If no checkpoint is available or the
   * transport does not support checkpoints, falls back to incremental pull.
   *
   * FIX: the original advanced `lastSyncedHlc` to the snapshot HLC even
   * when applying the checkpoint deltas failed (and was rolled back),
   * permanently skipping those deltas. The cursor now only advances after
   * a successful apply, so a later incremental pull can retry.
   */
  async initialSync() {
    if (!this.transport.checkpoint) return;
    const result = await this.transport.checkpoint();
    if (!result.ok || result.value === null) return;
    const { deltas, snapshotHlc } = result.value;
    if (deltas.length > 0) {
      const applyResult = await applyRemoteDeltas(this.db, deltas, this.resolver, this.queue);
      if (!applyResult.ok) {
        this.emit("onError", applyResult.error);
        return;
      }
    }
    this.lastSyncedHlc = snapshotHlc;
    this._lastSyncTime = /* @__PURE__ */ new Date();
  }
  /** Perform a single sync cycle (push + pull + actions, depending on syncMode). */
  async syncOnce() {
    if (this.syncing) return;
    this.syncing = true;
    try {
      if (this.syncMode !== "pushOnly") {
        if (this.lastSyncedHlc === HLC.encode(0, 0)) {
          await this.initialSync();
        }
        await this.pullFromGateway();
      }
      if (this.syncMode !== "pullOnly") {
        await this.pushToGateway();
      }
      await this.processActionQueue();
      this.emit("onSyncComplete");
    } catch (err) {
      this.emit("onError", err instanceof Error ? err : new Error(String(err)));
    } finally {
      this.syncing = false;
    }
  }
  /**
   * Submit an action for execution.
   *
   * Pushes the action to the ActionQueue and triggers immediate processing.
   * The action will be sent to the gateway on the next sync cycle or
   * immediately if not currently syncing.
   *
   * @param params - Partial action (connector, actionType, params). ActionId and HLC are generated.
   */
  async executeAction(params) {
    if (!this.actionQueue) {
      this.emit("onError", new Error("No action queue configured"));
      return;
    }
    const hlc = this.hlc.now();
    const { generateActionId } = await import("./src-WYBF5LOI.js");
    const actionId = await generateActionId({
      clientId: this._clientId,
      hlc,
      connector: params.connector,
      actionType: params.actionType,
      params: params.params
    });
    const action = {
      actionId,
      clientId: this._clientId,
      hlc,
      connector: params.connector,
      actionType: params.actionType,
      params: params.params,
      idempotencyKey: params.idempotencyKey
    };
    await this.actionQueue.push(action);
    void this.processActionQueue();
  }
  /**
   * Process pending actions from the action queue.
   *
   * Peeks at pending entries, sends them to the gateway via
   * `transport.executeAction()`, and acks/nacks based on the per-action
   * result. Dead-letters entries after `maxActionRetries` failures.
   *
   * FIX: the original acked ALL entries up front and then built
   * `retryableIds`/`ackableIds` arrays that were never used — so actions
   * that failed with a retryable error were silently dropped instead of
   * retried. Retryable failures are now nacked back to the queue.
   */
  async processActionQueue() {
    if (!this.actionQueue || !this.transport.executeAction) return;
    const peekResult = await this.actionQueue.peek(100);
    if (!peekResult.ok || peekResult.value.length === 0) return;
    const deadLettered = peekResult.value.filter((e) => e.retryCount >= this.maxActionRetries);
    const entries = peekResult.value.filter((e) => e.retryCount < this.maxActionRetries);
    if (deadLettered.length > 0) {
      console.warn(
        `[SyncCoordinator] Dead-lettering ${deadLettered.length} actions after ${this.maxActionRetries} retries`
      );
      await this.actionQueue.ack(deadLettered.map((e) => e.id));
      for (const entry of deadLettered) {
        this.emit("onActionComplete", entry.action.actionId, {
          actionId: entry.action.actionId,
          code: "DEAD_LETTERED",
          message: `Action dead-lettered after ${this.maxActionRetries} retries`,
          retryable: false
        });
      }
    }
    if (entries.length === 0) return;
    const ids = entries.map((e) => e.id);
    await this.actionQueue.markSending(ids);
    const transportResult = await this.transport.executeAction({
      clientId: this._clientId,
      actions: entries.map((e) => e.action)
    });
    if (!transportResult.ok) {
      // Transport-level failure: the whole batch goes back for retry.
      await this.actionQueue.nack(ids);
      return;
    }
    // Partition per-action results: retryable errors are nacked so they run
    // again on a later cycle; everything else (success or terminal error) is
    // acked. Entries without a matching result are acked, preserving the old
    // behaviour for short result arrays.
    // NOTE(review): assumes results[i] corresponds to actions[i] — confirm
    // against the gateway contract.
    const retryableIds = [];
    const ackableIds = [];
    for (let i = 0; i < ids.length; i++) {
      const result = transportResult.value.results[i];
      if (result !== void 0 && isActionError(result) && result.retryable) {
        retryableIds.push(ids[i]);
      } else {
        ackableIds.push(ids[i]);
      }
    }
    if (ackableIds.length > 0) {
      await this.actionQueue.ack(ackableIds);
    }
    if (retryableIds.length > 0) {
      await this.actionQueue.nack(retryableIds);
    }
    for (const result of transportResult.value.results) {
      this.emit("onActionComplete", result.actionId, result);
    }
  }
  /**
   * Discover available connectors and their supported action types.
   *
   * Delegates to the transport's `describeActions()` method. Returns
   * empty connectors when the transport does not support discovery.
   */
  async describeActions() {
    if (!this.transport.describeActions) {
      return { ok: true, value: { connectors: {} } };
    }
    return this.transport.describeActions();
  }
  /** Stop auto-sync and clean up listeners */
  stopAutoSync() {
    if (this.syncIntervalId !== null) {
      clearInterval(this.syncIntervalId);
      this.syncIntervalId = null;
    }
    if (this.visibilityHandler) {
      if (typeof document !== "undefined") {
        document.removeEventListener("visibilitychange", this.visibilityHandler);
      }
      this.visibilityHandler = null;
    }
    this.transport.disconnect?.();
  }
};
1519
+
1520
+ // ../client/src/sync/schema-sync.ts
1521
var SchemaSynchroniser = class {
  constructor(db) {
    this.db = db;
  }
  /**
   * Compare local schema version with server and apply migrations if behind.
   *
   * When the local version already equals or exceeds the server version this
   * is a no-op. Otherwise the local schema is migrated to the server schema
   * via `migrateSchema()` and the recorded local version is bumped.
   *
   * @param table - The table name to synchronise
   * @param serverSchema - The server's current TableSchema
   * @param serverVersion - The server's schema version number
   * @returns Ok on success, or Err with a LakeSyncError on failure
   */
  async synchronise(table, serverSchema, serverVersion) {
    const localSchemaResult = await getSchema(this.db, table);
    if (!localSchemaResult.ok) return localSchemaResult;
    const localSchema = localSchemaResult.value;
    if (!localSchema) {
      return Err(
        new SchemaError(
          `Cannot synchronise schema for table "${table}": no local schema registered`
        )
      );
    }
    const versionResult = await this.getLocalVersion(table);
    if (!versionResult.ok) return versionResult;
    if (versionResult.value >= serverVersion) {
      // Already up to date (or ahead) — nothing to migrate.
      return Ok(void 0);
    }
    const migrated = await migrateSchema(this.db, localSchema, serverSchema);
    if (!migrated.ok) return migrated;
    const recorded = await this.setLocalVersion(table, serverVersion);
    if (!recorded.ok) return recorded;
    return Ok(void 0);
  }
  /**
   * Retrieve the local schema version for a given table from `_lakesync_meta`.
   *
   * @param table - The table name to look up
   * @returns The schema version number, or 0 if the table is not registered
   */
  async getLocalVersion(table) {
    const result = await this.db.query(
      "SELECT schema_version FROM _lakesync_meta WHERE table_name = ?",
      [table]
    );
    if (!result.ok) return result;
    const first = result.value[0];
    // Unregistered tables report 0 so any positive server version migrates.
    return first ? Ok(first.schema_version) : Ok(0);
  }
  /**
   * Set the local schema version for a given table in `_lakesync_meta`.
   *
   * NOTE(review): UPDATE silently no-ops when no `_lakesync_meta` row exists
   * for the table — presumably registration inserts that row; confirm.
   *
   * @param table - The table name to update
   * @param version - The version number to set
   * @returns Ok on success, or Err with a DbError on failure
   */
  async setLocalVersion(table, version) {
    return this.db.exec("UPDATE _lakesync_meta SET schema_version = ? WHERE table_name = ?", [
      version,
      table
    ]);
  }
};
1593
+
1594
+ // ../client/src/sync/transport-http.ts
1595
var HttpTransport = class {
  baseUrl;
  gatewayId;
  token;
  _fetch;
  constructor(config) {
    // Strip trailing slashes so URL joins never double up separators.
    this.baseUrl = config.baseUrl.replace(/\/+$/, "");
    this.gatewayId = config.gatewayId;
    this.token = config.token;
    // Bind the global fetch so it keeps its required `this` when stored.
    this._fetch = config.fetch ?? globalThis.fetch.bind(globalThis);
  }
  /**
   * Push local deltas to the remote gateway.
   *
   * Sends a POST request with the push payload as BigInt-safe JSON.
   */
  async push(msg) {
    const url = `${this.baseUrl}/sync/${this.gatewayId}/push`;
    try {
      const res = await this._fetch(url, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.token}`
        },
        body: JSON.stringify(msg, bigintReplacer)
      });
      if (!res.ok) {
        const text = await res.text().catch(() => "Unknown error");
        return Err(new LakeSyncError(`Push failed (${res.status}): ${text}`, "TRANSPORT_ERROR"));
      }
      // Parse via text + reviver to round-trip BigInt fields.
      return Ok(JSON.parse(await res.text(), bigintReviver));
    } catch (error) {
      const cause = toError(error);
      return Err(new LakeSyncError(`Push request failed: ${cause.message}`, "TRANSPORT_ERROR", cause));
    }
  }
  /**
   * Pull remote deltas from the gateway.
   *
   * Sends a GET request with query parameters for the pull cursor.
   */
  async pull(msg) {
    const query = new URLSearchParams({
      since: msg.sinceHlc.toString(),
      clientId: msg.clientId,
      limit: msg.maxDeltas.toString()
    });
    if (msg.source) {
      query.set("source", msg.source);
    }
    const url = `${this.baseUrl}/sync/${this.gatewayId}/pull?${query}`;
    try {
      const res = await this._fetch(url, {
        method: "GET",
        headers: {
          Authorization: `Bearer ${this.token}`
        }
      });
      if (!res.ok) {
        const text = await res.text().catch(() => "Unknown error");
        return Err(new LakeSyncError(`Pull failed (${res.status}): ${text}`, "TRANSPORT_ERROR"));
      }
      return Ok(JSON.parse(await res.text(), bigintReviver));
    } catch (error) {
      const cause = toError(error);
      return Err(new LakeSyncError(`Pull request failed: ${cause.message}`, "TRANSPORT_ERROR", cause));
    }
  }
  /**
   * Execute imperative actions against external systems via the gateway.
   *
   * Sends a POST request with the action payload as BigInt-safe JSON.
   */
  async executeAction(msg) {
    const url = `${this.baseUrl}/sync/${this.gatewayId}/action`;
    try {
      const res = await this._fetch(url, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.token}`
        },
        body: JSON.stringify(msg, bigintReplacer)
      });
      if (!res.ok) {
        const text = await res.text().catch(() => "Unknown error");
        return Err(new LakeSyncError(`Action failed (${res.status}): ${text}`, "TRANSPORT_ERROR"));
      }
      return Ok(JSON.parse(await res.text(), bigintReviver));
    } catch (error) {
      const cause = toError(error);
      return Err(new LakeSyncError(`Action request failed: ${cause.message}`, "TRANSPORT_ERROR", cause));
    }
  }
  /**
   * Discover available connectors and their supported action types.
   *
   * Sends a GET request to the actions discovery endpoint.
   */
  async describeActions() {
    const url = `${this.baseUrl}/sync/${this.gatewayId}/actions`;
    try {
      const res = await this._fetch(url, {
        method: "GET",
        headers: {
          Authorization: `Bearer ${this.token}`
        }
      });
      if (!res.ok) {
        const text = await res.text().catch(() => "Unknown error");
        return Err(
          new LakeSyncError(`Describe actions failed (${res.status}): ${text}`, "TRANSPORT_ERROR")
        );
      }
      // Plain JSON here — discovery payloads carry no BigInt fields.
      return Ok(await res.json());
    } catch (error) {
      const cause = toError(error);
      return Err(
        new LakeSyncError(`Describe actions request failed: ${cause.message}`, "TRANSPORT_ERROR", cause)
      );
    }
  }
  /**
   * Download checkpoint for initial sync.
   *
   * Requests the streaming checkpoint format via Accept header and reads
   * length-prefixed proto frames from the response body.
   */
  async checkpoint() {
    const url = `${this.baseUrl}/sync/${this.gatewayId}/checkpoint`;
    try {
      const res = await this._fetch(url, {
        method: "GET",
        headers: {
          Authorization: `Bearer ${this.token}`,
          Accept: "application/x-lakesync-checkpoint-stream"
        }
      });
      if (res.status === 404) {
        // No checkpoint available — caller falls back to incremental pull.
        return Ok(null);
      }
      if (!res.ok) {
        const text = await res.text().catch(() => "Unknown error");
        return Err(
          new LakeSyncError(`Checkpoint failed (${res.status}): ${text}`, "TRANSPORT_ERROR")
        );
      }
      const deltas = await readStreamingCheckpointDeltas(res);
      const hlcHeader = res.headers.get("X-Checkpoint-Hlc");
      return Ok({ deltas, snapshotHlc: hlcHeader ? BigInt(hlcHeader) : 0n });
    } catch (error) {
      const cause = toError(error);
      return Err(
        new LakeSyncError(`Checkpoint request failed: ${cause.message}`, "TRANSPORT_ERROR", cause)
      );
    }
  }
};
1762
// Read length-prefixed frames from a streaming checkpoint response body and
// collect every delta they contain. Each frame is a 4-byte big-endian length
// followed by an encoded SyncResponse; frames may span chunk boundaries.
async function readStreamingCheckpointDeltas(response) {
  const reader = response.body.getReader();
  const collected = [];
  let pending = new Uint8Array(0);
  while (true) {
    const chunk = await reader.read();
    if (chunk.done) break;
    // Append the new bytes to any partial frame left over from last time.
    const combined = new Uint8Array(pending.length + chunk.value.length);
    combined.set(pending, 0);
    combined.set(chunk.value, pending.length);
    pending = combined;
    // Drain every complete frame currently buffered.
    while (pending.length >= 4) {
      const frameLength = new DataView(pending.buffer, pending.byteOffset).getUint32(0, false);
      if (pending.length < 4 + frameLength) break;
      const frame = pending.slice(4, 4 + frameLength);
      pending = pending.slice(4 + frameLength);
      const decoded = decodeSyncResponse(frame);
      if (decoded.ok) {
        collected.push(...decoded.value.deltas);
      }
    }
  }
  return collected;
}
1786
+
1787
+ // ../client/src/sync/transport-local.ts
1788
/**
 * Sync transport backed by an in-process gateway (no network).
 *
 * Useful for tests and single-process deployments: push/pull calls are
 * forwarded directly to the gateway object.
 */
var LocalTransport = class {
  /**
   * @param gateway - In-process gateway implementing handlePush/handlePull,
   *   and optionally handleAction/describeActions.
   */
  constructor(gateway) {
    this.gateway = gateway;
  }
  /** Push local deltas to the in-process gateway */
  async push(msg) {
    return this.gateway.handlePush(msg);
  }
  /**
   * Pull remote deltas from the in-process gateway.
   *
   * Returning the gateway's value from an async method normalises both sync
   * and async gateways, so the previous `result instanceof Promise` ternary
   * (whose two arms were identical — dead branch) has been removed.
   */
  async pull(msg) {
    return this.gateway.handlePull(msg);
  }
  /** Local transport has no checkpoint — returns null */
  async checkpoint() {
    return Ok(null);
  }
  /**
   * Execute actions against the in-process gateway.
   *
   * Gateway errors are re-wrapped as LakeSyncError so callers see the same
   * error type regardless of transport.
   */
  async executeAction(msg) {
    if (!this.gateway.handleAction) {
      return Err(new LakeSyncError("Local gateway does not support actions", "TRANSPORT_ERROR"));
    }
    const result = await this.gateway.handleAction(msg);
    if (!result.ok) {
      return Err(new LakeSyncError(result.error.message, result.error.code));
    }
    return Ok(result.value);
  }
  /** Discover available connectors and their supported action types. */
  async describeActions() {
    if (!this.gateway.describeActions) {
      return Ok({ connectors: {} });
    }
    return Ok(this.gateway.describeActions());
  }
};
1824
+
1825
// ../client/src/sync/transport-ws.ts
// Exponential-backoff reconnect defaults (milliseconds): first retry after
// 1 s, doubling per attempt, capped at 30 s.
var DEFAULT_RECONNECT_BASE_MS = 1000;
var DEFAULT_RECONNECT_MAX_MS = 30000;
1828
/**
 * Sync transport over a WebSocket connection with automatic reconnect.
 *
 * Frames are binary: a one-byte tag (TAG_SYNC_PUSH / TAG_SYNC_PULL /
 * TAG_BROADCAST) followed by the proto-encoded payload. Requests are
 * strictly one-at-a-time: there is a single `pending` slot and responses
 * are matched to requests by arrival order, not by a correlation id — a new
 * request supersedes any in-flight one. Checkpoint downloads are delegated
 * to an optional HttpTransport (large binary payloads go over HTTP).
 */
var WebSocketTransport = class {
  config;
  // Backoff parameters, resolved from config or module defaults in the constructor.
  reconnectBaseMs;
  reconnectMaxMs;
  // Optional HTTP fallback, used only for checkpoint() downloads.
  httpTransport;
  ws = null;
  // Timer id of a scheduled reconnect attempt, or null when none is pending.
  reconnectTimer = null;
  // Consecutive failed attempts — drives the exponential backoff delay.
  reconnectAttempts = 0;
  _connected = false;
  // Set by disconnect() so the onclose handler skips scheduling a reconnect.
  intentionalClose = false;
  /** Pending request/response promise (push or pull). */
  pending = null;
  /** Broadcast callback registered by the SyncCoordinator. */
  broadcastCallback = null;
  constructor(config) {
    this.config = config;
    this.reconnectBaseMs = config.reconnectBaseMs ?? DEFAULT_RECONNECT_BASE_MS;
    this.reconnectMaxMs = config.reconnectMaxMs ?? DEFAULT_RECONNECT_MAX_MS;
    // Checkpoints go over HTTP only when an httpConfig was provided;
    // otherwise checkpoint() reports "no checkpoint" (Ok(null)).
    this.httpTransport = config.httpConfig ? new HttpTransport(config.httpConfig) : null;
    if (config.onBroadcast) {
      this.broadcastCallback = config.onBroadcast;
    }
  }
  /** Whether the WebSocket is currently connected. */
  get connected() {
    return this._connected;
  }
  /** Whether this transport supports real-time server push. */
  get supportsRealtime() {
    return true;
  }
  /** Register callback for server-initiated broadcasts. */
  onBroadcast(callback) {
    this.broadcastCallback = callback;
  }
  /** Open the WebSocket connection. Idempotent while a socket exists. */
  connect() {
    if (this.ws) return;
    this.intentionalClose = false;
    this.openWebSocket();
  }
  /** Close the WebSocket connection and stop reconnecting. */
  disconnect() {
    this.intentionalClose = true;
    if (this.reconnectTimer !== null) {
      clearTimeout(this.reconnectTimer);
      this.reconnectTimer = null;
    }
    if (this.ws) {
      // 1000 = normal closure per the WebSocket protocol.
      this.ws.close(1e3, "Client disconnect");
      this.ws = null;
    }
    this._connected = false;
    this.reconnectAttempts = 0;
    // Fail any in-flight request so its caller is not left hanging.
    if (this.pending) {
      this.pending.resolve(Err(new LakeSyncError("WebSocket disconnected", "TRANSPORT_ERROR")));
      this.pending = null;
    }
  }
  /**
   * Push local deltas to the gateway via WebSocket.
   */
  async push(msg) {
    const encoded = encodeSyncPush({
      clientId: msg.clientId,
      deltas: msg.deltas,
      lastSeenHlc: msg.lastSeenHlc
    });
    if (!encoded.ok) {
      return Err(new LakeSyncError(`Failed to encode push: ${encoded.error.message}`, "TRANSPORT_ERROR"));
    }
    // Frame layout: [1-byte tag][proto payload].
    const frame = new Uint8Array(1 + encoded.value.length);
    frame[0] = TAG_SYNC_PUSH;
    frame.set(encoded.value, 1);
    const response = await this.sendAndAwaitResponse(frame);
    if (!response.ok) return response;
    return Ok({
      serverHlc: response.value.serverHlc,
      // NOTE(review): when the server echoes no deltas, all sent deltas are
      // counted as accepted — confirm this matches the gateway's semantics.
      accepted: response.value.deltas.length === 0 ? msg.deltas.length : response.value.deltas.length
    });
  }
  /**
   * Pull remote deltas from the gateway via WebSocket.
   */
  async pull(msg) {
    const encoded = encodeSyncPull({
      clientId: msg.clientId,
      sinceHlc: msg.sinceHlc,
      maxDeltas: msg.maxDeltas
    });
    if (!encoded.ok) {
      return Err(new LakeSyncError(`Failed to encode pull: ${encoded.error.message}`, "TRANSPORT_ERROR"));
    }
    const frame = new Uint8Array(1 + encoded.value.length);
    frame[0] = TAG_SYNC_PULL;
    frame.set(encoded.value, 1);
    return this.sendAndAwaitResponse(frame);
  }
  /**
   * Download checkpoint via HTTP (large binary payloads are better over HTTP).
   * Returns Ok(null) when no HTTP transport was configured.
   */
  async checkpoint() {
    if (!this.httpTransport) {
      return Ok(null);
    }
    return this.httpTransport.checkpoint();
  }
  // -----------------------------------------------------------------------
  // Internal
  // -----------------------------------------------------------------------
  openWebSocket() {
    // Auth token travels as a query parameter; binary frames as ArrayBuffer.
    const url = `${this.config.url}?token=${encodeURIComponent(this.config.token)}`;
    this.ws = new WebSocket(url);
    this.ws.binaryType = "arraybuffer";
    this.ws.onopen = () => {
      this._connected = true;
      this.reconnectAttempts = 0;
    };
    this.ws.onmessage = (event) => {
      // Ignore non-binary frames and frames too short to hold tag + payload.
      if (!(event.data instanceof ArrayBuffer)) return;
      const bytes = new Uint8Array(event.data);
      if (bytes.length < 2) return;
      const tag = bytes[0];
      if (tag === TAG_BROADCAST) {
        // Server-initiated broadcast: deliver to the registered callback.
        const decoded = decodeBroadcastFrame(bytes);
        if (decoded.ok && this.broadcastCallback) {
          this.broadcastCallback(decoded.value.deltas, decoded.value.serverHlc);
        }
      } else {
        // Any non-broadcast frame is treated as the response to the single
        // pending request; frames with no pending request are dropped.
        const decoded = decodeSyncResponse(bytes);
        if (this.pending) {
          if (decoded.ok) {
            this.pending.resolve(Ok(decoded.value));
          } else {
            this.pending.resolve(
              Err(
                new LakeSyncError(
                  `Failed to decode response: ${decoded.error.message}`,
                  "TRANSPORT_ERROR"
                )
              )
            );
          }
          this.pending = null;
        }
      }
    };
    this.ws.onclose = () => {
      this._connected = false;
      this.ws = null;
      // Fail the in-flight request rather than leaving it unresolved.
      if (this.pending) {
        this.pending.resolve(
          Err(new LakeSyncError("WebSocket closed before response", "TRANSPORT_ERROR"))
        );
        this.pending = null;
      }
      if (!this.intentionalClose) {
        this.scheduleReconnect();
      }
    };
    this.ws.onerror = () => {
      // Intentionally empty. NOTE(review): relies on the close event firing
      // after an error — reconnect handling lives in onclose; confirm for
      // all target WebSocket implementations.
    };
  }
  scheduleReconnect() {
    // Exponential backoff: base * 2^attempts, capped at reconnectMaxMs.
    const delay = Math.min(this.reconnectBaseMs * 2 ** this.reconnectAttempts, this.reconnectMaxMs);
    this.reconnectAttempts++;
    this.reconnectTimer = setTimeout(() => {
      this.reconnectTimer = null;
      this.openWebSocket();
    }, delay);
  }
  sendAndAwaitResponse(frame) {
    return new Promise((resolve) => {
      if (!this.ws || !this._connected) {
        resolve(Err(new LakeSyncError("WebSocket not connected", "TRANSPORT_ERROR")));
        return;
      }
      // Only one request may be in flight: fail any previous pending request
      // before installing this one.
      if (this.pending) {
        this.pending.resolve(
          Err(new LakeSyncError("New request superseded pending request", "TRANSPORT_ERROR"))
        );
      }
      this.pending = {
        resolve,
        reject: (reason) => {
          resolve(Err(new LakeSyncError(reason.message, "TRANSPORT_ERROR")));
        }
      };
      try {
        this.ws.send(frame);
      } catch (error) {
        // Synchronous send failure: clear the slot and report immediately.
        const cause = toError(error);
        this.pending = null;
        resolve(Err(new LakeSyncError(`WebSocket send failed: ${cause.message}`, "TRANSPORT_ERROR")));
      }
    });
  }
};
2026
+ export {
2027
+ DbError,
2028
+ HttpTransport,
2029
+ IDBActionQueue,
2030
+ IDBQueue,
2031
+ LocalDB,
2032
+ LocalTransport,
2033
+ MemoryActionQueue,
2034
+ MemoryQueue,
2035
+ SchemaSynchroniser,
2036
+ SyncCoordinator,
2037
+ SyncTracker,
2038
+ WebSocketTransport,
2039
+ applyRemoteDeltas,
2040
+ deleteSnapshot,
2041
+ getSchema,
2042
+ loadSnapshot,
2043
+ migrateSchema,
2044
+ registerSchema,
2045
+ saveSnapshot,
2046
+ unwrapOrThrow
2047
+ };
2048
+ //# sourceMappingURL=client.js.map