lakesync 0.1.0

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their public registries.
Files changed (69)
  1. package/README.md +74 -0
  2. package/dist/adapter.d.ts +369 -0
  3. package/dist/adapter.js +39 -0
  4. package/dist/adapter.js.map +1 -0
  5. package/dist/analyst.d.ts +268 -0
  6. package/dist/analyst.js +495 -0
  7. package/dist/analyst.js.map +1 -0
  8. package/dist/auth-CAVutXzx.d.ts +30 -0
  9. package/dist/base-poller-Qo_SmCZs.d.ts +82 -0
  10. package/dist/catalogue.d.ts +65 -0
  11. package/dist/catalogue.js +17 -0
  12. package/dist/catalogue.js.map +1 -0
  13. package/dist/chunk-4ARO6KTJ.js +257 -0
  14. package/dist/chunk-4ARO6KTJ.js.map +1 -0
  15. package/dist/chunk-5YOFCJQ7.js +1115 -0
  16. package/dist/chunk-5YOFCJQ7.js.map +1 -0
  17. package/dist/chunk-7D4SUZUM.js +38 -0
  18. package/dist/chunk-7D4SUZUM.js.map +1 -0
  19. package/dist/chunk-BNJOGBYK.js +335 -0
  20. package/dist/chunk-BNJOGBYK.js.map +1 -0
  21. package/dist/chunk-ICNT7I3K.js +1180 -0
  22. package/dist/chunk-ICNT7I3K.js.map +1 -0
  23. package/dist/chunk-P5DRFKIT.js +413 -0
  24. package/dist/chunk-P5DRFKIT.js.map +1 -0
  25. package/dist/chunk-X3RO5SYJ.js +880 -0
  26. package/dist/chunk-X3RO5SYJ.js.map +1 -0
  27. package/dist/client.d.ts +428 -0
  28. package/dist/client.js +2048 -0
  29. package/dist/client.js.map +1 -0
  30. package/dist/compactor.d.ts +342 -0
  31. package/dist/compactor.js +793 -0
  32. package/dist/compactor.js.map +1 -0
  33. package/dist/coordinator-CxckTzYW.d.ts +396 -0
  34. package/dist/db-types-BR6Kt4uf.d.ts +29 -0
  35. package/dist/gateway-D5SaaMvT.d.ts +337 -0
  36. package/dist/gateway-server.d.ts +306 -0
  37. package/dist/gateway-server.js +4663 -0
  38. package/dist/gateway-server.js.map +1 -0
  39. package/dist/gateway.d.ts +196 -0
  40. package/dist/gateway.js +79 -0
  41. package/dist/gateway.js.map +1 -0
  42. package/dist/hlc-DiD8QNG3.d.ts +70 -0
  43. package/dist/index.d.ts +245 -0
  44. package/dist/index.js +102 -0
  45. package/dist/index.js.map +1 -0
  46. package/dist/json-dYtqiL0F.d.ts +18 -0
  47. package/dist/nessie-client-DrNikVXy.d.ts +160 -0
  48. package/dist/parquet.d.ts +78 -0
  49. package/dist/parquet.js +15 -0
  50. package/dist/parquet.js.map +1 -0
  51. package/dist/proto.d.ts +434 -0
  52. package/dist/proto.js +67 -0
  53. package/dist/proto.js.map +1 -0
  54. package/dist/react.d.ts +147 -0
  55. package/dist/react.js +224 -0
  56. package/dist/react.js.map +1 -0
  57. package/dist/resolver-C3Wphi6O.d.ts +10 -0
  58. package/dist/result-CojzlFE2.d.ts +64 -0
  59. package/dist/src-QU2YLPZY.js +383 -0
  60. package/dist/src-QU2YLPZY.js.map +1 -0
  61. package/dist/src-WYBF5LOI.js +102 -0
  62. package/dist/src-WYBF5LOI.js.map +1 -0
  63. package/dist/src-WZNPHANQ.js +426 -0
  64. package/dist/src-WZNPHANQ.js.map +1 -0
  65. package/dist/types-Bs-QyOe-.d.ts +143 -0
  66. package/dist/types-DAQL_vU_.d.ts +118 -0
  67. package/dist/types-DSC_EiwR.d.ts +45 -0
  68. package/dist/types-V_jVu2sA.d.ts +73 -0
  69. package/package.json +119 -0
package/dist/chunk-X3RO5SYJ.js
@@ -0,0 +1,880 @@
+ import {
+   AdapterError,
+   Err,
+   Ok,
+   toError
+ } from "./chunk-ICNT7I3K.js";
+
+ // ../adapter/src/shared.ts
+ function toCause(error) {
+   return error instanceof Error ? error : void 0;
+ }
+ async function wrapAsync(operation, errorMessage) {
+   try {
+     const value = await operation();
+     return Ok(value);
+   } catch (error) {
+     if (error instanceof AdapterError) {
+       return Err(error);
+     }
+     return Err(new AdapterError(errorMessage, toCause(error)));
+   }
+ }
+ function mergeLatestState(rows) {
+   if (rows.length === 0) return null;
+   const lastRow = rows[rows.length - 1];
+   if (lastRow.op === "DELETE") return null;
+   const state = {};
+   for (const row of rows) {
+     if (row.op === "DELETE") {
+       for (const key of Object.keys(state)) {
+         delete state[key];
+       }
+       continue;
+     }
+     const columns = typeof row.columns === "string" ? JSON.parse(row.columns) : row.columns;
+     for (const col of columns) {
+       state[col.column] = col.value;
+     }
+   }
+   return state;
+ }
+
+ // ../adapter/src/bigquery.ts
+ import { BigQuery } from "@google-cloud/bigquery";
+ function rowToRowDelta(row) {
+   const columns = typeof row.columns === "string" ? JSON.parse(row.columns) : row.columns;
+   const hlcRaw = row.hlc;
+   const hlcString = typeof hlcRaw === "object" && hlcRaw !== null && "value" in hlcRaw ? hlcRaw.value : String(hlcRaw);
+   return {
+     deltaId: row.delta_id,
+     table: row.table,
+     rowId: row.row_id,
+     columns,
+     hlc: BigInt(hlcString),
+     clientId: row.client_id,
+     op: row.op
+   };
+ }
+ var BigQueryAdapter = class {
+   /** @internal */
+   client;
+   /** @internal */
+   dataset;
+   /** @internal */
+   location;
+   constructor(config) {
+     this.client = new BigQuery({
+       projectId: config.projectId,
+       keyFilename: config.keyFilename
+     });
+     this.dataset = config.dataset;
+     this.location = config.location ?? "US";
+   }
+   /**
+    * Insert deltas into the database in a single batch.
+    * Idempotent via MERGE — existing deltaIds are silently skipped.
+    */
+   async insertDeltas(deltas) {
+     if (deltas.length === 0) {
+       return Ok(void 0);
+     }
+     return wrapAsync(async () => {
+       const params = {};
+       const selects = [];
+       for (let i = 0; i < deltas.length; i++) {
+         const d = deltas[i];
+         params[`did_${i}`] = d.deltaId;
+         params[`tbl_${i}`] = d.table;
+         params[`rid_${i}`] = d.rowId;
+         params[`col_${i}`] = JSON.stringify(d.columns);
+         params[`hlc_${i}`] = d.hlc.toString();
+         params[`cid_${i}`] = d.clientId;
+         params[`op_${i}`] = d.op;
+         selects.push(
+           `SELECT @did_${i} AS delta_id, @tbl_${i} AS \`table\`, @rid_${i} AS row_id, @col_${i} AS columns, CAST(@hlc_${i} AS INT64) AS hlc, @cid_${i} AS client_id, @op_${i} AS op`
+         );
+       }
+       const sql = `MERGE \`${this.dataset}.lakesync_deltas\` AS target
+         USING (${selects.join(" UNION ALL ")}) AS source
+         ON target.delta_id = source.delta_id
+         WHEN NOT MATCHED THEN INSERT (delta_id, \`table\`, row_id, columns, hlc, client_id, op)
+         VALUES (source.delta_id, source.\`table\`, source.row_id, source.columns, source.hlc, source.client_id, source.op)`;
+       await this.client.query({ query: sql, params, location: this.location });
+     }, "Failed to insert deltas");
+   }
+   /**
+    * Query deltas with HLC greater than the given timestamp, optionally filtered by table.
+    */
+   async queryDeltasSince(hlc, tables) {
+     return wrapAsync(async () => {
+       let sql;
+       const params = {
+         sinceHlc: hlc.toString()
+       };
+       if (tables && tables.length > 0) {
+         sql = `SELECT delta_id, \`table\`, row_id, columns, hlc, client_id, op
+           FROM \`${this.dataset}.lakesync_deltas\`
+           WHERE hlc > CAST(@sinceHlc AS INT64) AND \`table\` IN UNNEST(@tables)
+           ORDER BY hlc ASC`;
+         params.tables = tables;
+       } else {
+         sql = `SELECT delta_id, \`table\`, row_id, columns, hlc, client_id, op
+           FROM \`${this.dataset}.lakesync_deltas\`
+           WHERE hlc > CAST(@sinceHlc AS INT64)
+           ORDER BY hlc ASC`;
+       }
+       const [rows] = await this.client.query({
+         query: sql,
+         params,
+         location: this.location
+       });
+       return rows.map(rowToRowDelta);
+     }, "Failed to query deltas");
+   }
+   /**
+    * Get the latest merged state for a specific row using column-level LWW.
+    * Returns null if no deltas exist for this row.
+    */
+   async getLatestState(table, rowId) {
+     return wrapAsync(async () => {
+       const sql = `SELECT columns, hlc, client_id, op
+         FROM \`${this.dataset}.lakesync_deltas\`
+         WHERE \`table\` = @tbl AND row_id = @rid
+         ORDER BY hlc ASC`;
+       const [rows] = await this.client.query({
+         query: sql,
+         params: { tbl: table, rid: rowId },
+         location: this.location
+       });
+       return mergeLatestState(rows);
+     }, `Failed to get latest state for ${table}:${rowId}`);
+   }
+   /**
+    * Ensure the BigQuery dataset and lakesync_deltas table exist.
+    * The `schema` parameter is accepted for interface compliance but the
+    * internal table structure is fixed (deltas store column data as JSON).
+    */
+   async ensureSchema(_schema) {
+     return wrapAsync(async () => {
+       const datasetRef = this.client.dataset(this.dataset);
+       const [datasetExists] = await datasetRef.exists();
+       if (!datasetExists) {
+         await this.client.createDataset(this.dataset, {
+           location: this.location
+         });
+       }
+       await this.client.query({
+         query: `CREATE TABLE IF NOT EXISTS \`${this.dataset}.lakesync_deltas\` (
+           delta_id STRING NOT NULL,
+           \`table\` STRING NOT NULL,
+           row_id STRING NOT NULL,
+           columns JSON NOT NULL,
+           hlc INT64 NOT NULL,
+           client_id STRING NOT NULL,
+           op STRING NOT NULL
+         )
+         CLUSTER BY \`table\`, hlc`,
+         location: this.location
+       });
+     }, "Failed to ensure schema");
+   }
+   /**
+    * No-op — BigQuery client is HTTP-based with no persistent connections.
+    */
+   async close() {
+   }
+ };
+
+ // ../adapter/src/composite.ts
+ var CompositeAdapter = class {
+   routeMap;
+   adapters;
+   defaultAdapter;
+   constructor(config) {
+     this.routeMap = /* @__PURE__ */ new Map();
+     this.adapters = /* @__PURE__ */ new Set();
+     this.defaultAdapter = config.defaultAdapter;
+     this.adapters.add(config.defaultAdapter);
+     for (const route of config.routes) {
+       this.adapters.add(route.adapter);
+       for (const table of route.tables) {
+         if (this.routeMap.has(table)) {
+           throw new Error(`Duplicate table route: "${table}" appears in multiple routes`);
+         }
+         this.routeMap.set(table, route.adapter);
+       }
+     }
+   }
+   /** Insert deltas, routing each group to the correct adapter by table. */
+   async insertDeltas(deltas) {
+     const groups = /* @__PURE__ */ new Map();
+     for (const delta of deltas) {
+       const adapter = this.routeMap.get(delta.table) ?? this.defaultAdapter;
+       let group = groups.get(adapter);
+       if (!group) {
+         group = [];
+         groups.set(adapter, group);
+       }
+       group.push(delta);
+     }
+     for (const [adapter, group] of groups) {
+       const result = await adapter.insertDeltas(group);
+       if (!result.ok) {
+         return result;
+       }
+     }
+     return Ok(void 0);
+   }
+   /** Query deltas since a given HLC, fanning out to relevant adapters and merging results. */
+   async queryDeltasSince(hlc, tables) {
+     const adapterSet = /* @__PURE__ */ new Set();
+     const adapterTables = /* @__PURE__ */ new Map();
+     if (tables && tables.length > 0) {
+       for (const table of tables) {
+         const adapter = this.routeMap.get(table) ?? this.defaultAdapter;
+         adapterSet.add(adapter);
+         let existing = adapterTables.get(adapter);
+         if (!existing) {
+           existing = [];
+           adapterTables.set(adapter, existing);
+         }
+         existing.push(table);
+       }
+     } else {
+       for (const adapter of this.adapters) {
+         adapterSet.add(adapter);
+       }
+     }
+     const merged = [];
+     for (const adapter of adapterSet) {
+       const filterTables = adapterTables.get(adapter);
+       const result = await adapter.queryDeltasSince(hlc, filterTables);
+       if (!result.ok) {
+         return result;
+       }
+       merged.push(...result.value);
+     }
+     merged.sort((a, b) => a.hlc < b.hlc ? -1 : a.hlc > b.hlc ? 1 : 0);
+     return Ok(merged);
+   }
+   /** Get the latest state for a row, routing to the correct adapter. */
+   async getLatestState(table, rowId) {
+     const adapter = this.routeMap.get(table) ?? this.defaultAdapter;
+     return adapter.getLatestState(table, rowId);
+   }
+   /** Ensure schema exists, routing to the correct adapter for the table. */
+   async ensureSchema(schema) {
+     const adapter = this.routeMap.get(schema.table) ?? this.defaultAdapter;
+     return adapter.ensureSchema(schema);
+   }
+   /** Close all unique adapters (routes + default, deduplicated). */
+   async close() {
+     for (const adapter of this.adapters) {
+       await adapter.close();
+     }
+   }
+ };
+
+ // ../adapter/src/db-types.ts
+ var BIGQUERY_TYPE_MAP = {
+   string: "STRING",
+   number: "FLOAT64",
+   boolean: "BOOL",
+   json: "JSON",
+   null: "STRING"
+ };
+ function lakeSyncTypeToBigQuery(type) {
+   return BIGQUERY_TYPE_MAP[type];
+ }
+ function isDatabaseAdapter(adapter) {
+   return adapter !== null && typeof adapter === "object" && "insertDeltas" in adapter && "queryDeltasSince" in adapter && typeof adapter.insertDeltas === "function";
+ }
+
+ // ../adapter/src/mysql.ts
+ import mysql from "mysql2/promise";
+ var MYSQL_TYPE_MAP = {
+   string: "TEXT",
+   number: "DOUBLE",
+   boolean: "TINYINT(1)",
+   json: "JSON",
+   null: "TEXT"
+ };
+ function lakeSyncTypeToMySQL(type) {
+   return MYSQL_TYPE_MAP[type];
+ }
+ var MySQLAdapter = class {
+   /** @internal */
+   pool;
+   constructor(config) {
+     this.pool = mysql.createPool(config.connectionString);
+   }
+   /**
+    * Insert deltas into the database in a single batch.
+    * Uses INSERT IGNORE for idempotent writes — duplicate deltaIds are silently skipped.
+    */
+   async insertDeltas(deltas) {
+     if (deltas.length === 0) {
+       return Ok(void 0);
+     }
+     return wrapAsync(async () => {
+       const sql = `INSERT IGNORE INTO lakesync_deltas (delta_id, \`table\`, row_id, columns, hlc, client_id, op) VALUES ${deltas.map(() => "(?, ?, ?, ?, ?, ?, ?)").join(", ")}`;
+       const values = [];
+       for (const delta of deltas) {
+         values.push(
+           delta.deltaId,
+           delta.table,
+           delta.rowId,
+           JSON.stringify(delta.columns),
+           delta.hlc.toString(),
+           delta.clientId,
+           delta.op
+         );
+       }
+       await this.pool.execute(sql, values);
+     }, "Failed to insert deltas");
+   }
+   /**
+    * Query deltas with HLC greater than the given timestamp.
+    * Optionally filtered by table name(s).
+    */
+   async queryDeltasSince(hlc, tables) {
+     return wrapAsync(async () => {
+       let sql = "SELECT delta_id, `table`, row_id, columns, hlc, client_id, op FROM lakesync_deltas WHERE hlc > ?";
+       const params = [hlc.toString()];
+       if (tables && tables.length > 0) {
+         sql += ` AND \`table\` IN (${tables.map(() => "?").join(", ")})`;
+         params.push(...tables);
+       }
+       sql += " ORDER BY hlc ASC";
+       const [rows] = await this.pool.execute(sql, params);
+       return rows.map(rowToDelta);
+     }, "Failed to query deltas");
+   }
+   /**
+    * Get the latest merged state for a specific row using column-level LWW.
+    * Returns null if no deltas exist or if the row is tombstoned by DELETE.
+    */
+   async getLatestState(table, rowId) {
+     return wrapAsync(async () => {
+       const sql = "SELECT columns, hlc, client_id, op FROM lakesync_deltas WHERE `table` = ? AND row_id = ? ORDER BY hlc ASC";
+       const [rows] = await this.pool.execute(sql, [table, rowId]);
+       return mergeLatestState(rows);
+     }, `Failed to get latest state for ${table}:${rowId}`);
+   }
+   /**
+    * Ensure the database schema exists. Creates the lakesync_deltas table
+    * and a user table matching the given TableSchema definition.
+    */
+   async ensureSchema(schema) {
+     return wrapAsync(async () => {
+       await this.pool.execute(`
+         CREATE TABLE IF NOT EXISTS lakesync_deltas (
+           delta_id VARCHAR(255) PRIMARY KEY,
+           \`table\` VARCHAR(255) NOT NULL,
+           row_id VARCHAR(255) NOT NULL,
+           columns JSON NOT NULL,
+           hlc BIGINT NOT NULL,
+           client_id VARCHAR(255) NOT NULL,
+           op VARCHAR(50) NOT NULL DEFAULT 'upsert',
+           INDEX idx_hlc (hlc),
+           INDEX idx_table_row (\`table\`, row_id)
+         )
+       `);
+       const columnDefs = schema.columns.map((col) => `\`${col.name}\` ${lakeSyncTypeToMySQL(col.type)}`).join(", ");
+       await this.pool.execute(
+         `CREATE TABLE IF NOT EXISTS \`${schema.table}\` (row_id VARCHAR(255) PRIMARY KEY, ${columnDefs})`
+       );
+     }, `Failed to ensure schema for table ${schema.table}`);
+   }
+   /** Close the database connection pool and release resources. */
+   async close() {
+     await this.pool.end();
+   }
+ };
+ function rowToDelta(row) {
+   return {
+     deltaId: row.delta_id,
+     table: row.table,
+     rowId: row.row_id,
+     columns: typeof row.columns === "string" ? JSON.parse(row.columns) : row.columns,
+     hlc: BigInt(row.hlc),
+     clientId: row.client_id,
+     op: row.op
+   };
+ }
+
+ // ../adapter/src/postgres.ts
+ import { Pool } from "pg";
+ var PostgresAdapter = class {
+   /** @internal */
+   pool;
+   constructor(config) {
+     const poolConfig = {
+       connectionString: config.connectionString
+     };
+     this.pool = new Pool(poolConfig);
+   }
+   /**
+    * Insert deltas into the database in a single batch.
+    * Idempotent via `ON CONFLICT (delta_id) DO NOTHING`.
+    */
+   async insertDeltas(deltas) {
+     if (deltas.length === 0) {
+       return Ok(void 0);
+     }
+     return wrapAsync(async () => {
+       const values = [];
+       const rows = [];
+       for (let i = 0; i < deltas.length; i++) {
+         const d = deltas[i];
+         const offset = i * 7;
+         rows.push(
+           `($${offset + 1}, $${offset + 2}, $${offset + 3}, $${offset + 4}, $${offset + 5}, $${offset + 6}, $${offset + 7})`
+         );
+         values.push(
+           d.deltaId,
+           d.table,
+           d.rowId,
+           JSON.stringify(d.columns),
+           d.hlc.toString(),
+           d.clientId,
+           d.op
+         );
+       }
+       const sql = `INSERT INTO lakesync_deltas (delta_id, "table", row_id, columns, hlc, client_id, op)
+         VALUES ${rows.join(", ")}
+         ON CONFLICT (delta_id) DO NOTHING`;
+       await this.pool.query(sql, values);
+     }, "Failed to insert deltas");
+   }
+   /**
+    * Query deltas with HLC greater than the given timestamp, optionally filtered by table.
+    */
+   async queryDeltasSince(hlc, tables) {
+     return wrapAsync(async () => {
+       let sql;
+       let params;
+       if (tables && tables.length > 0) {
+         sql = `SELECT delta_id, "table", row_id, columns, hlc, client_id, op
+           FROM lakesync_deltas
+           WHERE hlc > $1 AND "table" = ANY($2)
+           ORDER BY hlc ASC`;
+         params = [hlc.toString(), tables];
+       } else {
+         sql = `SELECT delta_id, "table", row_id, columns, hlc, client_id, op
+           FROM lakesync_deltas
+           WHERE hlc > $1
+           ORDER BY hlc ASC`;
+         params = [hlc.toString()];
+       }
+       const result = await this.pool.query(sql, params);
+       return result.rows.map(rowToRowDelta2);
+     }, "Failed to query deltas");
+   }
+   /**
+    * Get the latest merged state for a specific row using column-level LWW.
+    * Returns null if no deltas exist for this row.
+    */
+   async getLatestState(table, rowId) {
+     return wrapAsync(async () => {
+       const sql = `SELECT columns, hlc, client_id, op
+         FROM lakesync_deltas
+         WHERE "table" = $1 AND row_id = $2
+         ORDER BY hlc ASC`;
+       const result = await this.pool.query(sql, [table, rowId]);
+       return mergeLatestState(result.rows);
+     }, `Failed to get latest state for ${table}:${rowId}`);
+   }
+   /**
+    * Ensure the lakesync_deltas table and indices exist.
+    * The `schema` parameter is accepted for interface compliance but the
+    * internal table structure is fixed (deltas store column data as JSONB).
+    */
+   async ensureSchema(_schema) {
+     return wrapAsync(async () => {
+       await this.pool.query(`
+         CREATE TABLE IF NOT EXISTS lakesync_deltas (
+           delta_id TEXT PRIMARY KEY,
+           "table" TEXT NOT NULL,
+           row_id TEXT NOT NULL,
+           columns JSONB NOT NULL,
+           hlc BIGINT NOT NULL,
+           client_id TEXT NOT NULL,
+           op TEXT NOT NULL DEFAULT 'INSERT'
+         );
+         CREATE INDEX IF NOT EXISTS idx_lakesync_deltas_hlc ON lakesync_deltas (hlc);
+         CREATE INDEX IF NOT EXISTS idx_lakesync_deltas_table_row ON lakesync_deltas ("table", row_id);
+       `);
+     }, "Failed to ensure schema");
+   }
+   /** Close the database connection pool and release resources. */
+   async close() {
+     await this.pool.end();
+   }
+ };
+ function rowToRowDelta2(row) {
+   const columns = typeof row.columns === "string" ? JSON.parse(row.columns) : row.columns;
+   return {
+     deltaId: row.delta_id,
+     table: row.table,
+     rowId: row.row_id,
+     columns,
+     hlc: BigInt(row.hlc),
+     clientId: row.client_id,
+     op: row.op
+   };
+ }
+
+ // ../adapter/src/factory.ts
+ function createDatabaseAdapter(config) {
+   try {
+     switch (config.type) {
+       case "postgres": {
+         if (!config.postgres) {
+           return Err(new AdapterError("Postgres connector config missing postgres field"));
+         }
+         return Ok(
+           new PostgresAdapter({
+             connectionString: config.postgres.connectionString
+           })
+         );
+       }
+       case "mysql": {
+         if (!config.mysql) {
+           return Err(new AdapterError("MySQL connector config missing mysql field"));
+         }
+         return Ok(
+           new MySQLAdapter({
+             connectionString: config.mysql.connectionString
+           })
+         );
+       }
+       case "bigquery": {
+         if (!config.bigquery) {
+           return Err(new AdapterError("BigQuery connector config missing bigquery field"));
+         }
+         return Ok(
+           new BigQueryAdapter({
+             projectId: config.bigquery.projectId,
+             dataset: config.bigquery.dataset,
+             keyFilename: config.bigquery.keyFilename,
+             location: config.bigquery.location
+           })
+         );
+       }
+       default:
+         return Err(new AdapterError(`Unsupported connector type: ${config.type}`));
+     }
+   } catch (err) {
+     return Err(new AdapterError(`Failed to create adapter: ${toError(err).message}`));
+   }
+ }
+
+ // ../adapter/src/fan-out.ts
+ var FanOutAdapter = class {
+   primary;
+   secondaries;
+   constructor(config) {
+     this.primary = config.primary;
+     this.secondaries = config.secondaries;
+   }
+   /** Insert deltas into the primary, then replicate to secondaries (fire-and-forget). */
+   async insertDeltas(deltas) {
+     const result = await this.primary.insertDeltas(deltas);
+     if (!result.ok) {
+       return result;
+     }
+     for (const secondary of this.secondaries) {
+       secondary.insertDeltas(deltas).catch(() => {
+       });
+     }
+     return Ok(void 0);
+   }
+   /** Query deltas from the primary adapter only. */
+   async queryDeltasSince(hlc, tables) {
+     return this.primary.queryDeltasSince(hlc, tables);
+   }
+   /** Get the latest state from the primary adapter only. */
+   async getLatestState(table, rowId) {
+     return this.primary.getLatestState(table, rowId);
+   }
+   /** Ensure schema on the primary first, then best-effort on secondaries. */
+   async ensureSchema(schema) {
+     const result = await this.primary.ensureSchema(schema);
+     if (!result.ok) {
+       return result;
+     }
+     for (const secondary of this.secondaries) {
+       secondary.ensureSchema(schema).catch(() => {
+       });
+     }
+     return Ok(void 0);
+   }
+   /** Close primary and all secondary adapters. */
+   async close() {
+     await this.primary.close();
+     for (const secondary of this.secondaries) {
+       await secondary.close();
+     }
+   }
+ };
+
+ // ../adapter/src/lifecycle.ts
+ var LifecycleAdapter = class {
+   hot;
+   cold;
+   maxAgeMs;
+   constructor(config) {
+     this.hot = config.hot.adapter;
+     this.cold = config.cold.adapter;
+     this.maxAgeMs = config.hot.maxAgeMs;
+   }
+   /** Insert deltas into the hot adapter — new data is always hot. */
+   async insertDeltas(deltas) {
+     return this.hot.insertDeltas(deltas);
+   }
+   /**
+    * Query deltas since the given HLC.
+    *
+    * If `sinceHlc` is older than `now - maxAgeMs`, queries both hot and cold
+    * adapters and merges the results sorted by HLC. Otherwise queries hot only.
+    */
+   async queryDeltasSince(hlc, tables) {
+     const sinceWallMs = Number(hlc >> 16n);
+     const thresholdMs = Date.now() - this.maxAgeMs;
+     if (sinceWallMs < thresholdMs) {
+       const [hotResult, coldResult] = await Promise.all([
+         this.hot.queryDeltasSince(hlc, tables),
+         this.cold.queryDeltasSince(hlc, tables)
+       ]);
+       if (!hotResult.ok) return hotResult;
+       if (!coldResult.ok) return coldResult;
+       const merged = [...hotResult.value, ...coldResult.value];
+       merged.sort((a, b) => a.hlc < b.hlc ? -1 : a.hlc > b.hlc ? 1 : 0);
+       return Ok(merged);
+     }
+     return this.hot.queryDeltasSince(hlc, tables);
+   }
+   /** Get latest state — try hot first, fall back to cold if hot returns null. */
+   async getLatestState(table, rowId) {
+     const hotResult = await this.hot.getLatestState(table, rowId);
+     if (!hotResult.ok) return hotResult;
+     if (hotResult.value !== null) {
+       return hotResult;
+     }
+     return this.cold.getLatestState(table, rowId);
+   }
+   /** Ensure schema exists on both hot and cold adapters. */
+   async ensureSchema(schema) {
+     const hotResult = await this.hot.ensureSchema(schema);
+     if (!hotResult.ok) return hotResult;
+     return this.cold.ensureSchema(schema);
+   }
+   /** Close both hot and cold adapters. */
+   async close() {
+     await this.hot.close();
+     await this.cold.close();
+   }
+ };
+ async function migrateToTier(hot, cold, maxAgeMs) {
+   const thresholdMs = Date.now() - maxAgeMs;
+   const thresholdHlc = BigInt(0) << 16n;
+   const result = await hot.queryDeltasSince(thresholdHlc);
+   if (!result.ok) return result;
+   const oldDeltas = result.value.filter((delta) => {
+     const wallMs = Number(delta.hlc >> 16n);
+     return wallMs < thresholdMs;
+   });
+   if (oldDeltas.length === 0) {
+     return Ok({ migrated: 0 });
+   }
+   const insertResult = await cold.insertDeltas(oldDeltas);
+   if (!insertResult.ok) return insertResult;
+   return Ok({ migrated: oldDeltas.length });
+ }
+
+ // ../adapter/src/migrate.ts
+ async function migrateAdapter(opts) {
+   const batchSize = opts.batchSize ?? 1e3;
+   const readResult = await opts.from.queryDeltasSince(BigInt(0), opts.tables);
+   if (!readResult.ok) {
+     return readResult;
+   }
+   const deltas = readResult.value;
+   if (deltas.length === 0) {
+     return Ok({ totalDeltas: 0, batches: 0 });
+   }
+   let batchCount = 0;
+   let totalSoFar = 0;
+   for (let i = 0; i < deltas.length; i += batchSize) {
+     const batch = deltas.slice(i, i + batchSize);
+     const writeResult = await opts.to.insertDeltas(batch);
+     if (!writeResult.ok) {
+       return writeResult;
+     }
+     batchCount++;
+     totalSoFar += batch.length;
+     opts.onProgress?.({ batch: batchCount, totalSoFar });
+   }
+   return Ok({ totalDeltas: totalSoFar, batches: batchCount });
+ }
+
+ // ../adapter/src/minio.ts
+ import {
+   DeleteObjectCommand,
+   DeleteObjectsCommand,
+   GetObjectCommand,
+   HeadObjectCommand,
+   ListObjectsV2Command,
+   PutObjectCommand,
+   S3Client
+ } from "@aws-sdk/client-s3";
+ var MinIOAdapter = class {
+   client;
+   bucket;
+   constructor(config) {
+     this.bucket = config.bucket;
+     this.client = new S3Client({
+       endpoint: config.endpoint,
+       region: config.region ?? "us-east-1",
+       credentials: config.credentials,
+       forcePathStyle: true
+       // Required for MinIO
+     });
+   }
+   /** Store an object in the lake */
+   async putObject(path, data, contentType) {
+     return wrapAsync(async () => {
+       await this.client.send(
+         new PutObjectCommand({
+           Bucket: this.bucket,
+           Key: path,
+           Body: data,
+           ContentType: contentType
+         })
+       );
+     }, `Failed to put object: ${path}`);
+   }
+   /** Retrieve an object from the lake */
+   async getObject(path) {
+     return wrapAsync(async () => {
+       const response = await this.client.send(
+         new GetObjectCommand({
+           Bucket: this.bucket,
+           Key: path
+         })
+       );
+       const bytes = await response.Body?.transformToByteArray();
+       if (!bytes) {
+         throw new AdapterError(`Empty response for object: ${path}`);
+       }
+       return bytes;
+     }, `Failed to get object: ${path}`);
+   }
+   /** Get object metadata without retrieving the body */
+   async headObject(path) {
+     return wrapAsync(async () => {
+       const response = await this.client.send(
+         new HeadObjectCommand({
+           Bucket: this.bucket,
+           Key: path
+         })
+       );
+       return {
+         size: response.ContentLength ?? 0,
+         lastModified: response.LastModified ?? /* @__PURE__ */ new Date(0)
+       };
+     }, `Failed to head object: ${path}`);
+   }
+   /** List objects matching a given prefix */
+   async listObjects(prefix) {
+     return wrapAsync(async () => {
+       const response = await this.client.send(
+         new ListObjectsV2Command({
+           Bucket: this.bucket,
+           Prefix: prefix
+         })
+       );
+       return (response.Contents ?? []).map((item) => ({
+         key: item.Key ?? "",
+         size: item.Size ?? 0,
+         lastModified: item.LastModified ?? /* @__PURE__ */ new Date(0)
+       }));
+     }, `Failed to list objects with prefix: ${prefix}`);
+   }
+   /** Delete a single object from the lake */
+   async deleteObject(path) {
+     return wrapAsync(async () => {
+       await this.client.send(
+         new DeleteObjectCommand({
+           Bucket: this.bucket,
+           Key: path
+         })
+       );
+     }, `Failed to delete object: ${path}`);
+   }
+   /** Delete multiple objects from the lake in a single batch operation */
+   async deleteObjects(paths) {
+     if (paths.length === 0) {
+       return Ok(void 0);
+     }
+     return wrapAsync(async () => {
+       await this.client.send(
+         new DeleteObjectsCommand({
+           Bucket: this.bucket,
+           Delete: {
+             Objects: paths.map((key) => ({ Key: key })),
+             Quiet: true
+           }
+         })
+       );
+     }, `Failed to batch delete ${paths.length} objects`);
+   }
+ };
+
+ // ../adapter/src/query-fn.ts
+ async function createQueryFn(config) {
+   switch (config.type) {
+     case "postgres": {
+       if (!config.postgres) return null;
+       const { Pool: Pool2 } = await import("pg");
+       const pool = new Pool2({ connectionString: config.postgres.connectionString });
+       return async (sql, params) => {
+         const result = await pool.query(sql, params);
+         return result.rows;
+       };
+     }
+     case "mysql": {
+       if (!config.mysql) return null;
+       const mysql2 = await import("mysql2/promise");
+       const pool = mysql2.createPool(config.mysql.connectionString);
+       return async (sql, params) => {
+         const [rows] = await pool.query(sql, params);
+         return rows;
+       };
+     }
+     default:
+       return null;
+   }
+ }
+
+ export {
+   toCause,
+   wrapAsync,
+   mergeLatestState,
+   BigQueryAdapter,
+   CompositeAdapter,
+   lakeSyncTypeToBigQuery,
+   isDatabaseAdapter,
+   MySQLAdapter,
+   PostgresAdapter,
+   createDatabaseAdapter,
+   FanOutAdapter,
+   LifecycleAdapter,
+   migrateToTier,
+   migrateAdapter,
+   MinIOAdapter,
+   createQueryFn
+ };
+ //# sourceMappingURL=chunk-X3RO5SYJ.js.map
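
The export list above is the bundle's full adapter surface. For orientation, a minimal usage sketch follows. It is illustrative only, not taken from the package's documentation: the "lakesync/adapter" import path is an assumption (dist/adapter.js in the file list suggests a re-export wrapper), while the constructor shapes, the Ok/Err results checked via `.ok`, the delta fields, and the HLC layout (wall-clock milliseconds in the bits above 16, per `hlc >> 16n` in LifecycleAdapter) are all inferred from the bundled source in this diff.

// Illustrative sketch only — import path assumed; shapes inferred from the chunk above.
import {
  PostgresAdapter,
  LifecycleAdapter,
  migrateToTier,
  mergeLatestState
} from "lakesync/adapter";

const hot = new PostgresAdapter({ connectionString: "postgres://localhost/hot" });
const cold = new PostgresAdapter({ connectionString: "postgres://localhost/cold" });

// Reads that reach back more than 7 days fan out to both tiers (see LifecycleAdapter).
const weekMs = 7 * 24 * 60 * 60 * 1000;
const tiered = new LifecycleAdapter({
  hot: { adapter: hot, maxAgeMs: weekMs },
  cold: { adapter: cold }
});

// LifecycleAdapter recovers wall-clock ms via `hlc >> 16n`, so a fresh HLC
// can be built by shifting the current time into the upper bits.
const delta = {
  deltaId: crypto.randomUUID(),
  table: "todos",
  rowId: "todo-1",
  columns: [{ column: "title", value: "buy milk" }],
  hlc: BigInt(Date.now()) << 16n,
  clientId: "client-1",
  op: "INSERT"
};

const inserted = await tiered.insertDeltas([delta]);
if (!inserted.ok) throw new Error("insert failed"); // wrapAsync yields an AdapterError here

// Pull everything since HLC 0 for one table; results arrive sorted by HLC.
const since = await tiered.queryDeltasSince(0n, ["todos"]);
if (since.ok) console.log(`${since.value.length} deltas`);

// Column-level LWW merge: later rows win per column, DELETE tombstones the row.
mergeLatestState([
  { op: "INSERT", columns: [{ column: "title", value: "buy milk" }] },
  { op: "INSERT", columns: [{ column: "done", value: true }] }
]); // -> { title: "buy milk", done: true }

// Periodically age hot deltas past the threshold into the cold tier.
await migrateToTier(hot, cold, weekMs);

The same Result convention runs through every adapter in the chunk, so CompositeAdapter, FanOutAdapter, and createDatabaseAdapter compose in the same way.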