lakesync 0.1.4 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/dist/adapter.d.ts +20 -5
  2. package/dist/adapter.js +8 -2
  3. package/dist/analyst.js +1 -1
  4. package/dist/{base-poller-CBvhdvcj.d.ts → base-poller-BpUyuG2R.d.ts} +1 -1
  5. package/dist/catalogue.d.ts +1 -1
  6. package/dist/catalogue.js +2 -2
  7. package/dist/{chunk-6OCFE42A.js → chunk-FHVTUKXL.js} +2 -2
  8. package/dist/{chunk-LZ6R74PT.js → chunk-GUJWMK5P.js} +97 -31
  9. package/dist/chunk-GUJWMK5P.js.map +1 -0
  10. package/dist/{chunk-4LP2EWSC.js → chunk-IRJ4QRWV.js} +41 -5
  11. package/dist/{chunk-4LP2EWSC.js.map → chunk-IRJ4QRWV.js.map} +1 -1
  12. package/dist/{chunk-VDBZ2AOS.js → chunk-NCZYFZ3B.js} +4 -2
  13. package/dist/{chunk-VDBZ2AOS.js.map → chunk-NCZYFZ3B.js.map} +1 -1
  14. package/dist/{chunk-B257DXIS.js → chunk-P3FT7QCW.js} +295 -1
  15. package/dist/chunk-P3FT7QCW.js.map +1 -0
  16. package/dist/{chunk-HJ2MOKJ5.js → chunk-QMS7TGFL.js} +4 -2
  17. package/dist/{chunk-HJ2MOKJ5.js.map → chunk-QMS7TGFL.js.map} +1 -1
  18. package/dist/{chunk-D7KSRAWK.js → chunk-SF7Y6ZUA.js} +2 -2
  19. package/dist/{chunk-H3BD4SMD.js → chunk-UAUQGP3B.js} +2 -2
  20. package/dist/client.d.ts +12 -4
  21. package/dist/client.js +48 -3
  22. package/dist/client.js.map +1 -1
  23. package/dist/compactor.d.ts +1 -1
  24. package/dist/compactor.js +3 -3
  25. package/dist/connector-jira.d.ts +2 -2
  26. package/dist/connector-jira.js +2 -2
  27. package/dist/connector-salesforce.d.ts +2 -2
  28. package/dist/connector-salesforce.js +2 -2
  29. package/dist/{coordinator-DFbyrQEU.d.ts → coordinator-D32a5rNk.d.ts} +11 -1
  30. package/dist/{db-types-B6_JKQWK.d.ts → db-types-BlN-4KbQ.d.ts} +1 -1
  31. package/dist/{gateway-CvO7Xy3T.d.ts → gateway-Bpvatd9n.d.ts} +3 -3
  32. package/dist/gateway-server.d.ts +4 -4
  33. package/dist/gateway-server.js +14 -8
  34. package/dist/gateway-server.js.map +1 -1
  35. package/dist/gateway.d.ts +11 -7
  36. package/dist/gateway.js +7 -5
  37. package/dist/index.d.ts +6 -5
  38. package/dist/index.js +9 -1
  39. package/dist/parquet.d.ts +1 -1
  40. package/dist/parquet.js +2 -2
  41. package/dist/proto.d.ts +1 -1
  42. package/dist/proto.js +2 -2
  43. package/dist/react.d.ts +32 -3
  44. package/dist/react.js +54 -19
  45. package/dist/react.js.map +1 -1
  46. package/dist/registry-CPTgO9jv.d.ts +53 -0
  47. package/dist/{resolver-BZURzdlL.d.ts → resolver-CbuXm3nB.d.ts} +1 -1
  48. package/dist/{src-2LUI4O6N.js → src-CLCALYDT.js} +3 -3
  49. package/dist/{src-CLFH5JSA.js → src-FPJQYQNA.js} +3 -3
  50. package/dist/{src-5ABL6A7J.js → src-RHKJFQKR.js} +10 -2
  51. package/dist/{types-GGBfZBKQ.d.ts → types-CLlD4XOy.d.ts} +6 -0
  52. package/package.json +1 -1
  53. package/dist/chunk-B257DXIS.js.map +0 -1
  54. package/dist/chunk-LZ6R74PT.js.map +0 -1
  55. /package/dist/{chunk-6OCFE42A.js.map → chunk-FHVTUKXL.js.map} +0 -0
  56. /package/dist/{chunk-D7KSRAWK.js.map → chunk-SF7Y6ZUA.js.map} +0 -0
  57. /package/dist/{chunk-H3BD4SMD.js.map → chunk-UAUQGP3B.js.map} +0 -0
  58. /package/dist/{src-2LUI4O6N.js.map → src-CLCALYDT.js.map} +0 -0
  59. /package/dist/{src-5ABL6A7J.js.map → src-FPJQYQNA.js.map} +0 -0
  60. /package/dist/{src-CLFH5JSA.js.map → src-RHKJFQKR.js.map} +0 -0
package/dist/adapter.d.ts CHANGED
@@ -1,8 +1,8 @@
  import { BigQuery } from '@google-cloud/bigquery';
  import { R as Result, A as AdapterError, H as HLCTimestamp } from './result-CojzlFE2.js';
- import { R as RowDelta, T as TableSchema, C as ColumnDelta } from './types-GGBfZBKQ.js';
- import { D as DatabaseAdapter, a as DatabaseAdapterConfig } from './db-types-B6_JKQWK.js';
- export { i as isDatabaseAdapter, l as lakeSyncTypeToBigQuery } from './db-types-B6_JKQWK.js';
+ import { R as RowDelta, T as TableSchema, C as ColumnDelta } from './types-CLlD4XOy.js';
+ import { D as DatabaseAdapter, a as DatabaseAdapterConfig } from './db-types-BlN-4KbQ.js';
+ export { i as isDatabaseAdapter, l as lakeSyncTypeToBigQuery } from './db-types-BlN-4KbQ.js';
  import { C as ConnectorConfig } from './types-D-E0VrfS.js';
  import { L as LakeAdapter, A as AdapterConfig, O as ObjectInfo } from './types-DSC_EiwR.js';
  import mysql from 'mysql2/promise';
@@ -39,6 +39,20 @@ interface Materialisable {
  * Uses duck-typing (same pattern as `isDatabaseAdapter`).
  */
  declare function isMaterialisable(adapter: unknown): adapter is Materialisable;
+ /**
+ * Resolve the primary key columns for a table schema.
+ * Defaults to `["row_id"]` when not explicitly set.
+ */
+ declare function resolvePrimaryKey(schema: TableSchema): string[];
+ /**
+ * Resolve the conflict columns used for upsert ON CONFLICT targeting.
+ * When `externalIdColumn` is set, upserts resolve on that column instead of the PK.
+ */
+ declare function resolveConflictColumns(schema: TableSchema): string[];
+ /**
+ * Whether tombstoned rows should be soft-deleted (default) or hard-deleted.
+ */
+ declare function isSoftDelete(schema: TableSchema): boolean;
  /**
  * Group deltas by their table name, collecting the set of affected row IDs per table.
  *
@@ -367,7 +381,8 @@ declare class MySQLAdapter implements DatabaseAdapter, Materialisable {
  *
  * For each table with a matching schema, merges delta history into the
  * latest row state and upserts into the destination table. Tombstoned
- * rows are deleted. The `props` column is never touched.
+ * rows are soft-deleted (default) or hard-deleted. The `props` column
+ * is never touched.
  */
  materialise(deltas: RowDelta[], schemas: ReadonlyArray<TableSchema>): Promise<Result<void, AdapterError>>;
  /** Close the database connection pool and release resources. */
@@ -444,4 +459,4 @@ declare function mergeLatestState(rows: Array<{
  op: string;
  }>): Record<string, unknown> | null;

- export { AdapterConfig, BigQueryAdapter, type BigQueryAdapterConfig, CompositeAdapter, type CompositeAdapterConfig, type CompositeRoute, DatabaseAdapter, DatabaseAdapterConfig, FanOutAdapter, type FanOutAdapterConfig, LakeAdapter, LifecycleAdapter, type LifecycleAdapterConfig, type Materialisable, type MigrateOptions, type MigrateProgress, type MigrateResult, MinIOAdapter, MySQLAdapter, ObjectInfo, PostgresAdapter, type QueryFn, buildSchemaIndex, createDatabaseAdapter, createQueryFn, groupDeltasByTable, isMaterialisable, mergeLatestState, migrateAdapter, migrateToTier, toCause, wrapAsync };
+ export { AdapterConfig, BigQueryAdapter, type BigQueryAdapterConfig, CompositeAdapter, type CompositeAdapterConfig, type CompositeRoute, DatabaseAdapter, DatabaseAdapterConfig, FanOutAdapter, type FanOutAdapterConfig, LakeAdapter, LifecycleAdapter, type LifecycleAdapterConfig, type Materialisable, type MigrateOptions, type MigrateProgress, type MigrateResult, MinIOAdapter, MySQLAdapter, ObjectInfo, PostgresAdapter, type QueryFn, buildSchemaIndex, createDatabaseAdapter, createQueryFn, groupDeltasByTable, isMaterialisable, isSoftDelete, mergeLatestState, migrateAdapter, migrateToTier, resolveConflictColumns, resolvePrimaryKey, toCause, wrapAsync };
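For orientation, a minimal sketch of how the three new helpers interpret a TableSchema. The `lakesync/adapter` import subpath and the example schema are assumptions for illustration, not taken from the package:

import { isSoftDelete, resolveConflictColumns, resolvePrimaryKey } from "lakesync/adapter"; // assumed subpath

// Hypothetical schema: an external CRM id drives upserts; tombstones become soft deletes (the default).
const contacts = {
  table: "contacts",
  sourceTable: "crm_contacts",
  columns: [
    { name: "external_id", type: "string" },
    { name: "email", type: "string" },
  ],
  externalIdColumn: "external_id", // upserts conflict on this column instead of the primary key
  softDelete: true,                // default; set to false to hard-delete tombstoned rows
};

resolvePrimaryKey(contacts);      // ["row_id"] (the default when primaryKey is not set)
resolveConflictColumns(contacts); // ["external_id"]
isSoftDelete(contacts);           // true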
package/dist/adapter.js CHANGED
@@ -12,14 +12,17 @@ import {
  groupDeltasByTable,
  isDatabaseAdapter,
  isMaterialisable,
+ isSoftDelete,
  lakeSyncTypeToBigQuery,
  mergeLatestState,
  migrateAdapter,
  migrateToTier,
+ resolveConflictColumns,
+ resolvePrimaryKey,
  toCause,
  wrapAsync
- } from "./chunk-LZ6R74PT.js";
- import "./chunk-B257DXIS.js";
+ } from "./chunk-GUJWMK5P.js";
+ import "./chunk-P3FT7QCW.js";
  import "./chunk-7D4SUZUM.js";
  export {
  BigQueryAdapter,
@@ -35,10 +38,13 @@ export {
  groupDeltasByTable,
  isDatabaseAdapter,
  isMaterialisable,
+ isSoftDelete,
  lakeSyncTypeToBigQuery,
  mergeLatestState,
  migrateAdapter,
  migrateToTier,
+ resolveConflictColumns,
+ resolvePrimaryKey,
  toCause,
  wrapAsync
  };
package/dist/analyst.js CHANGED
@@ -2,7 +2,7 @@ import {
  Err,
  LakeSyncError,
  Ok
- } from "./chunk-B257DXIS.js";
+ } from "./chunk-P3FT7QCW.js";
  import "./chunk-7D4SUZUM.js";

  // ../analyst/src/duckdb.ts
@@ -1,4 +1,4 @@
- import { S as SyncPush, R as RowDelta } from './types-GGBfZBKQ.js';
+ import { S as SyncPush, R as RowDelta } from './types-CLlD4XOy.js';
  import { H as HLC } from './hlc-DiD8QNG3.js';
  import { R as Result, F as FlushError } from './result-CojzlFE2.js';

@@ -1,6 +1,6 @@
  import { I as IcebergSchema, P as PartitionSpec } from './nessie-client-DrNikVXy.js';
  export { C as CatalogueConfig, a as CatalogueError, D as DataFile, b as IcebergField, N as NessieCatalogueClient, S as Snapshot, T as TableMetadata } from './nessie-client-DrNikVXy.js';
- import { T as TableSchema } from './types-GGBfZBKQ.js';
+ import { T as TableSchema } from './types-CLlD4XOy.js';
  import './result-CojzlFE2.js';

  /**
package/dist/catalogue.js CHANGED
@@ -4,8 +4,8 @@ import {
  buildPartitionSpec,
  lakeSyncTableName,
  tableSchemaToIceberg
- } from "./chunk-H3BD4SMD.js";
- import "./chunk-B257DXIS.js";
+ } from "./chunk-UAUQGP3B.js";
+ import "./chunk-P3FT7QCW.js";
  import "./chunk-7D4SUZUM.js";
  export {
  CatalogueError,
@@ -1,7 +1,7 @@
  import {
  Err,
  Ok
- } from "./chunk-B257DXIS.js";
+ } from "./chunk-P3FT7QCW.js";

  // ../proto/src/gen/lakesync_pb.ts
  import { enumDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2";
@@ -332,4 +332,4 @@ export {
  encodeActionResponse,
  decodeActionResponse
  };
- //# sourceMappingURL=chunk-6OCFE42A.js.map
+ //# sourceMappingURL=chunk-FHVTUKXL.js.map
@@ -3,7 +3,7 @@ import {
  Err,
  Ok,
  toError
- } from "./chunk-B257DXIS.js";
+ } from "./chunk-P3FT7QCW.js";

  // ../adapter/src/db-types.ts
  var BIGQUERY_TYPE_MAP = {
@@ -24,6 +24,15 @@ function isDatabaseAdapter(adapter) {
  function isMaterialisable(adapter) {
  return adapter !== null && typeof adapter === "object" && "materialise" in adapter && typeof adapter.materialise === "function";
  }
+ function resolvePrimaryKey(schema) {
+ return schema.primaryKey ?? ["row_id"];
+ }
+ function resolveConflictColumns(schema) {
+ return schema.externalIdColumn ? [schema.externalIdColumn] : resolvePrimaryKey(schema);
+ }
+ function isSoftDelete(schema) {
+ return schema.softDelete !== false;
+ }
  function groupDeltasByTable(deltas) {
  const result = /* @__PURE__ */ new Map();
  for (const delta of deltas) {
@@ -237,14 +246,20 @@ CLUSTER BY \`table\`, hlc`,
  for (const [sourceTable, rowIds] of tableRowIds) {
  const schema = schemaIndex.get(sourceTable);
  if (!schema) continue;
+ const pk = resolvePrimaryKey(schema);
+ const conflictCols = resolveConflictColumns(schema);
+ const soft = isSoftDelete(schema);
  const colDefs = schema.columns.map((c) => `${c.name} ${lakeSyncTypeToBigQuery(c.type)}`).join(", ");
+ const deletedAtCol = soft ? `,
+ deleted_at TIMESTAMP` : "";
  await this.client.query({
  query: `CREATE TABLE IF NOT EXISTS \`${this.dataset}.${schema.table}\` (
  row_id STRING NOT NULL,
  ${colDefs},
- props JSON DEFAULT '{}',
+ props JSON DEFAULT '{}'${deletedAtCol},
  synced_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP()
- )`,
+ )
+ CLUSTER BY ${pk.map((c) => c === "row_id" ? "row_id" : c).join(", ")}`,
  location: this.location
  });
  const rowIdArray = [...rowIds];
@@ -284,29 +299,34 @@ ORDER BY hlc ASC`,
  params[`c${schema.columns.indexOf(col)}_${i}`] = u.state[col.name] ?? null;
  }
  const colSelects = schema.columns.map((col, ci) => `@c${ci}_${i} AS ${col.name}`).join(", ");
+ const deletedAtSelect = soft ? ", CAST(NULL AS TIMESTAMP) AS deleted_at" : "";
  selects.push(
- `SELECT @rid_${i} AS row_id, ${colSelects}, CURRENT_TIMESTAMP() AS synced_at`
+ `SELECT @rid_${i} AS row_id, ${colSelects}${deletedAtSelect}, CURRENT_TIMESTAMP() AS synced_at`
  );
  }
+ const mergeOn = conflictCols.map((c) => `t.${c === "row_id" ? "row_id" : c} = s.${c === "row_id" ? "row_id" : c}`).join(" AND ");
  const updateSet = schema.columns.map((col) => `${col.name} = s.${col.name}`).join(", ");
- const insertCols = [
+ const softUpdateExtra = soft ? ", deleted_at = s.deleted_at" : "";
+ const insertColsList = [
  "row_id",
  ...schema.columns.map((c) => c.name),
  "props",
+ ...soft ? ["deleted_at"] : [],
  "synced_at"
  ].join(", ");
- const insertVals = [
+ const insertValsList = [
  "s.row_id",
  ...schema.columns.map((c) => `s.${c.name}`),
  "'{}'",
+ ...soft ? ["s.deleted_at"] : [],
  "s.synced_at"
  ].join(", ");
  const mergeSql = `MERGE \`${this.dataset}.${schema.table}\` AS t
  USING (${selects.join(" UNION ALL ")}) AS s
- ON t.row_id = s.row_id
- WHEN MATCHED THEN UPDATE SET ${updateSet}, synced_at = s.synced_at
- WHEN NOT MATCHED THEN INSERT (${insertCols})
- VALUES (${insertVals})`;
+ ON ${mergeOn}
+ WHEN MATCHED THEN UPDATE SET ${updateSet}${softUpdateExtra}, synced_at = s.synced_at
+ WHEN NOT MATCHED THEN INSERT (${insertColsList})
+ VALUES (${insertValsList})`;
  await this.client.query({
  query: mergeSql,
  params,
@@ -314,11 +334,19 @@ VALUES (${insertVals})`;
  });
  }
  if (deleteRowIds.length > 0) {
- await this.client.query({
- query: `DELETE FROM \`${this.dataset}.${schema.table}\` WHERE row_id IN UNNEST(@rowIds)`,
- params: { rowIds: deleteRowIds },
- location: this.location
- });
+ if (soft) {
+ await this.client.query({
+ query: `UPDATE \`${this.dataset}.${schema.table}\` SET deleted_at = CURRENT_TIMESTAMP(), synced_at = CURRENT_TIMESTAMP() WHERE row_id IN UNNEST(@rowIds)`,
+ params: { rowIds: deleteRowIds },
+ location: this.location
+ });
+ } else {
+ await this.client.query({
+ query: `DELETE FROM \`${this.dataset}.${schema.table}\` WHERE row_id IN UNNEST(@rowIds)`,
+ params: { rowIds: deleteRowIds },
+ location: this.location
+ });
+ }
  }
  }
  }, "Failed to materialise deltas");
@@ -521,7 +549,8 @@ var MySQLAdapter = class {
  *
  * For each table with a matching schema, merges delta history into the
  * latest row state and upserts into the destination table. Tombstoned
- * rows are deleted. The `props` column is never touched.
+ * rows are soft-deleted (default) or hard-deleted. The `props` column
+ * is never touched.
  */
  async materialise(deltas, schemas) {
  if (deltas.length === 0) {
@@ -533,9 +562,14 @@ var MySQLAdapter = class {
  for (const [tableName, rowIds] of grouped) {
  const schema = schemaIndex.get(tableName);
  if (!schema) continue;
+ const pk = resolvePrimaryKey(schema);
+ const soft = isSoftDelete(schema);
  const typedCols = schema.columns.map((col) => `\`${col.name}\` ${lakeSyncTypeToMySQL(col.type)}`).join(", ");
+ const pkConstraint = `PRIMARY KEY (${pk.map((c) => `\`${c}\``).join(", ")})`;
+ const deletedAtCol = soft ? `, deleted_at TIMESTAMP NULL` : "";
+ const uniqueConstraint = schema.externalIdColumn ? `, UNIQUE KEY (\`${schema.externalIdColumn}\`)` : "";
  await this.pool.execute(
- `CREATE TABLE IF NOT EXISTS \`${schema.table}\` (row_id VARCHAR(255) PRIMARY KEY, ${typedCols}, props JSON NOT NULL DEFAULT ('{}'), synced_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP)`
+ `CREATE TABLE IF NOT EXISTS \`${schema.table}\` (row_id VARCHAR(255) NOT NULL, ${typedCols}, props JSON NOT NULL DEFAULT ('{}')${deletedAtCol}, synced_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, ${pkConstraint}${uniqueConstraint})`
  );
  const rowIdArray = [...rowIds];
  const placeholders = rowIdArray.map(() => "?").join(", ");
@@ -564,7 +598,7 @@ var MySQLAdapter = class {
  }
  if (upserts.length > 0) {
  const cols = schema.columns.map((c) => c.name);
- const valuePlaceholders = upserts.map(() => `(?, ${cols.map(() => "?").join(", ")}, NOW())`).join(", ");
+ const valuePlaceholders = soft ? upserts.map(() => `(?, ${cols.map(() => "?").join(", ")}, NULL, NOW())`).join(", ") : upserts.map(() => `(?, ${cols.map(() => "?").join(", ")}, NOW())`).join(", ");
  const values = [];
  for (const { rowId, state } of upserts) {
  values.push(rowId);
@@ -573,17 +607,26 @@ var MySQLAdapter = class {
  }
  }
  const updateCols = cols.map((c) => `\`${c}\` = VALUES(\`${c}\`)`).join(", ");
+ const softUpdateExtra = soft ? ", deleted_at = NULL" : "";
+ const colList = soft ? `row_id, ${cols.map((c) => `\`${c}\``).join(", ")}, deleted_at, synced_at` : `row_id, ${cols.map((c) => `\`${c}\``).join(", ")}, synced_at`;
  await this.pool.execute(
- `INSERT INTO \`${schema.table}\` (row_id, ${cols.map((c) => `\`${c}\``).join(", ")}, synced_at) VALUES ${valuePlaceholders} ON DUPLICATE KEY UPDATE ${updateCols}, synced_at = VALUES(synced_at)`,
+ `INSERT INTO \`${schema.table}\` (${colList}) VALUES ${valuePlaceholders} ON DUPLICATE KEY UPDATE ${updateCols}${softUpdateExtra}, synced_at = VALUES(synced_at)`,
  values
  );
  }
  if (deleteIds.length > 0) {
  const delPlaceholders = deleteIds.map(() => "?").join(", ");
- await this.pool.execute(
- `DELETE FROM \`${schema.table}\` WHERE row_id IN (${delPlaceholders})`,
- deleteIds
- );
+ if (soft) {
+ await this.pool.execute(
+ `UPDATE \`${schema.table}\` SET deleted_at = NOW(), synced_at = NOW() WHERE row_id IN (${delPlaceholders})`,
+ deleteIds
+ );
+ } else {
+ await this.pool.execute(
+ `DELETE FROM \`${schema.table}\` WHERE row_id IN (${delPlaceholders})`,
+ deleteIds
+ );
+ }
  }
  }
  }, "Failed to materialise deltas");
@@ -734,13 +777,22 @@ CREATE INDEX IF NOT EXISTS idx_lakesync_deltas_table_row ON lakesync_deltas ("ta
  const schema = schemaIndex.get(tableName);
  if (!schema) continue;
  const dest = schema.table;
+ const pk = resolvePrimaryKey(schema);
+ const conflictCols = resolveConflictColumns(schema);
+ const soft = isSoftDelete(schema);
  const columnDefs = schema.columns.map((c) => `"${c.name}" ${POSTGRES_TYPE_MAP[c.type]}`).join(", ");
+ const pkConstraint = `PRIMARY KEY (${pk.map((c) => `"${c}"`).join(", ")})`;
+ const deletedAtCol = soft ? `,
+ deleted_at TIMESTAMPTZ` : "";
+ const uniqueConstraint = schema.externalIdColumn ? `,
+ UNIQUE ("${schema.externalIdColumn}")` : "";
  await this.pool.query(
  `CREATE TABLE IF NOT EXISTS "${dest}" (
- row_id TEXT PRIMARY KEY,
+ row_id TEXT NOT NULL,
  ${columnDefs},
- props JSONB NOT NULL DEFAULT '{}',
- synced_at TIMESTAMPTZ NOT NULL DEFAULT now()
+ props JSONB NOT NULL DEFAULT '{}'${deletedAtCol},
+ synced_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ ${pkConstraint}${uniqueConstraint}
  )`
  );
  const sourceTable = schema.sourceTable ?? schema.table;
@@ -771,7 +823,8 @@ CREATE INDEX IF NOT EXISTS idx_lakesync_deltas_table_row ON lakesync_deltas ("ta
  }
  if (upserts.length > 0) {
  const colNames = schema.columns.map((c) => c.name);
- const allCols = ["row_id", ...colNames, "synced_at"];
+ const baseCols = ["row_id", ...colNames];
+ const allCols = soft ? [...baseCols, "deleted_at", "synced_at"] : [...baseCols, "synced_at"];
  const colList = allCols.map((c) => `"${c}"`).join(", ");
  const values = [];
  const valueRows = [];
@@ -785,16 +838,26 @@ CREATE INDEX IF NOT EXISTS idx_lakesync_deltas_table_row ON lakesync_deltas ("ta
  for (const col of colNames) {
  values.push(u.state[col] ?? null);
  }
+ if (soft) values.push(null);
  values.push(/* @__PURE__ */ new Date());
  }
- const updateSet = [...colNames, "synced_at"].map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ");
+ const conflictList = conflictCols.map((c) => `"${c}"`).join(", ");
+ const updateCols = soft ? [...colNames, "deleted_at", "synced_at"] : [...colNames, "synced_at"];
+ const updateSet = updateCols.map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ");
  await this.pool.query(
- `INSERT INTO "${dest}" (${colList}) VALUES ${valueRows.join(", ")} ON CONFLICT (row_id) DO UPDATE SET ${updateSet}`,
+ `INSERT INTO "${dest}" (${colList}) VALUES ${valueRows.join(", ")} ON CONFLICT (${conflictList}) DO UPDATE SET ${updateSet}`,
  values
  );
  }
  if (deleteIds.length > 0) {
- await this.pool.query(`DELETE FROM "${dest}" WHERE row_id = ANY($1)`, [deleteIds]);
+ if (soft) {
+ await this.pool.query(
+ `UPDATE "${dest}" SET deleted_at = now(), synced_at = now() WHERE row_id = ANY($1)`,
+ [deleteIds]
+ );
+ } else {
+ await this.pool.query(`DELETE FROM "${dest}" WHERE row_id = ANY($1)`, [deleteIds]);
+ }
  }
  }
  }, "Failed to materialise deltas");
@@ -1177,6 +1240,9 @@ export {
  lakeSyncTypeToBigQuery,
  isDatabaseAdapter,
  isMaterialisable,
+ resolvePrimaryKey,
+ resolveConflictColumns,
+ isSoftDelete,
  groupDeltasByTable,
  buildSchemaIndex,
  toCause,
@@ -1194,4 +1260,4 @@ export {
  MinIOAdapter,
  createQueryFn
  };
- //# sourceMappingURL=chunk-LZ6R74PT.js.map
+ //# sourceMappingURL=chunk-GUJWMK5P.js.map
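Taking the hypothetical `contacts` schema from earlier, an illustrative sketch of the statements the reworked Postgres materialise path now builds. These are approximate shapes read off the templates in the hunks above, not literal package output:

// Destination DDL gains deleted_at plus explicit PRIMARY KEY / UNIQUE constraints.
const createSql = `CREATE TABLE IF NOT EXISTS "contacts" (
  row_id TEXT NOT NULL,
  "external_id" TEXT, "email" TEXT,
  props JSONB NOT NULL DEFAULT '{}',
  deleted_at TIMESTAMPTZ,
  synced_at TIMESTAMPTZ NOT NULL DEFAULT now(),
  PRIMARY KEY ("row_id"),
  UNIQUE ("external_id")
)`;

// Upserts conflict on the external id rather than row_id and reset deleted_at when a row reappears.
const upsertSql = `INSERT INTO "contacts" ("row_id", "external_id", "email", "deleted_at", "synced_at")
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT ("external_id") DO UPDATE SET
  "external_id" = EXCLUDED."external_id", "email" = EXCLUDED."email",
  "deleted_at" = EXCLUDED."deleted_at", "synced_at" = EXCLUDED."synced_at"`;

// Tombstoned rows are soft-deleted by default instead of being removed.
const softDeleteSql = `UPDATE "contacts" SET deleted_at = now(), synced_at = now() WHERE row_id = ANY($1)`;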
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../adapter/src/db-types.ts","../../adapter/src/materialise.ts","../../adapter/src/shared.ts","../../adapter/src/bigquery.ts","../../adapter/src/composite.ts","../../adapter/src/mysql.ts","../../adapter/src/postgres.ts","../../adapter/src/factory.ts","../../adapter/src/fan-out.ts","../../adapter/src/lifecycle.ts","../../adapter/src/migrate.ts","../../adapter/src/minio.ts","../../adapter/src/query-fn.ts"],"sourcesContent":["import type { AdapterError, HLCTimestamp, Result, RowDelta, TableSchema } from \"@lakesync/core\";\n\n/** Configuration for a database adapter connection. */\nexport interface DatabaseAdapterConfig {\n\t/** Connection string (e.g. postgres://user:pass@host/db) */\n\tconnectionString: string;\n}\n\n/**\n * Abstract interface for SQL database storage operations.\n * Alternative to LakeAdapter for small-data backends (Postgres, MySQL, etc).\n */\nexport interface DatabaseAdapter {\n\t/** Insert deltas into the database in a single batch. Idempotent via deltaId uniqueness. */\n\tinsertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;\n\n\t/** Query deltas with HLC greater than the given timestamp, optionally filtered by table. */\n\tqueryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;\n\n\t/** Get the latest merged state for a specific row. Returns null if the row doesn't exist. */\n\tgetLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>>;\n\n\t/** Ensure the database schema matches the given TableSchema. Creates/alters tables as needed. */\n\tensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>>;\n\n\t/** Close the database connection and release resources. */\n\tclose(): Promise<void>;\n}\n\n/**\n * Map a LakeSync column type to a BigQuery column definition.\n */\nconst BIGQUERY_TYPE_MAP: Record<TableSchema[\"columns\"][number][\"type\"], string> = {\n\tstring: \"STRING\",\n\tnumber: \"FLOAT64\",\n\tboolean: \"BOOL\",\n\tjson: \"JSON\",\n\tnull: \"STRING\",\n};\n\nexport function lakeSyncTypeToBigQuery(type: TableSchema[\"columns\"][number][\"type\"]): string {\n\treturn BIGQUERY_TYPE_MAP[type];\n}\n\n/** Type guard to distinguish DatabaseAdapter from LakeAdapter at runtime. 
*/\nexport function isDatabaseAdapter(adapter: unknown): adapter is DatabaseAdapter {\n\treturn (\n\t\tadapter !== null &&\n\t\ttypeof adapter === \"object\" &&\n\t\t\"insertDeltas\" in adapter &&\n\t\t\"queryDeltasSince\" in adapter &&\n\t\ttypeof (adapter as DatabaseAdapter).insertDeltas === \"function\"\n\t);\n}\n","import type { AdapterError, Result, RowDelta, TableSchema } from \"@lakesync/core\";\n\n/**\n * Opt-in capability for adapters that can materialise deltas into destination tables.\n *\n * Materialisation is a separate concern from delta storage — adapters that store\n * deltas (via `DatabaseAdapter.insertDeltas`) may also materialise them into\n * queryable destination tables by implementing this interface.\n *\n * Destination tables follow the hybrid column model:\n * - Synced columns (written by materialiser, derived from `TableSchema.columns`)\n * - `props JSONB DEFAULT '{}'` — consumer-extensible, never touched by materialiser\n * - `synced_at` — updated on every materialise cycle\n */\nexport interface Materialisable {\n\t/**\n\t * Materialise deltas into destination tables.\n\t *\n\t * For each table with a matching schema, merges delta history into the\n\t * latest row state and upserts into the destination table. Tombstoned\n\t * rows are deleted. The `props` column is never touched.\n\t *\n\t * @param deltas - The deltas that were just flushed.\n\t * @param schemas - Table schemas defining destination tables and column mappings.\n\t */\n\tmaterialise(\n\t\tdeltas: RowDelta[],\n\t\tschemas: ReadonlyArray<TableSchema>,\n\t): Promise<Result<void, AdapterError>>;\n}\n\n/**\n * Type guard to check if an adapter supports materialisation.\n *\n * Uses duck-typing (same pattern as `isDatabaseAdapter`).\n */\nexport function isMaterialisable(adapter: unknown): adapter is Materialisable {\n\treturn (\n\t\tadapter !== null &&\n\t\ttypeof adapter === \"object\" &&\n\t\t\"materialise\" in adapter &&\n\t\ttypeof (adapter as Materialisable).materialise === \"function\"\n\t);\n}\n\n/**\n * Resolve the primary key columns for a table schema.\n * Defaults to `[\"row_id\"]` when not explicitly set.\n */\nexport function resolvePrimaryKey(schema: TableSchema): string[] {\n\treturn schema.primaryKey ?? [\"row_id\"];\n}\n\n/**\n * Resolve the conflict columns used for upsert ON CONFLICT targeting.\n * When `externalIdColumn` is set, upserts resolve on that column instead of the PK.\n */\nexport function resolveConflictColumns(schema: TableSchema): string[] {\n\treturn schema.externalIdColumn ? [schema.externalIdColumn] : resolvePrimaryKey(schema);\n}\n\n/**\n * Whether tombstoned rows should be soft-deleted (default) or hard-deleted.\n */\nexport function isSoftDelete(schema: TableSchema): boolean {\n\treturn schema.softDelete !== false;\n}\n\n/**\n * Group deltas by their table name, collecting the set of affected row IDs per table.\n *\n * @param deltas - The deltas to group.\n * @returns A map from table name to the set of affected row IDs.\n */\nexport function groupDeltasByTable(deltas: ReadonlyArray<RowDelta>): Map<string, Set<string>> {\n\tconst result = new Map<string, Set<string>>();\n\tfor (const delta of deltas) {\n\t\tlet rowIds = result.get(delta.table);\n\t\tif (!rowIds) {\n\t\t\trowIds = new Set<string>();\n\t\t\tresult.set(delta.table, rowIds);\n\t\t}\n\t\trowIds.add(delta.rowId);\n\t}\n\treturn result;\n}\n\n/**\n * Build an index from source table name to schema.\n *\n * Keys are `schema.sourceTable ?? 
schema.table`, so deltas can be matched\n * by their `table` field to find the correct destination schema.\n *\n * @param schemas - The table schemas to index.\n * @returns A map from source table name to schema.\n */\nexport function buildSchemaIndex(schemas: ReadonlyArray<TableSchema>): Map<string, TableSchema> {\n\tconst index = new Map<string, TableSchema>();\n\tfor (const schema of schemas) {\n\t\tconst key = schema.sourceTable ?? schema.table;\n\t\tindex.set(key, schema);\n\t}\n\treturn index;\n}\n","import { AdapterError, type ColumnDelta, Err, Ok, type Result } from \"@lakesync/core\";\n\n/** Normalise a caught value into an Error or undefined. */\nexport function toCause(error: unknown): Error | undefined {\n\treturn error instanceof Error ? error : undefined;\n}\n\n/** Execute an async operation and wrap errors into an AdapterError Result. */\nexport async function wrapAsync<T>(\n\toperation: () => Promise<T>,\n\terrorMessage: string,\n): Promise<Result<T, AdapterError>> {\n\ttry {\n\t\tconst value = await operation();\n\t\treturn Ok(value);\n\t} catch (error) {\n\t\tif (error instanceof AdapterError) {\n\t\t\treturn Err(error);\n\t\t}\n\t\treturn Err(new AdapterError(errorMessage, toCause(error)));\n\t}\n}\n\n/**\n * Merge delta rows into final state using column-level LWW.\n * Shared by Postgres, MySQL, and BigQuery getLatestState implementations.\n * Rows must be sorted by HLC ascending.\n */\nexport function mergeLatestState(\n\trows: Array<{ columns: string | ColumnDelta[]; op: string }>,\n): Record<string, unknown> | null {\n\tif (rows.length === 0) return null;\n\n\tconst lastRow = rows[rows.length - 1]!;\n\tif (lastRow.op === \"DELETE\") return null;\n\n\tconst state: Record<string, unknown> = {};\n\n\tfor (const row of rows) {\n\t\tif (row.op === \"DELETE\") {\n\t\t\tfor (const key of Object.keys(state)) {\n\t\t\t\tdelete state[key];\n\t\t\t}\n\t\t\tcontinue;\n\t\t}\n\n\t\tconst columns: ColumnDelta[] =\n\t\t\ttypeof row.columns === \"string\" ? JSON.parse(row.columns) : row.columns;\n\n\t\tfor (const col of columns) {\n\t\t\tstate[col.column] = col.value;\n\t\t}\n\t}\n\n\treturn state;\n}\n","import { BigQuery } from \"@google-cloud/bigquery\";\nimport {\n\ttype AdapterError,\n\ttype ColumnDelta,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\nimport type { DatabaseAdapter } from \"./db-types\";\nimport { lakeSyncTypeToBigQuery } from \"./db-types\";\nimport type { Materialisable } from \"./materialise\";\nimport {\n\tbuildSchemaIndex,\n\tgroupDeltasByTable,\n\tisSoftDelete,\n\tresolveConflictColumns,\n\tresolvePrimaryKey,\n} from \"./materialise\";\nimport { mergeLatestState, wrapAsync } from \"./shared\";\n\n/**\n * Configuration for the BigQuery adapter.\n * Unlike SQL adapters, BigQuery is HTTP-based — no connection string needed.\n */\nexport interface BigQueryAdapterConfig {\n\t/** GCP project ID. */\n\tprojectId: string;\n\t/** BigQuery dataset name. */\n\tdataset: string;\n\t/** Path to a service account JSON key file. Falls back to ADC if omitted. */\n\tkeyFilename?: string;\n\t/** Dataset location (default: \"US\"). */\n\tlocation?: string;\n}\n\n/** Shape of a row returned from the lakesync_deltas table. 
*/\ninterface BigQueryDeltaRow {\n\tdelta_id: string;\n\ttable: string;\n\trow_id: string;\n\tcolumns: string;\n\thlc: { value: string } | string | number;\n\tclient_id: string;\n\top: string;\n}\n\n/**\n * Convert a raw BigQuery row into a RowDelta.\n * BigQuery returns INT64 as `{ value: string }` objects to avoid precision loss.\n */\nfunction rowToRowDelta(row: BigQueryDeltaRow): RowDelta {\n\tconst columns: ColumnDelta[] =\n\t\ttypeof row.columns === \"string\" ? JSON.parse(row.columns) : row.columns;\n\n\t// BigQuery INT64 comes back as { value: \"123\" } to preserve precision\n\tconst hlcRaw = row.hlc;\n\tconst hlcString =\n\t\ttypeof hlcRaw === \"object\" && hlcRaw !== null && \"value\" in hlcRaw\n\t\t\t? hlcRaw.value\n\t\t\t: String(hlcRaw);\n\n\treturn {\n\t\tdeltaId: row.delta_id,\n\t\ttable: row.table,\n\t\trowId: row.row_id,\n\t\tcolumns,\n\t\thlc: BigInt(hlcString) as HLCTimestamp,\n\t\tclientId: row.client_id,\n\t\top: row.op as RowDelta[\"op\"],\n\t};\n}\n\n/**\n * BigQuery database adapter for LakeSync.\n *\n * Stores deltas in a `lakesync_deltas` table using standard SQL DML.\n * Idempotent inserts via MERGE statement. All public methods return\n * `Result` and never throw.\n *\n * **Note:** BigQuery DML is limited to 1,500 statements per table per day\n * on standard (non-partitioned) tables. Query latency is seconds, not\n * milliseconds — this adapter is designed for the analytics tier.\n */\nexport class BigQueryAdapter implements DatabaseAdapter, Materialisable {\n\t/** @internal */\n\treadonly client: BigQuery;\n\t/** @internal */\n\treadonly dataset: string;\n\t/** @internal */\n\treadonly location: string;\n\n\tconstructor(config: BigQueryAdapterConfig) {\n\t\tthis.client = new BigQuery({\n\t\t\tprojectId: config.projectId,\n\t\t\tkeyFilename: config.keyFilename,\n\t\t});\n\t\tthis.dataset = config.dataset;\n\t\tthis.location = config.location ?? 
\"US\";\n\t}\n\n\t/**\n\t * Insert deltas into the database in a single batch.\n\t * Idempotent via MERGE — existing deltaIds are silently skipped.\n\t */\n\tasync insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>> {\n\t\tif (deltas.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\t// Build MERGE source from UNION ALL of parameterised SELECTs\n\t\t\tconst params: Record<string, string> = {};\n\t\t\tconst selects: string[] = [];\n\n\t\t\tfor (let i = 0; i < deltas.length; i++) {\n\t\t\t\tconst d = deltas[i]!;\n\t\t\t\tparams[`did_${i}`] = d.deltaId;\n\t\t\t\tparams[`tbl_${i}`] = d.table;\n\t\t\t\tparams[`rid_${i}`] = d.rowId;\n\t\t\t\tparams[`col_${i}`] = JSON.stringify(d.columns);\n\t\t\t\tparams[`hlc_${i}`] = d.hlc.toString();\n\t\t\t\tparams[`cid_${i}`] = d.clientId;\n\t\t\t\tparams[`op_${i}`] = d.op;\n\n\t\t\t\tselects.push(\n\t\t\t\t\t`SELECT @did_${i} AS delta_id, @tbl_${i} AS \\`table\\`, @rid_${i} AS row_id, @col_${i} AS columns, CAST(@hlc_${i} AS INT64) AS hlc, @cid_${i} AS client_id, @op_${i} AS op`,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst sql = `MERGE \\`${this.dataset}.lakesync_deltas\\` AS target\nUSING (${selects.join(\" UNION ALL \")}) AS source\nON target.delta_id = source.delta_id\nWHEN NOT MATCHED THEN INSERT (delta_id, \\`table\\`, row_id, columns, hlc, client_id, op)\nVALUES (source.delta_id, source.\\`table\\`, source.row_id, source.columns, source.hlc, source.client_id, source.op)`;\n\n\t\t\tawait this.client.query({ query: sql, params, location: this.location });\n\t\t}, \"Failed to insert deltas\");\n\t}\n\n\t/**\n\t * Query deltas with HLC greater than the given timestamp, optionally filtered by table.\n\t */\n\tasync queryDeltasSince(\n\t\thlc: HLCTimestamp,\n\t\ttables?: string[],\n\t): Promise<Result<RowDelta[], AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tlet sql: string;\n\t\t\tconst params: Record<string, string | string[]> = {\n\t\t\t\tsinceHlc: hlc.toString(),\n\t\t\t};\n\n\t\t\tif (tables && tables.length > 0) {\n\t\t\t\tsql = `SELECT delta_id, \\`table\\`, row_id, columns, hlc, client_id, op\nFROM \\`${this.dataset}.lakesync_deltas\\`\nWHERE hlc > CAST(@sinceHlc AS INT64) AND \\`table\\` IN UNNEST(@tables)\nORDER BY hlc ASC`;\n\t\t\t\tparams.tables = tables;\n\t\t\t} else {\n\t\t\t\tsql = `SELECT delta_id, \\`table\\`, row_id, columns, hlc, client_id, op\nFROM \\`${this.dataset}.lakesync_deltas\\`\nWHERE hlc > CAST(@sinceHlc AS INT64)\nORDER BY hlc ASC`;\n\t\t\t}\n\n\t\t\tconst [rows] = await this.client.query({\n\t\t\t\tquery: sql,\n\t\t\t\tparams,\n\t\t\t\tlocation: this.location,\n\t\t\t});\n\t\t\treturn (rows as BigQueryDeltaRow[]).map(rowToRowDelta);\n\t\t}, \"Failed to query deltas\");\n\t}\n\n\t/**\n\t * Get the latest merged state for a specific row using column-level LWW.\n\t * Returns null if no deltas exist for this row.\n\t */\n\tasync getLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tconst sql = `SELECT columns, hlc, client_id, op\nFROM \\`${this.dataset}.lakesync_deltas\\`\nWHERE \\`table\\` = @tbl AND row_id = @rid\nORDER BY hlc ASC`;\n\n\t\t\tconst [rows] = await this.client.query({\n\t\t\t\tquery: sql,\n\t\t\t\tparams: { tbl: table, rid: rowId },\n\t\t\t\tlocation: this.location,\n\t\t\t});\n\t\t\treturn mergeLatestState(rows as BigQueryDeltaRow[]);\n\t\t}, `Failed to get latest state for ${table}:${rowId}`);\n\t}\n\n\t/**\n\t * Ensure the 
BigQuery dataset and lakesync_deltas table exist.\n\t * The `schema` parameter is accepted for interface compliance but the\n\t * internal table structure is fixed (deltas store column data as JSON).\n\t */\n\tasync ensureSchema(_schema: TableSchema): Promise<Result<void, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\t// Create dataset if it doesn't exist\n\t\t\tconst datasetRef = this.client.dataset(this.dataset);\n\t\t\tconst [datasetExists] = await datasetRef.exists();\n\t\t\tif (!datasetExists) {\n\t\t\t\tawait this.client.createDataset(this.dataset, {\n\t\t\t\t\tlocation: this.location,\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Create the deltas table\n\t\t\tawait this.client.query({\n\t\t\t\tquery: `CREATE TABLE IF NOT EXISTS \\`${this.dataset}.lakesync_deltas\\` (\n\tdelta_id STRING NOT NULL,\n\t\\`table\\` STRING NOT NULL,\n\trow_id STRING NOT NULL,\n\tcolumns JSON NOT NULL,\n\thlc INT64 NOT NULL,\n\tclient_id STRING NOT NULL,\n\top STRING NOT NULL\n)\nCLUSTER BY \\`table\\`, hlc`,\n\t\t\t\tlocation: this.location,\n\t\t\t});\n\t\t}, \"Failed to ensure schema\");\n\t}\n\n\t/**\n\t * Materialise deltas into destination tables.\n\t *\n\t * For each affected table, queries the full delta history for touched rows,\n\t * merges to latest state via column-level LWW, then upserts live rows and\n\t * deletes tombstoned rows. The consumer-owned `props` column is never\n\t * touched on UPDATE.\n\t */\n\tasync materialise(\n\t\tdeltas: RowDelta[],\n\t\tschemas: ReadonlyArray<TableSchema>,\n\t): Promise<Result<void, AdapterError>> {\n\t\tif (deltas.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\tconst tableRowIds = groupDeltasByTable(deltas);\n\t\t\tconst schemaIndex = buildSchemaIndex(schemas);\n\n\t\t\tfor (const [sourceTable, rowIds] of tableRowIds) {\n\t\t\t\tconst schema = schemaIndex.get(sourceTable);\n\t\t\t\tif (!schema) continue;\n\n\t\t\t\tconst pk = resolvePrimaryKey(schema);\n\t\t\t\tconst conflictCols = resolveConflictColumns(schema);\n\t\t\t\tconst soft = isSoftDelete(schema);\n\n\t\t\t\t// Ensure destination table exists\n\t\t\t\tconst colDefs = schema.columns\n\t\t\t\t\t.map((c) => `${c.name} ${lakeSyncTypeToBigQuery(c.type)}`)\n\t\t\t\t\t.join(\", \");\n\t\t\t\tconst deletedAtCol = soft ? `,\\n\\tdeleted_at TIMESTAMP` : \"\";\n\t\t\t\tawait this.client.query({\n\t\t\t\t\tquery: `CREATE TABLE IF NOT EXISTS \\`${this.dataset}.${schema.table}\\` (\n\trow_id STRING NOT NULL,\n\t${colDefs},\n\tprops JSON DEFAULT '{}'${deletedAtCol},\n\tsynced_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP()\n)\nCLUSTER BY ${pk.map((c) => (c === \"row_id\" ? 
\"row_id\" : c)).join(\", \")}`,\n\t\t\t\t\tlocation: this.location,\n\t\t\t\t});\n\n\t\t\t\t// Query delta history for affected rows\n\t\t\t\tconst rowIdArray = [...rowIds];\n\t\t\t\tconst [deltaRows] = await this.client.query({\n\t\t\t\t\tquery: `SELECT row_id, columns, op FROM \\`${this.dataset}.lakesync_deltas\\`\nWHERE \\`table\\` = @sourceTable AND row_id IN UNNEST(@rowIds)\nORDER BY hlc ASC`,\n\t\t\t\t\tparams: { sourceTable, rowIds: rowIdArray },\n\t\t\t\t\tlocation: this.location,\n\t\t\t\t});\n\n\t\t\t\t// Group by row_id and merge to latest state\n\t\t\t\tconst rowGroups = new Map<string, Array<{ columns: string | ColumnDelta[]; op: string }>>();\n\t\t\t\tfor (const row of deltaRows as Array<{\n\t\t\t\t\trow_id: string;\n\t\t\t\t\tcolumns: string | ColumnDelta[];\n\t\t\t\t\top: string;\n\t\t\t\t}>) {\n\t\t\t\t\tlet group = rowGroups.get(row.row_id);\n\t\t\t\t\tif (!group) {\n\t\t\t\t\t\tgroup = [];\n\t\t\t\t\t\trowGroups.set(row.row_id, group);\n\t\t\t\t\t}\n\t\t\t\t\tgroup.push({ columns: row.columns, op: row.op });\n\t\t\t\t}\n\n\t\t\t\tconst upserts: Array<{ rowId: string; state: Record<string, unknown> }> = [];\n\t\t\t\tconst deleteRowIds: string[] = [];\n\n\t\t\t\tfor (const [rowId, group] of rowGroups) {\n\t\t\t\t\tconst state = mergeLatestState(group);\n\t\t\t\t\tif (state === null) {\n\t\t\t\t\t\tdeleteRowIds.push(rowId);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tupserts.push({ rowId, state });\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// MERGE upserts\n\t\t\t\tif (upserts.length > 0) {\n\t\t\t\t\tconst params: Record<string, unknown> = {};\n\t\t\t\t\tconst selects: string[] = [];\n\n\t\t\t\t\tfor (let i = 0; i < upserts.length; i++) {\n\t\t\t\t\t\tconst u = upserts[i]!;\n\t\t\t\t\t\tparams[`rid_${i}`] = u.rowId;\n\t\t\t\t\t\tfor (const col of schema.columns) {\n\t\t\t\t\t\t\tparams[`c${schema.columns.indexOf(col)}_${i}`] = u.state[col.name] ?? null;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tconst colSelects = schema.columns\n\t\t\t\t\t\t\t.map((col, ci) => `@c${ci}_${i} AS ${col.name}`)\n\t\t\t\t\t\t\t.join(\", \");\n\t\t\t\t\t\tconst deletedAtSelect = soft ? \", CAST(NULL AS TIMESTAMP) AS deleted_at\" : \"\";\n\t\t\t\t\t\tselects.push(\n\t\t\t\t\t\t\t`SELECT @rid_${i} AS row_id, ${colSelects}${deletedAtSelect}, CURRENT_TIMESTAMP() AS synced_at`,\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\tconst mergeOn = conflictCols\n\t\t\t\t\t\t.map((c) => `t.${c === \"row_id\" ? \"row_id\" : c} = s.${c === \"row_id\" ? \"row_id\" : c}`)\n\t\t\t\t\t\t.join(\" AND \");\n\n\t\t\t\t\tconst updateSet = schema.columns.map((col) => `${col.name} = s.${col.name}`).join(\", \");\n\t\t\t\t\tconst softUpdateExtra = soft ? \", deleted_at = s.deleted_at\" : \"\";\n\n\t\t\t\t\tconst insertColsList = [\n\t\t\t\t\t\t\"row_id\",\n\t\t\t\t\t\t...schema.columns.map((c) => c.name),\n\t\t\t\t\t\t\"props\",\n\t\t\t\t\t\t...(soft ? [\"deleted_at\"] : []),\n\t\t\t\t\t\t\"synced_at\",\n\t\t\t\t\t].join(\", \");\n\t\t\t\t\tconst insertValsList = [\n\t\t\t\t\t\t\"s.row_id\",\n\t\t\t\t\t\t...schema.columns.map((c) => `s.${c.name}`),\n\t\t\t\t\t\t\"'{}'\",\n\t\t\t\t\t\t...(soft ? 
[\"s.deleted_at\"] : []),\n\t\t\t\t\t\t\"s.synced_at\",\n\t\t\t\t\t].join(\", \");\n\n\t\t\t\t\tconst mergeSql = `MERGE \\`${this.dataset}.${schema.table}\\` AS t\nUSING (${selects.join(\" UNION ALL \")}) AS s\nON ${mergeOn}\nWHEN MATCHED THEN UPDATE SET ${updateSet}${softUpdateExtra}, synced_at = s.synced_at\nWHEN NOT MATCHED THEN INSERT (${insertColsList})\nVALUES (${insertValsList})`;\n\n\t\t\t\t\tawait this.client.query({\n\t\t\t\t\t\tquery: mergeSql,\n\t\t\t\t\t\tparams,\n\t\t\t\t\t\tlocation: this.location,\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// DELETE / soft-delete tombstoned rows\n\t\t\t\tif (deleteRowIds.length > 0) {\n\t\t\t\t\tif (soft) {\n\t\t\t\t\t\tawait this.client.query({\n\t\t\t\t\t\t\tquery: `UPDATE \\`${this.dataset}.${schema.table}\\` SET deleted_at = CURRENT_TIMESTAMP(), synced_at = CURRENT_TIMESTAMP() WHERE row_id IN UNNEST(@rowIds)`,\n\t\t\t\t\t\t\tparams: { rowIds: deleteRowIds },\n\t\t\t\t\t\t\tlocation: this.location,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else {\n\t\t\t\t\t\tawait this.client.query({\n\t\t\t\t\t\t\tquery: `DELETE FROM \\`${this.dataset}.${schema.table}\\` WHERE row_id IN UNNEST(@rowIds)`,\n\t\t\t\t\t\t\tparams: { rowIds: deleteRowIds },\n\t\t\t\t\t\t\tlocation: this.location,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}, \"Failed to materialise deltas\");\n\t}\n\n\t/**\n\t * No-op — BigQuery client is HTTP-based with no persistent connections.\n\t */\n\tasync close(): Promise<void> {\n\t\t// No-op: BigQuery uses HTTP requests, no connection pool to close\n\t}\n}\n","import {\n\ttype AdapterError,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\n\nimport type { DatabaseAdapter } from \"./db-types\";\n\n/** A routing rule that maps specific tables to a database adapter. */\nexport interface CompositeRoute {\n\t/** Tables handled by this adapter */\n\ttables: string[];\n\t/** The adapter for these tables */\n\tadapter: DatabaseAdapter;\n}\n\n/** Configuration for CompositeAdapter routing. */\nexport interface CompositeAdapterConfig {\n\t/** Table-to-adapter routing rules */\n\troutes: CompositeRoute[];\n\t/** Fallback adapter for tables not matching any route */\n\tdefaultAdapter: DatabaseAdapter;\n}\n\n/**\n * Routes database operations to different adapters based on table name.\n * Implements DatabaseAdapter so it can be used as a drop-in replacement.\n */\nexport class CompositeAdapter implements DatabaseAdapter {\n\tprivate readonly routeMap: Map<string, DatabaseAdapter>;\n\tprivate readonly adapters: Set<DatabaseAdapter>;\n\tprivate readonly defaultAdapter: DatabaseAdapter;\n\n\tconstructor(config: CompositeAdapterConfig) {\n\t\tthis.routeMap = new Map();\n\t\tthis.adapters = new Set();\n\t\tthis.defaultAdapter = config.defaultAdapter;\n\t\tthis.adapters.add(config.defaultAdapter);\n\n\t\tfor (const route of config.routes) {\n\t\t\tthis.adapters.add(route.adapter);\n\t\t\tfor (const table of route.tables) {\n\t\t\t\tif (this.routeMap.has(table)) {\n\t\t\t\t\tthrow new Error(`Duplicate table route: \"${table}\" appears in multiple routes`);\n\t\t\t\t}\n\t\t\t\tthis.routeMap.set(table, route.adapter);\n\t\t\t}\n\t\t}\n\t}\n\n\t/** Insert deltas, routing each group to the correct adapter by table. */\n\tasync insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>> {\n\t\tconst groups = new Map<DatabaseAdapter, RowDelta[]>();\n\n\t\tfor (const delta of deltas) {\n\t\t\tconst adapter = this.routeMap.get(delta.table) ?? 
this.defaultAdapter;\n\t\t\tlet group = groups.get(adapter);\n\t\t\tif (!group) {\n\t\t\t\tgroup = [];\n\t\t\t\tgroups.set(adapter, group);\n\t\t\t}\n\t\t\tgroup.push(delta);\n\t\t}\n\n\t\tfor (const [adapter, group] of groups) {\n\t\t\tconst result = await adapter.insertDeltas(group);\n\t\t\tif (!result.ok) {\n\t\t\t\treturn result;\n\t\t\t}\n\t\t}\n\n\t\treturn Ok(undefined);\n\t}\n\n\t/** Query deltas since a given HLC, fanning out to relevant adapters and merging results. */\n\tasync queryDeltasSince(\n\t\thlc: HLCTimestamp,\n\t\ttables?: string[],\n\t): Promise<Result<RowDelta[], AdapterError>> {\n\t\tconst adapterSet = new Set<DatabaseAdapter>();\n\t\tconst adapterTables = new Map<DatabaseAdapter, string[]>();\n\n\t\tif (tables && tables.length > 0) {\n\t\t\tfor (const table of tables) {\n\t\t\t\tconst adapter = this.routeMap.get(table) ?? this.defaultAdapter;\n\t\t\t\tadapterSet.add(adapter);\n\t\t\t\tlet existing = adapterTables.get(adapter);\n\t\t\t\tif (!existing) {\n\t\t\t\t\texisting = [];\n\t\t\t\t\tadapterTables.set(adapter, existing);\n\t\t\t\t}\n\t\t\t\texisting.push(table);\n\t\t\t}\n\t\t} else {\n\t\t\tfor (const adapter of this.adapters) {\n\t\t\t\tadapterSet.add(adapter);\n\t\t\t}\n\t\t}\n\n\t\tconst merged: RowDelta[] = [];\n\n\t\tfor (const adapter of adapterSet) {\n\t\t\tconst filterTables = adapterTables.get(adapter);\n\t\t\tconst result = await adapter.queryDeltasSince(hlc, filterTables);\n\t\t\tif (!result.ok) {\n\t\t\t\treturn result;\n\t\t\t}\n\t\t\tmerged.push(...result.value);\n\t\t}\n\n\t\tmerged.sort((a, b) => (a.hlc < b.hlc ? -1 : a.hlc > b.hlc ? 1 : 0));\n\n\t\treturn Ok(merged);\n\t}\n\n\t/** Get the latest state for a row, routing to the correct adapter. */\n\tasync getLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>> {\n\t\tconst adapter = this.routeMap.get(table) ?? this.defaultAdapter;\n\t\treturn adapter.getLatestState(table, rowId);\n\t}\n\n\t/** Ensure schema exists, routing to the correct adapter for the table. */\n\tasync ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>> {\n\t\tconst adapter = this.routeMap.get(schema.table) ?? this.defaultAdapter;\n\t\treturn adapter.ensureSchema(schema);\n\t}\n\n\t/** Close all unique adapters (routes + default, deduplicated). */\n\tasync close(): Promise<void> {\n\t\tfor (const adapter of this.adapters) {\n\t\t\tawait adapter.close();\n\t\t}\n\t}\n}\n","import {\n\ttype AdapterError,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\nimport mysql from \"mysql2/promise\";\nimport type { DatabaseAdapter, DatabaseAdapterConfig } from \"./db-types\";\nimport type { Materialisable } from \"./materialise\";\nimport {\n\tbuildSchemaIndex,\n\tgroupDeltasByTable,\n\tisSoftDelete,\n\tresolvePrimaryKey,\n} from \"./materialise\";\nimport { mergeLatestState, wrapAsync } from \"./shared\";\n\n/**\n * Map a LakeSync column type to a MySQL column definition.\n */\nconst MYSQL_TYPE_MAP: Record<TableSchema[\"columns\"][number][\"type\"], string> = {\n\tstring: \"TEXT\",\n\tnumber: \"DOUBLE\",\n\tboolean: \"TINYINT(1)\",\n\tjson: \"JSON\",\n\tnull: \"TEXT\",\n};\n\nfunction lakeSyncTypeToMySQL(type: TableSchema[\"columns\"][number][\"type\"]): string {\n\treturn MYSQL_TYPE_MAP[type];\n}\n\n/**\n * MySQL database adapter for LakeSync.\n *\n * Stores deltas in a `lakesync_deltas` table using INSERT IGNORE for\n * idempotent writes. 
All public methods return `Result` and never throw.\n * Uses mysql2/promise connection pool for async operations.\n */\nexport class MySQLAdapter implements DatabaseAdapter, Materialisable {\n\t/** @internal */\n\treadonly pool: mysql.Pool;\n\n\tconstructor(config: DatabaseAdapterConfig) {\n\t\tthis.pool = mysql.createPool(config.connectionString);\n\t}\n\n\t/**\n\t * Insert deltas into the database in a single batch.\n\t * Uses INSERT IGNORE for idempotent writes — duplicate deltaIds are silently skipped.\n\t */\n\tasync insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>> {\n\t\tif (deltas.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\tconst sql = `INSERT IGNORE INTO lakesync_deltas (delta_id, \\`table\\`, row_id, columns, hlc, client_id, op) VALUES ${deltas.map(() => \"(?, ?, ?, ?, ?, ?, ?)\").join(\", \")}`;\n\n\t\t\tconst values: unknown[] = [];\n\t\t\tfor (const delta of deltas) {\n\t\t\t\tvalues.push(\n\t\t\t\t\tdelta.deltaId,\n\t\t\t\t\tdelta.table,\n\t\t\t\t\tdelta.rowId,\n\t\t\t\t\tJSON.stringify(delta.columns),\n\t\t\t\t\tdelta.hlc.toString(),\n\t\t\t\t\tdelta.clientId,\n\t\t\t\t\tdelta.op,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tawait this.pool.execute(sql, values);\n\t\t}, \"Failed to insert deltas\");\n\t}\n\n\t/**\n\t * Query deltas with HLC greater than the given timestamp.\n\t * Optionally filtered by table name(s).\n\t */\n\tasync queryDeltasSince(\n\t\thlc: HLCTimestamp,\n\t\ttables?: string[],\n\t): Promise<Result<RowDelta[], AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tlet sql =\n\t\t\t\t\"SELECT delta_id, `table`, row_id, columns, hlc, client_id, op FROM lakesync_deltas WHERE hlc > ?\";\n\t\t\tconst params: unknown[] = [hlc.toString()];\n\n\t\t\tif (tables && tables.length > 0) {\n\t\t\t\tsql += ` AND \\`table\\` IN (${tables.map(() => \"?\").join(\", \")})`;\n\t\t\t\tparams.push(...tables);\n\t\t\t}\n\n\t\t\tsql += \" ORDER BY hlc ASC\";\n\n\t\t\tconst [rows] = await this.pool.execute(sql, params);\n\t\t\treturn (rows as MySQLDeltaRow[]).map(rowToDelta);\n\t\t}, \"Failed to query deltas\");\n\t}\n\n\t/**\n\t * Get the latest merged state for a specific row using column-level LWW.\n\t * Returns null if no deltas exist or if the row is tombstoned by DELETE.\n\t */\n\tasync getLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tconst sql =\n\t\t\t\t\"SELECT columns, hlc, client_id, op FROM lakesync_deltas WHERE `table` = ? AND row_id = ? ORDER BY hlc ASC\";\n\t\t\tconst [rows] = await this.pool.execute(sql, [table, rowId]);\n\t\t\treturn mergeLatestState(rows as MySQLDeltaRow[]);\n\t\t}, `Failed to get latest state for ${table}:${rowId}`);\n\t}\n\n\t/**\n\t * Ensure the database schema exists. 
Creates the lakesync_deltas table\n\t * and a user table matching the given TableSchema definition.\n\t */\n\tasync ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\t// Create the deltas table\n\t\t\tawait this.pool.execute(`\n\t\t\t\tCREATE TABLE IF NOT EXISTS lakesync_deltas (\n\t\t\t\t\tdelta_id VARCHAR(255) PRIMARY KEY,\n\t\t\t\t\t\\`table\\` VARCHAR(255) NOT NULL,\n\t\t\t\t\trow_id VARCHAR(255) NOT NULL,\n\t\t\t\t\tcolumns JSON NOT NULL,\n\t\t\t\t\thlc BIGINT NOT NULL,\n\t\t\t\t\tclient_id VARCHAR(255) NOT NULL,\n\t\t\t\t\top VARCHAR(50) NOT NULL DEFAULT 'upsert',\n\t\t\t\t\tINDEX idx_hlc (hlc),\n\t\t\t\t\tINDEX idx_table_row (\\`table\\`, row_id)\n\t\t\t\t)\n\t\t\t`);\n\n\t\t\t// Create the user-defined table\n\t\t\tconst columnDefs = schema.columns\n\t\t\t\t.map((col) => `\\`${col.name}\\` ${lakeSyncTypeToMySQL(col.type)}`)\n\t\t\t\t.join(\", \");\n\n\t\t\tawait this.pool.execute(\n\t\t\t\t`CREATE TABLE IF NOT EXISTS \\`${schema.table}\\` (row_id VARCHAR(255) PRIMARY KEY, ${columnDefs}, props JSON NOT NULL DEFAULT ('{}'), synced_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP)`,\n\t\t\t);\n\t\t}, `Failed to ensure schema for table ${schema.table}`);\n\t}\n\n\t/**\n\t * Materialise deltas into destination tables.\n\t *\n\t * For each table with a matching schema, merges delta history into the\n\t * latest row state and upserts into the destination table. Tombstoned\n\t * rows are soft-deleted (default) or hard-deleted. The `props` column\n\t * is never touched.\n\t */\n\tasync materialise(\n\t\tdeltas: RowDelta[],\n\t\tschemas: ReadonlyArray<TableSchema>,\n\t): Promise<Result<void, AdapterError>> {\n\t\tif (deltas.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\tconst grouped = groupDeltasByTable(deltas);\n\t\t\tconst schemaIndex = buildSchemaIndex(schemas);\n\n\t\t\tfor (const [tableName, rowIds] of grouped) {\n\t\t\t\tconst schema = schemaIndex.get(tableName);\n\t\t\t\tif (!schema) continue;\n\n\t\t\t\tconst pk = resolvePrimaryKey(schema);\n\t\t\t\tconst soft = isSoftDelete(schema);\n\n\t\t\t\t// Ensure destination table exists\n\t\t\t\tconst typedCols = schema.columns\n\t\t\t\t\t.map((col) => `\\`${col.name}\\` ${lakeSyncTypeToMySQL(col.type)}`)\n\t\t\t\t\t.join(\", \");\n\n\t\t\t\tconst pkConstraint = `PRIMARY KEY (${pk.map((c) => `\\`${c}\\``).join(\", \")})`;\n\t\t\t\tconst deletedAtCol = soft ? `, deleted_at TIMESTAMP NULL` : \"\";\n\t\t\t\tconst uniqueConstraint = schema.externalIdColumn\n\t\t\t\t\t? `, UNIQUE KEY (\\`${schema.externalIdColumn}\\`)`\n\t\t\t\t\t: \"\";\n\n\t\t\t\tawait this.pool.execute(\n\t\t\t\t\t`CREATE TABLE IF NOT EXISTS \\`${schema.table}\\` (row_id VARCHAR(255) NOT NULL, ${typedCols}, props JSON NOT NULL DEFAULT ('{}')${deletedAtCol}, synced_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, ${pkConstraint}${uniqueConstraint})`,\n\t\t\t\t);\n\n\t\t\t\t// Query delta history for affected rows\n\t\t\t\tconst rowIdArray = [...rowIds];\n\t\t\t\tconst placeholders = rowIdArray.map(() => \"?\").join(\", \");\n\t\t\t\tconst [rows] = await this.pool.execute(\n\t\t\t\t\t`SELECT row_id, columns, op FROM lakesync_deltas WHERE \\`table\\` = ? 
AND row_id IN (${placeholders}) ORDER BY hlc ASC`,\n\t\t\t\t\t[tableName, ...rowIdArray],\n\t\t\t\t);\n\n\t\t\t\t// Group by row_id and merge\n\t\t\t\tconst byRow = new Map<string, Array<{ columns: string; op: string }>>();\n\t\t\t\tfor (const row of rows as Array<{ row_id: string; columns: string; op: string }>) {\n\t\t\t\t\tlet list = byRow.get(row.row_id);\n\t\t\t\t\tif (!list) {\n\t\t\t\t\t\tlist = [];\n\t\t\t\t\t\tbyRow.set(row.row_id, list);\n\t\t\t\t\t}\n\t\t\t\t\tlist.push(row);\n\t\t\t\t}\n\n\t\t\t\tconst upserts: Array<{ rowId: string; state: Record<string, unknown> }> = [];\n\t\t\t\tconst deleteIds: string[] = [];\n\n\t\t\t\tfor (const [rowId, rowDeltas] of byRow) {\n\t\t\t\t\tconst state = mergeLatestState(rowDeltas);\n\t\t\t\t\tif (state === null) {\n\t\t\t\t\t\tdeleteIds.push(rowId);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tupserts.push({ rowId, state });\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// UPSERT rows\n\t\t\t\tif (upserts.length > 0) {\n\t\t\t\t\tconst cols = schema.columns.map((c) => c.name);\n\t\t\t\t\tconst valuePlaceholders = soft\n\t\t\t\t\t\t? upserts.map(() => `(?, ${cols.map(() => \"?\").join(\", \")}, NULL, NOW())`).join(\", \")\n\t\t\t\t\t\t: upserts.map(() => `(?, ${cols.map(() => \"?\").join(\", \")}, NOW())`).join(\", \");\n\n\t\t\t\t\tconst values: unknown[] = [];\n\t\t\t\t\tfor (const { rowId, state } of upserts) {\n\t\t\t\t\t\tvalues.push(rowId);\n\t\t\t\t\t\tfor (const col of cols) {\n\t\t\t\t\t\t\tvalues.push(state[col] ?? null);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tconst updateCols = cols.map((c) => `\\`${c}\\` = VALUES(\\`${c}\\`)`).join(\", \");\n\t\t\t\t\tconst softUpdateExtra = soft ? \", deleted_at = NULL\" : \"\";\n\t\t\t\t\tconst colList = soft\n\t\t\t\t\t\t? `row_id, ${cols.map((c) => `\\`${c}\\``).join(\", \")}, deleted_at, synced_at`\n\t\t\t\t\t\t: `row_id, ${cols.map((c) => `\\`${c}\\``).join(\", \")}, synced_at`;\n\n\t\t\t\t\tawait this.pool.execute(\n\t\t\t\t\t\t`INSERT INTO \\`${schema.table}\\` (${colList}) VALUES ${valuePlaceholders} ON DUPLICATE KEY UPDATE ${updateCols}${softUpdateExtra}, synced_at = VALUES(synced_at)`,\n\t\t\t\t\t\tvalues,\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// DELETE / soft-delete tombstoned rows\n\t\t\t\tif (deleteIds.length > 0) {\n\t\t\t\t\tconst delPlaceholders = deleteIds.map(() => \"?\").join(\", \");\n\t\t\t\t\tif (soft) {\n\t\t\t\t\t\tawait this.pool.execute(\n\t\t\t\t\t\t\t`UPDATE \\`${schema.table}\\` SET deleted_at = NOW(), synced_at = NOW() WHERE row_id IN (${delPlaceholders})`,\n\t\t\t\t\t\t\tdeleteIds,\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tawait this.pool.execute(\n\t\t\t\t\t\t\t`DELETE FROM \\`${schema.table}\\` WHERE row_id IN (${delPlaceholders})`,\n\t\t\t\t\t\t\tdeleteIds,\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}, \"Failed to materialise deltas\");\n\t}\n\n\t/** Close the database connection pool and release resources. */\n\tasync close(): Promise<void> {\n\t\tawait this.pool.end();\n\t}\n}\n\n/** Shape of a row returned from the lakesync_deltas table. */\ninterface MySQLDeltaRow {\n\tdelta_id: string;\n\ttable: string;\n\trow_id: string;\n\tcolumns: string;\n\thlc: string | bigint;\n\tclient_id: string;\n\top: string;\n}\n\n/**\n * Convert a raw MySQL row into a RowDelta.\n * Handles both string and bigint HLC representations.\n */\nfunction rowToDelta(row: MySQLDeltaRow): RowDelta {\n\treturn {\n\t\tdeltaId: row.delta_id,\n\t\ttable: row.table,\n\t\trowId: row.row_id,\n\t\tcolumns: typeof row.columns === \"string\" ? 
JSON.parse(row.columns) : row.columns,\n\t\thlc: BigInt(row.hlc) as HLCTimestamp,\n\t\tclientId: row.client_id,\n\t\top: row.op as RowDelta[\"op\"],\n\t};\n}\n","import {\n\ttype AdapterError,\n\ttype ColumnDelta,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\nimport { Pool, type PoolConfig } from \"pg\";\nimport type { DatabaseAdapter, DatabaseAdapterConfig } from \"./db-types\";\nimport type { Materialisable } from \"./materialise\";\nimport {\n\tbuildSchemaIndex,\n\tgroupDeltasByTable,\n\tisSoftDelete,\n\tresolveConflictColumns,\n\tresolvePrimaryKey,\n} from \"./materialise\";\nimport { mergeLatestState, wrapAsync } from \"./shared\";\n\nconst POSTGRES_TYPE_MAP: Record<TableSchema[\"columns\"][number][\"type\"], string> = {\n\tstring: \"TEXT\",\n\tnumber: \"DOUBLE PRECISION\",\n\tboolean: \"BOOLEAN\",\n\tjson: \"JSONB\",\n\tnull: \"TEXT\",\n};\n\n/**\n * PostgreSQL database adapter for LakeSync.\n *\n * Stores deltas in a `lakesync_deltas` table using pg Pool.\n * All public methods return `Result` and never throw.\n */\nexport class PostgresAdapter implements DatabaseAdapter, Materialisable {\n\t/** @internal */\n\treadonly pool: Pool;\n\n\tconstructor(config: DatabaseAdapterConfig) {\n\t\tconst poolConfig: PoolConfig = {\n\t\t\tconnectionString: config.connectionString,\n\t\t};\n\t\tthis.pool = new Pool(poolConfig);\n\t}\n\n\t/**\n\t * Insert deltas into the database in a single batch.\n\t * Idempotent via `ON CONFLICT (delta_id) DO NOTHING`.\n\t */\n\tasync insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>> {\n\t\tif (deltas.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\t// Build a multi-row INSERT with parameterised values\n\t\t\tconst values: unknown[] = [];\n\t\t\tconst rows: string[] = [];\n\n\t\t\tfor (let i = 0; i < deltas.length; i++) {\n\t\t\t\tconst d = deltas[i]!;\n\t\t\t\tconst offset = i * 7;\n\t\t\t\trows.push(\n\t\t\t\t\t`($${offset + 1}, $${offset + 2}, $${offset + 3}, $${offset + 4}, $${offset + 5}, $${offset + 6}, $${offset + 7})`,\n\t\t\t\t);\n\t\t\t\tvalues.push(\n\t\t\t\t\td.deltaId,\n\t\t\t\t\td.table,\n\t\t\t\t\td.rowId,\n\t\t\t\t\tJSON.stringify(d.columns),\n\t\t\t\t\td.hlc.toString(),\n\t\t\t\t\td.clientId,\n\t\t\t\t\td.op,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst sql = `INSERT INTO lakesync_deltas (delta_id, \"table\", row_id, columns, hlc, client_id, op)\nVALUES ${rows.join(\", \")}\nON CONFLICT (delta_id) DO NOTHING`;\n\n\t\t\tawait this.pool.query(sql, values);\n\t\t}, \"Failed to insert deltas\");\n\t}\n\n\t/**\n\t * Query deltas with HLC greater than the given timestamp, optionally filtered by table.\n\t */\n\tasync queryDeltasSince(\n\t\thlc: HLCTimestamp,\n\t\ttables?: string[],\n\t): Promise<Result<RowDelta[], AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tlet sql: string;\n\t\t\tlet params: unknown[];\n\n\t\t\tif (tables && tables.length > 0) {\n\t\t\t\tsql = `SELECT delta_id, \"table\", row_id, columns, hlc, client_id, op\nFROM lakesync_deltas\nWHERE hlc > $1 AND \"table\" = ANY($2)\nORDER BY hlc ASC`;\n\t\t\t\tparams = [hlc.toString(), tables];\n\t\t\t} else {\n\t\t\t\tsql = `SELECT delta_id, \"table\", row_id, columns, hlc, client_id, op\nFROM lakesync_deltas\nWHERE hlc > $1\nORDER BY hlc ASC`;\n\t\t\t\tparams = [hlc.toString()];\n\t\t\t}\n\n\t\t\tconst result = await this.pool.query(sql, params);\n\t\t\treturn result.rows.map(rowToRowDelta);\n\t\t}, \"Failed to query deltas\");\n\t}\n\n\t/**\n\t * Get the 
latest merged state for a specific row using column-level LWW.\n\t * Returns null if no deltas exist for this row.\n\t */\n\tasync getLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tconst sql = `SELECT columns, hlc, client_id, op\nFROM lakesync_deltas\nWHERE \"table\" = $1 AND row_id = $2\nORDER BY hlc ASC`;\n\n\t\t\tconst result = await this.pool.query(sql, [table, rowId]);\n\t\t\treturn mergeLatestState(result.rows);\n\t\t}, `Failed to get latest state for ${table}:${rowId}`);\n\t}\n\n\t/**\n\t * Ensure the lakesync_deltas table and indices exist.\n\t * The `schema` parameter is accepted for interface compliance but the\n\t * internal table structure is fixed (deltas store column data as JSONB).\n\t */\n\tasync ensureSchema(_schema: TableSchema): Promise<Result<void, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tawait this.pool.query(`\nCREATE TABLE IF NOT EXISTS lakesync_deltas (\n\tdelta_id TEXT PRIMARY KEY,\n\t\"table\" TEXT NOT NULL,\n\trow_id TEXT NOT NULL,\n\tcolumns JSONB NOT NULL,\n\thlc BIGINT NOT NULL,\n\tclient_id TEXT NOT NULL,\n\top TEXT NOT NULL DEFAULT 'INSERT'\n);\nCREATE INDEX IF NOT EXISTS idx_lakesync_deltas_hlc ON lakesync_deltas (hlc);\nCREATE INDEX IF NOT EXISTS idx_lakesync_deltas_table_row ON lakesync_deltas (\"table\", row_id);\n`);\n\t\t}, \"Failed to ensure schema\");\n\t}\n\n\t/**\n\t * Materialise deltas into destination tables.\n\t *\n\t * For each table with a matching schema, merges delta history into the\n\t * latest row state and upserts into the destination table. Tombstoned\n\t * rows are deleted. The `props` column is never touched.\n\t */\n\tasync materialise(\n\t\tdeltas: RowDelta[],\n\t\tschemas: ReadonlyArray<TableSchema>,\n\t): Promise<Result<void, AdapterError>> {\n\t\tif (deltas.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\tconst grouped = groupDeltasByTable(deltas);\n\t\t\tconst schemaIndex = buildSchemaIndex(schemas);\n\n\t\t\tfor (const [tableName, rowIds] of grouped) {\n\t\t\t\tconst schema = schemaIndex.get(tableName);\n\t\t\t\tif (!schema) continue;\n\n\t\t\t\tconst dest = schema.table;\n\t\t\t\tconst pk = resolvePrimaryKey(schema);\n\t\t\t\tconst conflictCols = resolveConflictColumns(schema);\n\t\t\t\tconst soft = isSoftDelete(schema);\n\n\t\t\t\tconst columnDefs = schema.columns\n\t\t\t\t\t.map((c) => `\"${c.name}\" ${POSTGRES_TYPE_MAP[c.type]}`)\n\t\t\t\t\t.join(\", \");\n\n\t\t\t\tconst pkConstraint = `PRIMARY KEY (${pk.map((c) => `\"${c}\"`).join(\", \")})`;\n\t\t\t\tconst deletedAtCol = soft ? `,\\n\\tdeleted_at TIMESTAMPTZ` : \"\";\n\t\t\t\tconst uniqueConstraint = schema.externalIdColumn\n\t\t\t\t\t? `,\\n\\tUNIQUE (\"${schema.externalIdColumn}\")`\n\t\t\t\t\t: \"\";\n\n\t\t\t\tawait this.pool.query(\n\t\t\t\t\t`CREATE TABLE IF NOT EXISTS \"${dest}\" (\n\trow_id TEXT NOT NULL,\n\t${columnDefs},\n\tprops JSONB NOT NULL DEFAULT '{}'${deletedAtCol},\n\tsynced_at TIMESTAMPTZ NOT NULL DEFAULT now(),\n\t${pkConstraint}${uniqueConstraint}\n)`,\n\t\t\t\t);\n\n\t\t\t\tconst sourceTable = schema.sourceTable ?? 
schema.table;\n\t\t\t\tconst rowIdArray = [...rowIds];\n\n\t\t\t\tconst deltaResult = await this.pool.query(\n\t\t\t\t\t`SELECT row_id, columns, op FROM lakesync_deltas WHERE \"table\" = $1 AND row_id = ANY($2) ORDER BY hlc ASC`,\n\t\t\t\t\t[sourceTable, rowIdArray],\n\t\t\t\t);\n\n\t\t\t\t// Group results by row_id\n\t\t\t\tconst byRowId = new Map<string, Array<{ columns: string | ColumnDelta[]; op: string }>>();\n\t\t\t\tfor (const row of deltaResult.rows) {\n\t\t\t\t\tconst rid = row.row_id as string;\n\t\t\t\t\tlet arr = byRowId.get(rid);\n\t\t\t\t\tif (!arr) {\n\t\t\t\t\t\tarr = [];\n\t\t\t\t\t\tbyRowId.set(rid, arr);\n\t\t\t\t\t}\n\t\t\t\t\tarr.push(row as { columns: string | ColumnDelta[]; op: string });\n\t\t\t\t}\n\n\t\t\t\tconst upserts: Array<{ rowId: string; state: Record<string, unknown> }> = [];\n\t\t\t\tconst deleteIds: string[] = [];\n\n\t\t\t\tfor (const [rowId, rows] of byRowId) {\n\t\t\t\t\tconst state = mergeLatestState(rows);\n\t\t\t\t\tif (state !== null) {\n\t\t\t\t\t\tupserts.push({ rowId, state });\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdeleteIds.push(rowId);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (upserts.length > 0) {\n\t\t\t\t\tconst colNames = schema.columns.map((c) => c.name);\n\t\t\t\t\tconst baseCols = [\"row_id\", ...colNames];\n\t\t\t\t\tconst allCols = soft\n\t\t\t\t\t\t? [...baseCols, \"deleted_at\", \"synced_at\"]\n\t\t\t\t\t\t: [...baseCols, \"synced_at\"];\n\t\t\t\t\tconst colList = allCols.map((c) => `\"${c}\"`).join(\", \");\n\n\t\t\t\t\tconst values: unknown[] = [];\n\t\t\t\t\tconst valueRows: string[] = [];\n\t\t\t\t\tconst paramsPerRow = allCols.length;\n\n\t\t\t\t\tfor (let i = 0; i < upserts.length; i++) {\n\t\t\t\t\t\tconst u = upserts[i]!;\n\t\t\t\t\t\tconst offset = i * paramsPerRow;\n\t\t\t\t\t\tconst placeholders = allCols.map((_, j) => `$${offset + j + 1}`);\n\t\t\t\t\t\tvalueRows.push(`(${placeholders.join(\", \")})`);\n\n\t\t\t\t\t\tvalues.push(u.rowId);\n\t\t\t\t\t\tfor (const col of colNames) {\n\t\t\t\t\t\t\tvalues.push(u.state[col] ?? null);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (soft) values.push(null); // deleted_at = NULL (un-delete)\n\t\t\t\t\t\tvalues.push(new Date());\n\t\t\t\t\t}\n\n\t\t\t\t\tconst conflictList = conflictCols.map((c) => `\"${c}\"`).join(\", \");\n\t\t\t\t\tconst updateCols = soft\n\t\t\t\t\t\t? [...colNames, \"deleted_at\", \"synced_at\"]\n\t\t\t\t\t\t: [...colNames, \"synced_at\"];\n\t\t\t\t\tconst updateSet = updateCols.map((c) => `\"${c}\" = EXCLUDED.\"${c}\"`).join(\", \");\n\n\t\t\t\t\tawait this.pool.query(\n\t\t\t\t\t\t`INSERT INTO \"${dest}\" (${colList}) VALUES ${valueRows.join(\", \")} ON CONFLICT (${conflictList}) DO UPDATE SET ${updateSet}`,\n\t\t\t\t\t\tvalues,\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tif (deleteIds.length > 0) {\n\t\t\t\t\tif (soft) {\n\t\t\t\t\t\tawait this.pool.query(\n\t\t\t\t\t\t\t`UPDATE \"${dest}\" SET deleted_at = now(), synced_at = now() WHERE row_id = ANY($1)`,\n\t\t\t\t\t\t\t[deleteIds],\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tawait this.pool.query(`DELETE FROM \"${dest}\" WHERE row_id = ANY($1)`, [deleteIds]);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}, \"Failed to materialise deltas\");\n\t}\n\n\t/** Close the database connection pool and release resources. */\n\tasync close(): Promise<void> {\n\t\tawait this.pool.end();\n\t}\n}\n\n/**\n * Convert a raw Postgres row into a RowDelta.\n */\nfunction rowToRowDelta(row: Record<string, unknown>): RowDelta {\n\tconst columns: ColumnDelta[] =\n\t\ttypeof row.columns === \"string\"\n\t\t\t? 
JSON.parse(row.columns as string)\n\t\t\t: (row.columns as ColumnDelta[]);\n\n\treturn {\n\t\tdeltaId: row.delta_id as string,\n\t\ttable: row.table as string,\n\t\trowId: row.row_id as string,\n\t\tcolumns,\n\t\thlc: BigInt(row.hlc as string) as HLCTimestamp,\n\t\tclientId: row.client_id as string,\n\t\top: row.op as RowDelta[\"op\"],\n\t};\n}\n","import { AdapterError, type ConnectorConfig, Err, Ok, type Result, toError } from \"@lakesync/core\";\nimport { BigQueryAdapter } from \"./bigquery\";\nimport type { DatabaseAdapter } from \"./db-types\";\nimport { MySQLAdapter } from \"./mysql\";\nimport { PostgresAdapter } from \"./postgres\";\n\n/**\n * Instantiate a {@link DatabaseAdapter} from a {@link ConnectorConfig}.\n *\n * Switches on `config.type` and creates the matching adapter using\n * the type-specific connection configuration. Returns an {@link AdapterError}\n * if the type-specific config is missing or the adapter constructor throws.\n *\n * @param config - Validated connector configuration.\n * @returns The instantiated adapter or an error.\n */\nexport function createDatabaseAdapter(\n\tconfig: ConnectorConfig,\n): Result<DatabaseAdapter, AdapterError> {\n\ttry {\n\t\tswitch (config.type) {\n\t\t\tcase \"postgres\": {\n\t\t\t\tif (!config.postgres) {\n\t\t\t\t\treturn Err(new AdapterError(\"Postgres connector config missing postgres field\"));\n\t\t\t\t}\n\t\t\t\treturn Ok(\n\t\t\t\t\tnew PostgresAdapter({\n\t\t\t\t\t\tconnectionString: config.postgres.connectionString,\n\t\t\t\t\t}),\n\t\t\t\t);\n\t\t\t}\n\t\t\tcase \"mysql\": {\n\t\t\t\tif (!config.mysql) {\n\t\t\t\t\treturn Err(new AdapterError(\"MySQL connector config missing mysql field\"));\n\t\t\t\t}\n\t\t\t\treturn Ok(\n\t\t\t\t\tnew MySQLAdapter({\n\t\t\t\t\t\tconnectionString: config.mysql.connectionString,\n\t\t\t\t\t}),\n\t\t\t\t);\n\t\t\t}\n\t\t\tcase \"bigquery\": {\n\t\t\t\tif (!config.bigquery) {\n\t\t\t\t\treturn Err(new AdapterError(\"BigQuery connector config missing bigquery field\"));\n\t\t\t\t}\n\t\t\t\treturn Ok(\n\t\t\t\t\tnew BigQueryAdapter({\n\t\t\t\t\t\tprojectId: config.bigquery.projectId,\n\t\t\t\t\t\tdataset: config.bigquery.dataset,\n\t\t\t\t\t\tkeyFilename: config.bigquery.keyFilename,\n\t\t\t\t\t\tlocation: config.bigquery.location,\n\t\t\t\t\t}),\n\t\t\t\t);\n\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn Err(new AdapterError(`Unsupported connector type: ${config.type}`));\n\t\t}\n\t} catch (err: unknown) {\n\t\treturn Err(new AdapterError(`Failed to create adapter: ${toError(err).message}`));\n\t}\n}\n","import {\n\ttype AdapterError,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\n\nimport type { DatabaseAdapter } from \"./db-types\";\nimport type { Materialisable } from \"./materialise\";\nimport { isMaterialisable } from \"./materialise\";\n\n/** Configuration for the FanOutAdapter. */\nexport interface FanOutAdapterConfig {\n\t/** The primary adapter that handles all reads and authoritative writes. */\n\tprimary: DatabaseAdapter;\n\t/** Secondary adapters that receive replicated writes on a best-effort basis. */\n\tsecondaries: DatabaseAdapter[];\n}\n\n/**\n * Writes to a primary adapter synchronously and replicates to secondary\n * adapters asynchronously. 
Reads always go to the primary.\n *\n * Secondary failures are silently caught and never affect the return value.\n * Use case: write to Postgres (fast, operational), replicate to BigQuery (analytics).\n */\nexport class FanOutAdapter implements DatabaseAdapter, Materialisable {\n\tprivate readonly primary: DatabaseAdapter;\n\tprivate readonly secondaries: ReadonlyArray<DatabaseAdapter>;\n\n\tconstructor(config: FanOutAdapterConfig) {\n\t\tthis.primary = config.primary;\n\t\tthis.secondaries = config.secondaries;\n\t}\n\n\t/** Insert deltas into the primary, then replicate to secondaries (fire-and-forget). */\n\tasync insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>> {\n\t\tconst result = await this.primary.insertDeltas(deltas);\n\t\tif (!result.ok) {\n\t\t\treturn result;\n\t\t}\n\n\t\tfor (const secondary of this.secondaries) {\n\t\t\tsecondary.insertDeltas(deltas).catch(() => {});\n\t\t}\n\n\t\treturn Ok(undefined);\n\t}\n\n\t/** Query deltas from the primary adapter only. */\n\tasync queryDeltasSince(\n\t\thlc: HLCTimestamp,\n\t\ttables?: string[],\n\t): Promise<Result<RowDelta[], AdapterError>> {\n\t\treturn this.primary.queryDeltasSince(hlc, tables);\n\t}\n\n\t/** Get the latest state from the primary adapter only. */\n\tasync getLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>> {\n\t\treturn this.primary.getLatestState(table, rowId);\n\t}\n\n\t/** Ensure schema on the primary first, then best-effort on secondaries. */\n\tasync ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>> {\n\t\tconst result = await this.primary.ensureSchema(schema);\n\t\tif (!result.ok) {\n\t\t\treturn result;\n\t\t}\n\n\t\tfor (const secondary of this.secondaries) {\n\t\t\tsecondary.ensureSchema(schema).catch(() => {});\n\t\t}\n\n\t\treturn Ok(undefined);\n\t}\n\n\t/** Materialise via primary, then replicate to materialisable secondaries (fire-and-forget). */\n\tasync materialise(\n\t\tdeltas: RowDelta[],\n\t\tschemas: ReadonlyArray<TableSchema>,\n\t): Promise<Result<void, AdapterError>> {\n\t\tif (isMaterialisable(this.primary)) {\n\t\t\tconst result = await this.primary.materialise(deltas, schemas);\n\t\t\tif (!result.ok) {\n\t\t\t\treturn result;\n\t\t\t}\n\t\t}\n\n\t\tfor (const secondary of this.secondaries) {\n\t\t\tif (isMaterialisable(secondary)) {\n\t\t\t\tsecondary.materialise(deltas, schemas).catch(() => {});\n\t\t\t}\n\t\t}\n\n\t\treturn Ok(undefined);\n\t}\n\n\t/** Close primary and all secondary adapters. */\n\tasync close(): Promise<void> {\n\t\tawait this.primary.close();\n\t\tfor (const secondary of this.secondaries) {\n\t\t\tawait secondary.close();\n\t\t}\n\t}\n}\n","import {\n\ttype AdapterError,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\n\nimport type { DatabaseAdapter } from \"./db-types\";\nimport type { Materialisable } from \"./materialise\";\nimport { isMaterialisable } from \"./materialise\";\n\n/** Configuration for age-based tiered storage. */\nexport interface LifecycleAdapterConfig {\n\t/** Hot tier — recent data, fast queries. */\n\thot: {\n\t\t/** The adapter storing recent deltas. */\n\t\tadapter: DatabaseAdapter;\n\t\t/** Maximum age in milliseconds before data is considered cold. */\n\t\tmaxAgeMs: number;\n\t};\n\t/** Cold tier — older data, cheap storage. */\n\tcold: {\n\t\t/** The adapter storing archived deltas. 
*/\n\t\tadapter: DatabaseAdapter;\n\t};\n}\n\n/**\n * Routes database operations across hot and cold tiers based on delta age.\n *\n * Writes always go to the hot adapter. Reads fan out to both tiers when\n * the requested HLC is older than the configured `maxAgeMs` threshold.\n *\n * Use {@link migrateToTier} as a background job to copy aged-out deltas\n * from hot to cold.\n */\nexport class LifecycleAdapter implements DatabaseAdapter, Materialisable {\n\tprivate readonly hot: DatabaseAdapter;\n\tprivate readonly cold: DatabaseAdapter;\n\tprivate readonly maxAgeMs: number;\n\n\tconstructor(config: LifecycleAdapterConfig) {\n\t\tthis.hot = config.hot.adapter;\n\t\tthis.cold = config.cold.adapter;\n\t\tthis.maxAgeMs = config.hot.maxAgeMs;\n\t}\n\n\t/** Insert deltas into the hot adapter — new data is always hot. */\n\tasync insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>> {\n\t\treturn this.hot.insertDeltas(deltas);\n\t}\n\n\t/**\n\t * Query deltas since the given HLC.\n\t *\n\t * If `sinceHlc` is older than `now - maxAgeMs`, queries both hot and cold\n\t * adapters and merges the results sorted by HLC. Otherwise queries hot only.\n\t */\n\tasync queryDeltasSince(\n\t\thlc: HLCTimestamp,\n\t\ttables?: string[],\n\t): Promise<Result<RowDelta[], AdapterError>> {\n\t\tconst sinceWallMs = Number(hlc >> 16n);\n\t\tconst thresholdMs = Date.now() - this.maxAgeMs;\n\n\t\tif (sinceWallMs < thresholdMs) {\n\t\t\t// Query spans into cold territory — fan out to both tiers\n\t\t\tconst [hotResult, coldResult] = await Promise.all([\n\t\t\t\tthis.hot.queryDeltasSince(hlc, tables),\n\t\t\t\tthis.cold.queryDeltasSince(hlc, tables),\n\t\t\t]);\n\n\t\t\tif (!hotResult.ok) return hotResult;\n\t\t\tif (!coldResult.ok) return coldResult;\n\n\t\t\tconst merged = [...hotResult.value, ...coldResult.value];\n\t\t\tmerged.sort((a, b) => (a.hlc < b.hlc ? -1 : a.hlc > b.hlc ? 1 : 0));\n\n\t\t\treturn Ok(merged);\n\t\t}\n\n\t\t// Recent query — hot tier only\n\t\treturn this.hot.queryDeltasSince(hlc, tables);\n\t}\n\n\t/** Get latest state — try hot first, fall back to cold if hot returns null. */\n\tasync getLatestState(\n\t\ttable: string,\n\t\trowId: string,\n\t): Promise<Result<Record<string, unknown> | null, AdapterError>> {\n\t\tconst hotResult = await this.hot.getLatestState(table, rowId);\n\t\tif (!hotResult.ok) return hotResult;\n\n\t\tif (hotResult.value !== null) {\n\t\t\treturn hotResult;\n\t\t}\n\n\t\treturn this.cold.getLatestState(table, rowId);\n\t}\n\n\t/** Ensure schema exists on both hot and cold adapters. */\n\tasync ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>> {\n\t\tconst hotResult = await this.hot.ensureSchema(schema);\n\t\tif (!hotResult.ok) return hotResult;\n\n\t\treturn this.cold.ensureSchema(schema);\n\t}\n\n\t/** Materialise via hot tier only — cold tier stores archived deltas, not destination tables. */\n\tasync materialise(\n\t\tdeltas: RowDelta[],\n\t\tschemas: ReadonlyArray<TableSchema>,\n\t): Promise<Result<void, AdapterError>> {\n\t\tif (isMaterialisable(this.hot)) {\n\t\t\treturn this.hot.materialise(deltas, schemas);\n\t\t}\n\t\treturn Ok(undefined);\n\t}\n\n\t/** Close both hot and cold adapters. 
*/\n\tasync close(): Promise<void> {\n\t\tawait this.hot.close();\n\t\tawait this.cold.close();\n\t}\n}\n\n/**\n * Migrate aged-out deltas from the hot adapter to the cold adapter.\n *\n * Queries the hot adapter for all deltas since HLC 0, filters those with\n * wall time older than `Date.now() - maxAgeMs`, and inserts them into the\n * cold adapter. Insertion is idempotent via deltaId uniqueness.\n *\n * Does NOT delete from hot — that is a separate cleanup concern.\n *\n * @param hot - The hot-tier adapter to read old deltas from.\n * @param cold - The cold-tier adapter to write old deltas to.\n * @param maxAgeMs - Age threshold in milliseconds.\n * @returns The count of migrated deltas, or an AdapterError.\n */\nexport async function migrateToTier(\n\thot: DatabaseAdapter,\n\tcold: DatabaseAdapter,\n\tmaxAgeMs: number,\n): Promise<Result<{ migrated: number }, AdapterError>> {\n\tconst thresholdMs = Date.now() - maxAgeMs;\n\tconst thresholdHlc = (BigInt(0) << 16n) as HLCTimestamp;\n\n\tconst result = await hot.queryDeltasSince(thresholdHlc);\n\tif (!result.ok) return result;\n\n\tconst oldDeltas = result.value.filter((delta) => {\n\t\tconst wallMs = Number(delta.hlc >> 16n);\n\t\treturn wallMs < thresholdMs;\n\t});\n\n\tif (oldDeltas.length === 0) {\n\t\treturn Ok({ migrated: 0 });\n\t}\n\n\tconst insertResult = await cold.insertDeltas(oldDeltas);\n\tif (!insertResult.ok) return insertResult;\n\n\treturn Ok({ migrated: oldDeltas.length });\n}\n","import { type AdapterError, type HLCTimestamp, Ok, type Result } from \"@lakesync/core\";\n\nimport type { DatabaseAdapter } from \"./db-types\";\n\n/** Options for migrating deltas between database adapters. */\nexport interface MigrateOptions {\n\t/** Source adapter to read from */\n\tfrom: DatabaseAdapter;\n\t/** Target adapter to write to */\n\tto: DatabaseAdapter;\n\t/** Optional: only migrate specific tables */\n\ttables?: string[];\n\t/** Batch size for writing (default: 1000) */\n\tbatchSize?: number;\n\t/** Progress callback invoked after each batch write */\n\tonProgress?: (info: MigrateProgress) => void;\n}\n\n/** Progress information reported during migration. */\nexport interface MigrateProgress {\n\t/** Current batch number (1-based) */\n\tbatch: number;\n\t/** Total deltas migrated so far */\n\ttotalSoFar: number;\n}\n\n/** Result of a successful migration. */\nexport interface MigrateResult {\n\t/** Total number of deltas migrated */\n\ttotalDeltas: number;\n\t/** Number of batches processed */\n\tbatches: number;\n}\n\n/**\n * Migrate deltas from one database adapter to another.\n * Reads all matching deltas from the source, then writes them in batches to the target.\n * Idempotent via deltaId uniqueness in the target adapter.\n */\nexport async function migrateAdapter(\n\topts: MigrateOptions,\n): Promise<Result<MigrateResult, AdapterError>> {\n\tconst batchSize = opts.batchSize ?? 
1000;\n\n\tconst readResult = await opts.from.queryDeltasSince(BigInt(0) as HLCTimestamp, opts.tables);\n\tif (!readResult.ok) {\n\t\treturn readResult;\n\t}\n\n\tconst deltas = readResult.value;\n\n\tif (deltas.length === 0) {\n\t\treturn Ok({ totalDeltas: 0, batches: 0 });\n\t}\n\n\tlet batchCount = 0;\n\tlet totalSoFar = 0;\n\n\tfor (let i = 0; i < deltas.length; i += batchSize) {\n\t\tconst batch = deltas.slice(i, i + batchSize);\n\t\tconst writeResult = await opts.to.insertDeltas(batch);\n\t\tif (!writeResult.ok) {\n\t\t\treturn writeResult;\n\t\t}\n\n\t\tbatchCount++;\n\t\ttotalSoFar += batch.length;\n\n\t\topts.onProgress?.({ batch: batchCount, totalSoFar });\n\t}\n\n\treturn Ok({ totalDeltas: totalSoFar, batches: batchCount });\n}\n","import {\n\tDeleteObjectCommand,\n\tDeleteObjectsCommand,\n\tGetObjectCommand,\n\tHeadObjectCommand,\n\tListObjectsV2Command,\n\tPutObjectCommand,\n\tS3Client,\n} from \"@aws-sdk/client-s3\";\nimport { AdapterError, Ok, type Result } from \"@lakesync/core\";\nimport { wrapAsync } from \"./shared\";\nimport type { AdapterConfig, LakeAdapter, ObjectInfo } from \"./types\";\n\n/**\n * MinIO/S3-compatible lake adapter.\n *\n * Wraps the AWS S3 SDK to provide a Result-based interface for\n * interacting with MinIO or any S3-compatible object store.\n * All public methods return `Result` and never throw.\n */\nexport class MinIOAdapter implements LakeAdapter {\n\tprivate readonly client: S3Client;\n\tprivate readonly bucket: string;\n\n\tconstructor(config: AdapterConfig) {\n\t\tthis.bucket = config.bucket;\n\t\tthis.client = new S3Client({\n\t\t\tendpoint: config.endpoint,\n\t\t\tregion: config.region ?? \"us-east-1\",\n\t\t\tcredentials: config.credentials,\n\t\t\tforcePathStyle: true, // Required for MinIO\n\t\t});\n\t}\n\n\t/** Store an object in the lake */\n\tasync putObject(\n\t\tpath: string,\n\t\tdata: Uint8Array,\n\t\tcontentType?: string,\n\t): Promise<Result<void, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tawait this.client.send(\n\t\t\t\tnew PutObjectCommand({\n\t\t\t\t\tBucket: this.bucket,\n\t\t\t\t\tKey: path,\n\t\t\t\t\tBody: data,\n\t\t\t\t\tContentType: contentType,\n\t\t\t\t}),\n\t\t\t);\n\t\t}, `Failed to put object: ${path}`);\n\t}\n\n\t/** Retrieve an object from the lake */\n\tasync getObject(path: string): Promise<Result<Uint8Array, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tconst response = await this.client.send(\n\t\t\t\tnew GetObjectCommand({\n\t\t\t\t\tBucket: this.bucket,\n\t\t\t\t\tKey: path,\n\t\t\t\t}),\n\t\t\t);\n\t\t\tconst bytes = await response.Body?.transformToByteArray();\n\t\t\tif (!bytes) {\n\t\t\t\tthrow new AdapterError(`Empty response for object: ${path}`);\n\t\t\t}\n\t\t\treturn bytes;\n\t\t}, `Failed to get object: ${path}`);\n\t}\n\n\t/** Get object metadata without retrieving the body */\n\tasync headObject(\n\t\tpath: string,\n\t): Promise<Result<{ size: number; lastModified: Date }, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tconst response = await this.client.send(\n\t\t\t\tnew HeadObjectCommand({\n\t\t\t\t\tBucket: this.bucket,\n\t\t\t\t\tKey: path,\n\t\t\t\t}),\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsize: response.ContentLength ?? 0,\n\t\t\t\tlastModified: response.LastModified ?? 
new Date(0),\n\t\t\t};\n\t\t}, `Failed to head object: ${path}`);\n\t}\n\n\t/** List objects matching a given prefix */\n\tasync listObjects(prefix: string): Promise<Result<ObjectInfo[], AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tconst response = await this.client.send(\n\t\t\t\tnew ListObjectsV2Command({\n\t\t\t\t\tBucket: this.bucket,\n\t\t\t\t\tPrefix: prefix,\n\t\t\t\t}),\n\t\t\t);\n\t\t\treturn (response.Contents ?? []).map((item) => ({\n\t\t\t\tkey: item.Key ?? \"\",\n\t\t\t\tsize: item.Size ?? 0,\n\t\t\t\tlastModified: item.LastModified ?? new Date(0),\n\t\t\t}));\n\t\t}, `Failed to list objects with prefix: ${prefix}`);\n\t}\n\n\t/** Delete a single object from the lake */\n\tasync deleteObject(path: string): Promise<Result<void, AdapterError>> {\n\t\treturn wrapAsync(async () => {\n\t\t\tawait this.client.send(\n\t\t\t\tnew DeleteObjectCommand({\n\t\t\t\t\tBucket: this.bucket,\n\t\t\t\t\tKey: path,\n\t\t\t\t}),\n\t\t\t);\n\t\t}, `Failed to delete object: ${path}`);\n\t}\n\n\t/** Delete multiple objects from the lake in a single batch operation */\n\tasync deleteObjects(paths: string[]): Promise<Result<void, AdapterError>> {\n\t\tif (paths.length === 0) {\n\t\t\treturn Ok(undefined);\n\t\t}\n\n\t\treturn wrapAsync(async () => {\n\t\t\tawait this.client.send(\n\t\t\t\tnew DeleteObjectsCommand({\n\t\t\t\t\tBucket: this.bucket,\n\t\t\t\t\tDelete: {\n\t\t\t\t\t\tObjects: paths.map((key) => ({ Key: key })),\n\t\t\t\t\t\tQuiet: true,\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t);\n\t\t}, `Failed to batch delete ${paths.length} objects`);\n\t}\n}\n","import type { ConnectorConfig } from \"@lakesync/core\";\n\n/** Generic query function — abstracts any SQL database connection. */\nexport type QueryFn = (sql: string, params?: unknown[]) => Promise<Record<string, unknown>[]>;\n\n/**\n * Create a raw SQL query function from a {@link ConnectorConfig}.\n *\n * Uses dynamic imports so the database drivers (pg, mysql2) are only\n * loaded when actually needed. Returns `null` for connector types that\n * do not support the standard SQL polling model (e.g. 
BigQuery).\n *\n * @param config - Validated connector configuration.\n * @returns A query function or `null` if the connector type is unsupported.\n */\nexport async function createQueryFn(config: ConnectorConfig): Promise<QueryFn | null> {\n\tswitch (config.type) {\n\t\tcase \"postgres\": {\n\t\t\tif (!config.postgres) return null;\n\t\t\tconst { Pool } = await import(\"pg\");\n\t\t\tconst pool = new Pool({ connectionString: config.postgres.connectionString });\n\t\t\treturn async (sql: string, params?: unknown[]) => {\n\t\t\t\tconst result = await pool.query(sql, params);\n\t\t\t\treturn result.rows as Record<string, unknown>[];\n\t\t\t};\n\t\t}\n\t\tcase \"mysql\": {\n\t\t\tif (!config.mysql) return null;\n\t\t\tconst mysql = await import(\"mysql2/promise\");\n\t\t\tconst pool = mysql.createPool(config.mysql.connectionString);\n\t\t\treturn async (sql: string, params?: unknown[]) => {\n\t\t\t\tconst [rows] = await pool.query(sql, params);\n\t\t\t\treturn rows as Record<string, unknown>[];\n\t\t\t};\n\t\t}\n\t\tdefault:\n\t\t\treturn null;\n\t}\n}\n"],"mappings":";;;;;;;;AAmCA,IAAM,oBAA4E;AAAA,EACjF,QAAQ;AAAA,EACR,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,MAAM;AAAA,EACN,MAAM;AACP;AAEO,SAAS,uBAAuB,MAAsD;AAC5F,SAAO,kBAAkB,IAAI;AAC9B;AAGO,SAAS,kBAAkB,SAA8C;AAC/E,SACC,YAAY,QACZ,OAAO,YAAY,YACnB,kBAAkB,WAClB,sBAAsB,WACtB,OAAQ,QAA4B,iBAAiB;AAEvD;;;ACpBO,SAAS,iBAAiB,SAA6C;AAC7E,SACC,YAAY,QACZ,OAAO,YAAY,YACnB,iBAAiB,WACjB,OAAQ,QAA2B,gBAAgB;AAErD;AAMO,SAAS,kBAAkB,QAA+B;AAChE,SAAO,OAAO,cAAc,CAAC,QAAQ;AACtC;AAMO,SAAS,uBAAuB,QAA+B;AACrE,SAAO,OAAO,mBAAmB,CAAC,OAAO,gBAAgB,IAAI,kBAAkB,MAAM;AACtF;AAKO,SAAS,aAAa,QAA8B;AAC1D,SAAO,OAAO,eAAe;AAC9B;AAQO,SAAS,mBAAmB,QAA2D;AAC7F,QAAM,SAAS,oBAAI,IAAyB;AAC5C,aAAW,SAAS,QAAQ;AAC3B,QAAI,SAAS,OAAO,IAAI,MAAM,KAAK;AACnC,QAAI,CAAC,QAAQ;AACZ,eAAS,oBAAI,IAAY;AACzB,aAAO,IAAI,MAAM,OAAO,MAAM;AAAA,IAC/B;AACA,WAAO,IAAI,MAAM,KAAK;AAAA,EACvB;AACA,SAAO;AACR;AAWO,SAAS,iBAAiB,SAA+D;AAC/F,QAAM,QAAQ,oBAAI,IAAyB;AAC3C,aAAW,UAAU,SAAS;AAC7B,UAAM,MAAM,OAAO,eAAe,OAAO;AACzC,UAAM,IAAI,KAAK,MAAM;AAAA,EACtB;AACA,SAAO;AACR;;;ACpGO,SAAS,QAAQ,OAAmC;AAC1D,SAAO,iBAAiB,QAAQ,QAAQ;AACzC;AAGA,eAAsB,UACrB,WACA,cACmC;AACnC,MAAI;AACH,UAAM,QAAQ,MAAM,UAAU;AAC9B,WAAO,GAAG,KAAK;AAAA,EAChB,SAAS,OAAO;AACf,QAAI,iBAAiB,cAAc;AAClC,aAAO,IAAI,KAAK;AAAA,IACjB;AACA,WAAO,IAAI,IAAI,aAAa,cAAc,QAAQ,KAAK,CAAC,CAAC;AAAA,EAC1D;AACD;AAOO,SAAS,iBACf,MACiC;AACjC,MAAI,KAAK,WAAW,EAAG,QAAO;AAE9B,QAAM,UAAU,KAAK,KAAK,SAAS,CAAC;AACpC,MAAI,QAAQ,OAAO,SAAU,QAAO;AAEpC,QAAM,QAAiC,CAAC;AAExC,aAAW,OAAO,MAAM;AACvB,QAAI,IAAI,OAAO,UAAU;AACxB,iBAAW,OAAO,OAAO,KAAK,KAAK,GAAG;AACrC,eAAO,MAAM,GAAG;AAAA,MACjB;AACA;AAAA,IACD;AAEA,UAAM,UACL,OAAO,IAAI,YAAY,WAAW,KAAK,MAAM,IAAI,OAAO,IAAI,IAAI;AAEjE,eAAW,OAAO,SAAS;AAC1B,YAAM,IAAI,MAAM,IAAI,IAAI;AAAA,IACzB;AAAA,EACD;AAEA,SAAO;AACR;;;ACvDA,SAAS,gBAAgB;AAoDzB,SAAS,cAAc,KAAiC;AACvD,QAAM,UACL,OAAO,IAAI,YAAY,WAAW,KAAK,MAAM,IAAI,OAAO,IAAI,IAAI;AAGjE,QAAM,SAAS,IAAI;AACnB,QAAM,YACL,OAAO,WAAW,YAAY,WAAW,QAAQ,WAAW,SACzD,OAAO,QACP,OAAO,MAAM;AAEjB,SAAO;AAAA,IACN,SAAS,IAAI;AAAA,IACb,OAAO,IAAI;AAAA,IACX,OAAO,IAAI;AAAA,IACX;AAAA,IACA,KAAK,OAAO,SAAS;AAAA,IACrB,UAAU,IAAI;AAAA,IACd,IAAI,IAAI;AAAA,EACT;AACD;AAaO,IAAM,kBAAN,MAAiE;AAAA;AAAA,EAE9D;AAAA;AAAA,EAEA;AAAA;AAAA,EAEA;AAAA,EAET,YAAY,QAA+B;AAC1C,SAAK,SAAS,IAAI,SAAS;AAAA,MAC1B,WAAW,OAAO;AAAA,MAClB,aAAa,OAAO;AAAA,IACrB,CAAC;AACD,SAAK,UAAU,OAAO;AACtB,SAAK,WAAW,OAAO,YAAY;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,aAAa,QAAyD;AAC3E,QAAI,OAAO,WAAW,GAAG;AACxB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAE5B,YAAM,SAAiC,CAAC;AACxC,YAAM,UAAoB,CAAC;AAE3B,eAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAA
K;AACvC,cAAM,IAAI,OAAO,CAAC;AAClB,eAAO,OAAO,CAAC,EAAE,IAAI,EAAE;AACvB,eAAO,OAAO,CAAC,EAAE,IAAI,EAAE;AACvB,eAAO,OAAO,CAAC,EAAE,IAAI,EAAE;AACvB,eAAO,OAAO,CAAC,EAAE,IAAI,KAAK,UAAU,EAAE,OAAO;AAC7C,eAAO,OAAO,CAAC,EAAE,IAAI,EAAE,IAAI,SAAS;AACpC,eAAO,OAAO,CAAC,EAAE,IAAI,EAAE;AACvB,eAAO,MAAM,CAAC,EAAE,IAAI,EAAE;AAEtB,gBAAQ;AAAA,UACP,eAAe,CAAC,sBAAsB,CAAC,uBAAuB,CAAC,oBAAoB,CAAC,0BAA0B,CAAC,2BAA2B,CAAC,sBAAsB,CAAC;AAAA,QACnK;AAAA,MACD;AAEA,YAAM,MAAM,WAAW,KAAK,OAAO;AAAA,SAC7B,QAAQ,KAAK,aAAa,CAAC;AAAA;AAAA;AAAA;AAKjC,YAAM,KAAK,OAAO,MAAM,EAAE,OAAO,KAAK,QAAQ,UAAU,KAAK,SAAS,CAAC;AAAA,IACxE,GAAG,yBAAyB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,iBACL,KACA,QAC4C;AAC5C,WAAO,UAAU,YAAY;AAC5B,UAAI;AACJ,YAAM,SAA4C;AAAA,QACjD,UAAU,IAAI,SAAS;AAAA,MACxB;AAEA,UAAI,UAAU,OAAO,SAAS,GAAG;AAChC,cAAM;AAAA,SACD,KAAK,OAAO;AAAA;AAAA;AAGjB,eAAO,SAAS;AAAA,MACjB,OAAO;AACN,cAAM;AAAA,SACD,KAAK,OAAO;AAAA;AAAA;AAAA,MAGlB;AAEA,YAAM,CAAC,IAAI,IAAI,MAAM,KAAK,OAAO,MAAM;AAAA,QACtC,OAAO;AAAA,QACP;AAAA,QACA,UAAU,KAAK;AAAA,MAChB,CAAC;AACD,aAAQ,KAA4B,IAAI,aAAa;AAAA,IACtD,GAAG,wBAAwB;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,eACL,OACA,OACgE;AAChE,WAAO,UAAU,YAAY;AAC5B,YAAM,MAAM;AAAA,SACN,KAAK,OAAO;AAAA;AAAA;AAIlB,YAAM,CAAC,IAAI,IAAI,MAAM,KAAK,OAAO,MAAM;AAAA,QACtC,OAAO;AAAA,QACP,QAAQ,EAAE,KAAK,OAAO,KAAK,MAAM;AAAA,QACjC,UAAU,KAAK;AAAA,MAChB,CAAC;AACD,aAAO,iBAAiB,IAA0B;AAAA,IACnD,GAAG,kCAAkC,KAAK,IAAI,KAAK,EAAE;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,aAAa,SAA2D;AAC7E,WAAO,UAAU,YAAY;AAE5B,YAAM,aAAa,KAAK,OAAO,QAAQ,KAAK,OAAO;AACnD,YAAM,CAAC,aAAa,IAAI,MAAM,WAAW,OAAO;AAChD,UAAI,CAAC,eAAe;AACnB,cAAM,KAAK,OAAO,cAAc,KAAK,SAAS;AAAA,UAC7C,UAAU,KAAK;AAAA,QAChB,CAAC;AAAA,MACF;AAGA,YAAM,KAAK,OAAO,MAAM;AAAA,QACvB,OAAO,gCAAgC,KAAK,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAUnD,UAAU,KAAK;AAAA,MAChB,CAAC;AAAA,IACF,GAAG,yBAAyB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,YACL,QACA,SACsC;AACtC,QAAI,OAAO,WAAW,GAAG;AACxB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAC5B,YAAM,cAAc,mBAAmB,MAAM;AAC7C,YAAM,cAAc,iBAAiB,OAAO;AAE5C,iBAAW,CAAC,aAAa,MAAM,KAAK,aAAa;AAChD,cAAM,SAAS,YAAY,IAAI,WAAW;AAC1C,YAAI,CAAC,OAAQ;AAEb,cAAM,KAAK,kBAAkB,MAAM;AACnC,cAAM,eAAe,uBAAuB,MAAM;AAClD,cAAM,OAAO,aAAa,MAAM;AAGhC,cAAM,UAAU,OAAO,QACrB,IAAI,CAAC,MAAM,GAAG,EAAE,IAAI,IAAI,uBAAuB,EAAE,IAAI,CAAC,EAAE,EACxD,KAAK,IAAI;AACX,cAAM,eAAe,OAAO;AAAA,yBAA8B;AAC1D,cAAM,KAAK,OAAO,MAAM;AAAA,UACvB,OAAO,gCAAgC,KAAK,OAAO,IAAI,OAAO,KAAK;AAAA;AAAA,GAErE,OAAO;AAAA,0BACgB,YAAY;AAAA;AAAA;AAAA,aAGzB,GAAG,IAAI,CAAC,MAAO,MAAM,WAAW,WAAW,CAAE,EAAE,KAAK,IAAI,CAAC;AAAA,UACjE,UAAU,KAAK;AAAA,QAChB,CAAC;AAGD,cAAM,aAAa,CAAC,GAAG,MAAM;AAC7B,cAAM,CAAC,SAAS,IAAI,MAAM,KAAK,OAAO,MAAM;AAAA,UAC3C,OAAO,qCAAqC,KAAK,OAAO;AAAA;AAAA;AAAA,UAGxD,QAAQ,EAAE,aAAa,QAAQ,WAAW;AAAA,UAC1C,UAAU,KAAK;AAAA,QAChB,CAAC;AAGD,cAAM,YAAY,oBAAI,IAAoE;AAC1F,mBAAW,OAAO,WAId;AACH,cAAI,QAAQ,UAAU,IAAI,IAAI,MAAM;AACpC,cAAI,CAAC,OAAO;AACX,oBAAQ,CAAC;AACT,sBAAU,IAAI,IAAI,QAAQ,KAAK;AAAA,UAChC;AACA,gBAAM,KAAK,EAAE,SAAS,IAAI,SAAS,IAAI,IAAI,GAAG,CAAC;AAAA,QAChD;AAEA,cAAM,UAAoE,CAAC;AAC3E,cAAM,eAAyB,CAAC;AAEhC,mBAAW,CAAC,OAAO,KAAK,KAAK,WAAW;AACvC,gBAAM,QAAQ,iBAAiB,KAAK;AACpC,cAAI,UAAU,MAAM;AACnB,yBAAa,KAAK,KAAK;AAAA,UACxB,OAAO;AACN,oBAAQ,KAAK,EAAE,OAAO,MAAM,CAAC;AAAA,UAC9B;AAAA,QACD;AAGA,YAAI,QAAQ,SAAS,GAAG;AACvB,gBAAM,SAAkC,CAAC;AACzC,gBAAM,UAAoB,CAAC;AAE3B,mBAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACxC,kBAAM,IAAI,QAAQ,CAAC;AACnB,mBAAO,OAAO,CAAC,EAAE,IAAI,EAAE;AACvB,uBAAW,OAAO,OAAO,SAAS;AACjC,qBAAO,IAAI,OAAO,QAAQ,QAAQ,GAAG,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,MAAM,IAAI,IAAI,KAAK;AAAA,YACvE;AAEA,kBAAM,aAAa,OAAO,QACxB,IAAI,CAAC,KAA
K,OAAO,KAAK,EAAE,IAAI,CAAC,OAAO,IAAI,IAAI,EAAE,EAC9C,KAAK,IAAI;AACX,kBAAM,kBAAkB,OAAO,4CAA4C;AAC3E,oBAAQ;AAAA,cACP,eAAe,CAAC,eAAe,UAAU,GAAG,eAAe;AAAA,YAC5D;AAAA,UACD;AAEA,gBAAM,UAAU,aACd,IAAI,CAAC,MAAM,KAAK,MAAM,WAAW,WAAW,CAAC,QAAQ,MAAM,WAAW,WAAW,CAAC,EAAE,EACpF,KAAK,OAAO;AAEd,gBAAM,YAAY,OAAO,QAAQ,IAAI,CAAC,QAAQ,GAAG,IAAI,IAAI,QAAQ,IAAI,IAAI,EAAE,EAAE,KAAK,IAAI;AACtF,gBAAM,kBAAkB,OAAO,gCAAgC;AAE/D,gBAAM,iBAAiB;AAAA,YACtB;AAAA,YACA,GAAG,OAAO,QAAQ,IAAI,CAAC,MAAM,EAAE,IAAI;AAAA,YACnC;AAAA,YACA,GAAI,OAAO,CAAC,YAAY,IAAI,CAAC;AAAA,YAC7B;AAAA,UACD,EAAE,KAAK,IAAI;AACX,gBAAM,iBAAiB;AAAA,YACtB;AAAA,YACA,GAAG,OAAO,QAAQ,IAAI,CAAC,MAAM,KAAK,EAAE,IAAI,EAAE;AAAA,YAC1C;AAAA,YACA,GAAI,OAAO,CAAC,cAAc,IAAI,CAAC;AAAA,YAC/B;AAAA,UACD,EAAE,KAAK,IAAI;AAEX,gBAAM,WAAW,WAAW,KAAK,OAAO,IAAI,OAAO,KAAK;AAAA,SACpD,QAAQ,KAAK,aAAa,CAAC;AAAA,KAC/B,OAAO;AAAA,+BACmB,SAAS,GAAG,eAAe;AAAA,gCAC1B,cAAc;AAAA,UACpC,cAAc;AAEnB,gBAAM,KAAK,OAAO,MAAM;AAAA,YACvB,OAAO;AAAA,YACP;AAAA,YACA,UAAU,KAAK;AAAA,UAChB,CAAC;AAAA,QACF;AAGA,YAAI,aAAa,SAAS,GAAG;AAC5B,cAAI,MAAM;AACT,kBAAM,KAAK,OAAO,MAAM;AAAA,cACvB,OAAO,YAAY,KAAK,OAAO,IAAI,OAAO,KAAK;AAAA,cAC/C,QAAQ,EAAE,QAAQ,aAAa;AAAA,cAC/B,UAAU,KAAK;AAAA,YAChB,CAAC;AAAA,UACF,OAAO;AACN,kBAAM,KAAK,OAAO,MAAM;AAAA,cACvB,OAAO,iBAAiB,KAAK,OAAO,IAAI,OAAO,KAAK;AAAA,cACpD,QAAQ,EAAE,QAAQ,aAAa;AAAA,cAC/B,UAAU,KAAK;AAAA,YAChB,CAAC;AAAA,UACF;AAAA,QACD;AAAA,MACD;AAAA,IACD,GAAG,8BAA8B;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QAAuB;AAAA,EAE7B;AACD;;;AC7WO,IAAM,mBAAN,MAAkD;AAAA,EACvC;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,QAAgC;AAC3C,SAAK,WAAW,oBAAI,IAAI;AACxB,SAAK,WAAW,oBAAI,IAAI;AACxB,SAAK,iBAAiB,OAAO;AAC7B,SAAK,SAAS,IAAI,OAAO,cAAc;AAEvC,eAAW,SAAS,OAAO,QAAQ;AAClC,WAAK,SAAS,IAAI,MAAM,OAAO;AAC/B,iBAAW,SAAS,MAAM,QAAQ;AACjC,YAAI,KAAK,SAAS,IAAI,KAAK,GAAG;AAC7B,gBAAM,IAAI,MAAM,2BAA2B,KAAK,8BAA8B;AAAA,QAC/E;AACA,aAAK,SAAS,IAAI,OAAO,MAAM,OAAO;AAAA,MACvC;AAAA,IACD;AAAA,EACD;AAAA;AAAA,EAGA,MAAM,aAAa,QAAyD;AAC3E,UAAM,SAAS,oBAAI,IAAiC;AAEpD,eAAW,SAAS,QAAQ;AAC3B,YAAM,UAAU,KAAK,SAAS,IAAI,MAAM,KAAK,KAAK,KAAK;AACvD,UAAI,QAAQ,OAAO,IAAI,OAAO;AAC9B,UAAI,CAAC,OAAO;AACX,gBAAQ,CAAC;AACT,eAAO,IAAI,SAAS,KAAK;AAAA,MAC1B;AACA,YAAM,KAAK,KAAK;AAAA,IACjB;AAEA,eAAW,CAAC,SAAS,KAAK,KAAK,QAAQ;AACtC,YAAM,SAAS,MAAM,QAAQ,aAAa,KAAK;AAC/C,UAAI,CAAC,OAAO,IAAI;AACf,eAAO;AAAA,MACR;AAAA,IACD;AAEA,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA,EAGA,MAAM,iBACL,KACA,QAC4C;AAC5C,UAAM,aAAa,oBAAI,IAAqB;AAC5C,UAAM,gBAAgB,oBAAI,IAA+B;AAEzD,QAAI,UAAU,OAAO,SAAS,GAAG;AAChC,iBAAW,SAAS,QAAQ;AAC3B,cAAM,UAAU,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK;AACjD,mBAAW,IAAI,OAAO;AACtB,YAAI,WAAW,cAAc,IAAI,OAAO;AACxC,YAAI,CAAC,UAAU;AACd,qBAAW,CAAC;AACZ,wBAAc,IAAI,SAAS,QAAQ;AAAA,QACpC;AACA,iBAAS,KAAK,KAAK;AAAA,MACpB;AAAA,IACD,OAAO;AACN,iBAAW,WAAW,KAAK,UAAU;AACpC,mBAAW,IAAI,OAAO;AAAA,MACvB;AAAA,IACD;AAEA,UAAM,SAAqB,CAAC;AAE5B,eAAW,WAAW,YAAY;AACjC,YAAM,eAAe,cAAc,IAAI,OAAO;AAC9C,YAAM,SAAS,MAAM,QAAQ,iBAAiB,KAAK,YAAY;AAC/D,UAAI,CAAC,OAAO,IAAI;AACf,eAAO;AAAA,MACR;AACA,aAAO,KAAK,GAAG,OAAO,KAAK;AAAA,IAC5B;AAEA,WAAO,KAAK,CAAC,GAAG,MAAO,EAAE,MAAM,EAAE,MAAM,KAAK,EAAE,MAAM,EAAE,MAAM,IAAI,CAAE;AAElE,WAAO,GAAG,MAAM;AAAA,EACjB;AAAA;AAAA,EAGA,MAAM,eACL,OACA,OACgE;AAChE,UAAM,UAAU,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK;AACjD,WAAO,QAAQ,eAAe,OAAO,KAAK;AAAA,EAC3C;AAAA;AAAA,EAGA,MAAM,aAAa,QAA0D;AAC5E,UAAM,UAAU,KAAK,SAAS,IAAI,OAAO,KAAK,KAAK,KAAK;AACxD,WAAO,QAAQ,aAAa,MAAM;AAAA,EACnC;AAAA;AAAA,EAGA,MAAM,QAAuB;AAC5B,eAAW,WAAW,KAAK,UAAU;AACpC,YAAM,QAAQ,MAAM;AAAA,IACrB;AAAA,EACD;AACD;;;ACnIA,OAAO,WAAW;AAclB,IAAM,iBAAyE;AAAA,EAC9E,QAAQ;AAAA,EACR,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,MAAM;AAAA,EACN,MAAM;AACP;AAEA,SAAS,oBAAoB,MAAsD;AAClF,SAAO,eAAe,IAAI;AAC3
B;AASO,IAAM,eAAN,MAA8D;AAAA;AAAA,EAE3D;AAAA,EAET,YAAY,QAA+B;AAC1C,SAAK,OAAO,MAAM,WAAW,OAAO,gBAAgB;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,aAAa,QAAyD;AAC3E,QAAI,OAAO,WAAW,GAAG;AACxB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAC5B,YAAM,MAAM,wGAAwG,OAAO,IAAI,MAAM,uBAAuB,EAAE,KAAK,IAAI,CAAC;AAExK,YAAM,SAAoB,CAAC;AAC3B,iBAAW,SAAS,QAAQ;AAC3B,eAAO;AAAA,UACN,MAAM;AAAA,UACN,MAAM;AAAA,UACN,MAAM;AAAA,UACN,KAAK,UAAU,MAAM,OAAO;AAAA,UAC5B,MAAM,IAAI,SAAS;AAAA,UACnB,MAAM;AAAA,UACN,MAAM;AAAA,QACP;AAAA,MACD;AAEA,YAAM,KAAK,KAAK,QAAQ,KAAK,MAAM;AAAA,IACpC,GAAG,yBAAyB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,iBACL,KACA,QAC4C;AAC5C,WAAO,UAAU,YAAY;AAC5B,UAAI,MACH;AACD,YAAM,SAAoB,CAAC,IAAI,SAAS,CAAC;AAEzC,UAAI,UAAU,OAAO,SAAS,GAAG;AAChC,eAAO,sBAAsB,OAAO,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI,CAAC;AAC7D,eAAO,KAAK,GAAG,MAAM;AAAA,MACtB;AAEA,aAAO;AAEP,YAAM,CAAC,IAAI,IAAI,MAAM,KAAK,KAAK,QAAQ,KAAK,MAAM;AAClD,aAAQ,KAAyB,IAAI,UAAU;AAAA,IAChD,GAAG,wBAAwB;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,eACL,OACA,OACgE;AAChE,WAAO,UAAU,YAAY;AAC5B,YAAM,MACL;AACD,YAAM,CAAC,IAAI,IAAI,MAAM,KAAK,KAAK,QAAQ,KAAK,CAAC,OAAO,KAAK,CAAC;AAC1D,aAAO,iBAAiB,IAAuB;AAAA,IAChD,GAAG,kCAAkC,KAAK,IAAI,KAAK,EAAE;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,aAAa,QAA0D;AAC5E,WAAO,UAAU,YAAY;AAE5B,YAAM,KAAK,KAAK,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAYvB;AAGD,YAAM,aAAa,OAAO,QACxB,IAAI,CAAC,QAAQ,KAAK,IAAI,IAAI,MAAM,oBAAoB,IAAI,IAAI,CAAC,EAAE,EAC/D,KAAK,IAAI;AAEX,YAAM,KAAK,KAAK;AAAA,QACf,gCAAgC,OAAO,KAAK,wCAAwC,UAAU;AAAA,MAC/F;AAAA,IACD,GAAG,qCAAqC,OAAO,KAAK,EAAE;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,YACL,QACA,SACsC;AACtC,QAAI,OAAO,WAAW,GAAG;AACxB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAC5B,YAAM,UAAU,mBAAmB,MAAM;AACzC,YAAM,cAAc,iBAAiB,OAAO;AAE5C,iBAAW,CAAC,WAAW,MAAM,KAAK,SAAS;AAC1C,cAAM,SAAS,YAAY,IAAI,SAAS;AACxC,YAAI,CAAC,OAAQ;AAEb,cAAM,KAAK,kBAAkB,MAAM;AACnC,cAAM,OAAO,aAAa,MAAM;AAGhC,cAAM,YAAY,OAAO,QACvB,IAAI,CAAC,QAAQ,KAAK,IAAI,IAAI,MAAM,oBAAoB,IAAI,IAAI,CAAC,EAAE,EAC/D,KAAK,IAAI;AAEX,cAAM,eAAe,gBAAgB,GAAG,IAAI,CAAC,MAAM,KAAK,CAAC,IAAI,EAAE,KAAK,IAAI,CAAC;AACzE,cAAM,eAAe,OAAO,gCAAgC;AAC5D,cAAM,mBAAmB,OAAO,mBAC7B,mBAAmB,OAAO,gBAAgB,QAC1C;AAEH,cAAM,KAAK,KAAK;AAAA,UACf,gCAAgC,OAAO,KAAK,qCAAqC,SAAS,uCAAuC,YAAY,6DAA6D,YAAY,GAAG,gBAAgB;AAAA,QAC1O;AAGA,cAAM,aAAa,CAAC,GAAG,MAAM;AAC7B,cAAM,eAAe,WAAW,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACxD,cAAM,CAAC,IAAI,IAAI,MAAM,KAAK,KAAK;AAAA,UAC9B,sFAAsF,YAAY;AAAA,UAClG,CAAC,WAAW,GAAG,UAAU;AAAA,QAC1B;AAGA,cAAM,QAAQ,oBAAI,IAAoD;AACtE,mBAAW,OAAO,MAAgE;AACjF,cAAI,OAAO,MAAM,IAAI,IAAI,MAAM;AAC/B,cAAI,CAAC,MAAM;AACV,mBAAO,CAAC;AACR,kBAAM,IAAI,IAAI,QAAQ,IAAI;AAAA,UAC3B;AACA,eAAK,KAAK,GAAG;AAAA,QACd;AAEA,cAAM,UAAoE,CAAC;AAC3E,cAAM,YAAsB,CAAC;AAE7B,mBAAW,CAAC,OAAO,SAAS,KAAK,OAAO;AACvC,gBAAM,QAAQ,iBAAiB,SAAS;AACxC,cAAI,UAAU,MAAM;AACnB,sBAAU,KAAK,KAAK;AAAA,UACrB,OAAO;AACN,oBAAQ,KAAK,EAAE,OAAO,MAAM,CAAC;AAAA,UAC9B;AAAA,QACD;AAGA,YAAI,QAAQ,SAAS,GAAG;AACvB,gBAAM,OAAO,OAAO,QAAQ,IAAI,CAAC,MAAM,EAAE,IAAI;AAC7C,gBAAM,oBAAoB,OACvB,QAAQ,IAAI,MAAM,OAAO,KAAK,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI,CAAC,gBAAgB,EAAE,KAAK,IAAI,IAClF,QAAQ,IAAI,MAAM,OAAO,KAAK,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI,CAAC,UAAU,EAAE,KAAK,IAAI;AAE/E,gBAAM,SAAoB,CAAC;AAC3B,qBAAW,EAAE,OAAO,MAAM,KAAK,SAAS;AACvC,mBAAO,KAAK,KAAK;AACjB,uBAAW,OAAO,MAAM;AACvB,qBAAO,KAAK,MAAM,GAAG,KAAK,IAAI;AAAA,YAC/B;AAAA,UACD;AAEA,gBAAM,aAAa,KAAK,IAAI,CAAC,MAAM,KAAK,CAAC,iBAAiB,CAAC,KAAK,EAAE,KAAK,IAAI;AAC3E,gBAAM,kBAAkB,OAAO,wBAAwB;AACvD,gBAAM,UAAU,OACb,WAAW,KAAK,IAAI,CAAC,MAAM,KAAK,CAAC,IAAI,EAAE,KAAK,IAAI,CAAC,4BACjD,WAAW,KAAK,IAAI,CAAC,MAAM,KAAK,CA
AC,IAAI,EAAE,KAAK,IAAI,CAAC;AAEpD,gBAAM,KAAK,KAAK;AAAA,YACf,iBAAiB,OAAO,KAAK,OAAO,OAAO,YAAY,iBAAiB,4BAA4B,UAAU,GAAG,eAAe;AAAA,YAChI;AAAA,UACD;AAAA,QACD;AAGA,YAAI,UAAU,SAAS,GAAG;AACzB,gBAAM,kBAAkB,UAAU,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AAC1D,cAAI,MAAM;AACT,kBAAM,KAAK,KAAK;AAAA,cACf,YAAY,OAAO,KAAK,iEAAiE,eAAe;AAAA,cACxG;AAAA,YACD;AAAA,UACD,OAAO;AACN,kBAAM,KAAK,KAAK;AAAA,cACf,iBAAiB,OAAO,KAAK,uBAAuB,eAAe;AAAA,cACnE;AAAA,YACD;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD,GAAG,8BAA8B;AAAA,EAClC;AAAA;AAAA,EAGA,MAAM,QAAuB;AAC5B,UAAM,KAAK,KAAK,IAAI;AAAA,EACrB;AACD;AAiBA,SAAS,WAAW,KAA8B;AACjD,SAAO;AAAA,IACN,SAAS,IAAI;AAAA,IACb,OAAO,IAAI;AAAA,IACX,OAAO,IAAI;AAAA,IACX,SAAS,OAAO,IAAI,YAAY,WAAW,KAAK,MAAM,IAAI,OAAO,IAAI,IAAI;AAAA,IACzE,KAAK,OAAO,IAAI,GAAG;AAAA,IACnB,UAAU,IAAI;AAAA,IACd,IAAI,IAAI;AAAA,EACT;AACD;;;ACpSA,SAAS,YAA6B;AAYtC,IAAM,oBAA4E;AAAA,EACjF,QAAQ;AAAA,EACR,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,MAAM;AAAA,EACN,MAAM;AACP;AAQO,IAAM,kBAAN,MAAiE;AAAA;AAAA,EAE9D;AAAA,EAET,YAAY,QAA+B;AAC1C,UAAM,aAAyB;AAAA,MAC9B,kBAAkB,OAAO;AAAA,IAC1B;AACA,SAAK,OAAO,IAAI,KAAK,UAAU;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,aAAa,QAAyD;AAC3E,QAAI,OAAO,WAAW,GAAG;AACxB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAE5B,YAAM,SAAoB,CAAC;AAC3B,YAAM,OAAiB,CAAC;AAExB,eAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACvC,cAAM,IAAI,OAAO,CAAC;AAClB,cAAM,SAAS,IAAI;AACnB,aAAK;AAAA,UACJ,KAAK,SAAS,CAAC,MAAM,SAAS,CAAC,MAAM,SAAS,CAAC,MAAM,SAAS,CAAC,MAAM,SAAS,CAAC,MAAM,SAAS,CAAC,MAAM,SAAS,CAAC;AAAA,QAChH;AACA,eAAO;AAAA,UACN,EAAE;AAAA,UACF,EAAE;AAAA,UACF,EAAE;AAAA,UACF,KAAK,UAAU,EAAE,OAAO;AAAA,UACxB,EAAE,IAAI,SAAS;AAAA,UACf,EAAE;AAAA,UACF,EAAE;AAAA,QACH;AAAA,MACD;AAEA,YAAM,MAAM;AAAA,SACN,KAAK,KAAK,IAAI,CAAC;AAAA;AAGrB,YAAM,KAAK,KAAK,MAAM,KAAK,MAAM;AAAA,IAClC,GAAG,yBAAyB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,iBACL,KACA,QAC4C;AAC5C,WAAO,UAAU,YAAY;AAC5B,UAAI;AACJ,UAAI;AAEJ,UAAI,UAAU,OAAO,SAAS,GAAG;AAChC,cAAM;AAAA;AAAA;AAAA;AAIN,iBAAS,CAAC,IAAI,SAAS,GAAG,MAAM;AAAA,MACjC,OAAO;AACN,cAAM;AAAA;AAAA;AAAA;AAIN,iBAAS,CAAC,IAAI,SAAS,CAAC;AAAA,MACzB;AAEA,YAAM,SAAS,MAAM,KAAK,KAAK,MAAM,KAAK,MAAM;AAChD,aAAO,OAAO,KAAK,IAAIA,cAAa;AAAA,IACrC,GAAG,wBAAwB;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,eACL,OACA,OACgE;AAChE,WAAO,UAAU,YAAY;AAC5B,YAAM,MAAM;AAAA;AAAA;AAAA;AAKZ,YAAM,SAAS,MAAM,KAAK,KAAK,MAAM,KAAK,CAAC,OAAO,KAAK,CAAC;AACxD,aAAO,iBAAiB,OAAO,IAAI;AAAA,IACpC,GAAG,kCAAkC,KAAK,IAAI,KAAK,EAAE;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,aAAa,SAA2D;AAC7E,WAAO,UAAU,YAAY;AAC5B,YAAM,KAAK,KAAK,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,CAYxB;AAAA,IACC,GAAG,yBAAyB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,YACL,QACA,SACsC;AACtC,QAAI,OAAO,WAAW,GAAG;AACxB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAC5B,YAAM,UAAU,mBAAmB,MAAM;AACzC,YAAM,cAAc,iBAAiB,OAAO;AAE5C,iBAAW,CAAC,WAAW,MAAM,KAAK,SAAS;AAC1C,cAAM,SAAS,YAAY,IAAI,SAAS;AACxC,YAAI,CAAC,OAAQ;AAEb,cAAM,OAAO,OAAO;AACpB,cAAM,KAAK,kBAAkB,MAAM;AACnC,cAAM,eAAe,uBAAuB,MAAM;AAClD,cAAM,OAAO,aAAa,MAAM;AAEhC,cAAM,aAAa,OAAO,QACxB,IAAI,CAAC,MAAM,IAAI,EAAE,IAAI,KAAK,kBAAkB,EAAE,IAAI,CAAC,EAAE,EACrD,KAAK,IAAI;AAEX,cAAM,eAAe,gBAAgB,GAAG,IAAI,CAAC,MAAM,IAAI,CAAC,GAAG,EAAE,KAAK,IAAI,CAAC;AACvE,cAAM,eAAe,OAAO;AAAA,2BAAgC;AAC5D,cAAM,mBAAmB,OAAO,mBAC7B;AAAA,YAAiB,OAAO,gBAAgB,OACxC;AAEH,cAAM,KAAK,KAAK;AAAA,UACf,+BAA+B,IAAI;AAAA;AAAA,GAErC,UAAU;AAAA,oCACuB,YAAY;AAAA;AAAA,GAE7C,YAAY,GAAG,gBAAgB;AAAA;AAAA,QAE9B;AAEA,cAAM,cAAc,OAAO,eAAe,OAAO;AACjD,cAAM,aAAa,CAAC,GAAG,MAAM;AAE7B,cAAM,cAAc,MAAM,KAAK,KAAK;AAAA,UACnC;AAAA,UACA,CAAC,aAAa,UAAU;AAAA,QACzB;AAGA,cAAM,UAAU,oBAAI,IAAoE;AACxF,mBAAW,OAAO,YAAY,MAAM;AACnC,gBAA
M,MAAM,IAAI;AAChB,cAAI,MAAM,QAAQ,IAAI,GAAG;AACzB,cAAI,CAAC,KAAK;AACT,kBAAM,CAAC;AACP,oBAAQ,IAAI,KAAK,GAAG;AAAA,UACrB;AACA,cAAI,KAAK,GAAsD;AAAA,QAChE;AAEA,cAAM,UAAoE,CAAC;AAC3E,cAAM,YAAsB,CAAC;AAE7B,mBAAW,CAAC,OAAO,IAAI,KAAK,SAAS;AACpC,gBAAM,QAAQ,iBAAiB,IAAI;AACnC,cAAI,UAAU,MAAM;AACnB,oBAAQ,KAAK,EAAE,OAAO,MAAM,CAAC;AAAA,UAC9B,OAAO;AACN,sBAAU,KAAK,KAAK;AAAA,UACrB;AAAA,QACD;AAEA,YAAI,QAAQ,SAAS,GAAG;AACvB,gBAAM,WAAW,OAAO,QAAQ,IAAI,CAAC,MAAM,EAAE,IAAI;AACjD,gBAAM,WAAW,CAAC,UAAU,GAAG,QAAQ;AACvC,gBAAM,UAAU,OACb,CAAC,GAAG,UAAU,cAAc,WAAW,IACvC,CAAC,GAAG,UAAU,WAAW;AAC5B,gBAAM,UAAU,QAAQ,IAAI,CAAC,MAAM,IAAI,CAAC,GAAG,EAAE,KAAK,IAAI;AAEtD,gBAAM,SAAoB,CAAC;AAC3B,gBAAM,YAAsB,CAAC;AAC7B,gBAAM,eAAe,QAAQ;AAE7B,mBAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACxC,kBAAM,IAAI,QAAQ,CAAC;AACnB,kBAAM,SAAS,IAAI;AACnB,kBAAM,eAAe,QAAQ,IAAI,CAAC,GAAG,MAAM,IAAI,SAAS,IAAI,CAAC,EAAE;AAC/D,sBAAU,KAAK,IAAI,aAAa,KAAK,IAAI,CAAC,GAAG;AAE7C,mBAAO,KAAK,EAAE,KAAK;AACnB,uBAAW,OAAO,UAAU;AAC3B,qBAAO,KAAK,EAAE,MAAM,GAAG,KAAK,IAAI;AAAA,YACjC;AACA,gBAAI,KAAM,QAAO,KAAK,IAAI;AAC1B,mBAAO,KAAK,oBAAI,KAAK,CAAC;AAAA,UACvB;AAEA,gBAAM,eAAe,aAAa,IAAI,CAAC,MAAM,IAAI,CAAC,GAAG,EAAE,KAAK,IAAI;AAChE,gBAAM,aAAa,OAChB,CAAC,GAAG,UAAU,cAAc,WAAW,IACvC,CAAC,GAAG,UAAU,WAAW;AAC5B,gBAAM,YAAY,WAAW,IAAI,CAAC,MAAM,IAAI,CAAC,iBAAiB,CAAC,GAAG,EAAE,KAAK,IAAI;AAE7E,gBAAM,KAAK,KAAK;AAAA,YACf,gBAAgB,IAAI,MAAM,OAAO,YAAY,UAAU,KAAK,IAAI,CAAC,iBAAiB,YAAY,mBAAmB,SAAS;AAAA,YAC1H;AAAA,UACD;AAAA,QACD;AAEA,YAAI,UAAU,SAAS,GAAG;AACzB,cAAI,MAAM;AACT,kBAAM,KAAK,KAAK;AAAA,cACf,WAAW,IAAI;AAAA,cACf,CAAC,SAAS;AAAA,YACX;AAAA,UACD,OAAO;AACN,kBAAM,KAAK,KAAK,MAAM,gBAAgB,IAAI,4BAA4B,CAAC,SAAS,CAAC;AAAA,UAClF;AAAA,QACD;AAAA,MACD;AAAA,IACD,GAAG,8BAA8B;AAAA,EAClC;AAAA;AAAA,EAGA,MAAM,QAAuB;AAC5B,UAAM,KAAK,KAAK,IAAI;AAAA,EACrB;AACD;AAKA,SAASA,eAAc,KAAwC;AAC9D,QAAM,UACL,OAAO,IAAI,YAAY,WACpB,KAAK,MAAM,IAAI,OAAiB,IAC/B,IAAI;AAET,SAAO;AAAA,IACN,SAAS,IAAI;AAAA,IACb,OAAO,IAAI;AAAA,IACX,OAAO,IAAI;AAAA,IACX;AAAA,IACA,KAAK,OAAO,IAAI,GAAa;AAAA,IAC7B,UAAU,IAAI;AAAA,IACd,IAAI,IAAI;AAAA,EACT;AACD;;;ACzSO,SAAS,sBACf,QACwC;AACxC,MAAI;AACH,YAAQ,OAAO,MAAM;AAAA,MACpB,KAAK,YAAY;AAChB,YAAI,CAAC,OAAO,UAAU;AACrB,iBAAO,IAAI,IAAI,aAAa,kDAAkD,CAAC;AAAA,QAChF;AACA,eAAO;AAAA,UACN,IAAI,gBAAgB;AAAA,YACnB,kBAAkB,OAAO,SAAS;AAAA,UACnC,CAAC;AAAA,QACF;AAAA,MACD;AAAA,MACA,KAAK,SAAS;AACb,YAAI,CAAC,OAAO,OAAO;AAClB,iBAAO,IAAI,IAAI,aAAa,4CAA4C,CAAC;AAAA,QAC1E;AACA,eAAO;AAAA,UACN,IAAI,aAAa;AAAA,YAChB,kBAAkB,OAAO,MAAM;AAAA,UAChC,CAAC;AAAA,QACF;AAAA,MACD;AAAA,MACA,KAAK,YAAY;AAChB,YAAI,CAAC,OAAO,UAAU;AACrB,iBAAO,IAAI,IAAI,aAAa,kDAAkD,CAAC;AAAA,QAChF;AACA,eAAO;AAAA,UACN,IAAI,gBAAgB;AAAA,YACnB,WAAW,OAAO,SAAS;AAAA,YAC3B,SAAS,OAAO,SAAS;AAAA,YACzB,aAAa,OAAO,SAAS;AAAA,YAC7B,UAAU,OAAO,SAAS;AAAA,UAC3B,CAAC;AAAA,QACF;AAAA,MACD;AAAA,MACA;AACC,eAAO,IAAI,IAAI,aAAa,+BAA+B,OAAO,IAAI,EAAE,CAAC;AAAA,IAC3E;AAAA,EACD,SAAS,KAAc;AACtB,WAAO,IAAI,IAAI,aAAa,6BAA6B,QAAQ,GAAG,EAAE,OAAO,EAAE,CAAC;AAAA,EACjF;AACD;;;AChCO,IAAM,gBAAN,MAA+D;AAAA,EACpD;AAAA,EACA;AAAA,EAEjB,YAAY,QAA6B;AACxC,SAAK,UAAU,OAAO;AACtB,SAAK,cAAc,OAAO;AAAA,EAC3B;AAAA;AAAA,EAGA,MAAM,aAAa,QAAyD;AAC3E,UAAM,SAAS,MAAM,KAAK,QAAQ,aAAa,MAAM;AACrD,QAAI,CAAC,OAAO,IAAI;AACf,aAAO;AAAA,IACR;AAEA,eAAW,aAAa,KAAK,aAAa;AACzC,gBAAU,aAAa,MAAM,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAAA,IAC9C;AAEA,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA,EAGA,MAAM,iBACL,KACA,QAC4C;AAC5C,WAAO,KAAK,QAAQ,iBAAiB,KAAK,MAAM;AAAA,EACjD;AAAA;AAAA,EAGA,MAAM,eACL,OACA,OACgE;AAChE,WAAO,KAAK,QAAQ,eAAe,OAAO,KAAK;AAAA,EAChD;AAAA;AAAA,EAGA,MAAM,aAAa,QAA0D;AAC5E,UAAM,SAAS,MAAM,KAAK,QAAQ,aAAa,MAAM;AACrD,QAAI,CAAC,OAAO,IAAI;AACf,aAAO;AAAA,IACR;AAEA,eAAW,aAAa,KAAK,aAAa;
AACzC,gBAAU,aAAa,MAAM,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAAA,IAC9C;AAEA,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA,EAGA,MAAM,YACL,QACA,SACsC;AACtC,QAAI,iBAAiB,KAAK,OAAO,GAAG;AACnC,YAAM,SAAS,MAAM,KAAK,QAAQ,YAAY,QAAQ,OAAO;AAC7D,UAAI,CAAC,OAAO,IAAI;AACf,eAAO;AAAA,MACR;AAAA,IACD;AAEA,eAAW,aAAa,KAAK,aAAa;AACzC,UAAI,iBAAiB,SAAS,GAAG;AAChC,kBAAU,YAAY,QAAQ,OAAO,EAAE,MAAM,MAAM;AAAA,QAAC,CAAC;AAAA,MACtD;AAAA,IACD;AAEA,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA,EAGA,MAAM,QAAuB;AAC5B,UAAM,KAAK,QAAQ,MAAM;AACzB,eAAW,aAAa,KAAK,aAAa;AACzC,YAAM,UAAU,MAAM;AAAA,IACvB;AAAA,EACD;AACD;;;ACvEO,IAAM,mBAAN,MAAkE;AAAA,EACvD;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,QAAgC;AAC3C,SAAK,MAAM,OAAO,IAAI;AACtB,SAAK,OAAO,OAAO,KAAK;AACxB,SAAK,WAAW,OAAO,IAAI;AAAA,EAC5B;AAAA;AAAA,EAGA,MAAM,aAAa,QAAyD;AAC3E,WAAO,KAAK,IAAI,aAAa,MAAM;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBACL,KACA,QAC4C;AAC5C,UAAM,cAAc,OAAO,OAAO,GAAG;AACrC,UAAM,cAAc,KAAK,IAAI,IAAI,KAAK;AAEtC,QAAI,cAAc,aAAa;AAE9B,YAAM,CAAC,WAAW,UAAU,IAAI,MAAM,QAAQ,IAAI;AAAA,QACjD,KAAK,IAAI,iBAAiB,KAAK,MAAM;AAAA,QACrC,KAAK,KAAK,iBAAiB,KAAK,MAAM;AAAA,MACvC,CAAC;AAED,UAAI,CAAC,UAAU,GAAI,QAAO;AAC1B,UAAI,CAAC,WAAW,GAAI,QAAO;AAE3B,YAAM,SAAS,CAAC,GAAG,UAAU,OAAO,GAAG,WAAW,KAAK;AACvD,aAAO,KAAK,CAAC,GAAG,MAAO,EAAE,MAAM,EAAE,MAAM,KAAK,EAAE,MAAM,EAAE,MAAM,IAAI,CAAE;AAElE,aAAO,GAAG,MAAM;AAAA,IACjB;AAGA,WAAO,KAAK,IAAI,iBAAiB,KAAK,MAAM;AAAA,EAC7C;AAAA;AAAA,EAGA,MAAM,eACL,OACA,OACgE;AAChE,UAAM,YAAY,MAAM,KAAK,IAAI,eAAe,OAAO,KAAK;AAC5D,QAAI,CAAC,UAAU,GAAI,QAAO;AAE1B,QAAI,UAAU,UAAU,MAAM;AAC7B,aAAO;AAAA,IACR;AAEA,WAAO,KAAK,KAAK,eAAe,OAAO,KAAK;AAAA,EAC7C;AAAA;AAAA,EAGA,MAAM,aAAa,QAA0D;AAC5E,UAAM,YAAY,MAAM,KAAK,IAAI,aAAa,MAAM;AACpD,QAAI,CAAC,UAAU,GAAI,QAAO;AAE1B,WAAO,KAAK,KAAK,aAAa,MAAM;AAAA,EACrC;AAAA;AAAA,EAGA,MAAM,YACL,QACA,SACsC;AACtC,QAAI,iBAAiB,KAAK,GAAG,GAAG;AAC/B,aAAO,KAAK,IAAI,YAAY,QAAQ,OAAO;AAAA,IAC5C;AACA,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA,EAGA,MAAM,QAAuB;AAC5B,UAAM,KAAK,IAAI,MAAM;AACrB,UAAM,KAAK,KAAK,MAAM;AAAA,EACvB;AACD;AAgBA,eAAsB,cACrB,KACA,MACA,UACsD;AACtD,QAAM,cAAc,KAAK,IAAI,IAAI;AACjC,QAAM,eAAgB,OAAO,CAAC,KAAK;AAEnC,QAAM,SAAS,MAAM,IAAI,iBAAiB,YAAY;AACtD,MAAI,CAAC,OAAO,GAAI,QAAO;AAEvB,QAAM,YAAY,OAAO,MAAM,OAAO,CAAC,UAAU;AAChD,UAAM,SAAS,OAAO,MAAM,OAAO,GAAG;AACtC,WAAO,SAAS;AAAA,EACjB,CAAC;AAED,MAAI,UAAU,WAAW,GAAG;AAC3B,WAAO,GAAG,EAAE,UAAU,EAAE,CAAC;AAAA,EAC1B;AAEA,QAAM,eAAe,MAAM,KAAK,aAAa,SAAS;AACtD,MAAI,CAAC,aAAa,GAAI,QAAO;AAE7B,SAAO,GAAG,EAAE,UAAU,UAAU,OAAO,CAAC;AACzC;;;AC/HA,eAAsB,eACrB,MAC+C;AAC/C,QAAM,YAAY,KAAK,aAAa;AAEpC,QAAM,aAAa,MAAM,KAAK,KAAK,iBAAiB,OAAO,CAAC,GAAmB,KAAK,MAAM;AAC1F,MAAI,CAAC,WAAW,IAAI;AACnB,WAAO;AAAA,EACR;AAEA,QAAM,SAAS,WAAW;AAE1B,MAAI,OAAO,WAAW,GAAG;AACxB,WAAO,GAAG,EAAE,aAAa,GAAG,SAAS,EAAE,CAAC;AAAA,EACzC;AAEA,MAAI,aAAa;AACjB,MAAI,aAAa;AAEjB,WAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK,WAAW;AAClD,UAAM,QAAQ,OAAO,MAAM,GAAG,IAAI,SAAS;AAC3C,UAAM,cAAc,MAAM,KAAK,GAAG,aAAa,KAAK;AACpD,QAAI,CAAC,YAAY,IAAI;AACpB,aAAO;AAAA,IACR;AAEA;AACA,kBAAc,MAAM;AAEpB,SAAK,aAAa,EAAE,OAAO,YAAY,WAAW,CAAC;AAAA,EACpD;AAEA,SAAO,GAAG,EAAE,aAAa,YAAY,SAAS,WAAW,CAAC;AAC3D;;;ACxEA;AAAA,EACC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACM;AAYA,IAAM,eAAN,MAA0C;AAAA,EAC/B;AAAA,EACA;AAAA,EAEjB,YAAY,QAAuB;AAClC,SAAK,SAAS,OAAO;AACrB,SAAK,SAAS,IAAI,SAAS;AAAA,MAC1B,UAAU,OAAO;AAAA,MACjB,QAAQ,OAAO,UAAU;AAAA,MACzB,aAAa,OAAO;AAAA,MACpB,gBAAgB;AAAA;AAAA,IACjB,CAAC;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,UACL,MACA,MACA,aACsC;AACtC,WAAO,UAAU,YAAY;AAC5B,YAAM,KAAK,OAAO;AAAA,QACjB,IAAI,iBAAiB;AAAA,UACpB,QAAQ,KAAK;AAAA,UACb,KAAK;AAAA,UACL,MAAM;AAAA,UACN,aAAa;AAAA,QACd,CAAC;AAAA,MACF;AAAA,IACD,GAAG,yBAAyB,IAAI,EAAE;AAAA
,EACnC;AAAA;AAAA,EAGA,MAAM,UAAU,MAAyD;AACxE,WAAO,UAAU,YAAY;AAC5B,YAAM,WAAW,MAAM,KAAK,OAAO;AAAA,QAClC,IAAI,iBAAiB;AAAA,UACpB,QAAQ,KAAK;AAAA,UACb,KAAK;AAAA,QACN,CAAC;AAAA,MACF;AACA,YAAM,QAAQ,MAAM,SAAS,MAAM,qBAAqB;AACxD,UAAI,CAAC,OAAO;AACX,cAAM,IAAI,aAAa,8BAA8B,IAAI,EAAE;AAAA,MAC5D;AACA,aAAO;AAAA,IACR,GAAG,yBAAyB,IAAI,EAAE;AAAA,EACnC;AAAA;AAAA,EAGA,MAAM,WACL,MACsE;AACtE,WAAO,UAAU,YAAY;AAC5B,YAAM,WAAW,MAAM,KAAK,OAAO;AAAA,QAClC,IAAI,kBAAkB;AAAA,UACrB,QAAQ,KAAK;AAAA,UACb,KAAK;AAAA,QACN,CAAC;AAAA,MACF;AACA,aAAO;AAAA,QACN,MAAM,SAAS,iBAAiB;AAAA,QAChC,cAAc,SAAS,gBAAgB,oBAAI,KAAK,CAAC;AAAA,MAClD;AAAA,IACD,GAAG,0BAA0B,IAAI,EAAE;AAAA,EACpC;AAAA;AAAA,EAGA,MAAM,YAAY,QAA6D;AAC9E,WAAO,UAAU,YAAY;AAC5B,YAAM,WAAW,MAAM,KAAK,OAAO;AAAA,QAClC,IAAI,qBAAqB;AAAA,UACxB,QAAQ,KAAK;AAAA,UACb,QAAQ;AAAA,QACT,CAAC;AAAA,MACF;AACA,cAAQ,SAAS,YAAY,CAAC,GAAG,IAAI,CAAC,UAAU;AAAA,QAC/C,KAAK,KAAK,OAAO;AAAA,QACjB,MAAM,KAAK,QAAQ;AAAA,QACnB,cAAc,KAAK,gBAAgB,oBAAI,KAAK,CAAC;AAAA,MAC9C,EAAE;AAAA,IACH,GAAG,uCAAuC,MAAM,EAAE;AAAA,EACnD;AAAA;AAAA,EAGA,MAAM,aAAa,MAAmD;AACrE,WAAO,UAAU,YAAY;AAC5B,YAAM,KAAK,OAAO;AAAA,QACjB,IAAI,oBAAoB;AAAA,UACvB,QAAQ,KAAK;AAAA,UACb,KAAK;AAAA,QACN,CAAC;AAAA,MACF;AAAA,IACD,GAAG,4BAA4B,IAAI,EAAE;AAAA,EACtC;AAAA;AAAA,EAGA,MAAM,cAAc,OAAsD;AACzE,QAAI,MAAM,WAAW,GAAG;AACvB,aAAO,GAAG,MAAS;AAAA,IACpB;AAEA,WAAO,UAAU,YAAY;AAC5B,YAAM,KAAK,OAAO;AAAA,QACjB,IAAI,qBAAqB;AAAA,UACxB,QAAQ,KAAK;AAAA,UACb,QAAQ;AAAA,YACP,SAAS,MAAM,IAAI,CAAC,SAAS,EAAE,KAAK,IAAI,EAAE;AAAA,YAC1C,OAAO;AAAA,UACR;AAAA,QACD,CAAC;AAAA,MACF;AAAA,IACD,GAAG,0BAA0B,MAAM,MAAM,UAAU;AAAA,EACpD;AACD;;;ACvHA,eAAsB,cAAc,QAAkD;AACrF,UAAQ,OAAO,MAAM;AAAA,IACpB,KAAK,YAAY;AAChB,UAAI,CAAC,OAAO,SAAU,QAAO;AAC7B,YAAM,EAAE,MAAAC,MAAK,IAAI,MAAM,OAAO,IAAI;AAClC,YAAM,OAAO,IAAIA,MAAK,EAAE,kBAAkB,OAAO,SAAS,iBAAiB,CAAC;AAC5E,aAAO,OAAO,KAAa,WAAuB;AACjD,cAAM,SAAS,MAAM,KAAK,MAAM,KAAK,MAAM;AAC3C,eAAO,OAAO;AAAA,MACf;AAAA,IACD;AAAA,IACA,KAAK,SAAS;AACb,UAAI,CAAC,OAAO,MAAO,QAAO;AAC1B,YAAMC,SAAQ,MAAM,OAAO,gBAAgB;AAC3C,YAAM,OAAOA,OAAM,WAAW,OAAO,MAAM,gBAAgB;AAC3D,aAAO,OAAO,KAAa,WAAuB;AACjD,cAAM,CAAC,IAAI,IAAI,MAAM,KAAK,MAAM,KAAK,MAAM;AAC3C,eAAO;AAAA,MACR;AAAA,IACD;AAAA,IACA;AACC,aAAO;AAAA,EACT;AACD;","names":["rowToRowDelta","Pool","mysql"]}
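The adapter sources embedded in the map above share one flow: ensure the `lakesync_deltas` table and the destination schema exist, read deltas past an HLC watermark, then materialise them into a destination table with column-level LWW. The sketch below illustrates that flow against `PostgresAdapter`; the `lakesync/adapter` import path, the connection string, and the `todos` schema are assumptions for illustration only, and the schema literal sets only the fields visible in these sources.

```ts
import type { HLCTimestamp, TableSchema } from "@lakesync/core";
// Assumed entry point — adjust to wherever the package actually re-exports its adapters.
import { PostgresAdapter } from "lakesync/adapter";

// Illustrative destination schema; only fields referenced by the embedded sources are set.
const todoSchema = {
	table: "todos",
	columns: [
		{ name: "title", type: "string" },
		{ name: "done", type: "boolean" },
	],
} as TableSchema;

async function materialiseEverything(): Promise<void> {
	const adapter = new PostgresAdapter({
		connectionString: "postgres://user:pass@localhost:5432/app",
	});
	// createDatabaseAdapter() above performs the same construction from a ConnectorConfig.

	await adapter.ensureSchema(todoSchema);

	// HLC 0 means "everything"; the wall-clock milliseconds live in the high bits (hlc >> 16n).
	const since = 0n as HLCTimestamp;
	const deltas = await adapter.queryDeltasSince(since, ["todos"]);
	if (!deltas.ok) throw new Error("failed to read deltas");

	// Merge delta history per row and upsert into the destination table; tombstoned rows are removed.
	const done = await adapter.materialise(deltas.value, [todoSchema]);
	if (!done.ok) throw new Error("materialise failed");

	await adapter.close();
}
```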
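`FanOutAdapter` writes to a primary adapter synchronously and replicates to secondaries on a best-effort basis, matching the "operational store plus analytics store" split described in its comment. A minimal sketch, again assuming a `lakesync/adapter` entry point and placeholder connection settings:

```ts
// Assumed entry point and illustrative credentials.
import { BigQueryAdapter, FanOutAdapter, PostgresAdapter } from "lakesync/adapter";

const primary = new PostgresAdapter({
	connectionString: "postgres://user:pass@localhost:5432/app",
});

// Secondary writes are fire-and-forget: failures are swallowed and never affect the returned Result.
const analytics = new BigQueryAdapter({
	projectId: "my-project",
	dataset: "lakesync",
	keyFilename: "/secrets/bq.json",
	location: "EU",
});

const adapter = new FanOutAdapter({ primary, secondaries: [analytics] });

// insertDeltas()/ensureSchema()/materialise() hit Postgres first, then replicate to BigQuery;
// queryDeltasSince()/getLatestState() read from Postgres only.
```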
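`LifecycleAdapter` routes reads across hot and cold tiers by comparing the wall-clock component of the HLC (`Number(hlc >> 16n)`) against `maxAgeMs`, and `migrateToTier` is the companion background job that copies aged-out deltas from hot to cold without deleting them. A sketch with an illustrative 30-day threshold; the cold tier here is a second Postgres instance, but any `DatabaseAdapter` works:

```ts
// Assumed entry point — adjust to the package's actual adapter export.
import { LifecycleAdapter, PostgresAdapter, migrateToTier } from "lakesync/adapter";

const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;

const hot = new PostgresAdapter({ connectionString: "postgres://user:pass@localhost:5432/app" });
const cold = new PostgresAdapter({ connectionString: "postgres://user:pass@archive:5432/app" });

const adapter = new LifecycleAdapter({
	hot: { adapter: hot, maxAgeMs: THIRTY_DAYS_MS },
	cold: { adapter: cold },
});

// Writes always land in the hot tier; queries older than maxAgeMs fan out to both tiers.

// Periodic archival job — copies aged-out deltas to the cold tier (idempotent via delta_id).
setInterval(async () => {
	const result = await migrateToTier(hot, cold, THIRTY_DAYS_MS);
	if (result.ok) console.log(`archived ${result.value.migrated} deltas`);
}, 60 * 60 * 1000);
```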
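`migrateAdapter` is the one-shot counterpart: it reads every matching delta from a source adapter and writes it to a target in batches, reporting progress after each batch. A sketch moving deltas from MySQL to Postgres with the default batch size overridden (entry point and connection strings are placeholders):

```ts
// Assumed entry point and illustrative connection strings.
import { MySQLAdapter, PostgresAdapter, migrateAdapter } from "lakesync/adapter";

const from = new MySQLAdapter({ connectionString: "mysql://user:pass@localhost:3306/app" });
const to = new PostgresAdapter({ connectionString: "postgres://user:pass@localhost:5432/app" });

const result = await migrateAdapter({
	from,
	to,
	tables: ["todos"], // optional: restrict to specific tables
	batchSize: 500, // default is 1000
	onProgress: ({ batch, totalSoFar }) => {
		console.log(`batch ${batch}: ${totalSoFar} deltas migrated`);
	},
});

if (result.ok) {
	console.log(`migrated ${result.value.totalDeltas} deltas in ${result.value.batches} batches`);
}
```

Because the target deduplicates on `delta_id`, re-running the migration after a partial failure is safe.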
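`MinIOAdapter` wraps the AWS S3 SDK with `forcePathStyle` enabled, so the same code talks to MinIO or any S3-compatible store, and every call returns a `Result` instead of throwing. A round-trip sketch; the endpoint, bucket, and credentials are placeholders:

```ts
// Assumed entry point; endpoint and credentials are illustrative.
import { MinIOAdapter } from "lakesync/adapter";

const lake = new MinIOAdapter({
	bucket: "lakesync",
	endpoint: "http://localhost:9000",
	region: "us-east-1",
	credentials: { accessKeyId: "minioadmin", secretAccessKey: "minioadmin" },
});

const body = new TextEncoder().encode(JSON.stringify({ hello: "lake" }));

const put = await lake.putObject("segments/2024/01/example.json", body, "application/json");
if (!put.ok) throw new Error("put failed");

const got = await lake.getObject("segments/2024/01/example.json");
if (got.ok) {
	console.log(new TextDecoder().decode(got.value));
}

// listObjects("segments/"), headObject(), deleteObject(), and deleteObjects([...])
// follow the same Result-based pattern.
```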
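`createQueryFn` gives connector pollers a uniform `(sql, params) => rows` function and only loads the relevant driver (pg or mysql2) via dynamic import; connector types without a SQL polling model return `null`. A sketch under the same entry-point assumption, with the `ConnectorConfig` literal kept loose because only the fields this helper reads are visible above:

```ts
import type { ConnectorConfig } from "@lakesync/core";
// Assumed entry point.
import { createQueryFn } from "lakesync/adapter";

// Illustrative config — real ConnectorConfig values carry more fields than shown here.
const config = {
	type: "postgres",
	postgres: { connectionString: "postgres://user:pass@localhost:5432/app" },
} as unknown as ConnectorConfig;

const query = await createQueryFn(config);
if (!query) {
	throw new Error("connector type does not support SQL polling (e.g. BigQuery)");
}

// Parameter placeholders follow the underlying driver: $1, $2 for pg; ? for mysql2.
const rows = await query("SELECT row_id, title FROM todos WHERE done = $1", [false]);
console.log(`fetched ${rows.length} rows`);
```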