lakesync 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/dist/adapter.d.ts +185 -20
  2. package/dist/adapter.js +13 -3
  3. package/dist/analyst.js +2 -2
  4. package/dist/{base-poller-BpUyuG2R.d.ts → base-poller-Bj9kX9dv.d.ts} +76 -19
  5. package/dist/catalogue.d.ts +1 -1
  6. package/dist/catalogue.js +3 -3
  7. package/dist/chunk-DGUM43GV.js +11 -0
  8. package/dist/{chunk-IRJ4QRWV.js → chunk-JI4C4R5H.js} +249 -140
  9. package/dist/chunk-JI4C4R5H.js.map +1 -0
  10. package/dist/{chunk-FHVTUKXL.js → chunk-KVSWLIJR.js} +2 -2
  11. package/dist/{chunk-P3FT7QCW.js → chunk-LDFFCG2K.js} +377 -247
  12. package/dist/chunk-LDFFCG2K.js.map +1 -0
  13. package/dist/{chunk-GUJWMK5P.js → chunk-LPWXOYNS.js} +373 -350
  14. package/dist/chunk-LPWXOYNS.js.map +1 -0
  15. package/dist/{chunk-QMS7TGFL.js → chunk-PYRS74YP.js} +15 -4
  16. package/dist/{chunk-QMS7TGFL.js.map → chunk-PYRS74YP.js.map} +1 -1
  17. package/dist/{chunk-NCZYFZ3B.js → chunk-QNITY4F6.js} +30 -7
  18. package/dist/{chunk-NCZYFZ3B.js.map → chunk-QNITY4F6.js.map} +1 -1
  19. package/dist/{chunk-SF7Y6ZUA.js → chunk-SSICS5KI.js} +2 -2
  20. package/dist/{chunk-UAUQGP3B.js → chunk-TMLG32QV.js} +2 -2
  21. package/dist/client.d.ts +164 -13
  22. package/dist/client.js +310 -163
  23. package/dist/client.js.map +1 -1
  24. package/dist/compactor.d.ts +1 -1
  25. package/dist/compactor.js +4 -4
  26. package/dist/connector-jira.d.ts +2 -2
  27. package/dist/connector-jira.js +3 -3
  28. package/dist/connector-salesforce.d.ts +2 -2
  29. package/dist/connector-salesforce.js +3 -3
  30. package/dist/{coordinator-D32a5rNk.d.ts → coordinator-NXy6tA0h.d.ts} +23 -16
  31. package/dist/{db-types-BlN-4KbQ.d.ts → db-types-CfLMUBfW.d.ts} +1 -1
  32. package/dist/gateway-server.d.ts +158 -64
  33. package/dist/gateway-server.js +482 -4003
  34. package/dist/gateway-server.js.map +1 -1
  35. package/dist/gateway.d.ts +61 -104
  36. package/dist/gateway.js +12 -6
  37. package/dist/index.d.ts +45 -10
  38. package/dist/index.js +14 -2
  39. package/dist/parquet.d.ts +1 -1
  40. package/dist/parquet.js +3 -3
  41. package/dist/proto.d.ts +1 -1
  42. package/dist/proto.js +3 -3
  43. package/dist/react.d.ts +47 -10
  44. package/dist/react.js +88 -40
  45. package/dist/react.js.map +1 -1
  46. package/dist/{registry-CPTgO9jv.d.ts → registry-BcspAtZI.d.ts} +19 -4
  47. package/dist/{gateway-Bpvatd9n.d.ts → request-handler-pUvL7ozF.d.ts} +139 -10
  48. package/dist/{resolver-CbuXm3nB.d.ts → resolver-CXxmC0jR.d.ts} +1 -1
  49. package/dist/{src-FPJQYQNA.js → src-B6NLV3FP.js} +4 -4
  50. package/dist/{src-RHKJFQKR.js → src-ROW4XLO7.js} +15 -3
  51. package/dist/{src-CLCALYDT.js → src-ZRHKG42A.js} +4 -4
  52. package/dist/{types-CLlD4XOy.d.ts → types-BdGBv2ba.d.ts} +17 -2
  53. package/dist/{types-D-E0VrfS.d.ts → types-BrcD1oJg.d.ts} +26 -19
  54. package/package.json +1 -1
  55. package/dist/chunk-7D4SUZUM.js +0 -38
  56. package/dist/chunk-GUJWMK5P.js.map +0 -1
  57. package/dist/chunk-IRJ4QRWV.js.map +0 -1
  58. package/dist/chunk-P3FT7QCW.js.map +0 -1
  59. package/dist/{chunk-7D4SUZUM.js.map → chunk-DGUM43GV.js.map} +0 -0
  60. package/dist/{chunk-FHVTUKXL.js.map → chunk-KVSWLIJR.js.map} +0 -0
  61. package/dist/{chunk-SF7Y6ZUA.js.map → chunk-SSICS5KI.js.map} +0 -0
  62. package/dist/{chunk-UAUQGP3B.js.map → chunk-TMLG32QV.js.map} +0 -0
  63. package/dist/{src-CLCALYDT.js.map → src-B6NLV3FP.js.map} +0 -0
  64. package/dist/{src-FPJQYQNA.js.map → src-ROW4XLO7.js.map} +0 -0
  65. package/dist/{src-RHKJFQKR.js.map → src-ZRHKG42A.js.map} +0 -0
package/dist/adapter.d.ts CHANGED
@@ -1,9 +1,9 @@
1
1
  import { BigQuery } from '@google-cloud/bigquery';
2
2
  import { R as Result, A as AdapterError, H as HLCTimestamp } from './result-CojzlFE2.js';
3
- import { R as RowDelta, T as TableSchema, C as ColumnDelta } from './types-CLlD4XOy.js';
4
- import { D as DatabaseAdapter, a as DatabaseAdapterConfig } from './db-types-BlN-4KbQ.js';
5
- export { i as isDatabaseAdapter, l as lakeSyncTypeToBigQuery } from './db-types-BlN-4KbQ.js';
6
- import { C as ConnectorConfig } from './types-D-E0VrfS.js';
3
+ import { R as RowDelta, T as TableSchema, a as ColumnDelta } from './types-BdGBv2ba.js';
4
+ import { D as DatabaseAdapter, a as DatabaseAdapterConfig } from './db-types-CfLMUBfW.js';
5
+ export { i as isDatabaseAdapter, l as lakeSyncTypeToBigQuery } from './db-types-CfLMUBfW.js';
6
+ import { C as ConnectorConfig } from './types-BrcD1oJg.js';
7
7
  import { L as LakeAdapter, A as AdapterConfig, O as ObjectInfo } from './types-DSC_EiwR.js';
8
8
  import mysql from 'mysql2/promise';
9
9
  import { Pool } from 'pg';
@@ -70,6 +70,56 @@ declare function groupDeltasByTable(deltas: ReadonlyArray<RowDelta>): Map<string
70
70
  * @returns A map from source table name to schema.
71
71
  */
72
72
  declare function buildSchemaIndex(schemas: ReadonlyArray<TableSchema>): Map<string, TableSchema>;
73
+ /** Minimal query interface for executing SQL against a database. */
74
+ interface QueryExecutor {
75
+ query(sql: string, params: unknown[]): Promise<void>;
76
+ queryRows(sql: string, params: unknown[]): Promise<Array<{
77
+ row_id: string;
78
+ columns: string | ColumnDelta[];
79
+ op: string;
80
+ }>>;
81
+ }
82
+ /**
83
+ * SQL dialect interface — encapsulates the syntactic differences between
84
+ * Postgres, MySQL, and BigQuery for the materialise algorithm.
85
+ */
86
+ interface SqlDialect {
87
+ /** Generate CREATE TABLE IF NOT EXISTS for the destination table. */
88
+ createDestinationTable(dest: string, schema: TableSchema, pk: string[], softDelete: boolean): {
89
+ sql: string;
90
+ params: unknown[];
91
+ };
92
+ /** Generate a query to fetch delta history for a set of affected row IDs. */
93
+ queryDeltaHistory(sourceTable: string, rowIds: string[]): {
94
+ sql: string;
95
+ params: unknown[];
96
+ };
97
+ /** Generate an upsert statement for the merged row states. */
98
+ buildUpsert(dest: string, schema: TableSchema, conflictCols: string[], softDelete: boolean, upserts: Array<{
99
+ rowId: string;
100
+ state: Record<string, unknown>;
101
+ }>): {
102
+ sql: string;
103
+ params: unknown[];
104
+ };
105
+ /** Generate a delete (hard or soft) statement for tombstoned row IDs. */
106
+ buildDelete(dest: string, deleteIds: string[], softDelete: boolean): {
107
+ sql: string;
108
+ params: unknown[];
109
+ };
110
+ }
111
+ /**
112
+ * Execute the shared materialise algorithm using the provided dialect and executor.
113
+ *
114
+ * Algorithm: group by table -> build schema index -> for each table:
115
+ * create dest table -> query history -> merge -> upsert -> delete.
116
+ *
117
+ * @param executor - Executes SQL statements against the database.
118
+ * @param dialect - Generates dialect-specific SQL.
119
+ * @param deltas - The deltas that were just flushed.
120
+ * @param schemas - Table schemas defining destination tables and column mappings.
121
+ */
122
+ declare function executeMaterialise(executor: QueryExecutor, dialect: SqlDialect, deltas: RowDelta[], schemas: ReadonlyArray<TableSchema>): Promise<Result<void, AdapterError>>;
73
123
 
74
124
  /**
75
125
  * Configuration for the BigQuery adapter.
@@ -85,6 +135,40 @@ interface BigQueryAdapterConfig {
85
135
  /** Dataset location (default: "US"). */
86
136
  location?: string;
87
137
  }
138
+ /**
139
+ * BigQuery SQL dialect for the shared materialise algorithm.
140
+ *
141
+ * Uses `@name` named parameters, `MERGE` for upserts, `JSON` type,
142
+ * `TIMESTAMP` types, and `CLUSTER BY` for table creation.
143
+ *
144
+ * BigQuery's parameterised query API uses named params in a flat object,
145
+ * so this dialect encodes params as named keys and returns them as an
146
+ * array of `[key, value]` pairs that the executor unpacks into a
147
+ * `Record<string, unknown>`.
148
+ */
149
+ declare class BigQuerySqlDialect implements SqlDialect {
150
+ private readonly dataset;
151
+ constructor(dataset: string);
152
+ createDestinationTable(dest: string, schema: TableSchema, pk: string[], softDelete: boolean): {
153
+ sql: string;
154
+ params: unknown[];
155
+ };
156
+ queryDeltaHistory(sourceTable: string, rowIds: string[]): {
157
+ sql: string;
158
+ params: unknown[];
159
+ };
160
+ buildUpsert(dest: string, schema: TableSchema, conflictCols: string[], softDelete: boolean, upserts: Array<{
161
+ rowId: string;
162
+ state: Record<string, unknown>;
163
+ }>): {
164
+ sql: string;
165
+ params: unknown[];
166
+ };
167
+ buildDelete(dest: string, deleteIds: string[], softDelete: boolean): {
168
+ sql: string;
169
+ params: unknown[];
170
+ };
171
+ }
88
172
  /**
89
173
  * BigQuery database adapter for LakeSync.
90
174
  *
@@ -103,6 +187,8 @@ declare class BigQueryAdapter implements DatabaseAdapter, Materialisable {
103
187
  readonly dataset: string;
104
188
  /** @internal */
105
189
  readonly location: string;
190
+ private readonly dialect;
191
+ private readonly executor;
106
192
  constructor(config: BigQueryAdapterConfig);
107
193
  /**
108
194
  * Insert deltas into the database in a single batch.
@@ -127,10 +213,8 @@ declare class BigQueryAdapter implements DatabaseAdapter, Materialisable {
127
213
  /**
128
214
  * Materialise deltas into destination tables.
129
215
  *
130
- * For each affected table, queries the full delta history for touched rows,
131
- * merges to latest state via column-level LWW, then upserts live rows and
132
- * deletes tombstoned rows. The consumer-owned `props` column is never
133
- * touched on UPDATE.
216
+ * Delegates to the shared `executeMaterialise` algorithm with the
217
+ * BigQuery SQL dialect.
134
218
  */
135
219
  materialise(deltas: RowDelta[], schemas: ReadonlyArray<TableSchema>): Promise<Result<void, AdapterError>>;
136
220
  /**
@@ -179,7 +263,7 @@ declare class CompositeAdapter implements DatabaseAdapter {
179
263
  *
180
264
  * Switches on `config.type` and creates the matching adapter using
181
265
  * the type-specific connection configuration. Returns an {@link AdapterError}
182
- * if the type-specific config is missing or the adapter constructor throws.
266
+ * if the type is unsupported or the adapter constructor throws.
183
267
  *
184
268
  * @param config - Validated connector configuration.
185
269
  * @returns The instantiated adapter or an error.
@@ -199,8 +283,14 @@ interface FanOutAdapterConfig {
199
283
  *
200
284
  * Secondary failures are silently caught and never affect the return value.
201
285
  * Use case: write to Postgres (fast, operational), replicate to BigQuery (analytics).
286
+ *
287
+ * **Materialisation:** This adapter exposes a `materialise()` method for
288
+ * duck-type compatibility with `isMaterialisable()`. When the primary adapter
289
+ * is itself materialisable, materialisation is delegated to it; otherwise
290
+ * the method is a graceful no-op returning `Ok`. Materialisable secondaries
291
+ * receive fire-and-forget replication.
202
292
  */
203
- declare class FanOutAdapter implements DatabaseAdapter, Materialisable {
293
+ declare class FanOutAdapter implements DatabaseAdapter {
204
294
  private readonly primary;
205
295
  private readonly secondaries;
206
296
  constructor(config: FanOutAdapterConfig);
@@ -241,8 +331,14 @@ interface LifecycleAdapterConfig {
241
331
  *
242
332
  * Use {@link migrateToTier} as a background job to copy aged-out deltas
243
333
  * from hot to cold.
334
+ *
335
+ * **Materialisation:** This adapter exposes a `materialise()` method for
336
+ * duck-type compatibility with `isMaterialisable()`. Materialisation is
337
+ * delegated to the hot tier only — cold tier stores archived deltas, not
338
+ * destination tables. When the hot adapter is not materialisable, the
339
+ * method is a graceful no-op returning `Ok`.
244
340
  */
245
- declare class LifecycleAdapter implements DatabaseAdapter, Materialisable {
341
+ declare class LifecycleAdapter implements DatabaseAdapter {
246
342
  private readonly hot;
247
343
  private readonly cold;
248
344
  private readonly maxAgeMs;
@@ -345,6 +441,33 @@ declare class MinIOAdapter implements LakeAdapter {
345
441
  deleteObjects(paths: string[]): Promise<Result<void, AdapterError>>;
346
442
  }
347
443
 
444
+ /**
445
+ * MySQL SQL dialect for the shared materialise algorithm.
446
+ *
447
+ * Uses `?` positional parameters, `ON DUPLICATE KEY UPDATE`,
448
+ * `JSON` type, and `TIMESTAMP` types.
449
+ */
450
+ declare class MySqlDialect implements SqlDialect {
451
+ createDestinationTable(dest: string, schema: TableSchema, pk: string[], softDelete: boolean): {
452
+ sql: string;
453
+ params: unknown[];
454
+ };
455
+ queryDeltaHistory(sourceTable: string, rowIds: string[]): {
456
+ sql: string;
457
+ params: unknown[];
458
+ };
459
+ buildUpsert(dest: string, schema: TableSchema, _conflictCols: string[], softDelete: boolean, upserts: Array<{
460
+ rowId: string;
461
+ state: Record<string, unknown>;
462
+ }>): {
463
+ sql: string;
464
+ params: unknown[];
465
+ };
466
+ buildDelete(dest: string, deleteIds: string[], softDelete: boolean): {
467
+ sql: string;
468
+ params: unknown[];
469
+ };
470
+ }
348
471
  /**
349
472
  * MySQL database adapter for LakeSync.
350
473
  *
@@ -355,7 +478,9 @@ declare class MinIOAdapter implements LakeAdapter {
355
478
  declare class MySQLAdapter implements DatabaseAdapter, Materialisable {
356
479
  /** @internal */
357
480
  readonly pool: mysql.Pool;
481
+ private readonly dialect;
358
482
  constructor(config: DatabaseAdapterConfig);
483
+ private get executor();
359
484
  /**
360
485
  * Insert deltas into the database in a single batch.
361
486
  * Uses INSERT IGNORE for idempotent writes — duplicate deltaIds are silently skipped.
@@ -379,16 +504,41 @@ declare class MySQLAdapter implements DatabaseAdapter, Materialisable {
379
504
  /**
380
505
  * Materialise deltas into destination tables.
381
506
  *
382
- * For each table with a matching schema, merges delta history into the
383
- * latest row state and upserts into the destination table. Tombstoned
384
- * rows are soft-deleted (default) or hard-deleted. The `props` column
385
- * is never touched.
507
+ * Delegates to the shared `executeMaterialise` algorithm with the
508
+ * MySQL SQL dialect.
386
509
  */
387
510
  materialise(deltas: RowDelta[], schemas: ReadonlyArray<TableSchema>): Promise<Result<void, AdapterError>>;
388
511
  /** Close the database connection pool and release resources. */
389
512
  close(): Promise<void>;
390
513
  }
391
514
 
515
+ /**
516
+ * Postgres SQL dialect for the shared materialise algorithm.
517
+ *
518
+ * Uses `$N` positional parameters, `ON CONFLICT DO UPDATE`, `JSONB`,
519
+ * and `TIMESTAMPTZ` types.
520
+ */
521
+ declare class PostgresSqlDialect implements SqlDialect {
522
+ createDestinationTable(dest: string, schema: TableSchema, pk: string[], softDelete: boolean): {
523
+ sql: string;
524
+ params: unknown[];
525
+ };
526
+ queryDeltaHistory(sourceTable: string, rowIds: string[]): {
527
+ sql: string;
528
+ params: unknown[];
529
+ };
530
+ buildUpsert(dest: string, schema: TableSchema, conflictCols: string[], softDelete: boolean, upserts: Array<{
531
+ rowId: string;
532
+ state: Record<string, unknown>;
533
+ }>): {
534
+ sql: string;
535
+ params: unknown[];
536
+ };
537
+ buildDelete(dest: string, deleteIds: string[], softDelete: boolean): {
538
+ sql: string;
539
+ params: unknown[];
540
+ };
541
+ }
392
542
  /**
393
543
  * PostgreSQL database adapter for LakeSync.
394
544
  *
@@ -398,7 +548,9 @@ declare class MySQLAdapter implements DatabaseAdapter, Materialisable {
398
548
  declare class PostgresAdapter implements DatabaseAdapter, Materialisable {
399
549
  /** @internal */
400
550
  readonly pool: Pool;
551
+ private readonly dialect;
401
552
  constructor(config: DatabaseAdapterConfig);
553
+ private get executor();
402
554
  /**
403
555
  * Insert deltas into the database in a single batch.
404
556
  * Idempotent via `ON CONFLICT (delta_id) DO NOTHING`.
@@ -422,9 +574,8 @@ declare class PostgresAdapter implements DatabaseAdapter, Materialisable {
422
574
  /**
423
575
  * Materialise deltas into destination tables.
424
576
  *
425
- * For each table with a matching schema, merges delta history into the
426
- * latest row state and upserts into the destination table. Tombstoned
427
- * rows are deleted. The `props` column is never touched.
577
+ * Delegates to the shared `executeMaterialise` algorithm with the
578
+ * Postgres SQL dialect.
428
579
  */
429
580
  materialise(deltas: RowDelta[], schemas: ReadonlyArray<TableSchema>): Promise<Result<void, AdapterError>>;
430
581
  /** Close the database connection pool and release resources. */
@@ -438,13 +589,27 @@ type QueryFn = (sql: string, params?: unknown[]) => Promise<Record<string, unkno
438
589
  *
439
590
  * Uses dynamic imports so the database drivers (pg, mysql2) are only
440
591
  * loaded when actually needed. Returns `null` for connector types that
441
- * do not support the standard SQL polling model (e.g. BigQuery).
592
+ * do not support the standard SQL polling model (e.g. BigQuery, Jira, Salesforce).
442
593
  *
443
594
  * @param config - Validated connector configuration.
444
595
  * @returns A query function or `null` if the connector type is unsupported.
445
596
  */
446
597
  declare function createQueryFn(config: ConnectorConfig): Promise<QueryFn | null>;
447
598
 
599
+ /**
600
+ * Group raw delta rows by row_id, merge to latest state, and partition into upserts and deletes.
601
+ */
602
+ declare function groupAndMerge(rows: Array<{
603
+ row_id: string;
604
+ columns: string | ColumnDelta[];
605
+ op: string;
606
+ }>): {
607
+ upserts: Array<{
608
+ rowId: string;
609
+ state: Record<string, unknown>;
610
+ }>;
611
+ deleteIds: string[];
612
+ };
448
613
  /** Normalise a caught value into an Error or undefined. */
449
614
  declare function toCause(error: unknown): Error | undefined;
450
615
  /** Execute an async operation and wrap errors into an AdapterError Result. */
@@ -459,4 +624,4 @@ declare function mergeLatestState(rows: Array<{
459
624
  op: string;
460
625
  }>): Record<string, unknown> | null;
461
626
 
462
- export { AdapterConfig, BigQueryAdapter, type BigQueryAdapterConfig, CompositeAdapter, type CompositeAdapterConfig, type CompositeRoute, DatabaseAdapter, DatabaseAdapterConfig, FanOutAdapter, type FanOutAdapterConfig, LakeAdapter, LifecycleAdapter, type LifecycleAdapterConfig, type Materialisable, type MigrateOptions, type MigrateProgress, type MigrateResult, MinIOAdapter, MySQLAdapter, ObjectInfo, PostgresAdapter, type QueryFn, buildSchemaIndex, createDatabaseAdapter, createQueryFn, groupDeltasByTable, isMaterialisable, isSoftDelete, mergeLatestState, migrateAdapter, migrateToTier, resolveConflictColumns, resolvePrimaryKey, toCause, wrapAsync };
627
+ export { AdapterConfig, BigQueryAdapter, type BigQueryAdapterConfig, BigQuerySqlDialect, CompositeAdapter, type CompositeAdapterConfig, type CompositeRoute, DatabaseAdapter, DatabaseAdapterConfig, FanOutAdapter, type FanOutAdapterConfig, LakeAdapter, LifecycleAdapter, type LifecycleAdapterConfig, type Materialisable, type MigrateOptions, type MigrateProgress, type MigrateResult, MinIOAdapter, MySQLAdapter, MySqlDialect, ObjectInfo, PostgresAdapter, PostgresSqlDialect, type QueryExecutor, type QueryFn, type SqlDialect, buildSchemaIndex, createDatabaseAdapter, createQueryFn, executeMaterialise, groupAndMerge, groupDeltasByTable, isMaterialisable, isSoftDelete, mergeLatestState, migrateAdapter, migrateToTier, resolveConflictColumns, resolvePrimaryKey, toCause, wrapAsync };
package/dist/adapter.js CHANGED
@@ -1,14 +1,19 @@
1
1
  import {
2
2
  BigQueryAdapter,
3
+ BigQuerySqlDialect,
3
4
  CompositeAdapter,
4
5
  FanOutAdapter,
5
6
  LifecycleAdapter,
6
7
  MinIOAdapter,
7
8
  MySQLAdapter,
9
+ MySqlDialect,
8
10
  PostgresAdapter,
11
+ PostgresSqlDialect,
9
12
  buildSchemaIndex,
10
13
  createDatabaseAdapter,
11
14
  createQueryFn,
15
+ executeMaterialise,
16
+ groupAndMerge,
12
17
  groupDeltasByTable,
13
18
  isDatabaseAdapter,
14
19
  isMaterialisable,
@@ -21,20 +26,25 @@ import {
21
26
  resolvePrimaryKey,
22
27
  toCause,
23
28
  wrapAsync
24
- } from "./chunk-GUJWMK5P.js";
25
- import "./chunk-P3FT7QCW.js";
26
- import "./chunk-7D4SUZUM.js";
29
+ } from "./chunk-LPWXOYNS.js";
30
+ import "./chunk-LDFFCG2K.js";
31
+ import "./chunk-DGUM43GV.js";
27
32
  export {
28
33
  BigQueryAdapter,
34
+ BigQuerySqlDialect,
29
35
  CompositeAdapter,
30
36
  FanOutAdapter,
31
37
  LifecycleAdapter,
32
38
  MinIOAdapter,
33
39
  MySQLAdapter,
40
+ MySqlDialect,
34
41
  PostgresAdapter,
42
+ PostgresSqlDialect,
35
43
  buildSchemaIndex,
36
44
  createDatabaseAdapter,
37
45
  createQueryFn,
46
+ executeMaterialise,
47
+ groupAndMerge,
38
48
  groupDeltasByTable,
39
49
  isDatabaseAdapter,
40
50
  isMaterialisable,
package/dist/analyst.js CHANGED
@@ -2,8 +2,8 @@ import {
2
2
  Err,
3
3
  LakeSyncError,
4
4
  Ok
5
- } from "./chunk-P3FT7QCW.js";
6
- import "./chunk-7D4SUZUM.js";
5
+ } from "./chunk-LDFFCG2K.js";
6
+ import "./chunk-DGUM43GV.js";
7
7
 
8
8
  // ../analyst/src/duckdb.ts
9
9
  var DuckDBClient = class {
@@ -1,11 +1,78 @@
1
- import { S as SyncPush, R as RowDelta } from './types-CLlD4XOy.js';
1
+ import { S as SyncPush, R as RowDelta } from './types-BdGBv2ba.js';
2
2
  import { H as HLC } from './hlc-DiD8QNG3.js';
3
3
  import { R as Result, F as FlushError } from './result-CojzlFE2.js';
4
4
 
5
+ /**
6
+ * Target that supports flush and buffer inspection.
7
+ * Implemented by SyncGateway so pollers can trigger flushes to relieve memory pressure.
8
+ */
9
+ interface FlushableTarget {
10
+ flush(): Promise<Result<void, FlushError>>;
11
+ shouldFlush(): boolean;
12
+ readonly bufferStats: {
13
+ logSize: number;
14
+ indexSize: number;
15
+ byteSize: number;
16
+ };
17
+ }
18
+ /**
19
+ * Monitors buffer pressure on a {@link FlushableTarget} and triggers
20
+ * flushes when thresholds are exceeded. Created at construction time
21
+ * only when the target supports flushing — no runtime type checks needed.
22
+ */
23
+ declare class PressureManager {
24
+ private readonly target;
25
+ private readonly memoryBudgetBytes;
26
+ private readonly flushThreshold;
27
+ constructor(config: {
28
+ target: FlushableTarget;
29
+ memoryBudgetBytes?: number;
30
+ flushThreshold?: number;
31
+ });
32
+ /** Check buffer pressure and flush if thresholds are exceeded. */
33
+ checkAndFlush(): Promise<void>;
34
+ /** Force a flush regardless of current pressure. */
35
+ forceFlush(): Promise<void>;
36
+ private shouldFlush;
37
+ }
38
+
5
39
  /** Minimal interface for a push target (avoids depending on @lakesync/gateway). */
6
40
  interface PushTarget {
7
41
  handlePush(push: SyncPush): unknown;
8
42
  }
43
+ /**
44
+ * Manages chunked pushing of deltas to a {@link PushTarget}.
45
+ * Handles backpressure retry (flush + retry once) when a
46
+ * {@link PressureManager} is present.
47
+ *
48
+ * No runtime type checks — the PressureManager is either provided
49
+ * at construction (target supports flush) or null.
50
+ */
51
+ declare class ChunkedPusher {
52
+ private readonly target;
53
+ private readonly clientId;
54
+ private readonly chunkSize;
55
+ private readonly pressure;
56
+ private pendingDeltas;
57
+ constructor(config: {
58
+ target: PushTarget;
59
+ clientId: string;
60
+ chunkSize: number;
61
+ pressure: PressureManager | null;
62
+ });
63
+ /**
64
+ * Accumulate a single delta. When `chunkSize` is reached, the pending
65
+ * deltas are automatically pushed (and flushed if needed).
66
+ */
67
+ accumulate(delta: RowDelta): Promise<void>;
68
+ /** Flush any remaining accumulated deltas. */
69
+ flush(): Promise<void>;
70
+ /** Push deltas directly (single-shot, backward compat). */
71
+ pushImmediate(deltas: RowDelta[]): void;
72
+ private pushPendingChunk;
73
+ private pushChunkWithFlush;
74
+ }
75
+
9
76
  /**
10
77
  * Extended push target that supports flush and buffer inspection.
11
78
  * Implemented by SyncGateway so pollers can trigger flushes to relieve memory pressure.
@@ -32,19 +99,18 @@ interface PollerMemoryConfig {
32
99
  }
33
100
  /**
34
101
  * Base class for source pollers that poll an external API and push deltas
35
- * to a SyncGateway. Handles lifecycle (start/stop/schedule), and push.
102
+ * to a SyncGateway.
103
+ *
104
+ * Composes {@link PollingScheduler} (lifecycle), {@link ChunkedPusher}
105
+ * (chunked push with backpressure), and {@link PressureManager}
106
+ * (memory-budget flush decisions).
36
107
  */
37
108
  declare abstract class BaseSourcePoller {
38
109
  protected readonly gateway: PushTarget;
39
110
  protected readonly hlc: HLC;
40
111
  protected readonly clientId: string;
41
- private readonly intervalMs;
42
- private timer;
43
- private running;
44
- private readonly chunkSize;
45
- private readonly memoryBudgetBytes;
46
- private readonly flushThreshold;
47
- private pendingDeltas;
112
+ private readonly scheduler;
113
+ private readonly pusher;
48
114
  constructor(config: {
49
115
  name: string;
50
116
  intervalMs: number;
@@ -77,15 +143,6 @@ declare abstract class BaseSourcePoller {
77
143
  protected accumulateDelta(delta: RowDelta): Promise<void>;
78
144
  /** Flush any remaining accumulated deltas. Call at the end of `poll()`. */
79
145
  protected flushAccumulator(): Promise<void>;
80
- /**
81
- * Push a chunk of pending deltas. If the gateway is an IngestTarget,
82
- * checks memory pressure and flushes before/after push when needed.
83
- * On backpressure, flushes once and retries.
84
- */
85
- private pushPendingChunk;
86
- private pushChunkWithFlush;
87
- private shouldFlushTarget;
88
- private schedulePoll;
89
146
  }
90
147
 
91
- export { BaseSourcePoller as B, type IngestTarget as I, type PushTarget as P, type PollerMemoryConfig as a, isIngestTarget as i };
148
+ export { BaseSourcePoller as B, ChunkedPusher as C, type FlushableTarget as F, type IngestTarget as I, type PushTarget as P, type PollerMemoryConfig as a, PressureManager as b, isIngestTarget as i };
@@ -1,6 +1,6 @@
1
1
  import { I as IcebergSchema, P as PartitionSpec } from './nessie-client-DrNikVXy.js';
2
2
  export { C as CatalogueConfig, a as CatalogueError, D as DataFile, b as IcebergField, N as NessieCatalogueClient, S as Snapshot, T as TableMetadata } from './nessie-client-DrNikVXy.js';
3
- import { T as TableSchema } from './types-CLlD4XOy.js';
3
+ import { T as TableSchema } from './types-BdGBv2ba.js';
4
4
  import './result-CojzlFE2.js';
5
5
 
6
6
  /**
package/dist/catalogue.js CHANGED
@@ -4,9 +4,9 @@ import {
4
4
  buildPartitionSpec,
5
5
  lakeSyncTableName,
6
6
  tableSchemaToIceberg
7
- } from "./chunk-UAUQGP3B.js";
8
- import "./chunk-P3FT7QCW.js";
9
- import "./chunk-7D4SUZUM.js";
7
+ } from "./chunk-TMLG32QV.js";
8
+ import "./chunk-LDFFCG2K.js";
9
+ import "./chunk-DGUM43GV.js";
10
10
  export {
11
11
  CatalogueError,
12
12
  NessieCatalogueClient,
@@ -0,0 +1,11 @@
1
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
2
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
3
+ }) : x)(function(x) {
4
+ if (typeof require !== "undefined") return require.apply(this, arguments);
5
+ throw Error('Dynamic require of "' + x + '" is not supported');
6
+ });
7
+
8
+ export {
9
+ __require
10
+ };
11
+ //# sourceMappingURL=chunk-DGUM43GV.js.map