lakesync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -0
- package/dist/adapter.d.ts +369 -0
- package/dist/adapter.js +39 -0
- package/dist/adapter.js.map +1 -0
- package/dist/analyst.d.ts +268 -0
- package/dist/analyst.js +495 -0
- package/dist/analyst.js.map +1 -0
- package/dist/auth-CAVutXzx.d.ts +30 -0
- package/dist/base-poller-Qo_SmCZs.d.ts +82 -0
- package/dist/catalogue.d.ts +65 -0
- package/dist/catalogue.js +17 -0
- package/dist/catalogue.js.map +1 -0
- package/dist/chunk-4ARO6KTJ.js +257 -0
- package/dist/chunk-4ARO6KTJ.js.map +1 -0
- package/dist/chunk-5YOFCJQ7.js +1115 -0
- package/dist/chunk-5YOFCJQ7.js.map +1 -0
- package/dist/chunk-7D4SUZUM.js +38 -0
- package/dist/chunk-7D4SUZUM.js.map +1 -0
- package/dist/chunk-BNJOGBYK.js +335 -0
- package/dist/chunk-BNJOGBYK.js.map +1 -0
- package/dist/chunk-ICNT7I3K.js +1180 -0
- package/dist/chunk-ICNT7I3K.js.map +1 -0
- package/dist/chunk-P5DRFKIT.js +413 -0
- package/dist/chunk-P5DRFKIT.js.map +1 -0
- package/dist/chunk-X3RO5SYJ.js +880 -0
- package/dist/chunk-X3RO5SYJ.js.map +1 -0
- package/dist/client.d.ts +428 -0
- package/dist/client.js +2048 -0
- package/dist/client.js.map +1 -0
- package/dist/compactor.d.ts +342 -0
- package/dist/compactor.js +793 -0
- package/dist/compactor.js.map +1 -0
- package/dist/coordinator-CxckTzYW.d.ts +396 -0
- package/dist/db-types-BR6Kt4uf.d.ts +29 -0
- package/dist/gateway-D5SaaMvT.d.ts +337 -0
- package/dist/gateway-server.d.ts +306 -0
- package/dist/gateway-server.js +4663 -0
- package/dist/gateway-server.js.map +1 -0
- package/dist/gateway.d.ts +196 -0
- package/dist/gateway.js +79 -0
- package/dist/gateway.js.map +1 -0
- package/dist/hlc-DiD8QNG3.d.ts +70 -0
- package/dist/index.d.ts +245 -0
- package/dist/index.js +102 -0
- package/dist/index.js.map +1 -0
- package/dist/json-dYtqiL0F.d.ts +18 -0
- package/dist/nessie-client-DrNikVXy.d.ts +160 -0
- package/dist/parquet.d.ts +78 -0
- package/dist/parquet.js +15 -0
- package/dist/parquet.js.map +1 -0
- package/dist/proto.d.ts +434 -0
- package/dist/proto.js +67 -0
- package/dist/proto.js.map +1 -0
- package/dist/react.d.ts +147 -0
- package/dist/react.js +224 -0
- package/dist/react.js.map +1 -0
- package/dist/resolver-C3Wphi6O.d.ts +10 -0
- package/dist/result-CojzlFE2.d.ts +64 -0
- package/dist/src-QU2YLPZY.js +383 -0
- package/dist/src-QU2YLPZY.js.map +1 -0
- package/dist/src-WYBF5LOI.js +102 -0
- package/dist/src-WYBF5LOI.js.map +1 -0
- package/dist/src-WZNPHANQ.js +426 -0
- package/dist/src-WZNPHANQ.js.map +1 -0
- package/dist/types-Bs-QyOe-.d.ts +143 -0
- package/dist/types-DAQL_vU_.d.ts +118 -0
- package/dist/types-DSC_EiwR.d.ts +45 -0
- package/dist/types-V_jVu2sA.d.ts +73 -0
- package/package.json +119 -0
package/README.md
ADDED
@@ -0,0 +1,74 @@
+# lakesync
+
+Unified npm package for LakeSync -- a single install that provides access to all LakeSync sub-packages via subpath exports.
+
+## Install
+
+```bash
+npm install lakesync
+```
+
+## Usage
+
+The default export re-exports `@lakesync/core`. Each sub-package is available via a subpath:
+
+```ts
+// Core types and utilities (default export)
+import { HLC, extractDelta, Ok, Err } from "lakesync";
+
+// Client SDK
+import { LocalDB, SyncCoordinator, HttpTransport } from "lakesync/client";
+
+// Gateway
+import { SyncGateway, DeltaBuffer } from "lakesync/gateway";
+
+// Storage adapter
+import { MinIOAdapter } from "lakesync/adapter";
+
+// Protobuf codec
+import { encodeSyncPush, decodeSyncPush } from "lakesync/proto";
+
+// Parquet read/write
+import { writeDeltasToParquet, readParquetToDeltas } from "lakesync/parquet";
+
+// Iceberg catalogue
+import { NessieCatalogueClient } from "lakesync/catalogue";
+
+// Compaction
+import { Compactor, MaintenanceRunner } from "lakesync/compactor";
+
+// Analytics
+import { DuckDBClient, UnionReader, TimeTraveller } from "lakesync/analyst";
+```
+
+## Subpath exports
+
+| Subpath | Maps to | Description |
+|---------|---------|-------------|
+| `lakesync` | `@lakesync/core` | HLC, delta types, conflict resolution, Result type |
+| `lakesync/client` | `@lakesync/client` | SyncCoordinator, LocalDB, transports, queues |
+| `lakesync/gateway` | `@lakesync/gateway` | Sync gateway with delta buffer and flush |
+| `lakesync/adapter` | `@lakesync/adapter` | Storage adapter interface + S3/MinIO implementation |
+| `lakesync/proto` | `@lakesync/proto` | Protobuf codec for the wire protocol |
+| `lakesync/parquet` | `@lakesync/parquet` | Parquet read/write via parquet-wasm |
+| `lakesync/catalogue` | `@lakesync/catalogue` | Iceberg REST catalogue client |
+| `lakesync/compactor` | `@lakesync/compactor` | Parquet compaction and maintenance |
+| `lakesync/analyst` | `@lakesync/analyst` | DuckDB-WASM analytics and time-travel queries |
+
+## Peer dependencies
+
+Heavy runtime dependencies are optional peer dependencies -- install only what you need:
+
+| Peer dependency | Required for |
+|-----------------|-------------|
+| `sql.js` | `lakesync/client` (LocalDB) |
+| `idb` | `lakesync/client` (IDBQueue, IDB persistence) |
+| `@aws-sdk/client-s3` | `lakesync/adapter` (MinIOAdapter) |
+| `@bufbuild/protobuf` | `lakesync/proto` |
+| `parquet-wasm`, `apache-arrow` | `lakesync/parquet` |
+| `@duckdb/duckdb-wasm` | `lakesync/analyst` |
+
+## When to use this vs `@lakesync/*`
+
+- **Use `lakesync`** for quick prototyping or when you want a single dependency
+- **Use individual `@lakesync/*` packages** for production to minimise bundle size and control peer dependencies precisely
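A note on reading the README above: because each subpath is a thin re-export of the matching scoped package, moving between the two should only change module specifiers. A minimal sketch (the import names come from the README's own examples; the mechanical one-to-one swap is inferred from the mapping table):

```ts
// Prototyping: one dependency, subpath imports.
import { MinIOAdapter } from "lakesync/adapter";
import { SyncGateway } from "lakesync/gateway";

// Production: the same names from the scoped packages --
// per the mapping table, only the specifiers change:
// import { MinIOAdapter } from "@lakesync/adapter";
// import { SyncGateway } from "@lakesync/gateway";
```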
package/dist/adapter.d.ts
ADDED
@@ -0,0 +1,369 @@
+import { BigQuery } from '@google-cloud/bigquery';
+import { R as Result, A as AdapterError, H as HLCTimestamp } from './result-CojzlFE2.js';
+import { R as RowDelta, T as TableSchema, C as ColumnDelta } from './types-V_jVu2sA.js';
+import { D as DatabaseAdapter, a as DatabaseAdapterConfig } from './db-types-BR6Kt4uf.js';
+export { i as isDatabaseAdapter, l as lakeSyncTypeToBigQuery } from './db-types-BR6Kt4uf.js';
+import { C as ConnectorConfig } from './types-DAQL_vU_.js';
+import { L as LakeAdapter, A as AdapterConfig, O as ObjectInfo } from './types-DSC_EiwR.js';
+import mysql from 'mysql2/promise';
+import { Pool } from 'pg';
+
+/**
+ * Configuration for the BigQuery adapter.
+ * Unlike SQL adapters, BigQuery is HTTP-based — no connection string needed.
+ */
+interface BigQueryAdapterConfig {
+    /** GCP project ID. */
+    projectId: string;
+    /** BigQuery dataset name. */
+    dataset: string;
+    /** Path to a service account JSON key file. Falls back to ADC if omitted. */
+    keyFilename?: string;
+    /** Dataset location (default: "US"). */
+    location?: string;
+}
+/**
+ * BigQuery database adapter for LakeSync.
+ *
+ * Stores deltas in a `lakesync_deltas` table using standard SQL DML.
+ * Idempotent inserts via MERGE statement. All public methods return
+ * `Result` and never throw.
+ *
+ * **Note:** BigQuery DML is limited to 1,500 statements per table per day
+ * on standard (non-partitioned) tables. Query latency is seconds, not
+ * milliseconds — this adapter is designed for the analytics tier.
+ */
+declare class BigQueryAdapter implements DatabaseAdapter {
+    /** @internal */
+    readonly client: BigQuery;
+    /** @internal */
+    readonly dataset: string;
+    /** @internal */
+    readonly location: string;
+    constructor(config: BigQueryAdapterConfig);
+    /**
+     * Insert deltas into the database in a single batch.
+     * Idempotent via MERGE — existing deltaIds are silently skipped.
+     */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /**
+     * Query deltas with HLC greater than the given timestamp, optionally filtered by table.
+     */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /**
+     * Get the latest merged state for a specific row using column-level LWW.
+     * Returns null if no deltas exist for this row.
+     */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /**
+     * Ensure the BigQuery dataset and lakesync_deltas table exist.
+     * The `schema` parameter is accepted for interface compliance but the
+     * internal table structure is fixed (deltas store column data as JSON).
+     */
+    ensureSchema(_schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /**
+     * No-op — BigQuery client is HTTP-based with no persistent connections.
+     */
+    close(): Promise<void>;
+}
+
+/** A routing rule that maps specific tables to a database adapter. */
+interface CompositeRoute {
+    /** Tables handled by this adapter */
+    tables: string[];
+    /** The adapter for these tables */
+    adapter: DatabaseAdapter;
+}
+/** Configuration for CompositeAdapter routing. */
+interface CompositeAdapterConfig {
+    /** Table-to-adapter routing rules */
+    routes: CompositeRoute[];
+    /** Fallback adapter for tables not matching any route */
+    defaultAdapter: DatabaseAdapter;
+}
+/**
+ * Routes database operations to different adapters based on table name.
+ * Implements DatabaseAdapter so it can be used as a drop-in replacement.
+ */
+declare class CompositeAdapter implements DatabaseAdapter {
+    private readonly routeMap;
+    private readonly adapters;
+    private readonly defaultAdapter;
+    constructor(config: CompositeAdapterConfig);
+    /** Insert deltas, routing each group to the correct adapter by table. */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /** Query deltas since a given HLC, fanning out to relevant adapters and merging results. */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /** Get the latest state for a row, routing to the correct adapter. */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /** Ensure schema exists, routing to the correct adapter for the table. */
+    ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /** Close all unique adapters (routes + default, deduplicated). */
+    close(): Promise<void>;
+}
+
+/**
+ * Instantiate a {@link DatabaseAdapter} from a {@link ConnectorConfig}.
+ *
+ * Switches on `config.type` and creates the matching adapter using
+ * the type-specific connection configuration. Returns an {@link AdapterError}
+ * if the type-specific config is missing or the adapter constructor throws.
+ *
+ * @param config - Validated connector configuration.
+ * @returns The instantiated adapter or an error.
+ */
+declare function createDatabaseAdapter(config: ConnectorConfig): Result<DatabaseAdapter, AdapterError>;
+
+/** Configuration for the FanOutAdapter. */
+interface FanOutAdapterConfig {
+    /** The primary adapter that handles all reads and authoritative writes. */
+    primary: DatabaseAdapter;
+    /** Secondary adapters that receive replicated writes on a best-effort basis. */
+    secondaries: DatabaseAdapter[];
+}
+/**
+ * Writes to a primary adapter synchronously and replicates to secondary
+ * adapters asynchronously. Reads always go to the primary.
+ *
+ * Secondary failures are silently caught and never affect the return value.
+ * Use case: write to Postgres (fast, operational), replicate to BigQuery (analytics).
+ */
+declare class FanOutAdapter implements DatabaseAdapter {
+    private readonly primary;
+    private readonly secondaries;
+    constructor(config: FanOutAdapterConfig);
+    /** Insert deltas into the primary, then replicate to secondaries (fire-and-forget). */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /** Query deltas from the primary adapter only. */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /** Get the latest state from the primary adapter only. */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /** Ensure schema on the primary first, then best-effort on secondaries. */
+    ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /** Close primary and all secondary adapters. */
+    close(): Promise<void>;
+}
+
+/** Configuration for age-based tiered storage. */
+interface LifecycleAdapterConfig {
+    /** Hot tier — recent data, fast queries. */
+    hot: {
+        /** The adapter storing recent deltas. */
+        adapter: DatabaseAdapter;
+        /** Maximum age in milliseconds before data is considered cold. */
+        maxAgeMs: number;
+    };
+    /** Cold tier — older data, cheap storage. */
+    cold: {
+        /** The adapter storing archived deltas. */
+        adapter: DatabaseAdapter;
+    };
+}
+/**
+ * Routes database operations across hot and cold tiers based on delta age.
+ *
+ * Writes always go to the hot adapter. Reads fan out to both tiers when
+ * the requested HLC is older than the configured `maxAgeMs` threshold.
+ *
+ * Use {@link migrateToTier} as a background job to copy aged-out deltas
+ * from hot to cold.
+ */
+declare class LifecycleAdapter implements DatabaseAdapter {
+    private readonly hot;
+    private readonly cold;
+    private readonly maxAgeMs;
+    constructor(config: LifecycleAdapterConfig);
+    /** Insert deltas into the hot adapter — new data is always hot. */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /**
+     * Query deltas since the given HLC.
+     *
+     * If `sinceHlc` is older than `now - maxAgeMs`, queries both hot and cold
+     * adapters and merges the results sorted by HLC. Otherwise queries hot only.
+     */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /** Get latest state — try hot first, fall back to cold if hot returns null. */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /** Ensure schema exists on both hot and cold adapters. */
+    ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /** Close both hot and cold adapters. */
+    close(): Promise<void>;
+}
+/**
+ * Migrate aged-out deltas from the hot adapter to the cold adapter.
+ *
+ * Queries the hot adapter for all deltas since HLC 0, filters those with
+ * wall time older than `Date.now() - maxAgeMs`, and inserts them into the
+ * cold adapter. Insertion is idempotent via deltaId uniqueness.
+ *
+ * Does NOT delete from hot — that is a separate cleanup concern.
+ *
+ * @param hot - The hot-tier adapter to read old deltas from.
+ * @param cold - The cold-tier adapter to write old deltas to.
+ * @param maxAgeMs - Age threshold in milliseconds.
+ * @returns The count of migrated deltas, or an AdapterError.
+ */
+declare function migrateToTier(hot: DatabaseAdapter, cold: DatabaseAdapter, maxAgeMs: number): Promise<Result<{
+    migrated: number;
+}, AdapterError>>;
+
+/** Options for migrating deltas between database adapters. */
+interface MigrateOptions {
+    /** Source adapter to read from */
+    from: DatabaseAdapter;
+    /** Target adapter to write to */
+    to: DatabaseAdapter;
+    /** Optional: only migrate specific tables */
+    tables?: string[];
+    /** Batch size for writing (default: 1000) */
+    batchSize?: number;
+    /** Progress callback invoked after each batch write */
+    onProgress?: (info: MigrateProgress) => void;
+}
+/** Progress information reported during migration. */
+interface MigrateProgress {
+    /** Current batch number (1-based) */
+    batch: number;
+    /** Total deltas migrated so far */
+    totalSoFar: number;
+}
+/** Result of a successful migration. */
+interface MigrateResult {
+    /** Total number of deltas migrated */
+    totalDeltas: number;
+    /** Number of batches processed */
+    batches: number;
+}
+/**
+ * Migrate deltas from one database adapter to another.
+ * Reads all matching deltas from the source, then writes them in batches to the target.
+ * Idempotent via deltaId uniqueness in the target adapter.
+ */
+declare function migrateAdapter(opts: MigrateOptions): Promise<Result<MigrateResult, AdapterError>>;
+
+/**
+ * MinIO/S3-compatible lake adapter.
+ *
+ * Wraps the AWS S3 SDK to provide a Result-based interface for
+ * interacting with MinIO or any S3-compatible object store.
+ * All public methods return `Result` and never throw.
+ */
+declare class MinIOAdapter implements LakeAdapter {
+    private readonly client;
+    private readonly bucket;
+    constructor(config: AdapterConfig);
+    /** Store an object in the lake */
+    putObject(path: string, data: Uint8Array, contentType?: string): Promise<Result<void, AdapterError>>;
+    /** Retrieve an object from the lake */
+    getObject(path: string): Promise<Result<Uint8Array, AdapterError>>;
+    /** Get object metadata without retrieving the body */
+    headObject(path: string): Promise<Result<{
+        size: number;
+        lastModified: Date;
+    }, AdapterError>>;
+    /** List objects matching a given prefix */
+    listObjects(prefix: string): Promise<Result<ObjectInfo[], AdapterError>>;
+    /** Delete a single object from the lake */
+    deleteObject(path: string): Promise<Result<void, AdapterError>>;
+    /** Delete multiple objects from the lake in a single batch operation */
+    deleteObjects(paths: string[]): Promise<Result<void, AdapterError>>;
+}
+
+/**
+ * MySQL database adapter for LakeSync.
+ *
+ * Stores deltas in a `lakesync_deltas` table using INSERT IGNORE for
+ * idempotent writes. All public methods return `Result` and never throw.
+ * Uses mysql2/promise connection pool for async operations.
+ */
+declare class MySQLAdapter implements DatabaseAdapter {
+    /** @internal */
+    readonly pool: mysql.Pool;
+    constructor(config: DatabaseAdapterConfig);
+    /**
+     * Insert deltas into the database in a single batch.
+     * Uses INSERT IGNORE for idempotent writes — duplicate deltaIds are silently skipped.
+     */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /**
+     * Query deltas with HLC greater than the given timestamp.
+     * Optionally filtered by table name(s).
+     */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /**
+     * Get the latest merged state for a specific row using column-level LWW.
+     * Returns null if no deltas exist or if the row is tombstoned by DELETE.
+     */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /**
+     * Ensure the database schema exists. Creates the lakesync_deltas table
+     * and a user table matching the given TableSchema definition.
+     */
+    ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /** Close the database connection pool and release resources. */
+    close(): Promise<void>;
+}
+
+/**
+ * PostgreSQL database adapter for LakeSync.
+ *
+ * Stores deltas in a `lakesync_deltas` table using pg Pool.
+ * All public methods return `Result` and never throw.
+ */
+declare class PostgresAdapter implements DatabaseAdapter {
+    /** @internal */
+    readonly pool: Pool;
+    constructor(config: DatabaseAdapterConfig);
+    /**
+     * Insert deltas into the database in a single batch.
+     * Idempotent via `ON CONFLICT (delta_id) DO NOTHING`.
+     */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /**
+     * Query deltas with HLC greater than the given timestamp, optionally filtered by table.
+     */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /**
+     * Get the latest merged state for a specific row using column-level LWW.
+     * Returns null if no deltas exist for this row.
+     */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /**
+     * Ensure the lakesync_deltas table and indices exist.
+     * The `schema` parameter is accepted for interface compliance but the
+     * internal table structure is fixed (deltas store column data as JSONB).
+     */
+    ensureSchema(_schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /** Close the database connection pool and release resources. */
+    close(): Promise<void>;
+}
+
+/** Generic query function — abstracts any SQL database connection. */
+type QueryFn = (sql: string, params?: unknown[]) => Promise<Record<string, unknown>[]>;
+/**
+ * Create a raw SQL query function from a {@link ConnectorConfig}.
+ *
+ * Uses dynamic imports so the database drivers (pg, mysql2) are only
+ * loaded when actually needed. Returns `null` for connector types that
+ * do not support the standard SQL polling model (e.g. BigQuery).
+ *
+ * @param config - Validated connector configuration.
+ * @returns A query function or `null` if the connector type is unsupported.
+ */
+declare function createQueryFn(config: ConnectorConfig): Promise<QueryFn | null>;
+
+/** Normalise a caught value into an Error or undefined. */
+declare function toCause(error: unknown): Error | undefined;
+/** Execute an async operation and wrap errors into an AdapterError Result. */
+declare function wrapAsync<T>(operation: () => Promise<T>, errorMessage: string): Promise<Result<T, AdapterError>>;
+/**
+ * Merge delta rows into final state using column-level LWW.
+ * Shared by Postgres, MySQL, and BigQuery getLatestState implementations.
+ * Rows must be sorted by HLC ascending.
+ */
+declare function mergeLatestState(rows: Array<{
+    columns: string | ColumnDelta[];
+    op: string;
+}>): Record<string, unknown> | null;
+
+export { AdapterConfig, BigQueryAdapter, type BigQueryAdapterConfig, CompositeAdapter, type CompositeAdapterConfig, type CompositeRoute, DatabaseAdapter, DatabaseAdapterConfig, FanOutAdapter, type FanOutAdapterConfig, LakeAdapter, LifecycleAdapter, type LifecycleAdapterConfig, type MigrateOptions, type MigrateProgress, type MigrateResult, MinIOAdapter, MySQLAdapter, ObjectInfo, PostgresAdapter, type QueryFn, createDatabaseAdapter, createQueryFn, mergeLatestState, migrateAdapter, migrateToTier, toCause, wrapAsync };
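The declarations above compose: `LifecycleAdapter`, `FanOutAdapter`, and `CompositeAdapter` all implement `DatabaseAdapter`, so any of them can wrap any concrete adapter. A hedged sketch of hot/cold tiering, using only fields declared in `LifecycleAdapterConfig` and `BigQueryAdapterConfig` above; the hot adapter is left as a placeholder because `DatabaseAdapterConfig`'s shape lives in `db-types-BR6Kt4uf.d.ts`, which this diff does not expand:

```ts
import {
  BigQueryAdapter,
  LifecycleAdapter,
  migrateToTier,
  type DatabaseAdapter,
} from "lakesync/adapter";

const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;

// Placeholder for the hot tier (e.g. a PostgresAdapter); its config type,
// DatabaseAdapterConfig, is declared in db-types-BR6Kt4uf.d.ts (not shown).
declare const hot: DatabaseAdapter;

// Cold tier: BigQuery, per the "analytics tier" note in its docblock.
const cold: DatabaseAdapter = new BigQueryAdapter({
  projectId: "my-project", // GCP project ID (illustrative value)
  dataset: "lakesync",     // BigQuery dataset name (illustrative value)
});

// Writes always land on the hot adapter; reads older than maxAgeMs
// fan out to both tiers and merge by HLC.
const tiered = new LifecycleAdapter({
  hot: { adapter: hot, maxAgeMs: THIRTY_DAYS_MS },
  cold: { adapter: cold },
});

// Background job: idempotently copy aged-out deltas from hot to cold.
// Deletion from hot is a separate concern, as the docblock notes.
await migrateToTier(hot, cold, THIRTY_DAYS_MS);
```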
package/dist/adapter.js
ADDED
@@ -0,0 +1,39 @@
+import {
+  BigQueryAdapter,
+  CompositeAdapter,
+  FanOutAdapter,
+  LifecycleAdapter,
+  MinIOAdapter,
+  MySQLAdapter,
+  PostgresAdapter,
+  createDatabaseAdapter,
+  createQueryFn,
+  isDatabaseAdapter,
+  lakeSyncTypeToBigQuery,
+  mergeLatestState,
+  migrateAdapter,
+  migrateToTier,
+  toCause,
+  wrapAsync
+} from "./chunk-X3RO5SYJ.js";
+import "./chunk-ICNT7I3K.js";
+import "./chunk-7D4SUZUM.js";
+export {
+  BigQueryAdapter,
+  CompositeAdapter,
+  FanOutAdapter,
+  LifecycleAdapter,
+  MinIOAdapter,
+  MySQLAdapter,
+  PostgresAdapter,
+  createDatabaseAdapter,
+  createQueryFn,
+  isDatabaseAdapter,
+  lakeSyncTypeToBigQuery,
+  mergeLatestState,
+  migrateAdapter,
+  migrateToTier,
+  toCause,
+  wrapAsync
+};
+//# sourceMappingURL=adapter.js.map
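Since the barrel above re-exports the migration helpers, a one-off backfill between adapters can be driven through the `lakesync/adapter` subpath. A sketch that uses only fields declared in `MigrateOptions` (both adapters are placeholders and the table name is hypothetical):

```ts
import { migrateAdapter, type DatabaseAdapter } from "lakesync/adapter";

declare const source: DatabaseAdapter; // e.g. a PostgresAdapter
declare const target: DatabaseAdapter; // e.g. a BigQueryAdapter

// Option fields mirror MigrateOptions in adapter.d.ts above.
const result = await migrateAdapter({
  from: source,
  to: target,
  tables: ["todos"], // hypothetical table; omit to migrate everything
  batchSize: 500,    // default is 1000
  onProgress: ({ batch, totalSoFar }) =>
    console.log(`batch ${batch}: ${totalSoFar} deltas migrated`),
});
// `result` is a Result<MigrateResult, AdapterError>; its handling is omitted
// since the Result API is defined in @lakesync/core and not shown in this diff.
```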
package/dist/adapter.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}