lakesync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -0
- package/dist/adapter.d.ts +369 -0
- package/dist/adapter.js +39 -0
- package/dist/adapter.js.map +1 -0
- package/dist/analyst.d.ts +268 -0
- package/dist/analyst.js +495 -0
- package/dist/analyst.js.map +1 -0
- package/dist/auth-CAVutXzx.d.ts +30 -0
- package/dist/base-poller-Qo_SmCZs.d.ts +82 -0
- package/dist/catalogue.d.ts +65 -0
- package/dist/catalogue.js +17 -0
- package/dist/catalogue.js.map +1 -0
- package/dist/chunk-4ARO6KTJ.js +257 -0
- package/dist/chunk-4ARO6KTJ.js.map +1 -0
- package/dist/chunk-5YOFCJQ7.js +1115 -0
- package/dist/chunk-5YOFCJQ7.js.map +1 -0
- package/dist/chunk-7D4SUZUM.js +38 -0
- package/dist/chunk-7D4SUZUM.js.map +1 -0
- package/dist/chunk-BNJOGBYK.js +335 -0
- package/dist/chunk-BNJOGBYK.js.map +1 -0
- package/dist/chunk-ICNT7I3K.js +1180 -0
- package/dist/chunk-ICNT7I3K.js.map +1 -0
- package/dist/chunk-P5DRFKIT.js +413 -0
- package/dist/chunk-P5DRFKIT.js.map +1 -0
- package/dist/chunk-X3RO5SYJ.js +880 -0
- package/dist/chunk-X3RO5SYJ.js.map +1 -0
- package/dist/client.d.ts +428 -0
- package/dist/client.js +2048 -0
- package/dist/client.js.map +1 -0
- package/dist/compactor.d.ts +342 -0
- package/dist/compactor.js +793 -0
- package/dist/compactor.js.map +1 -0
- package/dist/coordinator-CxckTzYW.d.ts +396 -0
- package/dist/db-types-BR6Kt4uf.d.ts +29 -0
- package/dist/gateway-D5SaaMvT.d.ts +337 -0
- package/dist/gateway-server.d.ts +306 -0
- package/dist/gateway-server.js +4663 -0
- package/dist/gateway-server.js.map +1 -0
- package/dist/gateway.d.ts +196 -0
- package/dist/gateway.js +79 -0
- package/dist/gateway.js.map +1 -0
- package/dist/hlc-DiD8QNG3.d.ts +70 -0
- package/dist/index.d.ts +245 -0
- package/dist/index.js +102 -0
- package/dist/index.js.map +1 -0
- package/dist/json-dYtqiL0F.d.ts +18 -0
- package/dist/nessie-client-DrNikVXy.d.ts +160 -0
- package/dist/parquet.d.ts +78 -0
- package/dist/parquet.js +15 -0
- package/dist/parquet.js.map +1 -0
- package/dist/proto.d.ts +434 -0
- package/dist/proto.js +67 -0
- package/dist/proto.js.map +1 -0
- package/dist/react.d.ts +147 -0
- package/dist/react.js +224 -0
- package/dist/react.js.map +1 -0
- package/dist/resolver-C3Wphi6O.d.ts +10 -0
- package/dist/result-CojzlFE2.d.ts +64 -0
- package/dist/src-QU2YLPZY.js +383 -0
- package/dist/src-QU2YLPZY.js.map +1 -0
- package/dist/src-WYBF5LOI.js +102 -0
- package/dist/src-WYBF5LOI.js.map +1 -0
- package/dist/src-WZNPHANQ.js +426 -0
- package/dist/src-WZNPHANQ.js.map +1 -0
- package/dist/types-Bs-QyOe-.d.ts +143 -0
- package/dist/types-DAQL_vU_.d.ts +118 -0
- package/dist/types-DSC_EiwR.d.ts +45 -0
- package/dist/types-V_jVu2sA.d.ts +73 -0
- package/package.json +119 -0

package/dist/compactor.js.map
@@ -0,0 +1 @@
{"version":3,"sources":["../../compactor/src/checkpoint-generator.ts","../../compactor/src/equality-delete.ts","../../compactor/src/compactor.ts","../../compactor/src/maintenance.ts","../../compactor/src/scheduler.ts","../../compactor/src/types.ts"],"sourcesContent":["import type { LakeAdapter } from \"@lakesync/adapter\";\nimport {\n\tErr,\n\ttype HLCTimestamp,\n\tLakeSyncError,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\nimport { readParquetToDeltas } from \"@lakesync/parquet\";\nimport { encodeSyncResponse } from \"@lakesync/proto\";\n\n/** Configuration for checkpoint generation */\nexport interface CheckpointConfig {\n\t/** Max raw proto bytes per chunk. Tune to serving runtime memory budget. */\n\tchunkBytes: number;\n}\n\n/** Default checkpoint configuration (16 MB chunks for 128 MB DO) */\nexport const DEFAULT_CHECKPOINT_CONFIG: CheckpointConfig = {\n\tchunkBytes: 16 * 1024 * 1024,\n};\n\n/** Result of a checkpoint generation operation */\nexport interface CheckpointResult {\n\t/** Number of chunk files written */\n\tchunksWritten: number;\n\t/** Total bytes written across all chunks */\n\tbytesWritten: number;\n\t/** Snapshot HLC timestamp */\n\tsnapshotHlc: HLCTimestamp;\n}\n\n/** Manifest stored alongside checkpoint chunks */\nexport interface CheckpointManifest {\n\t/** Snapshot HLC as decimal string (JSON-safe bigint) */\n\tsnapshotHlc: string;\n\t/** ISO 8601 generation timestamp */\n\tgeneratedAt: string;\n\t/** Number of chunks */\n\tchunkCount: number;\n\t/** Total deltas across all chunks */\n\ttotalDeltas: number;\n\t/** Ordered list of chunk file names */\n\tchunks: string[];\n}\n\n/** Estimated bytes per delta for chunk sizing (200 base + 50 per column) */\nconst ESTIMATED_BASE_BYTES = 200;\nconst ESTIMATED_BYTES_PER_COLUMN = 50;\n\n/**\n * Generates checkpoint files from base Parquet files.\n *\n * Reads compacted base files, encodes ALL rows as proto SyncResponse chunks\n * sized to a configurable byte budget, and writes them to storage. Chunks\n * contain all rows (not per-user); filtering happens at serve time.\n */\nexport class CheckpointGenerator {\n\tprivate readonly adapter: LakeAdapter;\n\tprivate readonly gatewayId: string;\n\tprivate readonly config: CheckpointConfig;\n\n\tconstructor(\n\t\tadapter: LakeAdapter,\n\t\t_schema: TableSchema,\n\t\tgatewayId: string,\n\t\tconfig?: CheckpointConfig,\n\t) {\n\t\tthis.adapter = adapter;\n\t\tthis.gatewayId = gatewayId;\n\t\tthis.config = config ?? 
DEFAULT_CHECKPOINT_CONFIG;\n\t}\n\n\t/**\n\t * Generate checkpoint chunks from base Parquet files.\n\t *\n\t * Reads each base file sequentially, accumulates deltas, and flushes\n\t * chunks when the estimated byte size exceeds the configured threshold.\n\t *\n\t * @param baseFileKeys - Storage keys of the base Parquet files\n\t * @param snapshotHlc - The HLC timestamp representing this snapshot point\n\t * @returns A Result containing the CheckpointResult, or a LakeSyncError on failure\n\t */\n\tasync generate(\n\t\tbaseFileKeys: string[],\n\t\tsnapshotHlc: HLCTimestamp,\n\t): Promise<Result<CheckpointResult, LakeSyncError>> {\n\t\tif (baseFileKeys.length === 0) {\n\t\t\treturn Ok({ chunksWritten: 0, bytesWritten: 0, snapshotHlc });\n\t\t}\n\n\t\tconst prefix = `checkpoints/${this.gatewayId}`;\n\t\tconst chunkNames: string[] = [];\n\t\tlet totalBytesWritten = 0;\n\t\tlet totalDeltas = 0;\n\n\t\t// Accumulator for current chunk\n\t\tlet accumulator: RowDelta[] = [];\n\t\tlet accumulatedBytes = 0;\n\n\t\tfor (const key of baseFileKeys) {\n\t\t\tconst getResult = await this.adapter.getObject(key);\n\t\t\tif (!getResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t`Failed to read base file: ${key}`,\n\t\t\t\t\t\t\"CHECKPOINT_READ_ERROR\",\n\t\t\t\t\t\tgetResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst parseResult = await readParquetToDeltas(getResult.value);\n\t\t\tif (!parseResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t`Failed to parse base file: ${key}`,\n\t\t\t\t\t\t\"CHECKPOINT_PARSE_ERROR\",\n\t\t\t\t\t\tparseResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tfor (const delta of parseResult.value) {\n\t\t\t\taccumulator.push(delta);\n\t\t\t\taccumulatedBytes +=\n\t\t\t\t\tESTIMATED_BASE_BYTES + delta.columns.length * ESTIMATED_BYTES_PER_COLUMN;\n\n\t\t\t\tif (accumulatedBytes >= this.config.chunkBytes) {\n\t\t\t\t\tconst flushResult = await this.flushChunk(\n\t\t\t\t\t\tprefix,\n\t\t\t\t\t\tchunkNames.length,\n\t\t\t\t\t\taccumulator,\n\t\t\t\t\t\tsnapshotHlc,\n\t\t\t\t\t);\n\t\t\t\t\tif (!flushResult.ok) return flushResult;\n\n\t\t\t\t\ttotalBytesWritten += flushResult.value;\n\t\t\t\t\ttotalDeltas += accumulator.length;\n\t\t\t\t\tchunkNames.push(this.chunkFileName(chunkNames.length));\n\t\t\t\t\taccumulator = [];\n\t\t\t\t\taccumulatedBytes = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Flush remaining accumulator\n\t\tif (accumulator.length > 0) {\n\t\t\tconst flushResult = await this.flushChunk(\n\t\t\t\tprefix,\n\t\t\t\tchunkNames.length,\n\t\t\t\taccumulator,\n\t\t\t\tsnapshotHlc,\n\t\t\t);\n\t\t\tif (!flushResult.ok) return flushResult;\n\n\t\t\ttotalBytesWritten += flushResult.value;\n\t\t\ttotalDeltas += accumulator.length;\n\t\t\tchunkNames.push(this.chunkFileName(chunkNames.length));\n\t\t}\n\n\t\t// Write manifest\n\t\tconst manifest: CheckpointManifest = {\n\t\t\tsnapshotHlc: snapshotHlc.toString(),\n\t\t\tgeneratedAt: new Date().toISOString(),\n\t\t\tchunkCount: chunkNames.length,\n\t\t\ttotalDeltas,\n\t\t\tchunks: chunkNames,\n\t\t};\n\n\t\tconst manifestBytes = new TextEncoder().encode(JSON.stringify(manifest));\n\t\tconst manifestResult = await this.adapter.putObject(\n\t\t\t`${prefix}/manifest.json`,\n\t\t\tmanifestBytes,\n\t\t\t\"application/json\",\n\t\t);\n\n\t\tif (!manifestResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\"Failed to write checkpoint 
manifest\",\n\t\t\t\t\t\"CHECKPOINT_WRITE_ERROR\",\n\t\t\t\t\tmanifestResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\ttotalBytesWritten += manifestBytes.byteLength;\n\n\t\treturn Ok({\n\t\t\tchunksWritten: chunkNames.length,\n\t\t\tbytesWritten: totalBytesWritten,\n\t\t\tsnapshotHlc,\n\t\t});\n\t}\n\n\t/**\n\t * Get all storage keys produced by a checkpoint generation.\n\t * Useful for adding to activeKeys in maintenance to prevent orphan removal.\n\t */\n\tgetCheckpointKeys(chunkCount: number): string[] {\n\t\tconst prefix = `checkpoints/${this.gatewayId}`;\n\t\tconst keys = [`${prefix}/manifest.json`];\n\t\tfor (let i = 0; i < chunkCount; i++) {\n\t\t\tkeys.push(`${prefix}/${this.chunkFileName(i)}`);\n\t\t}\n\t\treturn keys;\n\t}\n\n\tprivate chunkFileName(index: number): string {\n\t\treturn `chunk-${String(index).padStart(3, \"0\")}.bin`;\n\t}\n\n\tprivate async flushChunk(\n\t\tprefix: string,\n\t\tindex: number,\n\t\tdeltas: RowDelta[],\n\t\tsnapshotHlc: HLCTimestamp,\n\t): Promise<Result<number, LakeSyncError>> {\n\t\tconst encodeResult = encodeSyncResponse({\n\t\t\tdeltas,\n\t\t\tserverHlc: snapshotHlc,\n\t\t\thasMore: false,\n\t\t});\n\n\t\tif (!encodeResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Failed to encode checkpoint chunk ${index}`,\n\t\t\t\t\t\"CHECKPOINT_ENCODE_ERROR\",\n\t\t\t\t\tencodeResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\tconst data = encodeResult.value;\n\t\tconst chunkKey = `${prefix}/${this.chunkFileName(index)}`;\n\n\t\tconst putResult = await this.adapter.putObject(chunkKey, data, \"application/octet-stream\");\n\t\tif (!putResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Failed to write checkpoint chunk: ${chunkKey}`,\n\t\t\t\t\t\"CHECKPOINT_WRITE_ERROR\",\n\t\t\t\t\tputResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\treturn Ok(data.byteLength);\n\t}\n}\n","import {\n\tErr,\n\tFlushError,\n\tHLC,\n\ttype HLCTimestamp,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\ttype TableSchema,\n} from \"@lakesync/core\";\nimport { readParquetToDeltas, writeDeltasToParquet } from \"@lakesync/parquet\";\n\n/**\n * Minimal schema used for equality delete files.\n *\n * Equality delete files only need the row-identity columns (table + rowId),\n * which are already present as system columns in every RowDelta. Using an\n * empty user-column list keeps the Parquet file as small as possible.\n */\nconst EQUALITY_DELETE_SCHEMA: TableSchema = {\n\ttable: \"_equality_delete\",\n\tcolumns: [],\n};\n\n/**\n * Sentinel HLC value used for synthetic equality-delete deltas.\n * The actual timestamp is irrelevant for equality deletes — only\n * `table` and `rowId` matter for row identification.\n */\nconst SENTINEL_HLC: HLCTimestamp = HLC.encode(0, 0);\n\n/**\n * Write an Iceberg equality delete file.\n *\n * Contains only the equality columns (table + rowId) needed to identify\n * deleted rows. 
The file is encoded as a Parquet file using synthetic\n * DELETE RowDeltas with no user columns.\n *\n * @param deletedRows - Array of row identifiers (table + rowId) for deleted rows\n * @param _schema - The table schema (reserved for future use with custom equality columns)\n * @returns A Result containing the Parquet bytes, or a FlushError on failure\n */\nexport async function writeEqualityDeletes(\n\tdeletedRows: Array<{ table: string; rowId: string }>,\n\t_schema: TableSchema,\n): Promise<Result<Uint8Array, FlushError>> {\n\tif (deletedRows.length === 0) {\n\t\treturn Ok(new Uint8Array(0));\n\t}\n\n\ttry {\n\t\t// Build synthetic DELETE RowDeltas with only row-identity fields.\n\t\t// All other fields use sentinel values since only table + rowId\n\t\t// are meaningful for equality deletes.\n\t\tconst syntheticDeltas: RowDelta[] = deletedRows.map((row, index) => ({\n\t\t\top: \"DELETE\" as const,\n\t\t\ttable: row.table,\n\t\t\trowId: row.rowId,\n\t\t\tclientId: \"_compactor\",\n\t\t\tcolumns: [],\n\t\t\thlc: SENTINEL_HLC,\n\t\t\tdeltaId: `eq-delete-${index}`,\n\t\t}));\n\n\t\treturn await writeDeltasToParquet(syntheticDeltas, EQUALITY_DELETE_SCHEMA);\n\t} catch (err) {\n\t\tconst cause = err instanceof Error ? err : new Error(String(err));\n\t\treturn Err(new FlushError(`Failed to write equality deletes: ${cause.message}`, cause));\n\t}\n}\n\n/**\n * Read an equality delete file back into row identifiers.\n *\n * Deserialises a Parquet equality delete file and extracts the\n * table + rowId pairs that identify deleted rows.\n *\n * @param data - The Parquet file bytes to read\n * @returns A Result containing the row identifiers, or a FlushError on failure\n */\nexport async function readEqualityDeletes(\n\tdata: Uint8Array,\n): Promise<Result<Array<{ table: string; rowId: string }>, FlushError>> {\n\tif (data.byteLength === 0) {\n\t\treturn Ok([]);\n\t}\n\n\tconst readResult = await readParquetToDeltas(data);\n\tif (!readResult.ok) {\n\t\treturn Err(\n\t\t\tnew FlushError(\n\t\t\t\t`Failed to read equality deletes: ${readResult.error.message}`,\n\t\t\t\treadResult.error,\n\t\t\t),\n\t\t);\n\t}\n\n\tconst rows = readResult.value.map((delta) => ({\n\t\ttable: delta.table,\n\t\trowId: delta.rowId,\n\t}));\n\n\treturn Ok(rows);\n}\n","import type { LakeAdapter } from \"@lakesync/adapter\";\nimport {\n\ttype ColumnDelta,\n\tErr,\n\tHLC,\n\ttype HLCTimestamp,\n\tLakeSyncError,\n\tOk,\n\ttype Result,\n\ttype RowDelta,\n\trowKey,\n\ttype TableSchema,\n} from \"@lakesync/core\";\nimport { readParquetToDeltas, writeDeltasToParquet } from \"@lakesync/parquet\";\nimport { writeEqualityDeletes } from \"./equality-delete\";\nimport type { CompactionConfig, CompactionResult } from \"./types\";\n\n/** Per-column LWW state for incremental resolution. */\ninterface ColumnState {\n\tvalue: unknown;\n\thlc: HLCTimestamp;\n}\n\n/** Per-row state tracking column-level LWW resolution. */\ninterface RowState {\n\ttable: string;\n\trowId: string;\n\tclientId: string;\n\tcolumns: Map<string, ColumnState>;\n\tlatestHlc: HLCTimestamp;\n\tlatestDeltaId: string;\n\t/** HLC of the most recent DELETE operation, or 0n if never deleted. 
*/\n\tdeleteHlc: HLCTimestamp;\n}\n\n/**\n * Compacts delta files into consolidated base data files and equality delete files.\n *\n * Reads delta Parquet files from the lake adapter, resolves all deltas per row\n * using LWW (last-writer-wins based on HLC ordering), then writes the final\n * materialised state back as base files and delete files.\n */\nexport class Compactor {\n\tprivate readonly adapter: LakeAdapter;\n\tprivate readonly config: CompactionConfig;\n\tprivate readonly schema: TableSchema;\n\n\t/**\n\t * Create a new Compactor instance.\n\t *\n\t * @param adapter - The lake adapter for reading/writing Parquet files\n\t * @param config - Compaction configuration (thresholds and limits)\n\t * @param schema - The table schema describing user-defined columns\n\t */\n\tconstructor(adapter: LakeAdapter, config: CompactionConfig, schema: TableSchema) {\n\t\tthis.adapter = adapter;\n\t\tthis.config = config;\n\t\tthis.schema = schema;\n\t}\n\n\t/**\n\t * Compact delta files into base data files.\n\t *\n\t * Reads delta files from storage, resolves all deltas per row using LWW,\n\t * and writes consolidated base files + equality delete files.\n\t *\n\t * @param deltaFileKeys - Storage keys of the delta Parquet files to compact\n\t * @param outputPrefix - Prefix for the output base/delete file keys\n\t * @returns A Result containing the CompactionResult, or a LakeSyncError on failure\n\t */\n\tasync compact(\n\t\tdeltaFileKeys: string[],\n\t\toutputPrefix: string,\n\t): Promise<Result<CompactionResult, LakeSyncError>> {\n\t\tif (deltaFileKeys.length < this.config.minDeltaFiles) {\n\t\t\treturn Ok({\n\t\t\t\tbaseFilesWritten: 0,\n\t\t\t\tdeleteFilesWritten: 0,\n\t\t\t\tdeltaFilesCompacted: 0,\n\t\t\t\tbytesRead: 0,\n\t\t\t\tbytesWritten: 0,\n\t\t\t});\n\t\t}\n\n\t\tconst keysToCompact = deltaFileKeys.slice(0, this.config.maxDeltaFiles);\n\n\t\tconst resolveResult = await this.readAndResolveIncrementally(keysToCompact);\n\t\tif (!resolveResult.ok) return resolveResult;\n\n\t\tconst { liveRows, deletedRows, bytesRead } = resolveResult.value;\n\n\t\tconst writeResult = await this.writeOutputFiles(liveRows, deletedRows, outputPrefix);\n\t\tif (!writeResult.ok) return writeResult;\n\n\t\treturn Ok({\n\t\t\t...writeResult.value,\n\t\t\tdeltaFilesCompacted: keysToCompact.length,\n\t\t\tbytesRead,\n\t\t});\n\t}\n\n\t/**\n\t * Read delta files one at a time and incrementally resolve to final row state.\n\t *\n\t * Memory usage is O(unique rows x columns) rather than O(total deltas),\n\t * since each file's deltas are processed and discarded before reading the next.\n\t */\n\tprivate async readAndResolveIncrementally(keysToCompact: string[]): Promise<\n\t\tResult<\n\t\t\t{\n\t\t\t\tliveRows: RowDelta[];\n\t\t\t\tdeletedRows: Array<{ table: string; rowId: string }>;\n\t\t\t\tbytesRead: number;\n\t\t\t},\n\t\t\tLakeSyncError\n\t\t>\n\t> {\n\t\tconst rowStates = new Map<string, RowState>();\n\t\tlet bytesRead = 0;\n\n\t\tfor (const key of keysToCompact) {\n\t\t\tconst getResult = await this.adapter.getObject(key);\n\t\t\tif (!getResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t`Failed to read delta file: ${key}`,\n\t\t\t\t\t\t\"COMPACTION_READ_ERROR\",\n\t\t\t\t\t\tgetResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst data = getResult.value;\n\t\t\tbytesRead += data.byteLength;\n\n\t\t\tconst parseResult = await readParquetToDeltas(data);\n\t\t\tif (!parseResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t`Failed to parse delta 
file: ${key}`,\n\t\t\t\t\t\t\"COMPACTION_PARSE_ERROR\",\n\t\t\t\t\t\tparseResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Process each delta incrementally — no accumulation\n\t\t\tfor (const delta of parseResult.value) {\n\t\t\t\tconst k = rowKey(delta.table, delta.rowId);\n\t\t\t\tlet state = rowStates.get(k);\n\n\t\t\t\tif (!state) {\n\t\t\t\t\tstate = {\n\t\t\t\t\t\ttable: delta.table,\n\t\t\t\t\t\trowId: delta.rowId,\n\t\t\t\t\t\tclientId: delta.clientId,\n\t\t\t\t\t\tcolumns: new Map(),\n\t\t\t\t\t\tlatestHlc: 0n as HLCTimestamp,\n\t\t\t\t\t\tlatestDeltaId: delta.deltaId,\n\t\t\t\t\t\tdeleteHlc: 0n as HLCTimestamp,\n\t\t\t\t\t};\n\t\t\t\t\trowStates.set(k, state);\n\t\t\t\t}\n\n\t\t\t\t// Track overall latest HLC for metadata\n\t\t\t\tif (HLC.compare(delta.hlc, state.latestHlc) > 0) {\n\t\t\t\t\tstate.latestHlc = delta.hlc;\n\t\t\t\t\tstate.latestDeltaId = delta.deltaId;\n\t\t\t\t\tstate.clientId = delta.clientId;\n\t\t\t\t}\n\n\t\t\t\tif (delta.op === \"DELETE\") {\n\t\t\t\t\t// Track the latest DELETE HLC\n\t\t\t\t\tif (HLC.compare(delta.hlc, state.deleteHlc) > 0) {\n\t\t\t\t\t\tstate.deleteHlc = delta.hlc;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// INSERT or UPDATE — apply column-level LWW\n\t\t\t\t\tfor (const col of delta.columns) {\n\t\t\t\t\t\tconst existing = state.columns.get(col.column);\n\t\t\t\t\t\tif (!existing || HLC.compare(delta.hlc, existing.hlc) > 0) {\n\t\t\t\t\t\t\tstate.columns.set(col.column, {\n\t\t\t\t\t\t\t\tvalue: col.value,\n\t\t\t\t\t\t\t\thlc: delta.hlc,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Convert resolved states to output format\n\t\tconst liveRows: RowDelta[] = [];\n\t\tconst deletedRows: Array<{ table: string; rowId: string }> = [];\n\n\t\tfor (const [, state] of rowStates) {\n\t\t\t// A row is deleted if the DELETE HLC is >= all column HLCs\n\t\t\t// (i.e. no column was written after the delete)\n\t\t\tlet isDeleted = state.deleteHlc > 0n;\n\t\t\tif (isDeleted) {\n\t\t\t\tfor (const col of state.columns.values()) {\n\t\t\t\t\tif (HLC.compare(state.deleteHlc, col.hlc) < 0) {\n\t\t\t\t\t\tisDeleted = false;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (isDeleted || state.columns.size === 0) {\n\t\t\t\tdeletedRows.push({ table: state.table, rowId: state.rowId });\n\t\t\t} else {\n\t\t\t\t// Filter out columns that were set before the delete\n\t\t\t\tconst columns: ColumnDelta[] = [];\n\t\t\t\tfor (const col of this.schema.columns) {\n\t\t\t\t\tconst colState = state.columns.get(col.name);\n\t\t\t\t\tif (\n\t\t\t\t\t\tcolState &&\n\t\t\t\t\t\t(state.deleteHlc === 0n || HLC.compare(colState.hlc, state.deleteHlc) > 0)\n\t\t\t\t\t) {\n\t\t\t\t\t\tcolumns.push({ column: col.name, value: colState.value });\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tliveRows.push({\n\t\t\t\t\top: \"INSERT\",\n\t\t\t\t\ttable: state.table,\n\t\t\t\t\trowId: state.rowId,\n\t\t\t\t\tclientId: state.clientId,\n\t\t\t\t\tcolumns,\n\t\t\t\t\thlc: state.latestHlc,\n\t\t\t\t\tdeltaId: state.latestDeltaId,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn Ok({ liveRows, deletedRows, bytesRead });\n\t}\n\n\t/** Write base Parquet file(s) for live rows and equality delete file(s) for deleted rows. 
*/\n\tprivate async writeOutputFiles(\n\t\tliveRows: RowDelta[],\n\t\tdeletedRows: Array<{ table: string; rowId: string }>,\n\t\toutputPrefix: string,\n\t): Promise<\n\t\tResult<\n\t\t\t{ baseFilesWritten: number; deleteFilesWritten: number; bytesWritten: number },\n\t\t\tLakeSyncError\n\t\t>\n\t> {\n\t\tlet bytesWritten = 0;\n\t\tlet baseFilesWritten = 0;\n\t\tlet deleteFilesWritten = 0;\n\n\t\tif (liveRows.length > 0) {\n\t\t\tconst writeResult = await writeDeltasToParquet(liveRows, this.schema);\n\t\t\tif (!writeResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t\"Failed to write base file\",\n\t\t\t\t\t\t\"COMPACTION_WRITE_ERROR\",\n\t\t\t\t\t\twriteResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst baseData = writeResult.value;\n\t\t\tconst timestamp = this.generateTimestamp();\n\t\t\tconst basePath = `${outputPrefix}/base-${timestamp}.parquet`;\n\n\t\t\tconst putResult = await this.adapter.putObject(\n\t\t\t\tbasePath,\n\t\t\t\tbaseData,\n\t\t\t\t\"application/octet-stream\",\n\t\t\t);\n\t\t\tif (!putResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t`Failed to store base file: ${basePath}`,\n\t\t\t\t\t\t\"COMPACTION_STORE_ERROR\",\n\t\t\t\t\t\tputResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tbytesWritten += baseData.byteLength;\n\t\t\tbaseFilesWritten = 1;\n\t\t}\n\n\t\tif (deletedRows.length > 0) {\n\t\t\tconst writeResult = await writeEqualityDeletes(deletedRows, this.schema);\n\t\t\tif (!writeResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t\"Failed to write equality delete file\",\n\t\t\t\t\t\t\"COMPACTION_WRITE_ERROR\",\n\t\t\t\t\t\twriteResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst deleteData = writeResult.value;\n\t\t\tconst timestamp = this.generateTimestamp();\n\t\t\tconst deletePath = `${outputPrefix}/delete-${timestamp}.parquet`;\n\n\t\t\tconst putResult = await this.adapter.putObject(\n\t\t\t\tdeletePath,\n\t\t\t\tdeleteData,\n\t\t\t\t\"application/octet-stream\",\n\t\t\t);\n\t\t\tif (!putResult.ok) {\n\t\t\t\treturn Err(\n\t\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t\t`Failed to store delete file: ${deletePath}`,\n\t\t\t\t\t\t\"COMPACTION_STORE_ERROR\",\n\t\t\t\t\t\tputResult.error,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tbytesWritten += deleteData.byteLength;\n\t\t\tdeleteFilesWritten = 1;\n\t\t}\n\n\t\treturn Ok({ baseFilesWritten, deleteFilesWritten, bytesWritten });\n\t}\n\n\t/**\n\t * Generate a timestamp string for output file naming.\n\t * Uses the current wall clock time with a random suffix for uniqueness.\n\t */\n\tprivate generateTimestamp(): string {\n\t\tconst now = Date.now();\n\t\tconst suffix = Math.random().toString(36).slice(2, 8);\n\t\treturn `${now}-${suffix}`;\n\t}\n}\n","import type { LakeAdapter, ObjectInfo } from \"@lakesync/adapter\";\nimport { Err, HLC, LakeSyncError, Ok, type Result } from \"@lakesync/core\";\nimport type { CheckpointGenerator, CheckpointResult } from \"./checkpoint-generator\";\nimport type { Compactor } from \"./compactor\";\nimport type { CompactionResult } from \"./types\";\n\n/** Configuration for the maintenance cycle */\nexport interface MaintenanceConfig {\n\t/** Number of recent snapshots to retain */\n\tretainSnapshots: number;\n\t/** Minimum age (ms) before orphaned files can be deleted */\n\torphanAgeMs: number;\n}\n\n/** Default maintenance configuration values */\nexport const DEFAULT_MAINTENANCE_CONFIG: MaintenanceConfig = {\n\tretainSnapshots: 5,\n\torphanAgeMs: 60 * 60 * 1000, // 1 
hour\n};\n\n/** Report produced by a full maintenance cycle */\nexport interface MaintenanceReport {\n\t/** Result of the compaction step */\n\tcompaction: CompactionResult;\n\t/** Number of expired snapshots removed */\n\tsnapshotsExpired: number;\n\t/** Number of orphaned files removed */\n\torphansRemoved: number;\n\t/** Result of checkpoint generation (if a generator was configured) */\n\tcheckpoint?: CheckpointResult;\n}\n\n/**\n * Runs a full maintenance cycle: compact, expire snapshots, and clean orphans.\n *\n * The runner orchestrates the three maintenance phases in order:\n * 1. **Compact** — merge delta files into consolidated base/delete files\n * 2. **Expire** — (reserved for future snapshot expiry logic)\n * 3. **Clean** — remove orphaned files that are no longer referenced\n */\nexport class MaintenanceRunner {\n\tprivate readonly compactor: Compactor;\n\tprivate readonly adapter: LakeAdapter;\n\tprivate readonly config: MaintenanceConfig;\n\tprivate readonly checkpointGenerator: CheckpointGenerator | null;\n\n\t/**\n\t * Create a new MaintenanceRunner instance.\n\t *\n\t * @param compactor - The compactor instance for merging delta files\n\t * @param adapter - The lake adapter for storage operations\n\t * @param config - Maintenance configuration (retention and age thresholds)\n\t * @param checkpointGenerator - Optional checkpoint generator; when provided,\n\t * checkpoints are generated after successful compaction\n\t */\n\tconstructor(\n\t\tcompactor: Compactor,\n\t\tadapter: LakeAdapter,\n\t\tconfig: MaintenanceConfig,\n\t\tcheckpointGenerator?: CheckpointGenerator,\n\t) {\n\t\tthis.compactor = compactor;\n\t\tthis.adapter = adapter;\n\t\tthis.config = config;\n\t\tthis.checkpointGenerator = checkpointGenerator ?? null;\n\t}\n\n\t/**\n\t * Run the full maintenance cycle: compact, expire, and clean.\n\t *\n\t * Compacts delta files into base/delete files, then removes orphaned\n\t * storage objects that are no longer referenced by any active data.\n\t * Files younger than `orphanAgeMs` are never deleted to avoid races\n\t * with in-progress flush operations.\n\t *\n\t * @param deltaFileKeys - Storage keys of the delta Parquet files to compact\n\t * @param outputPrefix - Prefix for the output base/delete file keys\n\t * @param storagePrefix - Prefix under which all related storage files live\n\t * @returns A Result containing the MaintenanceReport, or a LakeSyncError on failure\n\t */\n\tasync run(\n\t\tdeltaFileKeys: string[],\n\t\toutputPrefix: string,\n\t\tstoragePrefix: string,\n\t): Promise<Result<MaintenanceReport, LakeSyncError>> {\n\t\t// Step 1: Compact delta files into base/delete files\n\t\tconst compactionResult = await this.compactor.compact(deltaFileKeys, outputPrefix);\n\t\tif (!compactionResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Maintenance compaction failed: ${compactionResult.error.message}`,\n\t\t\t\t\t\"MAINTENANCE_COMPACTION_ERROR\",\n\t\t\t\t\tcompactionResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\tconst compaction = compactionResult.value;\n\n\t\t// Build the set of active keys:\n\t\t// - Delta files that were NOT consumed by compaction (i.e. 
those beyond maxDeltaFiles)\n\t\t// - All newly written output files under the output prefix\n\t\tconst activeKeys = new Set<string>();\n\n\t\t// Delta files that were not compacted remain active\n\t\tconst compactedCount = compaction.deltaFilesCompacted;\n\t\tfor (let i = compactedCount; i < deltaFileKeys.length; i++) {\n\t\t\tactiveKeys.add(deltaFileKeys[i]!);\n\t\t}\n\n\t\t// Discover newly written output files (base + delete files)\n\t\tconst listOutputResult = await this.adapter.listObjects(outputPrefix);\n\t\tif (!listOutputResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Failed to list output files: ${listOutputResult.error.message}`,\n\t\t\t\t\t\"MAINTENANCE_LIST_ERROR\",\n\t\t\t\t\tlistOutputResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\tfor (const obj of listOutputResult.value) {\n\t\t\tactiveKeys.add(obj.key);\n\t\t}\n\n\t\t// Step 2: Generate checkpoints (if configured)\n\t\tlet checkpoint: CheckpointResult | undefined;\n\t\tif (this.checkpointGenerator && compaction.baseFilesWritten > 0) {\n\t\t\t// Collect base file keys from the output\n\t\t\tconst baseFileKeys = listOutputResult.value\n\t\t\t\t.filter((obj) => obj.key.endsWith(\".parquet\") && obj.key.includes(\"/base-\"))\n\t\t\t\t.map((obj) => obj.key);\n\n\t\t\tif (baseFileKeys.length > 0) {\n\t\t\t\t// Use the latest HLC from the compacted data as the snapshot HLC.\n\t\t\t\t// Read the first base file's max HLC as an approximation.\n\t\t\t\tconst snapshotHlc = HLC.encode(Date.now(), 0);\n\n\t\t\t\tconst checkpointResult = await this.checkpointGenerator.generate(baseFileKeys, snapshotHlc);\n\n\t\t\t\tif (checkpointResult.ok) {\n\t\t\t\t\tcheckpoint = checkpointResult.value;\n\t\t\t\t\t// Add checkpoint keys to active set to prevent orphan removal\n\t\t\t\t\tconst checkpointKeys = this.checkpointGenerator.getCheckpointKeys(\n\t\t\t\t\t\tcheckpoint.chunksWritten,\n\t\t\t\t\t);\n\t\t\t\t\tfor (const key of checkpointKeys) {\n\t\t\t\t\t\tactiveKeys.add(key);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Checkpoint failure is non-fatal — compaction still succeeded\n\t\t\t}\n\t\t}\n\n\t\t// Step 3: Remove orphaned files\n\t\tconst orphanResult = await this.removeOrphans(storagePrefix, activeKeys);\n\t\tif (!orphanResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Maintenance orphan removal failed: ${orphanResult.error.message}`,\n\t\t\t\t\t\"MAINTENANCE_ORPHAN_ERROR\",\n\t\t\t\t\torphanResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\treturn Ok({\n\t\t\tcompaction,\n\t\t\tsnapshotsExpired: 0,\n\t\t\torphansRemoved: orphanResult.value,\n\t\t\tcheckpoint,\n\t\t});\n\t}\n\n\t/**\n\t * Delete orphaned files not referenced by any active data.\n\t *\n\t * Lists all files under the given storage prefix, compares each\n\t * against the set of active keys, and deletes files that are both\n\t * unreferenced and older than `orphanAgeMs`. 
This age guard\n\t * prevents deletion of files created by in-progress flush operations.\n\t *\n\t * @param storagePrefix - The storage prefix to scan for orphaned files\n\t * @param activeKeys - Set of storage keys that must be retained\n\t * @returns A Result containing the count of deleted files, or a LakeSyncError on failure\n\t */\n\tasync removeOrphans(\n\t\tstoragePrefix: string,\n\t\tactiveKeys: Set<string>,\n\t): Promise<Result<number, LakeSyncError>> {\n\t\tconst listResult = await this.adapter.listObjects(storagePrefix);\n\t\tif (!listResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Failed to list objects for orphan removal: ${listResult.error.message}`,\n\t\t\t\t\t\"MAINTENANCE_LIST_ERROR\",\n\t\t\t\t\tlistResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\tconst now = Date.now();\n\t\tconst orphanKeys = this.findOrphans(listResult.value, activeKeys, now);\n\n\t\tif (orphanKeys.length === 0) {\n\t\t\treturn Ok(0);\n\t\t}\n\n\t\tconst deleteResult = await this.adapter.deleteObjects(orphanKeys);\n\t\tif (!deleteResult.ok) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Failed to delete orphaned files: ${deleteResult.error.message}`,\n\t\t\t\t\t\"MAINTENANCE_DELETE_ERROR\",\n\t\t\t\t\tdeleteResult.error,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\n\t\treturn Ok(orphanKeys.length);\n\t}\n\n\t/**\n\t * Identify orphaned file keys from a list of storage objects.\n\t *\n\t * A file is considered an orphan if it is not in the active keys set\n\t * and its last modification time is older than the configured orphan age.\n\t */\n\tprivate findOrphans(objects: ObjectInfo[], activeKeys: Set<string>, now: number): string[] {\n\t\tconst orphans: string[] = [];\n\t\tfor (const obj of objects) {\n\t\t\tif (activeKeys.has(obj.key)) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tconst age = now - obj.lastModified.getTime();\n\t\t\tif (age >= this.config.orphanAgeMs) {\n\t\t\t\torphans.push(obj.key);\n\t\t\t}\n\t\t}\n\t\treturn orphans;\n\t}\n}\n","import { Err, LakeSyncError, Ok, type Result } from \"@lakesync/core\";\nimport type { MaintenanceReport, MaintenanceRunner } from \"./maintenance\";\n\n/** Parameters for a single maintenance run */\nexport interface MaintenanceTask {\n\t/** Storage keys of the delta Parquet files to compact */\n\tdeltaFileKeys: string[];\n\t/** Prefix for the output base/delete file keys */\n\toutputPrefix: string;\n\t/** Prefix under which all related storage files live */\n\tstoragePrefix: string;\n}\n\n/**\n * Provider function that resolves the maintenance task parameters for each run.\n * Called before every scheduled tick to determine what files to compact.\n * Return `null` to skip this tick (e.g. when there is nothing to compact).\n */\nexport type MaintenanceTaskProvider = () => Promise<MaintenanceTask | null>;\n\n/** Configuration for the compaction scheduler */\nexport interface SchedulerConfig {\n\t/** Interval between maintenance runs in milliseconds (default 60000) */\n\tintervalMs: number;\n\t/** Whether the scheduler is enabled (default true) */\n\tenabled: boolean;\n}\n\n/** Default scheduler configuration values */\nexport const DEFAULT_SCHEDULER_CONFIG: SchedulerConfig = {\n\tintervalMs: 60_000,\n\tenabled: true,\n};\n\n/**\n * Manages interval-based compaction scheduling.\n *\n * Wraps a {@link MaintenanceRunner} and executes maintenance cycles on a\n * configurable interval. 
The scheduler is safe against concurrent runs:\n * if a previous tick is still in progress when the next fires, the tick\n * is silently skipped.\n */\nexport class CompactionScheduler {\n\tprivate readonly runner: MaintenanceRunner;\n\tprivate readonly taskProvider: MaintenanceTaskProvider;\n\tprivate readonly config: SchedulerConfig;\n\n\tprivate timer: ReturnType<typeof setInterval> | null = null;\n\tprivate running = false;\n\tprivate inFlightPromise: Promise<Result<MaintenanceReport, LakeSyncError>> | null = null;\n\n\t/**\n\t * Create a new CompactionScheduler instance.\n\t *\n\t * @param runner - The maintenance runner to execute on each tick\n\t * @param taskProvider - Function that provides maintenance task parameters for each run\n\t * @param config - Scheduler configuration (interval and enabled flag)\n\t */\n\tconstructor(\n\t\trunner: MaintenanceRunner,\n\t\ttaskProvider: MaintenanceTaskProvider,\n\t\tconfig: Partial<SchedulerConfig> = {},\n\t) {\n\t\tthis.runner = runner;\n\t\tthis.taskProvider = taskProvider;\n\t\tthis.config = { ...DEFAULT_SCHEDULER_CONFIG, ...config };\n\t}\n\n\t/**\n\t * Whether the scheduler is currently active (timer is ticking).\n\t */\n\tget isRunning(): boolean {\n\t\treturn this.running;\n\t}\n\n\t/**\n\t * Start the scheduler interval timer.\n\t *\n\t * Begins executing maintenance runs at the configured interval.\n\t * If the scheduler is already running or disabled, returns an error.\n\t *\n\t * @returns A Result indicating success or a descriptive error\n\t */\n\tstart(): Result<void, LakeSyncError> {\n\t\tif (!this.config.enabled) {\n\t\t\treturn Err(new LakeSyncError(\"Scheduler is disabled\", \"SCHEDULER_DISABLED\"));\n\t\t}\n\n\t\tif (this.running) {\n\t\t\treturn Err(new LakeSyncError(\"Scheduler is already running\", \"SCHEDULER_ALREADY_RUNNING\"));\n\t\t}\n\n\t\tthis.running = true;\n\t\tthis.timer = setInterval(() => {\n\t\t\tvoid this.tick();\n\t\t}, this.config.intervalMs);\n\n\t\treturn Ok(undefined);\n\t}\n\n\t/**\n\t * Stop the scheduler and wait for any in-progress run to finish.\n\t *\n\t * Clears the interval timer and, if a maintenance run is currently\n\t * executing, awaits its completion before returning.\n\t *\n\t * @returns A Result indicating success or a descriptive error\n\t */\n\tasync stop(): Promise<Result<void, LakeSyncError>> {\n\t\tif (!this.running) {\n\t\t\treturn Err(new LakeSyncError(\"Scheduler is not running\", \"SCHEDULER_NOT_RUNNING\"));\n\t\t}\n\n\t\tif (this.timer !== null) {\n\t\t\tclearInterval(this.timer);\n\t\t\tthis.timer = null;\n\t\t}\n\n\t\tthis.running = false;\n\n\t\t// Wait for any in-progress run to complete\n\t\tif (this.inFlightPromise !== null) {\n\t\t\tawait this.inFlightPromise;\n\t\t\tthis.inFlightPromise = null;\n\t\t}\n\n\t\treturn Ok(undefined);\n\t}\n\n\t/**\n\t * Manually trigger a single maintenance run.\n\t *\n\t * Useful for testing or administrative purposes. 
If a run is already\n\t * in progress, skips and returns an error.\n\t *\n\t * @returns A Result containing the MaintenanceReport, or a LakeSyncError on failure\n\t */\n\tasync runOnce(): Promise<Result<MaintenanceReport, LakeSyncError>> {\n\t\tif (this.inFlightPromise !== null) {\n\t\t\treturn Err(new LakeSyncError(\"A maintenance run is already in progress\", \"SCHEDULER_BUSY\"));\n\t\t}\n\n\t\treturn this.executeMaintenance();\n\t}\n\n\t/**\n\t * Internal tick handler called by the interval timer.\n\t * Skips if a previous run is still in progress.\n\t */\n\tprivate async tick(): Promise<void> {\n\t\tif (this.inFlightPromise !== null) {\n\t\t\treturn;\n\t\t}\n\n\t\tawait this.executeMaintenance();\n\t}\n\n\t/**\n\t * Execute a single maintenance cycle.\n\t *\n\t * Calls the task provider to get parameters, then runs the maintenance\n\t * runner. Tracks the in-flight promise so concurrent runs are prevented.\n\t */\n\tprivate async executeMaintenance(): Promise<Result<MaintenanceReport, LakeSyncError>> {\n\t\tconst taskResult = await this.resolveTask();\n\t\tif (!taskResult.ok) {\n\t\t\treturn taskResult;\n\t\t}\n\n\t\tconst task = taskResult.value;\n\t\tif (task === null) {\n\t\t\treturn Ok({\n\t\t\t\tcompaction: {\n\t\t\t\t\tbaseFilesWritten: 0,\n\t\t\t\t\tdeleteFilesWritten: 0,\n\t\t\t\t\tdeltaFilesCompacted: 0,\n\t\t\t\t\tbytesRead: 0,\n\t\t\t\t\tbytesWritten: 0,\n\t\t\t\t},\n\t\t\t\tsnapshotsExpired: 0,\n\t\t\t\torphansRemoved: 0,\n\t\t\t});\n\t\t}\n\n\t\tconst promise = this.runner.run(task.deltaFileKeys, task.outputPrefix, task.storagePrefix);\n\t\tthis.inFlightPromise = promise;\n\n\t\ttry {\n\t\t\tconst result = await promise;\n\t\t\treturn result;\n\t\t} finally {\n\t\t\tthis.inFlightPromise = null;\n\t\t}\n\t}\n\n\t/**\n\t * Resolve the maintenance task from the provider, wrapping any thrown\n\t * exceptions into a Result error.\n\t */\n\tprivate async resolveTask(): Promise<Result<MaintenanceTask | null, LakeSyncError>> {\n\t\ttry {\n\t\t\tconst task = await this.taskProvider();\n\t\t\treturn Ok(task);\n\t\t} catch (error) {\n\t\t\treturn Err(\n\t\t\t\tnew LakeSyncError(\n\t\t\t\t\t`Task provider failed: ${error instanceof Error ? 
error.message : String(error)}`,\n\t\t\t\t\t\"SCHEDULER_TASK_PROVIDER_ERROR\",\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\t}\n}\n","/** Configuration for the compaction process */\nexport interface CompactionConfig {\n\t/** Minimum number of delta files before compaction triggers */\n\tminDeltaFiles: number;\n\t/** Maximum number of delta files to compact in one pass */\n\tmaxDeltaFiles: number;\n\t/** Target base file size in bytes */\n\ttargetFileSizeBytes: number;\n}\n\n/** Default compaction configuration values */\nexport const DEFAULT_COMPACTION_CONFIG: CompactionConfig = {\n\tminDeltaFiles: 10,\n\tmaxDeltaFiles: 20,\n\ttargetFileSizeBytes: 128 * 1024 * 1024, // 128 MB\n};\n\n/** Result of a compaction operation */\nexport interface CompactionResult {\n\t/** Number of base data files written */\n\tbaseFilesWritten: number;\n\t/** Number of equality delete files written */\n\tdeleteFilesWritten: number;\n\t/** Number of delta files that were compacted */\n\tdeltaFilesCompacted: number;\n\t/** Total bytes read during compaction */\n\tbytesRead: number;\n\t/** Total bytes written during compaction */\n\tbytesWritten: number;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAoBO,IAAM,4BAA8C;AAAA,EAC1D,YAAY,KAAK,OAAO;AACzB;AA2BA,IAAM,uBAAuB;AAC7B,IAAM,6BAA6B;AAS5B,IAAM,sBAAN,MAA0B;AAAA,EACf;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YACC,SACA,SACA,WACA,QACC;AACD,SAAK,UAAU;AACf,SAAK,YAAY;AACjB,SAAK,SAAS,UAAU;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,SACL,cACA,aACmD;AACnD,QAAI,aAAa,WAAW,GAAG;AAC9B,aAAO,GAAG,EAAE,eAAe,GAAG,cAAc,GAAG,YAAY,CAAC;AAAA,IAC7D;AAEA,UAAM,SAAS,eAAe,KAAK,SAAS;AAC5C,UAAM,aAAuB,CAAC;AAC9B,QAAI,oBAAoB;AACxB,QAAI,cAAc;AAGlB,QAAI,cAA0B,CAAC;AAC/B,QAAI,mBAAmB;AAEvB,eAAW,OAAO,cAAc;AAC/B,YAAM,YAAY,MAAM,KAAK,QAAQ,UAAU,GAAG;AAClD,UAAI,CAAC,UAAU,IAAI;AAClB,eAAO;AAAA,UACN,IAAI;AAAA,YACH,6BAA6B,GAAG;AAAA,YAChC;AAAA,YACA,UAAU;AAAA,UACX;AAAA,QACD;AAAA,MACD;AAEA,YAAM,cAAc,MAAM,oBAAoB,UAAU,KAAK;AAC7D,UAAI,CAAC,YAAY,IAAI;AACpB,eAAO;AAAA,UACN,IAAI;AAAA,YACH,8BAA8B,GAAG;AAAA,YACjC;AAAA,YACA,YAAY;AAAA,UACb;AAAA,QACD;AAAA,MACD;AAEA,iBAAW,SAAS,YAAY,OAAO;AACtC,oBAAY,KAAK,KAAK;AACtB,4BACC,uBAAuB,MAAM,QAAQ,SAAS;AAE/C,YAAI,oBAAoB,KAAK,OAAO,YAAY;AAC/C,gBAAM,cAAc,MAAM,KAAK;AAAA,YAC9B;AAAA,YACA,WAAW;AAAA,YACX;AAAA,YACA;AAAA,UACD;AACA,cAAI,CAAC,YAAY,GAAI,QAAO;AAE5B,+BAAqB,YAAY;AACjC,yBAAe,YAAY;AAC3B,qBAAW,KAAK,KAAK,cAAc,WAAW,MAAM,CAAC;AACrD,wBAAc,CAAC;AACf,6BAAmB;AAAA,QACpB;AAAA,MACD;AAAA,IACD;AAGA,QAAI,YAAY,SAAS,GAAG;AAC3B,YAAM,cAAc,MAAM,KAAK;AAAA,QAC9B;AAAA,QACA,WAAW;AAAA,QACX;AAAA,QACA;AAAA,MACD;AACA,UAAI,CAAC,YAAY,GAAI,QAAO;AAE5B,2BAAqB,YAAY;AACjC,qBAAe,YAAY;AAC3B,iBAAW,KAAK,KAAK,cAAc,WAAW,MAAM,CAAC;AAAA,IACtD;AAGA,UAAM,WAA+B;AAAA,MACpC,aAAa,YAAY,SAAS;AAAA,MAClC,cAAa,oBAAI,KAAK,GAAE,YAAY;AAAA,MACpC,YAAY,WAAW;AAAA,MACvB;AAAA,MACA,QAAQ;AAAA,IACT;AAEA,UAAM,gBAAgB,IAAI,YAAY,EAAE,OAAO,KAAK,UAAU,QAAQ,CAAC;AACvE,UAAM,iBAAiB,MAAM,KAAK,QAAQ;AAAA,MACzC,GAAG,MAAM;AAAA,MACT;AAAA,MACA;AAAA,IACD;AAEA,QAAI,CAAC,eAAe,IAAI;AACvB,aAAO;AAAA,QACN,IAAI;AAAA,UACH;AAAA,UACA;AAAA,UACA,eAAe;AAAA,QAChB;AAAA,MACD;AAAA,IACD;AAEA,yBAAqB,cAAc;AAEnC,WAAO,GAAG;AAAA,MACT,eAAe,WAAW;AAAA,MAC1B,cAAc;AAAA,MACd;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,kBAAkB,YAA8B;AAC/C,UAAM,SAAS,eAAe,KAAK,SAAS;AAC5C,UAAM,OAAO,CAAC,GAAG,MAAM,gBAAgB;AACvC,aAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACpC,WAAK,KAAK,GAAG,MAAM,IAAI,KAAK,cAAc,CAAC,CAAC,EAAE;AAAA,IAC/C;AACA,WAAO;AAAA,EACR;AAAA,EAEQ,cAAc,OAAuB;AAC5C,WAAO,SAAS,OAAO,KAAK,EAAE,SAAS,GAAG,GAAG,CAAC;AAAA,EAC/C;AAAA,EAEA,MAAc,WACb,QACA,OACA,QACA,aACyC;AACzC,UAAM,eAAe,mBAAmB;AAAA,MACvC;AAAA,MACA,WA
AW;AAAA,MACX,SAAS;AAAA,IACV,CAAC;AAED,QAAI,CAAC,aAAa,IAAI;AACrB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,qCAAqC,KAAK;AAAA,UAC1C;AAAA,UACA,aAAa;AAAA,QACd;AAAA,MACD;AAAA,IACD;AAEA,UAAM,OAAO,aAAa;AAC1B,UAAM,WAAW,GAAG,MAAM,IAAI,KAAK,cAAc,KAAK,CAAC;AAEvD,UAAM,YAAY,MAAM,KAAK,QAAQ,UAAU,UAAU,MAAM,0BAA0B;AACzF,QAAI,CAAC,UAAU,IAAI;AAClB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,qCAAqC,QAAQ;AAAA,UAC7C;AAAA,UACA,UAAU;AAAA,QACX;AAAA,MACD;AAAA,IACD;AAEA,WAAO,GAAG,KAAK,UAAU;AAAA,EAC1B;AACD;;;AC1OA,IAAM,yBAAsC;AAAA,EAC3C,OAAO;AAAA,EACP,SAAS,CAAC;AACX;AAOA,IAAM,eAA6B,IAAI,OAAO,GAAG,CAAC;AAalD,eAAsB,qBACrB,aACA,SAC0C;AAC1C,MAAI,YAAY,WAAW,GAAG;AAC7B,WAAO,GAAG,IAAI,WAAW,CAAC,CAAC;AAAA,EAC5B;AAEA,MAAI;AAIH,UAAM,kBAA8B,YAAY,IAAI,CAAC,KAAK,WAAW;AAAA,MACpE,IAAI;AAAA,MACJ,OAAO,IAAI;AAAA,MACX,OAAO,IAAI;AAAA,MACX,UAAU;AAAA,MACV,SAAS,CAAC;AAAA,MACV,KAAK;AAAA,MACL,SAAS,aAAa,KAAK;AAAA,IAC5B,EAAE;AAEF,WAAO,MAAM,qBAAqB,iBAAiB,sBAAsB;AAAA,EAC1E,SAAS,KAAK;AACb,UAAM,QAAQ,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC;AAChE,WAAO,IAAI,IAAI,WAAW,qCAAqC,MAAM,OAAO,IAAI,KAAK,CAAC;AAAA,EACvF;AACD;AAWA,eAAsB,oBACrB,MACuE;AACvE,MAAI,KAAK,eAAe,GAAG;AAC1B,WAAO,GAAG,CAAC,CAAC;AAAA,EACb;AAEA,QAAM,aAAa,MAAM,oBAAoB,IAAI;AACjD,MAAI,CAAC,WAAW,IAAI;AACnB,WAAO;AAAA,MACN,IAAI;AAAA,QACH,oCAAoC,WAAW,MAAM,OAAO;AAAA,QAC5D,WAAW;AAAA,MACZ;AAAA,IACD;AAAA,EACD;AAEA,QAAM,OAAO,WAAW,MAAM,IAAI,CAAC,WAAW;AAAA,IAC7C,OAAO,MAAM;AAAA,IACb,OAAO,MAAM;AAAA,EACd,EAAE;AAEF,SAAO,GAAG,IAAI;AACf;;;AC7DO,IAAM,YAAN,MAAgB;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASjB,YAAY,SAAsB,QAA0B,QAAqB;AAChF,SAAK,UAAU;AACf,SAAK,SAAS;AACd,SAAK,SAAS;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,QACL,eACA,cACmD;AACnD,QAAI,cAAc,SAAS,KAAK,OAAO,eAAe;AACrD,aAAO,GAAG;AAAA,QACT,kBAAkB;AAAA,QAClB,oBAAoB;AAAA,QACpB,qBAAqB;AAAA,QACrB,WAAW;AAAA,QACX,cAAc;AAAA,MACf,CAAC;AAAA,IACF;AAEA,UAAM,gBAAgB,cAAc,MAAM,GAAG,KAAK,OAAO,aAAa;AAEtE,UAAM,gBAAgB,MAAM,KAAK,4BAA4B,aAAa;AAC1E,QAAI,CAAC,cAAc,GAAI,QAAO;AAE9B,UAAM,EAAE,UAAU,aAAa,UAAU,IAAI,cAAc;AAE3D,UAAM,cAAc,MAAM,KAAK,iBAAiB,UAAU,aAAa,YAAY;AACnF,QAAI,CAAC,YAAY,GAAI,QAAO;AAE5B,WAAO,GAAG;AAAA,MACT,GAAG,YAAY;AAAA,MACf,qBAAqB,cAAc;AAAA,MACnC;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAc,4BAA4B,eASxC;AACD,UAAM,YAAY,oBAAI,IAAsB;AAC5C,QAAI,YAAY;AAEhB,eAAW,OAAO,eAAe;AAChC,YAAM,YAAY,MAAM,KAAK,QAAQ,UAAU,GAAG;AAClD,UAAI,CAAC,UAAU,IAAI;AAClB,eAAO;AAAA,UACN,IAAI;AAAA,YACH,8BAA8B,GAAG;AAAA,YACjC;AAAA,YACA,UAAU;AAAA,UACX;AAAA,QACD;AAAA,MACD;AAEA,YAAM,OAAO,UAAU;AACvB,mBAAa,KAAK;AAElB,YAAM,cAAc,MAAM,oBAAoB,IAAI;AAClD,UAAI,CAAC,YAAY,IAAI;AACpB,eAAO;AAAA,UACN,IAAI;AAAA,YACH,+BAA+B,GAAG;AAAA,YAClC;AAAA,YACA,YAAY;AAAA,UACb;AAAA,QACD;AAAA,MACD;AAGA,iBAAW,SAAS,YAAY,OAAO;AACtC,cAAM,IAAI,OAAO,MAAM,OAAO,MAAM,KAAK;AACzC,YAAI,QAAQ,UAAU,IAAI,CAAC;AAE3B,YAAI,CAAC,OAAO;AACX,kBAAQ;AAAA,YACP,OAAO,MAAM;AAAA,YACb,OAAO,MAAM;AAAA,YACb,UAAU,MAAM;AAAA,YAChB,SAAS,oBAAI,IAAI;AAAA,YACjB,WAAW;AAAA,YACX,eAAe,MAAM;AAAA,YACrB,WAAW;AAAA,UACZ;AACA,oBAAU,IAAI,GAAG,KAAK;AAAA,QACvB;AAGA,YAAI,IAAI,QAAQ,MAAM,KAAK,MAAM,SAAS,IAAI,GAAG;AAChD,gBAAM,YAAY,MAAM;AACxB,gBAAM,gBAAgB,MAAM;AAC5B,gBAAM,WAAW,MAAM;AAAA,QACxB;AAEA,YAAI,MAAM,OAAO,UAAU;AAE1B,cAAI,IAAI,QAAQ,MAAM,KAAK,MAAM,SAAS,IAAI,GAAG;AAChD,kBAAM,YAAY,MAAM;AAAA,UACzB;AAAA,QACD,OAAO;AAEN,qBAAW,OAAO,MAAM,SAAS;AAChC,kBAAM,WAAW,MAAM,QAAQ,IAAI,IAAI,MAAM;AAC7C,gBAAI,CAAC,YAAY,IAAI,QAAQ,MAAM,KAAK,SAAS,GAAG,IAAI,GAAG;AAC1D,oBAAM,QAAQ,IAAI,IAAI,QAAQ;AAAA,gBAC7B,OAAO,IAAI;AAAA,gBACX,KAAK,MAAM;AAAA,cACZ,CAAC;AAAA,YACF;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAGA,UAAM,WAAuB,CAAC;AAC9B,UAAM,cAAuD,CAAC;AAE9D,eAAW,CAAC,
EAAE,KAAK,KAAK,WAAW;AAGlC,UAAI,YAAY,MAAM,YAAY;AAClC,UAAI,WAAW;AACd,mBAAW,OAAO,MAAM,QAAQ,OAAO,GAAG;AACzC,cAAI,IAAI,QAAQ,MAAM,WAAW,IAAI,GAAG,IAAI,GAAG;AAC9C,wBAAY;AACZ;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAEA,UAAI,aAAa,MAAM,QAAQ,SAAS,GAAG;AAC1C,oBAAY,KAAK,EAAE,OAAO,MAAM,OAAO,OAAO,MAAM,MAAM,CAAC;AAAA,MAC5D,OAAO;AAEN,cAAM,UAAyB,CAAC;AAChC,mBAAW,OAAO,KAAK,OAAO,SAAS;AACtC,gBAAM,WAAW,MAAM,QAAQ,IAAI,IAAI,IAAI;AAC3C,cACC,aACC,MAAM,cAAc,MAAM,IAAI,QAAQ,SAAS,KAAK,MAAM,SAAS,IAAI,IACvE;AACD,oBAAQ,KAAK,EAAE,QAAQ,IAAI,MAAM,OAAO,SAAS,MAAM,CAAC;AAAA,UACzD;AAAA,QACD;AAEA,iBAAS,KAAK;AAAA,UACb,IAAI;AAAA,UACJ,OAAO,MAAM;AAAA,UACb,OAAO,MAAM;AAAA,UACb,UAAU,MAAM;AAAA,UAChB;AAAA,UACA,KAAK,MAAM;AAAA,UACX,SAAS,MAAM;AAAA,QAChB,CAAC;AAAA,MACF;AAAA,IACD;AAEA,WAAO,GAAG,EAAE,UAAU,aAAa,UAAU,CAAC;AAAA,EAC/C;AAAA;AAAA,EAGA,MAAc,iBACb,UACA,aACA,cAMC;AACD,QAAI,eAAe;AACnB,QAAI,mBAAmB;AACvB,QAAI,qBAAqB;AAEzB,QAAI,SAAS,SAAS,GAAG;AACxB,YAAM,cAAc,MAAM,qBAAqB,UAAU,KAAK,MAAM;AACpE,UAAI,CAAC,YAAY,IAAI;AACpB,eAAO;AAAA,UACN,IAAI;AAAA,YACH;AAAA,YACA;AAAA,YACA,YAAY;AAAA,UACb;AAAA,QACD;AAAA,MACD;AAEA,YAAM,WAAW,YAAY;AAC7B,YAAM,YAAY,KAAK,kBAAkB;AACzC,YAAM,WAAW,GAAG,YAAY,SAAS,SAAS;AAElD,YAAM,YAAY,MAAM,KAAK,QAAQ;AAAA,QACpC;AAAA,QACA;AAAA,QACA;AAAA,MACD;AACA,UAAI,CAAC,UAAU,IAAI;AAClB,eAAO;AAAA,UACN,IAAI;AAAA,YACH,8BAA8B,QAAQ;AAAA,YACtC;AAAA,YACA,UAAU;AAAA,UACX;AAAA,QACD;AAAA,MACD;AAEA,sBAAgB,SAAS;AACzB,yBAAmB;AAAA,IACpB;AAEA,QAAI,YAAY,SAAS,GAAG;AAC3B,YAAM,cAAc,MAAM,qBAAqB,aAAa,KAAK,MAAM;AACvE,UAAI,CAAC,YAAY,IAAI;AACpB,eAAO;AAAA,UACN,IAAI;AAAA,YACH;AAAA,YACA;AAAA,YACA,YAAY;AAAA,UACb;AAAA,QACD;AAAA,MACD;AAEA,YAAM,aAAa,YAAY;AAC/B,YAAM,YAAY,KAAK,kBAAkB;AACzC,YAAM,aAAa,GAAG,YAAY,WAAW,SAAS;AAEtD,YAAM,YAAY,MAAM,KAAK,QAAQ;AAAA,QACpC;AAAA,QACA;AAAA,QACA;AAAA,MACD;AACA,UAAI,CAAC,UAAU,IAAI;AAClB,eAAO;AAAA,UACN,IAAI;AAAA,YACH,gCAAgC,UAAU;AAAA,YAC1C;AAAA,YACA,UAAU;AAAA,UACX;AAAA,QACD;AAAA,MACD;AAEA,sBAAgB,WAAW;AAC3B,2BAAqB;AAAA,IACtB;AAEA,WAAO,GAAG,EAAE,kBAAkB,oBAAoB,aAAa,CAAC;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,oBAA4B;AACnC,UAAM,MAAM,KAAK,IAAI;AACrB,UAAM,SAAS,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,MAAM,GAAG,CAAC;AACpD,WAAO,GAAG,GAAG,IAAI,MAAM;AAAA,EACxB;AACD;;;AChUO,IAAM,6BAAgD;AAAA,EAC5D,iBAAiB;AAAA,EACjB,aAAa,KAAK,KAAK;AAAA;AACxB;AAsBO,IAAM,oBAAN,MAAwB;AAAA,EACb;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWjB,YACC,WACA,SACA,QACA,qBACC;AACD,SAAK,YAAY;AACjB,SAAK,UAAU;AACf,SAAK,SAAS;AACd,SAAK,sBAAsB,uBAAuB;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,MAAM,IACL,eACA,cACA,eACoD;AAEpD,UAAM,mBAAmB,MAAM,KAAK,UAAU,QAAQ,eAAe,YAAY;AACjF,QAAI,CAAC,iBAAiB,IAAI;AACzB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,kCAAkC,iBAAiB,MAAM,OAAO;AAAA,UAChE;AAAA,UACA,iBAAiB;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAEA,UAAM,aAAa,iBAAiB;AAKpC,UAAM,aAAa,oBAAI,IAAY;AAGnC,UAAM,iBAAiB,WAAW;AAClC,aAAS,IAAI,gBAAgB,IAAI,cAAc,QAAQ,KAAK;AAC3D,iBAAW,IAAI,cAAc,CAAC,CAAE;AAAA,IACjC;AAGA,UAAM,mBAAmB,MAAM,KAAK,QAAQ,YAAY,YAAY;AACpE,QAAI,CAAC,iBAAiB,IAAI;AACzB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,gCAAgC,iBAAiB,MAAM,OAAO;AAAA,UAC9D;AAAA,UACA,iBAAiB;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAEA,eAAW,OAAO,iBAAiB,OAAO;AACzC,iBAAW,IAAI,IAAI,GAAG;AAAA,IACvB;AAGA,QAAI;AACJ,QAAI,KAAK,uBAAuB,WAAW,mBAAmB,GAAG;AAEhE,YAAM,eAAe,iBAAiB,MACpC,OAAO,CAAC,QAAQ,IAAI,IAAI,SAAS,UAAU,KAAK,IAAI,IAAI,SAAS,QAAQ,CAAC,EAC1E,IAAI,CAAC,QAAQ,IAAI,GAAG;AAEtB,UAAI,aAAa,SAAS,GAAG;AAG5B,cAAM,cAAc,IAAI,OAAO,KAAK,IAAI,GAAG,CAAC;AAE5C,cAAM,mBAAmB,MAAM,KAAK,oBAAoB,SAAS,cAAc,WAAW;AAE1F,YAAI,iBAAiB,IAAI;AACxB,uBAAa,iBAAiB;AAE9B,gBAAM,iBAAiB,KAAK,oBAAoB;AAAA,YAC/C,WAAW;AAAA,UACZ;AACA,qBAAW,OAAO,gBAAgB;AACjC,uBAA
W,IAAI,GAAG;AAAA,UACnB;AAAA,QACD;AAAA,MAED;AAAA,IACD;AAGA,UAAM,eAAe,MAAM,KAAK,cAAc,eAAe,UAAU;AACvE,QAAI,CAAC,aAAa,IAAI;AACrB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,sCAAsC,aAAa,MAAM,OAAO;AAAA,UAChE;AAAA,UACA,aAAa;AAAA,QACd;AAAA,MACD;AAAA,IACD;AAEA,WAAO,GAAG;AAAA,MACT;AAAA,MACA,kBAAkB;AAAA,MAClB,gBAAgB,aAAa;AAAA,MAC7B;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,MAAM,cACL,eACA,YACyC;AACzC,UAAM,aAAa,MAAM,KAAK,QAAQ,YAAY,aAAa;AAC/D,QAAI,CAAC,WAAW,IAAI;AACnB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,8CAA8C,WAAW,MAAM,OAAO;AAAA,UACtE;AAAA,UACA,WAAW;AAAA,QACZ;AAAA,MACD;AAAA,IACD;AAEA,UAAM,MAAM,KAAK,IAAI;AACrB,UAAM,aAAa,KAAK,YAAY,WAAW,OAAO,YAAY,GAAG;AAErE,QAAI,WAAW,WAAW,GAAG;AAC5B,aAAO,GAAG,CAAC;AAAA,IACZ;AAEA,UAAM,eAAe,MAAM,KAAK,QAAQ,cAAc,UAAU;AAChE,QAAI,CAAC,aAAa,IAAI;AACrB,aAAO;AAAA,QACN,IAAI;AAAA,UACH,oCAAoC,aAAa,MAAM,OAAO;AAAA,UAC9D;AAAA,UACA,aAAa;AAAA,QACd;AAAA,MACD;AAAA,IACD;AAEA,WAAO,GAAG,WAAW,MAAM;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,SAAuB,YAAyB,KAAuB;AAC1F,UAAM,UAAoB,CAAC;AAC3B,eAAW,OAAO,SAAS;AAC1B,UAAI,WAAW,IAAI,IAAI,GAAG,GAAG;AAC5B;AAAA,MACD;AACA,YAAM,MAAM,MAAM,IAAI,aAAa,QAAQ;AAC3C,UAAI,OAAO,KAAK,OAAO,aAAa;AACnC,gBAAQ,KAAK,IAAI,GAAG;AAAA,MACrB;AAAA,IACD;AACA,WAAO;AAAA,EACR;AACD;;;ACrNO,IAAM,2BAA4C;AAAA,EACxD,YAAY;AAAA,EACZ,SAAS;AACV;AAUO,IAAM,sBAAN,MAA0B;AAAA,EACf;AAAA,EACA;AAAA,EACA;AAAA,EAET,QAA+C;AAAA,EAC/C,UAAU;AAAA,EACV,kBAA4E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASpF,YACC,QACA,cACA,SAAmC,CAAC,GACnC;AACD,SAAK,SAAS;AACd,SAAK,eAAe;AACpB,SAAK,SAAS,EAAE,GAAG,0BAA0B,GAAG,OAAO;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,YAAqB;AACxB,WAAO,KAAK;AAAA,EACb;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,QAAqC;AACpC,QAAI,CAAC,KAAK,OAAO,SAAS;AACzB,aAAO,IAAI,IAAI,cAAc,yBAAyB,oBAAoB,CAAC;AAAA,IAC5E;AAEA,QAAI,KAAK,SAAS;AACjB,aAAO,IAAI,IAAI,cAAc,gCAAgC,2BAA2B,CAAC;AAAA,IAC1F;AAEA,SAAK,UAAU;AACf,SAAK,QAAQ,YAAY,MAAM;AAC9B,WAAK,KAAK,KAAK;AAAA,IAChB,GAAG,KAAK,OAAO,UAAU;AAEzB,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,OAA6C;AAClD,QAAI,CAAC,KAAK,SAAS;AAClB,aAAO,IAAI,IAAI,cAAc,4BAA4B,uBAAuB,CAAC;AAAA,IAClF;AAEA,QAAI,KAAK,UAAU,MAAM;AACxB,oBAAc,KAAK,KAAK;AACxB,WAAK,QAAQ;AAAA,IACd;AAEA,SAAK,UAAU;AAGf,QAAI,KAAK,oBAAoB,MAAM;AAClC,YAAM,KAAK;AACX,WAAK,kBAAkB;AAAA,IACxB;AAEA,WAAO,GAAG,MAAS;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,UAA6D;AAClE,QAAI,KAAK,oBAAoB,MAAM;AAClC,aAAO,IAAI,IAAI,cAAc,4CAA4C,gBAAgB,CAAC;AAAA,IAC3F;AAEA,WAAO,KAAK,mBAAmB;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,OAAsB;AACnC,QAAI,KAAK,oBAAoB,MAAM;AAClC;AAAA,IACD;AAEA,UAAM,KAAK,mBAAmB;AAAA,EAC/B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAc,qBAAwE;AACrF,UAAM,aAAa,MAAM,KAAK,YAAY;AAC1C,QAAI,CAAC,WAAW,IAAI;AACnB,aAAO;AAAA,IACR;AAEA,UAAM,OAAO,WAAW;AACxB,QAAI,SAAS,MAAM;AAClB,aAAO,GAAG;AAAA,QACT,YAAY;AAAA,UACX,kBAAkB;AAAA,UAClB,oBAAoB;AAAA,UACpB,qBAAqB;AAAA,UACrB,WAAW;AAAA,UACX,cAAc;AAAA,QACf;AAAA,QACA,kBAAkB;AAAA,QAClB,gBAAgB;AAAA,MACjB,CAAC;AAAA,IACF;AAEA,UAAM,UAAU,KAAK,OAAO,IAAI,KAAK,eAAe,KAAK,cAAc,KAAK,aAAa;AACzF,SAAK,kBAAkB;AAEvB,QAAI;AACH,YAAM,SAAS,MAAM;AACrB,aAAO;AAAA,IACR,UAAE;AACD,WAAK,kBAAkB;AAAA,IACxB;AAAA,EACD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,cAAsE;AACnF,QAAI;AACH,YAAM,OAAO,MAAM,KAAK,aAAa;AACrC,aAAO,GAAG,IAAI;AAAA,IACf,SAAS,OAAO;AACf,aAAO;AAAA,QACN,IAAI;AAAA,UACH,yBAAyB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,UAC/E;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;;;ACzMO,IAAM,4BAA8C;AAAA,EAC1D,eAAe;AAAA,EACf,eAAe;AAAA,EACf,qBAAqB,MAAM,OAAO;AAAA;AACnC;","names":[]}
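
The source map above embeds the compactor package's TypeScript sources (checkpoint generation, equality deletes, LWW compaction, maintenance, and scheduling). For orientation, the sketch below wires those classes together using the constructor and method signatures visible in `sourcesContent`; the `lakesync/compactor` and `lakesync/adapter` import paths, the gateway id, the delta file keys, and the storage prefixes are illustrative assumptions, not taken from the package.

```ts
import {
  Compactor,
  CheckpointGenerator,
  MaintenanceRunner,
  CompactionScheduler,
  DEFAULT_COMPACTION_CONFIG,
  DEFAULT_MAINTENANCE_CONFIG,
  DEFAULT_CHECKPOINT_CONFIG,
} from "lakesync/compactor";                          // assumed subpath export
import type { LakeAdapter } from "lakesync/adapter";  // assumed subpath export
import type { TableSchema } from "lakesync";          // assumed root export

declare const adapter: LakeAdapter; // object-store adapter (R2/S3/...)
declare const schema: TableSchema;  // schema of the table being compacted

// Compactor merges delta Parquet files via column-level LWW; CheckpointGenerator
// re-encodes the resulting base files as proto chunks; MaintenanceRunner chains
// compaction, checkpointing and orphan cleanup; CompactionScheduler runs it on a timer.
const compactor = new Compactor(adapter, DEFAULT_COMPACTION_CONFIG, schema);
const checkpoints = new CheckpointGenerator(adapter, schema, "gateway-1", DEFAULT_CHECKPOINT_CONFIG);
const runner = new MaintenanceRunner(compactor, adapter, DEFAULT_MAINTENANCE_CONFIG, checkpoints);

// The task provider is asked before every tick what to compact; returning null skips the tick.
const scheduler = new CompactionScheduler(
  runner,
  async () => ({
    deltaFileKeys: ["deltas/0001.parquet", "deltas/0002.parquet"], // discovered by the caller
    outputPrefix: "tables/todos/base",
    storagePrefix: "tables/todos",
  }),
  { intervalMs: 60_000 },
);

scheduler.start(); // Result<void, LakeSyncError>; each tick calls MaintenanceRunner.run(...)
```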

package/dist/coordinator-CxckTzYW.d.ts
@@ -0,0 +1,396 @@
import { L as LakeSyncError, R as Result, H as HLCTimestamp } from './result-CojzlFE2.js';
import { A as Action, i as ActionPush, j as ActionResponse, d as ActionDiscovery, k as ActionResult, e as ActionErrorResult } from './types-Bs-QyOe-.js';
import { H as HLC } from './hlc-DiD8QNG3.js';
import { R as RowDelta, b as SyncPush, S as SyncPull, c as SyncResponse } from './types-V_jVu2sA.js';

/** Configuration for opening a local database */
interface DbConfig {
    /** Database name (used for identification and future persistence) */
    name: string;
    /** Storage backend — auto-detected if not set */
    backend?: "idb" | "memory";
}
/** Error type for database operations */
declare class DbError extends LakeSyncError {
    constructor(message: string, cause?: Error);
}
/** Synchronous transaction interface wrapping sql.js operations */
interface Transaction {
    /** Execute a SQL statement with optional parameters */
    exec(sql: string, params?: unknown[]): Result<void, DbError>;
    /** Query rows from the database with optional parameters */
    query<T>(sql: string, params?: unknown[]): Result<T[], DbError>;
}

/** Resolved storage backend after auto-detection */
type ResolvedBackend = "idb" | "memory";
/**
 * Local SQLite database backed by sql.js (SQLite compiled to WASM).
 *
 * Supports two persistence backends:
 * - `"memory"` — purely in-memory, data lost on close
 * - `"idb"` — snapshots persisted to IndexedDB between sessions
 *
 * When no backend is specified, auto-detects: uses `"idb"` if
 * `indexedDB` is available, otherwise falls back to `"memory"`.
 */
declare class LocalDB {
    #private;
    private constructor();
    /** The database name from configuration */
    get name(): string;
    /** The resolved storage backend for this instance */
    get backend(): ResolvedBackend;
    /**
     * Open a new LocalDB instance.
     *
     * Initialises the sql.js WASM engine and creates a database. When the
     * backend is `"idb"`, any existing snapshot is loaded from IndexedDB.
     * If no backend is specified, auto-detects based on `indexedDB` availability.
     */
    static open(config: DbConfig): Promise<Result<LocalDB, DbError>>;
    /**
     * Execute a SQL statement (INSERT, UPDATE, DELETE, CREATE, etc.).
     *
     * Returns `Ok(void)` on success, or `Err(DbError)` on failure.
     */
    exec(sql: string, params?: unknown[]): Promise<Result<void, DbError>>;
    /**
     * Query the database and return typed rows as an array of objects.
     *
     * Each row is mapped from sql.js column-array format into a keyed object.
     */
    query<T>(sql: string, params?: unknown[]): Promise<Result<T[], DbError>>;
    /**
     * Execute a function within a database transaction.
     *
     * Begins a transaction, executes the callback with a `Transaction` object,
     * commits on success, or rolls back if the callback throws.
     */
    transaction<T>(fn: (tx: Transaction) => T): Promise<Result<T, DbError>>;
    /**
     * Export the current database state and persist it to IndexedDB.
     *
     * No-op when the backend is `"memory"`.
     */
    save(): Promise<Result<void, DbError>>;
    /**
     * Close the database and release resources.
     *
     * When the backend is `"idb"`, the database snapshot is persisted
     * to IndexedDB before closing.
     */
    close(): Promise<void>;
}
|
|
85
|
+
|
|
86
|
+
/** Status of an action queue entry. */
|
|
87
|
+
type ActionQueueEntryStatus = "pending" | "sending" | "acked" | "failed";
|
|
88
|
+
/** A single entry in the action queue. */
|
|
89
|
+
interface ActionQueueEntry {
|
|
90
|
+
/** Unique entry identifier. */
|
|
91
|
+
id: string;
|
|
92
|
+
/** The action to be executed. */
|
|
93
|
+
action: Action;
|
|
94
|
+
/** Current processing status. */
|
|
95
|
+
status: ActionQueueEntryStatus;
|
|
96
|
+
/** Timestamp when the entry was created. */
|
|
97
|
+
createdAt: number;
|
|
98
|
+
/** Number of times this entry has been retried. */
|
|
99
|
+
retryCount: number;
|
|
100
|
+
/** Earliest time (ms since epoch) this entry should be retried. Undefined = immediately. */
|
|
101
|
+
retryAfter?: number;
|
|
102
|
+
}
|
|
103
|
+
/** Outbox-pattern action queue interface. */
|
|
104
|
+
interface ActionQueue {
|
|
105
|
+
/** Add an action to the queue. */
|
|
106
|
+
push(action: Action): Promise<Result<ActionQueueEntry, LakeSyncError>>;
|
|
107
|
+
/** Peek at pending entries (ordered by createdAt). */
|
|
108
|
+
peek(limit: number): Promise<Result<ActionQueueEntry[], LakeSyncError>>;
|
|
109
|
+
/** Mark entries as currently being sent. */
|
|
110
|
+
markSending(ids: string[]): Promise<Result<void, LakeSyncError>>;
|
|
111
|
+
/** Acknowledge successful delivery (removes entries). */
|
|
112
|
+
ack(ids: string[]): Promise<Result<void, LakeSyncError>>;
|
|
113
|
+
/** Negative acknowledge — reset to pending with incremented retryCount. */
|
|
114
|
+
nack(ids: string[]): Promise<Result<void, LakeSyncError>>;
|
|
115
|
+
/** Get the number of pending + sending entries. */
|
|
116
|
+
depth(): Promise<Result<number, LakeSyncError>>;
|
|
117
|
+
/** Remove all entries. */
|
|
118
|
+
clear(): Promise<Result<void, LakeSyncError>>;
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
/** Status of a queue entry */
|
|
122
|
+
type QueueEntryStatus = "pending" | "sending" | "acked";
|
|
123
|
+
/** A single entry in the sync queue */
|
|
124
|
+
interface QueueEntry {
|
|
125
|
+
/** Unique entry identifier */
|
|
126
|
+
id: string;
|
|
127
|
+
/** The delta to be synced */
|
|
128
|
+
delta: RowDelta;
|
|
129
|
+
/** Current processing status */
|
|
130
|
+
status: QueueEntryStatus;
|
|
131
|
+
/** Timestamp when the entry was created */
|
|
132
|
+
createdAt: number;
|
|
133
|
+
/** Number of times this entry has been retried */
|
|
134
|
+
retryCount: number;
|
|
135
|
+
/** Earliest time (ms since epoch) this entry should be retried. Undefined = immediately. */
|
|
136
|
+
retryAfter?: number;
|
|
137
|
+
}
|
|
138
|
+
/** Outbox-pattern sync queue interface */
|
|
139
|
+
interface SyncQueue {
|
|
140
|
+
/** Add a delta to the queue */
|
|
141
|
+
push(delta: RowDelta): Promise<Result<QueueEntry, LakeSyncError>>;
|
|
142
|
+
/** Peek at pending entries (ordered by createdAt) */
|
|
143
|
+
peek(limit: number): Promise<Result<QueueEntry[], LakeSyncError>>;
|
|
144
|
+
/** Mark entries as currently being sent */
|
|
145
|
+
markSending(ids: string[]): Promise<Result<void, LakeSyncError>>;
|
|
146
|
+
/** Acknowledge successful delivery (removes entries) */
|
|
147
|
+
ack(ids: string[]): Promise<Result<void, LakeSyncError>>;
|
|
148
|
+
/** Negative acknowledge — reset to pending with incremented retryCount */
|
|
149
|
+
nack(ids: string[]): Promise<Result<void, LakeSyncError>>;
|
|
150
|
+
/** Get the number of pending + sending entries */
|
|
151
|
+
depth(): Promise<Result<number, LakeSyncError>>;
|
|
152
|
+
/** Remove all entries */
|
|
153
|
+
clear(): Promise<Result<void, LakeSyncError>>;
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
/**
|
|
157
|
+
* Tracks local mutations (insert, update, delete) and produces
|
|
158
|
+
* column-level deltas that are pushed to a SyncQueue.
|
|
159
|
+
*
|
|
160
|
+
* Each write operation:
|
|
161
|
+
* 1. Applies the change to the local SQLite database
|
|
162
|
+
* 2. Extracts a RowDelta describing the change
|
|
163
|
+
* 3. Pushes the delta to the sync queue for eventual upstream delivery
|
|
164
|
+
*/
|
|
165
|
+
declare class SyncTracker {
|
|
166
|
+
private readonly db;
|
|
167
|
+
private readonly queue;
|
|
168
|
+
private readonly hlc;
|
|
169
|
+
private readonly clientId;
|
|
170
|
+
private schemaCache;
|
|
171
|
+
constructor(db: LocalDB, queue: SyncQueue, hlc: HLC, clientId: string);
|
|
172
|
+
private getCachedSchema;
|
|
173
|
+
/**
|
|
174
|
+
* Insert a new row into the specified table.
|
|
175
|
+
*
|
|
176
|
+
* Writes the row to SQLite and pushes an INSERT delta to the queue.
|
|
177
|
+
*
|
|
178
|
+
* @param table - The target table name
|
|
179
|
+
* @param rowId - The unique row identifier
|
|
180
|
+
* @param data - Column name/value pairs for the new row
|
|
181
|
+
* @returns Ok on success, or Err with a LakeSyncError on failure
|
|
182
|
+
*/
|
|
183
|
+
insert(table: string, rowId: string, data: Record<string, unknown>): Promise<Result<void, LakeSyncError>>;
|
|
184
|
+
/**
|
|
185
|
+
* Update an existing row in the specified table.
|
|
186
|
+
*
|
|
187
|
+
* Reads the current row state, applies partial updates, and pushes
|
|
188
|
+
* an UPDATE delta containing only the changed columns.
|
|
189
|
+
*
|
|
190
|
+
* @param table - The target table name
|
|
191
|
+
* @param rowId - The unique row identifier
|
|
192
|
+
* @param data - Column name/value pairs to update (partial)
|
|
193
|
+
* @returns Ok on success, Err if the row is not found or on failure
|
|
194
|
+
*/
|
|
195
|
+
update(table: string, rowId: string, data: Record<string, unknown>): Promise<Result<void, LakeSyncError>>;
|
|
196
|
+
/**
|
|
197
|
+
* Delete a row from the specified table.
|
|
198
|
+
*
|
|
199
|
+
* Reads the current row state for delta extraction, removes the row
|
|
200
|
+
* from SQLite, and pushes a DELETE delta to the queue.
|
|
201
|
+
*
|
|
202
|
+
* @param table - The target table name
|
|
203
|
+
* @param rowId - The unique row identifier
|
|
204
|
+
* @returns Ok on success, Err if the row is not found or on failure
|
|
205
|
+
*/
|
|
206
|
+
delete(table: string, rowId: string): Promise<Result<void, LakeSyncError>>;
|
|
207
|
+
/**
|
|
208
|
+
* Query the local database.
|
|
209
|
+
*
|
|
210
|
+
* Pass-through to the underlying LocalDB query method.
|
|
211
|
+
*
|
|
212
|
+
* @param sql - The SQL query to execute
|
|
213
|
+
* @param params - Optional bind parameters
|
|
214
|
+
* @returns The query results as typed rows, or a DbError on failure
|
|
215
|
+
*/
|
|
216
|
+
query<T>(sql: string, params?: unknown[]): Promise<Result<T[], DbError>>;
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
/** Response from a checkpoint download */
|
|
220
|
+
interface CheckpointResponse {
|
|
221
|
+
/** All deltas from the checkpoint (filtered by server) */
|
|
222
|
+
deltas: RowDelta[];
|
|
223
|
+
/** Snapshot HLC marking the point-in-time of this checkpoint */
|
|
224
|
+
snapshotHlc: HLCTimestamp;
|
|
225
|
+
}
|
|
226
|
+
/** Abstract transport layer for communicating with a remote sync gateway */
|
|
227
|
+
interface SyncTransport {
|
|
228
|
+
/** Push local deltas to the gateway */
|
|
229
|
+
push(msg: SyncPush): Promise<Result<{
|
|
230
|
+
serverHlc: HLCTimestamp;
|
|
231
|
+
accepted: number;
|
|
232
|
+
}, LakeSyncError>>;
|
|
233
|
+
/** Pull remote deltas from the gateway */
|
|
234
|
+
pull(msg: SyncPull): Promise<Result<SyncResponse, LakeSyncError>>;
|
|
235
|
+
/** Download checkpoint for initial sync. Returns null if no checkpoint available. */
|
|
236
|
+
checkpoint?(): Promise<Result<CheckpointResponse | null, LakeSyncError>>;
|
|
237
|
+
/** Execute imperative actions against external systems via the gateway. */
|
|
238
|
+
executeAction?(msg: ActionPush): Promise<Result<ActionResponse, LakeSyncError>>;
|
|
239
|
+
/** Discover available connectors and their supported action types. */
|
|
240
|
+
describeActions?(): Promise<Result<ActionDiscovery, LakeSyncError>>;
|
|
241
|
+
/** Whether this transport supports real-time server push. */
|
|
242
|
+
readonly supportsRealtime?: boolean;
|
|
243
|
+
/** Register callback for server-initiated broadcasts. */
|
|
244
|
+
onBroadcast?(callback: (deltas: RowDelta[], serverHlc: HLCTimestamp) => void): void;
|
|
245
|
+
/** Connect persistent transport (e.g. open WebSocket). */
|
|
246
|
+
connect?(): void;
|
|
247
|
+
/** Disconnect persistent transport (e.g. close WebSocket). */
|
|
248
|
+
disconnect?(): void;
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
/** Controls which operations syncOnce() / startAutoSync() performs */
|
|
252
|
+
type SyncMode = "full" | "pushOnly" | "pullOnly";
|
|
253
|
+
/** Events emitted by SyncCoordinator */
|
|
254
|
+
interface SyncEvents {
|
|
255
|
+
/** Fired after remote deltas are applied locally. Count is the number of deltas applied. */
|
|
256
|
+
onChange: (count: number) => void;
|
|
257
|
+
/** Fired after a successful sync cycle (push + pull) completes. */
|
|
258
|
+
onSyncComplete: () => void;
|
|
259
|
+
/** Fired when a sync error occurs. */
|
|
260
|
+
onError: (error: Error) => void;
|
|
261
|
+
/** Fired when an action completes (success or non-retryable failure). */
|
|
262
|
+
onActionComplete: (actionId: string, result: ActionResult | ActionErrorResult) => void;
|
|
263
|
+
}
|
|
264
|
+
/** Optional configuration for dependency injection (useful for testing) */
|
|
265
|
+
interface SyncCoordinatorConfig {
|
|
266
|
+
/** Sync queue implementation. Defaults to IDBQueue. */
|
|
267
|
+
queue?: SyncQueue;
|
|
268
|
+
/** HLC instance. Defaults to a new HLC(). */
|
|
269
|
+
hlc?: HLC;
|
|
270
|
+
/** Client identifier. Defaults to a random UUID. */
|
|
271
|
+
clientId?: string;
|
|
272
|
+
/** Maximum retries before dead-lettering an entry. Defaults to 10. */
|
|
273
|
+
maxRetries?: number;
|
|
274
|
+
/** Sync mode. Defaults to "full" (push + pull). */
|
|
275
|
+
syncMode?: SyncMode;
|
|
276
|
+
/** Auto-sync interval in milliseconds. Defaults to 10000 (10 seconds). */
|
|
277
|
+
autoSyncIntervalMs?: number;
|
|
278
|
+
/** Polling interval when realtime transport is active (heartbeat). Defaults to 60000 (60 seconds). */
|
|
279
|
+
realtimeHeartbeatMs?: number;
|
|
280
|
+
/** Action queue for imperative command execution. */
|
|
281
|
+
actionQueue?: ActionQueue;
|
|
282
|
+
/** Maximum retries for actions before dead-lettering. Defaults to 5. */
|
|
283
|
+
maxActionRetries?: number;
|
|
284
|
+
}
|
|
285
|
+
/**
|
|
286
|
+
* Coordinates local mutations (via SyncTracker) with gateway push/pull.
|
|
287
|
+
*
|
|
288
|
+
* Uses a {@link SyncTransport} abstraction to communicate with the gateway,
|
|
289
|
+
* allowing both in-process (LocalTransport) and remote (HttpTransport) usage.
|
|
290
|
+
*/
|
|
291
|
+
declare class SyncCoordinator {
|
|
292
|
+
readonly tracker: SyncTracker;
|
|
293
|
+
private readonly queue;
|
|
294
|
+
private readonly hlc;
|
|
295
|
+
private readonly transport;
|
|
296
|
+
private readonly db;
|
|
297
|
+
private readonly resolver;
|
|
298
|
+
private readonly _clientId;
|
|
299
|
+
private readonly maxRetries;
|
|
300
|
+
private readonly syncMode;
|
|
301
|
+
private readonly autoSyncIntervalMs;
|
|
302
|
+
private readonly realtimeHeartbeatMs;
|
|
303
|
+
private lastSyncedHlc;
|
|
304
|
+
private _lastSyncTime;
|
|
305
|
+
private syncIntervalId;
|
|
306
|
+
private visibilityHandler;
|
|
307
|
+
private syncing;
|
|
308
|
+
private readonly actionQueue;
|
|
309
|
+
private readonly maxActionRetries;
|
|
310
|
+
private listeners;
|
|
311
|
+
constructor(db: LocalDB, transport: SyncTransport, config?: SyncCoordinatorConfig);
|
|
312
|
+
/** Register an event listener */
|
|
313
|
+
on<K extends keyof SyncEvents>(event: K, listener: SyncEvents[K]): void;
|
|
314
|
+
/** Remove an event listener */
|
|
315
|
+
off<K extends keyof SyncEvents>(event: K, listener: SyncEvents[K]): void;
|
|
316
|
+
private emit;
|
|
317
|
+
/** Push pending deltas to the gateway via the transport */
|
|
318
|
+
pushToGateway(): Promise<void>;
|
|
319
|
+
/**
|
|
320
|
+
* Pull deltas from a named adapter source.
|
|
321
|
+
*
|
|
322
|
+
* Convenience wrapper around {@link pullFromGateway} that passes the
|
|
323
|
+
* `source` field through to the gateway, triggering an adapter-sourced
|
|
324
|
+
* pull instead of a buffer pull.
|
|
325
|
+
*/
|
|
326
|
+
pullFrom(source: string): Promise<number>;
|
|
327
|
+
/** Pull remote deltas from the gateway and apply them */
|
|
328
|
+
pullFromGateway(source?: string): Promise<number>;
|
|
329
|
+
/**
|
|
330
|
+
* Handle a server-initiated broadcast of deltas.
|
|
331
|
+
*
|
|
332
|
+
* Applies the deltas using the same conflict resolution and idempotency
|
|
333
|
+
* logic as a regular pull. Advances `lastSyncedHlc` and emits `onChange`.
|
|
334
|
+
*/
|
|
335
|
+
private handleBroadcast;
|
|
336
|
+
/** Get the queue depth */
|
|
337
|
+
queueDepth(): Promise<number>;
|
|
338
|
+
/** Get the client identifier */
|
|
339
|
+
get clientId(): string;
|
|
340
|
+
/** Get the last successful sync time, or null if never synced */
|
|
341
|
+
get lastSyncTime(): Date | null;
|
|
342
|
+
/**
|
|
343
|
+
* Start auto-sync: periodic interval + visibility change handler.
|
|
344
|
+
* Synchronises (push + pull) on tab focus and every 10 seconds.
|
|
345
|
+
*/
|
|
346
|
+
startAutoSync(): void;
|
|
347
|
+
/** Register a visibility change listener to sync on tab focus. */
|
|
348
|
+
private setupVisibilitySync;
|
|
349
|
+
/**
|
|
350
|
+
* Perform initial sync via checkpoint download.
|
|
351
|
+
*
|
|
352
|
+
* Called on first sync when `lastSyncedHlc` is zero. Downloads the
|
|
353
|
+
* server's checkpoint (which is pre-filtered by JWT claims server-side),
|
|
354
|
+
* applies the deltas locally, and advances the sync cursor to the
|
|
355
|
+
* snapshot's HLC. If no checkpoint is available or the transport does
|
|
356
|
+
* not support checkpoints, falls back to incremental pull.
|
|
357
|
+
*/
|
|
358
|
+
private initialSync;
|
|
359
|
+
/** Perform a single sync cycle (push + pull + actions, depending on syncMode). */
|
|
360
|
+
syncOnce(): Promise<void>;
|
|
361
|
+
/**
|
|
362
|
+
* Submit an action for execution.
|
|
363
|
+
*
|
|
364
|
+
* Pushes the action to the ActionQueue and triggers immediate processing.
|
|
365
|
+
* The action will be sent to the gateway on the next sync cycle or
|
|
366
|
+
* immediately if not currently syncing.
|
|
367
|
+
*
|
|
368
|
+
* @param params - Partial action (connector, actionType, params). ActionId and HLC are generated.
|
|
369
|
+
*/
|
|
370
|
+
executeAction(params: {
|
|
371
|
+
connector: string;
|
|
372
|
+
actionType: string;
|
|
373
|
+
params: Record<string, unknown>;
|
|
374
|
+
idempotencyKey?: string;
|
|
375
|
+
}): Promise<void>;
|
|
376
|
+
/**
|
|
377
|
+
* Process pending actions from the action queue.
|
|
378
|
+
*
|
|
379
|
+
* Peeks at pending entries, sends them to the gateway via
|
|
380
|
+
* `transport.executeAction()`, and acks/nacks based on the result.
|
|
381
|
+
* Dead-letters entries after `maxActionRetries` failures.
|
|
382
|
+
* Triggers an immediate `syncOnce()` on success to pull fresh state.
|
|
383
|
+
*/
|
|
384
|
+
processActionQueue(): Promise<void>;
|
|
385
|
+
/**
|
|
386
|
+
* Discover available connectors and their supported action types.
|
|
387
|
+
*
|
|
388
|
+
* Delegates to the transport's `describeActions()` method. Returns
|
|
389
|
+
* empty connectors when the transport does not support discovery.
|
|
390
|
+
*/
|
|
391
|
+
describeActions(): Promise<Result<ActionDiscovery, LakeSyncError>>;
|
|
392
|
+
/** Stop auto-sync and clean up listeners */
|
|
393
|
+
stopAutoSync(): void;
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
export { type ActionQueue as A, type CheckpointResponse as C, DbError as D, LocalDB as L, type QueueEntry as Q, type SyncQueue as S, type Transaction as T, type ActionQueueEntry as a, type SyncTransport as b, type ActionQueueEntryStatus as c, type DbConfig as d, type QueueEntryStatus as e, SyncCoordinator as f, type SyncCoordinatorConfig as g, type SyncEvents as h, type SyncMode as i, SyncTracker as j };
|
|
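The hunk above declares the client-side sync stack: `LocalDB` (sql.js storage), `SyncTracker` (delta extraction), the outbox queues, and `SyncCoordinator` (push/pull orchestration). A minimal wiring sketch follows; the `lakesync/client` import path, the `Result` unwrapping shape (`ok`/`value`/`error`), and the injected `transport` value are assumptions not confirmed by this diff — only the method signatures come from the declarations above.

```ts
// Hypothetical usage sketch based only on the declarations above.
// Assumed: "lakesync/client" re-exports these symbols and Result exposes { ok, value, error }.
import { LocalDB, SyncCoordinator, type SyncTransport } from "lakesync/client";

async function startClient(transport: SyncTransport): Promise<void> {
	// Open the local sql.js database; auto-detects IndexedDB vs in-memory.
	const opened = await LocalDB.open({ name: "app-db" });
	if (!opened.ok) throw opened.error;
	const db = opened.value;

	// Create the table the tracker will write to.
	await db.exec("CREATE TABLE IF NOT EXISTS todos (id TEXT PRIMARY KEY, title TEXT, done INTEGER)");

	// The coordinator pushes queued deltas and pulls/applies remote ones.
	const coordinator = new SyncCoordinator(db, transport, { syncMode: "full" });
	coordinator.on("onChange", (count) => console.log(`applied ${count} remote deltas`));
	coordinator.on("onError", (err) => console.error("sync failed", err));
	coordinator.startAutoSync(); // periodic sync plus sync on tab focus

	// A local write: persisted to SQLite and enqueued as an INSERT delta.
	await coordinator.tracker.insert("todos", crypto.randomUUID(), { title: "ship 0.1.0", done: 0 });

	// Shut down: stop timers/listeners, then close (idb backend persists a snapshot).
	coordinator.stopAutoSync();
	await db.close();
}
```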
@@ -0,0 +1,29 @@
+import { R as Result, A as AdapterError, H as HLCTimestamp } from './result-CojzlFE2.js';
+import { R as RowDelta, T as TableSchema } from './types-V_jVu2sA.js';
+
+/** Configuration for a database adapter connection. */
+interface DatabaseAdapterConfig {
+    /** Connection string (e.g. postgres://user:pass@host/db) */
+    connectionString: string;
+}
+/**
+ * Abstract interface for SQL database storage operations.
+ * Alternative to LakeAdapter for small-data backends (Postgres, MySQL, etc).
+ */
+interface DatabaseAdapter {
+    /** Insert deltas into the database in a single batch. Idempotent via deltaId uniqueness. */
+    insertDeltas(deltas: RowDelta[]): Promise<Result<void, AdapterError>>;
+    /** Query deltas with HLC greater than the given timestamp, optionally filtered by table. */
+    queryDeltasSince(hlc: HLCTimestamp, tables?: string[]): Promise<Result<RowDelta[], AdapterError>>;
+    /** Get the latest merged state for a specific row. Returns null if the row doesn't exist. */
+    getLatestState(table: string, rowId: string): Promise<Result<Record<string, unknown> | null, AdapterError>>;
+    /** Ensure the database schema matches the given TableSchema. Creates/alters tables as needed. */
+    ensureSchema(schema: TableSchema): Promise<Result<void, AdapterError>>;
+    /** Close the database connection and release resources. */
+    close(): Promise<void>;
+}
+declare function lakeSyncTypeToBigQuery(type: TableSchema["columns"][number]["type"]): string;
+/** Type guard to distinguish DatabaseAdapter from LakeAdapter at runtime. */
+declare function isDatabaseAdapter(adapter: unknown): adapter is DatabaseAdapter;
+
+export { type DatabaseAdapter as D, type DatabaseAdapterConfig as a, isDatabaseAdapter as i, lakeSyncTypeToBigQuery as l };
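This hunk declares `DatabaseAdapter`, the SQL-backed alternative to `LakeAdapter`, plus the `isDatabaseAdapter` runtime guard. A sketch of how a caller might branch on the adapter kind is below; the `lakesync/adapter` and `lakesync` import paths and the `Result` shape (`ok`/`error`) are assumptions — only the guard and method signatures come from the declarations above.

```ts
// Hypothetical dispatch sketch using the runtime type guard declared above.
// Assumed: import paths and a Result shape of { ok, value, error }.
import { isDatabaseAdapter } from "lakesync/adapter";
import type { RowDelta, TableSchema } from "lakesync";

async function writeDeltas(adapter: unknown, schema: TableSchema, deltas: RowDelta[]): Promise<void> {
	if (isDatabaseAdapter(adapter)) {
		// Small-data path: make sure the SQL schema exists, then batch-insert.
		const ensured = await adapter.ensureSchema(schema);
		if (!ensured.ok) throw ensured.error;

		// insertDeltas is documented as idempotent on deltaId, so retries are safe.
		const inserted = await adapter.insertDeltas(deltas);
		if (!inserted.ok) throw inserted.error;
	} else {
		// Otherwise the caller is holding a LakeAdapter (object-store/Parquet path).
	}
}
```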