lakesync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -0
- package/dist/adapter.d.ts +369 -0
- package/dist/adapter.js +39 -0
- package/dist/adapter.js.map +1 -0
- package/dist/analyst.d.ts +268 -0
- package/dist/analyst.js +495 -0
- package/dist/analyst.js.map +1 -0
- package/dist/auth-CAVutXzx.d.ts +30 -0
- package/dist/base-poller-Qo_SmCZs.d.ts +82 -0
- package/dist/catalogue.d.ts +65 -0
- package/dist/catalogue.js +17 -0
- package/dist/catalogue.js.map +1 -0
- package/dist/chunk-4ARO6KTJ.js +257 -0
- package/dist/chunk-4ARO6KTJ.js.map +1 -0
- package/dist/chunk-5YOFCJQ7.js +1115 -0
- package/dist/chunk-5YOFCJQ7.js.map +1 -0
- package/dist/chunk-7D4SUZUM.js +38 -0
- package/dist/chunk-7D4SUZUM.js.map +1 -0
- package/dist/chunk-BNJOGBYK.js +335 -0
- package/dist/chunk-BNJOGBYK.js.map +1 -0
- package/dist/chunk-ICNT7I3K.js +1180 -0
- package/dist/chunk-ICNT7I3K.js.map +1 -0
- package/dist/chunk-P5DRFKIT.js +413 -0
- package/dist/chunk-P5DRFKIT.js.map +1 -0
- package/dist/chunk-X3RO5SYJ.js +880 -0
- package/dist/chunk-X3RO5SYJ.js.map +1 -0
- package/dist/client.d.ts +428 -0
- package/dist/client.js +2048 -0
- package/dist/client.js.map +1 -0
- package/dist/compactor.d.ts +342 -0
- package/dist/compactor.js +793 -0
- package/dist/compactor.js.map +1 -0
- package/dist/coordinator-CxckTzYW.d.ts +396 -0
- package/dist/db-types-BR6Kt4uf.d.ts +29 -0
- package/dist/gateway-D5SaaMvT.d.ts +337 -0
- package/dist/gateway-server.d.ts +306 -0
- package/dist/gateway-server.js +4663 -0
- package/dist/gateway-server.js.map +1 -0
- package/dist/gateway.d.ts +196 -0
- package/dist/gateway.js +79 -0
- package/dist/gateway.js.map +1 -0
- package/dist/hlc-DiD8QNG3.d.ts +70 -0
- package/dist/index.d.ts +245 -0
- package/dist/index.js +102 -0
- package/dist/index.js.map +1 -0
- package/dist/json-dYtqiL0F.d.ts +18 -0
- package/dist/nessie-client-DrNikVXy.d.ts +160 -0
- package/dist/parquet.d.ts +78 -0
- package/dist/parquet.js +15 -0
- package/dist/parquet.js.map +1 -0
- package/dist/proto.d.ts +434 -0
- package/dist/proto.js +67 -0
- package/dist/proto.js.map +1 -0
- package/dist/react.d.ts +147 -0
- package/dist/react.js +224 -0
- package/dist/react.js.map +1 -0
- package/dist/resolver-C3Wphi6O.d.ts +10 -0
- package/dist/result-CojzlFE2.d.ts +64 -0
- package/dist/src-QU2YLPZY.js +383 -0
- package/dist/src-QU2YLPZY.js.map +1 -0
- package/dist/src-WYBF5LOI.js +102 -0
- package/dist/src-WYBF5LOI.js.map +1 -0
- package/dist/src-WZNPHANQ.js +426 -0
- package/dist/src-WZNPHANQ.js.map +1 -0
- package/dist/types-Bs-QyOe-.d.ts +143 -0
- package/dist/types-DAQL_vU_.d.ts +118 -0
- package/dist/types-DSC_EiwR.d.ts +45 -0
- package/dist/types-V_jVu2sA.d.ts +73 -0
- package/package.json +119 -0
|
@@ -0,0 +1,342 @@
|
|
|
1
|
+
import { H as HLCTimestamp, R as Result, L as LakeSyncError, F as FlushError } from './result-CojzlFE2.js';
|
|
2
|
+
import { T as TableSchema } from './types-V_jVu2sA.js';
|
|
3
|
+
import { L as LakeAdapter } from './types-DSC_EiwR.js';
|
|
4
|
+
|
|
5
|
+
/** Configuration for checkpoint generation. */
interface CheckpointConfig {
    /**
     * Max raw proto bytes per chunk. Tune to the serving runtime's memory
     * budget: a chunk must fit in memory when it is encoded and served.
     */
    chunkBytes: number;
}
/** Default checkpoint configuration (16 MB chunks for a 128 MB DO). */
declare const DEFAULT_CHECKPOINT_CONFIG: CheckpointConfig;
|
|
12
|
+
/** Result of a checkpoint generation operation. */
interface CheckpointResult {
    /** Number of chunk files written to storage. */
    chunksWritten: number;
    /** Total bytes written across all chunks. */
    bytesWritten: number;
    /** HLC timestamp of the snapshot this checkpoint represents. */
    snapshotHlc: HLCTimestamp;
}
|
|
21
|
+
/** Manifest stored alongside checkpoint chunks. */
interface CheckpointManifest {
    /**
     * Snapshot HLC as a decimal string. Stored as a string because the HLC
     * is a bigint, which is not JSON-serialisable directly.
     */
    snapshotHlc: string;
    /** ISO 8601 timestamp of when the checkpoint was generated. */
    generatedAt: string;
    /** Number of chunk files referenced by this manifest. */
    chunkCount: number;
    /** Total number of deltas across all chunks. */
    totalDeltas: number;
    /** Ordered list of chunk file names; order matters for replay. */
    chunks: string[];
}
|
|
34
|
+
/**
 * Generates checkpoint files from base Parquet files.
 *
 * Reads compacted base files, encodes ALL rows as proto SyncResponse chunks
 * sized to a configurable byte budget, and writes them to storage. Chunks
 * contain all rows (not per-user); filtering happens at serve time.
 */
declare class CheckpointGenerator {
    private readonly adapter;
    private readonly gatewayId;
    private readonly config;
    /**
     * @param adapter - Lake adapter used to read base files and write chunks
     * @param _schema - Table schema (currently unused; reserved, per the underscore naming)
     * @param gatewayId - Identifier of the owning gateway
     * @param config - Chunk sizing configuration; defaults to DEFAULT_CHECKPOINT_CONFIG — TODO confirm default
     */
    constructor(adapter: LakeAdapter, _schema: TableSchema, gatewayId: string, config?: CheckpointConfig);
    /**
     * Generate checkpoint chunks from base Parquet files.
     *
     * Reads each base file sequentially, accumulates deltas, and flushes
     * chunks when the estimated byte size exceeds the configured threshold.
     *
     * @param baseFileKeys - Storage keys of the base Parquet files
     * @param snapshotHlc - The HLC timestamp representing this snapshot point
     * @returns A Result containing the CheckpointResult, or a LakeSyncError on failure
     */
    generate(baseFileKeys: string[], snapshotHlc: HLCTimestamp): Promise<Result<CheckpointResult, LakeSyncError>>;
    /**
     * Get all storage keys produced by a checkpoint generation.
     * Useful for adding to activeKeys in maintenance to prevent orphan removal.
     *
     * @param chunkCount - Number of chunks produced by the generation
     * @returns The storage keys associated with those chunks
     */
    getCheckpointKeys(chunkCount: number): string[];
    // Derives the storage file name for a chunk — implementation not visible in this declaration.
    private chunkFileName;
    // Flushes accumulated deltas as one chunk file — implementation not visible in this declaration.
    private flushChunk;
}
|
|
65
|
+
|
|
66
|
+
/** Configuration for the compaction process. */
interface CompactionConfig {
    /** Minimum number of delta files before compaction triggers. */
    minDeltaFiles: number;
    /** Maximum number of delta files to compact in one pass. */
    maxDeltaFiles: number;
    /** Target base file size in bytes. */
    targetFileSizeBytes: number;
}
/** Default compaction configuration values. */
declare const DEFAULT_COMPACTION_CONFIG: CompactionConfig;
|
|
77
|
+
/** Result of a compaction operation. */
interface CompactionResult {
    /** Number of base data files written. */
    baseFilesWritten: number;
    /** Number of equality delete files written. */
    deleteFilesWritten: number;
    /** Number of delta files that were compacted. */
    deltaFilesCompacted: number;
    /** Total bytes read during compaction. */
    bytesRead: number;
    /** Total bytes written during compaction. */
    bytesWritten: number;
}
|
|
90
|
+
|
|
91
|
+
/**
 * Compacts delta files into consolidated base data files and equality delete files.
 *
 * Reads delta Parquet files from the lake adapter, resolves all deltas per row
 * using LWW (last-writer-wins based on HLC ordering), then writes the final
 * materialised state back as base files and delete files.
 */
declare class Compactor {
    private readonly adapter;
    private readonly config;
    private readonly schema;
    /**
     * Create a new Compactor instance.
     *
     * @param adapter - The lake adapter for reading/writing Parquet files
     * @param config - Compaction configuration (thresholds and limits)
     * @param schema - The table schema describing user-defined columns
     */
    constructor(adapter: LakeAdapter, config: CompactionConfig, schema: TableSchema);
    /**
     * Compact delta files into base data files.
     *
     * Reads delta files from storage, resolves all deltas per row using LWW,
     * and writes consolidated base files + equality delete files.
     *
     * @param deltaFileKeys - Storage keys of the delta Parquet files to compact
     * @param outputPrefix - Prefix for the output base/delete file keys
     * @returns A Result containing the CompactionResult, or a LakeSyncError on failure
     */
    compact(deltaFileKeys: string[], outputPrefix: string): Promise<Result<CompactionResult, LakeSyncError>>;
    /**
     * Read delta files one at a time and incrementally resolve to final row state.
     *
     * Memory usage is O(unique rows x columns) rather than O(total deltas),
     * since each file's deltas are processed and discarded before reading the next.
     */
    private readAndResolveIncrementally;
    /** Write base Parquet file(s) for live rows and equality delete file(s) for deleted rows. */
    private writeOutputFiles;
    /**
     * Generate a timestamp string for output file naming.
     * Uses the current wall clock time with a random suffix for uniqueness.
     */
    private generateTimestamp;
}
|
|
136
|
+
|
|
137
|
+
/**
 * Write an Iceberg equality delete file.
 *
 * Contains only the equality columns (table + rowId) needed to identify
 * deleted rows. The file is encoded as a Parquet file using synthetic
 * DELETE RowDeltas with no user columns.
 *
 * @param deletedRows - Array of row identifiers (table + rowId) for deleted rows
 * @param _schema - The table schema (reserved for future use with custom equality columns)
 * @returns A Result containing the Parquet bytes, or a FlushError on failure
 */
declare function writeEqualityDeletes(deletedRows: Array<{
    table: string;
    rowId: string;
}>, _schema: TableSchema): Promise<Result<Uint8Array, FlushError>>;
|
|
152
|
+
/**
 * Read an equality delete file back into row identifiers.
 *
 * Deserialises a Parquet equality delete file and extracts the
 * table + rowId pairs that identify deleted rows. Inverse of
 * {@link writeEqualityDeletes}.
 *
 * @param data - The Parquet file bytes to read
 * @returns A Result containing the row identifiers, or a FlushError on failure
 */
declare function readEqualityDeletes(data: Uint8Array): Promise<Result<Array<{
    table: string;
    rowId: string;
}>, FlushError>>;
|
|
165
|
+
|
|
166
|
+
/** Configuration for the maintenance cycle. */
interface MaintenanceConfig {
    /** Number of recent snapshots to retain. */
    retainSnapshots: number;
    /**
     * Minimum age (ms) before orphaned files can be deleted.
     * Guards against deleting files that an in-progress flush just wrote.
     */
    orphanAgeMs: number;
}
/** Default maintenance configuration values. */
declare const DEFAULT_MAINTENANCE_CONFIG: MaintenanceConfig;
|
|
175
|
+
/** Report produced by a full maintenance cycle. */
interface MaintenanceReport {
    /** Result of the compaction step. */
    compaction: CompactionResult;
    /** Number of expired snapshots removed. */
    snapshotsExpired: number;
    /** Number of orphaned files removed. */
    orphansRemoved: number;
    /** Result of checkpoint generation; absent when no generator was configured. */
    checkpoint?: CheckpointResult;
}
|
|
186
|
+
/**
 * Runs a full maintenance cycle: compact, expire snapshots, and clean orphans.
 *
 * The runner orchestrates the three maintenance phases in order:
 * 1. **Compact** — merge delta files into consolidated base/delete files
 * 2. **Expire** — (reserved for future snapshot expiry logic)
 * 3. **Clean** — remove orphaned files that are no longer referenced
 */
declare class MaintenanceRunner {
    private readonly compactor;
    private readonly adapter;
    private readonly config;
    private readonly checkpointGenerator;
    /**
     * Create a new MaintenanceRunner instance.
     *
     * @param compactor - The compactor instance for merging delta files
     * @param adapter - The lake adapter for storage operations
     * @param config - Maintenance configuration (retention and age thresholds)
     * @param checkpointGenerator - Optional checkpoint generator; when provided,
     *   checkpoints are generated after successful compaction
     */
    constructor(compactor: Compactor, adapter: LakeAdapter, config: MaintenanceConfig, checkpointGenerator?: CheckpointGenerator);
    /**
     * Run the full maintenance cycle: compact, expire, and clean.
     *
     * Compacts delta files into base/delete files, then removes orphaned
     * storage objects that are no longer referenced by any active data.
     * Files younger than `orphanAgeMs` are never deleted to avoid races
     * with in-progress flush operations.
     *
     * @param deltaFileKeys - Storage keys of the delta Parquet files to compact
     * @param outputPrefix - Prefix for the output base/delete file keys
     * @param storagePrefix - Prefix under which all related storage files live
     * @returns A Result containing the MaintenanceReport, or a LakeSyncError on failure
     */
    run(deltaFileKeys: string[], outputPrefix: string, storagePrefix: string): Promise<Result<MaintenanceReport, LakeSyncError>>;
    /**
     * Delete orphaned files not referenced by any active data.
     *
     * Lists all files under the given storage prefix, compares each
     * against the set of active keys, and deletes files that are both
     * unreferenced and older than `orphanAgeMs`. This age guard
     * prevents deletion of files created by in-progress flush operations.
     *
     * @param storagePrefix - The storage prefix to scan for orphaned files
     * @param activeKeys - Set of storage keys that must be retained
     * @returns A Result containing the count of deleted files, or a LakeSyncError on failure
     */
    removeOrphans(storagePrefix: string, activeKeys: Set<string>): Promise<Result<number, LakeSyncError>>;
    /**
     * Identify orphaned file keys from a list of storage objects.
     *
     * A file is considered an orphan if it is not in the active keys set
     * and its last modification time is older than the configured orphan age.
     */
    private findOrphans;
}
|
|
244
|
+
|
|
245
|
+
/** Parameters for a single maintenance run. */
interface MaintenanceTask {
    /** Storage keys of the delta Parquet files to compact. */
    deltaFileKeys: string[];
    /** Prefix for the output base/delete file keys. */
    outputPrefix: string;
    /** Prefix under which all related storage files live. */
    storagePrefix: string;
}
/**
 * Provider function that resolves the maintenance task parameters for each run.
 * Called before every scheduled tick to determine what files to compact.
 * Return `null` to skip this tick (e.g. when there is nothing to compact).
 */
type MaintenanceTaskProvider = () => Promise<MaintenanceTask | null>;
|
|
260
|
+
/** Configuration for the compaction scheduler. */
interface SchedulerConfig {
    /** Interval between maintenance runs in milliseconds (default 60000). */
    intervalMs: number;
    /** Whether the scheduler is enabled (default true). */
    enabled: boolean;
}
/** Default scheduler configuration values. */
declare const DEFAULT_SCHEDULER_CONFIG: SchedulerConfig;
|
|
269
|
+
/**
 * Manages interval-based compaction scheduling.
 *
 * Wraps a {@link MaintenanceRunner} and executes maintenance cycles on a
 * configurable interval. The scheduler is safe against concurrent runs:
 * if a previous tick is still in progress when the next fires, the tick
 * is silently skipped.
 */
declare class CompactionScheduler {
    private readonly runner;
    private readonly taskProvider;
    private readonly config;
    // Interval timer handle; presumably null/undefined when stopped — implementation not visible here.
    private timer;
    // True while a maintenance run is in flight; used to skip overlapping ticks.
    private running;
    // Promise of the in-flight run, awaited by stop() before returning.
    private inFlightPromise;
    /**
     * Create a new CompactionScheduler instance.
     *
     * @param runner - The maintenance runner to execute on each tick
     * @param taskProvider - Function that provides maintenance task parameters for each run
     * @param config - Scheduler configuration (interval and enabled flag);
     *   partial — unspecified fields fall back to defaults
     */
    constructor(runner: MaintenanceRunner, taskProvider: MaintenanceTaskProvider, config?: Partial<SchedulerConfig>);
    /**
     * Whether the scheduler is currently active (timer is ticking).
     */
    get isRunning(): boolean;
    /**
     * Start the scheduler interval timer.
     *
     * Begins executing maintenance runs at the configured interval.
     * If the scheduler is already running or disabled, returns an error.
     *
     * @returns A Result indicating success or a descriptive error
     */
    start(): Result<void, LakeSyncError>;
    /**
     * Stop the scheduler and wait for any in-progress run to finish.
     *
     * Clears the interval timer and, if a maintenance run is currently
     * executing, awaits its completion before returning.
     *
     * @returns A Result indicating success or a descriptive error
     */
    stop(): Promise<Result<void, LakeSyncError>>;
    /**
     * Manually trigger a single maintenance run.
     *
     * Useful for testing or administrative purposes. If a run is already
     * in progress, skips and returns an error.
     *
     * @returns A Result containing the MaintenanceReport, or a LakeSyncError on failure
     */
    runOnce(): Promise<Result<MaintenanceReport, LakeSyncError>>;
    /**
     * Internal tick handler called by the interval timer.
     * Skips if a previous run is still in progress.
     */
    private tick;
    /**
     * Execute a single maintenance cycle.
     *
     * Calls the task provider to get parameters, then runs the maintenance
     * runner. Tracks the in-flight promise so concurrent runs are prevented.
     */
    private executeMaintenance;
    /**
     * Resolve the maintenance task from the provider, wrapping any thrown
     * exceptions into a Result error.
     */
    private resolveTask;
}
|
|
341
|
+
|
|
342
|
+
export { type CheckpointConfig, CheckpointGenerator, type CheckpointManifest, type CheckpointResult, type CompactionConfig, type CompactionResult, CompactionScheduler, Compactor, DEFAULT_CHECKPOINT_CONFIG, DEFAULT_COMPACTION_CONFIG, DEFAULT_MAINTENANCE_CONFIG, DEFAULT_SCHEDULER_CONFIG, type MaintenanceConfig, type MaintenanceReport, MaintenanceRunner, type MaintenanceTask, type MaintenanceTaskProvider, type SchedulerConfig, readEqualityDeletes, writeEqualityDeletes };
|