@514labs/moose-lib 0.6.459 → 0.6.460

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1327 @@
1
+ import { ClickHouseClient, ResultSet, CommandResult } from '@clickhouse/client';
2
+ import { IJsonSchemaCollection, tags } from 'typia';
3
+ import { Pattern, TagBase } from 'typia/lib/tags';
4
+ import { Readable } from 'node:stream';
5
+
6
+ /**
7
+ * Quote a ClickHouse identifier with backticks if not already quoted.
8
+ * Backticks allow special characters (e.g., hyphens) in identifiers.
9
+ */
10
+ declare const quoteIdentifier: (name: string) => string;
11
+ type IdentifierBrandedString = string & {
12
+ readonly __identifier_brand?: unique symbol;
13
+ };
14
+ type NonIdentifierBrandedString = string & {
15
+ readonly __identifier_brand?: unique symbol;
16
+ };
17
+ /**
18
+ * Values supported by SQL engine.
19
+ */
20
+ type Value = NonIdentifierBrandedString | number | boolean | Date | [string, string];
21
+ /**
22
+ * Supported value or SQL instance.
23
+ */
24
+ type RawValue = Value | Sql;
25
+ /**
26
+ * Sql template tag interface with attached helper methods.
27
+ */
28
+ interface SqlTemplateTag {
29
+ /**
30
+ * @deprecated Use `sql.statement` for full SQL statements or `sql.fragment` for SQL fragments.
31
+ */
32
+ (strings: readonly string[], ...values: readonly (RawValue | Column | OlapTable<any> | View)[]): Sql;
33
+ /**
34
+ * Template literal tag for complete SQL statements (e.g. SELECT, INSERT, CREATE).
35
+ * Produces a Sql instance with `isFragment = false`.
36
+ */
37
+ statement(strings: readonly string[], ...values: readonly (RawValue | Column | OlapTable<any> | View)[]): Sql;
38
+ /**
39
+ * Template literal tag for SQL fragments (e.g. expressions, conditions, partial clauses).
40
+ * Produces a Sql instance with `isFragment = true`.
41
+ */
42
+ fragment(strings: readonly string[], ...values: readonly (RawValue | Column | OlapTable<any> | View)[]): Sql;
43
+ /**
44
+ * Join an array of Sql fragments with a separator.
45
+ * @param fragments - Array of Sql fragments to join
46
+ * @param separator - Optional separator string (defaults to ", ")
47
+ */
48
+ join(fragments: Sql[], separator?: string): Sql;
49
+ /**
50
+ * Create raw SQL from a string without parameterization.
51
+ * WARNING: SQL injection risk if used with untrusted input.
52
+ */
53
+ raw(text: string): Sql;
54
+ }
55
+ declare const sql: SqlTemplateTag;
56
+ /**
57
+ * A SQL instance can be nested within each other to build SQL strings.
58
+ */
59
+ declare class Sql {
60
+ readonly values: Value[];
61
+ readonly strings: string[];
62
+ readonly isFragment: boolean | undefined;
63
+ constructor(rawStrings: readonly string[], rawValues: readonly (RawValue | Column | OlapTable<any> | View | Sql)[], isFragment?: boolean);
64
+ /**
65
+ * Append another Sql fragment, returning a new Sql instance.
66
+ */
67
+ append(other: Sql): Sql;
68
+ }
69
+ declare const toStaticQuery: (sql: Sql) => string;
70
+ declare const toQuery: (sql: Sql) => [string, {
71
+ [pN: string]: any;
72
+ }];
73
+ /**
74
+ * Build a display-only SQL string with values inlined for logging/debugging.
75
+ * Does not alter execution behavior; use toQuery for actual execution.
76
+ */
77
+ declare const toQueryPreview: (sql: Sql) => string;
78
+ declare const getValueFromParameter: (value: any) => any;
79
+ declare function createClickhouseParameter(parameterIndex: number, value: Value): string;
80
+ /**
81
+ * Convert the JS type (source is JSON format by API query parameter) to the corresponding ClickHouse type for generating named placeholder of parameterized query.
82
+ * Only support to convert number to Int or Float, boolean to Bool, string to String, other types will convert to String.
83
+ * If exist complex type e.g: object, Array, null, undefined, Date, Record.. etc, just convert to string type by ClickHouse function in SQL.
84
+ * ClickHouse support converting string to other types function.
85
+ * Please see Each section of the https://clickhouse.com/docs/en/sql-reference/functions and https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions
86
+ * @param value
87
+ * @returns 'Float', 'Int', 'Bool', 'String'
88
+ */
89
+ declare const mapToClickHouseType: (value: Value) => string;
90
+
91
+ type EnumValues = {
92
+ name: string;
93
+ value: {
94
+ Int: number;
95
+ };
96
+ }[] | {
97
+ name: string;
98
+ value: {
99
+ String: string;
100
+ };
101
+ }[];
102
+ type DataEnum = {
103
+ name: string;
104
+ values: EnumValues;
105
+ };
106
+ type Nested = {
107
+ name: string;
108
+ columns: Column[];
109
+ jwt: boolean;
110
+ };
111
+ type ArrayType = {
112
+ elementType: DataType;
113
+ elementNullable: boolean;
114
+ };
115
+ type NamedTupleType = {
116
+ fields: Array<[string, DataType]>;
117
+ };
118
+ type MapType = {
119
+ keyType: DataType;
120
+ valueType: DataType;
121
+ };
122
+ type JsonOptions = {
123
+ max_dynamic_paths?: number;
124
+ max_dynamic_types?: number;
125
+ typed_paths?: Array<[string, DataType]>;
126
+ skip_paths?: string[];
127
+ skip_regexps?: string[];
128
+ };
129
+ type DataType = string | DataEnum | ArrayType | Nested | NamedTupleType | MapType | JsonOptions | {
130
+ nullable: DataType;
131
+ };
132
+ interface Column {
133
+ name: IdentifierBrandedString;
134
+ data_type: DataType;
135
+ required: boolean;
136
+ unique: false;
137
+ primary_key: boolean;
138
+ default: string | null;
139
+ materialized: string | null;
140
+ alias: string | null;
141
+ ttl: string | null;
142
+ codec: string | null;
143
+ annotations: [string, any][];
144
+ comment: string | null;
145
+ }
146
+
147
+ /**
148
+ * Type definition for typia validation functions
149
+ */
150
+ interface TypiaValidators<T> {
151
+ /** Typia validator function: returns { success: boolean, data?: T, errors?: any[] } */
152
+ validate?: (data: unknown) => {
153
+ success: boolean;
154
+ data?: T;
155
+ errors?: any[];
156
+ };
157
+ /** Typia assert function: throws on validation failure, returns T on success */
158
+ assert?: (data: unknown) => T;
159
+ /** Typia is function: returns boolean indicating if data matches type T */
160
+ is?: (data: unknown) => data is T;
161
+ }
162
+ /**
163
+ * Base class for all typed Moose dmv2 resources (OlapTable, Stream, etc.).
164
+ * Handles the storage and injection of schema information (JSON schema and Column array)
165
+ * provided by the Moose compiler plugin.
166
+ *
167
+ * @template T The data type (interface or type alias) defining the schema of the resource.
168
+ * @template C The specific configuration type for the resource (e.g., OlapConfig, StreamConfig).
169
+ */
170
+ declare class TypedBase<T, C> {
171
+ /** The JSON schema representation of type T. Injected by the compiler plugin. */
172
+ schema: IJsonSchemaCollection.IV3_1;
173
+ /** The name assigned to this resource instance. */
174
+ name: string;
175
+ /** A dictionary mapping column names (keys of T) to their Column definitions. */
176
+ columns: {
177
+ [columnName in keyof Required<T>]: Column;
178
+ };
179
+ /** An array containing the Column definitions for this resource. Injected by the compiler plugin. */
180
+ columnArray: Column[];
181
+ /** The configuration object specific to this resource type. */
182
+ config: C;
183
+ /** Typia validation functions for type T. Injected by the compiler plugin for OlapTable. */
184
+ validators?: TypiaValidators<T>;
185
+ /** Optional metadata for the resource, always present as an object. */
186
+ metadata: {
187
+ [key: string]: any;
188
+ };
189
+ /**
190
+ * Whether this resource allows extra fields beyond the defined columns.
191
+ * When true, extra fields in payloads are passed through to streaming functions.
192
+ * Injected by the compiler plugin when the type has an index signature.
193
+ */
194
+ allowExtraFields: boolean;
195
+ /**
196
+ * @internal Constructor intended for internal use by subclasses and the compiler plugin.
197
+ * It expects the schema and columns to be provided, typically injected by the compiler.
198
+ *
199
+ * @param name The name for the resource instance.
200
+ * @param config The configuration object for the resource.
201
+ * @param schema The JSON schema for the resource's data type T (injected).
202
+ * @param columns The array of Column definitions for T (injected).
203
+ * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).
204
+ */
205
+ constructor(name: string, config: C, schema?: IJsonSchemaCollection.IV3_1, columns?: Column[], validators?: TypiaValidators<T>, allowExtraFields?: boolean);
206
+ }
207
+
208
+ type ClickHousePrecision<P extends number> = {
209
+ _clickhouse_precision?: P;
210
+ };
211
+ declare const DecimalRegex: "^-?\\d+(\\.\\d+)?$";
212
+ type ClickHouseDecimal<P extends number, S extends number> = {
213
+ _clickhouse_precision?: P;
214
+ _clickhouse_scale?: S;
215
+ } & Pattern<typeof DecimalRegex>;
216
+ type ClickHouseFixedStringSize<N extends number> = {
217
+ _clickhouse_fixed_string_size?: N;
218
+ };
219
+ /**
220
+ * FixedString(N) - Fixed-length string of exactly N bytes.
221
+ *
222
+ * ClickHouse stores exactly N bytes, padding shorter values with null bytes.
223
+ * Values exceeding N bytes will throw an exception.
224
+ *
225
+ * Use for binary data: hashes, IP addresses, UUIDs, MAC addresses.
226
+ *
227
+ * @example
228
+ * interface BinaryData {
229
+ * md5_hash: string & FixedString<16>; // 16-byte MD5
230
+ * sha256_hash: string & FixedString<32>; // 32-byte SHA256
231
+ * }
232
+ */
233
+ type FixedString<N extends number> = string & ClickHouseFixedStringSize<N>;
234
+ type ClickHouseByteSize<N extends number> = {
235
+ _clickhouse_byte_size?: N;
236
+ };
237
+ type LowCardinality = {
238
+ _LowCardinality?: true;
239
+ };
240
+ type DateTime = Date;
241
+ type DateTime64<P extends number> = Date & ClickHousePrecision<P>;
242
+ type DateTimeString = string & tags.Format<"date-time">;
243
+ /**
244
+ * JS Date objects cannot hold microsecond precision.
245
+ * Use string as the runtime type to avoid losing information.
246
+ */
247
+ type DateTime64String<P extends number> = string & tags.Format<"date-time"> & ClickHousePrecision<P>;
248
+ type Float32 = number & ClickHouseFloat<"float32">;
249
+ type Float64 = number & ClickHouseFloat<"float64">;
250
+ type Int8 = number & ClickHouseInt<"int8">;
251
+ type Int16 = number & ClickHouseInt<"int16">;
252
+ type Int32 = number & ClickHouseInt<"int32">;
253
+ type Int64 = number & ClickHouseInt<"int64">;
254
+ type UInt8 = number & ClickHouseInt<"uint8">;
255
+ type UInt16 = number & ClickHouseInt<"uint16">;
256
+ type UInt32 = number & ClickHouseInt<"uint32">;
257
+ type UInt64 = number & ClickHouseInt<"uint64">;
258
+ type Decimal<P extends number, S extends number> = string & ClickHouseDecimal<P, S>;
259
+ /**
260
+ * Attach compression codec to a column type.
261
+ *
262
+ * Any valid ClickHouse codec expression is allowed. ClickHouse validates the codec at runtime.
263
+ *
264
+ * @template T The base data type
265
+ * @template CodecExpr The codec expression (single codec or chain)
266
+ *
267
+ * @example
268
+ * interface Metrics {
269
+ * // Single codec
270
+ * log_blob: string & ClickHouseCodec<"ZSTD(3)">;
271
+ *
272
+ * // Codec chain (processed left-to-right)
273
+ * timestamp: Date & ClickHouseCodec<"Delta, LZ4">;
274
+ * temperature: number & ClickHouseCodec<"Gorilla, ZSTD">;
275
+ *
276
+ * // Specialized codecs
277
+ * counter: number & ClickHouseCodec<"DoubleDelta">;
278
+ *
279
+ * // Can combine with other annotations
280
+ * count: UInt64 & ClickHouseCodec<"DoubleDelta, LZ4">;
281
+ * }
282
+ */
283
+ type ClickHouseCodec<CodecExpr extends string> = {
284
+ _clickhouse_codec?: CodecExpr;
285
+ };
286
+ type ClickHouseFloat<Value extends "float32" | "float64"> = tags.Type<Value extends "float32" ? "float" : "double">;
287
+ type ClickHouseInt<Value extends "int8" | "int16" | "int32" | "int64" | "uint8" | "uint16" | "uint32" | "uint64"> = Value extends "int32" | "int64" | "uint32" | "uint64" ? tags.Type<Value> : TagBase<{
288
+ target: "number";
289
+ kind: "type";
290
+ value: Value;
291
+ validate: Value extends "int8" ? "-128 <= $input && $input <= 127" : Value extends "int16" ? "-32768 <= $input && $input <= 32767" : Value extends "uint8" ? "0 <= $input && $input <= 255" : Value extends "uint16" ? "0 <= $input && $input <= 65535" : never;
292
+ exclusive: true;
293
+ schema: {
294
+ type: "integer";
295
+ };
296
+ }>;
297
+ /**
298
+ * By default, nested objects map to the `Nested` type in clickhouse.
299
+ * Write `nestedObject: AnotherInterfaceType & ClickHouseNamedTuple`
300
+ * to map AnotherInterfaceType to the named tuple type.
301
+ */
302
+ type ClickHouseNamedTuple = {
303
+ _clickhouse_mapped_type?: "namedTuple";
304
+ };
305
+ type ClickHouseJson<maxDynamicPaths extends number | undefined = undefined, maxDynamicTypes extends number | undefined = undefined, skipPaths extends string[] = [], skipRegexes extends string[] = []> = {
306
+ _clickhouse_mapped_type?: "JSON";
307
+ _clickhouse_json_settings?: {
308
+ maxDynamicPaths?: maxDynamicPaths;
309
+ maxDynamicTypes?: maxDynamicTypes;
310
+ skipPaths?: skipPaths;
311
+ skipRegexes?: skipRegexes;
312
+ };
313
+ };
314
+ type ClickHousePoint = [number, number] & {
315
+ _clickhouse_mapped_type?: "Point";
316
+ };
317
+ type ClickHouseRing = ClickHousePoint[] & {
318
+ _clickhouse_mapped_type?: "Ring";
319
+ };
320
+ type ClickHouseLineString = ClickHousePoint[] & {
321
+ _clickhouse_mapped_type?: "LineString";
322
+ };
323
+ type ClickHouseMultiLineString = ClickHouseLineString[] & {
324
+ _clickhouse_mapped_type?: "MultiLineString";
325
+ };
326
+ type ClickHousePolygon = ClickHouseRing[] & {
327
+ _clickhouse_mapped_type?: "Polygon";
328
+ };
329
+ type ClickHouseMultiPolygon = ClickHousePolygon[] & {
330
+ _clickhouse_mapped_type?: "MultiPolygon";
331
+ };
332
+ /**
333
+ * typia may have trouble handling this type.
334
+ * In which case, use {@link WithDefault} as a workaround
335
+ *
336
+ * @example
337
+ * { field: number & ClickHouseDefault<"0"> }
338
+ */
339
+ type ClickHouseDefault<SqlExpression extends string> = {
340
+ _clickhouse_default?: SqlExpression;
341
+ };
342
+ /**
343
+ * @example
344
+ * {
345
+ * ...
346
+ * timestamp: Date;
347
+ * debugMessage: string & ClickHouseTTL<"timestamp + INTERVAL 1 WEEK">;
348
+ * }
349
+ */
350
+ type ClickHouseTTL<SqlExpression extends string> = {
351
+ _clickhouse_ttl?: SqlExpression;
352
+ };
353
+ /**
354
+ * ClickHouse MATERIALIZED column annotation.
355
+ * The column value is computed at INSERT time and physically stored.
356
+ * Cannot be explicitly inserted by users.
357
+ *
358
+ * @example
359
+ * interface Events {
360
+ * eventTime: DateTime;
361
+ * // Extract date component - computed and stored at insert time
362
+ * eventDate: Date & ClickHouseMaterialized<"toDate(event_time)">;
363
+ *
364
+ * userId: string;
365
+ * // Precompute hash for fast lookups
366
+ * userHash: UInt64 & ClickHouseMaterialized<"cityHash64(userId)">;
367
+ * }
368
+ *
369
+ * @remarks
370
+ * - MATERIALIZED and DEFAULT are mutually exclusive
371
+ * - Can be combined with ClickHouseCodec for compression
372
+ * - Changing the expression modifies the column in-place (existing values preserved)
373
+ */
374
+ type ClickHouseMaterialized<SqlExpression extends string> = {
375
+ _clickhouse_materialized?: SqlExpression;
376
+ };
377
+ /**
378
+ * ClickHouse ALIAS column annotation.
379
+ * The column value is computed on-the-fly at SELECT time and NOT physically stored.
380
+ * Cannot be explicitly inserted by users.
381
+ *
382
+ * @example
383
+ * interface Events {
384
+ * eventTime: DateTime;
385
+ * // Computed at query time, not stored on disk
386
+ * eventDate: Date & ClickHouseAlias<"toDate(event_time)">;
387
+ *
388
+ * firstName: string;
389
+ * lastName: string;
390
+ * // Virtual computed column
391
+ * fullName: string & ClickHouseAlias<"concat(first_name, ' ', last_name)">;
392
+ * }
393
+ *
394
+ * @remarks
395
+ * - ALIAS, MATERIALIZED, and DEFAULT are mutually exclusive
396
+ * - ALIAS columns are NOT stored on disk (saves storage, costs CPU at query time)
397
+ * - Cannot be used in ORDER BY, PRIMARY KEY, or PARTITION BY
398
+ * - Can be combined with ClickHouseCodec (though rarely useful since not stored)
399
+ */
400
+ type ClickHouseAlias<SqlExpression extends string> = {
401
+ _clickhouse_alias?: SqlExpression;
402
+ };
403
+ /**
404
+ * See also {@link ClickHouseDefault}
405
+ *
406
+ * @example{ updated_at: WithDefault<Date, "now()"> }
407
+ */
408
+ type WithDefault<T, _SqlExpression extends string> = T;
409
+ type IsComputed<T> = "_clickhouse_alias" extends keyof T ? true : "_clickhouse_materialized" extends keyof T ? true : false;
410
+ type HasDefault<T> = "_clickhouse_default" extends keyof T ? true : false;
411
+ /** Keys whose columns are ALIAS or MATERIALIZED — excluded from inserts entirely. */
412
+ type ComputedKeys<T> = {
413
+ [K in keyof T]: IsComputed<T[K]> extends true ? K : never;
414
+ }[keyof T];
415
+ /** Keys whose columns carry a ClickHouseDefault expression — optional during inserts. */
416
+ type DefaultKeys<T> = {
417
+ [K in keyof T]: HasDefault<T[K]> extends true ? K : never;
418
+ }[keyof T];
419
+ /**
420
+ * Derive the insert-safe shape of a model:
421
+ * - ALIAS / MATERIALIZED columns are **omitted** (ClickHouse computes them).
422
+ * - DEFAULT columns become **optional** (ClickHouse fills them when absent).
423
+ * - All other columns remain **required**.
424
+ *
425
+ * @example
426
+ * interface Events {
427
+ * id: Key<string>;
428
+ * timestamp: Date;
429
+ * eventDate: Date & ClickHouseAlias<"toDate(timestamp)">;
430
+ * createdAt: Date & ClickHouseDefault<"now()">;
431
+ * }
432
+ *
433
+ * // Insertable<Events> ≡ { id: string; timestamp: Date; createdAt?: Date }
434
+ * const table = new OlapTable<Events>("events");
435
+ * await table.insert([{ id: "1", timestamp: new Date() }]);
436
+ */
437
+ type Insertable<T> = {
438
+ [K in Exclude<keyof T, ComputedKeys<T> | DefaultKeys<T>>]: T[K];
439
+ } & {
440
+ [K in Exclude<DefaultKeys<T>, ComputedKeys<T>>]?: T[K];
441
+ };
442
+ /**
443
+ * ClickHouse table engine types supported by Moose.
444
+ */
445
+ declare enum ClickHouseEngines {
446
+ MergeTree = "MergeTree",
447
+ ReplacingMergeTree = "ReplacingMergeTree",
448
+ SummingMergeTree = "SummingMergeTree",
449
+ AggregatingMergeTree = "AggregatingMergeTree",
450
+ CollapsingMergeTree = "CollapsingMergeTree",
451
+ VersionedCollapsingMergeTree = "VersionedCollapsingMergeTree",
452
+ GraphiteMergeTree = "GraphiteMergeTree",
453
+ S3Queue = "S3Queue",
454
+ S3 = "S3",
455
+ Buffer = "Buffer",
456
+ Distributed = "Distributed",
457
+ IcebergS3 = "IcebergS3",
458
+ Kafka = "Kafka",
459
+ Merge = "Merge",
460
+ ReplicatedMergeTree = "ReplicatedMergeTree",
461
+ ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
462
+ ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
463
+ ReplicatedSummingMergeTree = "ReplicatedSummingMergeTree",
464
+ ReplicatedCollapsingMergeTree = "ReplicatedCollapsingMergeTree",
465
+ ReplicatedVersionedCollapsingMergeTree = "ReplicatedVersionedCollapsingMergeTree"
466
+ }
467
+
468
+ /**
469
+ * Defines how Moose manages the lifecycle of database resources when your code changes.
470
+ *
471
+ * This enum controls the behavior when there are differences between your code definitions
472
+ * and the actual database schema or structure.
473
+ */
474
+ declare enum LifeCycle {
475
+ /**
476
+ * Full automatic management (default behavior).
477
+ * Moose will automatically modify database resources to match your code definitions,
478
+ * including potentially destructive operations like dropping columns or tables.
479
+ */
480
+ FULLY_MANAGED = "FULLY_MANAGED",
481
+ /**
482
+ * Deletion-protected automatic management.
483
+ * Moose will modify resources to match your code but will avoid destructive actions
484
+ * such as dropping columns, or tables. Only additive changes are applied.
485
+ */
486
+ DELETION_PROTECTED = "DELETION_PROTECTED",
487
+ /**
488
+ * External management - no automatic changes.
489
+ * Moose will not modify the database resources. You are responsible for managing
490
+ * the schema and ensuring it matches your code definitions manually.
491
+ */
492
+ EXTERNALLY_MANAGED = "EXTERNALLY_MANAGED"
493
+ }
494
+
495
+ interface TableIndex {
496
+ name: string;
497
+ expression: string;
498
+ type: string;
499
+ arguments?: string[];
500
+ granularity?: number;
501
+ }
502
+ interface TableProjection {
503
+ name: string;
504
+ body: string;
505
+ }
506
+ /**
507
+ * Represents a failed record during insertion with error details
508
+ */
509
+ interface FailedRecord<T> {
510
+ /** The original record that failed to insert */
511
+ record: T;
512
+ /** The error message describing why the insertion failed */
513
+ error: string;
514
+ /** Optional: The index of this record in the original batch */
515
+ index?: number;
516
+ }
517
+ /**
518
+ * Result of an insert operation with detailed success/failure information
519
+ */
520
+ interface InsertResult<T> {
521
+ /** Number of records successfully inserted */
522
+ successful: number;
523
+ /** Number of records that failed to insert */
524
+ failed: number;
525
+ /** Total number of records processed */
526
+ total: number;
527
+ /** Detailed information about failed records (if record isolation was used) */
528
+ failedRecords?: FailedRecord<T>[];
529
+ }
530
+ /**
531
+ * Error handling strategy for insert operations
532
+ */
533
+ type ErrorStrategy = "fail-fast" | "discard" | "isolate";
534
+ /**
535
+ * Options for insert operations
536
+ */
537
+ interface InsertOptions {
538
+ /** Maximum number of bad records to tolerate before failing */
539
+ allowErrors?: number;
540
+ /** Maximum ratio of bad records to tolerate (0.0 to 1.0) before failing */
541
+ allowErrorsRatio?: number;
542
+ /** Error handling strategy */
543
+ strategy?: ErrorStrategy;
544
+ /** Whether to enable dead letter queue for failed records (future feature) */
545
+ deadLetterQueue?: boolean;
546
+ /** Whether to validate data against schema before insertion (default: true) */
547
+ validate?: boolean;
548
+ /** Whether to skip validation for individual records during 'isolate' strategy retries (default: false) */
549
+ skipValidationOnRetry?: boolean;
550
+ }
551
+ /**
552
+ * Validation result for a record with detailed error information
553
+ */
554
+ interface ValidationError {
555
+ /** The original record that failed validation */
556
+ record: any;
557
+ /** Detailed validation error message */
558
+ error: string;
559
+ /** Optional: The index of this record in the original batch */
560
+ index?: number;
561
+ /** The path to the field that failed validation */
562
+ path?: string;
563
+ }
564
+ /**
565
+ * Result of data validation with success/failure breakdown
566
+ */
567
+ interface ValidationResult<T> {
568
+ /** Records that passed validation */
569
+ valid: T[];
570
+ /** Records that failed validation with detailed error information */
571
+ invalid: ValidationError[];
572
+ /** Total number of records processed */
573
+ total: number;
574
+ }
575
+ /**
576
+ * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING
577
+ * Note: Since ClickHouse 24.7, settings no longer require the 's3queue_' prefix
578
+ */
579
+ interface S3QueueTableSettings {
580
+ /** Processing mode: "ordered" for sequential or "unordered" for parallel processing */
581
+ mode?: "ordered" | "unordered";
582
+ /** What to do with files after processing: 'keep' or 'delete' */
583
+ after_processing?: "keep" | "delete";
584
+ /** ZooKeeper/Keeper path for coordination between replicas */
585
+ keeper_path?: string;
586
+ /** Number of retry attempts for failed files */
587
+ loading_retries?: string;
588
+ /** Number of threads for parallel processing */
589
+ processing_threads_num?: string;
590
+ /** Enable parallel inserts */
591
+ parallel_inserts?: string;
592
+ /** Enable logging to system.s3queue_log table */
593
+ enable_logging_to_queue_log?: string;
594
+ /** Last processed file path (for ordered mode) */
595
+ last_processed_path?: string;
596
+ /** Maximum number of tracked files in ZooKeeper */
597
+ tracked_files_limit?: string;
598
+ /** TTL for tracked files in seconds */
599
+ tracked_file_ttl_sec?: string;
600
+ /** Minimum polling timeout in milliseconds */
601
+ polling_min_timeout_ms?: string;
602
+ /** Maximum polling timeout in milliseconds */
603
+ polling_max_timeout_ms?: string;
604
+ /** Polling backoff in milliseconds */
605
+ polling_backoff_ms?: string;
606
+ /** Minimum cleanup interval in milliseconds */
607
+ cleanup_interval_min_ms?: string;
608
+ /** Maximum cleanup interval in milliseconds */
609
+ cleanup_interval_max_ms?: string;
610
+ /** Number of buckets for sharding (0 = disabled) */
611
+ buckets?: string;
612
+ /** Batch size for listing objects */
613
+ list_objects_batch_size?: string;
614
+ /** Enable hash ring filtering for distributed processing */
615
+ enable_hash_ring_filtering?: string;
616
+ /** Maximum files to process before committing */
617
+ max_processed_files_before_commit?: string;
618
+ /** Maximum rows to process before committing */
619
+ max_processed_rows_before_commit?: string;
620
+ /** Maximum bytes to process before committing */
621
+ max_processed_bytes_before_commit?: string;
622
+ /** Maximum processing time in seconds before committing */
623
+ max_processing_time_sec_before_commit?: string;
624
+ /** Use persistent processing nodes (available from 25.8) */
625
+ use_persistent_processing_nodes?: string;
626
+ /** TTL for persistent processing nodes in seconds */
627
+ persistent_processing_nodes_ttl_seconds?: string;
628
+ /** Additional settings */
629
+ [key: string]: string | undefined;
630
+ }
631
+ /**
632
+ * Base configuration shared by all table engines
633
+ * @template T The data type of the records stored in the table.
634
+ */
635
+ type BaseOlapConfig<T> = ({
636
+ /**
637
+ * Specifies the fields to use for ordering data within the ClickHouse table.
638
+ * This is crucial for optimizing query performance.
639
+ */
640
+ orderByFields: (keyof T & string)[];
641
+ orderByExpression?: undefined;
642
+ } | {
643
+ orderByFields?: undefined;
644
+ /**
645
+ * An arbitrary ClickHouse SQL expression for the order by clause.
646
+ *
647
+ * `orderByExpression: "(id, name)"` is equivalent to `orderByFields: ["id", "name"]`
648
+ * `orderByExpression: "tuple()"` means no sorting
649
+ */
650
+ orderByExpression: string;
651
+ } | {
652
+ orderByFields?: undefined;
653
+ orderByExpression?: undefined;
654
+ }) & {
655
+ partitionBy?: string;
656
+ /**
657
+ * SAMPLE BY expression for approximate query processing.
658
+ *
659
+ * Examples:
660
+ * ```typescript
661
+ * // Single unsigned integer field
662
+ * sampleByExpression: "userId"
663
+ *
664
+ * // Hash function on any field type
665
+ * sampleByExpression: "cityHash64(id)"
666
+ *
667
+ * // Multiple fields with hash
668
+ * sampleByExpression: "cityHash64(userId, timestamp)"
669
+ * ```
670
+ *
671
+ * Requirements:
672
+ * - Expression must evaluate to an unsigned integer (UInt8/16/32/64)
673
+ * - Expression must be present in the ORDER BY clause
674
+ * - If using hash functions, the same expression must appear in orderByExpression
675
+ */
676
+ sampleByExpression?: string;
677
+ /**
678
+ * Optional PRIMARY KEY expression.
679
+ * When specified, this overrides the primary key inferred from Key<T> column annotations.
680
+ *
681
+ * This allows for:
682
+ * - Complex primary keys using functions (e.g., "cityHash64(id)")
683
+ * - Different column ordering in primary key vs schema definition
684
+ * - Primary keys that differ from ORDER BY
685
+ *
686
+ * Example: primaryKeyExpression: "(userId, cityHash64(eventId))"
687
+ *
688
+ * Note: When this is set, any Key<T> annotations on columns are ignored for PRIMARY KEY generation.
689
+ */
690
+ primaryKeyExpression?: string;
691
+ version?: string;
692
+ lifeCycle?: LifeCycle;
693
+ settings?: {
694
+ [key: string]: string;
695
+ };
696
+ /**
697
+ * Optional TTL configuration for the table.
698
+ * e.g., "TTL timestamp + INTERVAL 90 DAY DELETE"
699
+ *
700
+ * Use the {@link ClickHouseTTL} type to configure column level TTL
701
+ */
702
+ ttl?: string;
703
+ /** Optional secondary/data-skipping indexes */
704
+ indexes?: TableIndex[];
705
+ /** Optional projections for alternative data ordering within parts */
706
+ projections?: TableProjection[];
707
+ /**
708
+ * Optional database name for multi-database support.
709
+ * When not specified, uses the global ClickHouse config database.
710
+ */
711
+ database?: string;
712
+ /**
713
+ * Optional cluster name for ON CLUSTER support.
714
+ * Use this to enable replicated tables across ClickHouse clusters.
715
+ * The cluster must be defined in config.toml (dev environment only).
716
+ * Example: cluster: "prod_cluster"
717
+ */
718
+ cluster?: string;
719
+ /**
720
+ * Optional seed filter applied when `moose seed clickhouse` populates a
721
+ * local/testing database from a remote source.
722
+ *
723
+ * Example:
724
+ * ```typescript
725
+ * seedFilter: { limit: 100, where: "user_id = 10" }
726
+ * ```
727
+ */
728
+ seedFilter?: {
729
+ /** Maximum number of rows to seed for this table. */
730
+ limit?: number;
731
+ /** ClickHouse SQL WHERE expression to filter seeded rows. */
732
+ where?: string;
733
+ };
734
+ };
735
+ /**
736
+ * Configuration for MergeTree engine
737
+ * @template T The data type of the records stored in the table.
738
+ */
739
+ type MergeTreeConfig<T> = BaseOlapConfig<T> & {
740
+ engine: ClickHouseEngines.MergeTree;
741
+ };
742
+ /**
743
+ * Configuration for ReplacingMergeTree engine (deduplication)
744
+ * @template T The data type of the records stored in the table.
745
+ */
746
+ type ReplacingMergeTreeConfig<T> = BaseOlapConfig<T> & {
747
+ engine: ClickHouseEngines.ReplacingMergeTree;
748
+ ver?: keyof T & string;
749
+ isDeleted?: keyof T & string;
750
+ };
751
+ /**
752
+ * Configuration for AggregatingMergeTree engine
753
+ * @template T The data type of the records stored in the table.
754
+ */
755
+ type AggregatingMergeTreeConfig<T> = BaseOlapConfig<T> & {
756
+ engine: ClickHouseEngines.AggregatingMergeTree;
757
+ };
758
+ /**
759
+ * Configuration for SummingMergeTree engine
760
+ * @template T The data type of the records stored in the table.
761
+ */
762
+ type SummingMergeTreeConfig<T> = BaseOlapConfig<T> & {
763
+ engine: ClickHouseEngines.SummingMergeTree;
764
+ columns?: string[];
765
+ };
766
+ /**
767
+ * Configuration for CollapsingMergeTree engine
768
+ * @template T The data type of the records stored in the table.
769
+ */
770
+ type CollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {
771
+ engine: ClickHouseEngines.CollapsingMergeTree;
772
+ sign: keyof T & string;
773
+ };
774
+ /**
775
+ * Configuration for VersionedCollapsingMergeTree engine
776
+ * @template T The data type of the records stored in the table.
777
+ */
778
+ type VersionedCollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {
779
+ engine: ClickHouseEngines.VersionedCollapsingMergeTree;
780
+ sign: keyof T & string;
781
+ ver: keyof T & string;
782
+ };
783
+ interface ReplicatedEngineProperties {
784
+ keeperPath?: string;
785
+ replicaName?: string;
786
+ }
787
+ /**
788
+ * Configuration for ReplicatedMergeTree engine
789
+ * @template T The data type of the records stored in the table.
790
+ *
791
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
792
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
793
+ * provide both parameters or neither (to use server defaults).
794
+ */
795
+ type ReplicatedMergeTreeConfig<T> = Omit<MergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
796
+ engine: ClickHouseEngines.ReplicatedMergeTree;
797
+ };
798
+ /**
799
+ * Configuration for ReplicatedReplacingMergeTree engine
800
+ * @template T The data type of the records stored in the table.
801
+ *
802
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
803
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
804
+ * provide both parameters or neither (to use server defaults).
805
+ */
806
+ type ReplicatedReplacingMergeTreeConfig<T> = Omit<ReplacingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
807
+ engine: ClickHouseEngines.ReplicatedReplacingMergeTree;
808
+ };
809
+ /**
810
+ * Configuration for ReplicatedAggregatingMergeTree engine
811
+ * @template T The data type of the records stored in the table.
812
+ *
813
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
814
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
815
+ * provide both parameters or neither (to use server defaults).
816
+ */
817
+ type ReplicatedAggregatingMergeTreeConfig<T> = Omit<AggregatingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
818
+ engine: ClickHouseEngines.ReplicatedAggregatingMergeTree;
819
+ };
820
+ /**
821
+ * Configuration for ReplicatedSummingMergeTree engine
822
+ * @template T The data type of the records stored in the table.
823
+ *
824
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
825
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
826
+ * provide both parameters or neither (to use server defaults).
827
+ */
828
+ type ReplicatedSummingMergeTreeConfig<T> = Omit<SummingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
829
+ engine: ClickHouseEngines.ReplicatedSummingMergeTree;
830
+ };
831
+ /**
832
+ * Configuration for ReplicatedCollapsingMergeTree engine
833
+ * @template T The data type of the records stored in the table.
834
+ *
835
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
836
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
837
+ * provide both parameters or neither (to use server defaults).
838
+ */
839
+ type ReplicatedCollapsingMergeTreeConfig<T> = Omit<CollapsingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
840
+ engine: ClickHouseEngines.ReplicatedCollapsingMergeTree;
841
+ };
842
+ /**
843
+ * Configuration for ReplicatedVersionedCollapsingMergeTree engine
844
+ * @template T The data type of the records stored in the table.
845
+ *
846
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
847
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
848
+ * provide both parameters or neither (to use server defaults).
849
+ */
850
+ type ReplicatedVersionedCollapsingMergeTreeConfig<T> = Omit<VersionedCollapsingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
851
+ engine: ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree;
852
+ };
853
+ /**
854
+ * Configuration for S3Queue engine - only non-alterable constructor parameters.
855
+ * S3Queue-specific settings like 'mode', 'keeper_path', etc. should be specified
856
+ * in the settings field, not here.
857
+ * @template T The data type of the records stored in the table.
858
+ */
859
+ type S3QueueConfig<T> = Omit<BaseOlapConfig<T>, "settings" | "orderByFields" | "partitionBy" | "sampleByExpression" | "projections"> & {
860
+ engine: ClickHouseEngines.S3Queue;
861
+ /** S3 bucket path with wildcards (e.g., 's3://bucket/data/*.json') */
862
+ s3Path: string;
863
+ /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */
864
+ format: string;
865
+ /** AWS access key ID (optional, omit for NOSIGN/public buckets) */
866
+ awsAccessKeyId?: string;
867
+ /** AWS secret access key */
868
+ awsSecretAccessKey?: string;
869
+ /** Compression type (e.g., 'gzip', 'zstd') */
870
+ compression?: string;
871
+ /** Custom HTTP headers */
872
+ headers?: {
873
+ [key: string]: string;
874
+ };
875
+ /**
876
+ * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING.
877
+ * These settings control the behavior of the S3Queue engine.
878
+ */
879
+ settings?: S3QueueTableSettings;
880
+ };
881
+ /**
882
+ * Configuration for S3 engine
883
+ * Note: S3 engine supports ORDER BY clause, unlike S3Queue, Buffer, and Distributed engines
884
+ * @template T The data type of the records stored in the table.
885
+ */
886
+ type S3Config<T> = Omit<BaseOlapConfig<T>, "sampleByExpression" | "projections"> & {
887
+ engine: ClickHouseEngines.S3;
888
+ /** S3 path (e.g., 's3://bucket/path/file.json') */
889
+ path: string;
890
+ /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */
891
+ format: string;
892
+ /** AWS access key ID (optional, omit for NOSIGN/public buckets) */
893
+ awsAccessKeyId?: string;
894
+ /** AWS secret access key */
895
+ awsSecretAccessKey?: string;
896
+ /** Compression type (e.g., 'gzip', 'zstd', 'auto') */
897
+ compression?: string;
898
+ /** Partition strategy (optional) */
899
+ partitionStrategy?: string;
900
+ /** Partition columns in data file (optional) */
901
+ partitionColumnsInDataFile?: string;
902
+ };
903
+ /**
904
+ * Configuration for Buffer engine
905
+ * @template T The data type of the records stored in the table.
906
+ */
907
+ type BufferConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression" | "projections"> & {
908
+ engine: ClickHouseEngines.Buffer;
909
+ /** Target database name for the destination table */
910
+ targetDatabase: string;
911
+ /** Target table name where data will be flushed */
912
+ targetTable: string;
913
+ /** Number of buffer layers (typically 16) */
914
+ numLayers: number;
915
+ /** Minimum time in seconds before flushing */
916
+ minTime: number;
917
+ /** Maximum time in seconds before flushing */
918
+ maxTime: number;
919
+ /** Minimum number of rows before flushing */
920
+ minRows: number;
921
+ /** Maximum number of rows before flushing */
922
+ maxRows: number;
923
+ /** Minimum bytes before flushing */
924
+ minBytes: number;
925
+ /** Maximum bytes before flushing */
926
+ maxBytes: number;
927
+ /** Optional: Flush time in seconds */
928
+ flushTime?: number;
929
+ /** Optional: Flush number of rows */
930
+ flushRows?: number;
931
+ /** Optional: Flush number of bytes */
932
+ flushBytes?: number;
933
+ };
934
+ /**
935
+ * Configuration for Distributed engine
936
+ * @template T The data type of the records stored in the table.
937
+ */
938
+ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression" | "projections"> & {
939
+ engine: ClickHouseEngines.Distributed;
940
+ /** Cluster name from the ClickHouse configuration */
941
+ cluster: string;
942
+ /** Database name on the cluster */
943
+ targetDatabase: string;
944
+ /** Table name on the cluster */
945
+ targetTable: string;
946
+ /** Optional: Sharding key expression for data distribution */
947
+ shardingKey?: string;
948
+ /** Optional: Policy name for data distribution */
949
+ policyName?: string;
950
+ };
951
/**
 * Kafka table settings. See: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
 *
 * NOTE(review): numeric/boolean settings are declared as strings here; values
 * appear to be forwarded verbatim to ClickHouse's SETTINGS clause — confirm.
 */
interface KafkaTableSettings {
    /* Transport security and SASL authentication. */
    kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
    kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
    kafka_sasl_username?: string;
    kafka_sasl_password?: string;
    /* Consumer / batching behavior. */
    kafka_schema?: string;
    kafka_num_consumers?: string;
    kafka_max_block_size?: string;
    kafka_skip_broken_messages?: string;
    kafka_commit_every_batch?: string;
    kafka_client_id?: string;
    kafka_poll_timeout_ms?: string;
    kafka_poll_max_batch_size?: string;
    kafka_flush_interval_ms?: string;
    kafka_consumer_reschedule_ms?: string;
    kafka_thread_per_consumer?: string;
    /* Error handling and commit behavior. */
    kafka_handle_error_mode?: "default" | "stream";
    kafka_commit_on_select?: string;
    kafka_max_rows_per_message?: string;
    /* Producer-side compression. */
    kafka_compression_codec?: string;
    kafka_compression_level?: string;
}
974
/** Kafka engine for streaming data from Kafka topics. Additional settings go in `settings`. */
type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression" | "projections"> & {
    /** Discriminant selecting the Kafka engine. */
    engine: ClickHouseEngines.Kafka;
    /** Broker list, comma-separated (e.g. 'host1:9092,host2:9092'). */
    brokerList: string;
    /** Topic list, comma-separated. */
    topicList: string;
    /** Kafka consumer group name. */
    groupName: string;
    /** Message format (e.g., 'JSONEachRow'). */
    format: string;
    /** Alterable Kafka engine settings; see KafkaTableSettings. */
    settings?: KafkaTableSettings;
};
983
+ /**
984
+ * Configuration for IcebergS3 engine - read-only Iceberg table access
985
+ *
986
+ * Provides direct querying of Apache Iceberg tables stored on S3.
987
+ * Data is not copied; queries stream directly from Parquet/ORC files.
988
+ *
989
+ * @template T The data type of the records stored in the table.
990
+ *
991
+ * @example
992
+ * ```typescript
993
+ * const lakeEvents = new OlapTable<Event>("lake_events", {
994
+ * engine: ClickHouseEngines.IcebergS3,
995
+ * path: "s3://datalake/events/",
996
+ * format: "Parquet",
997
+ * awsAccessKeyId: mooseRuntimeEnv.get("AWS_ACCESS_KEY_ID"),
998
+ * awsSecretAccessKey: mooseRuntimeEnv.get("AWS_SECRET_ACCESS_KEY")
999
+ * });
1000
+ * ```
1001
+ *
1002
+ * @remarks
1003
+ * - IcebergS3 engine is read-only
1004
+ * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses
1005
+ * - Queries always see the latest Iceberg snapshot (with metadata cache)
1006
+ */
1007
+ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression" | "projections"> & {
1008
+ engine: ClickHouseEngines.IcebergS3;
1009
+ /** S3 path to Iceberg table root (e.g., 's3://bucket/warehouse/events/') */
1010
+ path: string;
1011
+ /** Data format - 'Parquet' or 'ORC' */
1012
+ format: "Parquet" | "ORC";
1013
+ /** AWS access key ID (optional, omit for NOSIGN/public buckets) */
1014
+ awsAccessKeyId?: string;
1015
+ /** AWS secret access key (optional) */
1016
+ awsSecretAccessKey?: string;
1017
+ /** Compression type (optional: 'gzip', 'zstd', 'auto') */
1018
+ compression?: string;
1019
+ };
1020
+ /**
1021
+ * Configuration for Merge engine - read-only view over multiple tables matching a regex pattern.
1022
+ *
1023
+ * @template T The data type of the records in the source tables.
1024
+ *
1025
+ * @example
1026
+ * ```typescript
1027
+ * const allEvents = new OlapTable<Event>("all_events", {
1028
+ * engine: ClickHouseEngines.Merge,
1029
+ * sourceDatabase: "currentDatabase()",
1030
+ * tablesRegexp: "^events_\\d+$",
1031
+ * });
1032
+ * ```
1033
+ *
1034
+ * @remarks
1035
+ * - Merge engine is read-only; INSERT operations are not supported
1036
+ * - Cannot be used as a destination in IngestPipeline
1037
+ * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses
1038
+ */
1039
+ type MergeConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression" | "projections"> & {
1040
+ engine: ClickHouseEngines.Merge;
1041
+ /** Database to scan for source tables (literal name, currentDatabase(), or REGEXP(...)) */
1042
+ sourceDatabase: string;
1043
+ /** Regex pattern to match table names in the source database */
1044
+ tablesRegexp: string;
1045
+ };
1046
+ /**
1047
+ * Legacy configuration (backward compatibility) - defaults to MergeTree engine
1048
+ * @template T The data type of the records stored in the table.
1049
+ */
1050
+ type LegacyOlapConfig<T> = BaseOlapConfig<T>;
1051
/**
 * Union of all engine-specific configurations, discriminated on `engine`.
 * @template T The data type of the records stored in the table.
 */
type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | CollapsingMergeTreeConfig<T> | VersionedCollapsingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | ReplicatedCollapsingMergeTreeConfig<T> | ReplicatedVersionedCollapsingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T> | KafkaConfig<T> | MergeConfig<T>;
1052
+ /**
1053
+ * Union of all engine-specific configurations (new API)
1054
+ * @template T The data type of the records stored in the table.
1055
+ */
1056
+ type OlapConfig<T> = EngineConfig<T> | LegacyOlapConfig<T>;
1057
+ /**
1058
+ * Represents an OLAP (Online Analytical Processing) table, typically corresponding to a ClickHouse table.
1059
+ * Provides a typed interface for interacting with the table.
1060
+ *
1061
+ * @template T The data type of the records stored in the table. The structure of T defines the table schema.
1062
+ */
1063
+ declare class OlapTable<T> extends TypedBase<T, OlapConfig<T>> {
1064
+ name: IdentifierBrandedString;
1065
+ /** @internal */
1066
+ readonly kind = "OlapTable";
1067
+ /** @internal Typia validators for Insertable<T> — used during insert validation */
1068
+ private insertValidators?;
1069
+ /** @internal Memoized ClickHouse client for reusing connections across insert calls */
1070
+ private _memoizedClient?;
1071
+ /** @internal Hash of the configuration used to create the memoized client */
1072
+ private _configHash?;
1073
+ /** @internal Cached table name to avoid repeated generation */
1074
+ private _cachedTableName?;
1075
+ /**
1076
+ * Creates a new OlapTable instance.
1077
+ * @param name The name of the table. This name is used for the underlying ClickHouse table.
1078
+ * @param config Optional configuration for the OLAP table.
1079
+ */
1080
+ constructor(name: string, config?: OlapConfig<T>);
1081
+ /** @internal **/
1082
+ constructor(name: string, config: OlapConfig<T>, schema: IJsonSchemaCollection.IV3_1, columns: Column[], validators?: TypiaValidators<T>, insertValidators?: TypiaValidators<Insertable<T>>);
1083
+ /** @internal Returns the versioned ClickHouse table name (e.g., "events_1_0_0") */
1084
+ generateTableName(): string;
1085
+ /**
1086
+ * Creates a fast hash of the ClickHouse configuration.
1087
+ * Uses crypto.createHash for better performance than JSON.stringify.
1088
+ *
1089
+ * @private
1090
+ */
1091
+ private createConfigHash;
1092
+ /**
1093
+ * Gets or creates a memoized ClickHouse client.
1094
+ * The client is cached and reused across multiple insert calls for better performance.
1095
+ * If the configuration changes, a new client will be created.
1096
+ *
1097
+ * @private
1098
+ */
1099
+ private getMemoizedClient;
1100
+ /**
1101
+ * Closes the memoized ClickHouse client if it exists.
1102
+ * This is useful for cleaning up connections when the table instance is no longer needed.
1103
+ * The client will be automatically recreated on the next insert call if needed.
1104
+ */
1105
+ closeClient(): Promise<void>;
1106
+ /**
1107
+ * Validates a single record using typia's comprehensive type checking.
1108
+ * This provides the most accurate validation as it uses the exact TypeScript type information.
1109
+ *
1110
+ * @param record The record to validate
1111
+ * @returns Validation result with detailed error information
1112
+ */
1113
+ validateRecord(record: unknown): {
1114
+ success: boolean;
1115
+ data?: T;
1116
+ errors?: string[];
1117
+ };
1118
+ /**
1119
+ * Type guard function using typia's is() function.
1120
+ * Provides compile-time type narrowing for TypeScript.
1121
+ *
1122
+ * @param record The record to check
1123
+ * @returns True if record matches type T, with type narrowing
1124
+ */
1125
+ isValidRecord(record: unknown): record is T;
1126
+ /**
1127
+ * Assert that a record matches type T, throwing detailed errors if not.
1128
+ * Uses typia's assert() function for the most detailed error reporting.
1129
+ *
1130
+ * @param record The record to assert
1131
+ * @returns The validated and typed record
1132
+ * @throws Detailed validation error if record doesn't match type T
1133
+ */
1134
+ assertValidRecord(record: unknown): T;
1135
+ /**
1136
+ * Validates records for insert using Insertable<T> validators when available.
1137
+ * Falls back to the full T validators if insert validators weren't generated.
1138
+ * @private
1139
+ */
1140
+ private validateInsertRecords;
1141
+ /**
1142
+ * Validates an array of records with comprehensive error reporting.
1143
+ * Uses the most appropriate validation method available (typia or basic).
1144
+ *
1145
+ * @param data Array of records to validate
1146
+ * @returns Detailed validation results
1147
+ */
1148
+ validateRecords(data: unknown[]): Promise<ValidationResult<T>>;
1149
+ /**
1150
+ * Optimized batch retry that minimizes individual insert operations.
1151
+ * Groups records into smaller batches to reduce round trips while still isolating failures.
1152
+ *
1153
+ * @private
1154
+ */
1155
+ private retryIndividualRecords;
1156
+ /**
1157
+ * Validates input parameters and strategy compatibility
1158
+ * @private
1159
+ */
1160
+ private validateInsertParameters;
1161
+ /**
1162
+ * Handles early return cases for empty data
1163
+ * @private
1164
+ */
1165
+ private handleEmptyData;
1166
+ /**
1167
+ * Performs pre-insertion validation for array data
1168
+ * @private
1169
+ */
1170
+ private performPreInsertionValidation;
1171
+ /**
1172
+ * Handles validation errors based on the specified strategy
1173
+ * @private
1174
+ */
1175
+ private handleValidationErrors;
1176
+ /**
1177
+ * Checks if validation errors exceed configured thresholds
1178
+ * @private
1179
+ */
1180
+ private checkValidationThresholds;
1181
+ /**
1182
+ * Optimized insert options preparation with better memory management
1183
+ * @private
1184
+ */
1185
+ private prepareInsertOptions;
1186
+ /**
1187
+ * Creates success result for completed insertions
1188
+ * @private
1189
+ */
1190
+ private createSuccessResult;
1191
+ /**
1192
+ * Handles insertion errors based on the specified strategy
1193
+ * @private
1194
+ */
1195
+ private handleInsertionError;
1196
+ /**
1197
+ * Handles the isolate strategy for insertion errors
1198
+ * @private
1199
+ */
1200
+ private handleIsolateStrategy;
1201
+ /**
1202
+ * Checks if insertion errors exceed configured thresholds
1203
+ * @private
1204
+ */
1205
+ private checkInsertionThresholds;
1206
+ /**
1207
+ * Recursively transforms a record to match ClickHouse's JSONEachRow requirements
1208
+ *
1209
+ * - For every Array(Nested(...)) field at any depth, each item is wrapped in its own array and recursively processed.
1210
+ * - For every Nested struct (not array), it recurses into the struct.
1211
+ * - This ensures compatibility with kafka_clickhouse_sync
1212
+ *
1213
+ * @param record The input record to transform (may be deeply nested)
1214
+ * @param columns The schema columns for this level (defaults to this.columnArray at the top level)
1215
+ * @returns The transformed record, ready for ClickHouse JSONEachRow insertion
1216
+ */
1217
+ private mapToClickhouseRecord;
1218
+ /**
1219
+ * Inserts data directly into the ClickHouse table with enhanced error handling and validation.
1220
+ * This method establishes a direct connection to ClickHouse using the project configuration
1221
+ * and inserts the provided data into the versioned table.
1222
+ *
1223
+ * PERFORMANCE OPTIMIZATIONS:
1224
+ * - Memoized client connections with fast config hashing
1225
+ * - Single-pass validation with pre-allocated arrays
1226
+ * - Batch-optimized retry strategy (batches of 10, then individual)
1227
+ * - Optimized ClickHouse settings for large datasets
1228
+ * - Reduced memory allocations and object creation
1229
+ *
1230
+ * Uses advanced typia validation when available for comprehensive type checking,
1231
+ * with fallback to basic validation for compatibility.
1232
+ *
1233
+ * The ClickHouse client is memoized and reused across multiple insert calls for better performance.
1234
+ * If the configuration changes, a new client will be automatically created.
1235
+ *
1236
+ * @param data Array of objects conforming to the table schema, or a Node.js Readable stream
1237
+ * @param options Optional configuration for error handling, validation, and insertion behavior
1238
+ * @returns Promise resolving to detailed insertion results
1239
+ * @throws {ConfigError} When configuration cannot be read or parsed
1240
+ * @throws {ClickHouseError} When insertion fails based on the error strategy
1241
+ * @throws {ValidationError} When validation fails and strategy is 'fail-fast'
1242
+ *
1243
+ * @example
1244
+ * ```typescript
1245
+ * // Create an OlapTable instance (typia validators auto-injected)
1246
+ * const userTable = new OlapTable<User>('users');
1247
+ *
1248
+ * // Insert with comprehensive typia validation
1249
+ * const result1 = await userTable.insert([
1250
+ * { id: 1, name: 'John', email: 'john@example.com' },
1251
+ * { id: 2, name: 'Jane', email: 'jane@example.com' }
1252
+ * ]);
1253
+ *
1254
+ * // Insert data with stream input (validation not available for streams)
1255
+ * const dataStream = new Readable({
1256
+ * objectMode: true,
1257
+ * read() { // Stream implementation }
1258
+ * });
1259
+ * const result2 = await userTable.insert(dataStream, { strategy: 'fail-fast' });
1260
+ *
1261
+ * // Insert with validation disabled for performance
1262
+ * const result3 = await userTable.insert(data, { validate: false });
1263
+ *
1264
+ * // Insert with error handling strategies
1265
+ * const result4 = await userTable.insert(mixedData, {
1266
+ * strategy: 'isolate',
1267
+ * allowErrorsRatio: 0.1,
1268
+ * validate: true // Use typia validation (default)
1269
+ * });
1270
+ *
1271
+ * // Optional: Clean up connection when completely done
1272
+ * await userTable.closeClient();
1273
+ * ```
1274
+ */
1275
+ insert(data: Insertable<T>[] | Readable, options?: InsertOptions): Promise<InsertResult<T>>;
1276
+ }
1277
+
1278
+ /**
1279
+ * Options for per-query ClickHouse row policy enforcement.
1280
+ * When provided, each query activates the specified role and injects
1281
+ * custom settings (e.g., tenant ID) that row policies evaluate via getSetting().
1282
+ */
1283
+ interface RowPolicyOptions {
1284
+ role: string;
1285
+ clickhouse_settings: Record<string, string>;
1286
+ }
1287
/**
 * Wrapper around a ClickHouseClient that prefixes query ids and, when
 * rowPolicyOptions is provided, applies per-query role activation and
 * custom settings for row-policy enforcement (see RowPolicyOptions).
 */
declare class QueryClient {
    /** Underlying ClickHouse client used to issue queries and commands. */
    client: ClickHouseClient;
    /** Prefix applied to query ids — presumably for tracing/attribution; confirm against caller. */
    query_id_prefix: string;
    private rowPolicyOptions?;
    constructor(client: ClickHouseClient, query_id_prefix: string, rowPolicyOptions?: RowPolicyOptions);
    /**
     * Executes a query and returns the JSONEachRow result set.
     * The `__query_result_t` member is a type-level phantom carrying the row
     * type T for downstream inference; it is not a runtime property.
     */
    execute<T = any>(sql: Sql): Promise<ResultSet<"JSONEachRow"> & {
        __query_result_t?: T[];
    }>;
    /** Executes a statement via the client's command interface (no result rows). */
    command(sql: Sql): Promise<CommandResult>;
}
1297
+
1298
+ /**
1299
+ * Represents a database View, defined by a SQL SELECT statement based on one or more base tables or other views.
1300
+ * Emits structured data for the Moose infrastructure system.
1301
+ */
1302
+ declare class View {
1303
+ /** @internal */
1304
+ readonly kind = "View";
1305
+ /** The name of the view */
1306
+ name: string;
1307
+ /** The SELECT SQL statement that defines the view */
1308
+ selectSql: string;
1309
+ /** Names of source tables/views that the SELECT reads from */
1310
+ sourceTables: string[];
1311
+ /** Optional metadata for the view */
1312
+ metadata: {
1313
+ [key: string]: any;
1314
+ };
1315
+ /**
1316
+ * Creates a new View instance.
1317
+ * @param name The name of the view to be created.
1318
+ * @param selectStatement The SQL SELECT statement that defines the view's logic.
1319
+ * @param baseTables An array of OlapTable or View objects that the `selectStatement` reads from. Used for dependency tracking.
1320
+ * @param metadata Optional metadata for the view (e.g., description, source file).
1321
+ */
1322
+ constructor(name: string, selectStatement: string | Sql, baseTables: (OlapTable<any> | View)[], metadata?: {
1323
+ [key: string]: any;
1324
+ });
1325
+ }
1326
+
1327
+ export { TypedBase as $, type Decimal as A, type Insertable as B, ClickHouseEngines as C, type DateTime as D, quoteIdentifier as E, type FixedString as F, type IdentifierBrandedString as G, type Value as H, type Int8 as I, type SqlTemplateTag as J, sql as K, LifeCycle as L, Sql as M, type NonIdentifierBrandedString as N, OlapTable as O, toStaticQuery as P, toQuery as Q, type RawValue as R, type S3QueueTableSettings as S, toQueryPreview as T, type UInt8 as U, View as V, type WithDefault as W, getValueFromParameter as X, createClickhouseParameter as Y, mapToClickHouseType as Z, QueryClient as _, type OlapConfig as a, type Column as a0, type RowPolicyOptions as a1, type TypiaValidators as a2, type DataType as a3, type ClickHousePoint as a4, type ClickHouseRing as a5, type ClickHouseLineString as a6, type ClickHouseMultiLineString as a7, type ClickHousePolygon as a8, type ClickHouseMultiPolygon as a9, type ClickHousePrecision as b, type ClickHouseDecimal as c, type ClickHouseByteSize as d, type ClickHouseFixedStringSize as e, type ClickHouseFloat as f, type ClickHouseInt as g, type ClickHouseJson as h, type LowCardinality as i, type ClickHouseNamedTuple as j, type ClickHouseDefault as k, type ClickHouseTTL as l, type ClickHouseMaterialized as m, type ClickHouseAlias as n, type ClickHouseCodec as o, type DateTime64 as p, type DateTimeString as q, type DateTime64String as r, type Float32 as s, type Float64 as t, type Int16 as u, type Int32 as v, type Int64 as w, type UInt16 as x, type UInt32 as y, type UInt64 as z };