@bayoudhi/moose-lib-serverless 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2380 @@
1
+ import { tags, IJsonSchemaCollection as IJsonSchemaCollection$1 } from 'typia';
2
+ import { ClickHouseClient, ResultSet, CommandResult } from '@clickhouse/client';
3
+ import { Client } from '@temporalio/client';
4
+ import { JWTPayload } from 'jose';
5
+ import http from 'http';
6
+ import { IJsonSchemaCollection } from 'typia/src/schemas/json/IJsonSchemaCollection';
7
+ import { Pattern, TagBase } from 'typia/lib/tags';
8
+ import { Readable } from 'node:stream';
9
+
10
+ type EnumValues = {
11
+ name: string;
12
+ value: {
13
+ Int: number;
14
+ };
15
+ }[] | {
16
+ name: string;
17
+ value: {
18
+ String: string;
19
+ };
20
+ }[];
21
+ type DataEnum = {
22
+ name: string;
23
+ values: EnumValues;
24
+ };
25
+ type Nested = {
26
+ name: string;
27
+ columns: Column[];
28
+ jwt: boolean;
29
+ };
30
+ type ArrayType = {
31
+ elementType: DataType;
32
+ elementNullable: boolean;
33
+ };
34
+ type NamedTupleType = {
35
+ fields: Array<[string, DataType]>;
36
+ };
37
+ type MapType = {
38
+ keyType: DataType;
39
+ valueType: DataType;
40
+ };
41
+ type JsonOptions = {
42
+ max_dynamic_paths?: number;
43
+ max_dynamic_types?: number;
44
+ typed_paths?: Array<[string, DataType]>;
45
+ skip_paths?: string[];
46
+ skip_regexps?: string[];
47
+ };
48
+ type DataType = string | DataEnum | ArrayType | Nested | NamedTupleType | MapType | JsonOptions | {
49
+ nullable: DataType;
50
+ };
51
+ interface Column {
52
+ name: IdentifierBrandedString;
53
+ data_type: DataType;
54
+ required: boolean;
55
+ unique: false;
56
+ primary_key: boolean;
57
+ default: string | null;
58
+ materialized: string | null;
59
+ ttl: string | null;
60
+ codec: string | null;
61
+ annotations: [string, any][];
62
+ comment: string | null;
63
+ }
64
+
65
+ /**
66
+ * Type definition for typia validation functions
67
+ */
68
+ interface TypiaValidators<T> {
69
+ /** Typia validator function: returns { success: boolean, data?: T, errors?: any[] } */
70
+ validate?: (data: unknown) => {
71
+ success: boolean;
72
+ data?: T;
73
+ errors?: any[];
74
+ };
75
+ /** Typia assert function: throws on validation failure, returns T on success */
76
+ assert?: (data: unknown) => T;
77
+ /** Typia is function: returns boolean indicating if data matches type T */
78
+ is?: (data: unknown) => data is T;
79
+ }
80
+ /**
81
+ * Base class for all typed Moose dmv2 resources (OlapTable, Stream, etc.).
82
+ * Handles the storage and injection of schema information (JSON schema and Column array)
83
+ * provided by the Moose compiler plugin.
84
+ *
85
+ * @template T The data type (interface or type alias) defining the schema of the resource.
86
+ * @template C The specific configuration type for the resource (e.g., OlapConfig, StreamConfig).
87
+ */
88
+ declare class TypedBase<T, C> {
89
+ /** The JSON schema representation of type T. Injected by the compiler plugin. */
90
+ schema: IJsonSchemaCollection.IV3_1;
91
+ /** The name assigned to this resource instance. */
92
+ name: string;
93
+ /** A dictionary mapping column names (keys of T) to their Column definitions. */
94
+ columns: {
95
+ [columnName in keyof Required<T>]: Column;
96
+ };
97
+ /** An array containing the Column definitions for this resource. Injected by the compiler plugin. */
98
+ columnArray: Column[];
99
+ /** The configuration object specific to this resource type. */
100
+ config: C;
101
+ /** Typia validation functions for type T. Injected by the compiler plugin for OlapTable. */
102
+ validators?: TypiaValidators<T>;
103
+     /** Metadata for the resource; always present as an object (may be empty). */
104
+ metadata: {
105
+ [key: string]: any;
106
+ };
107
+ /**
108
+ * Whether this resource allows extra fields beyond the defined columns.
109
+ * When true, extra fields in payloads are passed through to streaming functions.
110
+ * Injected by the compiler plugin when the type has an index signature.
111
+ */
112
+ allowExtraFields: boolean;
113
+ /**
114
+ * @internal Constructor intended for internal use by subclasses and the compiler plugin.
115
+ * It expects the schema and columns to be provided, typically injected by the compiler.
116
+ *
117
+ * @param name The name for the resource instance.
118
+ * @param config The configuration object for the resource.
119
+ * @param schema The JSON schema for the resource's data type T (injected).
120
+ * @param columns The array of Column definitions for T (injected).
121
+ * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).
122
+ */
123
+ constructor(name: string, config: C, schema?: IJsonSchemaCollection.IV3_1, columns?: Column[], validators?: TypiaValidators<T>, allowExtraFields?: boolean);
124
+ }
125
+
126
+ type ClickHousePrecision<P extends number> = {
127
+ _clickhouse_precision?: P;
128
+ };
129
+ declare const DecimalRegex: "^-?\\d+(\\.\\d+)?$";
130
+ type ClickHouseDecimal<P extends number, S extends number> = {
131
+ _clickhouse_precision?: P;
132
+ _clickhouse_scale?: S;
133
+ } & Pattern<typeof DecimalRegex>;
134
+ type ClickHouseFixedStringSize<N extends number> = {
135
+ _clickhouse_fixed_string_size?: N;
136
+ };
137
+ /**
138
+ * FixedString(N) - Fixed-length string of exactly N bytes.
139
+ *
140
+ * ClickHouse stores exactly N bytes, padding shorter values with null bytes.
141
+ * Values exceeding N bytes will throw an exception.
142
+ *
143
+ * Use for binary data: hashes, IP addresses, UUIDs, MAC addresses.
144
+ *
145
+ * @example
146
+ * interface BinaryData {
147
+ * md5_hash: string & FixedString<16>; // 16-byte MD5
148
+ * sha256_hash: string & FixedString<32>; // 32-byte SHA256
149
+ * }
150
+ */
151
+ type FixedString<N extends number> = string & ClickHouseFixedStringSize<N>;
152
+ type ClickHouseByteSize<N extends number> = {
153
+ _clickhouse_byte_size?: N;
154
+ };
155
+ type LowCardinality = {
156
+ _LowCardinality?: true;
157
+ };
158
+ type DateTime = Date;
159
+ type DateTime64<P extends number> = Date & ClickHousePrecision<P>;
160
+ type DateTimeString = string & tags.Format<"date-time">;
161
+ /**
162
+ * JS Date objects cannot hold microsecond precision.
163
+ * Use string as the runtime type to avoid losing information.
164
+ */
165
+ type DateTime64String<P extends number> = string & tags.Format<"date-time"> & ClickHousePrecision<P>;
166
+ type Float32 = number & ClickHouseFloat<"float32">;
167
+ type Float64 = number & ClickHouseFloat<"float64">;
168
+ type Int8 = number & ClickHouseInt<"int8">;
169
+ type Int16 = number & ClickHouseInt<"int16">;
170
+ type Int32 = number & ClickHouseInt<"int32">;
171
+ type Int64 = number & ClickHouseInt<"int64">;
172
+ type UInt8 = number & ClickHouseInt<"uint8">;
173
+ type UInt16 = number & ClickHouseInt<"uint16">;
174
+ type UInt32 = number & ClickHouseInt<"uint32">;
175
+ type UInt64 = number & ClickHouseInt<"uint64">;
176
+ type Decimal<P extends number, S extends number> = string & ClickHouseDecimal<P, S>;
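The branded scalar aliases above only change the generated ClickHouse column types; by construction they are intersections with phantom tag objects, so at runtime the values are plain numbers, strings, or Dates. A minimal sketch of a row model combining them (the interface name and fields are illustrative, not part of this package):

```typescript
interface TradeRow {
  id: UInt64;                      // ClickHouse UInt64
  price: Decimal<18, 4>;           // Decimal(18, 4); carried as a string at runtime
  quantity: Float64;               // Float64
  venue: string & LowCardinality;  // LowCardinality(String)
  executedAt: DateTime64String<6>; // DateTime64(6); kept as a string to preserve microseconds
}
```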
177
+ /**
178
+ * Attach compression codec to a column type.
179
+ *
180
+ * Any valid ClickHouse codec expression is allowed. ClickHouse validates the codec at runtime.
181
+ *
182
+ * @template T The base data type
183
+ * @template CodecExpr The codec expression (single codec or chain)
184
+ *
185
+ * @example
186
+ * interface Metrics {
187
+ * // Single codec
188
+ * log_blob: string & ClickHouseCodec<"ZSTD(3)">;
189
+ *
190
+ * // Codec chain (processed left-to-right)
191
+ * timestamp: Date & ClickHouseCodec<"Delta, LZ4">;
192
+ * temperature: number & ClickHouseCodec<"Gorilla, ZSTD">;
193
+ *
194
+ * // Specialized codecs
195
+ * counter: number & ClickHouseCodec<"DoubleDelta">;
196
+ *
197
+ * // Can combine with other annotations
198
+ * count: UInt64 & ClickHouseCodec<"DoubleDelta, LZ4">;
199
+ * }
200
+ */
201
+ type ClickHouseCodec<CodecExpr extends string> = {
202
+ _clickhouse_codec?: CodecExpr;
203
+ };
204
+ type ClickHouseFloat<Value extends "float32" | "float64"> = tags.Type<Value extends "float32" ? "float" : "double">;
205
+ type ClickHouseInt<Value extends "int8" | "int16" | "int32" | "int64" | "uint8" | "uint16" | "uint32" | "uint64"> = Value extends "int32" | "int64" | "uint32" | "uint64" ? tags.Type<Value> : TagBase<{
206
+ target: "number";
207
+ kind: "type";
208
+ value: Value;
209
+ validate: Value extends "int8" ? "-128 <= $input && $input <= 127" : Value extends "int16" ? "-32768 <= $input && $input <= 32767" : Value extends "uint8" ? "0 <= $input && $input <= 255" : Value extends "uint16" ? "0 <= $input && $input <= 65535" : never;
210
+ exclusive: true;
211
+ schema: {
212
+ type: "integer";
213
+ };
214
+ }>;
215
+ /**
216
+  * By default, nested objects map to the `Nested` type in ClickHouse.
217
+ * Write `nestedObject: AnotherInterfaceType & ClickHouseNamedTuple`
218
+ * to map AnotherInterfaceType to the named tuple type.
219
+ */
220
+ type ClickHouseNamedTuple = {
221
+ _clickhouse_mapped_type?: "namedTuple";
222
+ };
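A small illustration of the mapping described above, using made-up interfaces: `shipping` keeps the default `Nested` mapping while `billing` is mapped to a named tuple.

```typescript
interface Address {
  city: string;
  zip: string;
}

interface Customer {
  name: string;
  shipping: Address;                       // default: ClickHouse Nested
  billing: Address & ClickHouseNamedTuple; // mapped to a named Tuple instead
}
```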
223
+ type ClickHouseJson<maxDynamicPaths extends number | undefined = undefined, maxDynamicTypes extends number | undefined = undefined, skipPaths extends string[] = [], skipRegexes extends string[] = []> = {
224
+ _clickhouse_mapped_type?: "JSON";
225
+ _clickhouse_json_settings?: {
226
+ maxDynamicPaths?: maxDynamicPaths;
227
+ maxDynamicTypes?: maxDynamicTypes;
228
+ skipPaths?: skipPaths;
229
+ skipRegexes?: skipRegexes;
230
+ };
231
+ };
232
+ type ClickHousePoint = [number, number] & {
233
+ _clickhouse_mapped_type?: "Point";
234
+ };
235
+ type ClickHouseRing = ClickHousePoint[] & {
236
+ _clickhouse_mapped_type?: "Ring";
237
+ };
238
+ type ClickHouseLineString = ClickHousePoint[] & {
239
+ _clickhouse_mapped_type?: "LineString";
240
+ };
241
+ type ClickHouseMultiLineString = ClickHouseLineString[] & {
242
+ _clickhouse_mapped_type?: "MultiLineString";
243
+ };
244
+ type ClickHousePolygon = ClickHouseRing[] & {
245
+ _clickhouse_mapped_type?: "Polygon";
246
+ };
247
+ type ClickHouseMultiPolygon = ClickHousePolygon[] & {
248
+ _clickhouse_mapped_type?: "MultiPolygon";
249
+ };
250
+ /**
251
+ * typia may have trouble handling this type.
252
+  * If it does, use {@link WithDefault} as a workaround.
253
+ *
254
+ * @example
255
+ * { field: number & ClickHouseDefault<"0"> }
256
+ */
257
+ type ClickHouseDefault<SqlExpression extends string> = {
258
+ _clickhouse_default?: SqlExpression;
259
+ };
260
+ /**
261
+ * @example
262
+ * {
263
+ * ...
264
+ * timestamp: Date;
265
+ * debugMessage: string & ClickHouseTTL<"timestamp + INTERVAL 1 WEEK">;
266
+ * }
267
+ */
268
+ type ClickHouseTTL<SqlExpression extends string> = {
269
+ _clickhouse_ttl?: SqlExpression;
270
+ };
271
+ /**
272
+ * ClickHouse MATERIALIZED column annotation.
273
+ * The column value is computed at INSERT time and physically stored.
274
+ * Cannot be explicitly inserted by users.
275
+ *
276
+ * @example
277
+ * interface Events {
278
+ * eventTime: DateTime;
279
+ * // Extract date component - computed and stored at insert time
280
+ * eventDate: Date & ClickHouseMaterialized<"toDate(event_time)">;
281
+ *
282
+ * userId: string;
283
+ * // Precompute hash for fast lookups
284
+ * userHash: UInt64 & ClickHouseMaterialized<"cityHash64(userId)">;
285
+ * }
286
+ *
287
+ * @remarks
288
+ * - MATERIALIZED and DEFAULT are mutually exclusive
289
+ * - Can be combined with ClickHouseCodec for compression
290
+ * - Changing the expression modifies the column in-place (existing values preserved)
291
+ */
292
+ type ClickHouseMaterialized<SqlExpression extends string> = {
293
+ _clickhouse_materialized?: SqlExpression;
294
+ };
295
+ /**
296
+ * See also {@link ClickHouseDefault}
297
+ *
298
+  * @example { updated_at: WithDefault<Date, "now()"> }
299
+ */
300
+ type WithDefault<T, _SqlExpression extends string> = T;
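A brief sketch showing both forms side by side; the interface and field names are illustrative. `WithDefault` is the fallback when typia cannot process the `ClickHouseDefault` intersection.

```typescript
interface AuditRow {
  id: string;
  retries: number & ClickHouseDefault<"0">;  // DEFAULT 0
  updatedAt: WithDefault<Date, "now()">;     // DEFAULT now()
}
```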
301
+ /**
302
+ * ClickHouse table engine types supported by Moose.
303
+ */
304
+ declare enum ClickHouseEngines {
305
+ MergeTree = "MergeTree",
306
+ ReplacingMergeTree = "ReplacingMergeTree",
307
+ SummingMergeTree = "SummingMergeTree",
308
+ AggregatingMergeTree = "AggregatingMergeTree",
309
+ CollapsingMergeTree = "CollapsingMergeTree",
310
+ VersionedCollapsingMergeTree = "VersionedCollapsingMergeTree",
311
+ GraphiteMergeTree = "GraphiteMergeTree",
312
+ S3Queue = "S3Queue",
313
+ S3 = "S3",
314
+ Buffer = "Buffer",
315
+ Distributed = "Distributed",
316
+ IcebergS3 = "IcebergS3",
317
+ Kafka = "Kafka",
318
+ ReplicatedMergeTree = "ReplicatedMergeTree",
319
+ ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
320
+ ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
321
+ ReplicatedSummingMergeTree = "ReplicatedSummingMergeTree",
322
+ ReplicatedCollapsingMergeTree = "ReplicatedCollapsingMergeTree",
323
+ ReplicatedVersionedCollapsingMergeTree = "ReplicatedVersionedCollapsingMergeTree"
324
+ }
325
+
326
+ /**
327
+ * Defines how Moose manages the lifecycle of database resources when your code changes.
328
+ *
329
+ * This enum controls the behavior when there are differences between your code definitions
330
+ * and the actual database schema or structure.
331
+ */
332
+ declare enum LifeCycle {
333
+ /**
334
+ * Full automatic management (default behavior).
335
+ * Moose will automatically modify database resources to match your code definitions,
336
+ * including potentially destructive operations like dropping columns or tables.
337
+ */
338
+ FULLY_MANAGED = "FULLY_MANAGED",
339
+ /**
340
+ * Deletion-protected automatic management.
341
+ * Moose will modify resources to match your code but will avoid destructive actions
342
+      * such as dropping columns or tables. Only additive changes are applied.
343
+ */
344
+ DELETION_PROTECTED = "DELETION_PROTECTED",
345
+ /**
346
+ * External management - no automatic changes.
347
+ * Moose will not modify the database resources. You are responsible for managing
348
+ * the schema and ensuring it matches your code definitions manually.
349
+ */
350
+ EXTERNALLY_MANAGED = "EXTERNALLY_MANAGED"
351
+ }
352
+
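A minimal sketch of pinning a table to additive-only migrations via `lifeCycle`, using the `OlapTable` class declared later in this file; the `Event` type and table name are illustrative.

```typescript
interface Event {
  id: string;
  createdAt: Date;
}

const events = new OlapTable<Event>("events", {
  orderByFields: ["id"],
  lifeCycle: LifeCycle.DELETION_PROTECTED, // Moose applies only additive schema changes
});
```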
353
+ interface TableIndex {
354
+ name: string;
355
+ expression: string;
356
+ type: string;
357
+ arguments?: string[];
358
+ granularity?: number;
359
+ }
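Illustrative data-skipping index definitions in this shape, as consumed by the `indexes` field of `BaseOlapConfig` below; the names, expressions, and parameters are placeholders.

```typescript
const exampleIndexes: TableIndex[] = [
  { name: "idx_url", expression: "url", type: "bloom_filter", arguments: ["0.01"], granularity: 4 },
  { name: "idx_viewed_at", expression: "viewedAt", type: "minmax", granularity: 1 },
];
```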
360
+ /**
361
+ * Represents a failed record during insertion with error details
362
+ */
363
+ interface FailedRecord<T> {
364
+ /** The original record that failed to insert */
365
+ record: T;
366
+ /** The error message describing why the insertion failed */
367
+ error: string;
368
+ /** Optional: The index of this record in the original batch */
369
+ index?: number;
370
+ }
371
+ /**
372
+ * Result of an insert operation with detailed success/failure information
373
+ */
374
+ interface InsertResult<T> {
375
+ /** Number of records successfully inserted */
376
+ successful: number;
377
+ /** Number of records that failed to insert */
378
+ failed: number;
379
+ /** Total number of records processed */
380
+ total: number;
381
+ /** Detailed information about failed records (if record isolation was used) */
382
+ failedRecords?: FailedRecord<T>[];
383
+ }
384
+ /**
385
+ * Error handling strategy for insert operations
386
+ */
387
+ type ErrorStrategy = "fail-fast" | "discard" | "isolate";
388
+ /**
389
+ * Options for insert operations
390
+ */
391
+ interface InsertOptions {
392
+ /** Maximum number of bad records to tolerate before failing */
393
+ allowErrors?: number;
394
+ /** Maximum ratio of bad records to tolerate (0.0 to 1.0) before failing */
395
+ allowErrorsRatio?: number;
396
+ /** Error handling strategy */
397
+ strategy?: ErrorStrategy;
398
+ /** Whether to enable dead letter queue for failed records (future feature) */
399
+ deadLetterQueue?: boolean;
400
+ /** Whether to validate data against schema before insertion (default: true) */
401
+ validate?: boolean;
402
+ /** Whether to skip validation for individual records during 'isolate' strategy retries (default: false) */
403
+ skipValidationOnRetry?: boolean;
404
+ }
405
+ /**
406
+ * Validation result for a record with detailed error information
407
+ */
408
+ interface ValidationError {
409
+ /** The original record that failed validation */
410
+ record: any;
411
+ /** Detailed validation error message */
412
+ error: string;
413
+ /** Optional: The index of this record in the original batch */
414
+ index?: number;
415
+ /** The path to the field that failed validation */
416
+ path?: string;
417
+ }
418
+ /**
419
+ * Result of data validation with success/failure breakdown
420
+ */
421
+ interface ValidationResult<T> {
422
+ /** Records that passed validation */
423
+ valid: T[];
424
+ /** Records that failed validation with detailed error information */
425
+ invalid: ValidationError[];
426
+ /** Total number of records processed */
427
+ total: number;
428
+ }
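A sketch of consuming a `ValidationResult`, assuming the `OlapTable.validateRecords` and `insert` methods declared later in this file; the helper name is hypothetical.

```typescript
async function insertOnlyValid<T>(table: OlapTable<T>, rows: unknown[]): Promise<InsertResult<T>> {
  const result: ValidationResult<T> = await table.validateRecords(rows);
  if (result.invalid.length > 0) {
    console.warn(`${result.invalid.length} of ${result.total} records failed validation`);
  }
  // Skip re-validation: these rows already passed validateRecords above.
  return table.insert(result.valid, { validate: false });
}
```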
429
+ /**
430
+ * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING
431
+ * Note: Since ClickHouse 24.7, settings no longer require the 's3queue_' prefix
432
+ */
433
+ interface S3QueueTableSettings {
434
+ /** Processing mode: "ordered" for sequential or "unordered" for parallel processing */
435
+ mode?: "ordered" | "unordered";
436
+ /** What to do with files after processing: 'keep' or 'delete' */
437
+ after_processing?: "keep" | "delete";
438
+ /** ZooKeeper/Keeper path for coordination between replicas */
439
+ keeper_path?: string;
440
+ /** Number of retry attempts for failed files */
441
+ loading_retries?: string;
442
+ /** Number of threads for parallel processing */
443
+ processing_threads_num?: string;
444
+ /** Enable parallel inserts */
445
+ parallel_inserts?: string;
446
+ /** Enable logging to system.s3queue_log table */
447
+ enable_logging_to_queue_log?: string;
448
+ /** Last processed file path (for ordered mode) */
449
+ last_processed_path?: string;
450
+ /** Maximum number of tracked files in ZooKeeper */
451
+ tracked_files_limit?: string;
452
+ /** TTL for tracked files in seconds */
453
+ tracked_file_ttl_sec?: string;
454
+ /** Minimum polling timeout in milliseconds */
455
+ polling_min_timeout_ms?: string;
456
+ /** Maximum polling timeout in milliseconds */
457
+ polling_max_timeout_ms?: string;
458
+ /** Polling backoff in milliseconds */
459
+ polling_backoff_ms?: string;
460
+ /** Minimum cleanup interval in milliseconds */
461
+ cleanup_interval_min_ms?: string;
462
+ /** Maximum cleanup interval in milliseconds */
463
+ cleanup_interval_max_ms?: string;
464
+ /** Number of buckets for sharding (0 = disabled) */
465
+ buckets?: string;
466
+ /** Batch size for listing objects */
467
+ list_objects_batch_size?: string;
468
+ /** Enable hash ring filtering for distributed processing */
469
+ enable_hash_ring_filtering?: string;
470
+ /** Maximum files to process before committing */
471
+ max_processed_files_before_commit?: string;
472
+ /** Maximum rows to process before committing */
473
+ max_processed_rows_before_commit?: string;
474
+ /** Maximum bytes to process before committing */
475
+ max_processed_bytes_before_commit?: string;
476
+ /** Maximum processing time in seconds before committing */
477
+ max_processing_time_sec_before_commit?: string;
478
+ /** Use persistent processing nodes (available from 25.8) */
479
+ use_persistent_processing_nodes?: string;
480
+ /** TTL for persistent processing nodes in seconds */
481
+ persistent_processing_nodes_ttl_seconds?: string;
482
+ /** Additional settings */
483
+ [key: string]: string | undefined;
484
+ }
485
+ /**
486
+ * Base configuration shared by all table engines
487
+ * @template T The data type of the records stored in the table.
488
+ */
489
+ type BaseOlapConfig<T> = ({
490
+ /**
491
+ * Specifies the fields to use for ordering data within the ClickHouse table.
492
+ * This is crucial for optimizing query performance.
493
+ */
494
+ orderByFields: (keyof T & string)[];
495
+ orderByExpression?: undefined;
496
+ } | {
497
+ orderByFields?: undefined;
498
+ /**
499
+ * An arbitrary ClickHouse SQL expression for the order by clause.
500
+ *
501
+ * `orderByExpression: "(id, name)"` is equivalent to `orderByFields: ["id", "name"]`
502
+ * `orderByExpression: "tuple()"` means no sorting
503
+ */
504
+ orderByExpression: string;
505
+ } | {
506
+ orderByFields?: undefined;
507
+ orderByExpression?: undefined;
508
+ }) & {
509
+ partitionBy?: string;
510
+ /**
511
+ * SAMPLE BY expression for approximate query processing.
512
+ *
513
+ * Examples:
514
+ * ```typescript
515
+ * // Single unsigned integer field
516
+ * sampleByExpression: "userId"
517
+ *
518
+ * // Hash function on any field type
519
+ * sampleByExpression: "cityHash64(id)"
520
+ *
521
+ * // Multiple fields with hash
522
+ * sampleByExpression: "cityHash64(userId, timestamp)"
523
+ * ```
524
+ *
525
+ * Requirements:
526
+ * - Expression must evaluate to an unsigned integer (UInt8/16/32/64)
527
+ * - Expression must be present in the ORDER BY clause
528
+ * - If using hash functions, the same expression must appear in orderByExpression
529
+ */
530
+ sampleByExpression?: string;
531
+ /**
532
+ * Optional PRIMARY KEY expression.
533
+ * When specified, this overrides the primary key inferred from Key<T> column annotations.
534
+ *
535
+ * This allows for:
536
+ * - Complex primary keys using functions (e.g., "cityHash64(id)")
537
+ * - Different column ordering in primary key vs schema definition
538
+ * - Primary keys that differ from ORDER BY
539
+ *
540
+ * Example: primaryKeyExpression: "(userId, cityHash64(eventId))"
541
+ *
542
+ * Note: When this is set, any Key<T> annotations on columns are ignored for PRIMARY KEY generation.
543
+ */
544
+ primaryKeyExpression?: string;
545
+ version?: string;
546
+ lifeCycle?: LifeCycle;
547
+ settings?: {
548
+ [key: string]: string;
549
+ };
550
+ /**
551
+ * Optional TTL configuration for the table.
552
+ * e.g., "TTL timestamp + INTERVAL 90 DAY DELETE"
553
+ *
554
+ * Use the {@link ClickHouseTTL} type to configure column level TTL
555
+ */
556
+ ttl?: string;
557
+ /** Optional secondary/data-skipping indexes */
558
+ indexes?: TableIndex[];
559
+ /**
560
+ * Optional database name for multi-database support.
561
+ * When not specified, uses the global ClickHouse config database.
562
+ */
563
+ database?: string;
564
+ /**
565
+ * Optional cluster name for ON CLUSTER support.
566
+ * Use this to enable replicated tables across ClickHouse clusters.
567
+ * The cluster must be defined in config.toml (dev environment only).
568
+ * Example: cluster: "prod_cluster"
569
+ */
570
+ cluster?: string;
571
+ };
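A sketch of the shared configuration fields in use on a plain MergeTree table; the record type, table name, and expressions are illustrative.

```typescript
interface PageView {
  sessionId: string;
  url: string;
  viewedAt: Date;
}

const pageViews = new OlapTable<PageView>("page_views", {
  engine: ClickHouseEngines.MergeTree,
  orderByFields: ["sessionId", "viewedAt"],
  partitionBy: "toYYYYMM(viewedAt)",
  settings: { index_granularity: "8192" },
});
```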
572
+ /**
573
+ * Configuration for MergeTree engine
574
+ * @template T The data type of the records stored in the table.
575
+ */
576
+ type MergeTreeConfig<T> = BaseOlapConfig<T> & {
577
+ engine: ClickHouseEngines.MergeTree;
578
+ };
579
+ /**
580
+ * Configuration for ReplacingMergeTree engine (deduplication)
581
+ * @template T The data type of the records stored in the table.
582
+ */
583
+ type ReplacingMergeTreeConfig<T> = BaseOlapConfig<T> & {
584
+ engine: ClickHouseEngines.ReplacingMergeTree;
585
+ ver?: keyof T & string;
586
+ isDeleted?: keyof T & string;
587
+ };
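A deduplication sketch: `ver` keeps the row with the highest version per ORDER BY key and `isDeleted` marks tombstone rows. The record type and field names are illustrative.

```typescript
interface UserProfile {
  userId: string;
  email: string;
  version: UInt64;
  deleted: UInt8;
}

const profiles = new OlapTable<UserProfile>("user_profiles", {
  engine: ClickHouseEngines.ReplacingMergeTree,
  orderByFields: ["userId"],
  ver: "version",
  isDeleted: "deleted",
});
```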
588
+ /**
589
+ * Configuration for AggregatingMergeTree engine
590
+ * @template T The data type of the records stored in the table.
591
+ */
592
+ type AggregatingMergeTreeConfig<T> = BaseOlapConfig<T> & {
593
+ engine: ClickHouseEngines.AggregatingMergeTree;
594
+ };
595
+ /**
596
+ * Configuration for SummingMergeTree engine
597
+ * @template T The data type of the records stored in the table.
598
+ */
599
+ type SummingMergeTreeConfig<T> = BaseOlapConfig<T> & {
600
+ engine: ClickHouseEngines.SummingMergeTree;
601
+ columns?: string[];
602
+ };
603
+ /**
604
+ * Configuration for CollapsingMergeTree engine
605
+ * @template T The data type of the records stored in the table.
606
+ */
607
+ type CollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {
608
+ engine: ClickHouseEngines.CollapsingMergeTree;
609
+ sign: keyof T & string;
610
+ };
611
+ /**
612
+ * Configuration for VersionedCollapsingMergeTree engine
613
+ * @template T The data type of the records stored in the table.
614
+ */
615
+ type VersionedCollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {
616
+ engine: ClickHouseEngines.VersionedCollapsingMergeTree;
617
+ sign: keyof T & string;
618
+ ver: keyof T & string;
619
+ };
620
+ interface ReplicatedEngineProperties {
621
+ keeperPath?: string;
622
+ replicaName?: string;
623
+ }
624
+ /**
625
+ * Configuration for ReplicatedMergeTree engine
626
+ * @template T The data type of the records stored in the table.
627
+ *
628
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
629
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
630
+ * provide both parameters or neither (to use server defaults).
631
+ */
632
+ type ReplicatedMergeTreeConfig<T> = Omit<MergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
633
+ engine: ClickHouseEngines.ReplicatedMergeTree;
634
+ };
635
+ /**
636
+ * Configuration for ReplicatedReplacingMergeTree engine
637
+ * @template T The data type of the records stored in the table.
638
+ *
639
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
640
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
641
+ * provide both parameters or neither (to use server defaults).
642
+ */
643
+ type ReplicatedReplacingMergeTreeConfig<T> = Omit<ReplacingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
644
+ engine: ClickHouseEngines.ReplicatedReplacingMergeTree;
645
+ };
646
+ /**
647
+ * Configuration for ReplicatedAggregatingMergeTree engine
648
+ * @template T The data type of the records stored in the table.
649
+ *
650
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
651
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
652
+ * provide both parameters or neither (to use server defaults).
653
+ */
654
+ type ReplicatedAggregatingMergeTreeConfig<T> = Omit<AggregatingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
655
+ engine: ClickHouseEngines.ReplicatedAggregatingMergeTree;
656
+ };
657
+ /**
658
+ * Configuration for ReplicatedSummingMergeTree engine
659
+ * @template T The data type of the records stored in the table.
660
+ *
661
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
662
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
663
+ * provide both parameters or neither (to use server defaults).
664
+ */
665
+ type ReplicatedSummingMergeTreeConfig<T> = Omit<SummingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
666
+ engine: ClickHouseEngines.ReplicatedSummingMergeTree;
667
+ };
668
+ /**
669
+ * Configuration for ReplicatedCollapsingMergeTree engine
670
+ * @template T The data type of the records stored in the table.
671
+ *
672
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
673
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
674
+ * provide both parameters or neither (to use server defaults).
675
+ */
676
+ type ReplicatedCollapsingMergeTreeConfig<T> = Omit<CollapsingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
677
+ engine: ClickHouseEngines.ReplicatedCollapsingMergeTree;
678
+ };
679
+ /**
680
+ * Configuration for ReplicatedVersionedCollapsingMergeTree engine
681
+ * @template T The data type of the records stored in the table.
682
+ *
683
+ * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,
684
+ * which manages replication automatically. For self-hosted with ClickHouse Keeper,
685
+ * provide both parameters or neither (to use server defaults).
686
+ */
687
+ type ReplicatedVersionedCollapsingMergeTreeConfig<T> = Omit<VersionedCollapsingMergeTreeConfig<T>, "engine"> & ReplicatedEngineProperties & {
688
+ engine: ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree;
689
+ };
690
+ /**
691
+ * Configuration for S3Queue engine - only non-alterable constructor parameters.
692
+ * S3Queue-specific settings like 'mode', 'keeper_path', etc. should be specified
693
+ * in the settings field, not here.
694
+ * @template T The data type of the records stored in the table.
695
+ */
696
+ type S3QueueConfig<T> = Omit<BaseOlapConfig<T>, "settings" | "orderByFields" | "partitionBy" | "sampleByExpression"> & {
697
+ engine: ClickHouseEngines.S3Queue;
698
+ /** S3 bucket path with wildcards (e.g., 's3://bucket/data/*.json') */
699
+ s3Path: string;
700
+ /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */
701
+ format: string;
702
+ /** AWS access key ID (optional, omit for NOSIGN/public buckets) */
703
+ awsAccessKeyId?: string;
704
+ /** AWS secret access key */
705
+ awsSecretAccessKey?: string;
706
+ /** Compression type (e.g., 'gzip', 'zstd') */
707
+ compression?: string;
708
+ /** Custom HTTP headers */
709
+ headers?: {
710
+ [key: string]: string;
711
+ };
712
+ /**
713
+ * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING.
714
+ * These settings control the behavior of the S3Queue engine.
715
+ */
716
+ settings?: S3QueueTableSettings;
717
+ };
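A sketch of an S3Queue-backed table that continuously ingests JSON files; the bucket path is a placeholder and the settings follow `S3QueueTableSettings` above.

```typescript
interface RawEvent {
  id: string;
  payload: string;
}

const s3Events = new OlapTable<RawEvent>("s3_events", {
  engine: ClickHouseEngines.S3Queue,
  s3Path: "s3://example-bucket/events/*.json",
  format: "JSONEachRow",
  settings: {
    mode: "unordered",
    after_processing: "keep",
  },
});
```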
718
+ /**
719
+ * Configuration for S3 engine
720
+ * Note: S3 engine supports ORDER BY clause, unlike S3Queue, Buffer, and Distributed engines
721
+ * @template T The data type of the records stored in the table.
722
+ */
723
+ type S3Config<T> = Omit<BaseOlapConfig<T>, "sampleByExpression"> & {
724
+ engine: ClickHouseEngines.S3;
725
+ /** S3 path (e.g., 's3://bucket/path/file.json') */
726
+ path: string;
727
+ /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */
728
+ format: string;
729
+ /** AWS access key ID (optional, omit for NOSIGN/public buckets) */
730
+ awsAccessKeyId?: string;
731
+ /** AWS secret access key */
732
+ awsSecretAccessKey?: string;
733
+ /** Compression type (e.g., 'gzip', 'zstd', 'auto') */
734
+ compression?: string;
735
+ /** Partition strategy (optional) */
736
+ partitionStrategy?: string;
737
+ /** Partition columns in data file (optional) */
738
+ partitionColumnsInDataFile?: string;
739
+ };
740
+ /**
741
+ * Configuration for Buffer engine
742
+ * @template T The data type of the records stored in the table.
743
+ */
744
+ type BufferConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
745
+ engine: ClickHouseEngines.Buffer;
746
+ /** Target database name for the destination table */
747
+ targetDatabase: string;
748
+ /** Target table name where data will be flushed */
749
+ targetTable: string;
750
+ /** Number of buffer layers (typically 16) */
751
+ numLayers: number;
752
+ /** Minimum time in seconds before flushing */
753
+ minTime: number;
754
+ /** Maximum time in seconds before flushing */
755
+ maxTime: number;
756
+ /** Minimum number of rows before flushing */
757
+ minRows: number;
758
+ /** Maximum number of rows before flushing */
759
+ maxRows: number;
760
+ /** Minimum bytes before flushing */
761
+ minBytes: number;
762
+ /** Maximum bytes before flushing */
763
+ maxBytes: number;
764
+ /** Optional: Flush time in seconds */
765
+ flushTime?: number;
766
+ /** Optional: Flush number of rows */
767
+ flushRows?: number;
768
+ /** Optional: Flush number of bytes */
769
+ flushBytes?: number;
770
+ };
771
+ /**
772
+ * Configuration for Distributed engine
773
+ * @template T The data type of the records stored in the table.
774
+ */
775
+ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
776
+ engine: ClickHouseEngines.Distributed;
777
+ /** Cluster name from the ClickHouse configuration */
778
+ cluster: string;
779
+ /** Database name on the cluster */
780
+ targetDatabase: string;
781
+ /** Table name on the cluster */
782
+ targetTable: string;
783
+ /** Optional: Sharding key expression for data distribution */
784
+ shardingKey?: string;
785
+ /** Optional: Policy name for data distribution */
786
+ policyName?: string;
787
+ };
788
+ /** Kafka table settings. See: https://clickhouse.com/docs/engines/table-engines/integrations/kafka */
789
+ interface KafkaTableSettings {
790
+ kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
791
+ kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
792
+ kafka_sasl_username?: string;
793
+ kafka_sasl_password?: string;
794
+ kafka_schema?: string;
795
+ kafka_num_consumers?: string;
796
+ kafka_max_block_size?: string;
797
+ kafka_skip_broken_messages?: string;
798
+ kafka_commit_every_batch?: string;
799
+ kafka_client_id?: string;
800
+ kafka_poll_timeout_ms?: string;
801
+ kafka_poll_max_batch_size?: string;
802
+ kafka_flush_interval_ms?: string;
803
+ kafka_consumer_reschedule_ms?: string;
804
+ kafka_thread_per_consumer?: string;
805
+ kafka_handle_error_mode?: "default" | "stream";
806
+ kafka_commit_on_select?: string;
807
+ kafka_max_rows_per_message?: string;
808
+ kafka_compression_codec?: string;
809
+ kafka_compression_level?: string;
810
+ }
811
+ /** Kafka engine for streaming data from Kafka topics. Additional settings go in `settings`. */
812
+ type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
813
+ engine: ClickHouseEngines.Kafka;
814
+ brokerList: string;
815
+ topicList: string;
816
+ groupName: string;
817
+ format: string;
818
+ settings?: KafkaTableSettings;
819
+ };
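A sketch of a Kafka-engine table reading JSON messages from a topic; the broker, topic, and consumer group names are placeholders.

```typescript
interface ClickEvent {
  userId: string;
  url: string;
}

const kafkaClicks = new OlapTable<ClickEvent>("kafka_clicks", {
  engine: ClickHouseEngines.Kafka,
  brokerList: "broker-1:9092",
  topicList: "clicks",
  groupName: "moose_clicks_consumer",
  format: "JSONEachRow",
  settings: { kafka_num_consumers: "1" },
});
```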
820
+ /**
821
+ * Configuration for IcebergS3 engine - read-only Iceberg table access
822
+ *
823
+ * Provides direct querying of Apache Iceberg tables stored on S3.
824
+ * Data is not copied; queries stream directly from Parquet/ORC files.
825
+ *
826
+ * @template T The data type of the records stored in the table.
827
+ *
828
+ * @example
829
+ * ```typescript
830
+ * const lakeEvents = new OlapTable<Event>("lake_events", {
831
+ * engine: ClickHouseEngines.IcebergS3,
832
+ * path: "s3://datalake/events/",
833
+ * format: "Parquet",
834
+ * awsAccessKeyId: mooseRuntimeEnv.get("AWS_ACCESS_KEY_ID"),
835
+ * awsSecretAccessKey: mooseRuntimeEnv.get("AWS_SECRET_ACCESS_KEY")
836
+ * });
837
+ * ```
838
+ *
839
+ * @remarks
840
+ * - IcebergS3 engine is read-only
841
+ * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses
842
+ * - Queries always see the latest Iceberg snapshot (with metadata cache)
843
+ */
844
+ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
845
+ engine: ClickHouseEngines.IcebergS3;
846
+ /** S3 path to Iceberg table root (e.g., 's3://bucket/warehouse/events/') */
847
+ path: string;
848
+ /** Data format - 'Parquet' or 'ORC' */
849
+ format: "Parquet" | "ORC";
850
+ /** AWS access key ID (optional, omit for NOSIGN/public buckets) */
851
+ awsAccessKeyId?: string;
852
+ /** AWS secret access key (optional) */
853
+ awsSecretAccessKey?: string;
854
+ /** Compression type (optional: 'gzip', 'zstd', 'auto') */
855
+ compression?: string;
856
+ };
857
+ /**
858
+ * Legacy configuration (backward compatibility) - defaults to MergeTree engine
859
+ * @template T The data type of the records stored in the table.
860
+ */
861
+ type LegacyOlapConfig<T> = BaseOlapConfig<T>;
862
+ type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | CollapsingMergeTreeConfig<T> | VersionedCollapsingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | ReplicatedCollapsingMergeTreeConfig<T> | ReplicatedVersionedCollapsingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T> | KafkaConfig<T>;
863
+ /**
864
+  * Union of all engine-specific configurations (new API) and the legacy MergeTree config
865
+ * @template T The data type of the records stored in the table.
866
+ */
867
+ type OlapConfig<T> = EngineConfig<T> | LegacyOlapConfig<T>;
868
+ /**
869
+ * Represents an OLAP (Online Analytical Processing) table, typically corresponding to a ClickHouse table.
870
+ * Provides a typed interface for interacting with the table.
871
+ *
872
+ * @template T The data type of the records stored in the table. The structure of T defines the table schema.
873
+ */
874
+ declare class OlapTable<T> extends TypedBase<T, OlapConfig<T>> {
875
+ name: IdentifierBrandedString;
876
+ /** @internal */
877
+ readonly kind = "OlapTable";
878
+ /** @internal Memoized ClickHouse client for reusing connections across insert calls */
879
+ private _memoizedClient?;
880
+ /** @internal Hash of the configuration used to create the memoized client */
881
+ private _configHash?;
882
+ /** @internal Cached table name to avoid repeated generation */
883
+ private _cachedTableName?;
884
+ /**
885
+ * Creates a new OlapTable instance.
886
+ * @param name The name of the table. This name is used for the underlying ClickHouse table.
887
+ * @param config Optional configuration for the OLAP table.
888
+ */
889
+ constructor(name: string, config?: OlapConfig<T>);
890
+ /** @internal **/
891
+ constructor(name: string, config: OlapConfig<T>, schema: IJsonSchemaCollection$1.IV3_1, columns: Column[], validators?: TypiaValidators<T>);
892
+ /**
893
+ * Generates the versioned table name following Moose's naming convention
894
+ * Format: {tableName}_{version_with_dots_replaced_by_underscores}
895
+ */
896
+ private generateTableName;
897
+ /**
898
+ * Creates a fast hash of the ClickHouse configuration.
899
+ * Uses crypto.createHash for better performance than JSON.stringify.
900
+ *
901
+ * @private
902
+ */
903
+ private createConfigHash;
904
+ /**
905
+ * Gets or creates a memoized ClickHouse client.
906
+ * The client is cached and reused across multiple insert calls for better performance.
907
+ * If the configuration changes, a new client will be created.
908
+ *
909
+ * @private
910
+ */
911
+ private getMemoizedClient;
912
+ /**
913
+ * Closes the memoized ClickHouse client if it exists.
914
+ * This is useful for cleaning up connections when the table instance is no longer needed.
915
+ * The client will be automatically recreated on the next insert call if needed.
916
+ */
917
+ closeClient(): Promise<void>;
918
+ /**
919
+ * Validates a single record using typia's comprehensive type checking.
920
+ * This provides the most accurate validation as it uses the exact TypeScript type information.
921
+ *
922
+ * @param record The record to validate
923
+ * @returns Validation result with detailed error information
924
+ */
925
+ validateRecord(record: unknown): {
926
+ success: boolean;
927
+ data?: T;
928
+ errors?: string[];
929
+ };
930
+ /**
931
+ * Type guard function using typia's is() function.
932
+ * Provides compile-time type narrowing for TypeScript.
933
+ *
934
+ * @param record The record to check
935
+ * @returns True if record matches type T, with type narrowing
936
+ */
937
+ isValidRecord(record: unknown): record is T;
938
+ /**
939
+ * Assert that a record matches type T, throwing detailed errors if not.
940
+ * Uses typia's assert() function for the most detailed error reporting.
941
+ *
942
+ * @param record The record to assert
943
+ * @returns The validated and typed record
944
+ * @throws Detailed validation error if record doesn't match type T
945
+ */
946
+ assertValidRecord(record: unknown): T;
947
+ /**
948
+ * Validates an array of records with comprehensive error reporting.
949
+ * Uses the most appropriate validation method available (typia or basic).
950
+ *
951
+ * @param data Array of records to validate
952
+ * @returns Detailed validation results
953
+ */
954
+ validateRecords(data: unknown[]): Promise<ValidationResult<T>>;
955
+ /**
956
+ * Optimized batch retry that minimizes individual insert operations.
957
+ * Groups records into smaller batches to reduce round trips while still isolating failures.
958
+ *
959
+ * @private
960
+ */
961
+ private retryIndividualRecords;
962
+ /**
963
+ * Validates input parameters and strategy compatibility
964
+ * @private
965
+ */
966
+ private validateInsertParameters;
967
+ /**
968
+ * Handles early return cases for empty data
969
+ * @private
970
+ */
971
+ private handleEmptyData;
972
+ /**
973
+ * Performs pre-insertion validation for array data
974
+ * @private
975
+ */
976
+ private performPreInsertionValidation;
977
+ /**
978
+ * Handles validation errors based on the specified strategy
979
+ * @private
980
+ */
981
+ private handleValidationErrors;
982
+ /**
983
+ * Checks if validation errors exceed configured thresholds
984
+ * @private
985
+ */
986
+ private checkValidationThresholds;
987
+ /**
988
+ * Optimized insert options preparation with better memory management
989
+ * @private
990
+ */
991
+ private prepareInsertOptions;
992
+ /**
993
+ * Creates success result for completed insertions
994
+ * @private
995
+ */
996
+ private createSuccessResult;
997
+ /**
998
+ * Handles insertion errors based on the specified strategy
999
+ * @private
1000
+ */
1001
+ private handleInsertionError;
1002
+ /**
1003
+ * Handles the isolate strategy for insertion errors
1004
+ * @private
1005
+ */
1006
+ private handleIsolateStrategy;
1007
+ /**
1008
+ * Checks if insertion errors exceed configured thresholds
1009
+ * @private
1010
+ */
1011
+ private checkInsertionThresholds;
1012
+ /**
1013
+ * Recursively transforms a record to match ClickHouse's JSONEachRow requirements
1014
+ *
1015
+ * - For every Array(Nested(...)) field at any depth, each item is wrapped in its own array and recursively processed.
1016
+ * - For every Nested struct (not array), it recurses into the struct.
1017
+ * - This ensures compatibility with kafka_clickhouse_sync
1018
+ *
1019
+ * @param record The input record to transform (may be deeply nested)
1020
+ * @param columns The schema columns for this level (defaults to this.columnArray at the top level)
1021
+ * @returns The transformed record, ready for ClickHouse JSONEachRow insertion
1022
+ */
1023
+ private mapToClickhouseRecord;
1024
+ /**
1025
+ * Inserts data directly into the ClickHouse table with enhanced error handling and validation.
1026
+ * This method establishes a direct connection to ClickHouse using the project configuration
1027
+ * and inserts the provided data into the versioned table.
1028
+ *
1029
+ * PERFORMANCE OPTIMIZATIONS:
1030
+ * - Memoized client connections with fast config hashing
1031
+ * - Single-pass validation with pre-allocated arrays
1032
+ * - Batch-optimized retry strategy (batches of 10, then individual)
1033
+ * - Optimized ClickHouse settings for large datasets
1034
+ * - Reduced memory allocations and object creation
1035
+ *
1036
+ * Uses advanced typia validation when available for comprehensive type checking,
1037
+ * with fallback to basic validation for compatibility.
1038
+ *
1039
+ * The ClickHouse client is memoized and reused across multiple insert calls for better performance.
1040
+ * If the configuration changes, a new client will be automatically created.
1041
+ *
1042
+ * @param data Array of objects conforming to the table schema, or a Node.js Readable stream
1043
+ * @param options Optional configuration for error handling, validation, and insertion behavior
1044
+ * @returns Promise resolving to detailed insertion results
1045
+ * @throws {ConfigError} When configuration cannot be read or parsed
1046
+ * @throws {ClickHouseError} When insertion fails based on the error strategy
1047
+ * @throws {ValidationError} When validation fails and strategy is 'fail-fast'
1048
+ *
1049
+ * @example
1050
+ * ```typescript
1051
+ * // Create an OlapTable instance (typia validators auto-injected)
1052
+ * const userTable = new OlapTable<User>('users');
1053
+ *
1054
+ * // Insert with comprehensive typia validation
1055
+ * const result1 = await userTable.insert([
1056
+ * { id: 1, name: 'John', email: 'john@example.com' },
1057
+ * { id: 2, name: 'Jane', email: 'jane@example.com' }
1058
+ * ]);
1059
+ *
1060
+ * // Insert data with stream input (validation not available for streams)
1061
+ * const dataStream = new Readable({
1062
+ * objectMode: true,
1063
+      *   read() { /* Stream implementation *&#47; }
1064
+ * });
1065
+ * const result2 = await userTable.insert(dataStream, { strategy: 'fail-fast' });
1066
+ *
1067
+ * // Insert with validation disabled for performance
1068
+ * const result3 = await userTable.insert(data, { validate: false });
1069
+ *
1070
+ * // Insert with error handling strategies
1071
+ * const result4 = await userTable.insert(mixedData, {
1072
+ * strategy: 'isolate',
1073
+ * allowErrorsRatio: 0.1,
1074
+ * validate: true // Use typia validation (default)
1075
+ * });
1076
+ *
1077
+ * // Optional: Clean up connection when completely done
1078
+ * await userTable.closeClient();
1079
+ * ```
1080
+ */
1081
+ insert(data: T[] | Readable, options?: InsertOptions): Promise<InsertResult<T>>;
1082
+ }
1083
+
1084
+ /**
1085
+ * @fileoverview Stream SDK for data streaming operations in Moose.
1086
+ *
1087
+ * This module provides the core streaming functionality including:
1088
+ * - Stream creation and configuration
1089
+ * - Message transformations between streams
1090
+ * - Consumer registration for message processing
1091
+ * - Dead letter queue handling for error recovery
1092
+ *
1093
+ * @module Stream
1094
+ */
1095
+
1096
+ /**
1097
+ * Represents zero, one, or many values of type T.
1098
+ * Used for flexible return types in transformations where a single input
1099
+ * can produce no output, one output, or multiple outputs.
1100
+ *
1101
+ * @template T The type of the value(s)
1102
+ * @example
1103
+ * ```typescript
1104
+ * // Can return a single value
1105
+ * const single: ZeroOrMany<string> = "hello";
1106
+ *
1107
+ * // Can return an array
1108
+ * const multiple: ZeroOrMany<string> = ["hello", "world"];
1109
+ *
1110
+ * // Can return null/undefined to filter out
1111
+ * const filtered: ZeroOrMany<string> = null;
1112
+ * ```
1113
+ */
1114
+ type ZeroOrMany<T> = T | T[] | undefined | null;
1115
+ /**
1116
+ * Function type for transforming records from one type to another.
1117
+ * Supports both synchronous and asynchronous transformations.
1118
+ *
1119
+ * @template T The input record type
1120
+ * @template U The output record type
1121
+ * @param record The input record to transform
1122
+ * @returns The transformed record(s), or null/undefined to filter out
1123
+ *
1124
+ * @example
1125
+ * ```typescript
1126
+ * const transform: SyncOrAsyncTransform<InputType, OutputType> = (record) => {
1127
+ * return { ...record, processed: true };
1128
+ * };
1129
+ * ```
1130
+ */
1131
+ type SyncOrAsyncTransform<T, U> = (record: T) => ZeroOrMany<U> | Promise<ZeroOrMany<U>>;
1132
+ /**
1133
+ * Function type for consuming records without producing output.
1134
+ * Used for side effects like logging, external API calls, or database writes.
1135
+ *
1136
+ * @template T The record type to consume
1137
+ * @param record The record to process
1138
+ * @returns Promise<void> or void
1139
+ *
1140
+ * @example
1141
+ * ```typescript
1142
+ * const consumer: Consumer<UserEvent> = async (event) => {
1143
+ * await sendToAnalytics(event);
1144
+ * };
1145
+ * ```
1146
+ */
1147
+ type Consumer<T> = (record: T) => Promise<void> | void;
1148
+ /**
1149
+ * Configuration options for stream transformations.
1150
+ *
1151
+ * @template T The type of records being transformed
1152
+ */
1153
+ interface TransformConfig<T> {
1154
+ /**
1155
+ * Optional version identifier for this transformation.
1156
+ * Multiple transformations to the same destination can coexist with different versions.
1157
+ */
1158
+ version?: string;
1159
+ /**
1160
+ * Optional metadata for documentation and tracking purposes.
1161
+ */
1162
+ metadata?: {
1163
+ description?: string;
1164
+ };
1165
+ /**
1166
+ * Optional dead letter queue for handling transformation failures.
1167
+ * Failed records will be sent to this queue for manual inspection or reprocessing.
1168
+ * Uses {@link Stream.defaultDeadLetterQueue} by default
1169
+      * unless a DeadLetterQueue is provided or it is explicitly disabled with a null value.
1170
+ */
1171
+ deadLetterQueue?: DeadLetterQueue<T> | null;
1172
+ /**
1173
+ * @internal Source file path where this transform was declared.
1174
+ * Automatically captured from stack trace.
1175
+ */
1176
+ sourceFile?: string;
1177
+ }
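A sketch of passing a `TransformConfig` to `Stream.addTransform` (declared below), routing transformation failures to a dead letter queue; the streams and record types are illustrative.

```typescript
interface RawOrder { id: string; amountCents: number; }
interface Order { id: string; amount: number; }

const rawOrders = new Stream<RawOrder>("raw_orders");
const orders = new Stream<Order>("orders");
const ordersDlq = new DeadLetterQueue<RawOrder>("orders_dlq");

rawOrders.addTransform(
  orders,
  (raw) => ({ id: raw.id, amount: raw.amountCents / 100 }),
  { version: "1.0", deadLetterQueue: ordersDlq },
);
```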
1178
+ /**
1179
+ * Configuration options for stream consumers.
1180
+ *
1181
+ * @template T The type of records being consumed
1182
+ */
1183
+ interface ConsumerConfig<T> {
1184
+ /**
1185
+ * Optional version identifier for this consumer.
1186
+ * Multiple consumers can coexist with different versions.
1187
+ */
1188
+ version?: string;
1189
+ /**
1190
+ * Optional dead letter queue for handling consumer failures.
1191
+ * Failed records will be sent to this queue for manual inspection or reprocessing.
1192
+ * Uses {@link Stream.defaultDeadLetterQueue} by default
1193
+      * unless a DeadLetterQueue is provided or it is explicitly disabled with a null value.
1194
+ */
1195
+ deadLetterQueue?: DeadLetterQueue<T> | null;
1196
+ /**
1197
+ * @internal Source file path where this consumer was declared.
1198
+ * Automatically captured from stack trace.
1199
+ */
1200
+ sourceFile?: string;
1201
+ }
1202
+ type SchemaRegistryEncoding = "JSON" | "AVRO" | "PROTOBUF";
1203
+ type SchemaRegistryReference = {
1204
+ id: number;
1205
+ } | {
1206
+ subjectLatest: string;
1207
+ } | {
1208
+ subject: string;
1209
+ version: number;
1210
+ };
1211
+ interface KafkaSchemaConfig {
1212
+ kind: SchemaRegistryEncoding;
1213
+ reference: SchemaRegistryReference;
1214
+ }
1215
+ /**
1216
+ * Represents a message routed to a specific destination stream.
1217
+ * Used internally by the multi-transform functionality to specify
1218
+ * where transformed messages should be sent.
1219
+ *
1220
+ * @internal
1221
+ */
1222
+ declare class RoutedMessage {
1223
+ /** The destination stream for the message */
1224
+ destination: Stream<any>;
1225
+ /** The message value(s) to send */
1226
+ values: ZeroOrMany<any>;
1227
+ /**
1228
+ * Creates a new routed message.
1229
+ *
1230
+ * @param destination The target stream
1231
+ * @param values The message(s) to route
1232
+ */
1233
+ constructor(destination: Stream<any>, values: ZeroOrMany<any>);
1234
+ }
1235
+ /**
1236
+ * Configuration options for a data stream (e.g., a Redpanda topic).
1237
+ * @template T The data type of the messages in the stream.
1238
+ */
1239
+ interface StreamConfig<T> {
1240
+ /**
1241
+ * Specifies the number of partitions for the stream. Affects parallelism and throughput.
1242
+ */
1243
+ parallelism?: number;
1244
+ /**
1245
+ * Specifies the data retention period for the stream in seconds. Messages older than this may be deleted.
1246
+ */
1247
+ retentionPeriod?: number;
1248
+ /**
1249
+ * An optional destination OLAP table where messages from this stream should be automatically ingested.
1250
+ */
1251
+ destination?: OlapTable<T>;
1252
+ /**
1253
+ * An optional version string for this configuration. Can be used for tracking changes or managing deployments.
1254
+ */
1255
+ version?: string;
1256
+ metadata?: {
1257
+ description?: string;
1258
+ };
1259
+ lifeCycle?: LifeCycle;
1260
+ defaultDeadLetterQueue?: DeadLetterQueue<T>;
1261
+ /** Optional Schema Registry configuration for this stream */
1262
+ schemaConfig?: KafkaSchemaConfig;
1263
+ }
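A sketch of a stream configured to auto-ingest into an OLAP table via `destination`; the names and numbers are illustrative.

```typescript
interface Signup {
  userId: string;
  signedUpAt: Date;
}

const signupsTable = new OlapTable<Signup>("signups", {
  orderByFields: ["userId"],
});

const signups = new Stream<Signup>("signups", {
  parallelism: 4,
  retentionPeriod: 60 * 60 * 24 * 7, // seven days, in seconds
  destination: signupsTable,
});
```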
1264
+ /**
1265
+ * Represents a data stream, typically corresponding to a Redpanda topic.
1266
+ * Provides a typed interface for producing to and consuming from the stream, and defining transformations.
1267
+ *
1268
+ * @template T The data type of the messages flowing through the stream. The structure of T defines the message schema.
1269
+ */
1270
+ declare class Stream<T> extends TypedBase<T, StreamConfig<T>> {
1271
+ defaultDeadLetterQueue?: DeadLetterQueue<T>;
1272
+ /** @internal Memoized KafkaJS producer for reusing connections across sends */
1273
+ private _memoizedProducer?;
1274
+ /** @internal Hash of the configuration used to create the memoized Kafka producer */
1275
+ private _kafkaConfigHash?;
1276
+ /**
1277
+ * Creates a new Stream instance.
1278
+ * @param name The name of the stream. This name is used for the underlying Redpanda topic.
1279
+ * @param config Optional configuration for the stream.
1280
+ */
1281
+ constructor(name: string, config?: StreamConfig<T>);
1282
+ /**
1283
+ * @internal
1284
+ * Note: `validators` parameter is a positional placeholder (always undefined for Stream).
1285
+ * It exists because TypedBase has validators as the 5th param, and we need to pass
1286
+ * allowExtraFields as the 6th param. Stream doesn't use validators.
1287
+ */
1288
+ constructor(name: string, config: StreamConfig<T>, schema: IJsonSchemaCollection$1.IV3_1, columns: Column[], validators: undefined, allowExtraFields: boolean);
1289
+ /**
1290
+ * Internal map storing transformation configurations.
1291
+ * Maps destination stream names to arrays of transformation functions and their configs.
1292
+ *
1293
+ * @internal
1294
+ */
1295
+ _transformations: Map<string, [Stream<any>, SyncOrAsyncTransform<T, any>, TransformConfig<T>][]>;
1296
+ /**
1297
+ * Internal function for multi-stream transformations.
1298
+ * Allows a single transformation to route messages to multiple destinations.
1299
+ *
1300
+ * @internal
1301
+ */
1302
+ _multipleTransformations?: (record: T) => [RoutedMessage];
1303
+ /**
1304
+ * Internal array storing consumer configurations.
1305
+ *
1306
+ * @internal
1307
+ */
1308
+ _consumers: {
1309
+ consumer: Consumer<T>;
1310
+ config: ConsumerConfig<T>;
1311
+ }[];
1312
+ /**
1313
+ * Builds the full Kafka topic name including optional namespace and version suffix.
1314
+ * Version suffix is appended as _x_y_z where dots in version are replaced with underscores.
1315
+ */
1316
+ private buildFullTopicName;
1317
+ /**
1318
+ * Creates a fast hash string from relevant Kafka configuration fields.
1319
+ */
1320
+ private createConfigHash;
1321
+ /**
1322
+ * Gets or creates a memoized KafkaJS producer using runtime configuration.
1323
+ */
1324
+ private getMemoizedProducer;
1325
+ /**
1326
+ * Closes the memoized Kafka producer if it exists.
1327
+ */
1328
+ closeProducer(): Promise<void>;
1329
+ /**
1330
+ * Sends one or more records to this stream's Kafka topic.
1331
+      * Values are JSON-serialized and sent as the message values.
1332
+ */
1333
+ send(values: ZeroOrMany<T>): Promise<void>;
1334
+ /**
1335
+ * Adds a transformation step that processes messages from this stream and sends the results to a destination stream.
1336
+ * Multiple transformations to the same destination stream can be added if they have distinct `version` identifiers in their config.
1337
+ *
1338
+ * @template U The data type of the messages in the destination stream.
1339
+ * @param destination The destination stream for the transformed messages.
1340
+ * @param transformation A function that takes a message of type T and returns zero or more messages of type U (or a Promise thereof).
1341
+ * Return `null` or `undefined` or an empty array `[]` to filter out a message. Return an array to emit multiple messages.
1342
+ * @param config Optional configuration for this specific transformation step, like a version.
1343
+ */
1344
+ addTransform<U>(destination: Stream<U>, transformation: SyncOrAsyncTransform<T, U>, config?: TransformConfig<T>): void;
1345
+ /**
1346
+ * Adds a consumer function that processes messages from this stream.
1347
+ * Multiple consumers can be added if they have distinct `version` identifiers in their config.
1348
+ *
1349
+ * @param consumer A function that takes a message of type T and performs an action (e.g., side effect, logging). Should return void or Promise<void>.
1350
+ * @param config Optional configuration for this specific consumer, like a version.
1351
+ */
1352
+ addConsumer(consumer: Consumer<T>, config?: ConsumerConfig<T>): void;
1353
+ /**
1354
+ * Helper method for `addMultiTransform` to specify the destination and values for a routed message.
1355
+ * @param values The value or values to send to this stream.
1356
+ * @returns A `RoutedMessage` object associating the values with this stream.
1357
+ *
1358
+ * @example
1359
+ * ```typescript
1360
+ * sourceStream.addMultiTransform((record) => [
1361
+ * destinationStream1.routed(transformedRecord1),
1362
+ * destinationStream2.routed([record2a, record2b])
1363
+ * ]);
1364
+ * ```
1365
+ */
1366
+ routed: (values: ZeroOrMany<T>) => RoutedMessage;
1367
+ /**
1368
+ * Adds a single transformation function that can route messages to multiple destination streams.
1369
+ * This is an alternative to adding multiple individual `addTransform` calls.
1370
+ * Only one multi-transform function can be added per stream.
1371
+ *
1372
+ * @param transformation A function that takes a message of type T and returns an array of `RoutedMessage` objects,
1373
+ * each specifying a destination stream and the message(s) to send to it.
1374
+ */
1375
+ addMultiTransform(transformation: (record: T) => RoutedMessage[]): void;
1376
+ }
1377
+ /**
1378
+ * Base model for dead letter queue entries.
1379
+ * Contains the original failed record along with error information.
1380
+ */
1381
+ interface DeadLetterModel {
1382
+ /** The original record that failed processing */
1383
+ originalRecord: Record<string, any>;
1384
+ /** Human-readable error message describing the failure */
1385
+ errorMessage: string;
1386
+ /** Classification of the error type (e.g., "ValidationError", "TransformError") */
1387
+ errorType: string;
1388
+ /** Timestamp when the failure occurred */
1389
+ failedAt: Date;
1390
+ /** The source component where the failure occurred */
1391
+ source: "api" | "transform" | "table";
1392
+ }
1393
+ /**
1394
+ * Enhanced dead letter model with type recovery functionality.
1395
+ * Extends the base model with the ability to recover the original typed record.
1396
+ *
1397
+ * @template T The original record type before failure
1398
+ */
1399
+ interface DeadLetter<T> extends DeadLetterModel {
1400
+ /**
1401
+ * Recovers the original record as its typed form.
1402
+ * Useful for reprocessing failed records with proper type safety.
1403
+ *
1404
+ * @returns The original record cast to type T
1405
+ */
1406
+ asTyped: () => T;
1407
+ }
1408
+ /**
1409
+ * Specialized stream for handling failed records (dead letters).
1410
+ * Provides type-safe access to failed records for reprocessing or analysis.
1411
+ *
1412
+ * @template T The original record type that failed processing
1413
+ *
1414
+ * @example
1415
+ * ```typescript
1416
+ * const dlq = new DeadLetterQueue<UserEvent>("user-events-dlq");
1417
+ *
1418
+ * dlq.addConsumer(async (deadLetter) => {
1419
+ * const originalEvent = deadLetter.asTyped();
1420
+ * console.log(`Failed event: ${deadLetter.errorMessage}`);
1421
+ * // Potentially reprocess or alert
1422
+ * });
1423
+ * ```
1424
+ */
1425
+ declare class DeadLetterQueue<T> extends Stream<DeadLetterModel> {
1426
+ /**
1427
+ * Creates a new DeadLetterQueue instance.
1428
+ * @param name The name of the dead letter queue stream
1429
+ * @param config Optional configuration for the stream. The metadata property is always present and includes stackTrace.
1430
+ */
1431
+ constructor(name: string, config?: StreamConfig<DeadLetterModel>);
1432
+ /** @internal **/
1433
+ constructor(name: string, config: StreamConfig<DeadLetterModel>, validate: (originalRecord: any) => T);
1434
+ /**
1435
+ * Internal type guard function for validating and casting original records.
1436
+ *
1437
+ * @internal
1438
+ */
1439
+ private typeGuard;
1440
+ /**
1441
+ * Adds a transformation step for dead letter records.
1442
+ * The transformation function receives a DeadLetter<T> with type recovery capabilities.
1443
+ *
1444
+ * @template U The output type for the transformation
1445
+ * @param destination The destination stream for transformed messages
1446
+ * @param transformation Function to transform dead letter records
1447
+ * @param config Optional transformation configuration
1448
+ */
1449
+ addTransform<U>(destination: Stream<U>, transformation: SyncOrAsyncTransform<DeadLetter<T>, U>, config?: TransformConfig<DeadLetterModel>): void;
1450
+ /**
1451
+ * Adds a consumer for dead letter records.
1452
+ * The consumer function receives a DeadLetter<T> with type recovery capabilities.
1453
+ *
1454
+ * @param consumer Function to process dead letter records
1455
+ * @param config Optional consumer configuration
1456
+ */
1457
+ addConsumer(consumer: Consumer<DeadLetter<T>>, config?: ConsumerConfig<DeadLetterModel>): void;
1458
+ /**
1459
+ * Adds a multi-stream transformation for dead letter records.
1460
+ * The transformation function receives a DeadLetter<T> with type recovery capabilities.
1461
+ *
1462
+ * @param transformation Function to route dead letter records to multiple destinations
1463
+ */
1464
+ addMultiTransform(transformation: (record: DeadLetter<T>) => RoutedMessage[]): void;
1465
+ }
1466
+
1467
+ /**
+ * Context passed to task handlers. A single parameter object is used so the API can evolve without breaking signatures.
+ *
+ * - state: shared mutable state for the task and its lifecycle hooks
+ * - input: the task's typed input. If the task declares an input type (T != null),
+ *   `input` is required and strongly typed; for no-input tasks (T = null), `input` is omitted/optional.
+ */
1478
+ type TaskContext<TInput> = TInput extends null ? {
1479
+ state: any;
1480
+ input?: null;
1481
+ } : {
1482
+ state: any;
1483
+ input: TInput;
1484
+ };
1485
+ /**
1486
+ * Configuration options for defining a task within a workflow.
1487
+ *
1488
+ * @template T - The input type for the task
1489
+ * @template R - The return type for the task
1490
+ */
1491
+ interface TaskConfig<T, R> {
1492
+ /** The main function that executes the task logic */
1493
+ run: (context: TaskContext<T>) => Promise<R>;
1494
+ /**
1495
+ * Optional array of tasks to execute after this task completes successfully.
1496
+ * Supports all combinations of input types (real type or null) and output types (real type or void).
1497
+ * When this task returns void, onComplete tasks expect null as input.
1498
+ * When this task returns a real type, onComplete tasks expect that type as input.
1499
+ */
1500
+ onComplete?: (Task<R extends void ? null : R, any> | Task<R extends void ? null : R, void>)[];
1501
+ /** Optional function that is called when the task is cancelled. */
1505
+ onCancel?: (context: TaskContext<T>) => Promise<void>;
1506
+ /** Optional timeout duration for the task execution (e.g., "30s", "5m") */
1507
+ timeout?: string;
1508
+ /** Optional number of retry attempts if the task fails */
1509
+ retries?: number;
1510
+ }
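+ /**
+ * Sketch of an onComplete chain (hypothetical task names and types). The return value of
+ * the first task becomes the input of the tasks listed in its `onComplete` array:
+ * ```typescript
+ * const load = new Task<string[], void>("load", {
+ *   run: async ({ input }) => {
+ *     console.log(`loading ${input.length} rows`);
+ *   },
+ * });
+ *
+ * const extract = new Task<null, string[]>("extract", {
+ *   run: async () => ["a", "b", "c"],
+ *   onComplete: [load],   // `load` receives extract's return value as its input
+ *   retries: 2,
+ *   timeout: "30s",
+ * });
+ * ```
+ */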
1511
+ /**
1512
+ * Represents a single task within a workflow system.
1513
+ *
1514
+ * A Task encapsulates the execution logic, completion handlers, and configuration
1515
+ * for a unit of work that can be chained with other tasks in a workflow.
1516
+ *
1517
+ * @template T - The input type that this task expects
1518
+ * @template R - The return type that this task produces
1519
+ */
1520
+ declare class Task<T, R> {
1521
+ readonly name: string;
1522
+ readonly config: TaskConfig<T, R>;
1523
+ /**
1524
+ * Creates a new Task instance.
1525
+ *
1526
+ * @param name - Unique identifier for the task
1527
+ * @param config - Configuration object defining the task behavior
1528
+ *
1529
+ * @example
1530
+ * ```typescript
1531
+ * // No input, no output
1532
+ * const task1 = new Task<null, void>("task1", {
1533
+ * run: async () => {
1534
+ * console.log("No input/output");
1535
+ * }
1536
+ * });
1537
+ *
1538
+ * // No input, but has output
1539
+ * const task2 = new Task<null, OutputType>("task2", {
1540
+ * run: async () => {
1541
+ * return someOutput;
1542
+ * }
1543
+ * });
1544
+ *
1545
+ * // Has input, no output
1546
+ * const task3 = new Task<InputType, void>("task3", {
1547
+ * run: async ({ input }) => {
1548
+ * // process input but return nothing
1549
+ * }
1550
+ * });
1551
+ *
1552
+ * // Has both input and output
1553
+ * const task4 = new Task<InputType, OutputType>("task4", {
1554
+ * run: async ({ input }) => {
1555
+ * return process(input);
1556
+ * }
1557
+ * });
1558
+ * ```
1559
+ */
1560
+ constructor(name: string, config: TaskConfig<T, R>);
1561
+ }
1562
+ /**
1563
+ * Configuration options for defining a workflow.
1564
+ *
1565
+ * A workflow orchestrates the execution of multiple tasks in a defined sequence
1566
+ * or pattern, with support for scheduling, retries, and timeouts.
1567
+ */
1568
+ interface WorkflowConfig {
1569
+ /**
1570
+ * The initial task that begins the workflow execution.
1571
+ * Supports all combinations of input types (real type or null) and output types (real type or void):
1572
+ * - Task<null, OutputType>: No input, returns a type
1573
+ * - Task<null, void>: No input, returns nothing
1574
+ * - Task<InputType, OutputType>: Has input, returns a type
1575
+ * - Task<InputType, void>: Has input, returns nothing
1576
+ */
1577
+ startingTask: Task<null, any> | Task<null, void> | Task<any, any> | Task<any, void>;
1578
+ /** Optional number of retry attempts if the entire workflow fails */
1579
+ retries?: number;
1580
+ /** Optional timeout duration for the entire workflow execution (e.g., "10m", "1h") */
1581
+ timeout?: string;
1582
+ /** Optional cron-style schedule string for automated workflow execution */
1583
+ schedule?: string;
1584
+ }
1585
+ /**
1586
+ * Represents a complete workflow composed of interconnected tasks.
1587
+ *
1588
+ * A Workflow manages the execution flow of multiple tasks, handling scheduling,
1589
+ * error recovery, and task orchestration. Once created, workflows are automatically
1590
+ * registered with the internal Moose system.
1591
+ *
1592
+ * @example
1593
+ * ```typescript
1594
+ * const dataProcessingWorkflow = new Workflow("dataProcessing", {
1595
+ * startingTask: extractDataTask,
1596
+ * schedule: "0 2 * * *", // Run daily at 2 AM
1597
+ * timeout: "1h",
1598
+ * retries: 2
1599
+ * });
1600
+ * ```
1601
+ */
1602
+ declare class Workflow {
1603
+ readonly name: string;
1604
+ readonly config: WorkflowConfig;
1605
+ /**
1606
+ * Creates a new Workflow instance and registers it with the Moose system.
1607
+ *
1608
+ * @param name - Unique identifier for the workflow
1609
+ * @param config - Configuration object defining the workflow behavior and task orchestration
1610
+ * @throws {Error} When the workflow contains null/undefined tasks or infinite loops
1611
+ */
1612
+ constructor(name: string, config: WorkflowConfig);
1613
+ /**
1614
+ * Validates the task graph to ensure there are no null tasks or infinite loops.
1615
+ *
1616
+ * @private
1617
+ * @param startingTask - The starting task to begin validation from
1618
+ * @param workflowName - The name of the workflow being validated (for error messages)
1619
+ * @throws {Error} When null/undefined tasks are found or infinite loops are detected
1620
+ */
1621
+ private validateTaskGraph;
1622
+ }
1623
+
1624
+ /**
1625
+ * @template T The data type of the messages expected by the destination stream.
1626
+ */
1627
+ interface IngestConfig<T> {
1628
+ /**
1629
+ * The destination stream where the ingested data should be sent.
1630
+ */
1631
+ destination: Stream<T>;
1632
+ /**
+ * An optional dead letter queue for records that fail processing at this ingest endpoint.
+ */
+ deadLetterQueue?: DeadLetterQueue<T>;
1633
+ /**
1634
+ * An optional version string for this configuration.
1635
+ */
1636
+ version?: string;
1637
+ /**
1638
+ * An optional custom path for the ingestion endpoint.
1639
+ */
1640
+ path?: string;
1641
+ metadata?: {
1642
+ description?: string;
1643
+ };
1644
+ }
1645
+ /**
1646
+ * Represents an Ingest API endpoint, used for sending data into a Moose system, typically writing to a Stream.
1647
+ * Provides a typed interface for the expected data format.
1648
+ *
1649
+ * @template T The data type of the records that this API endpoint accepts. The structure of T defines the expected request body schema.
1650
+ */
1651
+ declare class IngestApi<T> extends TypedBase<T, IngestConfig<T>> {
1652
+ /**
1653
+ * Creates a new IngestApi instance.
1654
+ * @param name The name of the ingest API endpoint.
1655
+ * @param config Optional configuration for the ingest API.
1656
+ */
1657
+ constructor(name: string, config?: IngestConfig<T>);
1658
+ /**
1659
+ * @internal
1660
+ * Note: `validators` parameter is a positional placeholder (always undefined for IngestApi).
1661
+ * It exists because TypedBase has validators as the 5th param, and we need to pass
1662
+ * allowExtraFields as the 6th param. IngestApi doesn't use validators.
1663
+ */
1664
+ constructor(name: string, config: IngestConfig<T>, schema: IJsonSchemaCollection$1.IV3_1, columns: Column[], validators: undefined, allowExtraFields: boolean);
1665
+ }
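+ /**
+ * Sketch of creating an IngestApi (hypothetical names; `eventsStream` is assumed to be an
+ * existing `Stream<UserEvent>` created elsewhere):
+ * ```typescript
+ * interface UserEvent { id: string; ts: Date }
+ * declare const eventsStream: Stream<UserEvent>;
+ *
+ * const ingest = new IngestApi<UserEvent>("user-events", {
+ *   destination: eventsStream,
+ *   path: "ingest/user-events",
+ *   metadata: { description: "Receives user events over HTTP" },
+ * });
+ * ```
+ */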
1666
+
1667
+ /**
1668
+ * Defines the signature for a handler function used by a Consumption API.
1669
+ * @template T The expected type of the request parameters or query parameters.
1670
+ * @template R The expected type of the response data.
1671
+ * @param params An object containing the validated request parameters, matching the structure of T.
1672
+ * @param utils Utility functions provided to the handler, e.g., for database access (`runSql`).
1673
+ * @returns A Promise resolving to the response data of type R.
1674
+ */
1675
+ type ApiHandler<T, R> = (params: T, utils: ApiUtil) => Promise<R>;
1676
+ /**
1677
+ * @template T The data type of the request parameters.
1678
+ */
1679
+ interface ApiConfig<T> {
1680
+ /**
1681
+ * An optional version string for this configuration.
1682
+ */
1683
+ version?: string;
1684
+ /**
1685
+ * An optional custom path for the API endpoint.
1686
+ * If not specified, defaults to the API name.
1687
+ */
1688
+ path?: string;
1689
+ metadata?: {
1690
+ description?: string;
1691
+ };
1692
+ }
1693
+ /**
1694
+ * Represents a Consumption API endpoint (API), used for querying data from a Moose system.
1695
+ * Exposes data, often from an OlapTable or derived through a custom handler function.
1696
+ *
1697
+ * @template T The data type defining the expected structure of the API's query parameters.
1698
+ * @template R The data type defining the expected structure of the API's response body. Defaults to `any`.
1699
+ */
1700
+ declare class Api<T, R = any> extends TypedBase<T, ApiConfig<T>> {
1701
+ /** @internal The handler function that processes requests and generates responses. */
1702
+ _handler: ApiHandler<T, R>;
1703
+ /** @internal The JSON schema definition for the response type R. */
1704
+ responseSchema: IJsonSchemaCollection$1.IV3_1;
1705
+ /**
1706
+ * Creates a new Api instance.
1707
+ * @param name The name of the consumption API endpoint.
1708
+ * @param handler The function to execute when the endpoint is called. It receives validated query parameters and utility functions.
1709
+ * @param config Optional configuration for the consumption API.
1710
+ */
1711
+ constructor(name: string, handler: ApiHandler<T, R>, config?: ApiConfig<T>);
1712
+ /** @internal **/
1713
+ constructor(name: string, handler: ApiHandler<T, R>, config: ApiConfig<T>, schema: IJsonSchemaCollection$1.IV3_1, columns: Column[], responseSchema: IJsonSchemaCollection$1.IV3_1);
1714
+ /**
1715
+ * Retrieves the handler function associated with this Consumption API.
1716
+ * @returns The handler function.
1717
+ */
1718
+ getHandler: () => ApiHandler<T, R>;
1719
+ call(baseUrl: string, queryParams: T): Promise<R>;
1720
+ }
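+ /**
+ * Sketch of a typed Api handler (hypothetical names and table; the handler receives the
+ * validated query parameters plus MooseUtils, i.e. `client` and `sql`):
+ * ```typescript
+ * interface DailyParams { day: string }
+ * interface DailyRow { day: string; total: number }
+ *
+ * const dailyTotals = new Api<DailyParams, DailyRow[]>(
+ *   "daily-totals",
+ *   async (params, { client, sql }) => {
+ *     const result = await client.query.execute<DailyRow>(
+ *       sql`SELECT day, total FROM daily_totals WHERE day = ${params.day}`,
+ *     );
+ *     return (await result.json()) as DailyRow[];
+ *   },
+ * );
+ * ```
+ */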
1721
+ /** @deprecated Use ApiConfig<T> directly instead. */
1722
+ type EgressConfig<T> = ApiConfig<T>;
1723
+ /** @deprecated Use Api directly instead. */
1724
+ declare const ConsumptionApi: typeof Api;
1725
+
1726
+ /**
1727
+ * Configuration options for a complete ingestion pipeline, potentially including an Ingest API, a Stream, and an OLAP Table.
1728
+ *
1729
+ * @template T The data type of the records being ingested.
1730
+ *
1731
+ * @example
1732
+ * ```typescript
1733
+ * // Simple pipeline with all components enabled
1734
+ * const pipelineConfig: IngestPipelineConfig<UserData> = {
1735
+ * table: true,
1736
+ * stream: true,
1737
+ * ingestApi: true
1738
+ * };
1739
+ *
1740
+ * // Advanced pipeline with custom configurations
1741
+ * const advancedConfig: IngestPipelineConfig<UserData> = {
1742
+ * table: { orderByFields: ['timestamp', 'userId'], engine: ClickHouseEngines.ReplacingMergeTree },
1743
+ * stream: { parallelism: 4, retentionPeriod: 86400 },
1744
+ * ingestApi: true,
1745
+ * version: '1.2.0',
1746
+ * metadata: { description: 'User data ingestion pipeline' }
1747
+ * };
1748
+ * ```
1749
+ */
1750
+ type IngestPipelineConfig<T> = {
1751
+ /**
1752
+ * Configuration for the OLAP table component of the pipeline.
1753
+ *
1754
+ * - If `true`, a table with default settings is created.
1755
+ * - If an `OlapConfig` object is provided, it specifies the table's configuration.
1756
+ * - If `false`, no OLAP table is created.
1757
+ *
1758
+ * @default false
1759
+ */
1760
+ table: boolean | OlapConfig<T>;
1761
+ /**
1762
+ * Configuration for the stream component of the pipeline.
1763
+ *
1764
+ * - If `true`, a stream with default settings is created.
1765
+ * - Pass a config object to specify the stream's configuration.
1766
+ * - The stream's destination will automatically be set to the pipeline's table if one exists.
1767
+ * - If `false`, no stream is created.
1768
+ *
1769
+ * @default false
1770
+ */
1771
+ stream: boolean | Omit<StreamConfig<T>, "destination">;
1772
+ /**
1773
+ * Configuration for the ingest API component of the pipeline.
1774
+ *
1775
+ * - If `true`, an ingest API with default settings is created.
1776
+ * - If a partial `IngestConfig` object (excluding `destination`) is provided, it specifies the API's configuration.
1777
+ * - The API's destination will automatically be set to the pipeline's stream if one exists.
1778
+ * - If `false`, no ingest API is created.
1779
+ *
1780
+ * **Note:** Requires a stream to be configured when enabled.
1781
+ *
1782
+ * @default false
1783
+ */
1784
+ ingestApi: boolean | Omit<IngestConfig<T>, "destination">;
1785
+ /**
1786
+ * @deprecated Use `ingestApi` instead. This parameter will be removed in a future version.
1787
+ */
1788
+ ingest?: boolean | Omit<IngestConfig<T>, "destination">;
1789
+ /**
1790
+ * Configuration for the dead letter queue of the pipeline.
1791
+ * If `true`, a dead letter queue with default settings is created.
1792
+ * If a `StreamConfig` object is provided, it specifies the dead letter queue's configuration.
+ * Failed records from the pipeline's components are routed to this queue.
1794
+ * If `false` or `undefined`, no dead letter queue is created.
1795
+ */
1796
+ deadLetterQueue?: boolean | StreamConfig<DeadLetterModel>;
1797
+ /**
1798
+ * An optional version string applying to all components (table, stream, ingest) created by this pipeline configuration.
1799
+ * This version will be used for schema versioning and component identification.
1800
+ *
1801
+ * @example "v1.0.0", "2023-12", "prod"
1802
+ */
1803
+ version?: string;
1804
+ /**
1805
+ * An optional custom path for the ingestion API endpoint.
1806
+ * This will be used as the HTTP path for the ingest API if one is created.
1807
+ *
1808
+ * @example "pipelines/analytics", "data/events"
1809
+ */
1810
+ path?: string;
1811
+ /**
1812
+ * Optional metadata for the pipeline.
1813
+ */
1814
+ metadata?: {
1815
+ /** Human-readable description of the pipeline's purpose */
1816
+ description?: string;
1817
+ };
1818
+ /** Determines how changes in code will propagate to the resources. */
1819
+ lifeCycle?: LifeCycle;
1820
+ };
1821
+ /**
1822
+ * Represents a complete ingestion pipeline, potentially combining an Ingest API, a Stream, and an OLAP Table
1823
+ * under a single name and configuration. Simplifies the setup of common ingestion patterns.
1824
+ *
1825
+ * This class provides a high-level abstraction for creating data ingestion workflows that can include:
1826
+ * - An HTTP API endpoint for receiving data
1827
+ * - A streaming component for real-time data processing
1828
+ * - An OLAP table for analytical queries
1829
+ *
1830
+ * @template T The data type of the records flowing through the pipeline. This type defines the schema for the
1831
+ * Ingest API input, the Stream messages, and the OLAP Table rows.
1832
+ *
1833
+ * @example
1834
+ * ```typescript
1835
+ * // Create a complete pipeline with all components
1836
+ * const userDataPipeline = new IngestPipeline('userData', {
1837
+ * table: true,
1838
+ * stream: true,
1839
+ * ingestApi: true,
1840
+ * version: '1.0.0',
1841
+ * metadata: { description: 'Pipeline for user registration data' }
1842
+ * });
1843
+ *
1844
+ * // Create a pipeline with only table and stream
1845
+ * const analyticsStream = new IngestPipeline('analytics', {
1846
+ * table: { orderByFields: ['timestamp'], engine: ClickHouseEngines.ReplacingMergeTree },
1847
+ * stream: { parallelism: 8, retentionPeriod: 604800 },
1848
+ * ingestApi: false
1849
+ * });
1850
+ * ```
1851
+ */
1852
+ declare class IngestPipeline<T> extends TypedBase<T, IngestPipelineConfig<T>> {
1853
+ /**
1854
+ * The OLAP table component of the pipeline, if configured.
1855
+ * Provides analytical query capabilities for the ingested data.
1856
+ * Only present when `config.table` is not `false`.
1857
+ */
1858
+ table?: OlapTable<T>;
1859
+ /**
1860
+ * The stream component of the pipeline, if configured.
1861
+ * Handles real-time data flow and processing between components.
1862
+ * Only present when `config.stream` is not `false`.
1863
+ */
1864
+ stream?: Stream<T>;
1865
+ /**
1866
+ * The ingest API component of the pipeline, if configured.
1867
+ * Provides HTTP endpoints for data ingestion.
1868
+ * Only present when `config.ingestApi` is not `false`.
1869
+ */
1870
+ ingestApi?: IngestApi<T>;
1871
+ /** The dead letter queue of the pipeline, if configured. */
1872
+ deadLetterQueue?: DeadLetterQueue<T>;
1873
+ /**
1874
+ * Creates a new IngestPipeline instance.
1875
+ * Based on the configuration, it automatically creates and links the IngestApi, Stream, and OlapTable components.
1876
+ *
1877
+ * @param name The base name for the pipeline components (e.g., "userData" could create "userData" table, "userData" stream, "userData" ingest API).
1878
+ * @param config Optional configuration for the ingestion pipeline.
1879
+ *
1880
+ * @throws {Error} When ingest API is enabled but no stream is configured, since the API requires a stream destination.
1881
+ *
1882
+ * @example
1883
+ * ```typescript
1884
+ * const pipeline = new IngestPipeline('events', {
1885
+ * table: { orderByFields: ['timestamp'], engine: ClickHouseEngines.ReplacingMergeTree },
1886
+ * stream: { parallelism: 2 },
1887
+ * ingestApi: true
1888
+ * });
1889
+ * ```
1890
+ */
1891
+ constructor(name: string, config: IngestPipelineConfig<T>);
1892
+ /**
1893
+ * Internal constructor used by the framework for advanced initialization.
1894
+ *
1895
+ * @internal
1896
+ * @param name The base name for the pipeline components.
1897
+ * @param config Configuration specifying which components to create and their settings.
1898
+ * @param schema JSON schema collection for type validation.
1899
+ * @param columns Column definitions for the data model.
1900
+ * @param validators Typia validation functions.
1901
+ * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).
1902
+ */
1903
+ constructor(name: string, config: IngestPipelineConfig<T>, schema: IJsonSchemaCollection$1.IV3_1, columns: Column[], validators: TypiaValidators<T>, allowExtraFields: boolean);
1904
+ }
1905
+
1906
+ interface ETLPipelineConfig<T, U> {
1907
+ extract: AsyncIterable<T> | (() => AsyncIterable<T>);
1908
+ transform: (sourceData: T) => Promise<U>;
1909
+ load: ((data: U[]) => Promise<void>) | OlapTable<U>;
1910
+ }
1911
+ declare class ETLPipeline<T, U> {
1912
+ readonly name: string;
1913
+ readonly config: ETLPipelineConfig<T, U>;
1914
+ private batcher;
1915
+ constructor(name: string, config: ETLPipelineConfig<T, U>);
1916
+ private setupPipeline;
1917
+ private createBatcher;
1918
+ private getDefaultTaskConfig;
1919
+ private createAllTasks;
1920
+ private createExtractTask;
1921
+ private createTransformTask;
1922
+ private createLoadTask;
1923
+ run(): Promise<void>;
1924
+ }
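+ /**
+ * Sketch of an ETLPipeline (hypothetical types; `usersTable` is assumed to be an existing
+ * `OlapTable<UserRow>`):
+ * ```typescript
+ * interface RawUser { id: string; name: string }
+ * interface UserRow { id: string; name: string; importedAt: Date }
+ * declare const usersTable: OlapTable<UserRow>;
+ *
+ * async function* fetchUsers(): AsyncIterable<RawUser> {
+ *   yield { id: "1", name: "Ada" };
+ * }
+ *
+ * const importUsers = new ETLPipeline<RawUser, UserRow>("import-users", {
+ *   extract: fetchUsers,                                        // or an AsyncIterable directly
+ *   transform: async (u) => ({ ...u, importedAt: new Date() }),
+ *   load: usersTable,                                           // or: async (rows) => { ... }
+ * });
+ *
+ * // await importUsers.run();
+ * ```
+ */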
1925
+
1926
+ /**
1927
+ * Represents a database View, defined by a SQL SELECT statement based on one or more base tables or other views.
1928
+ * Emits structured data for the Moose infrastructure system.
1929
+ */
1930
+ declare class View {
1931
+ /** @internal */
1932
+ readonly kind = "View";
1933
+ /** The name of the view */
1934
+ name: string;
1935
+ /** The SELECT SQL statement that defines the view */
1936
+ selectSql: string;
1937
+ /** Names of source tables/views that the SELECT reads from */
1938
+ sourceTables: string[];
1939
+ /** Optional metadata for the view */
1940
+ metadata: {
1941
+ [key: string]: any;
1942
+ };
1943
+ /**
1944
+ * Creates a new View instance.
1945
+ * @param name The name of the view to be created.
1946
+ * @param selectStatement The SQL SELECT statement that defines the view's logic.
1947
+ * @param baseTables An array of OlapTable or View objects that the `selectStatement` reads from. Used for dependency tracking.
1948
+ * @param metadata Optional metadata for the view (e.g., description, source file).
1949
+ */
1950
+ constructor(name: string, selectStatement: string | Sql, baseTables: (OlapTable<any> | View)[], metadata?: {
1951
+ [key: string]: any;
1952
+ });
1953
+ }
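+ /**
+ * Sketch of defining a View over an existing table (hypothetical `usersTable`):
+ * ```typescript
+ * declare const usersTable: OlapTable<{ id: string; name: string; active: boolean }>;
+ *
+ * const activeUsers = new View(
+ *   "active_users",
+ *   sql`SELECT id, name FROM ${usersTable} WHERE active = true`,
+ *   [usersTable],
+ * );
+ * ```
+ */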
1954
+
1955
+ /**
1956
+ * Configuration options for creating a Materialized View.
1957
+ * @template T The data type of the records stored in the target table of the materialized view.
1958
+ */
1959
+ interface MaterializedViewConfig<T> {
1960
+ /** The SQL SELECT statement or `Sql` object defining the data to be materialized. Dynamic SQL (with parameters) is not allowed here. */
1961
+ selectStatement: string | Sql;
1962
+ /** An array of OlapTable or View objects that the `selectStatement` reads from. */
1963
+ selectTables: (OlapTable<any> | View)[];
1964
+ /** @deprecated See {@link targetTable}
1965
+ * The name for the underlying target OlapTable that stores the materialized data. */
1966
+ tableName?: string;
1967
+ /** The name for the ClickHouse MATERIALIZED VIEW object itself. */
1968
+ materializedViewName: string;
1969
+ /** @deprecated See {@link targetTable}
1970
+ * Optional ClickHouse engine for the target table (e.g., ReplacingMergeTree). Defaults to MergeTree. */
1971
+ engine?: ClickHouseEngines;
1972
+ /** The target table: either an already-constructed OlapTable instance, or an inline definition of the table to create. */
+ targetTable?: OlapTable<T> | {
1973
+ /** The name for the underlying target OlapTable that stores the materialized data. */
1974
+ name: string;
1975
+ /** Optional ClickHouse engine for the target table (e.g., ReplacingMergeTree). Defaults to MergeTree. */
1976
+ engine?: ClickHouseEngines;
1977
+ /** Optional ordering fields for the target table. Crucial if using ReplacingMergeTree. */
1978
+ orderByFields?: (keyof T & string)[];
1979
+ };
1980
+ /** @deprecated See {@link targetTable}
1981
+ * Optional ordering fields for the target table. Crucial if using ReplacingMergeTree. */
1982
+ orderByFields?: (keyof T & string)[];
1983
+ /** Optional metadata for the materialized view (e.g., description, source file). */
1984
+ metadata?: {
1985
+ [key: string]: any;
1986
+ };
1987
+ }
1988
+ /**
1989
+ * Represents a Materialized View in ClickHouse.
1990
+ * This encapsulates both the target OlapTable that stores the data and the MATERIALIZED VIEW definition
1991
+ * that populates the table based on inserts into the source tables.
1992
+ *
1993
+ * @template TargetTable The data type of the records stored in the underlying target OlapTable. The structure of TargetTable defines the target table schema.
1994
+ */
1995
+ declare class MaterializedView<TargetTable> {
1996
+ /** @internal */
1997
+ readonly kind = "MaterializedView";
1998
+ /** The name of the materialized view */
1999
+ name: string;
2000
+ /** The target OlapTable instance where the materialized data is stored. */
2001
+ targetTable: OlapTable<TargetTable>;
2002
+ /** The SELECT SQL statement */
2003
+ selectSql: string;
2004
+ /** Names of source tables that the SELECT reads from */
2005
+ sourceTables: string[];
2006
+ /** Optional metadata for the materialized view */
2007
+ metadata: {
2008
+ [key: string]: any;
2009
+ };
2010
+ /**
2011
+ * Creates a new MaterializedView instance.
2012
+ * Requires the `TargetTable` type parameter to be explicitly provided or inferred,
2013
+ * as it's needed to define the schema of the underlying target table.
2014
+ *
2015
+ * @param options Configuration options for the materialized view.
2016
+ */
2017
+ constructor(options: MaterializedViewConfig<TargetTable>);
2018
+ /** @internal **/
2019
+ constructor(options: MaterializedViewConfig<TargetTable>, targetSchema: IJsonSchemaCollection$1.IV3_1, targetColumns: Column[]);
2020
+ }
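+ /**
+ * Sketch of a MaterializedView with an inline target table definition (hypothetical
+ * `eventsTable`; the target table's engine defaults to MergeTree when omitted):
+ * ```typescript
+ * interface DailyCount { day: string; count: number }
+ * declare const eventsTable: OlapTable<{ id: string; ts: Date }>;
+ *
+ * const dailyCounts = new MaterializedView<DailyCount>({
+ *   materializedViewName: "mv_daily_counts",
+ *   selectStatement: sql`SELECT toDate(ts) AS day, count() AS count FROM ${eventsTable} GROUP BY day`,
+ *   selectTables: [eventsTable],
+ *   targetTable: { name: "daily_counts", orderByFields: ["day"] },
+ * });
+ * ```
+ */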
2021
+
2022
+ type SqlObject = OlapTable<any> | SqlResource | View | MaterializedView<any>;
2023
+ /**
2024
+ * Represents a generic SQL resource that requires setup and teardown commands.
2025
+ * Base class for constructs like Views and Materialized Views. Tracks dependencies.
2026
+ */
2027
+ declare class SqlResource {
2028
+ /** @internal */
2029
+ readonly kind = "SqlResource";
2030
+ /** Array of SQL statements to execute for setting up the resource. */
2031
+ setup: readonly string[];
2032
+ /** Array of SQL statements to execute for tearing down the resource. */
2033
+ teardown: readonly string[];
2034
+ /** The name of the SQL resource (e.g., view name, materialized view name). */
2035
+ name: string;
2036
+ /** List of OlapTables or Views that this resource reads data from. */
2037
+ pullsDataFrom: SqlObject[];
2038
+ /** List of OlapTables or Views that this resource writes data to. */
2039
+ pushesDataTo: SqlObject[];
2040
+ /** @internal Source file path where this resource was defined */
2041
+ sourceFile?: string;
2042
+ /** @internal Source line number where this resource was defined */
2043
+ sourceLine?: number;
2044
+ /** @internal Source column number where this resource was defined */
2045
+ sourceColumn?: number;
2046
+ /**
2047
+ * Creates a new SqlResource instance.
2048
+ * @param name The name of the resource.
2049
+ * @param setup An array of SQL DDL statements to create the resource.
2050
+ * @param teardown An array of SQL DDL statements to drop the resource.
2051
+ * @param options Optional configuration for specifying data dependencies.
2052
+ * @param options.pullsDataFrom Tables/Views this resource reads from.
2053
+ * @param options.pushesDataTo Tables/Views this resource writes to.
2054
+ */
2055
+ constructor(name: string, setup: readonly (string | Sql)[], teardown: readonly (string | Sql)[], options?: {
2056
+ pullsDataFrom?: SqlObject[];
2057
+ pushesDataTo?: SqlObject[];
2058
+ });
2059
+ }
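+ /**
+ * Sketch of a custom SqlResource with explicit setup/teardown DDL (hypothetical
+ * `usersTable`; the statements are illustrative, not prescribed by the package):
+ * ```typescript
+ * declare const usersTable: OlapTable<{ id: string; name: string }>;
+ *
+ * const userLookup = new SqlResource(
+ *   "user_lookup",
+ *   [sql`CREATE VIEW IF NOT EXISTS user_lookup AS SELECT id, name FROM ${usersTable}`],
+ *   [sql`DROP VIEW IF EXISTS user_lookup`],
+ *   { pullsDataFrom: [usersTable] },
+ * );
+ * ```
+ */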
2060
+
2061
+ type WebAppHandler = (req: http.IncomingMessage, res: http.ServerResponse) => void | Promise<void>;
2062
+ interface FrameworkApp {
2063
+ handle?: (req: http.IncomingMessage, res: http.ServerResponse, next?: (err?: any) => void) => void;
2064
+ callback?: () => WebAppHandler;
2065
+ routing?: (req: http.IncomingMessage, res: http.ServerResponse) => void;
2066
+ ready?: () => PromiseLike<unknown>;
2067
+ }
2068
+ interface WebAppConfig {
2069
+ mountPath: string;
2070
+ metadata?: {
2071
+ description?: string;
2072
+ };
2073
+ injectMooseUtils?: boolean;
2074
+ }
2075
+ declare class WebApp {
2076
+ name: string;
2077
+ handler: WebAppHandler;
2078
+ config: WebAppConfig;
2079
+ private _rawApp?;
2080
+ constructor(name: string, appOrHandler: FrameworkApp | WebAppHandler, config: WebAppConfig);
2081
+ private toHandler;
2082
+ getRawApp(): FrameworkApp | undefined;
2083
+ }
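+ /**
+ * Sketch of mounting a plain Node handler as a WebApp (hypothetical path and payload):
+ * ```typescript
+ * const hello = new WebApp(
+ *   "hello",
+ *   async (req, res) => {
+ *     res.writeHead(200, { "content-type": "application/json" });
+ *     res.end(JSON.stringify({ ok: true, url: req.url }));
+ *   },
+ *   { mountPath: "/hello", metadata: { description: "Health check endpoint" } },
+ * );
+ * ```
+ */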
2084
+
2085
+ /**
2086
+ * @module registry
2087
+ * Public registry functions for accessing Moose Data Model v2 (dmv2) resources.
2088
+ *
2089
+ * This module provides functions to retrieve registered resources like tables, streams,
2090
+ * APIs, and more. These functions are part of the public API and can be used by
2091
+ * user applications to inspect and interact with registered Moose resources.
2092
+ */
2093
+
2094
+ /**
2095
+ * Get all registered OLAP tables.
2096
+ * @returns A Map of table name to OlapTable instance
2097
+ */
2098
+ declare function getTables(): Map<string, OlapTable<any>>;
2099
+ /**
2100
+ * Get a registered OLAP table by name.
2101
+ * @param name - The name of the table
2102
+ * @returns The OlapTable instance or undefined if not found
2103
+ */
2104
+ declare function getTable(name: string): OlapTable<any> | undefined;
2105
+ /**
2106
+ * Get all registered streams.
2107
+ * @returns A Map of stream name to Stream instance
2108
+ */
2109
+ declare function getStreams(): Map<string, Stream<any>>;
2110
+ /**
2111
+ * Get a registered stream by name.
2112
+ * @param name - The name of the stream
2113
+ * @returns The Stream instance or undefined if not found
2114
+ */
2115
+ declare function getStream(name: string): Stream<any> | undefined;
2116
+ /**
2117
+ * Get all registered ingestion APIs.
2118
+ * @returns A Map of API name to IngestApi instance
2119
+ */
2120
+ declare function getIngestApis(): Map<string, IngestApi<any>>;
2121
+ /**
2122
+ * Get a registered ingestion API by name.
2123
+ * @param name - The name of the ingestion API
2124
+ * @returns The IngestApi instance or undefined if not found
2125
+ */
2126
+ declare function getIngestApi(name: string): IngestApi<any> | undefined;
2127
+ /**
2128
+ * Get all registered APIs (consumption/egress APIs).
2129
+ * @returns A Map of API key to Api instance
2130
+ */
2131
+ declare function getApis(): Map<string, Api<any>>;
2132
+ /**
2133
+ * Get a registered API by name, version, or path.
2134
+ *
2135
+ * Supports multiple lookup strategies:
2136
+ * 1. Direct lookup by full key (name:version or name for unversioned)
2137
+ * 2. Lookup by name with automatic version aliasing when only one versioned API exists
2138
+ * 3. Lookup by custom path (if configured)
2139
+ *
2140
+ * @param nameOrPath - The name, name:version, or custom path of the API
2141
+ * @returns The Api instance or undefined if not found
2142
+ */
2143
+ declare function getApi(nameOrPath: string): Api<any> | undefined;
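+ /**
+ * Lookup sketch for the strategies above (hypothetical API names, versions, and paths):
+ * ```typescript
+ * const byName = getApi("daily-totals");            // full key or unversioned name
+ * const byVersion = getApi("daily-totals:1.0.0");   // explicit name:version
+ * const byPath = getApi("reports/daily-totals");    // custom path, if configured
+ *
+ * if (byName) {
+ *   // e.g. invoke it against a running instance:
+ *   // await byName.call("http://localhost:4000", { day: "2024-01-01" });
+ * }
+ * ```
+ */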
2144
+ /**
2145
+ * Get all registered SQL resources.
2146
+ * @returns A Map of resource name to SqlResource instance
2147
+ */
2148
+ declare function getSqlResources(): Map<string, SqlResource>;
2149
+ /**
2150
+ * Get a registered SQL resource by name.
2151
+ * @param name - The name of the SQL resource
2152
+ * @returns The SqlResource instance or undefined if not found
2153
+ */
2154
+ declare function getSqlResource(name: string): SqlResource | undefined;
2155
+ /**
2156
+ * Get all registered workflows.
2157
+ * @returns A Map of workflow name to Workflow instance
2158
+ */
2159
+ declare function getWorkflows(): Map<string, Workflow>;
2160
+ /**
2161
+ * Get a registered workflow by name.
2162
+ * @param name - The name of the workflow
2163
+ * @returns The Workflow instance or undefined if not found
2164
+ */
2165
+ declare function getWorkflow(name: string): Workflow | undefined;
2166
+ /**
2167
+ * Get all registered web apps.
2168
+ * @returns A Map of web app name to WebApp instance
2169
+ */
2170
+ declare function getWebApps(): Map<string, WebApp>;
2171
+ /**
2172
+ * Get a registered web app by name.
2173
+ * @param name - The name of the web app
2174
+ * @returns The WebApp instance or undefined if not found
2175
+ */
2176
+ declare function getWebApp(name: string): WebApp | undefined;
2177
+ /**
2178
+ * Get all registered materialized views.
2179
+ * @returns A Map of MV name to MaterializedView instance
2180
+ */
2181
+ declare function getMaterializedViews(): Map<string, MaterializedView<any>>;
2182
+ /**
2183
+ * Get a registered materialized view by name.
2184
+ * @param name - The name of the materialized view
2185
+ * @returns The MaterializedView instance or undefined if not found
2186
+ */
2187
+ declare function getMaterializedView(name: string): MaterializedView<any> | undefined;
2188
+ /**
2189
+ * Get all registered views.
2190
+ * @returns A Map of view name to View instance
2191
+ */
2192
+ declare function getViews(): Map<string, View>;
2193
+ /**
2194
+ * Get a registered view by name.
2195
+ * @param name - The name of the view
2196
+ * @returns The View instance or undefined if not found
2197
+ */
2198
+ declare function getView(name: string): View | undefined;
2199
+
2200
+ /**
2201
+ * @module dmv2
2202
+ * This module defines the core Moose v2 data model constructs, including OlapTable, Stream, IngestApi, Api,
2203
+ * IngestPipeline, View, and MaterializedView. These classes provide a typed interface for defining and managing
2204
+ * data infrastructure components like ClickHouse tables, Redpanda streams, and data processing pipelines.
2205
+ */
2206
+ /**
2207
+ * A helper type used potentially for indicating aggregated fields in query results or schemas.
2208
+ * Captures the aggregation function name and argument types.
2209
+ * (Usage context might be specific to query builders or ORM features).
2210
+ *
2211
+ * @template AggregationFunction The name of the aggregation function (e.g., 'sum', 'avg', 'count').
2212
+ * @template ArgTypes An array type representing the types of the arguments passed to the aggregation function.
2213
+ */
2214
+ type Aggregated<AggregationFunction extends string, ArgTypes extends any[] = []> = {
2215
+ _aggregationFunction?: AggregationFunction;
2216
+ _argTypes?: ArgTypes;
2217
+ };
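+ /**
+ * Sketch of how such a branded field might be declared, mirroring the SimpleAggregated
+ * example below (the field names and functions here are hypothetical, not part of the package):
+ * ```typescript
+ * interface DailyStats {
+ *   day: string;
+ *   uniqueUsers: number & Aggregated<"uniq", [string]>;
+ *   avgLatency: number & Aggregated<"avg", [number]>;
+ * }
+ * ```
+ */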
2218
+ /**
2219
+ * A helper type for SimpleAggregateFunction in ClickHouse.
2220
+ * SimpleAggregateFunction stores the aggregated value directly instead of intermediate states,
2221
+ * offering better performance for functions like sum, max, min, any, anyLast, etc.
2222
+ *
2223
+ * @template AggregationFunction The name of the simple aggregation function (e.g., 'sum', 'max', 'anyLast').
2224
+ * @template ArgType The type of the argument (and result) of the aggregation function.
2225
+ *
2226
+ * @example
2227
+ * ```typescript
2228
+ * interface Stats {
2229
+ * rowCount: number & SimpleAggregated<'sum', number>;
2230
+ * maxValue: number & SimpleAggregated<'max', number>;
2231
+ * lastStatus: string & SimpleAggregated<'anyLast', string>;
2232
+ * }
2233
+ * ```
2234
+ */
2235
+ type SimpleAggregated<AggregationFunction extends string, ArgType = any> = {
2236
+ _simpleAggregationFunction?: AggregationFunction;
2237
+ _argType?: ArgType;
2238
+ };
2239
+
2240
+ /**
2241
+ * Quote a ClickHouse identifier with backticks if not already quoted.
2242
+ * Backticks allow special characters (e.g., hyphens) in identifiers.
2243
+ */
2244
+ declare const quoteIdentifier: (name: string) => string;
2245
+ type IdentifierBrandedString = string & {
2246
+ readonly __identifier_brand?: unique symbol;
2247
+ };
2248
+ type NonIdentifierBrandedString = string & {
2249
+ readonly __identifier_brand?: unique symbol;
2250
+ };
2251
+ /**
2252
+ * Values supported by SQL engine.
2253
+ */
2254
+ type Value = NonIdentifierBrandedString | number | boolean | Date | [string, string];
2255
+ /**
2256
+ * Supported value or SQL instance.
2257
+ */
2258
+ type RawValue = Value | Sql;
2259
+ /**
2260
+ * Sql template tag interface with attached helper methods.
2261
+ */
2262
+ interface SqlTemplateTag {
2263
+ (strings: readonly string[], ...values: readonly (RawValue | Column | OlapTable<any> | View)[]): Sql;
2264
+ /**
2265
+ * Join an array of Sql fragments with a separator.
2266
+ * @param fragments - Array of Sql fragments to join
2267
+ * @param separator - Optional separator string (defaults to ", ")
2268
+ */
2269
+ join(fragments: Sql[], separator?: string): Sql;
2270
+ /**
2271
+ * Create raw SQL from a string without parameterization.
2272
+ * WARNING: SQL injection risk if used with untrusted input.
2273
+ */
2274
+ raw(text: string): Sql;
2275
+ }
2276
+ declare const sql: SqlTemplateTag;
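+ /**
+ * Sketch of the template tag (hypothetical table and values). Plain interpolated values are
+ * parameterized for execution via `toQuery`; table and view objects can also be interpolated,
+ * per the signature above:
+ * ```typescript
+ * declare const usersTable: OlapTable<{ id: string; name: string; age: number }>;
+ *
+ * const minAge = 21;
+ * const base = sql`SELECT name FROM ${usersTable} WHERE age >= ${minAge}`;
+ *
+ * const filters = [sql`name != ''`, sql`age < ${65}`];
+ * const query = base.append(sql.raw(" AND ")).append(sql.join(filters, " AND "));
+ *
+ * const [text, params] = toQuery(query);   // parameterized SQL text + named parameters
+ * ```
+ */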
2277
+ /**
2278
+ * A SQL instance can be nested within each other to build SQL strings.
2279
+ */
2280
+ declare class Sql {
2281
+ readonly values: Value[];
2282
+ readonly strings: string[];
2283
+ constructor(rawStrings: readonly string[], rawValues: readonly (RawValue | Column | OlapTable<any> | View | Sql)[]);
2284
+ /**
2285
+ * Append another Sql fragment, returning a new Sql instance.
2286
+ */
2287
+ append(other: Sql): Sql;
2288
+ }
2289
+ declare const toStaticQuery: (sql: Sql) => string;
2290
+ declare const toQuery: (sql: Sql) => [string, {
2291
+ [pN: string]: any;
2292
+ }];
2293
+ /**
2294
+ * Build a display-only SQL string with values inlined for logging/debugging.
2295
+ * Does not alter execution behavior; use toQuery for actual execution.
2296
+ */
2297
+ declare const toQueryPreview: (sql: Sql) => string;
2298
+ declare const getValueFromParameter: (value: any) => any;
2299
+ declare function createClickhouseParameter(parameterIndex: number, value: Value): string;
2300
+ /**
2301
+ * Converts a JS value (arriving as JSON via API query parameters) to the corresponding ClickHouse type used when generating the named placeholder of a parameterized query.
+ * Only numbers (to Int or Float), booleans (to Bool), and strings (to String) are mapped directly; all other types are converted to String.
+ * Complex values (object, Array, null, undefined, Date, Record, etc.) are passed as strings and can be converted inside SQL with ClickHouse's conversion functions.
+ * See https://clickhouse.com/docs/en/sql-reference/functions and https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions
2306
+ * @param value
2307
+ * @returns 'Float', 'Int', 'Bool', 'String'
2308
+ */
2309
+ declare const mapToClickHouseType: (value: Value) => string;
2310
+
2311
+ /**
2312
+ * Utilities provided by getMooseUtils() for database access and SQL queries.
2313
+ * Works in both Moose runtime and standalone contexts.
2314
+ */
2315
+ interface MooseUtils {
2316
+ client: MooseClient;
2317
+ sql: typeof sql;
2318
+ jwt?: JWTPayload;
2319
+ }
2320
+ /**
2321
+ * @deprecated Use MooseUtils instead. ApiUtil is now a type alias to MooseUtils
2322
+ * and will be removed in a future version.
2323
+ *
2324
+ * Migration: Replace `ApiUtil` with `MooseUtils` in your type annotations.
2325
+ */
2326
+ type ApiUtil = MooseUtils;
2327
+ /** @deprecated Use MooseUtils instead. */
2328
+ type ConsumptionUtil = MooseUtils;
2329
+ declare class MooseClient {
2330
+ query: QueryClient;
2331
+ workflow: WorkflowClient;
2332
+ constructor(queryClient: QueryClient, temporalClient?: Client);
2333
+ }
2334
+ declare class QueryClient {
2335
+ client: ClickHouseClient;
2336
+ query_id_prefix: string;
2337
+ constructor(client: ClickHouseClient, query_id_prefix: string);
2338
+ execute<T = any>(sql: Sql): Promise<ResultSet<"JSONEachRow"> & {
2339
+ __query_result_t?: T[];
2340
+ }>;
2341
+ command(sql: Sql): Promise<CommandResult>;
2342
+ }
2343
+ declare class WorkflowClient {
2344
+ client: Client | undefined;
2345
+ constructor(temporalClient?: Client);
2346
+ execute(name: string, input_data: any): Promise<{
2347
+ status: number;
2348
+ body: string;
2349
+ }>;
2350
+ terminate(workflowId: string): Promise<{
2351
+ status: number;
2352
+ body: string;
2353
+ }>;
2354
+ private getWorkflowConfig;
2355
+ private processInputData;
2356
+ }
2357
+ /**
2358
+ * This looks similar to the client in runner.ts, which is a worker.
+ * The Temporal SDK uses similar-looking connection options and clients,
+ * but a worker and a client like this one (which triggers workflows)
+ * come from different libraries.
2362
+ */
2363
+ declare function getTemporalClient(temporalUrl: string, namespace: string, clientCert: string, clientKey: string, apiKey: string): Promise<Client | undefined>;
2364
+ declare const ApiHelpers: {
2365
+ column: (value: string) => [string, string];
2366
+ table: (value: string) => [string, string];
2367
+ };
2368
+ /** @deprecated Use ApiHelpers instead. */
2369
+ declare const ConsumptionHelpers: {
2370
+ column: (value: string) => [string, string];
2371
+ table: (value: string) => [string, string];
2372
+ };
2373
+ declare function joinQueries({ values, separator, prefix, suffix, }: {
2374
+ values: readonly RawValue[];
2375
+ separator?: string;
2376
+ prefix?: string;
2377
+ suffix?: string;
2378
+ }): Sql;
2379
+
2380
+ export { getStreams as $, type ApiUtil as A, type ApiConfig as B, type ConsumptionUtil as C, type DateTime as D, ClickHouseEngines as E, type FixedString as F, ConsumptionApi as G, type DeadLetter as H, type Int8 as I, type DeadLetterModel as J, DeadLetterQueue as K, type LowCardinality as L, type EgressConfig as M, ETLPipeline as N, type ETLPipelineConfig as O, type FrameworkApp as P, getApi as Q, getApis as R, getIngestApi as S, getIngestApis as T, type UInt8 as U, getMaterializedView as V, type WithDefault as W, getMaterializedViews as X, getSqlResource as Y, getSqlResources as Z, getStream as _, type ClickHouseByteSize as a, getTable as a0, getTables as a1, getView as a2, getViews as a3, getWebApp as a4, getWebApps as a5, getWorkflow as a6, getWorkflows as a7, IngestApi as a8, type IngestConfig as a9, toQueryPreview as aA, getValueFromParameter as aB, createClickhouseParameter as aC, mapToClickHouseType as aD, type MooseUtils as aE, MooseClient as aF, type ClickHousePoint as aG, type ClickHouseRing as aH, type ClickHouseLineString as aI, type ClickHouseMultiLineString as aJ, type ClickHousePolygon as aK, type ClickHouseMultiPolygon as aL, QueryClient as aM, WorkflowClient as aN, getTemporalClient as aO, ApiHelpers as aP, ConsumptionHelpers as aQ, joinQueries as aR, type ConsumerConfig as aS, type TransformConfig as aT, type TaskContext as aU, type TaskConfig as aV, type IngestPipelineConfig as aW, type MaterializedViewConfig as aX, IngestPipeline as aa, LifeCycle as ab, MaterializedView as ac, type OlapConfig as ad, OlapTable as ae, type S3QueueTableSettings as af, type SimpleAggregated as ag, SqlResource as ah, Stream as ai, type StreamConfig as aj, Task as ak, View as al, WebApp as am, type WebAppConfig as an, type WebAppHandler as ao, Workflow as ap, quoteIdentifier as aq, type IdentifierBrandedString as ar, type NonIdentifierBrandedString as as, type Value as at, type RawValue as au, type SqlTemplateTag as av, sql as aw, Sql as ax, toStaticQuery as ay, toQuery as az, type ClickHouseCodec as b, type ClickHouseDecimal as c, type ClickHouseDefault as d, type ClickHouseFixedStringSize as e, type ClickHouseFloat as f, type ClickHouseInt as g, type ClickHouseJson as h, type ClickHouseMaterialized as i, type ClickHouseNamedTuple as j, type ClickHousePrecision as k, type ClickHouseTTL as l, type DateTime64 as m, type DateTime64String as n, type DateTimeString as o, type Decimal as p, type Float32 as q, type Float64 as r, type Int16 as s, type Int32 as t, type Int64 as u, type UInt16 as v, type UInt32 as w, type UInt64 as x, type Aggregated as y, Api as z };