@514labs/moose-lib 0.6.297-ci-29-gef88e0aa → 0.6.297-ci-4-g8012f636

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- {"version":3,"sources":["../../src/dmv2/utils/stackTrace.ts","../../src/dmv2/typedBase.ts","../../src/dataModels/dataModelTypes.ts","../../src/sqlHelpers.ts","../../src/blocks/helpers.ts","../../src/dataModels/types.ts","../../src/browserCompatible.ts","../../src/commons.ts","../../src/secrets.ts","../../src/consumption-apis/helpers.ts","../../src/consumption-apis/webAppHelpers.ts","../../src/scripts/task.ts","../../src/cluster-utils.ts","../../src/consumption-apis/runner.ts","../../src/clients/redisClient.ts","../../src/consumption-apis/standalone.ts","../../src/utilities/json.ts","../../src/utilities/dataParser.ts","../../src/utilities/index.ts","../../src/connectors/dataSource.ts","../../src/index.ts","../../src/dmv2/internal.ts","../../src/config/configFile.ts","../../src/config/runtime.ts","../../src/dmv2/sdk/olapTable.ts","../../src/dmv2/sdk/stream.ts","../../src/dmv2/sdk/workflow.ts","../../src/dmv2/sdk/ingestApi.ts","../../src/dmv2/sdk/consumptionApi.ts","../../src/dmv2/sdk/ingestPipeline.ts","../../src/dmv2/sdk/etlPipeline.ts","../../src/dmv2/sdk/sqlResource.ts","../../src/dmv2/sdk/materializedView.ts","../../src/dmv2/sdk/view.ts","../../src/dmv2/sdk/lifeCycle.ts","../../src/dmv2/sdk/webApp.ts","../../src/dmv2/registry.ts","../../src/dmv2/index.ts"],"sourcesContent":["/**\n * Stack trace utilities for extracting source file information.\n *\n * This module provides functions for parsing stack traces to determine\n * where user code is located, filtering out internal library paths.\n */\n\n/**\n * Information extracted from a stack trace about source file location.\n */\nexport interface SourceFileInfo {\n /** The file path */\n file?: string;\n /** The line number (as a string) */\n line?: string;\n}\n\n/**\n * Source location with file, line, and column information.\n * Used for precise error location tracking.\n */\nexport interface SourceLocation {\n /** The file path */\n file: string;\n /** The line number */\n line: number;\n /** The column number (optional - may not always be available from stack trace) */\n column?: number;\n}\n\n/**\n * Check if a stack trace line should be skipped (internal/library code).\n * @internal\n */\nfunction shouldSkipStackLine(line: string): boolean {\n return (\n line.includes(\"node_modules\") || // Skip npm installed packages (prod)\n line.includes(\"node:internal\") || // Skip Node.js internals (modern format)\n line.includes(\"internal/modules\") || // Skip Node.js internals (older format)\n line.includes(\"ts-node\") || // Skip TypeScript execution\n line.includes(\"/ts-moose-lib/src/\") || // Skip dev/linked moose-lib src (Unix)\n line.includes(\"\\\\ts-moose-lib\\\\src\\\\\") || // Skip dev/linked moose-lib src (Windows)\n line.includes(\"/ts-moose-lib/dist/\") || // Skip dev/linked moose-lib dist (Unix)\n line.includes(\"\\\\ts-moose-lib\\\\dist\\\\\") // Skip dev/linked moose-lib dist (Windows)\n );\n}\n\n/**\n * Extract file path and line number from a stack trace line.\n * @internal\n */\nfunction parseStackLine(line: string): SourceFileInfo | undefined {\n const match =\n line.match(/\\((.*):(\\d+):(\\d+)\\)/) || line.match(/at (.*):(\\d+):(\\d+)/);\n if (match && match[1]) {\n return {\n file: match[1],\n line: match[2],\n };\n }\n return undefined;\n}\n\n/**\n * Extract source file information from a stack trace.\n * Works in both development (npm link) and production (npm install) environments.\n *\n * @param stack - The stack trace string from an Error object\n * @returns Object with file path and line number, or empty object 
if not found\n */\nexport function getSourceFileInfo(stack?: string): SourceFileInfo {\n if (!stack) return {};\n const lines = stack.split(\"\\n\");\n for (const line of lines) {\n if (shouldSkipStackLine(line)) continue;\n const info = parseStackLine(line);\n if (info) return info;\n }\n return {};\n}\n\n/**\n * Extracts source location (file, line, column) from a stack trace.\n *\n * Stack trace formats vary by environment:\n * - V8 (Node/Chrome): \" at Function (file.ts:10:15)\"\n * - SpiderMonkey (Firefox): \"Function@file.ts:10:15\"\n *\n * @param stack - Error stack trace string\n * @returns SourceLocation or undefined if parsing fails\n */\nexport function getSourceLocationFromStack(\n stack: string | undefined,\n): SourceLocation | undefined {\n if (!stack) return undefined;\n\n const lines = stack.split(\"\\n\");\n\n // Skip first line (error message) and internal frames\n for (const line of lines.slice(1)) {\n // Skip node_modules and internal moose-lib frames\n if (shouldSkipStackLine(line)) {\n continue;\n }\n\n // V8 format: \" at Function (file.ts:10:15)\" or \" at file.ts:10:15\"\n const v8Match = line.match(/at\\s+(?:.*?\\s+\\()?(.+):(\\d+):(\\d+)\\)?/);\n if (v8Match) {\n return {\n file: v8Match[1],\n line: parseInt(v8Match[2], 10),\n column: parseInt(v8Match[3], 10),\n };\n }\n\n // SpiderMonkey format: \"Function@file.ts:10:15\"\n const smMatch = line.match(/(?:.*@)?(.+):(\\d+):(\\d+)/);\n if (smMatch) {\n return {\n file: smMatch[1],\n line: parseInt(smMatch[2], 10),\n column: parseInt(smMatch[3], 10),\n };\n }\n }\n\n return undefined;\n}\n\n/**\n * Extract the first file path outside moose-lib internals from a stack trace.\n * Works in both development (npm link) and production (npm install) environments.\n *\n * @deprecated Use getSourceLocationFromStack instead\n * @param stack - The stack trace string from an Error object\n * @returns The first user-code file path, or undefined if not found\n */\nexport function getSourceFileFromStack(stack?: string): string | undefined {\n const location = getSourceLocationFromStack(stack);\n return location?.file;\n}\n","import { IJsonSchemaCollection } from \"typia/src/schemas/json/IJsonSchemaCollection\";\nimport { Column } from \"../dataModels/dataModelTypes\";\nimport { getSourceFileInfo } from \"./utils/stackTrace\";\n\n/**\n * Type definition for typia validation functions\n */\nexport interface TypiaValidators<T> {\n /** Typia validator function: returns { success: boolean, data?: T, errors?: any[] } */\n validate?: (data: unknown) => { success: boolean; data?: T; errors?: any[] };\n /** Typia assert function: throws on validation failure, returns T on success */\n assert?: (data: unknown) => T;\n /** Typia is function: returns boolean indicating if data matches type T */\n is?: (data: unknown) => data is T;\n}\n\n/**\n * Base class for all typed Moose dmv2 resources (OlapTable, Stream, etc.).\n * Handles the storage and injection of schema information (JSON schema and Column array)\n * provided by the Moose compiler plugin.\n *\n * @template T The data type (interface or type alias) defining the schema of the resource.\n * @template C The specific configuration type for the resource (e.g., OlapConfig, StreamConfig).\n */\nexport class TypedBase<T, C> {\n /** The JSON schema representation of type T. Injected by the compiler plugin. */\n schema: IJsonSchemaCollection.IV3_1;\n /** The name assigned to this resource instance. 
*/\n name: string;\n\n /** A dictionary mapping column names (keys of T) to their Column definitions. */\n columns: {\n [columnName in keyof Required<T>]: Column;\n };\n /** An array containing the Column definitions for this resource. Injected by the compiler plugin. */\n columnArray: Column[];\n\n /** The configuration object specific to this resource type. */\n config: C;\n\n /** Typia validation functions for type T. Injected by the compiler plugin for OlapTable. */\n validators?: TypiaValidators<T>;\n\n /** Optional metadata for the resource, always present as an object. */\n metadata!: { [key: string]: any };\n\n /**\n * Whether this resource allows extra fields beyond the defined columns.\n * When true, extra fields in payloads are passed through to streaming functions.\n * Injected by the compiler plugin when the type has an index signature.\n */\n allowExtraFields: boolean;\n\n /**\n * @internal Constructor intended for internal use by subclasses and the compiler plugin.\n * It expects the schema and columns to be provided, typically injected by the compiler.\n *\n * @param name The name for the resource instance.\n * @param config The configuration object for the resource.\n * @param schema The JSON schema for the resource's data type T (injected).\n * @param columns The array of Column definitions for T (injected).\n * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).\n */\n constructor(\n name: string,\n config: C,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n allowExtraFields?: boolean,\n ) {\n if (schema === undefined || columns === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n this.schema = schema;\n this.columnArray = columns;\n const columnsObj = {} as any;\n columns.forEach((column) => {\n columnsObj[column.name] = column;\n });\n this.columns = columnsObj;\n\n this.name = name;\n this.config = config;\n this.validators = validators;\n this.allowExtraFields = allowExtraFields ?? false;\n\n // Always ensure metadata is an object and attach stackTrace (last 10 lines only)\n this.metadata =\n (config as any)?.metadata ? 
{ ...(config as any).metadata } : {};\n\n if (!this.metadata.source) {\n const stack = new Error().stack;\n if (stack) {\n const info = getSourceFileInfo(stack);\n this.metadata.source = { file: info.file, line: info.line };\n }\n }\n }\n}\n","import ts from \"typescript\";\nimport { IdentifierBrandedString } from \"../sqlHelpers\";\n\nexport type EnumValues =\n | { name: string; value: { Int: number } }[]\n | { name: string; value: { String: string } }[];\nexport type DataEnum = { name: string; values: EnumValues };\nexport type Nested = { name: string; columns: Column[]; jwt: boolean };\nexport type ArrayType = { elementType: DataType; elementNullable: boolean };\nexport type NamedTupleType = { fields: Array<[string, DataType]> };\nexport type MapType = { keyType: DataType; valueType: DataType };\nexport type JsonOptions = {\n max_dynamic_paths?: number;\n max_dynamic_types?: number;\n typed_paths?: Array<[string, DataType]>;\n skip_paths?: string[];\n skip_regexps?: string[];\n};\nexport type DataType =\n | string\n | DataEnum\n | ArrayType\n | Nested\n | NamedTupleType\n | MapType\n | JsonOptions\n | { nullable: DataType };\nexport interface Column {\n name: IdentifierBrandedString;\n data_type: DataType;\n required: boolean;\n unique: false; // what is this for?\n primary_key: boolean;\n default: string | null;\n materialized: string | null;\n ttl: string | null;\n codec: string | null;\n annotations: [string, any][];\n comment: string | null;\n}\n\nexport interface DataModel {\n columns: Column[];\n name: string;\n}\n\nexport class UnknownType extends Error {\n t: ts.Type;\n fieldName: string;\n typeName: string;\n constructor(t: ts.Type, fieldName: string, typeName: string) {\n super();\n this.t = t;\n this.fieldName = fieldName;\n this.typeName = typeName;\n }\n}\n\nexport class NullType extends Error {\n fieldName: string;\n typeName: string;\n constructor(fieldName: string, typeName: string) {\n super();\n this.fieldName = fieldName;\n this.typeName = typeName;\n }\n}\n\nexport class UnsupportedEnum extends Error {\n enumName: string;\n constructor(enumName: string) {\n super();\n this.enumName = enumName;\n }\n}\n\nexport class UnsupportedFeature extends Error {\n featureName: string;\n constructor(featureName: string) {\n super();\n this.featureName = featureName;\n }\n}\n\nexport class IndexType extends Error {\n typeName: string;\n indexSignatures: string[];\n\n constructor(typeName: string, indexSignatures: string[]) {\n const explanation =\n \"Index signatures (e.g. 
[key: string]: value) are not supported in data models.\";\n\n const suggestion =\n \"Consider splitting this into separate types or using a single Record<K, V> type.\";\n\n const signatures = `Found index signatures: ${indexSignatures.join(\", \")}`;\n\n super(\n `${explanation}\\n\\nType: ${typeName}\\n\\n${signatures}\\n\\nSuggestion: ${suggestion}`,\n );\n\n this.typeName = typeName;\n this.indexSignatures = indexSignatures;\n }\n}\n\n/**\n * Type guard: is this DataType an Array(Nested(...))?\n * Uses the ArrayType and Nested types for type safety.\n */\nexport function isArrayNestedType(\n dt: DataType,\n): dt is ArrayType & { elementType: Nested } {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n (dt as ArrayType).elementType !== null &&\n typeof (dt as ArrayType).elementType === \"object\" &&\n (dt as ArrayType).elementType.hasOwnProperty(\"columns\") &&\n Array.isArray(((dt as ArrayType).elementType as Nested).columns)\n );\n}\n\n/**\n * Type guard: is this DataType a Nested struct (not array)?\n */\nexport function isNestedType(dt: DataType): dt is Nested {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n Array.isArray((dt as Nested).columns)\n );\n}\n","// source https://github.com/blakeembrey/sql-template-tag/blob/main/src/index.ts\nimport { Column } from \"./dataModels/dataModelTypes\";\nimport { OlapTable } from \"./dmv2\";\n\nimport { AggregationFunction } from \"./dataModels/typeConvert\";\n\n/**\n * Quote a ClickHouse identifier with backticks if not already quoted.\n * Backticks allow special characters (e.g., hyphens) in identifiers.\n */\nexport const quoteIdentifier = (name: string): string => {\n return name.startsWith(\"`\") && name.endsWith(\"`\") ? name : `\\`${name}\\``;\n};\n\nconst isTable = (\n value: RawValue | Column | OlapTable<any>,\n): value is OlapTable<any> =>\n typeof value === \"object\" &&\n value !== null &&\n \"kind\" in value &&\n value.kind === \"OlapTable\";\n\nexport type IdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\nexport type NonIdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\n\n/**\n * Values supported by SQL engine.\n */\nexport type Value =\n | NonIdentifierBrandedString\n | number\n | boolean\n | Date\n | [string, string];\n\n/**\n * Supported value or SQL instance.\n */\nexport type RawValue = Value | Sql;\n\nconst isColumn = (value: RawValue | Column | OlapTable<any>): value is Column =>\n typeof value === \"object\" && \"name\" in value && \"annotations\" in value;\n\nexport function sql(\n strings: readonly string[],\n ...values: readonly (RawValue | Column | OlapTable<any>)[]\n) {\n return new Sql(strings, values);\n}\n\nconst instanceofSql = (\n value: RawValue | Column | OlapTable<any>,\n): value is Sql =>\n typeof value === \"object\" && \"values\" in value && \"strings\" in value;\n\n/**\n * A SQL instance can be nested within each other to build SQL strings.\n */\nexport class Sql {\n readonly values: Value[];\n readonly strings: string[];\n\n constructor(\n rawStrings: readonly string[],\n rawValues: readonly (RawValue | Column | OlapTable<any>)[],\n ) {\n if (rawStrings.length - 1 !== rawValues.length) {\n if (rawStrings.length === 0) {\n throw new TypeError(\"Expected at least 1 string\");\n }\n\n throw new TypeError(\n `Expected ${rawStrings.length} strings to have ${\n rawStrings.length - 1\n } values`,\n );\n }\n\n const valuesLength = rawValues.reduce<number>(\n (len: number, value: RawValue | Column | OlapTable<any>) 
=>\n len +\n (instanceofSql(value) ? value.values.length\n : isColumn(value) || isTable(value) ? 0\n : 1),\n 0,\n );\n\n this.values = new Array(valuesLength);\n this.strings = new Array(valuesLength + 1);\n\n this.strings[0] = rawStrings[0];\n\n // Iterate over raw values, strings, and children. The value is always\n // positioned between two strings, e.g. `index + 1`.\n let i = 0,\n pos = 0;\n while (i < rawValues.length) {\n const child = rawValues[i++];\n const rawString = rawStrings[i];\n\n // Check for nested `sql` queries.\n if (instanceofSql(child)) {\n // Append child prefix text to current string.\n this.strings[pos] += child.strings[0];\n\n let childIndex = 0;\n while (childIndex < child.values.length) {\n this.values[pos++] = child.values[childIndex++];\n this.strings[pos] = child.strings[childIndex];\n }\n\n // Append raw string to current string.\n this.strings[pos] += rawString;\n } else if (isColumn(child)) {\n const aggregationFunction = child.annotations.find(\n ([k, _]) => k === \"aggregationFunction\",\n );\n if (aggregationFunction !== undefined) {\n this.strings[pos] +=\n `${(aggregationFunction[1] as AggregationFunction).functionName}Merge(\\`${child.name}\\`)`;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else if (isTable(child)) {\n if (child.config.database) {\n this.strings[pos] += `\\`${child.config.database}\\`.\\`${child.name}\\``;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else {\n this.values[pos++] = child;\n this.strings[pos] = rawString;\n }\n }\n }\n}\n\nexport const toStaticQuery = (sql: Sql): string => {\n const [query, params] = toQuery(sql);\n if (Object.keys(params).length !== 0) {\n throw new Error(\n \"Dynamic SQL is not allowed in the select statement in view creation.\",\n );\n }\n return query;\n};\n\nexport const toQuery = (sql: Sql): [string, { [pN: string]: any }] => {\n const parameterizedStubs = sql.values.map((v, i) =>\n createClickhouseParameter(i, v),\n );\n\n const query = sql.strings\n .map((s, i) =>\n s != \"\" ? `${s}${emptyIfUndefined(parameterizedStubs[i])}` : \"\",\n )\n .join(\"\");\n\n const query_params = sql.values.reduce(\n (acc: Record<string, unknown>, v, i) => ({\n ...acc,\n [`p${i}`]: getValueFromParameter(v),\n }),\n {},\n );\n return [query, query_params];\n};\n\n/**\n * Build a display-only SQL string with values inlined for logging/debugging.\n * Does not alter execution behavior; use toQuery for actual execution.\n */\nexport const toQueryPreview = (sql: Sql): string => {\n try {\n const formatValue = (v: Value): string => {\n // Unwrap identifiers: [\"Identifier\", name]\n if (Array.isArray(v)) {\n const [type, val] = v as unknown as [string, any];\n if (type === \"Identifier\") {\n // Quote identifiers with backticks like other helpers\n return `\\`${String(val)}\\``;\n }\n // Fallback for unexpected arrays\n return `[${(v as unknown as any[]).map((x) => formatValue(x as Value)).join(\", \")}]`;\n }\n if (v === null || v === undefined) return \"NULL\";\n if (typeof v === \"string\") return `'${v.replace(/'/g, \"''\")}'`;\n if (typeof v === \"number\") return String(v);\n if (typeof v === \"boolean\") return v ? \"true\" : \"false\";\n if (v instanceof Date)\n return `'${v.toISOString().replace(\"T\", \" \").slice(0, 19)}'`;\n try {\n return JSON.stringify(v as unknown as any);\n } catch {\n return String(v);\n }\n };\n\n let out = sql.strings[0] ?? 
\"\";\n for (let i = 0; i < sql.values.length; i++) {\n const val = getValueFromParameter(sql.values[i] as any);\n out += formatValue(val as Value);\n out += sql.strings[i + 1] ?? \"\";\n }\n return out.replace(/\\s+/g, \" \").trim();\n } catch (error) {\n console.log(`toQueryPreview error: ${error}`);\n return \"/* query preview unavailable */\";\n }\n};\n\nexport const getValueFromParameter = (value: any) => {\n if (Array.isArray(value)) {\n const [type, val] = value;\n if (type === \"Identifier\") return val;\n }\n return value;\n};\nexport function createClickhouseParameter(\n parameterIndex: number,\n value: Value,\n) {\n // ClickHouse use {name:type} be a placeholder, so if we only use number string as name e.g: {1:Unit8}\n // it will face issue when converting to the query params => {1: value1}, because the key is value not string type, so here add prefix \"p\" to avoid this issue.\n return `{p${parameterIndex}:${mapToClickHouseType(value)}}`;\n}\n\n/**\n * Convert the JS type (source is JSON format by API query parameter) to the corresponding ClickHouse type for generating named placeholder of parameterized query.\n * Only support to convert number to Int or Float, boolean to Bool, string to String, other types will convert to String.\n * If exist complex type e.g: object, Array, null, undefined, Date, Record.. etc, just convert to string type by ClickHouse function in SQL.\n * ClickHouse support converting string to other types function.\n * Please see Each section of the https://clickhouse.com/docs/en/sql-reference/functions and https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions\n * @param value\n * @returns 'Float', 'Int', 'Bool', 'String'\n */\nexport const mapToClickHouseType = (value: Value) => {\n if (typeof value === \"number\") {\n // infer the float or int according to exist remainder or not\n return Number.isInteger(value) ? \"Int\" : \"Float\";\n }\n // When define column type or query result with parameterized query, The Bool or Boolean type both supported.\n // But the column type of query result only return Bool, so we only support Bool type for safety.\n if (typeof value === \"boolean\") return \"Bool\";\n if (value instanceof Date) return \"DateTime\";\n if (Array.isArray(value)) {\n const [type, _] = value;\n return type;\n }\n return \"String\";\n};\nfunction emptyIfUndefined(value: string | undefined): string {\n return value === undefined ? 
\"\" : value;\n}\n","import { quoteIdentifier } from \"../sqlHelpers\";\n\ninterface AggregationCreateOptions {\n tableCreateOptions: TableCreateOptions;\n materializedViewName: string;\n select: string;\n}\n\ninterface AggregationDropOptions {\n viewName: string;\n tableName: string;\n}\n\ninterface MaterializedViewCreateOptions {\n name: string;\n destinationTable: string;\n select: string;\n}\n\ninterface PopulateTableOptions {\n destinationTable: string;\n select: string;\n}\n\ninterface TableCreateOptions {\n name: string;\n columns: Record<string, string>;\n engine?: ClickHouseEngines;\n orderBy?: string;\n}\n\nexport interface Blocks {\n setup: string[];\n teardown: string[];\n}\n\nexport enum ClickHouseEngines {\n MergeTree = \"MergeTree\",\n ReplacingMergeTree = \"ReplacingMergeTree\",\n SummingMergeTree = \"SummingMergeTree\",\n AggregatingMergeTree = \"AggregatingMergeTree\",\n CollapsingMergeTree = \"CollapsingMergeTree\",\n VersionedCollapsingMergeTree = \"VersionedCollapsingMergeTree\",\n GraphiteMergeTree = \"GraphiteMergeTree\",\n S3Queue = \"S3Queue\",\n S3 = \"S3\",\n Buffer = \"Buffer\",\n Distributed = \"Distributed\",\n IcebergS3 = \"IcebergS3\",\n Kafka = \"Kafka\",\n ReplicatedMergeTree = \"ReplicatedMergeTree\",\n ReplicatedReplacingMergeTree = \"ReplicatedReplacingMergeTree\",\n ReplicatedAggregatingMergeTree = \"ReplicatedAggregatingMergeTree\",\n ReplicatedSummingMergeTree = \"ReplicatedSummingMergeTree\",\n ReplicatedCollapsingMergeTree = \"ReplicatedCollapsingMergeTree\",\n ReplicatedVersionedCollapsingMergeTree = \"ReplicatedVersionedCollapsingMergeTree\",\n}\n\n/**\n * Drops an existing view if it exists.\n */\nexport function dropView(name: string): string {\n return `DROP VIEW IF EXISTS ${quoteIdentifier(name)}`.trim();\n}\n\n/**\n * Creates a materialized view.\n */\nexport function createMaterializedView(\n options: MaterializedViewCreateOptions,\n): string {\n return `CREATE MATERIALIZED VIEW IF NOT EXISTS ${quoteIdentifier(options.name)}\n TO ${quoteIdentifier(options.destinationTable)}\n AS ${options.select}`.trim();\n}\n\n/**\n * @deprecated Population of tables is now handled automatically by the Rust infrastructure.\n * This function is kept for backwards compatibility but will be ignored.\n * The framework now intelligently determines when to populate based on:\n * - Whether the materialized view is new or being replaced\n * - Whether the source is an S3Queue table (which doesn't support SELECT)\n *\n * Populates a table with data.\n */\nexport function populateTable(options: PopulateTableOptions): string {\n return `INSERT INTO ${quoteIdentifier(options.destinationTable)}\n ${options.select}`.trim();\n}\n","import { Pattern, TagBase } from \"typia/lib/tags\";\nimport { tags } from \"typia\";\n\nexport type ClickHousePrecision<P extends number> = {\n _clickhouse_precision?: P;\n};\n\nexport const DecimalRegex: \"^-?\\\\d+(\\\\.\\\\d+)?$\" = \"^-?\\\\d+(\\\\.\\\\d+)?$\";\n\nexport type ClickHouseDecimal<P extends number, S extends number> = {\n _clickhouse_precision?: P;\n _clickhouse_scale?: S;\n} & Pattern<typeof DecimalRegex>;\n\nexport type ClickHouseFixedStringSize<N extends number> = {\n _clickhouse_fixed_string_size?: N;\n};\n\n/**\n * FixedString(N) - Fixed-length string of exactly N bytes.\n *\n * ClickHouse stores exactly N bytes, padding shorter values with null bytes.\n * Values exceeding N bytes will throw an exception.\n *\n * Use for binary data: hashes, IP addresses, UUIDs, MAC addresses.\n *\n * @example\n * interface BinaryData {\n 
* md5_hash: string & FixedString<16>; // 16-byte MD5\n * sha256_hash: string & FixedString<32>; // 32-byte SHA256\n * }\n */\nexport type FixedString<N extends number> = string &\n ClickHouseFixedStringSize<N>;\n\nexport type ClickHouseByteSize<N extends number> = {\n _clickhouse_byte_size?: N;\n};\n\nexport type LowCardinality = {\n _LowCardinality?: true;\n};\n\n// ClickHouse-friendly helper aliases for clarity in user schemas\n// These are erased at compile time but guide the ClickHouse mapping logic.\nexport type DateTime = Date;\nexport type DateTime64<P extends number> = Date & ClickHousePrecision<P>;\n\nexport type DateTimeString = string & tags.Format<\"date-time\">;\n/**\n * JS Date objects cannot hold microsecond precision.\n * Use string as the runtime type to avoid losing information.\n */\nexport type DateTime64String<P extends number> = string &\n tags.Format<\"date-time\"> &\n ClickHousePrecision<P>;\n\n// Numeric convenience tags mirroring ClickHouse integer and float families\nexport type Float32 = number & ClickHouseFloat<\"float32\">;\nexport type Float64 = number & ClickHouseFloat<\"float64\">;\n\nexport type Int8 = number & ClickHouseInt<\"int8\">;\nexport type Int16 = number & ClickHouseInt<\"int16\">;\nexport type Int32 = number & ClickHouseInt<\"int32\">;\nexport type Int64 = number & ClickHouseInt<\"int64\">;\n\nexport type UInt8 = number & ClickHouseInt<\"uint8\">;\nexport type UInt16 = number & ClickHouseInt<\"uint16\">;\nexport type UInt32 = number & ClickHouseInt<\"uint32\">;\nexport type UInt64 = number & ClickHouseInt<\"uint64\">;\n\n// Decimal(P, S) annotation\nexport type Decimal<P extends number, S extends number> = string &\n ClickHouseDecimal<P, S>;\n\n/**\n * Attach compression codec to a column type.\n *\n * Any valid ClickHouse codec expression is allowed. ClickHouse validates the codec at runtime.\n *\n * @template T The base data type\n * @template CodecExpr The codec expression (single codec or chain)\n *\n * @example\n * interface Metrics {\n * // Single codec\n * log_blob: string & ClickHouseCodec<\"ZSTD(3)\">;\n *\n * // Codec chain (processed left-to-right)\n * timestamp: Date & ClickHouseCodec<\"Delta, LZ4\">;\n * temperature: number & ClickHouseCodec<\"Gorilla, ZSTD\">;\n *\n * // Specialized codecs\n * counter: number & ClickHouseCodec<\"DoubleDelta\">;\n *\n * // Can combine with other annotations\n * count: UInt64 & ClickHouseCodec<\"DoubleDelta, LZ4\">;\n * }\n */\nexport type ClickHouseCodec<CodecExpr extends string> = {\n _clickhouse_codec?: CodecExpr;\n};\n\nexport type ClickHouseFloat<Value extends \"float32\" | \"float64\"> = tags.Type<\n Value extends \"float32\" ? \"float\" : \"double\"\n>;\n\nexport type ClickHouseInt<\n Value extends\n | \"int8\"\n | \"int16\"\n | \"int32\"\n | \"int64\"\n // | \"int128\"\n // | \"int256\"\n | \"uint8\"\n | \"uint16\"\n | \"uint32\"\n | \"uint64\",\n // | \"uint128\"\n // | \"uint256\",\n> =\n Value extends \"int32\" | \"int64\" | \"uint32\" | \"uint64\" ? tags.Type<Value>\n : TagBase<{\n target: \"number\";\n kind: \"type\";\n value: Value;\n validate: Value extends \"int8\" ? \"-128 <= $input && $input <= 127\"\n : Value extends \"int16\" ? \"-32768 <= $input && $input <= 32767\"\n : Value extends \"uint8\" ? \"0 <= $input && $input <= 255\"\n : Value extends \"uint16\" ? 
\"0 <= $input && $input <= 65535\"\n : never;\n exclusive: true;\n schema: {\n type: \"integer\";\n };\n }>;\n\n/**\n * By default, nested objects map to the `Nested` type in clickhouse.\n * Write `nestedObject: AnotherInterfaceType & ClickHouseNamedTuple`\n * to map AnotherInterfaceType to the named tuple type.\n */\nexport type ClickHouseNamedTuple = {\n _clickhouse_mapped_type?: \"namedTuple\";\n};\n\nexport type ClickHouseJson<\n maxDynamicPaths extends number | undefined = undefined,\n maxDynamicTypes extends number | undefined = undefined,\n skipPaths extends string[] = [],\n skipRegexes extends string[] = [],\n> = {\n _clickhouse_mapped_type?: \"JSON\";\n _clickhouse_json_settings?: {\n maxDynamicPaths?: maxDynamicPaths;\n maxDynamicTypes?: maxDynamicTypes;\n skipPaths?: skipPaths;\n skipRegexes?: skipRegexes;\n };\n};\n\n// Geometry helper types\nexport type ClickHousePoint = [number, number] & {\n _clickhouse_mapped_type?: \"Point\";\n};\nexport type ClickHouseRing = ClickHousePoint[] & {\n _clickhouse_mapped_type?: \"Ring\";\n};\nexport type ClickHouseLineString = ClickHousePoint[] & {\n _clickhouse_mapped_type?: \"LineString\";\n};\nexport type ClickHouseMultiLineString = ClickHouseLineString[] & {\n _clickhouse_mapped_type?: \"MultiLineString\";\n};\nexport type ClickHousePolygon = ClickHouseRing[] & {\n _clickhouse_mapped_type?: \"Polygon\";\n};\nexport type ClickHouseMultiPolygon = ClickHousePolygon[] & {\n _clickhouse_mapped_type?: \"MultiPolygon\";\n};\n\n/**\n * typia may have trouble handling this type.\n * In which case, use {@link WithDefault} as a workaround\n *\n * @example\n * { field: number & ClickHouseDefault<\"0\"> }\n */\nexport type ClickHouseDefault<SqlExpression extends string> = {\n _clickhouse_default?: SqlExpression;\n};\n\n/**\n * @example\n * {\n * ...\n * timestamp: Date;\n * debugMessage: string & ClickHouseTTL<\"timestamp + INTERVAL 1 WEEK\">;\n * }\n */\nexport type ClickHouseTTL<SqlExpression extends string> = {\n _clickhouse_ttl?: SqlExpression;\n};\n\n/**\n * ClickHouse MATERIALIZED column annotation.\n * The column value is computed at INSERT time and physically stored.\n * Cannot be explicitly inserted by users.\n *\n * @example\n * interface Events {\n * eventTime: DateTime;\n * // Extract date component - computed and stored at insert time\n * eventDate: Date & ClickHouseMaterialized<\"toDate(event_time)\">;\n *\n * userId: string;\n * // Precompute hash for fast lookups\n * userHash: UInt64 & ClickHouseMaterialized<\"cityHash64(userId)\">;\n * }\n *\n * @remarks\n * - MATERIALIZED and DEFAULT are mutually exclusive\n * - Can be combined with ClickHouseCodec for compression\n * - Changing the expression modifies the column in-place (existing values preserved)\n */\nexport type ClickHouseMaterialized<SqlExpression extends string> = {\n _clickhouse_materialized?: SqlExpression;\n};\n\n/**\n * See also {@link ClickHouseDefault}\n *\n * @example{ updated_at: WithDefault<Date, \"now()\"> }\n */\nexport type WithDefault<T, _SqlExpression extends string> = T;\n","export type Key<T extends string | number | Date> = T;\n\nexport type JWT<T extends object> = T;\n\nexport {\n Aggregated,\n SimpleAggregated,\n OlapTable,\n OlapConfig,\n S3QueueTableSettings,\n Stream,\n StreamConfig,\n DeadLetterModel,\n DeadLetter,\n DeadLetterQueue,\n IngestApi,\n IngestConfig,\n Api,\n ApiConfig,\n ConsumptionApi,\n EgressConfig,\n IngestPipeline,\n SqlResource,\n View,\n MaterializedView,\n Task,\n Workflow,\n ETLPipeline,\n ETLPipelineConfig,\n LifeCycle,\n WebApp,\n 
WebAppConfig,\n WebAppHandler,\n FrameworkApp,\n // Registry functions\n getTables,\n getTable,\n getStreams,\n getStream,\n getIngestApis,\n getIngestApi,\n getApis,\n getApi,\n getSqlResources,\n getSqlResource,\n getWorkflows,\n getWorkflow,\n getWebApps,\n getWebApp,\n} from \"./dmv2\";\n\nexport {\n ClickHousePrecision,\n ClickHouseDecimal,\n ClickHouseByteSize,\n ClickHouseFixedStringSize,\n ClickHouseFloat,\n ClickHouseInt,\n ClickHouseJson,\n LowCardinality,\n ClickHouseNamedTuple,\n ClickHouseDefault,\n ClickHouseTTL,\n ClickHouseMaterialized,\n WithDefault,\n ClickHouseCodec,\n // Added friendly aliases and numeric helpers\n DateTime,\n DateTime64,\n DateTimeString,\n DateTime64String,\n FixedString,\n Float32,\n Float64,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n Decimal,\n} from \"./dataModels/types\";\n\nexport type { ApiUtil, ConsumptionUtil } from \"./consumption-apis/helpers\";\n\nexport * from \"./sqlHelpers\";\n","import http from \"http\";\nimport { createClient } from \"@clickhouse/client\";\nimport { KafkaJS } from \"@514labs/kafka-javascript\";\nimport { SASLOptions } from \"@514labs/kafka-javascript/types/kafkajs\";\nconst { Kafka } = KafkaJS;\ntype Kafka = KafkaJS.Kafka;\ntype Consumer = KafkaJS.Consumer;\nexport type Producer = KafkaJS.Producer;\n\n/**\n * Utility function for compiler-related logging that can be disabled via environment variable.\n * Set MOOSE_DISABLE_COMPILER_LOGS=true to suppress these logs (useful for testing environments).\n */\n\n/**\n * Returns true if the value is a common truthy string: \"1\", \"true\", \"yes\", \"on\" (case-insensitive).\n */\nfunction isTruthy(value: string | undefined): boolean {\n if (!value) return false;\n switch (value.trim().toLowerCase()) {\n case \"1\":\n case \"true\":\n case \"yes\":\n case \"on\":\n return true;\n default:\n return false;\n }\n}\n\nexport const compilerLog = (message: string) => {\n if (!isTruthy(process.env.MOOSE_DISABLE_COMPILER_LOGS)) {\n console.log(message);\n }\n};\n\nexport const antiCachePath = (path: string) =>\n `${path}?num=${Math.random().toString()}&time=${Date.now()}`;\n\nexport const getFileName = (filePath: string) => {\n const regex = /\\/([^\\/]+)\\.ts/;\n const matches = filePath.match(regex);\n if (matches && matches.length > 1) {\n return matches[1];\n }\n return \"\";\n};\n\ninterface ClientConfig {\n username: string;\n password: string;\n database: string;\n useSSL: string;\n host: string;\n port: string;\n}\n\nexport const getClickhouseClient = ({\n username,\n password,\n database,\n useSSL,\n host,\n port,\n}: ClientConfig) => {\n const protocol =\n useSSL === \"1\" || useSSL.toLowerCase() === \"true\" ? \"https\" : \"http\";\n console.log(`Connecting to Clickhouse at ${protocol}://${host}:${port}`);\n return createClient({\n url: `${protocol}://${host}:${port}`,\n username: username,\n password: password,\n database: database,\n application: \"moose\",\n // Note: wait_end_of_query is configured per operation type, not globally\n // to preserve SELECT query performance while ensuring INSERT/DDL reliability\n });\n};\n\nexport type CliLogData = {\n message_type?: \"Info\" | \"Success\" | \"Error\" | \"Highlight\";\n action: string;\n message: string;\n};\n\nexport const cliLog: (log: CliLogData) => void = (log) => {\n const req = http.request({\n port: parseInt(process.env.MOOSE_MANAGEMENT_PORT ?? 
\"5001\"),\n method: \"POST\",\n path: \"/logs\",\n });\n\n req.on(\"error\", (err: Error) => {\n console.log(`Error ${err.name} sending CLI log.`, err.message);\n });\n\n req.write(JSON.stringify({ message_type: \"Info\", ...log }));\n req.end();\n};\n\n/**\n * Method to change .ts, .cts, and .mts to .js, .cjs, and .mjs\n * This is needed because 'import' does not support .ts, .cts, and .mts\n */\nexport function mapTstoJs(filePath: string): string {\n return filePath\n .replace(/\\.ts$/, \".js\")\n .replace(/\\.cts$/, \".cjs\")\n .replace(/\\.mts$/, \".mjs\");\n}\n\nexport const MAX_RETRIES = 150;\nexport const MAX_RETRY_TIME_MS = 1000;\nexport const RETRY_INITIAL_TIME_MS = 100;\n\nexport const MAX_RETRIES_PRODUCER = 150;\nexport const RETRY_FACTOR_PRODUCER = 0.2;\n// Means all replicas need to acknowledge the message\nexport const ACKs = -1;\n\n/**\n * Creates the base producer configuration for Kafka.\n * Used by both the SDK stream publishing and streaming function workers.\n *\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n * @returns Producer configuration object for the Confluent Kafka client\n */\nexport function createProducerConfig(maxMessageBytes?: number) {\n return {\n kafkaJS: {\n idempotent: false, // Not needed for at-least-once delivery\n acks: ACKs,\n retry: {\n retries: MAX_RETRIES_PRODUCER,\n maxRetryTime: MAX_RETRY_TIME_MS,\n },\n },\n \"linger.ms\": 0, // This is to make sure at least once delivery with immediate feedback on the send\n ...(maxMessageBytes && { \"message.max.bytes\": maxMessageBytes }),\n };\n}\n\n/**\n * Parses a comma-separated broker string into an array of valid broker addresses.\n * Handles whitespace trimming and filters out empty elements.\n *\n * @param brokerString - Comma-separated broker addresses (e.g., \"broker1:9092, broker2:9092, , broker3:9092\")\n * @returns Array of trimmed, non-empty broker addresses\n */\nconst parseBrokerString = (brokerString: string): string[] =>\n brokerString\n .split(\",\")\n .map((b) => b.trim())\n .filter((b) => b.length > 0);\n\nexport type KafkaClientConfig = {\n clientId: string;\n broker: string;\n securityProtocol?: string; // e.g. \"SASL_SSL\" or \"PLAINTEXT\"\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string; // e.g. 
\"scram-sha-256\", \"plain\"\n};\n\n/**\n * Dynamically creates and connects a KafkaJS producer using the provided configuration.\n * Returns a connected producer instance.\n *\n * @param cfg - Kafka client configuration\n * @param logger - Logger instance\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n */\nexport async function getKafkaProducer(\n cfg: KafkaClientConfig,\n logger: Logger,\n maxMessageBytes?: number,\n): Promise<Producer> {\n const kafka = await getKafkaClient(cfg, logger);\n\n const producer = kafka.producer(createProducerConfig(maxMessageBytes));\n await producer.connect();\n return producer;\n}\n\n/**\n * Interface for logging functionality\n */\nexport interface Logger {\n logPrefix: string;\n log: (message: string) => void;\n error: (message: string) => void;\n warn: (message: string) => void;\n}\n\nexport const logError = (logger: Logger, e: Error): void => {\n logger.error(e.message);\n const stack = e.stack;\n if (stack) {\n logger.error(stack);\n }\n};\n\n/**\n * Builds SASL configuration for Kafka client authentication\n */\nconst buildSaslConfig = (\n logger: Logger,\n args: KafkaClientConfig,\n): SASLOptions | undefined => {\n const mechanism = args.saslMechanism ? args.saslMechanism.toLowerCase() : \"\";\n switch (mechanism) {\n case \"plain\":\n case \"scram-sha-256\":\n case \"scram-sha-512\":\n return {\n mechanism: mechanism,\n username: args.saslUsername || \"\",\n password: args.saslPassword || \"\",\n };\n default:\n logger.warn(`Unsupported SASL mechanism: ${args.saslMechanism}`);\n return undefined;\n }\n};\n\n/**\n * Dynamically creates a KafkaJS client configured with provided settings.\n * Use this to construct producers/consumers with custom options.\n */\nexport const getKafkaClient = async (\n cfg: KafkaClientConfig,\n logger: Logger,\n): Promise<Kafka> => {\n const brokers = parseBrokerString(cfg.broker || \"\");\n if (brokers.length === 0) {\n throw new Error(`No valid broker addresses found in: \"${cfg.broker}\"`);\n }\n\n logger.log(`Creating Kafka client with brokers: ${brokers.join(\", \")}`);\n logger.log(`Security protocol: ${cfg.securityProtocol || \"plaintext\"}`);\n logger.log(`Client ID: ${cfg.clientId}`);\n\n const saslConfig = buildSaslConfig(logger, cfg);\n\n return new Kafka({\n kafkaJS: {\n clientId: cfg.clientId,\n brokers,\n ssl: cfg.securityProtocol === \"SASL_SSL\",\n ...(saslConfig && { sasl: saslConfig }),\n retry: {\n initialRetryTime: RETRY_INITIAL_TIME_MS,\n maxRetryTime: MAX_RETRY_TIME_MS,\n retries: MAX_RETRIES,\n },\n },\n });\n};\n","/**\n * @module secrets\n * Utilities for runtime environment variable resolution.\n *\n * This module provides functionality to mark values that should be resolved\n * from environment variables at runtime by the Moose CLI, rather than being\n * embedded at build time.\n *\n * @example\n * ```typescript\n * import { S3QueueEngine, mooseRuntimeEnv } from 'moose-lib';\n *\n * const table = OlapTable<MyData>(\n * \"MyTable\",\n * OlapConfig({\n * engine: S3QueueEngine({\n * s3_path: \"s3://bucket/data/*.json\",\n * format: \"JSONEachRow\",\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\"),\n * awsSecretAccessKey: mooseRuntimeEnv.get(\"AWS_SECRET_ACCESS_KEY\")\n * })\n * })\n * );\n * ```\n */\n\n/**\n * Prefix used to mark values for runtime environment variable resolution.\n * @internal\n */\nexport const MOOSE_RUNTIME_ENV_PREFIX = \"__MOOSE_RUNTIME_ENV__:\";\n\n/**\n * Utilities for marking values to be resolved from environment 
variables at runtime.\n *\n * When you use `mooseRuntimeEnv.get()`, the behavior depends on the context:\n * - During infrastructure map loading: Returns a marker string for later resolution\n * - During function/workflow execution: Returns the actual environment variable value\n *\n * This is useful for:\n * - Credentials that should never be embedded in Docker images\n * - Configuration that can be rotated without rebuilding\n * - Different values for different environments (dev, staging, prod)\n * - Any runtime configuration in infrastructure elements (Tables, Topics, etc.)\n */\nexport const mooseRuntimeEnv = {\n /**\n * Gets a value from an environment variable, with behavior depending on context.\n *\n * When IS_LOADING_INFRA_MAP=true (infrastructure loading):\n * Returns a marker string that Moose CLI will resolve later\n *\n * When IS_LOADING_INFRA_MAP is unset (function/workflow runtime):\n * Returns the actual value from the environment variable\n *\n * @param envVarName - Name of the environment variable to resolve\n * @returns Either a marker string or the actual environment variable value\n * @throws {Error} If the environment variable name is empty\n * @throws {Error} If the environment variable is not set (runtime mode only)\n *\n * @example\n * ```typescript\n * // Instead of this (evaluated at build time):\n * awsAccessKeyId: process.env.AWS_ACCESS_KEY_ID\n *\n * // Use this (evaluated at runtime):\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\")\n * ```\n */\n get(envVarName: string): string {\n if (!envVarName || envVarName.trim() === \"\") {\n throw new Error(\"Environment variable name cannot be empty\");\n }\n\n // Check if we're loading infrastructure map\n const isLoadingInfraMap = process.env.IS_LOADING_INFRA_MAP === \"true\";\n\n if (isLoadingInfraMap) {\n // Return marker string for later resolution by Moose CLI\n return `${MOOSE_RUNTIME_ENV_PREFIX}${envVarName}`;\n } else {\n // Return actual value from environment for runtime execution\n const value = process.env[envVarName];\n if (value === undefined) {\n throw new Error(\n `Environment variable '${envVarName}' is not set. 
` +\n `This is required for runtime execution of functions/workflows.`,\n );\n }\n return value;\n }\n },\n};\n\n// Legacy export for backwards compatibility\n/** @deprecated Use mooseRuntimeEnv instead */\nexport const mooseEnvSecrets = mooseRuntimeEnv;\n","import { ClickHouseClient, CommandResult, ResultSet } from \"@clickhouse/client\";\nimport {\n Client as TemporalClient,\n Connection,\n ConnectionOptions,\n} from \"@temporalio/client\";\nimport { StringValue } from \"@temporalio/common\";\nimport { createHash, randomUUID } from \"node:crypto\";\nimport { performance } from \"perf_hooks\";\nimport * as fs from \"fs\";\nimport { getWorkflows } from \"../dmv2/internal\";\nimport { JWTPayload } from \"jose\";\nimport { Sql, sql, RawValue, toQuery, toQueryPreview } from \"../sqlHelpers\";\n\n/**\n * Format elapsed milliseconds into a human-readable string.\n * Matches Python's format_timespan behavior.\n */\nfunction formatElapsedTime(ms: number): string {\n if (ms < 1000) {\n return `${Math.round(ms)} ms`;\n }\n const seconds = ms / 1000;\n if (seconds < 60) {\n return `${seconds.toFixed(2)} seconds`;\n }\n const minutes = Math.floor(seconds / 60);\n const remainingSeconds = seconds % 60;\n return `${minutes} minutes and ${remainingSeconds.toFixed(2)} seconds`;\n}\n\n/**\n * Utilities provided by getMooseUtils() for database access and SQL queries.\n * Works in both Moose runtime and standalone contexts.\n */\nexport interface MooseUtils {\n client: MooseClient;\n sql: typeof sql;\n jwt?: JWTPayload;\n}\n\n/**\n * @deprecated Use MooseUtils instead. ApiUtil is now a type alias to MooseUtils\n * and will be removed in a future version.\n *\n * Migration: Replace `ApiUtil` with `MooseUtils` in your type annotations.\n */\nexport type ApiUtil = MooseUtils;\n\n/** @deprecated Use MooseUtils instead. 
*/\nexport type ConsumptionUtil = MooseUtils;\n\nexport class MooseClient {\n query: QueryClient;\n workflow: WorkflowClient;\n\n constructor(queryClient: QueryClient, temporalClient?: TemporalClient) {\n this.query = queryClient;\n this.workflow = new WorkflowClient(temporalClient);\n }\n}\n\nexport class QueryClient {\n client: ClickHouseClient;\n query_id_prefix: string;\n constructor(client: ClickHouseClient, query_id_prefix: string) {\n this.client = client;\n this.query_id_prefix = query_id_prefix;\n }\n\n async execute<T = any>(\n sql: Sql,\n ): Promise<ResultSet<\"JSONEachRow\"> & { __query_result_t?: T[] }> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Query: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.query({\n query,\n query_params,\n format: \"JSONEachRow\",\n query_id: this.query_id_prefix + randomUUID(),\n // Note: wait_end_of_query deliberately NOT set here as this is used for SELECT queries\n // where response buffering would harm streaming performance and concurrency\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Query completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n\n async command(sql: Sql): Promise<CommandResult> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Command: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.command({\n query,\n query_params,\n query_id: this.query_id_prefix + randomUUID(),\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Command completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n}\n\nexport class WorkflowClient {\n client: TemporalClient | undefined;\n\n constructor(temporalClient?: TemporalClient) {\n this.client = temporalClient;\n }\n\n async execute(name: string, input_data: any) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. Is the feature flag enabled?`,\n };\n }\n\n // Get workflow configuration\n const config = await this.getWorkflowConfig(name);\n\n // Process input data and generate workflow ID\n const [processedInput, workflowId] = this.processInputData(\n name,\n input_data,\n );\n\n console.log(\n `WorkflowClient - starting workflow: ${name} with config ${JSON.stringify(config)} and input_data ${JSON.stringify(processedInput)}`,\n );\n\n const handle = await this.client.workflow.start(\"ScriptWorkflow\", {\n args: [\n { workflow_name: name, execution_mode: \"start\" as const },\n processedInput,\n ],\n taskQueue: \"typescript-script-queue\",\n workflowId,\n workflowIdConflictPolicy: \"FAIL\",\n workflowIdReusePolicy: \"ALLOW_DUPLICATE\",\n retry: {\n maximumAttempts: config.retries,\n },\n workflowRunTimeout: config.timeout as StringValue,\n });\n\n return {\n status: 200,\n body: `Workflow started: ${name}. View it in the Temporal dashboard: http://localhost:8080/namespaces/default/workflows/${workflowId}/${handle.firstExecutionRunId}/history`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error starting workflow: ${error}`,\n };\n }\n }\n\n async terminate(workflowId: string) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. 
Is the feature flag enabled?`,\n };\n }\n\n const handle = this.client.workflow.getHandle(workflowId);\n await handle.terminate();\n\n return {\n status: 200,\n body: `Workflow terminated: ${workflowId}`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error terminating workflow: ${error}`,\n };\n }\n }\n\n private async getWorkflowConfig(\n name: string,\n ): Promise<{ retries: number; timeout: string }> {\n const workflows = await getWorkflows();\n const dmv2Workflow = workflows.get(name);\n if (dmv2Workflow) {\n return {\n retries: dmv2Workflow.config.retries || 3,\n timeout: dmv2Workflow.config.timeout || \"1h\",\n };\n }\n\n throw new Error(`Workflow config not found for ${name}`);\n }\n\n private processInputData(name: string, input_data: any): [any, string] {\n let workflowId = name;\n if (input_data) {\n const hash = createHash(\"sha256\")\n .update(JSON.stringify(input_data))\n .digest(\"hex\")\n .slice(0, 16);\n workflowId = `${name}-${hash}`;\n }\n return [input_data, workflowId];\n }\n}\n\n/**\n * This looks similar to the client in runner.ts which is a worker.\n * Temporal SDK uses similar looking connection options & client,\n * but there are different libraries for a worker & client like this one\n * that triggers workflows.\n */\nexport async function getTemporalClient(\n temporalUrl: string,\n namespace: string,\n clientCert: string,\n clientKey: string,\n apiKey: string,\n): Promise<TemporalClient | undefined> {\n try {\n console.info(\n `<api> Using temporal_url: ${temporalUrl} and namespace: ${namespace}`,\n );\n\n let connectionOptions: ConnectionOptions = {\n address: temporalUrl,\n connectTimeout: \"3s\",\n };\n\n if (clientCert && clientKey) {\n // URL with mTLS uses gRPC namespace endpoint which is what temporalUrl already is\n console.log(\"Using TLS for secure Temporal\");\n const cert = await fs.readFileSync(clientCert);\n const key = await fs.readFileSync(clientKey);\n\n connectionOptions.tls = {\n clientCertPair: { crt: cert, key: key },\n };\n } else if (apiKey) {\n console.log(\"Using API key for secure Temporal\");\n // URL with API key uses gRPC regional endpoint\n connectionOptions.address = \"us-west1.gcp.api.temporal.io:7233\";\n connectionOptions.apiKey = apiKey;\n connectionOptions.tls = {};\n connectionOptions.metadata = {\n \"temporal-namespace\": namespace,\n };\n }\n\n console.log(`<api> Connecting to Temporal at ${connectionOptions.address}`);\n const connection = await Connection.connect(connectionOptions);\n const client = new TemporalClient({ connection, namespace });\n console.log(\"<api> Connected to Temporal server\");\n\n return client;\n } catch (error) {\n console.warn(`Failed to connect to Temporal. Is the feature flag enabled?`);\n console.warn(error);\n return undefined;\n }\n}\n\nexport const ApiHelpers = {\n column: (value: string) => [\"Identifier\", value] as [string, string],\n table: (value: string) => [\"Identifier\", value] as [string, string],\n};\n\n/** @deprecated Use ApiHelpers instead. 
*/\nexport const ConsumptionHelpers = ApiHelpers;\n\nexport function joinQueries({\n values,\n separator = \",\",\n prefix = \"\",\n suffix = \"\",\n}: {\n values: readonly RawValue[];\n separator?: string;\n prefix?: string;\n suffix?: string;\n}) {\n if (values.length === 0) {\n throw new TypeError(\n \"Expected `join([])` to be called with an array of multiple elements, but got an empty array\",\n );\n }\n\n return new Sql(\n [prefix, ...Array(values.length - 1).fill(separator), suffix],\n values,\n );\n}\n","import http from \"http\";\nimport type { MooseUtils } from \"./helpers\";\n\n/**\n * @deprecated Use `getMooseUtils()` from '@514labs/moose-lib' instead.\n *\n * This synchronous function extracts MooseUtils from a request object that was\n * injected by Moose runtime middleware. It returns undefined if not running\n * in a Moose-managed context.\n *\n * Migration: Replace with the async version:\n * ```typescript\n * // Old (sync, deprecated):\n * import { getMooseUtilsFromRequest } from '@514labs/moose-lib';\n * const moose = getMooseUtilsFromRequest(req);\n *\n * // New (async, recommended):\n * import { getMooseUtils } from '@514labs/moose-lib';\n * const moose = await getMooseUtils();\n * ```\n *\n * @param req - The HTTP request object containing injected moose utilities\n * @returns MooseUtils if available on the request, undefined otherwise\n */\nexport function getMooseUtilsFromRequest(\n req: http.IncomingMessage | any,\n): MooseUtils | undefined {\n console.warn(\n \"[DEPRECATED] getMooseUtilsFromRequest() is deprecated. \" +\n \"Import getMooseUtils from '@514labs/moose-lib' and call it without parameters: \" +\n \"const { client, sql } = await getMooseUtils();\",\n );\n return (req as any).moose;\n}\n\n/**\n * @deprecated Use `getMooseUtils()` from '@514labs/moose-lib' instead.\n *\n * This is a legacy alias for getMooseUtilsFromRequest. The main getMooseUtils\n * export from '@514labs/moose-lib' is now async and does not require a request parameter.\n *\n * BREAKING CHANGE WARNING: The new getMooseUtils() returns Promise<MooseUtils>,\n * not MooseUtils | undefined. You must await the result:\n * ```typescript\n * const moose = await getMooseUtils(); // New async API\n * ```\n */\nexport const getLegacyMooseUtils = getMooseUtilsFromRequest;\n\n/**\n * @deprecated No longer needed. Use getMooseUtils() directly instead.\n * Moose now handles utility injection automatically when injectMooseUtils is true.\n */\nexport function expressMiddleware() {\n console.warn(\n \"[DEPRECATED] expressMiddleware() is deprecated. 
\" +\n \"Use getMooseUtils() directly or rely on injectMooseUtils config.\",\n );\n return (req: any, res: any, next: any) => {\n // Maintain backwards compat: copy req.raw.moose to req.moose if present\n if (!req.moose && req.raw && (req.raw as any).moose) {\n req.moose = (req.raw as any).moose;\n }\n next();\n };\n}\n\n/**\n * @deprecated Use MooseUtils from helpers.ts instead.\n */\nexport interface ExpressRequestWithMoose {\n moose?: MooseUtils;\n}\n","export interface TaskFunction {\n (input?: any): Promise<{ task: string; data: any }>;\n}\n\nexport interface TaskConfig {\n retries: number;\n}\n\nexport interface TaskDefinition {\n task: TaskFunction;\n config?: TaskConfig;\n}\n","import cluster from \"node:cluster\";\nimport { availableParallelism } from \"node:os\";\nimport { exit } from \"node:process\";\nimport { Worker } from \"node:cluster\";\n\nconst DEFAULT_MAX_CPU_USAGE_RATIO = 0.7;\n// Time to restart the worker when it exits unexpectedly\n// This value is not too high to avoid the worker to be stuck in a bad state\n// but also not too low to avoid restarting the worker too often\nconst RESTART_TIME_MS = 10000;\nconst SIGTERM = \"SIGTERM\";\nconst SIGINT = \"SIGINT\";\nconst SHUTDOWN_WORKERS_INTERVAL = 500;\n\n/**\n * Manages a cluster of worker processes, handling their lifecycle including startup,\n * shutdown, and error handling.\n *\n * @typeParam C - The type of output produced during worker startup\n */\nexport class Cluster<C> {\n // Tracks if shutdown is currently in progress\n private shutdownInProgress: boolean = false;\n // Tracks if workers exited cleanly during shutdown\n private hasCleanWorkerExit: boolean = true;\n\n // String identifying if this is primary or worker process\n private processStr = `${cluster.isPrimary ? 
\"primary\" : \"worker\"} process ${process.pid}`;\n\n // Functions for starting and stopping workers\n private workerStart: (w: Worker, paralelism: number) => Promise<C>;\n private workerStop: (c: C) => Promise<void>;\n\n // Result from starting worker, needed for cleanup\n private startOutput: C | undefined;\n private maxCpuUsageRatio: number;\n private usedCpuCount: number;\n\n /**\n * Creates a new cluster manager instance.\n *\n * @param options - Configuration options for the cluster\n * @param options.workerStart - Async function to execute when starting a worker\n * @param options.workerStop - Async function to execute when stopping a worker\n * @param options.maxCpuUsageRatio - Maximum ratio of CPU cores to utilize (0-1)\n * @param options.maxWorkerCount - Maximum number of workers to spawn\n * @throws {Error} If maxCpuUsageRatio is not between 0 and 1\n */\n constructor(options: {\n workerStart: (w: Worker, paralelism: number) => Promise<C>;\n workerStop: (c: C) => Promise<void>;\n maxCpuUsageRatio?: number;\n maxWorkerCount?: number;\n }) {\n this.workerStart = options.workerStart;\n this.workerStop = options.workerStop;\n if (\n options.maxCpuUsageRatio &&\n (options.maxCpuUsageRatio > 1 || options.maxCpuUsageRatio < 0)\n ) {\n throw new Error(\"maxCpuUsageRatio must be between 0 and 1\");\n }\n this.maxCpuUsageRatio =\n options.maxCpuUsageRatio || DEFAULT_MAX_CPU_USAGE_RATIO;\n this.usedCpuCount = this.computeCPUUsageCount(\n this.maxCpuUsageRatio,\n options.maxWorkerCount,\n );\n }\n\n /**\n * Calculates the number of CPU cores to utilize based on available parallelism and constraints.\n *\n * @param cpuUsageRatio - Ratio of CPU cores to use (0-1)\n * @param maxWorkerCount - Optional maximum number of workers\n * @returns The number of CPU cores to utilize\n */\n computeCPUUsageCount(cpuUsageRatio: number, maxWorkerCount?: number) {\n const cpuCount = availableParallelism();\n const maxWorkers = maxWorkerCount || cpuCount;\n return Math.min(\n maxWorkers,\n Math.max(1, Math.floor(cpuCount * cpuUsageRatio)),\n );\n }\n\n /**\n * Initializes the cluster by spawning worker processes and setting up signal handlers.\n * For the primary process, spawns workers and monitors parent process.\n * For worker processes, executes the worker startup function.\n *\n * @throws {Error} If worker is undefined in worker process\n */\n async start() {\n process.on(SIGTERM, this.gracefulClusterShutdown(SIGTERM));\n process.on(SIGINT, this.gracefulClusterShutdown(SIGINT));\n\n if (cluster.isPrimary) {\n const parentPid = process.ppid;\n\n setInterval(() => {\n try {\n process.kill(parentPid, 0);\n } catch (e) {\n console.log(\"Parent process has exited.\");\n this.gracefulClusterShutdown(SIGTERM)();\n }\n }, 1000);\n\n await this.bootWorkers(this.usedCpuCount);\n } else {\n if (!cluster.worker) {\n throw new Error(\n \"Worker is not defined, it should be defined in worker process\",\n );\n }\n\n this.startOutput = await this.workerStart(\n cluster.worker,\n this.usedCpuCount,\n );\n }\n }\n\n /**\n * Spawns worker processes and configures their lifecycle event handlers.\n * Handles worker online, exit and disconnect events.\n * Automatically restarts failed workers during normal operation.\n *\n * @param numWorkers - Number of worker processes to spawn\n */\n bootWorkers = async (numWorkers: number) => {\n console.info(`Setting ${numWorkers} workers...`);\n\n for (let i = 0; i < numWorkers; i++) {\n cluster.fork();\n }\n\n cluster.on(\"online\", (worker) => {\n console.info(`worker process 
${worker.process.pid} is online`);\n });\n\n cluster.on(\"exit\", (worker, code, signal) => {\n console.info(\n `worker ${worker.process.pid} exited with code ${code} and signal ${signal}`,\n );\n\n if (!this.shutdownInProgress) {\n setTimeout(() => cluster.fork(), RESTART_TIME_MS);\n }\n\n if (this.shutdownInProgress && code != 0) {\n this.hasCleanWorkerExit = false;\n }\n });\n\n cluster.on(\"disconnect\", (worker) => {\n console.info(`worker process ${worker.process.pid} has disconnected`);\n });\n };\n\n /**\n * Creates a handler function for graceful shutdown on receipt of a signal.\n * Ensures only one shutdown can occur at a time.\n * Handles shutdown differently for primary and worker processes.\n *\n * @param signal - The signal triggering the shutdown (e.g. SIGTERM)\n * @returns An async function that performs the shutdown\n */\n gracefulClusterShutdown = (signal: NodeJS.Signals) => async () => {\n if (this.shutdownInProgress) {\n return;\n }\n\n this.shutdownInProgress = true;\n this.hasCleanWorkerExit = true;\n\n console.info(\n `Got ${signal} on ${this.processStr}. Graceful shutdown start at ${new Date().toISOString()}`,\n );\n\n try {\n if (cluster.isPrimary) {\n await this.shutdownWorkers(signal);\n console.info(`${this.processStr} - worker shutdown successful`);\n exit(0);\n } else {\n // Only attempt to stop if the worker has finished starting\n if (this.startOutput) {\n await this.workerStop(this.startOutput);\n } else {\n console.info(\n `${this.processStr} - shutdown before worker fully started`,\n );\n }\n console.info(`${this.processStr} shutdown successful`);\n this.hasCleanWorkerExit ? exit(0) : exit(1);\n }\n } catch (e) {\n console.error(`${this.processStr} - shutdown failed`, e);\n exit(1);\n }\n };\n\n /**\n * Gracefully terminates all worker processes.\n * Monitors workers until they all exit or timeout occurs.\n * Only relevant for the primary process.\n *\n * @param signal - The signal to send to worker processes\n * @returns A promise that resolves when all workers have terminated\n */\n shutdownWorkers = (signal: NodeJS.Signals) => {\n return new Promise<void>((resolve, reject) => {\n if (!cluster.isPrimary) {\n return resolve();\n }\n\n if (!cluster.workers) {\n return resolve();\n }\n\n const workerIds = Object.keys(cluster.workers);\n if (workerIds.length == 0) {\n return resolve();\n }\n\n let workersAlive = 0;\n let funcRun = 0;\n\n const cleanWorkers = () => {\n ++funcRun;\n workersAlive = 0;\n\n Object.values(cluster.workers || {})\n .filter((worker) => !!worker)\n .forEach((worker) => {\n if (worker && !worker.isDead()) {\n ++workersAlive;\n if (funcRun == 1) {\n worker.kill(signal);\n }\n }\n });\n\n console.info(workersAlive + \" workers alive\");\n if (workersAlive == 0) {\n clearInterval(interval);\n return resolve();\n }\n };\n\n const interval = setInterval(cleanWorkers, SHUTDOWN_WORKERS_INTERVAL);\n });\n };\n}\n","import http from \"http\";\nimport { getClickhouseClient } from \"../commons\";\nimport { MooseClient, QueryClient, getTemporalClient } from \"./helpers\";\nimport * as jose from \"jose\";\nimport { ClickHouseClient } from \"@clickhouse/client\";\nimport { Cluster } from \"../cluster-utils\";\nimport { ApiUtil } from \"../index\";\nimport { sql } from \"../sqlHelpers\";\nimport { Client as TemporalClient } from \"@temporalio/client\";\nimport { getApis, getWebApps } from \"../dmv2/internal\";\n\ninterface ClickhouseConfig {\n database: string;\n host: string;\n port: string;\n username: string;\n password: string;\n useSSL: 
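A sketch of driving the `Cluster` helper end to end, assuming it is imported from its module path (cluster-utils is not re-exported from index.ts, so the import specifier below is an assumption):

```typescript
import http from "node:http";
// Import path assumed; Cluster lives in src/cluster-utils.ts and is not
// part of the public index.ts exports.
import { Cluster } from "@514labs/moose-lib/dist/cluster-utils";

const apiCluster = new Cluster<http.Server>({
  // With availableParallelism() === 8 and ratio 0.7, computeCPUUsageCount
  // returns min(8, max(1, floor(8 * 0.7))) = 5 workers.
  maxCpuUsageRatio: 0.7,
  workerStart: async (_worker, _parallelism) => {
    const server = http.createServer((_req, res) => res.end("ok"));
    server.listen(0);
    return server; // stored as startOutput and handed to workerStop
  },
  workerStop: (server) =>
    new Promise<void>((resolve) => server.close(() => resolve())),
});

// Primary process: forks workers and watches the parent pid.
// Worker process: runs workerStart and waits for SIGTERM/SIGINT.
apiCluster.start();
```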
boolean;\n}\n\ninterface JwtConfig {\n secret?: string;\n issuer: string;\n audience: string;\n}\n\ninterface TemporalConfig {\n url: string;\n namespace: string;\n clientCert: string;\n clientKey: string;\n apiKey: string;\n}\n\ninterface ApisConfig {\n apisDir: string;\n clickhouseConfig: ClickhouseConfig;\n jwtConfig?: JwtConfig;\n temporalConfig?: TemporalConfig;\n enforceAuth: boolean;\n isDmv2: boolean;\n proxyPort?: number;\n workerCount?: number;\n}\n\n// Convert our config to Clickhouse client config\nconst toClientConfig = (config: ClickhouseConfig) => ({\n ...config,\n useSSL: config.useSSL ? \"true\" : \"false\",\n});\n\nconst createPath = (apisDir: string, path: string) => `${apisDir}${path}.ts`;\n\nconst httpLogger = (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n startMs: number,\n) => {\n console.log(\n `${req.method} ${req.url} ${res.statusCode} ${Date.now() - startMs}ms`,\n );\n};\n\nconst modulesCache = new Map<string, any>();\n\nexport function createApi<T extends object, R = any>(\n _handler: (params: T, utils: ApiUtil) => Promise<R>,\n): (\n rawParams: Record<string, string[] | string>,\n utils: ApiUtil,\n) => Promise<R> {\n throw new Error(\n \"This should be compiled-time replaced by compiler plugins to add parsing.\",\n );\n}\n\n/** @deprecated Use `Api` from \"dmv2/sdk/consumptionApi\" instead. */\nexport const createConsumptionApi = createApi;\n\nconst apiHandler = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apis = isDmv2 ? await getApis() : new Map();\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n try {\n const url = new URL(req.url || \"\", \"http://localhost\");\n const fileName = url.pathname;\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1]; // Bearer <token>\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed\");\n if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n\n const pathName = createPath(apisDir, fileName);\n const paramsObject = Array.from(url.searchParams.entries()).reduce(\n (obj: { [key: string]: string[] | string }, [key, value]) => {\n const existingValue = obj[key];\n if (existingValue) {\n if (Array.isArray(existingValue)) {\n existingValue.push(value);\n } else {\n obj[key] = [existingValue, value];\n }\n } else {\n obj[key] = value;\n }\n return obj;\n },\n {},\n );\n\n let userFuncModule = modulesCache.get(pathName);\n if (userFuncModule === undefined) {\n if (isDmv2) {\n let apiName = fileName.replace(/^\\/+|\\/+$/g, \"\");\n let version: string | null = null;\n\n // First, try to find the API by the full path (for custom 
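The `paramsObject` reduce collapses repeated query parameters into arrays while leaving single values as strings. The same folding in isolation:

```typescript
const url = new URL("http://localhost/dailyActiveUsers?limit=10&tag=a&tag=b");

const paramsObject = Array.from(url.searchParams.entries()).reduce(
  (obj: { [key: string]: string[] | string }, [key, value]) => {
    const existing = obj[key];
    if (existing) {
      if (Array.isArray(existing)) existing.push(value);
      else obj[key] = [existing, value];
    } else {
      obj[key] = value;
    }
    return obj;
  },
  {},
);
// => { limit: "10", tag: ["a", "b"] }
```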
paths)\n userFuncModule = apis.get(apiName);\n\n if (!userFuncModule) {\n // Fall back to the old name:version parsing\n version = url.searchParams.get(\"version\");\n\n // Check if version is in the path (e.g., /bar/1)\n if (!version && apiName.includes(\"/\")) {\n const pathParts = apiName.split(\"/\");\n if (pathParts.length >= 2) {\n // Try the full path first (it might be a custom path)\n userFuncModule = apis.get(apiName);\n if (!userFuncModule) {\n // If not found, treat it as name/version\n apiName = pathParts[0];\n version = pathParts.slice(1).join(\"/\");\n }\n }\n }\n\n // Only do versioned lookup if we still haven't found it\n if (!userFuncModule) {\n if (version) {\n const versionedKey = `${apiName}:${version}`;\n userFuncModule = apis.get(versionedKey);\n } else {\n userFuncModule = apis.get(apiName);\n }\n }\n }\n\n if (!userFuncModule) {\n const availableApis = Array.from(apis.keys()).map((key) =>\n key.replace(\":\", \"/\"),\n );\n const errorMessage =\n version ?\n `API ${apiName} with version ${version} not found. Available APIs: ${availableApis.join(\", \")}`\n : `API ${apiName} not found. Available APIs: ${availableApis.join(\", \")}`;\n throw new Error(errorMessage);\n }\n\n modulesCache.set(pathName, userFuncModule);\n console.log(`[API] | Executing API: ${apiName}`);\n } else {\n userFuncModule = require(pathName);\n modulesCache.set(pathName, userFuncModule);\n }\n }\n\n const queryClient = new QueryClient(clickhouseClient, fileName);\n let result =\n isDmv2 ?\n await userFuncModule(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n })\n : await userFuncModule.default(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n });\n\n let body: string;\n let status: number | undefined;\n\n // TODO investigate why these prototypes are different\n if (Object.getPrototypeOf(result).constructor.name === \"ResultSet\") {\n body = JSON.stringify(await result.json());\n } else {\n if (\"body\" in result && \"status\" in result) {\n body = JSON.stringify(result.body);\n status = result.status;\n } else {\n body = JSON.stringify(result);\n }\n }\n\n if (status) {\n res.writeHead(status, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n } else {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n }\n\n res.end(body);\n } catch (error: any) {\n console.log(\"error in path \", req.url, error);\n // todo: same workaround as ResultSet\n if (Object.getPrototypeOf(error).constructor.name === \"TypeGuardError\") {\n res.writeHead(400, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n }\n if (error instanceof Error) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n } else {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end();\n httpLogger(req, res, start);\n }\n }\n };\n};\n\nconst createMainRouter = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apiRequestHandler = await apiHandler(\n publicKey,\n clickhouseClient,\n temporalClient,\n apisDir,\n enforceAuth,\n isDmv2,\n jwtConfig,\n );\n\n const webApps = isDmv2 ? 
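The dmv2 lookup above tries the full request path first, then falls back to `name:version` keys. With a hypothetical registry (all names invented):

```typescript
const handler = async () => ({ ok: true });
const apis = new Map<string, typeof handler>([
  ["bar", handler],           // unversioned name
  ["bar:1", handler],         // versioned key, name:version
  ["reports/daily", handler], // custom path containing "/"
]);

// GET /reports/daily -> full-path hit: apis.get("reports/daily")
// GET /bar/1         -> no full-path hit, so it is re-read as name "bar"
//                       plus version "1" and resolved via apis.get("bar:1")
// GET /bar?version=1 -> explicit version param, also apis.get("bar:1")
// GET /bar           -> apis.get("bar")
```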
await getWebApps() : new Map();\n\n const sortedWebApps = Array.from(webApps.values()).sort((a, b) => {\n const pathA = a.config.mountPath || \"/\";\n const pathB = b.config.mountPath || \"/\";\n return pathB.length - pathA.length;\n });\n\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n const url = new URL(req.url || \"\", \"http://localhost\");\n const pathname = url.pathname;\n\n // Health check - checked before all other routes\n if (pathname === \"/_moose_internal/health\") {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n res.end(\n JSON.stringify({\n status: \"healthy\",\n timestamp: new Date().toISOString(),\n }),\n );\n return;\n }\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1];\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed for WebApp route\");\n }\n }\n }\n\n for (const webApp of sortedWebApps) {\n const mountPath = webApp.config.mountPath || \"/\";\n const normalizedMount =\n mountPath.endsWith(\"/\") && mountPath !== \"/\" ?\n mountPath.slice(0, -1)\n : mountPath;\n\n const matches =\n pathname === normalizedMount ||\n pathname.startsWith(normalizedMount + \"/\");\n\n if (matches) {\n if (webApp.config.injectMooseUtils !== false) {\n // Import getMooseUtils dynamically to avoid circular deps\n const { getMooseUtils } = await import(\"./standalone\");\n (req as any).moose = await getMooseUtils();\n }\n\n let proxiedUrl = req.url;\n if (normalizedMount !== \"/\") {\n const pathWithoutMount =\n pathname.substring(normalizedMount.length) || \"/\";\n proxiedUrl = pathWithoutMount + url.search;\n }\n\n try {\n // Create a modified request preserving all properties including headers\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: proxiedUrl,\n },\n );\n await webApp.handler(modifiedReq, res);\n return;\n } catch (error) {\n console.error(`Error in WebApp ${webApp.name}:`, error);\n if (!res.headersSent) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Internal Server Error\" }));\n }\n return;\n }\n }\n }\n\n // If no WebApp matched, check if it's an Api request\n // Strip /api or /consumption prefix for Api routing\n let apiPath = pathname;\n if (pathname.startsWith(\"/api/\")) {\n apiPath = pathname.substring(4); // Remove \"/api\"\n } else if (pathname.startsWith(\"/consumption/\")) {\n apiPath = pathname.substring(13); // Remove \"/consumption\"\n }\n\n // If we stripped a prefix, it's an Api request\n if (apiPath !== pathname) {\n // Create a modified request with the rewritten URL for the apiHandler\n // Preserve all properties including headers by using Object.assign with prototype chain\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: apiPath + url.search,\n },\n );\n await apiRequestHandler(modifiedReq as http.IncomingMessage, res);\n return;\n }\n\n res.writeHead(404, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Not Found\" }));\n 
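Worth noting in the prefix stripping above: `substring(4)` preserves the slash after "/api", while `substring(13)` also consumes the slash after "/consumption". A pure string sketch:

```typescript
function stripApiPrefix(pathname: string): string {
  if (pathname.startsWith("/api/")) return pathname.substring(4);
  if (pathname.startsWith("/consumption/")) return pathname.substring(13);
  return pathname; // unchanged => not treated as an Api request
}

stripApiPrefix("/api/dailyActiveUsers");         // => "/dailyActiveUsers"
stripApiPrefix("/consumption/dailyActiveUsers"); // => "dailyActiveUsers" (no leading slash)
```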
httpLogger(req, res, start);\n };\n};\n\nexport const runApis = async (config: ApisConfig) => {\n const apisCluster = new Cluster({\n maxWorkerCount:\n (config.workerCount ?? 0) > 0 ? config.workerCount : undefined,\n workerStart: async () => {\n let temporalClient: TemporalClient | undefined;\n if (config.temporalConfig) {\n temporalClient = await getTemporalClient(\n config.temporalConfig.url,\n config.temporalConfig.namespace,\n config.temporalConfig.clientCert,\n config.temporalConfig.clientKey,\n config.temporalConfig.apiKey,\n );\n }\n const clickhouseClient = getClickhouseClient(\n toClientConfig(config.clickhouseConfig),\n );\n let publicKey: jose.KeyLike | undefined;\n if (config.jwtConfig?.secret) {\n console.log(\"Importing JWT public key...\");\n publicKey = await jose.importSPKI(config.jwtConfig.secret, \"RS256\");\n }\n\n // Set runtime context for getMooseUtils() to detect\n const runtimeQueryClient = new QueryClient(clickhouseClient, \"runtime\");\n (globalThis as any)._mooseRuntimeContext = {\n client: new MooseClient(runtimeQueryClient, temporalClient),\n };\n\n const server = http.createServer(\n await createMainRouter(\n publicKey,\n clickhouseClient,\n temporalClient,\n config.apisDir,\n config.enforceAuth,\n config.isDmv2,\n config.jwtConfig,\n ),\n );\n // port is now passed via config.proxyPort or defaults to 4001\n const port = config.proxyPort !== undefined ? config.proxyPort : 4001;\n server.listen(port, \"localhost\", () => {\n console.log(`Server running on port ${port}`);\n });\n\n return server;\n },\n workerStop: async (server) => {\n return new Promise<void>((resolve) => {\n server.close(() => resolve());\n });\n },\n });\n\n apisCluster.start();\n};\n","import { createClient, RedisClientType } from \"redis\";\n\n// Module-level singleton instance and initialization promise\nlet instance: MooseCache | null = null;\nlet initPromise: Promise<MooseCache> | null = null;\n\ntype SupportedTypes = string | object;\n\nexport class MooseCache {\n private client: RedisClientType;\n private isConnected: boolean = false;\n private readonly keyPrefix: string;\n private disconnectTimer: NodeJS.Timeout | null = null;\n private readonly idleTimeout: number;\n private connectPromise: Promise<void> | null = null;\n\n private constructor() {\n const redisUrl =\n process.env.MOOSE_REDIS_CONFIG__URL || \"redis://127.0.0.1:6379\";\n const prefix = process.env.MOOSE_REDIS_CONFIG__KEY_PREFIX || \"MS\";\n // 30 seconds of inactivity before disconnecting\n this.idleTimeout =\n parseInt(process.env.MOOSE_REDIS_CONFIG__IDLE_TIMEOUT || \"30\", 10) * 1000;\n this.keyPrefix = `${prefix}::moosecache::`;\n\n this.client = createClient({\n url: redisUrl,\n });\n\n process.on(\"SIGTERM\", this.gracefulShutdown);\n process.on(\"SIGINT\", this.gracefulShutdown);\n\n this.client.on(\"error\", async (err: Error) => {\n console.error(\"TS Redis client error:\", err);\n await this.disconnect();\n });\n\n this.client.on(\"connect\", () => {\n this.isConnected = true;\n console.log(\"TS Redis client connected\");\n });\n\n this.client.on(\"end\", () => {\n this.isConnected = false;\n console.log(\"TS Redis client disconnected\");\n this.clearDisconnectTimer();\n });\n }\n\n private clearDisconnectTimer(): void {\n if (this.disconnectTimer) {\n clearTimeout(this.disconnectTimer);\n this.disconnectTimer = null;\n }\n }\n\n private resetDisconnectTimer(): void {\n this.clearDisconnectTimer();\n this.disconnectTimer = setTimeout(async () => {\n if (this.isConnected) {\n console.log(\"TS Redis client 
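A hypothetical `ApisConfig` wiring for `runApis` (import path assumed, since runner.ts only re-exports `createApi`/`createConsumptionApi` through index.ts; all values are illustrative):

```typescript
// Import path assumed; runApis is not part of the public index.ts exports.
import { runApis } from "@514labs/moose-lib/dist/consumption-apis/runner";

runApis({
  apisDir: "/app/apis/",
  clickhouseConfig: {
    database: "local",
    host: "localhost",
    port: "8123",
    username: "default",
    password: "",
    useSSL: false,
  },
  enforceAuth: false, // without a jwtConfig.secret, auth would otherwise 401
  isDmv2: true,
  proxyPort: 4001, // the default used when omitted
  workerCount: 2,  // 0 or undefined lets Cluster derive it from CPU count
});
```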
disconnecting due to inactivity\");\n await this.disconnect();\n }\n }, this.idleTimeout);\n }\n\n private async ensureConnected(): Promise<void> {\n if (!this.isConnected) {\n await this.connect();\n }\n this.resetDisconnectTimer();\n }\n\n private async connect(): Promise<void> {\n // If already connected, return immediately\n if (this.isConnected) {\n return;\n }\n\n // If connection is in progress, wait for it\n // This prevents race conditions when multiple callers try to reconnect\n // simultaneously after a disconnection\n if (this.connectPromise) {\n return this.connectPromise;\n }\n\n // Start connection\n this.connectPromise = (async () => {\n try {\n await this.client.connect();\n this.resetDisconnectTimer();\n } catch (error) {\n // Reset the promise on error so retries can work\n this.connectPromise = null;\n throw error;\n }\n })();\n\n return this.connectPromise;\n }\n\n private async gracefulShutdown(): Promise<void> {\n if (this.isConnected) {\n await this.disconnect();\n }\n process.exit(0);\n }\n\n private getPrefixedKey(key: string): string {\n return `${this.keyPrefix}${key}`;\n }\n\n /**\n * Gets the singleton instance of MooseCache. Creates a new instance if one doesn't exist.\n * The client will automatically connect to Redis and handle reconnection if needed.\n *\n * @returns Promise<MooseCache> The singleton instance of MooseCache\n * @example\n * const cache = await MooseCache.get();\n */\n public static async get(): Promise<MooseCache> {\n // If we already have an instance, return it immediately\n if (instance) {\n return instance;\n }\n\n // If initialization is already in progress, wait for it\n // This prevents race conditions where multiple concurrent calls to get()\n // would each create their own instance and connection\n //\n // A simple singleton pattern (just checking if instance exists) isn't enough\n // because multiple async calls can check \"if (!instance)\" simultaneously,\n // find it's null, and each try to create their own instance before any\n // of them finish setting the instance variable\n if (initPromise) {\n return initPromise;\n }\n\n // Start initialization\n // We store the promise immediately so that any concurrent calls\n // will wait for this same initialization instead of starting their own\n initPromise = (async () => {\n try {\n const newInstance = new MooseCache();\n await newInstance.connect();\n instance = newInstance;\n return newInstance;\n } catch (error) {\n // Reset the promise on error so retries can work\n initPromise = null;\n throw error;\n }\n })();\n\n return initPromise;\n }\n\n /**\n * Sets a value in the cache. Objects are automatically JSON stringified.\n *\n * @param key - The key to store the value under\n * @param value - The value to store. Can be a string or any object (will be JSON stringified)\n * @param ttlSeconds - Optional time-to-live in seconds. If not provided, defaults to 1 hour (3600 seconds).\n * Must be a non-negative number. 
If 0, the key will expire immediately.\n * @example\n * // Store a string\n * await cache.set(\"foo\", \"bar\");\n *\n * // Store an object with custom TTL\n * await cache.set(\"foo:config\", { baz: 123, qux: true }, 60); // expires in 1 minute\n *\n * // This is essentially a get-set, which returns the previous value if it exists.\n * // You can create logic to only do work for the first time.\n * const value = await cache.set(\"testSessionId\", \"true\");\n * if (value) {\n * // Cache was set before, return\n * } else {\n * // Cache was set for first time, do work\n * }\n */\n public async set(\n key: string,\n value: string | object,\n ttlSeconds?: number,\n ): Promise<string | null> {\n try {\n // Validate TTL\n if (ttlSeconds !== undefined && ttlSeconds < 0) {\n throw new Error(\"ttlSeconds must be a non-negative number\");\n }\n\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const stringValue =\n typeof value === \"object\" ? JSON.stringify(value) : value;\n\n // Use provided TTL or default to 1 hour\n const ttl = ttlSeconds ?? 3600;\n return await this.client.set(prefixedKey, stringValue, {\n EX: ttl,\n GET: true,\n });\n } catch (error) {\n console.error(`Error setting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Retrieves a value from the cache. Attempts to parse the value as JSON if possible.\n *\n * @param key - The key to retrieve\n * @returns Promise<T | null> The value, parsed as type T if it was JSON, or as string if not. Returns null if key doesn't exist\n * @example\n * // Get a string\n * const value = await cache.get(\"foo\");\n *\n * // Get and parse an object with type safety\n * interface Config { baz: number; qux: boolean; }\n * const config = await cache.get<Config>(\"foo:config\");\n */\n public async get<T extends SupportedTypes = string>(\n key: string,\n ): Promise<T | null> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const value = await this.client.get(prefixedKey);\n\n if (value === null) return null;\n\n // Note: We can't check if T is string at runtime because TypeScript types are erased.\n // Instead, we try to parse as JSON and return the original string if that fails.\n try {\n const parsed = JSON.parse(value);\n // Only return parsed value if it's an object\n if (typeof parsed === \"object\" && parsed !== null) {\n return parsed as T;\n }\n // If parsed value isn't an object, return as string\n return value as T;\n } catch {\n // If JSON parse fails, return as string\n return value as T;\n }\n } catch (error) {\n console.error(`Error getting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @example\n * await cache.delete(\"foo\");\n */\n public async delete(key: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n await this.client.del(prefixedKey);\n } catch (error) {\n console.error(`Error deleting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes all keys that start with the given prefix.\n *\n * @param keyPrefix - The prefix of keys to delete\n * @example\n * // Delete all keys starting with \"foo\"\n * await cache.clearKeys(\"foo\");\n */\n public async clearKeys(keyPrefix: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(keyPrefix);\n const keys = await this.client.keys(`${prefixedKey}*`);\n if (keys.length > 0) {\n 
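Pulling the `MooseCache` pieces together inside an async context (`MooseCache` is exported from the package root; key names are invented):

```typescript
import { MooseCache } from "@514labs/moose-lib";

async function demo() {
  const cache = await MooseCache.get(); // singleton, connects lazily

  // set() uses GET: true, so it returns the previous value and doubles
  // as an atomic "first writer wins" check.
  const prev = await cache.set("jobs:nightly", { startedAt: Date.now() }, 60);
  if (prev === null) {
    // key did not exist before: this caller owns the work
  }

  interface JobState { startedAt: number }
  const state = await cache.get<JobState>("jobs:nightly"); // JSON-parsed

  await cache.clearKeys("jobs:"); // removes every key under the prefix
}
```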
await this.client.del(keys);\n }\n } catch (error) {\n console.error(\n `Error clearing cache keys with prefix ${keyPrefix}:`,\n error,\n );\n throw error;\n }\n }\n\n /**\n * Deletes all keys in the cache\n *\n * @example\n * await cache.clear();\n */\n public async clear(): Promise<void> {\n try {\n await this.ensureConnected();\n const keys = await this.client.keys(`${this.keyPrefix}*`);\n if (keys.length > 0) {\n await this.client.del(keys);\n }\n } catch (error) {\n console.error(\"Error clearing cache:\", error);\n throw error;\n }\n }\n\n /**\n * Manually disconnects the Redis client. The client will automatically reconnect\n * when the next operation is performed.\n *\n * @example\n * await cache.disconnect();\n */\n public async disconnect(): Promise<void> {\n this.clearDisconnectTimer();\n this.connectPromise = null;\n if (this.isConnected) {\n await this.client.quit();\n }\n }\n}\n","import { MooseClient, QueryClient, MooseUtils } from \"./helpers\";\nimport { getClickhouseClient } from \"../commons\";\nimport { sql } from \"../sqlHelpers\";\nimport type { RuntimeClickHouseConfig } from \"../config/runtime\";\n\n// Cached utilities and initialization promise for standalone mode\nlet standaloneUtils: MooseUtils | null = null;\nlet initPromise: Promise<MooseUtils> | null = null;\n\n// Convert config to client config format\nconst toClientConfig = (config: {\n host: string;\n port: string;\n username: string;\n password: string;\n database: string;\n useSSL: boolean;\n}) => ({\n ...config,\n useSSL: config.useSSL ? \"true\" : \"false\",\n});\n\n/**\n * Get Moose utilities for database access and SQL queries.\n * Works in both Moose runtime and standalone contexts.\n *\n * **IMPORTANT**: This function is async and returns a Promise. You must await the result:\n * ```typescript\n * const moose = await getMooseUtils(); // Correct\n * const moose = getMooseUtils(); // WRONG - returns Promise, not MooseUtils!\n * ```\n *\n * **Breaking Change from v1.x**: This function signature changed from sync to async.\n * If you were using the old sync API that extracted utils from a request object,\n * use `getMooseUtilsFromRequest(req)` for backward compatibility (deprecated).\n *\n * @param req - DEPRECATED: Request parameter is no longer needed and will be ignored.\n * If you need to extract moose from a request, use getMooseUtilsFromRequest().\n * @returns Promise resolving to MooseUtils with client and sql utilities.\n *\n * @example\n * ```typescript\n * const { client, sql } = await getMooseUtils();\n * const result = await client.query.execute(sql`SELECT * FROM table`);\n * ```\n */\nexport async function getMooseUtils(req?: any): Promise<MooseUtils> {\n // Deprecation warning if req passed\n if (req !== undefined) {\n console.warn(\n \"[DEPRECATED] getMooseUtils(req) no longer requires a request parameter. 
\" +\n \"Use getMooseUtils() instead.\",\n );\n }\n\n // Check if running in Moose runtime\n const runtimeContext = (globalThis as any)._mooseRuntimeContext;\n\n if (runtimeContext) {\n // In Moose runtime - use existing connections\n return {\n client: runtimeContext.client,\n sql: sql,\n jwt: runtimeContext.jwt,\n };\n }\n\n // Standalone mode - use cached client or create new one\n if (standaloneUtils) {\n return standaloneUtils;\n }\n\n // If initialization is in progress, wait for it\n if (initPromise) {\n return initPromise;\n }\n\n // Start initialization\n initPromise = (async () => {\n await import(\"../config/runtime\");\n const configRegistry = (globalThis as any)._mooseConfigRegistry;\n\n if (!configRegistry) {\n throw new Error(\n \"Moose not initialized. Ensure you're running within a Moose app \" +\n \"or have proper configuration set up.\",\n );\n }\n\n const clickhouseConfig =\n await configRegistry.getStandaloneClickhouseConfig();\n\n const clickhouseClient = getClickhouseClient(\n toClientConfig(clickhouseConfig),\n );\n const queryClient = new QueryClient(clickhouseClient, \"standalone\");\n const mooseClient = new MooseClient(queryClient);\n\n standaloneUtils = {\n client: mooseClient,\n sql: sql,\n jwt: undefined,\n };\n return standaloneUtils;\n })();\n\n try {\n return await initPromise;\n } finally {\n initPromise = null;\n }\n}\n\n/**\n * @deprecated Use getMooseUtils() instead.\n * Creates a Moose client for database access.\n */\nexport async function getMooseClients(\n config?: Partial<RuntimeClickHouseConfig>,\n): Promise<{ client: MooseClient }> {\n console.warn(\n \"[DEPRECATED] getMooseClients() is deprecated. Use getMooseUtils() instead.\",\n );\n\n // If custom config provided, create a one-off client (don't cache)\n if (config && Object.keys(config).length > 0) {\n await import(\"../config/runtime\");\n const configRegistry = (globalThis as any)._mooseConfigRegistry;\n\n if (!configRegistry) {\n throw new Error(\n \"Configuration registry not initialized. 
Ensure the Moose framework is properly set up.\",\n );\n }\n\n const clickhouseConfig =\n await configRegistry.getStandaloneClickhouseConfig(config);\n\n const clickhouseClient = getClickhouseClient(\n toClientConfig(clickhouseConfig),\n );\n const queryClient = new QueryClient(clickhouseClient, \"standalone\");\n const mooseClient = new MooseClient(queryClient);\n\n return { client: mooseClient };\n }\n\n // No custom config - delegate to getMooseUtils\n const utils = await getMooseUtils();\n return { client: utils.client };\n}\n","import type {\n Column,\n DataType,\n Nested,\n ArrayType,\n} from \"../dataModels/dataModelTypes\";\n\n/**\n * Annotation key used to mark DateTime fields that should remain as strings\n * rather than being parsed into Date objects at runtime.\n */\nexport const STRING_DATE_ANNOTATION = \"stringDate\";\n\n/**\n * Type guard to check if a DataType is a nullable wrapper\n */\nfunction isNullableType(dt: DataType): dt is { nullable: DataType } {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"nullable\" in dt &&\n typeof dt.nullable !== \"undefined\"\n );\n}\n\n/**\n * Type guard to check if a DataType is a Nested type\n */\nfunction isNestedType(dt: DataType): dt is Nested {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"columns\" in dt &&\n Array.isArray(dt.columns)\n );\n}\n\n/**\n * Type guard to check if a DataType is an ArrayType\n */\nfunction isArrayType(dt: DataType): dt is ArrayType {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"elementType\" in dt &&\n typeof dt.elementType !== \"undefined\"\n );\n}\n\n/**\n * Revives ISO 8601 date strings into Date objects during JSON parsing\n * This is useful for automatically converting date strings to Date objects\n */\nexport function jsonDateReviver(key: string, value: unknown): unknown {\n const iso8601Format =\n /^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)$/;\n\n if (typeof value === \"string\" && iso8601Format.test(value)) {\n return new Date(value);\n }\n\n return value;\n}\n\n/**\n * Checks if a DataType represents a datetime column (not just date)\n * AND if the column should be parsed from string to Date at runtime\n *\n * Note: Date and Date16 are date-only types and should remain as strings.\n * Only DateTime types are candidates for parsing to JavaScript Date objects.\n */\nfunction isDateType(dataType: DataType, annotations: [string, any][]): boolean {\n // Check if this is marked as a string-based date (from typia.tags.Format)\n // If so, it should remain as a string, not be parsed to Date\n if (\n annotations.some(\n ([key, value]) => key === STRING_DATE_ANNOTATION && value === true,\n )\n ) {\n return false;\n }\n\n if (typeof dataType === \"string\") {\n // Only DateTime types should be parsed to Date objects\n // Date and Date16 are date-only and should stay as strings\n return dataType === \"DateTime\" || dataType.startsWith(\"DateTime(\");\n }\n // Handle nullable wrapper\n if (isNullableType(dataType)) {\n return isDateType(dataType.nullable, annotations);\n }\n return false;\n}\n\n/**\n * Type of mutation to apply to a field during parsing\n */\nexport type Mutation = \"parseDate\"; // | \"parseBigInt\" - to be added later\n\n/**\n * Recursive tuple array structure representing field 
mutation operations\n * Each entry is [fieldName, mutation]:\n * - mutation is Mutation[] for leaf fields that need operations applied\n * - mutation is FieldMutations for nested objects/arrays (auto-applies to array elements)\n */\nexport type FieldMutations = [string, Mutation[] | FieldMutations][];\n\n/**\n * Recursively builds field mutations from column definitions\n *\n * @param columns - Array of Column definitions\n * @returns Tuple array of field mutations\n */\nfunction buildFieldMutations(columns: Column[]): FieldMutations {\n const mutations: FieldMutations = [];\n\n for (const column of columns) {\n const dataType = column.data_type;\n\n // Check if this is a date field that should be converted\n if (isDateType(dataType, column.annotations)) {\n mutations.push([column.name, [\"parseDate\"]]);\n continue;\n }\n\n // Handle nested structures\n if (typeof dataType === \"object\" && dataType !== null) {\n // Handle nullable wrapper\n let unwrappedType: DataType = dataType;\n if (isNullableType(dataType)) {\n unwrappedType = dataType.nullable;\n }\n\n // Handle nested objects\n if (isNestedType(unwrappedType)) {\n const nestedMutations = buildFieldMutations(unwrappedType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n\n // Handle arrays with nested columns\n // The mutations will be auto-applied to each array element at runtime\n if (isArrayType(unwrappedType)) {\n const elementType = unwrappedType.elementType;\n if (isNestedType(elementType)) {\n const nestedMutations = buildFieldMutations(elementType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n }\n }\n }\n\n return mutations;\n}\n\n/**\n * Applies a mutation operation to a field value\n *\n * @param value - The value to handle\n * @param mutation - The mutation operation to apply\n * @returns The handled value\n */\nfunction applyMutation(value: any, mutation: Mutation): any {\n if (mutation === \"parseDate\") {\n if (typeof value === \"string\") {\n try {\n const date = new Date(value);\n return !isNaN(date.getTime()) ? 
date : value;\n } catch {\n return value;\n }\n }\n }\n return value;\n}\n\n/**\n * Recursively mutates an object by applying field mutations\n *\n * @param obj - The object to mutate\n * @param mutations - The field mutations to apply\n */\nfunction applyFieldMutations(obj: any, mutations: FieldMutations): void {\n if (!obj || typeof obj !== \"object\") {\n return;\n }\n\n for (const [fieldName, mutation] of mutations) {\n if (!(fieldName in obj)) {\n continue;\n }\n\n if (Array.isArray(mutation)) {\n // Check if it's Mutation[] (leaf) or FieldMutations (nested)\n if (mutation.length > 0 && typeof mutation[0] === \"string\") {\n // It's Mutation[] - apply operations to this field\n const operations = mutation as Mutation[];\n for (const operation of operations) {\n obj[fieldName] = applyMutation(obj[fieldName], operation);\n }\n } else {\n // It's FieldMutations - recurse into nested structure\n const nestedMutations = mutation as FieldMutations;\n const fieldValue = obj[fieldName];\n\n if (Array.isArray(fieldValue)) {\n // Auto-apply to each array element\n for (const item of fieldValue) {\n applyFieldMutations(item, nestedMutations);\n }\n } else if (fieldValue && typeof fieldValue === \"object\") {\n // Apply to nested object\n applyFieldMutations(fieldValue, nestedMutations);\n }\n }\n }\n }\n}\n\n/**\n * Pre-builds field mutations from column schema for efficient reuse\n *\n * @param columns - Column definitions from the Stream schema\n * @returns Field mutations tuple array, or undefined if no columns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * // Reuse fieldMutations for every message\n * ```\n */\nexport function buildFieldMutationsFromColumns(\n columns: Column[] | undefined,\n): FieldMutations | undefined {\n if (!columns || columns.length === 0) {\n return undefined;\n }\n const mutations = buildFieldMutations(columns);\n return mutations.length > 0 ? 
mutations : undefined;\n}\n\n/**\n * Applies field mutations to parsed data\n * Mutates the object in place for performance\n *\n * @param data - The parsed JSON object to mutate\n * @param fieldMutations - Pre-built field mutations from buildFieldMutationsFromColumns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * const data = JSON.parse(jsonString);\n * mutateParsedJson(data, fieldMutations);\n * // data now has transformations applied per the field mutations\n * ```\n */\nexport function mutateParsedJson(\n data: any,\n fieldMutations: FieldMutations | undefined,\n): void {\n if (!fieldMutations || !data) {\n return;\n }\n\n applyFieldMutations(data, fieldMutations);\n}\n","import { parse } from \"csv-parse\";\nimport { jsonDateReviver } from \"./json\";\n\n/**\n * Configuration for CSV parsing options\n */\nexport interface CSVParsingConfig {\n /** CSV delimiter character */\n delimiter: string;\n /** Whether to treat first row as headers */\n columns?: boolean;\n /** Whether to skip empty lines */\n skipEmptyLines?: boolean;\n /** Whether to trim whitespace from values */\n trim?: boolean;\n}\n\n/**\n * Configuration for JSON parsing options\n */\nexport interface JSONParsingConfig {\n /** Custom reviver function for JSON.parse */\n reviver?: (key: string, value: any) => any;\n}\n\n/**\n * Parses CSV content into an array of objects\n *\n * @param content - The CSV content as a string\n * @param config - CSV parsing configuration\n * @returns Promise resolving to an array of parsed objects\n */\nexport function parseCSV<T = Record<string, any>>(\n content: string,\n config: CSVParsingConfig,\n): Promise<T[]> {\n return new Promise((resolve, reject) => {\n const results: T[] = [];\n\n parse(content, {\n delimiter: config.delimiter,\n columns: config.columns ?? true,\n skip_empty_lines: config.skipEmptyLines ?? true,\n trim: config.trim ?? true,\n })\n .on(\"data\", (row) => {\n results.push(row as T);\n })\n .on(\"end\", () => {\n resolve(results);\n })\n .on(\"error\", (error) => {\n reject(error);\n });\n });\n}\n\n/**\n * Parses JSON content into an array of objects\n *\n * @param content - The JSON content as a string\n * @param config - JSON parsing configuration\n * @returns Array of parsed objects\n */\nexport function parseJSON<T = any>(\n content: string,\n config: JSONParsingConfig = {},\n): T[] {\n try {\n const parsed = JSON.parse(content, config.reviver);\n\n // Handle both array and single object cases\n if (Array.isArray(parsed)) {\n return parsed as T[];\n } else {\n return [parsed as T];\n }\n } catch (error) {\n throw new Error(\n `Failed to parse JSON: ${error instanceof Error ? 
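A worked example for the two helpers above. The `Column` objects are reduced to the fields the builder actually reads (`name`, `data_type`, `annotations`); real columns from dataModelTypes carry more fields, hence the cast. The import path is an assumption, since utilities/index.ts only re-exports dataParser:

```typescript
// Import path assumed; these helpers live in src/utilities/json.ts.
import {
  buildFieldMutationsFromColumns,
  mutateParsedJson,
} from "@514labs/moose-lib/dist/utilities/json";

const columns = [
  { name: "createdAt", data_type: "DateTime", annotations: [] },
  { name: "note", data_type: "String", annotations: [] },
  // The stringDate annotation keeps the value as a string:
  { name: "day", data_type: "DateTime", annotations: [["stringDate", true]] },
] as any[];

const fieldMutations = buildFieldMutationsFromColumns(columns);
// => [["createdAt", ["parseDate"]]]

const data = JSON.parse(
  '{"createdAt":"2024-01-01T00:00:00Z","note":"hi","day":"2024-01-01T00:00:00Z"}',
);
mutateParsedJson(data, fieldMutations);
// data.createdAt is now a Date; note and day remain strings.
```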
error.message : \"Unknown error\"}`,\n );\n }\n}\n\n/**\n * Parses JSON content with automatic date revival\n *\n * @param content - The JSON content as a string\n * @returns Array of parsed objects with Date objects for ISO 8601 strings\n */\nexport function parseJSONWithDates<T = any>(content: string): T[] {\n return parseJSON<T>(content, { reviver: jsonDateReviver });\n}\n\n/**\n * Type guard to check if a value is a valid CSV delimiter\n */\nexport function isValidCSVDelimiter(delimiter: string): boolean {\n return delimiter.length === 1 && !/\\s/.test(delimiter);\n}\n\n/**\n * Common CSV delimiters\n */\nexport const CSV_DELIMITERS = {\n COMMA: \",\",\n TAB: \"\\t\",\n SEMICOLON: \";\",\n PIPE: \"|\",\n} as const;\n\n/**\n * Default CSV parsing configuration\n */\nexport const DEFAULT_CSV_CONFIG: CSVParsingConfig = {\n delimiter: CSV_DELIMITERS.COMMA,\n columns: true,\n skipEmptyLines: true,\n trim: true,\n};\n\n/**\n * Default JSON parsing configuration with date revival\n */\nexport const DEFAULT_JSON_CONFIG: JSONParsingConfig = {\n reviver: jsonDateReviver,\n};\n","import { IsTuple } from \"typia/lib/typings/IsTuple\";\n\nexport * from \"./dataParser\";\n\ntype HasFunctionField<T> =\n T extends object ?\n {\n [K in keyof T]: T[K] extends Function ? true : false;\n }[keyof T] extends false ?\n false\n : true\n : false;\n\n/**\n * `Date & ...` is considered \"nonsensible intersection\" by typia,\n * causing JSON schema to fail.\n * This helper type recursively cleans up the intersection type tagging.\n */\nexport type StripDateIntersection<T> =\n T extends Date ?\n Date extends T ?\n Date\n : T\n : T extends ReadonlyArray<unknown> ?\n IsTuple<T> extends true ? StripDateFromTuple<T>\n : T extends ReadonlyArray<infer U> ?\n ReadonlyArray<U> extends T ?\n ReadonlyArray<StripDateIntersection<U>>\n : Array<StripDateIntersection<U>>\n : T extends Array<infer U> ? Array<StripDateIntersection<U>>\n : T // this catchall should be unreachable\n : // do not touch other classes\n true extends HasFunctionField<T> ? T\n : T extends object ? 
{ [K in keyof T]: StripDateIntersection<T[K]> }\n : T;\n\n// infer fails in a recursive definition if an intersection type tag is present\ntype StripDateFromTuple<T extends readonly any[]> =\n T extends (\n [\n infer T1,\n infer T2,\n infer T3,\n infer T4,\n infer T5,\n infer T6,\n infer T7,\n infer T8,\n infer T9,\n infer T10,\n ]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n StripDateIntersection<T8>,\n StripDateIntersection<T9>,\n StripDateIntersection<T10>,\n ]\n : T extends (\n [\n infer T1,\n infer T2,\n infer T3,\n infer T4,\n infer T5,\n infer T6,\n infer T7,\n infer T8,\n infer T9,\n ]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n StripDateIntersection<T8>,\n StripDateIntersection<T9>,\n ]\n : T extends (\n [\n infer T1,\n infer T2,\n infer T3,\n infer T4,\n infer T5,\n infer T6,\n infer T7,\n infer T8,\n ]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n StripDateIntersection<T8>,\n ]\n : T extends (\n [infer T1, infer T2, infer T3, infer T4, infer T5, infer T6, infer T7]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n ]\n : T extends [infer T1, infer T2, infer T3, infer T4, infer T5, infer T6] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n ]\n : T extends [infer T1, infer T2, infer T3, infer T4, infer T5] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n ]\n : T extends [infer T1, infer T2, infer T3, infer T4] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n ]\n : T extends [infer T1, infer T2, infer T3] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n ]\n : T extends [infer T1, infer T2] ?\n [StripDateIntersection<T1>, StripDateIntersection<T2>]\n : T extends [infer T1] ? [StripDateIntersection<T1>]\n : [];\n","import { Readable } from \"node:stream\";\n\n/**\n * Configuration for a data source\n */\nexport interface DataSourceConfig {\n name: string;\n supportsIncremental?: boolean;\n}\n\n/**\n * DataSource is an abstract class that defines the interface for all data sources.\n * It is used to extract data from a source and test the connection to the source.\n */\nexport abstract class DataSource<T = any, ItemType = any> {\n protected name: string;\n protected supportsIncremental: boolean;\n\n constructor(config: DataSourceConfig) {\n this.name = config.name;\n this.supportsIncremental = config.supportsIncremental ?? 
false;\n }\n\n /**\n * Extract data from the source\n * Returns either ItemType (for single requests) or Readable (for paginated requests)\n */\n abstract extract(): Promise<ItemType | Readable>;\n\n /**\n * Test connection to the source\n */\n abstract testConnection(): Promise<{ success: boolean; message?: string }>;\n}\n\n/**\n * Result returned from extraction\n * For single requests: data is of type T\n * For paginated requests: data is a Readable stream yielding items of type T\n */\nexport interface ExtractionResult<T = any> {\n data: T | Readable;\n metadata: Record<string, any>;\n}\n","export * from \"./browserCompatible\";\n\nexport type DataModelConfig<T> = Partial<{\n ingestion: true;\n storage: {\n enabled?: boolean;\n order_by_fields?: (keyof T)[];\n deduplicate?: boolean;\n name?: string;\n };\n parallelism?: number;\n}>;\n\nexport * from \"./blocks/helpers\";\nexport * from \"./commons\";\nexport * from \"./secrets\";\nexport * from \"./consumption-apis/helpers\";\nexport {\n expressMiddleware,\n ExpressRequestWithMoose,\n getMooseUtilsFromRequest,\n getLegacyMooseUtils,\n} from \"./consumption-apis/webAppHelpers\";\nexport * from \"./scripts/task\";\n\nexport { createApi, createConsumptionApi } from \"./consumption-apis/runner\";\n\nexport { MooseCache } from \"./clients/redisClient\";\n\nexport { ApiUtil, ConsumptionUtil } from \"./consumption-apis/helpers\";\n\nexport { getMooseUtils, getMooseClients } from \"./consumption-apis/standalone\";\nexport type { MooseUtils } from \"./consumption-apis/helpers\";\nexport { sql } from \"./sqlHelpers\";\n\nexport * from \"./utilities\";\nexport * from \"./connectors/dataSource\";\nexport {\n ClickHouseByteSize,\n ClickHouseInt,\n LowCardinality,\n ClickHouseNamedTuple,\n ClickHousePoint,\n ClickHouseRing,\n ClickHouseLineString,\n ClickHouseMultiLineString,\n ClickHousePolygon,\n ClickHouseMultiPolygon,\n} from \"./dataModels/types\";\n","/**\n * @module internal\n * Internal implementation details for the Moose v2 data model (dmv2).\n *\n * This module manages the registration of user-defined dmv2 resources (Tables, Streams, APIs, etc.)\n * and provides functions to serialize these resources into a JSON format (`InfrastructureMap`)\n * expected by the Moose infrastructure management system. 
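A minimal concrete `DataSource` under the contract above (the class and data are invented; `DataSource` is re-exported from the package root via connectors/dataSource):

```typescript
import { DataSource } from "@514labs/moose-lib";

interface User { id: number; name: string }

class UserApiSource extends DataSource<User, User[]> {
  constructor() {
    super({ name: "user-api", supportsIncremental: false });
  }

  // Single-request source: resolve the items directly. A paginated source
  // would return a node:stream Readable instead.
  async extract(): Promise<User[]> {
    return [{ id: 1, name: "Ada" }];
  }

  async testConnection(): Promise<{ success: boolean; message?: string }> {
    return { success: true, message: "reachable" };
  }
}
```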
It also includes helper functions\n * to retrieve registered handler functions (for streams and APIs) and the base class\n * (`TypedBase`) used by dmv2 resource classes.\n *\n * @internal This module is intended for internal use by the Moose library and compiler plugin.\n * Its API might change without notice.\n */\nimport process from \"process\";\nimport { Api, IngestApi, SqlResource, Task, Workflow } from \"./index\";\nimport { IJsonSchemaCollection } from \"typia/src/schemas/json/IJsonSchemaCollection\";\nimport { Column } from \"../dataModels/dataModelTypes\";\nimport { ClickHouseEngines, ApiUtil } from \"../index\";\nimport {\n OlapTable,\n OlapConfig,\n ReplacingMergeTreeConfig,\n SummingMergeTreeConfig,\n ReplicatedMergeTreeConfig,\n ReplicatedReplacingMergeTreeConfig,\n ReplicatedAggregatingMergeTreeConfig,\n ReplicatedSummingMergeTreeConfig,\n ReplicatedCollapsingMergeTreeConfig,\n ReplicatedVersionedCollapsingMergeTreeConfig,\n S3QueueConfig,\n} from \"./sdk/olapTable\";\nimport {\n ConsumerConfig,\n KafkaSchemaConfig,\n Stream,\n TransformConfig,\n} from \"./sdk/stream\";\nimport { compilerLog } from \"../commons\";\nimport { WebApp } from \"./sdk/webApp\";\n\n/**\n * Gets the source directory from environment variable or defaults to \"app\"\n */\nfunction getSourceDir(): string {\n return process.env.MOOSE_SOURCE_DIR || \"app\";\n}\n\n/**\n * Client-only mode check. When true, resource registration is permissive\n * (duplicates overwrite silently instead of throwing).\n * Set via MOOSE_CLIENT_ONLY=true environment variable.\n *\n * This enables Next.js apps to import OlapTable definitions for type-safe\n * queries without the Moose runtime, avoiding \"already exists\" errors on HMR.\n *\n * @returns true if MOOSE_CLIENT_ONLY environment variable is set to \"true\"\n */\nexport const isClientOnlyMode = (): boolean =>\n process.env.MOOSE_CLIENT_ONLY === \"true\";\n\n/**\n * Internal registry holding all defined Moose dmv2 resources.\n * Populated by the constructors of OlapTable, Stream, IngestApi, etc.\n * Accessed via `getMooseInternal()`.\n */\nconst moose_internal = {\n tables: new Map<string, OlapTable<any>>(),\n streams: new Map<string, Stream<any>>(),\n ingestApis: new Map<string, IngestApi<any>>(),\n apis: new Map<string, Api<any>>(),\n sqlResources: new Map<string, SqlResource>(),\n workflows: new Map<string, Workflow>(),\n webApps: new Map<string, WebApp>(),\n};\n/**\n * Default retention period for streams if not specified (7 days in seconds).\n */\nconst defaultRetentionPeriod = 60 * 60 * 24 * 7;\n\n/**\n * Engine-specific configuration types using discriminated union pattern\n */\ninterface MergeTreeEngineConfig {\n engine: \"MergeTree\";\n}\n\ninterface ReplacingMergeTreeEngineConfig {\n engine: \"ReplacingMergeTree\";\n ver?: string;\n isDeleted?: string;\n}\n\ninterface AggregatingMergeTreeEngineConfig {\n engine: \"AggregatingMergeTree\";\n}\n\ninterface SummingMergeTreeEngineConfig {\n engine: \"SummingMergeTree\";\n columns?: string[];\n}\n\ninterface CollapsingMergeTreeEngineConfig {\n engine: \"CollapsingMergeTree\";\n sign: string;\n}\n\ninterface VersionedCollapsingMergeTreeEngineConfig {\n engine: \"VersionedCollapsingMergeTree\";\n sign: string;\n ver: string;\n}\n\ninterface ReplicatedMergeTreeEngineConfig {\n engine: \"ReplicatedMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedReplacingMergeTreeEngineConfig {\n engine: \"ReplicatedReplacingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n ver?: string;\n 
isDeleted?: string;\n}\n\ninterface ReplicatedAggregatingMergeTreeEngineConfig {\n engine: \"ReplicatedAggregatingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedSummingMergeTreeEngineConfig {\n engine: \"ReplicatedSummingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n columns?: string[];\n}\n\ninterface ReplicatedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n}\n\ninterface ReplicatedVersionedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedVersionedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n ver: string;\n}\n\ninterface S3QueueEngineConfig {\n engine: \"S3Queue\";\n s3Path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n headers?: { [key: string]: string };\n}\n\ninterface S3EngineConfig {\n engine: \"S3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n partitionStrategy?: string;\n partitionColumnsInDataFile?: string;\n}\n\ninterface BufferEngineConfig {\n engine: \"Buffer\";\n targetDatabase: string;\n targetTable: string;\n numLayers: number;\n minTime: number;\n maxTime: number;\n minRows: number;\n maxRows: number;\n minBytes: number;\n maxBytes: number;\n flushTime?: number;\n flushRows?: number;\n flushBytes?: number;\n}\n\ninterface DistributedEngineConfig {\n engine: \"Distributed\";\n cluster: string;\n targetDatabase: string;\n targetTable: string;\n shardingKey?: string;\n policyName?: string;\n}\n\ninterface IcebergS3EngineConfig {\n engine: \"IcebergS3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n}\n\ninterface KafkaEngineConfig {\n engine: \"Kafka\";\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n}\n\n/**\n * Union type for all supported engine configurations\n */\ntype EngineConfig =\n | MergeTreeEngineConfig\n | ReplacingMergeTreeEngineConfig\n | AggregatingMergeTreeEngineConfig\n | SummingMergeTreeEngineConfig\n | CollapsingMergeTreeEngineConfig\n | VersionedCollapsingMergeTreeEngineConfig\n | ReplicatedMergeTreeEngineConfig\n | ReplicatedReplacingMergeTreeEngineConfig\n | ReplicatedAggregatingMergeTreeEngineConfig\n | ReplicatedSummingMergeTreeEngineConfig\n | ReplicatedCollapsingMergeTreeEngineConfig\n | ReplicatedVersionedCollapsingMergeTreeEngineConfig\n | S3QueueEngineConfig\n | S3EngineConfig\n | BufferEngineConfig\n | DistributedEngineConfig\n | IcebergS3EngineConfig\n | KafkaEngineConfig;\n\n/**\n * JSON representation of an OLAP table configuration.\n */\ninterface TableJson {\n /** The name of the table. */\n name: string;\n /** Array defining the table's columns and their types. */\n columns: Column[];\n /** ORDER BY clause: either array of column names or a single ClickHouse expression. */\n orderBy: string[] | string;\n /** The column name used for the PARTITION BY clause. */\n partitionBy?: string;\n /** SAMPLE BY expression for approximate query processing. */\n sampleByExpression?: string;\n /** PRIMARY KEY expression (overrides column-level primary_key flags when specified). */\n primaryKeyExpression?: string;\n /** Engine configuration with type-safe, engine-specific parameters */\n engineConfig?: EngineConfig;\n /** Optional version string for the table configuration. 
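Two sample values of the engine union above, plus the narrowing it enables. `EngineConfig` is internal to dmv2/internal.ts and not exported, so the sketch below re-declares just the two shapes it uses (field values are hypothetical):

```typescript
// Shapes copied from the internal EngineConfig union above.
type EngineConfigSketch =
  | { engine: "ReplacingMergeTree"; ver?: string; isDeleted?: string }
  | { engine: "S3Queue"; s3Path: string; format: string };

const replacing: EngineConfigSketch = {
  engine: "ReplacingMergeTree",
  ver: "updatedAt",     // version column (hypothetical)
  isDeleted: "deleted", // soft-delete flag column (hypothetical)
};

const s3queue: EngineConfigSketch = {
  engine: "S3Queue",
  s3Path: "s3://bucket/prefix/*.jsonl",
  format: "JSONEachRow",
};

// The `engine` literal discriminates the union:
function describe(c: EngineConfigSketch): string {
  switch (c.engine) {
    case "ReplacingMergeTree":
      return `replacing, ver=${c.ver ?? "none"}`;
    case "S3Queue":
      return `s3queue, ${c.s3Path} as ${c.format}`;
  }
}
```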
*/\n version?: string;\n /** Optional metadata for the table (e.g., description). */\n metadata?: { description?: string };\n /** Lifecycle management setting for the table. */\n lifeCycle?: string;\n /** Optional table-level settings that can be modified with ALTER TABLE MODIFY SETTING. */\n tableSettings?: { [key: string]: string };\n /** Optional table indexes */\n indexes?: {\n name: string;\n expression: string;\n type: string;\n arguments: string[];\n granularity: number;\n }[];\n /** Optional table-level TTL expression (without leading 'TTL'). */\n ttl?: string;\n /** Optional database name for multi-database support. */\n database?: string;\n /** Optional cluster name for ON CLUSTER support. */\n cluster?: string;\n}\n/**\n * Represents a target destination for data flow, typically a stream.\n */\ninterface Target {\n /** The name of the target resource (e.g., stream name). */\n name: string;\n /** The kind of the target resource. */\n kind: \"stream\"; // may add `| \"table\"` in the future\n /** Optional version string of the target resource's configuration. */\n version?: string;\n /** Optional metadata for the target (e.g., description for function processes). */\n metadata?: { description?: string };\n /** Optional source file path where this transform was declared. */\n sourceFile?: string;\n}\n\n/**\n * Represents a consumer attached to a stream.\n */\ninterface Consumer {\n /** Optional version string for the consumer configuration. */\n version?: string;\n /** Optional source file path where this consumer was declared. */\n sourceFile?: string;\n}\n\n/**\n * JSON representation of a Stream/Topic configuration.\n */\ninterface StreamJson {\n /** The name of the stream/topic. */\n name: string;\n /** Array defining the message schema (columns/fields). */\n columns: Column[];\n /** Data retention period in seconds. */\n retentionPeriod: number;\n /** Number of partitions for the stream/topic. */\n partitionCount: number;\n /** Optional name of the OLAP table this stream automatically syncs to. */\n targetTable?: string;\n /** Optional version of the target OLAP table configuration. */\n targetTableVersion?: string;\n /** Optional version string for the stream configuration. */\n version?: string;\n /** List of target streams this stream transforms data into. */\n transformationTargets: Target[];\n /** Flag indicating if a multi-transform function (`_multipleTransformations`) is defined. */\n hasMultiTransform: boolean;\n /** List of consumers attached to this stream. */\n consumers: Consumer[];\n /** Optional description for the stream. */\n metadata?: { description?: string };\n /** Lifecycle management setting for the stream. */\n lifeCycle?: string;\n /** Optional Schema Registry config */\n schemaConfig?: KafkaSchemaConfig;\n}\n/**\n * JSON representation of an Ingest API configuration.\n */\ninterface IngestApiJson {\n /** The name of the Ingest API endpoint. */\n name: string;\n /** Array defining the expected input schema (columns/fields). */\n columns: Column[];\n\n /** The target stream where ingested data is written. */\n writeTo: Target;\n /** The DLQ if the data does not fit the schema. */\n deadLetterQueue?: string;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the ingestion endpoint. */\n path?: string;\n /** Optional description for the API. 
*/\n metadata?: { description?: string };\n /** JSON schema */\n schema: IJsonSchemaCollection.IV3_1;\n /**\n * Whether this API allows extra fields beyond the defined columns.\n * When true, extra fields in payloads are passed through to streaming functions.\n */\n allowExtraFields?: boolean;\n}\n\n/**\n * JSON representation of an API configuration.\n */\ninterface ApiJson {\n /** The name of the API endpoint. */\n name: string;\n /** Array defining the expected query parameters schema. */\n queryParams: Column[];\n /** JSON schema definition of the API's response body. */\n responseSchema: IJsonSchemaCollection.IV3_1;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the API endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n}\n\n/**\n * Represents the unique signature of an infrastructure component (Table, Topic, etc.).\n * Used for defining dependencies between SQL resources.\n */\ninterface InfrastructureSignatureJson {\n /** A unique identifier for the resource instance (often name + version). */\n id: string;\n /** The kind/type of the infrastructure component. */\n kind:\n | \"Table\"\n | \"Topic\"\n | \"ApiEndpoint\"\n | \"TopicToTableSyncProcess\"\n | \"View\"\n | \"SqlResource\";\n}\n\ninterface WorkflowJson {\n name: string;\n retries?: number;\n timeout?: string;\n schedule?: string;\n}\n\ninterface WebAppJson {\n name: string;\n mountPath: string;\n metadata?: { description?: string };\n}\n\ninterface SqlResourceJson {\n /** The name of the SQL resource. */\n name: string;\n /** Array of SQL DDL statements required to create the resource. */\n setup: readonly string[];\n /** Array of SQL DDL statements required to drop the resource. */\n teardown: readonly string[];\n\n /** List of infrastructure components (by signature) that this resource reads from. */\n pullsDataFrom: InfrastructureSignatureJson[];\n /** List of infrastructure components (by signature) that this resource writes to. */\n pushesDataTo: InfrastructureSignatureJson[];\n /** Optional source file path where this resource is defined. */\n sourceFile?: string;\n /** Optional source line number where this resource is defined. */\n sourceLine?: number;\n /** Optional source column number where this resource is defined. 
*/\n sourceColumn?: number;\n}\n\n/**\n * Type guard: Check if config is S3QueueConfig\n */\nfunction isS3QueueConfig(\n config: OlapConfig<any>,\n): config is S3QueueConfig<any> {\n return \"engine\" in config && config.engine === ClickHouseEngines.S3Queue;\n}\n\n/**\n * Type guard: Check if config has a replicated engine\n * Checks if the engine value is one of the replicated engine types\n */\nfunction hasReplicatedEngine(\n config: OlapConfig<any>,\n): config is\n | ReplicatedMergeTreeConfig<any>\n | ReplicatedReplacingMergeTreeConfig<any>\n | ReplicatedAggregatingMergeTreeConfig<any>\n | ReplicatedSummingMergeTreeConfig<any>\n | ReplicatedCollapsingMergeTreeConfig<any>\n | ReplicatedVersionedCollapsingMergeTreeConfig<any> {\n if (!(\"engine\" in config)) {\n return false;\n }\n\n const engine = config.engine as ClickHouseEngines;\n // Check if engine is one of the replicated engine types\n return (\n engine === ClickHouseEngines.ReplicatedMergeTree ||\n engine === ClickHouseEngines.ReplicatedReplacingMergeTree ||\n engine === ClickHouseEngines.ReplicatedAggregatingMergeTree ||\n engine === ClickHouseEngines.ReplicatedSummingMergeTree ||\n engine === ClickHouseEngines.ReplicatedCollapsingMergeTree ||\n engine === ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree\n );\n}\n\n/**\n * Extract engine value from table config, handling both legacy and new formats\n */\nfunction extractEngineValue(config: OlapConfig<any>): ClickHouseEngines {\n // Legacy config without engine property defaults to MergeTree\n if (!(\"engine\" in config)) {\n return ClickHouseEngines.MergeTree;\n }\n\n // All engines (replicated and non-replicated) have engine as direct value\n return config.engine as ClickHouseEngines;\n}\n\n/**\n * Convert engine config for basic MergeTree engines\n */\nfunction convertBasicEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n switch (engine) {\n case ClickHouseEngines.MergeTree:\n return { engine: \"MergeTree\" };\n\n case ClickHouseEngines.AggregatingMergeTree:\n return { engine: \"AggregatingMergeTree\" };\n\n case ClickHouseEngines.ReplacingMergeTree: {\n const replacingConfig = config as ReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplacingMergeTree\",\n ver: replacingConfig.ver,\n isDeleted: replacingConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.SummingMergeTree: {\n const summingConfig = config as SummingMergeTreeConfig<any>;\n return {\n engine: \"SummingMergeTree\",\n columns: summingConfig.columns,\n };\n }\n\n case ClickHouseEngines.CollapsingMergeTree: {\n const collapsingConfig = config as any; // CollapsingMergeTreeConfig<any>\n return {\n engine: \"CollapsingMergeTree\",\n sign: collapsingConfig.sign,\n };\n }\n\n case ClickHouseEngines.VersionedCollapsingMergeTree: {\n const versionedConfig = config as any; // VersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"VersionedCollapsingMergeTree\",\n sign: versionedConfig.sign,\n ver: versionedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert engine config for replicated MergeTree engines\n */\nfunction convertReplicatedEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n // First check if this is a replicated engine config\n if (!hasReplicatedEngine(config)) {\n return undefined;\n }\n\n switch (engine) {\n case ClickHouseEngines.ReplicatedMergeTree: {\n const replicatedConfig = config as ReplicatedMergeTreeConfig<any>;\n return {\n engine: 
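The guards above use the standard `config is X` narrowing pattern; here is a minimal, self-contained sketch of the same idea on a simplified stand-in union (not the real OlapConfig):

```typescript
// Stand-in union mirroring the OlapConfig shape (simplified for illustration).
type Cfg =
  | { orderByFields?: string[] }            // legacy shape: no `engine` key
  | { engine: "S3Queue"; s3Path: string };

function isS3Queue(c: Cfg): c is Extract<Cfg, { engine: "S3Queue" }> {
  return "engine" in c && c.engine === "S3Queue";
}

function engineOf(c: Cfg): string {
  if (isS3Queue(c)) return `S3Queue<${c.s3Path}>`; // narrowed: s3Path is visible
  return "MergeTree"; // legacy configs default here, as in extractEngineValue
}
```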
\"ReplicatedMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedReplacingMergeTree: {\n const replicatedConfig =\n config as ReplicatedReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedReplacingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n ver: replicatedConfig.ver,\n isDeleted: replicatedConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.ReplicatedAggregatingMergeTree: {\n const replicatedConfig =\n config as ReplicatedAggregatingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedAggregatingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedSummingMergeTree: {\n const replicatedConfig = config as ReplicatedSummingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedSummingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n columns: replicatedConfig.columns,\n };\n }\n\n case ClickHouseEngines.ReplicatedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n };\n }\n\n case ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedVersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedVersionedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n ver: replicatedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert S3Queue engine config\n * Uses type guard for fully type-safe property access\n */\nfunction convertS3QueueEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!isS3QueueConfig(config)) {\n return undefined;\n }\n\n return {\n engine: \"S3Queue\",\n s3Path: config.s3Path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n headers: config.headers,\n };\n}\n\n/**\n * Convert S3 engine config\n */\nfunction convertS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.S3) {\n return undefined;\n }\n\n return {\n engine: \"S3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n partitionStrategy: config.partitionStrategy,\n partitionColumnsInDataFile: config.partitionColumnsInDataFile,\n };\n}\n\n/**\n * Convert Buffer engine config\n */\nfunction convertBufferEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Buffer) {\n return undefined;\n }\n\n return {\n engine: \"Buffer\",\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n numLayers: config.numLayers,\n minTime: config.minTime,\n maxTime: config.maxTime,\n minRows: config.minRows,\n maxRows: config.maxRows,\n minBytes: config.minBytes,\n maxBytes: config.maxBytes,\n flushTime: config.flushTime,\n flushRows: config.flushRows,\n flushBytes: 
config.flushBytes,\n };\n}\n\n/**\n * Convert Distributed engine config\n */\nfunction convertDistributedEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (\n !(\"engine\" in config) ||\n config.engine !== ClickHouseEngines.Distributed\n ) {\n return undefined;\n }\n\n return {\n engine: \"Distributed\",\n cluster: config.cluster,\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n shardingKey: config.shardingKey,\n policyName: config.policyName,\n };\n}\n\n/**\n * Convert IcebergS3 engine config\n */\nfunction convertIcebergS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.IcebergS3) {\n return undefined;\n }\n\n return {\n engine: \"IcebergS3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n };\n}\n\n/**\n * Convert Kafka engine configuration\n */\nfunction convertKafkaEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Kafka) {\n return undefined;\n }\n\n return {\n engine: \"Kafka\",\n brokerList: config.brokerList,\n topicList: config.topicList,\n groupName: config.groupName,\n format: config.format,\n };\n}\n\n/**\n * Convert table configuration to engine config\n */\nfunction convertTableConfigToEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n const engine = extractEngineValue(config);\n\n // Try basic engines first\n const basicConfig = convertBasicEngineConfig(engine, config);\n if (basicConfig) {\n return basicConfig;\n }\n\n // Try replicated engines\n const replicatedConfig = convertReplicatedEngineConfig(engine, config);\n if (replicatedConfig) {\n return replicatedConfig;\n }\n\n // Handle S3Queue\n if (engine === ClickHouseEngines.S3Queue) {\n return convertS3QueueEngineConfig(config);\n }\n\n // Handle S3\n if (engine === ClickHouseEngines.S3) {\n return convertS3EngineConfig(config);\n }\n\n // Handle Buffer\n if (engine === ClickHouseEngines.Buffer) {\n return convertBufferEngineConfig(config);\n }\n\n // Handle Distributed\n if (engine === ClickHouseEngines.Distributed) {\n return convertDistributedEngineConfig(config);\n }\n\n // Handle IcebergS3\n if (engine === ClickHouseEngines.IcebergS3) {\n return convertIcebergS3EngineConfig(config);\n }\n\n // Handle Kafka\n if (engine === ClickHouseEngines.Kafka) {\n return convertKafkaEngineConfig(config);\n }\n\n return undefined;\n}\n\nexport const toInfraMap = (registry: typeof moose_internal) => {\n const tables: { [key: string]: TableJson } = {};\n const topics: { [key: string]: StreamJson } = {};\n const ingestApis: { [key: string]: IngestApiJson } = {};\n const apis: { [key: string]: ApiJson } = {};\n const sqlResources: { [key: string]: SqlResourceJson } = {};\n const workflows: { [key: string]: WorkflowJson } = {};\n const webApps: { [key: string]: WebAppJson } = {};\n\n registry.tables.forEach((table) => {\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n // If the table is part of an IngestPipeline, inherit metadata if not set\n let metadata = (table as any).metadata;\n if (!metadata && table.config && (table as any).pipelineParent) {\n metadata = (table as any).pipelineParent.metadata;\n }\n // Create type-safe engine configuration\n const engineConfig: EngineConfig | undefined =\n 
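To trace the dispatch end to end, a sketch of one input/output pair (broker and names are hypothetical; `convertTableConfigToEngineConfig` itself is module-internal, and `ClickHouseEngines` is assumed importable as elsewhere in this package):

```typescript
// Input: a user-level Kafka table config.
const kafkaCfg = {
  engine: ClickHouseEngines.Kafka,
  brokerList: "localhost:19092", // hypothetical broker
  topicList: "events",
  groupName: "moose",
  format: "JSONEachRow",
};

// convertTableConfigToEngineConfig(kafkaCfg) is expected to return:
// {
//   engine: "Kafka",
//   brokerList: "localhost:19092",
//   topicList: "events",
//   groupName: "moose",
//   format: "JSONEachRow",
// }
```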
convertTableConfigToEngineConfig(table.config);\n\n // Get table settings, applying defaults for S3Queue\n let tableSettings: { [key: string]: string } | undefined = undefined;\n\n if (table.config.settings) {\n // Convert all settings to strings, filtering out undefined values\n tableSettings = Object.entries(table.config.settings).reduce(\n (acc, [key, value]) => {\n if (value !== undefined) {\n acc[key] = String(value);\n }\n return acc;\n },\n {} as { [key: string]: string },\n );\n }\n\n // Apply default settings for S3Queue if not already specified\n if (engineConfig?.engine === \"S3Queue\") {\n if (!tableSettings) {\n tableSettings = {};\n }\n // Set default mode to 'unordered' if not specified\n if (!tableSettings.mode) {\n tableSettings.mode = \"unordered\";\n }\n }\n\n // Determine ORDER BY from config\n // Note: engines like Buffer and Distributed don't support orderBy/partitionBy/sampleBy\n const hasOrderByFields =\n \"orderByFields\" in table.config &&\n Array.isArray(table.config.orderByFields) &&\n table.config.orderByFields.length > 0;\n const hasOrderByExpression =\n \"orderByExpression\" in table.config &&\n typeof table.config.orderByExpression === \"string\" &&\n table.config.orderByExpression.length > 0;\n if (hasOrderByFields && hasOrderByExpression) {\n throw new Error(\n `Table ${table.name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n const orderBy: string[] | string =\n hasOrderByExpression && \"orderByExpression\" in table.config ?\n (table.config.orderByExpression ?? \"\")\n : \"orderByFields\" in table.config ? (table.config.orderByFields ?? [])\n : [];\n\n tables[id] = {\n name: table.name,\n columns: table.columnArray,\n orderBy,\n partitionBy:\n \"partitionBy\" in table.config ? table.config.partitionBy : undefined,\n sampleByExpression:\n \"sampleByExpression\" in table.config ?\n table.config.sampleByExpression\n : undefined,\n primaryKeyExpression:\n \"primaryKeyExpression\" in table.config ?\n table.config.primaryKeyExpression\n : undefined,\n engineConfig,\n version: table.config.version,\n metadata,\n lifeCycle: table.config.lifeCycle,\n // Map 'settings' to 'tableSettings' for internal use\n tableSettings:\n tableSettings && Object.keys(tableSettings).length > 0 ?\n tableSettings\n : undefined,\n indexes:\n table.config.indexes?.map((i) => ({\n ...i,\n granularity: i.granularity === undefined ? 1 : i.granularity,\n arguments: i.arguments === undefined ? 
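Two behaviors in this block are worth calling out, sketched from the user's side (field values hypothetical):

```typescript
// 1. orderByFields and orderByExpression are mutually exclusive:
const ok = { orderByFields: ["id", "timestamp"] };       // accepted
const alsoOk = { orderByExpression: "(id, timestamp)" }; // accepted
// { orderByFields: ["id"], orderByExpression: "(id)" }  // toInfraMap throws

// 2. S3Queue tables get a default processing mode injected unless one is set:
// engineConfig.engine === "S3Queue" && !tableSettings.mode
//   => tableSettings.mode = "unordered"
```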
[] : i.arguments,\n })) || [],\n ttl: table.config.ttl,\n database: table.config.database,\n cluster: table.config.cluster,\n };\n });\n\n registry.streams.forEach((stream) => {\n // If the stream is part of an IngestPipeline, inherit metadata if not set\n let metadata = stream.metadata;\n if (!metadata && stream.config && (stream as any).pipelineParent) {\n metadata = (stream as any).pipelineParent.metadata;\n }\n const transformationTargets: Target[] = [];\n const consumers: Consumer[] = [];\n\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([destination, _, config]) => {\n transformationTargets.push({\n kind: \"stream\",\n name: destinationName,\n version: config.version,\n metadata: config.metadata,\n sourceFile: config.sourceFile,\n });\n });\n });\n\n stream._consumers.forEach((consumer) => {\n consumers.push({\n version: consumer.config.version,\n sourceFile: consumer.config.sourceFile,\n });\n });\n\n topics[stream.name] = {\n name: stream.name,\n columns: stream.columnArray,\n targetTable: stream.config.destination?.name,\n targetTableVersion: stream.config.destination?.config.version,\n retentionPeriod: stream.config.retentionPeriod ?? defaultRetentionPeriod,\n partitionCount: stream.config.parallelism ?? 1,\n version: stream.config.version,\n transformationTargets,\n hasMultiTransform: stream._multipleTransformations !== undefined,\n consumers,\n metadata,\n lifeCycle: stream.config.lifeCycle,\n schemaConfig: stream.config.schemaConfig,\n };\n });\n\n registry.ingestApis.forEach((api) => {\n // If the ingestApi is part of an IngestPipeline, inherit metadata if not set\n let metadata = api.metadata;\n if (!metadata && api.config && (api as any).pipelineParent) {\n metadata = (api as any).pipelineParent.metadata;\n }\n ingestApis[api.name] = {\n name: api.name,\n columns: api.columnArray,\n version: api.config.version,\n path: api.config.path,\n writeTo: {\n kind: \"stream\",\n name: api.config.destination.name,\n },\n deadLetterQueue: api.config.deadLetterQueue?.name,\n metadata,\n schema: api.schema,\n allowExtraFields: api.allowExtraFields,\n };\n });\n\n registry.apis.forEach((api, key) => {\n const rustKey =\n api.config.version ? 
`${api.name}:${api.config.version}` : api.name;\n apis[rustKey] = {\n name: api.name,\n queryParams: api.columnArray,\n responseSchema: api.responseSchema,\n version: api.config.version,\n path: api.config.path,\n metadata: api.metadata,\n };\n });\n\n registry.sqlResources.forEach((sqlResource) => {\n sqlResources[sqlResource.name] = {\n name: sqlResource.name,\n setup: sqlResource.setup,\n teardown: sqlResource.teardown,\n sourceFile: sqlResource.sourceFile,\n sourceLine: sqlResource.sourceLine,\n sourceColumn: sqlResource.sourceColumn,\n\n pullsDataFrom: sqlResource.pullsDataFrom.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n pushesDataTo: sqlResource.pushesDataTo.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n };\n });\n\n registry.workflows.forEach((workflow) => {\n workflows[workflow.name] = {\n name: workflow.name,\n retries: workflow.config.retries,\n timeout: workflow.config.timeout,\n schedule: workflow.config.schedule,\n };\n });\n\n registry.webApps.forEach((webApp) => {\n webApps[webApp.name] = {\n name: webApp.name,\n mountPath: webApp.config.mountPath || \"/\",\n metadata: webApp.config.metadata,\n };\n });\n\n return {\n topics,\n tables,\n ingestApis,\n apis,\n sqlResources,\n workflows,\n webApps,\n };\n};\n\n/**\n * Retrieves the global internal Moose resource registry.\n * Uses `globalThis` to ensure a single registry instance.\n *\n * @returns The internal Moose resource registry.\n */\nexport const getMooseInternal = (): typeof moose_internal =>\n (globalThis as any).moose_internal;\n\n// work around for variable visibility in compiler output\nif (getMooseInternal() === undefined) {\n (globalThis as any).moose_internal = moose_internal;\n}\n\n/**\n * Loads the user's application entry point (`app/index.ts`) to register resources,\n * then generates and prints the infrastructure map as JSON.\n *\n * This function is the main entry point used by the Moose infrastructure system\n * to discover the defined resources.\n * It prints the JSON map surrounded by specific delimiters (`___MOOSE_STUFF___start`\n * and `end___MOOSE_STUFF___`) for easy extraction by the calling process.\n */\nexport const dumpMooseInternal = async () => {\n loadIndex();\n\n console.log(\n \"___MOOSE_STUFF___start\",\n JSON.stringify(toInfraMap(getMooseInternal())),\n \"end___MOOSE_STUFF___\",\n );\n};\n\nconst loadIndex = () => {\n // Clear the registry before loading to support hot reloading\n const registry = getMooseInternal();\n registry.tables.clear();\n registry.streams.clear();\n registry.ingestApis.clear();\n registry.apis.clear();\n registry.sqlResources.clear();\n registry.workflows.clear();\n registry.webApps.clear();\n\n // Clear require cache for app directory to pick up changes\n const appDir 
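Note the two different id conventions in play here; a small sketch (helper names are hypothetical, added only to show the formats):

```typescript
// Tables are keyed with an underscore, APIs with a colon.
const tableId = (name: string, version?: string) =>
  version ? `${name}_${version}` : name;
const apiKey = (name: string, version?: string) =>
  version ? `${name}:${version}` : name;

tableId("events", "1.2"); // "events_1.2" (dots survive in the infra-map id;
                          // physical table names replace them: "events_1_2")
apiKey("getEvents", "2"); // "getEvents:2"
```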
= `${process.cwd()}/${getSourceDir()}`;\n Object.keys(require.cache).forEach((key) => {\n if (key.startsWith(appDir)) {\n delete require.cache[key];\n }\n });\n\n try {\n require(`${process.cwd()}/${getSourceDir()}/index.ts`);\n } catch (error) {\n let hint: string | undefined;\n const details = error instanceof Error ? error.message : String(error);\n if (details.includes(\"ERR_REQUIRE_ESM\") || details.includes(\"ES Module\")) {\n hint =\n \"The file or its dependencies are ESM-only. Switch to packages that dual-support CJS & ESM, or upgrade to Node 22.12+. \" +\n \"If you must use Node 20, you may try Node 20.19\\n\\n\";\n }\n\n const errorMsg = `${hint ?? \"\"}${details}`;\n const cause = error instanceof Error ? error : undefined;\n throw new Error(errorMsg, { cause });\n }\n};\n\n/**\n * Loads the user's application entry point and extracts all registered stream\n * transformation and consumer functions.\n *\n * @returns A Map where keys are unique identifiers for transformations/consumers\n * (e.g., \"sourceStream_destStream_version\", \"sourceStream_<no-target>_version\")\n * and values are tuples containing: [handler function, config, source stream columns]\n */\nexport const getStreamingFunctions = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n const transformFunctions = new Map<\n string,\n [\n (data: unknown) => unknown,\n TransformConfig<any> | ConsumerConfig<any>,\n Column[],\n ]\n >();\n\n registry.streams.forEach((stream) => {\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([_, transform, config]) => {\n const transformFunctionKey = `${stream.name}_${destinationName}${config.version ? `_${config.version}` : \"\"}`;\n compilerLog(`getStreamingFunctions: ${transformFunctionKey}`);\n transformFunctions.set(transformFunctionKey, [\n transform,\n config,\n stream.columnArray,\n ]);\n });\n });\n\n stream._consumers.forEach((consumer) => {\n const consumerFunctionKey = `${stream.name}_<no-target>${consumer.config.version ? `_${consumer.config.version}` : \"\"}`;\n transformFunctions.set(consumerFunctionKey, [\n consumer.consumer,\n consumer.config,\n stream.columnArray,\n ]);\n });\n });\n\n return transformFunctions;\n};\n\n/**\n * Loads the user's application entry point and extracts all registered\n * API handler functions.\n *\n * @returns A Map where keys are the names of the APIs and values\n * are their corresponding handler functions.\n */\nexport const getApis = async () => {\n loadIndex();\n const apiFunctions = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n const registry = getMooseInternal();\n // Single pass: store full keys, track aliasing decisions\n const versionCountByName = new Map<string, number>();\n const nameToSoleVersionHandler = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n registry.apis.forEach((api, key) => {\n const handler = api.getHandler();\n apiFunctions.set(key, handler);\n\n if (!api.config.version) {\n // Explicit unversioned takes precedence for alias\n if (!apiFunctions.has(api.name)) {\n apiFunctions.set(api.name, handler);\n }\n nameToSoleVersionHandler.delete(api.name);\n versionCountByName.delete(api.name);\n } else if (!apiFunctions.has(api.name)) {\n // Only track versioned for alias if no explicit unversioned present\n const count = (versionCountByName.get(api.name) ?? 
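The registry keys built here have a fixed shape; a sketch with hypothetical stream names:

```typescript
const transformKey = (src: string, dest: string, version?: string) =>
  `${src}_${dest}${version ? `_${version}` : ""}`;
const consumerKey = (src: string, version?: string) =>
  `${src}_<no-target>${version ? `_${version}` : ""}`;

transformKey("raw_events", "clean_events", "1.0"); // "raw_events_clean_events_1.0"
consumerKey("raw_events");                         // "raw_events_<no-target>"
```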
0) + 1;\n versionCountByName.set(api.name, count);\n if (count === 1) {\n nameToSoleVersionHandler.set(api.name, handler);\n } else {\n nameToSoleVersionHandler.delete(api.name);\n }\n }\n });\n\n // Finalize aliases for names that have exactly one versioned API and no unversioned\n nameToSoleVersionHandler.forEach((handler, name) => {\n if (!apiFunctions.has(name)) {\n apiFunctions.set(name, handler);\n }\n });\n\n return apiFunctions;\n};\n\nexport const dlqSchema: IJsonSchemaCollection.IV3_1 = {\n version: \"3.1\",\n components: {\n schemas: {\n DeadLetterModel: {\n type: \"object\",\n properties: {\n originalRecord: {\n $ref: \"#/components/schemas/Recordstringany\",\n },\n errorMessage: {\n type: \"string\",\n },\n errorType: {\n type: \"string\",\n },\n failedAt: {\n type: \"string\",\n format: \"date-time\",\n },\n source: {\n oneOf: [\n {\n const: \"api\",\n },\n {\n const: \"transform\",\n },\n {\n const: \"table\",\n },\n ],\n },\n },\n required: [\n \"originalRecord\",\n \"errorMessage\",\n \"errorType\",\n \"failedAt\",\n \"source\",\n ],\n },\n Recordstringany: {\n type: \"object\",\n properties: {},\n required: [],\n description: \"Construct a type with a set of properties K of type T\",\n additionalProperties: {},\n },\n },\n },\n schemas: [\n {\n $ref: \"#/components/schemas/DeadLetterModel\",\n },\n ],\n};\n\nexport const dlqColumns: Column[] = [\n {\n name: \"originalRecord\",\n data_type: \"Json\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorMessage\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorType\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"failedAt\",\n data_type: \"DateTime\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"source\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n];\n\nexport const getWorkflows = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n return registry.workflows;\n};\n\nfunction findTaskInTree(\n task: Task<any, any>,\n targetName: string,\n): Task<any, any> | undefined {\n if (task.name === targetName) {\n return task;\n }\n\n if (task.config.onComplete?.length) {\n for (const childTask of task.config.onComplete) {\n const found = findTaskInTree(childTask, targetName);\n if (found) {\n return found;\n }\n }\n }\n\n return undefined;\n}\n\nexport const getTaskForWorkflow = async (\n workflowName: string,\n taskName: string,\n): Promise<Task<any, any>> => {\n const workflows = await getWorkflows();\n const workflow = workflows.get(workflowName);\n if (!workflow) {\n throw new Error(`Workflow ${workflowName} not found`);\n }\n\n const task = findTaskInTree(\n workflow.config.startingTask as Task<any, any>,\n taskName,\n );\n if (!task) {\n throw new Error(`Task ${taskName} not found in workflow ${workflowName}`);\n }\n\n return task;\n};\n\nexport const getWebApps = async () => {\n 
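The aliasing rules above are easiest to see on a concrete registry; a hypothetical example:

```typescript
// Hypothetical registry contents -> resulting handler-map keys:
//
//   "health"                   -> "health" (explicit unversioned keeps the bare name)
//   "getStats:3"               -> "getStats:3" plus alias "getStats"
//                                 (exactly one version, no unversioned entry)
//   "getUsers:1", "getUsers:2" -> full keys only; no bare "getUsers" alias,
//                                 since two versions exist and neither is unversioned
```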
loadIndex();\n return getMooseInternal().webApps;\n};\n","import path from \"node:path\";\nimport * as toml from \"toml\";\n\n/**\n * ClickHouse configuration from moose.config.toml\n */\nexport interface ClickHouseConfig {\n host: string;\n host_port: number;\n user: string;\n password: string;\n db_name: string;\n use_ssl?: boolean;\n native_port?: number;\n}\n\n/**\n * Redpanda/Kafka configuration from moose.config.toml\n */\nexport interface KafkaConfig {\n /** Broker connection string (e.g., \"host:port\" or comma-separated list) */\n broker: string;\n /** Message timeout in milliseconds */\n message_timeout_ms: number;\n /** Default retention period in milliseconds */\n retention_ms: number;\n /** Topic replication factor */\n replication_factor?: number;\n /** SASL username for authentication, if required */\n sasl_username?: string;\n /** SASL password for authentication, if required */\n sasl_password?: string;\n /** SASL mechanism (e.g., \"PLAIN\", \"SCRAM-SHA-256\") */\n sasl_mechanism?: string;\n /** Security protocol (e.g., \"SASL_SSL\", \"PLAINTEXT\") */\n security_protocol?: string;\n /** Optional namespace used as a prefix for topics */\n namespace?: string;\n /** Optional Confluent Schema Registry URL */\n schema_registry_url?: string;\n}\n\n/**\n * Project configuration from moose.config.toml\n */\nexport interface ProjectConfig {\n language: string;\n clickhouse_config: ClickHouseConfig;\n /** Deprecated alias for `kafka_config`, kept for backward compatibility. */\n redpanda_config?: KafkaConfig;\n /**\n * Redpanda/Kafka configuration. Previously named `redpanda_config` in some places.\n * Prefer `kafka_config`; both are read for backward compatibility.\n */\n kafka_config?: KafkaConfig;\n}\n\n/**\n * Error thrown when configuration cannot be found or parsed\n */\nexport class ConfigError extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"ConfigError\";\n }\n}\n\n/**\n * Walks up the directory tree to find moose.config.toml\n */\nasync function findConfigFile(\n startDir: string = process.cwd(),\n): Promise<string | null> {\n const fs = await import(\"node:fs\");\n\n let currentDir = path.resolve(startDir);\n\n while (true) {\n const configPath = path.join(currentDir, \"moose.config.toml\");\n if (fs.existsSync(configPath)) {\n return configPath;\n }\n\n const parentDir = path.dirname(currentDir);\n if (parentDir === currentDir) {\n // Reached root directory\n break;\n }\n currentDir = parentDir;\n }\n\n return null;\n}\n\n/**\n * Reads and parses the project configuration from moose.config.toml\n */\nexport async function readProjectConfig(): Promise<ProjectConfig> {\n const fs = await import(\"node:fs\");\n const configPath = await findConfigFile();\n if (!configPath) {\n throw new ConfigError(\n \"moose.config.toml not found in current directory or any parent directory\",\n );\n }\n\n try {\n const configContent = fs.readFileSync(configPath, \"utf-8\");\n const config = toml.parse(configContent) as ProjectConfig;\n return config;\n } catch (error) {\n throw new ConfigError(`Failed to parse moose.config.toml: ${error}`);\n }\n}\n","import { readProjectConfig } from \"./configFile\";\n\ninterface RuntimeClickHouseConfig {\n host: string;\n port: string;\n username: string;\n password: string;\n database: string;\n useSSL: boolean;\n}\n\ninterface RuntimeKafkaConfig {\n broker: string;\n messageTimeoutMs: number;\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string;\n securityProtocol?: string;\n namespace?: string;\n schemaRegistryUrl?: string;\n}\n\nclass ConfigurationRegistry {\n private 
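A usage sketch, assuming these config helpers are re-exported from the package root (in source they live in src/config/configFile.ts; adjust the import path if they are not):

```typescript
import { readProjectConfig, ConfigError } from "@514labs/moose-lib"; // path assumed

async function main() {
  try {
    // Walks up from process.cwd() until it finds moose.config.toml, then parses it.
    const cfg = await readProjectConfig();
    console.log(cfg.clickhouse_config.host);
  } catch (e) {
    if (e instanceof ConfigError) {
      // No moose.config.toml anywhere up the tree, or the TOML failed to parse.
    }
  }
}
```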
static instance: ConfigurationRegistry;\n private clickhouseConfig?: RuntimeClickHouseConfig;\n private kafkaConfig?: RuntimeKafkaConfig;\n\n static getInstance(): ConfigurationRegistry {\n if (!ConfigurationRegistry.instance) {\n ConfigurationRegistry.instance = new ConfigurationRegistry();\n }\n return ConfigurationRegistry.instance;\n }\n\n setClickHouseConfig(config: RuntimeClickHouseConfig): void {\n this.clickhouseConfig = config;\n }\n\n setKafkaConfig(config: RuntimeKafkaConfig): void {\n this.kafkaConfig = config;\n }\n\n private _env(name: string): string | undefined {\n const value = process.env[name];\n if (value === undefined) return undefined;\n const trimmed = value.trim();\n return trimmed.length > 0 ? trimmed : undefined;\n }\n\n private _parseBool(value: string | undefined): boolean | undefined {\n if (value === undefined) return undefined;\n switch (value.trim().toLowerCase()) {\n case \"1\":\n case \"true\":\n case \"yes\":\n case \"on\":\n return true;\n case \"0\":\n case \"false\":\n case \"no\":\n case \"off\":\n return false;\n default:\n return undefined;\n }\n }\n\n async getClickHouseConfig(): Promise<RuntimeClickHouseConfig> {\n if (this.clickhouseConfig) {\n return this.clickhouseConfig;\n }\n\n // Fallback to reading from config file for backward compatibility\n const projectConfig = await readProjectConfig();\n const envHost = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST\");\n const envPort = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST_PORT\");\n const envUser = this._env(\"MOOSE_CLICKHOUSE_CONFIG__USER\");\n const envPassword = this._env(\"MOOSE_CLICKHOUSE_CONFIG__PASSWORD\");\n const envDb = this._env(\"MOOSE_CLICKHOUSE_CONFIG__DB_NAME\");\n const envUseSSL = this._parseBool(\n this._env(\"MOOSE_CLICKHOUSE_CONFIG__USE_SSL\"),\n );\n\n return {\n host: envHost ?? projectConfig.clickhouse_config.host,\n port: envPort ?? projectConfig.clickhouse_config.host_port.toString(),\n username: envUser ?? projectConfig.clickhouse_config.user,\n password: envPassword ?? projectConfig.clickhouse_config.password,\n database: envDb ?? projectConfig.clickhouse_config.db_name,\n useSSL:\n envUseSSL !== undefined ? 
envUseSSL : (\n projectConfig.clickhouse_config.use_ssl || false\n ),\n };\n }\n\n async getStandaloneClickhouseConfig(\n overrides?: Partial<RuntimeClickHouseConfig>,\n ): Promise<RuntimeClickHouseConfig> {\n if (this.clickhouseConfig) {\n return { ...this.clickhouseConfig, ...overrides };\n }\n\n const envHost = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST\");\n const envPort = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST_PORT\");\n const envUser = this._env(\"MOOSE_CLICKHOUSE_CONFIG__USER\");\n const envPassword = this._env(\"MOOSE_CLICKHOUSE_CONFIG__PASSWORD\");\n const envDb = this._env(\"MOOSE_CLICKHOUSE_CONFIG__DB_NAME\");\n const envUseSSL = this._parseBool(\n this._env(\"MOOSE_CLICKHOUSE_CONFIG__USE_SSL\"),\n );\n\n let projectConfig;\n try {\n projectConfig = await readProjectConfig();\n } catch (error) {\n projectConfig = null;\n }\n\n const defaults = {\n host: \"localhost\",\n port: \"18123\",\n username: \"default\",\n password: \"\",\n database: \"local\",\n useSSL: false,\n };\n\n return {\n host:\n overrides?.host ??\n envHost ??\n projectConfig?.clickhouse_config.host ??\n defaults.host,\n port:\n overrides?.port ??\n envPort ??\n projectConfig?.clickhouse_config.host_port.toString() ??\n defaults.port,\n username:\n overrides?.username ??\n envUser ??\n projectConfig?.clickhouse_config.user ??\n defaults.username,\n password:\n overrides?.password ??\n envPassword ??\n projectConfig?.clickhouse_config.password ??\n defaults.password,\n database:\n overrides?.database ??\n envDb ??\n projectConfig?.clickhouse_config.db_name ??\n defaults.database,\n useSSL:\n overrides?.useSSL ??\n envUseSSL ??\n projectConfig?.clickhouse_config.use_ssl ??\n defaults.useSSL,\n };\n }\n\n async getKafkaConfig(): Promise<RuntimeKafkaConfig> {\n if (this.kafkaConfig) {\n return this.kafkaConfig;\n }\n\n const projectConfig = await readProjectConfig();\n\n const envBroker =\n this._env(\"MOOSE_REDPANDA_CONFIG__BROKER\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__BROKER\");\n const envMsgTimeout =\n this._env(\"MOOSE_REDPANDA_CONFIG__MESSAGE_TIMEOUT_MS\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__MESSAGE_TIMEOUT_MS\");\n const envSaslUsername =\n this._env(\"MOOSE_REDPANDA_CONFIG__SASL_USERNAME\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SASL_USERNAME\");\n const envSaslPassword =\n this._env(\"MOOSE_REDPANDA_CONFIG__SASL_PASSWORD\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SASL_PASSWORD\");\n const envSaslMechanism =\n this._env(\"MOOSE_REDPANDA_CONFIG__SASL_MECHANISM\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SASL_MECHANISM\");\n const envSecurityProtocol =\n this._env(\"MOOSE_REDPANDA_CONFIG__SECURITY_PROTOCOL\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SECURITY_PROTOCOL\");\n const envNamespace =\n this._env(\"MOOSE_REDPANDA_CONFIG__NAMESPACE\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__NAMESPACE\");\n const envSchemaRegistryUrl =\n this._env(\"MOOSE_REDPANDA_CONFIG__SCHEMA_REGISTRY_URL\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SCHEMA_REGISTRY_URL\");\n\n const fileKafka =\n projectConfig.kafka_config ?? projectConfig.redpanda_config;\n\n return {\n broker: envBroker ?? fileKafka?.broker ?? \"localhost:19092\",\n messageTimeoutMs:\n envMsgTimeout ?\n parseInt(envMsgTimeout, 10)\n : (fileKafka?.message_timeout_ms ?? 1000),\n saslUsername: envSaslUsername ?? fileKafka?.sasl_username,\n saslPassword: envSaslPassword ?? fileKafka?.sasl_password,\n saslMechanism: envSaslMechanism ?? fileKafka?.sasl_mechanism,\n securityProtocol: envSecurityProtocol ?? fileKafka?.security_protocol,\n namespace: envNamespace ?? 
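The resolution order implemented above, summarized as a sketch (the host value is hypothetical):

```typescript
// Highest precedence first:
// 1. runtime config injected via setClickHouseConfig()/setKafkaConfig(), or overrides
// 2. MOOSE_* environment variables (empty or whitespace-only values are ignored);
//    Kafka reads MOOSE_REDPANDA_CONFIG__* before MOOSE_KAFKA_CONFIG__*
// 3. moose.config.toml (kafka_config preferred over redpanda_config)
// 4. standalone defaults: localhost:18123, user "default", database "local"
process.env.MOOSE_CLICKHOUSE_CONFIG__HOST = "ch.internal.example"; // hypothetical
process.env.MOOSE_CLICKHOUSE_CONFIG__USE_SSL = "yes"; // _parseBool: 1/true/yes/on
```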
fileKafka?.namespace,\n schemaRegistryUrl: envSchemaRegistryUrl ?? fileKafka?.schema_registry_url,\n };\n }\n\n hasRuntimeConfig(): boolean {\n return !!this.clickhouseConfig || !!this.kafkaConfig;\n }\n}\n\n(globalThis as any)._mooseConfigRegistry = ConfigurationRegistry.getInstance();\nexport type {\n ConfigurationRegistry,\n RuntimeClickHouseConfig,\n RuntimeKafkaConfig,\n};\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase, TypiaValidators } from \"../typedBase\";\nimport {\n Column,\n isArrayNestedType,\n isNestedType,\n} from \"../../dataModels/dataModelTypes\";\nimport { ClickHouseEngines } from \"../../blocks/helpers\";\nimport { getMooseInternal, isClientOnlyMode } from \"../internal\";\nimport { Readable } from \"node:stream\";\nimport { createHash } from \"node:crypto\";\nimport type {\n ConfigurationRegistry,\n RuntimeClickHouseConfig,\n} from \"../../config/runtime\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport { IdentifierBrandedString, quoteIdentifier } from \"../../sqlHelpers\";\nimport type { NodeClickHouseClient } from \"@clickhouse/client/dist/client\";\n\nexport interface TableIndex {\n name: string;\n expression: string;\n type: string;\n arguments?: string[];\n granularity?: number;\n}\n\n/**\n * Represents a failed record during insertion with error details\n */\nexport interface FailedRecord<T> {\n /** The original record that failed to insert */\n record: T;\n /** The error message describing why the insertion failed */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n}\n\n/**\n * Result of an insert operation with detailed success/failure information\n */\nexport interface InsertResult<T> {\n /** Number of records successfully inserted */\n successful: number;\n /** Number of records that failed to insert */\n failed: number;\n /** Total number of records processed */\n total: number;\n /** Detailed information about failed records (if record isolation was used) */\n failedRecords?: FailedRecord<T>[];\n}\n\n/**\n * Error handling strategy for insert operations\n */\nexport type ErrorStrategy =\n | \"fail-fast\" // Fail immediately on any error (default)\n | \"discard\" // Discard bad records and continue with good ones\n | \"isolate\"; // Retry individual records to isolate failures\n\n/**\n * Options for insert operations\n */\nexport interface InsertOptions {\n /** Maximum number of bad records to tolerate before failing */\n allowErrors?: number;\n /** Maximum ratio of bad records to tolerate (0.0 to 1.0) before failing */\n allowErrorsRatio?: number;\n /** Error handling strategy */\n strategy?: ErrorStrategy;\n /** Whether to enable dead letter queue for failed records (future feature) */\n deadLetterQueue?: boolean;\n /** Whether to validate data against schema before insertion (default: true) */\n validate?: boolean;\n /** Whether to skip validation for individual records during 'isolate' strategy retries (default: false) */\n skipValidationOnRetry?: boolean;\n}\n\n/**\n * Validation result for a record with detailed error information\n */\nexport interface ValidationError {\n /** The original record that failed validation */\n record: any;\n /** Detailed validation error message */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n /** The path to the field that failed validation */\n path?: string;\n}\n\n/**\n * Result of data validation with success/failure breakdown\n */\nexport interface ValidationResult<T> {\n /** 
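These types parameterize batch inserts; a hedged usage sketch, assuming OlapTable exposes an `insert(records, options)` method built on them (the memoized-client comments further down point that way, but the method itself is not shown here):

```typescript
// `events` is an OlapTable<Event> defined elsewhere; `rows` is an Event[].
const result: InsertResult<Event> = await events.insert(rows, {
  strategy: "isolate",    // retry records one by one to isolate failures
  allowErrorsRatio: 0.05, // tolerate up to 5% bad records before failing
});

console.log(`${result.successful}/${result.total} inserted`);
for (const f of result.failedRecords ?? []) {
  console.error(`row ${f.index}: ${f.error}`); // per-record detail from isolation
}
```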
Records that passed validation */\n valid: T[];\n /** Records that failed validation with detailed error information */\n invalid: ValidationError[];\n /** Total number of records processed */\n total: number;\n}\n\n/**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING\n * Note: Since ClickHouse 24.7, settings no longer require the 's3queue_' prefix\n */\nexport interface S3QueueTableSettings {\n /** Processing mode: \"ordered\" for sequential or \"unordered\" for parallel processing */\n mode?: \"ordered\" | \"unordered\";\n /** What to do with files after processing: 'keep' or 'delete' */\n after_processing?: \"keep\" | \"delete\";\n /** ZooKeeper/Keeper path for coordination between replicas */\n keeper_path?: string;\n /** Number of retry attempts for failed files */\n loading_retries?: string;\n /** Number of threads for parallel processing */\n processing_threads_num?: string;\n /** Enable parallel inserts */\n parallel_inserts?: string;\n /** Enable logging to system.s3queue_log table */\n enable_logging_to_queue_log?: string;\n /** Last processed file path (for ordered mode) */\n last_processed_path?: string;\n /** Maximum number of tracked files in ZooKeeper */\n tracked_files_limit?: string;\n /** TTL for tracked files in seconds */\n tracked_file_ttl_sec?: string;\n /** Minimum polling timeout in milliseconds */\n polling_min_timeout_ms?: string;\n /** Maximum polling timeout in milliseconds */\n polling_max_timeout_ms?: string;\n /** Polling backoff in milliseconds */\n polling_backoff_ms?: string;\n /** Minimum cleanup interval in milliseconds */\n cleanup_interval_min_ms?: string;\n /** Maximum cleanup interval in milliseconds */\n cleanup_interval_max_ms?: string;\n /** Number of buckets for sharding (0 = disabled) */\n buckets?: string;\n /** Batch size for listing objects */\n list_objects_batch_size?: string;\n /** Enable hash ring filtering for distributed processing */\n enable_hash_ring_filtering?: string;\n /** Maximum files to process before committing */\n max_processed_files_before_commit?: string;\n /** Maximum rows to process before committing */\n max_processed_rows_before_commit?: string;\n /** Maximum bytes to process before committing */\n max_processed_bytes_before_commit?: string;\n /** Maximum processing time in seconds before committing */\n max_processing_time_sec_before_commit?: string;\n /** Use persistent processing nodes (available from 25.8) */\n use_persistent_processing_nodes?: string;\n /** TTL for persistent processing nodes in seconds */\n persistent_processing_nodes_ttl_seconds?: string;\n /** Additional settings */\n [key: string]: string | undefined;\n}\n\n/**\n * Base configuration shared by all table engines\n * @template T The data type of the records stored in the table.\n */\n\nexport type BaseOlapConfig<T> = (\n | {\n /**\n * Specifies the fields to use for ordering data within the ClickHouse table.\n * This is crucial for optimizing query performance.\n */\n orderByFields: (keyof T & string)[];\n orderByExpression?: undefined;\n }\n | {\n orderByFields?: undefined;\n /**\n * An arbitrary ClickHouse SQL expression for the order by clause.\n *\n * `orderByExpression: \"(id, name)\"` is equivalent to `orderByFields: [\"id\", \"name\"]`\n * `orderByExpression: \"tuple()\"` means no sorting\n */\n orderByExpression: string;\n }\n // specify either or leave both unspecified\n | { orderByFields?: undefined; orderByExpression?: undefined }\n) & {\n partitionBy?: string;\n /**\n * SAMPLE BY expression for 
approximate query processing.\n *\n * Examples:\n * ```typescript\n * // Single unsigned integer field\n * sampleByExpression: \"userId\"\n *\n * // Hash function on any field type\n * sampleByExpression: \"cityHash64(id)\"\n *\n * // Multiple fields with hash\n * sampleByExpression: \"cityHash64(userId, timestamp)\"\n * ```\n *\n * Requirements:\n * - Expression must evaluate to an unsigned integer (UInt8/16/32/64)\n * - Expression must be present in the ORDER BY clause\n * - If using hash functions, the same expression must appear in orderByExpression\n */\n sampleByExpression?: string;\n /**\n * Optional PRIMARY KEY expression.\n * When specified, this overrides the primary key inferred from Key<T> column annotations.\n *\n * This allows for:\n * - Complex primary keys using functions (e.g., \"cityHash64(id)\")\n * - Different column ordering in primary key vs schema definition\n * - Primary keys that differ from ORDER BY\n *\n * Example: primaryKeyExpression: \"(userId, cityHash64(eventId))\"\n *\n * Note: When this is set, any Key<T> annotations on columns are ignored for PRIMARY KEY generation.\n */\n primaryKeyExpression?: string;\n version?: string;\n lifeCycle?: LifeCycle;\n settings?: { [key: string]: string };\n /**\n * Optional TTL configuration for the table.\n * e.g., \"TTL timestamp + INTERVAL 90 DAY DELETE\"\n *\n * Use the {@link ClickHouseTTL} type to configure column level TTL\n */\n ttl?: string;\n /** Optional secondary/data-skipping indexes */\n indexes?: TableIndex[];\n /**\n * Optional database name for multi-database support.\n * When not specified, uses the global ClickHouse config database.\n */\n database?: string;\n /**\n * Optional cluster name for ON CLUSTER support.\n * Use this to enable replicated tables across ClickHouse clusters.\n * The cluster must be defined in config.toml (dev environment only).\n * Example: cluster: \"prod_cluster\"\n */\n cluster?: string;\n};\n\n/**\n * Configuration for MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type MergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.MergeTree;\n};\n\n/**\n * Configuration for ReplacingMergeTree engine (deduplication)\n * @template T The data type of the records stored in the table.\n */\nexport type ReplacingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.ReplacingMergeTree;\n ver?: keyof T & string; // Optional version column\n isDeleted?: keyof T & string; // Optional is_deleted column\n};\n\n/**\n * Configuration for AggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type AggregatingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.AggregatingMergeTree;\n};\n\n/**\n * Configuration for SummingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type SummingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.SummingMergeTree;\n columns?: string[];\n};\n\n/**\n * Configuration for CollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type CollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.CollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n};\n\n/**\n * Configuration for VersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type VersionedCollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & 
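A usage sketch of these typed configs; because `ver` is constrained to `keyof T & string`, a typo fails at compile time (imports assumed re-exported from the package root, and in a Moose app the compiler supplies the runtime schema arguments):

```typescript
import { ClickHouseEngines, OlapTable } from "@514labs/moose-lib";

interface Event {
  id: string;
  updatedAt: Date;
}

const events = new OlapTable<Event>("events", {
  engine: ClickHouseEngines.ReplacingMergeTree,
  orderByFields: ["id"],
  ver: "updatedAt", // keyof Event & string; "updated_at" would not compile
});
```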
{\n engine: ClickHouseEngines.VersionedCollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n ver: keyof T & string; // Version column for ordering state changes\n};\n\ninterface ReplicatedEngineProperties {\n keeperPath?: string;\n replicaName?: string;\n}\n\n/**\n * Configuration for ReplicatedMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedMergeTreeConfig<T> = Omit<MergeTreeConfig<T>, \"engine\"> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedMergeTree;\n };\n\n/**\n * Configuration for ReplicatedReplacingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedReplacingMergeTreeConfig<T> = Omit<\n ReplacingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedReplacingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedAggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedAggregatingMergeTreeConfig<T> = Omit<\n AggregatingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedAggregatingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedSummingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedSummingMergeTreeConfig<T> = Omit<\n SummingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedSummingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedCollapsingMergeTreeConfig<T> = Omit<\n CollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedCollapsingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedVersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. 
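A sketch of the replication note above in practice (table names and Keeper path hypothetical; imports assumed):

```typescript
import { ClickHouseEngines, OlapTable } from "@514labs/moose-lib";

interface Row {
  id: string;
}

// ClickHouse Cloud: omit keeperPath/replicaName; replication is managed for you.
const cloud = new OlapTable<Row>("rows", {
  engine: ClickHouseEngines.ReplicatedMergeTree,
  orderByFields: ["id"],
});

// Self-hosted with ClickHouse Keeper: provide both parameters (or neither).
const selfHosted = new OlapTable<Row>("rows_sh", {
  engine: ClickHouseEngines.ReplicatedMergeTree,
  orderByFields: ["id"],
  keeperPath: "/clickhouse/tables/{shard}/rows_sh", // hypothetical Keeper path
  replicaName: "{replica}",
});
```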
For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedVersionedCollapsingMergeTreeConfig<T> = Omit<\n VersionedCollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree;\n };\n\n/**\n * Configuration for S3Queue engine - only non-alterable constructor parameters.\n * S3Queue-specific settings like 'mode', 'keeper_path', etc. should be specified\n * in the settings field, not here.\n * @template T The data type of the records stored in the table.\n */\nexport type S3QueueConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"settings\" | \"orderByFields\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.S3Queue;\n /** S3 bucket path with wildcards (e.g., 's3://bucket/data/*.json') */\n s3Path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd') */\n compression?: string;\n /** Custom HTTP headers */\n headers?: { [key: string]: string };\n /**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING.\n * These settings control the behavior of the S3Queue engine.\n */\n settings?: S3QueueTableSettings;\n};\n\n/**\n * Configuration for S3 engine\n * Note: S3 engine supports ORDER BY clause, unlike S3Queue, Buffer, and Distributed engines\n * @template T The data type of the records stored in the table.\n */\nexport type S3Config<T> = Omit<BaseOlapConfig<T>, \"sampleByExpression\"> & {\n engine: ClickHouseEngines.S3;\n /** S3 path (e.g., 's3://bucket/path/file.json') */\n path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd', 'auto') */\n compression?: string;\n /** Partition strategy (optional) */\n partitionStrategy?: string;\n /** Partition columns in data file (optional) */\n partitionColumnsInDataFile?: string;\n};\n\n/**\n * Configuration for Buffer engine\n * @template T The data type of the records stored in the table.\n */\nexport type BufferConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Buffer;\n /** Target database name for the destination table */\n targetDatabase: string;\n /** Target table name where data will be flushed */\n targetTable: string;\n /** Number of buffer layers (typically 16) */\n numLayers: number;\n /** Minimum time in seconds before flushing */\n minTime: number;\n /** Maximum time in seconds before flushing */\n maxTime: number;\n /** Minimum number of rows before flushing */\n minRows: number;\n /** Maximum number of rows before flushing */\n maxRows: number;\n /** Minimum bytes before flushing */\n minBytes: number;\n /** Maximum bytes before flushing */\n maxBytes: number;\n /** Optional: Flush time in seconds */\n flushTime?: number;\n /** Optional: Flush number of rows */\n flushRows?: number;\n /** Optional: Flush number of bytes */\n flushBytes?: number;\n};\n\n/**\n * Configuration for Distributed engine\n * @template T The data type of the records 
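A sketch of a Buffer table per the shape above (the numbers are illustrative; note the `Omit<>` strips ORDER BY/PARTITION BY options, which Buffer does not support):

```typescript
import { ClickHouseEngines, OlapTable } from "@514labs/moose-lib";

interface Hit {
  id: string;
  ts: Date;
}

const hitsBuffer = new OlapTable<Hit>("hits_buffer", {
  engine: ClickHouseEngines.Buffer,
  targetDatabase: "analytics",
  targetTable: "hits", // rows flush into analytics.hits
  numLayers: 16,
  minTime: 10,
  maxTime: 100, // seconds
  minRows: 10_000,
  maxRows: 1_000_000,
  minBytes: 10_000_000,
  maxBytes: 100_000_000,
});
```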
stored in the table.\n */\nexport type DistributedConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Distributed;\n /** Cluster name from the ClickHouse configuration */\n cluster: string;\n /** Database name on the cluster */\n targetDatabase: string;\n /** Table name on the cluster */\n targetTable: string;\n /** Optional: Sharding key expression for data distribution */\n shardingKey?: string;\n /** Optional: Policy name for data distribution */\n policyName?: string;\n};\n\n/** Kafka table settings. See: https://clickhouse.com/docs/engines/table-engines/integrations/kafka */\nexport interface KafkaTableSettings {\n kafka_security_protocol?: \"PLAINTEXT\" | \"SSL\" | \"SASL_PLAINTEXT\" | \"SASL_SSL\";\n kafka_sasl_mechanism?:\n | \"GSSAPI\"\n | \"PLAIN\"\n | \"SCRAM-SHA-256\"\n | \"SCRAM-SHA-512\"\n | \"OAUTHBEARER\";\n kafka_sasl_username?: string;\n kafka_sasl_password?: string;\n kafka_schema?: string;\n kafka_num_consumers?: string;\n kafka_max_block_size?: string;\n kafka_skip_broken_messages?: string;\n kafka_commit_every_batch?: string;\n kafka_client_id?: string;\n kafka_poll_timeout_ms?: string;\n kafka_poll_max_batch_size?: string;\n kafka_flush_interval_ms?: string;\n kafka_consumer_reschedule_ms?: string;\n kafka_thread_per_consumer?: string;\n kafka_handle_error_mode?: \"default\" | \"stream\";\n kafka_commit_on_select?: string;\n kafka_max_rows_per_message?: string;\n kafka_compression_codec?: string;\n kafka_compression_level?: string;\n}\n\n/** Kafka engine for streaming data from Kafka topics. Additional settings go in `settings`. */\nexport type KafkaConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Kafka;\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n settings?: KafkaTableSettings;\n};\n\n/**\n * Configuration for IcebergS3 engine - read-only Iceberg table access\n *\n * Provides direct querying of Apache Iceberg tables stored on S3.\n * Data is not copied; queries stream directly from Parquet/ORC files.\n *\n * @template T The data type of the records stored in the table.\n *\n * @example\n * ```typescript\n * const lakeEvents = new OlapTable<Event>(\"lake_events\", {\n * engine: ClickHouseEngines.IcebergS3,\n * path: \"s3://datalake/events/\",\n * format: \"Parquet\",\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\"),\n * awsSecretAccessKey: mooseRuntimeEnv.get(\"AWS_SECRET_ACCESS_KEY\")\n * });\n * ```\n *\n * @remarks\n * - IcebergS3 engine is read-only\n * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses\n * - Queries always see the latest Iceberg snapshot (with metadata cache)\n */\nexport type IcebergS3Config<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.IcebergS3;\n /** S3 path to Iceberg table root (e.g., 's3://bucket/warehouse/events/') */\n path: string;\n /** Data format - 'Parquet' or 'ORC' */\n format: \"Parquet\" | \"ORC\";\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key (optional) */\n awsSecretAccessKey?: string;\n /** Compression type (optional: 'gzip', 'zstd', 'auto') */\n compression?: string;\n};\n\n/**\n * Legacy configuration (backward compatibility) - defaults to MergeTree engine\n 
* @template T The data type of the records stored in the table.\n */\nexport type LegacyOlapConfig<T> = BaseOlapConfig<T>;\n\ntype EngineConfig<T> =\n | MergeTreeConfig<T>\n | ReplacingMergeTreeConfig<T>\n | AggregatingMergeTreeConfig<T>\n | SummingMergeTreeConfig<T>\n | CollapsingMergeTreeConfig<T>\n | VersionedCollapsingMergeTreeConfig<T>\n | ReplicatedMergeTreeConfig<T>\n | ReplicatedReplacingMergeTreeConfig<T>\n | ReplicatedAggregatingMergeTreeConfig<T>\n | ReplicatedSummingMergeTreeConfig<T>\n | ReplicatedCollapsingMergeTreeConfig<T>\n | ReplicatedVersionedCollapsingMergeTreeConfig<T>\n | S3QueueConfig<T>\n | S3Config<T>\n | BufferConfig<T>\n | DistributedConfig<T>\n | IcebergS3Config<T>\n | KafkaConfig<T>;\n\n/**\n * Union of all engine-specific configurations (new API)\n * @template T The data type of the records stored in the table.\n */\nexport type OlapConfig<T> = EngineConfig<T> | LegacyOlapConfig<T>;\n\n/**\n * Represents an OLAP (Online Analytical Processing) table, typically corresponding to a ClickHouse table.\n * Provides a typed interface for interacting with the table.\n *\n * @template T The data type of the records stored in the table. The structure of T defines the table schema.\n */\nexport class OlapTable<T> extends TypedBase<T, OlapConfig<T>> {\n name: IdentifierBrandedString;\n\n /** @internal */\n public readonly kind = \"OlapTable\";\n\n /** @internal Memoized ClickHouse client for reusing connections across insert calls */\n private _memoizedClient?: any;\n /** @internal Hash of the configuration used to create the memoized client */\n private _configHash?: string;\n /** @internal Cached table name to avoid repeated generation */\n private _cachedTableName?: string;\n\n /**\n * Creates a new OlapTable instance.\n * @param name The name of the table. 
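Since `OlapConfig<T>` is the union of the engine-specific configs and the legacy shape, both call styles below should type-check; the constructor (shown next) normalizes the legacy form by injecting `engine: MergeTree`. A sketch reusing the illustrative `ClickEvent` type, with `ver` taken from the `ReplacingMergeTree` note later in this file:

```typescript
// Legacy shape: no `engine` key; resolved to MergeTree at construction.
const events = new OlapTable<ClickEvent>("events", {
  orderByFields: ["id", "occurredAt"],
});

// New API: the `engine` discriminant selects an engine-specific config.
const latestEvents = new OlapTable<ClickEvent>("events_latest", {
  engine: ClickHouseEngines.ReplacingMergeTree,
  orderByFields: ["id"],
  ver: "occurredAt",
});
```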
This name is used for the underlying ClickHouse table.\n * @param config Optional configuration for the OLAP table.\n */\n constructor(name: string, config?: OlapConfig<T>);\n\n /** @internal **/\n constructor(\n name: string,\n config: OlapConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators?: TypiaValidators<T>,\n );\n\n constructor(\n name: string,\n config?: OlapConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n ) {\n // Handle legacy configuration by defaulting to MergeTree when no engine is specified\n const resolvedConfig =\n config ?\n \"engine\" in config ?\n config\n : { ...config, engine: ClickHouseEngines.MergeTree }\n : { engine: ClickHouseEngines.MergeTree };\n\n // Enforce mutual exclusivity at runtime as well\n const hasFields =\n Array.isArray((resolvedConfig as any).orderByFields) &&\n (resolvedConfig as any).orderByFields.length > 0;\n const hasExpr =\n typeof (resolvedConfig as any).orderByExpression === \"string\" &&\n (resolvedConfig as any).orderByExpression.length > 0;\n if (hasFields && hasExpr) {\n throw new Error(\n `OlapTable ${name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n\n // Validate cluster and explicit replication params are not both specified\n const hasCluster = typeof (resolvedConfig as any).cluster === \"string\";\n const hasKeeperPath =\n typeof (resolvedConfig as any).keeperPath === \"string\";\n const hasReplicaName =\n typeof (resolvedConfig as any).replicaName === \"string\";\n\n if (hasCluster && (hasKeeperPath || hasReplicaName)) {\n throw new Error(\n `OlapTable ${name}: Cannot specify both 'cluster' and explicit replication params ('keeperPath' or 'replicaName'). ` +\n `Use 'cluster' for auto-injected params, or use explicit 'keeperPath' and 'replicaName' without 'cluster'.`,\n );\n }\n\n super(name, resolvedConfig, schema, columns, validators);\n this.name = name;\n\n const tables = getMooseInternal().tables;\n const registryKey =\n this.config.version ? `${name}_${this.config.version}` : name;\n // In client-only mode (MOOSE_CLIENT_ONLY=true), allow duplicate registrations\n // to support Next.js HMR which re-executes modules without clearing the registry\n if (!isClientOnlyMode() && tables.has(registryKey)) {\n throw new Error(\n `OlapTable with name ${name} and version ${config?.version ?? \"unversioned\"} already exists`,\n );\n }\n tables.set(registryKey, this);\n }\n\n /**\n * Generates the versioned table name following Moose's naming convention\n * Format: {tableName}_{version_with_dots_replaced_by_underscores}\n */\n private generateTableName(): string {\n // Cache the table name since version rarely changes\n if (this._cachedTableName) {\n return this._cachedTableName;\n }\n\n const tableVersion = this.config.version;\n if (!tableVersion) {\n this._cachedTableName = this.name;\n } else {\n const versionSuffix = tableVersion.replace(/\\./g, \"_\");\n this._cachedTableName = `${this.name}_${versionSuffix}`;\n }\n\n return this._cachedTableName;\n }\n\n /**\n * Creates a fast hash of the ClickHouse configuration.\n * Uses crypto.createHash for better performance than JSON.stringify.\n *\n * @private\n */\n private createConfigHash(clickhouseConfig: any): string {\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? 
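Two of the constructor's runtime guards are easiest to see by counterexample; both declarations below would throw with the messages constructed above (the table names and keeper path are illustrative):

```typescript
// Throws: orderByFields and orderByExpression are mutually exclusive.
new OlapTable<ClickEvent>("bad_order", {
  engine: ClickHouseEngines.MergeTree,
  orderByFields: ["id"],
  orderByExpression: "(id, occurredAt)",
});

// Throws: `cluster` auto-injects replication params, so an explicit
// keeperPath or replicaName cannot be combined with it.
new OlapTable<ClickEvent>("bad_replicated", {
  engine: ClickHouseEngines.ReplicatedMergeTree,
  cluster: "my_cluster",
  keeperPath: "/clickhouse/tables/{shard}/bad_replicated",
});
```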
clickhouseConfig.database;\n const configString = `${clickhouseConfig.host}:${clickhouseConfig.port}:${clickhouseConfig.username}:${clickhouseConfig.password}:${effectiveDatabase}:${clickhouseConfig.useSSL}`;\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized ClickHouse client.\n * The client is cached and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be created.\n *\n * @private\n */\n private async getMemoizedClient(): Promise<{\n client: NodeClickHouseClient;\n config: RuntimeClickHouseConfig;\n }> {\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getClickhouseClient } = await import(\"../../commons\");\n\n const clickhouseConfig = await configRegistry.getClickHouseConfig();\n const currentConfigHash = this.createConfigHash(clickhouseConfig);\n\n // If we have a cached client and the config hasn't changed, reuse it\n if (this._memoizedClient && this._configHash === currentConfigHash) {\n return { client: this._memoizedClient, config: clickhouseConfig };\n }\n\n // Close existing client if config changed\n if (this._memoizedClient && this._configHash !== currentConfigHash) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing old client\n }\n }\n\n // Create new client with standard configuration\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const client = getClickhouseClient({\n username: clickhouseConfig.username,\n password: clickhouseConfig.password,\n database: effectiveDatabase,\n useSSL: clickhouseConfig.useSSL ? \"true\" : \"false\",\n host: clickhouseConfig.host,\n port: clickhouseConfig.port,\n });\n\n // Cache the new client and config hash\n this._memoizedClient = client;\n this._configHash = currentConfigHash;\n\n return { client, config: clickhouseConfig };\n }\n\n /**\n * Closes the memoized ClickHouse client if it exists.\n * This is useful for cleaning up connections when the table instance is no longer needed.\n * The client will be automatically recreated on the next insert call if needed.\n */\n async closeClient(): Promise<void> {\n if (this._memoizedClient) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing\n } finally {\n this._memoizedClient = undefined;\n this._configHash = undefined;\n }\n }\n }\n\n /**\n * Validates a single record using typia's comprehensive type checking.\n * This provides the most accurate validation as it uses the exact TypeScript type information.\n *\n * @param record The record to validate\n * @returns Validation result with detailed error information\n */\n validateRecord(record: unknown): {\n success: boolean;\n data?: T;\n errors?: string[];\n } {\n // Use injected typia validator if available\n if (this.validators?.validate) {\n try {\n const result = this.validators.validate(record);\n return {\n success: result.success,\n data: result.data,\n errors: result.errors?.map((err) =>\n typeof err === \"string\" ? err : JSON.stringify(err),\n ),\n };\n } catch (error) {\n return {\n success: false,\n errors: [error instanceof Error ? 
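In practice the memoization means back-to-back inserts share one connection until the hashed host/port/credentials/database tuple changes. A usage sketch (the `User` shape is illustrative):

```typescript
interface User {
  id: number;
  name: string;
}

const users = new OlapTable<User>("users");

// The first insert lazily creates the ClickHouse client; the second
// reuses it because the config hash is unchanged.
await users.insert([{ id: 1, name: "Ada" }]);
await users.insert([{ id: 2, name: "Lin" }]);

// Optional cleanup; the next insert would transparently recreate it.
await users.closeClient();
```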
error.message : String(error)],\n };\n }\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Type guard function using typia's is() function.\n * Provides compile-time type narrowing for TypeScript.\n *\n * @param record The record to check\n * @returns True if record matches type T, with type narrowing\n */\n isValidRecord(record: unknown): record is T {\n if (this.validators?.is) {\n return this.validators.is(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Assert that a record matches type T, throwing detailed errors if not.\n * Uses typia's assert() function for the most detailed error reporting.\n *\n * @param record The record to assert\n * @returns The validated and typed record\n * @throws Detailed validation error if record doesn't match type T\n */\n assertValidRecord(record: unknown): T {\n if (this.validators?.assert) {\n return this.validators.assert(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Validates an array of records with comprehensive error reporting.\n * Uses the most appropriate validation method available (typia or basic).\n *\n * @param data Array of records to validate\n * @returns Detailed validation results\n */\n async validateRecords(data: unknown[]): Promise<ValidationResult<T>> {\n const valid: T[] = [];\n const invalid: ValidationError[] = [];\n\n // Use for loop instead of forEach for better performance\n const dataLength = data.length;\n for (let i = 0; i < dataLength; i++) {\n const record = data[i];\n\n try {\n // Fast path: use typia's is() function first for type checking\n if (this.isValidRecord(record)) {\n valid.push(this.mapToClickhouseRecord(record));\n } else {\n // Only use expensive validateRecord for detailed errors when needed\n const result = this.validateRecord(record);\n if (result.success) {\n valid.push(this.mapToClickhouseRecord(record));\n } else {\n invalid.push({\n record,\n error: result.errors?.join(\", \") || \"Validation failed\",\n index: i,\n path: \"root\",\n });\n }\n }\n } catch (error) {\n invalid.push({\n record,\n error: error instanceof Error ? 
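The three validators target different call sites: `isValidRecord` for cheap boolean narrowing, `validateRecord` for detailed errors, and `assertValidRecord` for assert-or-throw. A sketch, assuming the typia validators were injected at build time (each method throws "No typia validator found" otherwise):

```typescript
const rawBody = '{"id":1,"name":"Ada"}'; // illustrative payload
const payload: unknown = JSON.parse(rawBody);

if (users.isValidRecord(payload)) {
  // payload is narrowed to User within this branch
} else {
  const { errors } = users.validateRecord(payload);
  console.warn("rejected record:", errors);
}

// Or fail loudly with typia's detailed error message:
const user = users.assertValidRecord(payload);
```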
error.message : String(error),\n index: i,\n path: \"root\",\n });\n }\n }\n\n return {\n valid,\n invalid,\n total: dataLength,\n };\n }\n\n /**\n * Optimized batch retry that minimizes individual insert operations.\n * Groups records into smaller batches to reduce round trips while still isolating failures.\n *\n * @private\n */\n private async retryIndividualRecords(\n client: any,\n tableName: string,\n records: T[],\n ): Promise<{ successful: T[]; failed: FailedRecord<T>[] }> {\n const successful: T[] = [];\n const failed: FailedRecord<T>[] = [];\n\n // Instead of individual inserts, try smaller batches first (batches of 10)\n const RETRY_BATCH_SIZE = 10;\n const totalRecords = records.length;\n\n for (let i = 0; i < totalRecords; i += RETRY_BATCH_SIZE) {\n const batchEnd = Math.min(i + RETRY_BATCH_SIZE, totalRecords);\n const batch = records.slice(i, batchEnd);\n\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: batch,\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n // Add performance settings for retries\n max_insert_block_size: RETRY_BATCH_SIZE,\n max_block_size: RETRY_BATCH_SIZE,\n },\n });\n successful.push(...batch);\n } catch (batchError) {\n // If small batch fails, fall back to individual records\n for (let j = 0; j < batch.length; j++) {\n const record = batch[j];\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: [record],\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n },\n });\n successful.push(record);\n } catch (error) {\n failed.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i + j,\n });\n }\n }\n }\n }\n\n return { successful, failed };\n }\n\n /**\n * Validates input parameters and strategy compatibility\n * @private\n */\n private validateInsertParameters(\n data: T[] | Readable,\n options?: InsertOptions,\n ): { isStream: boolean; strategy: string; shouldValidate: boolean } {\n const isStream = data instanceof Readable;\n const strategy = options?.strategy || \"fail-fast\";\n const shouldValidate = options?.validate !== false;\n\n // Validate strategy compatibility with streams\n if (isStream && strategy === \"isolate\") {\n throw new Error(\n \"The 'isolate' error strategy is not supported with stream input. Use 'fail-fast' or 'discard' instead.\",\n );\n }\n\n // Validate that validation is not attempted on streams\n if (isStream && shouldValidate) {\n console.warn(\n \"Validation is not supported with stream input. 
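Strategy choice interacts with the input shape: `isolate` relies on re-sending records, which is impossible for a one-shot stream, so it is rejected up front. A sketch (`mixedBatch` and `someReadable` are illustrative):

```typescript
// 'isolate' retries failures in batches of 10, then record by record,
// so a single poison record fails alone instead of the whole insert.
await users.insert(mixedBatch, { strategy: "isolate" });

// Streams cannot be replayed for retries; this throws immediately.
await users.insert(someReadable, { strategy: "isolate" });
```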
Validation will be skipped.\",\n );\n }\n\n return { isStream, strategy, shouldValidate };\n }\n\n /**\n * Handles early return cases for empty data\n * @private\n */\n private handleEmptyData(\n data: T[] | Readable,\n isStream: boolean,\n ): InsertResult<T> | null {\n if (isStream && !data) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n if (!isStream && (!data || (data as T[]).length === 0)) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n return null;\n }\n\n /**\n * Performs pre-insertion validation for array data\n * @private\n */\n private async performPreInsertionValidation(\n data: T[],\n shouldValidate: boolean,\n strategy: string,\n options?: InsertOptions,\n ): Promise<{ validatedData: T[]; validationErrors: ValidationError[] }> {\n if (!shouldValidate) {\n return { validatedData: data, validationErrors: [] };\n }\n\n try {\n const validationResult = await this.validateRecords(data as unknown[]);\n const validatedData = validationResult.valid;\n const validationErrors = validationResult.invalid;\n\n if (validationErrors.length > 0) {\n this.handleValidationErrors(validationErrors, strategy, data, options);\n\n // Return appropriate data based on strategy\n switch (strategy) {\n case \"discard\":\n return { validatedData, validationErrors };\n case \"isolate\":\n return { validatedData: data, validationErrors };\n default:\n return { validatedData, validationErrors };\n }\n }\n\n return { validatedData, validationErrors };\n } catch (validationError) {\n if (strategy === \"fail-fast\") {\n throw validationError;\n }\n console.warn(\"Validation error:\", validationError);\n return { validatedData: data, validationErrors: [] };\n }\n }\n\n /**\n * Handles validation errors based on the specified strategy\n * @private\n */\n private handleValidationErrors(\n validationErrors: ValidationError[],\n strategy: string,\n data: T[],\n options?: InsertOptions,\n ): void {\n switch (strategy) {\n case \"fail-fast\":\n const firstError = validationErrors[0];\n throw new Error(\n `Validation failed for record at index ${firstError.index}: ${firstError.error}`,\n );\n\n case \"discard\":\n this.checkValidationThresholds(validationErrors, data.length, options);\n break;\n\n case \"isolate\":\n // For isolate strategy, validation errors will be handled in the final result\n break;\n }\n }\n\n /**\n * Checks if validation errors exceed configured thresholds\n * @private\n */\n private checkValidationThresholds(\n validationErrors: ValidationError[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const validationFailedCount = validationErrors.length;\n const validationFailedRatio = validationFailedCount / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n validationFailedCount > options.allowErrors\n ) {\n throw new Error(\n `Too many validation failures: ${validationFailedCount} > ${options.allowErrors}. Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n validationFailedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Validation failure ratio too high: ${validationFailedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
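Under the `discard` strategy the two thresholds compose, and whichever is exceeded first aborts the insert; a sketch with illustrative numbers:

```typescript
await users.insert(batch, {
  strategy: "discard",
  allowErrors: 5,         // absolute cap on discarded records
  allowErrorsRatio: 0.02, // or at most 2% of the batch
});
```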
Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Optimized insert options preparation with better memory management\n * @private\n */\n private prepareInsertOptions(\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n isStream: boolean,\n strategy: string,\n options?: InsertOptions,\n ): any {\n const insertOptions: any = {\n table: quoteIdentifier(tableName),\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n wait_end_of_query: 1, // Ensure at least once delivery for INSERT operations\n // Performance optimizations\n max_insert_block_size:\n isStream ? 100000 : Math.min(validatedData.length, 100000),\n max_block_size: 65536,\n // Use async inserts for better performance with large datasets\n async_insert: validatedData.length > 1000 ? 1 : 0,\n wait_for_async_insert: 1, // For at least once delivery\n },\n };\n\n // Handle stream vs array input\n if (isStream) {\n insertOptions.values = data;\n } else {\n insertOptions.values = validatedData;\n }\n\n // For discard strategy, add optimized ClickHouse error tolerance settings\n if (\n strategy === \"discard\" &&\n (options?.allowErrors !== undefined ||\n options?.allowErrorsRatio !== undefined)\n ) {\n if (options.allowErrors !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_num =\n options.allowErrors;\n }\n\n if (options.allowErrorsRatio !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_ratio =\n options.allowErrorsRatio;\n }\n }\n\n return insertOptions;\n }\n\n /**\n * Creates success result for completed insertions\n * @private\n */\n private createSuccessResult(\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n strategy: string,\n ): InsertResult<T> {\n if (isStream) {\n return {\n successful: -1, // -1 indicates stream mode where count is unknown\n failed: 0,\n total: -1,\n };\n }\n\n const insertedCount = validatedData.length;\n const totalProcessed =\n shouldValidate ? (data as T[]).length : insertedCount;\n\n const result: InsertResult<T> = {\n successful: insertedCount,\n failed: shouldValidate ? validationErrors.length : 0,\n total: totalProcessed,\n };\n\n // Add failed records if there are validation errors and using discard strategy\n if (\n shouldValidate &&\n validationErrors.length > 0 &&\n strategy === \"discard\"\n ) {\n result.failedRecords = validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }));\n }\n\n return result;\n }\n\n /**\n * Handles insertion errors based on the specified strategy\n * @private\n */\n private async handleInsertionError(\n batchError: any,\n strategy: string,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n switch (strategy) {\n case \"fail-fast\":\n throw new Error(\n `Failed to insert data into table ${tableName}: ${batchError}`,\n );\n\n case \"discard\":\n throw new Error(\n `Too many errors during insert into table ${tableName}. 
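The `InsertResult` built by the success path can be inspected for partial failures when using `discard`; note that stream-mode inserts report -1 because the record count is unknown. A sketch:

```typescript
const result = await users.insert(batch, {
  strategy: "discard",
  allowErrorsRatio: 0.05,
});

console.log(`${result.successful}/${result.total} inserted, ${result.failed} failed`);
for (const f of result.failedRecords ?? []) {
  console.warn(`record #${f.index}: ${f.error}`);
}
```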
Error threshold exceeded: ${batchError}`,\n );\n\n case \"isolate\":\n return await this.handleIsolateStrategy(\n batchError,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n\n default:\n throw new Error(`Unknown error strategy: ${strategy}`);\n }\n }\n\n /**\n * Handles the isolate strategy for insertion errors\n * @private\n */\n private async handleIsolateStrategy(\n batchError: any,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n if (isStream) {\n throw new Error(\n `Isolate strategy is not supported with stream input: ${batchError}`,\n );\n }\n\n try {\n const { client } = await this.getMemoizedClient();\n const skipValidationOnRetry = options?.skipValidationOnRetry || false;\n const retryData = skipValidationOnRetry ? (data as T[]) : validatedData;\n\n const { successful, failed } = await this.retryIndividualRecords(\n client,\n tableName,\n retryData,\n );\n\n // Combine validation errors with insertion errors\n const allFailedRecords: FailedRecord<T>[] = [\n // Validation errors (if any and not skipping validation on retry)\n ...(shouldValidate && !skipValidationOnRetry ?\n validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }))\n : []),\n // Insertion errors\n ...failed,\n ];\n\n this.checkInsertionThresholds(\n allFailedRecords,\n (data as T[]).length,\n options,\n );\n\n return {\n successful: successful.length,\n failed: allFailedRecords.length,\n total: (data as T[]).length,\n failedRecords: allFailedRecords,\n };\n } catch (isolationError) {\n throw new Error(\n `Failed to insert data into table ${tableName} during record isolation: ${isolationError}`,\n );\n }\n }\n\n /**\n * Checks if insertion errors exceed configured thresholds\n * @private\n */\n private checkInsertionThresholds(\n failedRecords: FailedRecord<T>[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const totalFailed = failedRecords.length;\n const failedRatio = totalFailed / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n totalFailed > options.allowErrors\n ) {\n throw new Error(\n `Too many failed records: ${totalFailed} > ${options.allowErrors}. Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n failedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Failed record ratio too high: ${failedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
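During isolate-mode retries, `skipValidationOnRetry` decides whether the original records or only the client-side-validated ones are re-sent; a one-line sketch:

```typescript
// Re-send every original record individually, including those that
// failed local validation, and let ClickHouse have the final say.
await users.insert(batch, { strategy: "isolate", skipValidationOnRetry: true });
```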
Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Recursively transforms a record to match ClickHouse's JSONEachRow requirements\n *\n * - For every Array(Nested(...)) field at any depth, each item is wrapped in its own array and recursively processed.\n * - For every Nested struct (not array), it recurses into the struct.\n * - This ensures compatibility with kafka_clickhouse_sync\n *\n * @param record The input record to transform (may be deeply nested)\n * @param columns The schema columns for this level (defaults to this.columnArray at the top level)\n * @returns The transformed record, ready for ClickHouse JSONEachRow insertion\n */\n private mapToClickhouseRecord(\n record: any,\n columns: Column[] = this.columnArray,\n ): any {\n const result = { ...record };\n for (const col of columns) {\n const value = record[col.name];\n const dt = col.data_type;\n\n if (isArrayNestedType(dt)) {\n // For Array(Nested(...)), wrap each item in its own array and recurse\n if (\n Array.isArray(value) &&\n (value.length === 0 || typeof value[0] === \"object\")\n ) {\n result[col.name] = value.map((item) => [\n this.mapToClickhouseRecord(item, dt.elementType.columns),\n ]);\n }\n } else if (isNestedType(dt)) {\n // For Nested struct (not array), recurse into it\n if (value && typeof value === \"object\") {\n result[col.name] = this.mapToClickhouseRecord(value, dt.columns);\n }\n }\n // All other types: leave as is for now\n }\n return result;\n }\n\n /**\n * Inserts data directly into the ClickHouse table with enhanced error handling and validation.\n * This method establishes a direct connection to ClickHouse using the project configuration\n * and inserts the provided data into the versioned table.\n *\n * PERFORMANCE OPTIMIZATIONS:\n * - Memoized client connections with fast config hashing\n * - Single-pass validation with pre-allocated arrays\n * - Batch-optimized retry strategy (batches of 10, then individual)\n * - Optimized ClickHouse settings for large datasets\n * - Reduced memory allocations and object creation\n *\n * Uses advanced typia validation when available for comprehensive type checking,\n * with fallback to basic validation for compatibility.\n *\n * The ClickHouse client is memoized and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be automatically created.\n *\n * @param data Array of objects conforming to the table schema, or a Node.js Readable stream\n * @param options Optional configuration for error handling, validation, and insertion behavior\n * @returns Promise resolving to detailed insertion results\n * @throws {ConfigError} When configuration cannot be read or parsed\n * @throws {ClickHouseError} When insertion fails based on the error strategy\n * @throws {ValidationError} When validation fails and strategy is 'fail-fast'\n *\n * @example\n * ```typescript\n * // Create an OlapTable instance (typia validators auto-injected)\n * const userTable = new OlapTable<User>('users');\n *\n * // Insert with comprehensive typia validation\n * const result1 = await userTable.insert([\n * { id: 1, name: 'John', email: 'john@example.com' },\n * { id: 2, name: 'Jane', email: 'jane@example.com' }\n * ]);\n *\n * // Insert data with stream input (validation not available for streams)\n * const dataStream = new Readable({\n * objectMode: true,\n * read() { // Stream implementation }\n * });\n * const result2 = await userTable.insert(dataStream, { strategy: 'fail-fast' });\n *\n * 
// Insert with validation disabled for performance\n * const result3 = await userTable.insert(data, { validate: false });\n *\n * // Insert with error handling strategies\n * const result4 = await userTable.insert(mixedData, {\n * strategy: 'isolate',\n * allowErrorsRatio: 0.1,\n * validate: true // Use typia validation (default)\n * });\n *\n * // Optional: Clean up connection when completely done\n * await userTable.closeClient();\n * ```\n */\n async insert(\n data: T[] | Readable,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n // Validate input parameters and strategy compatibility\n const { isStream, strategy, shouldValidate } =\n this.validateInsertParameters(data, options);\n\n // Handle early return cases for empty data\n const emptyResult = this.handleEmptyData(data, isStream);\n if (emptyResult) {\n return emptyResult;\n }\n\n // Pre-insertion validation for arrays (optimized single-pass)\n let validatedData: T[] = [];\n let validationErrors: ValidationError[] = [];\n\n if (!isStream && shouldValidate) {\n const validationResult = await this.performPreInsertionValidation(\n data as T[],\n shouldValidate,\n strategy,\n options,\n );\n validatedData = validationResult.validatedData;\n validationErrors = validationResult.validationErrors;\n } else {\n // No validation or stream input\n validatedData = isStream ? [] : (data as T[]);\n }\n\n // Get memoized client and generate cached table name\n const { client } = await this.getMemoizedClient();\n const tableName = this.generateTableName();\n\n try {\n // Prepare and execute insertion with optimized settings\n const insertOptions = this.prepareInsertOptions(\n tableName,\n data,\n validatedData,\n isStream,\n strategy,\n options,\n );\n\n await client.insert(insertOptions);\n\n // Return success result\n return this.createSuccessResult(\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n strategy,\n );\n } catch (batchError) {\n // Handle insertion failure based on strategy with optimized retry\n return await this.handleInsertionError(\n batchError,\n strategy,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n }\n // Note: We don't close the client here since it's memoized for reuse\n // Use closeClient() method if you need to explicitly close the connection\n }\n\n // Note: Static factory methods (withS3Queue, withReplacingMergeTree, withMergeTree)\n // were removed in ENG-856. 
Use direct configuration instead, e.g.:\n // new OlapTable(name, { engine: ClickHouseEngines.ReplacingMergeTree, orderByFields: [\"id\"], ver: \"updated_at\" })\n}\n","/**\n * @fileoverview Stream SDK for data streaming operations in Moose.\n *\n * This module provides the core streaming functionality including:\n * - Stream creation and configuration\n * - Message transformations between streams\n * - Consumer registration for message processing\n * - Dead letter queue handling for error recovery\n *\n * @module Stream\n */\n\nimport { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { dlqColumns, dlqSchema, getMooseInternal } from \"../internal\";\nimport { OlapTable } from \"./olapTable\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport type {\n RuntimeKafkaConfig,\n ConfigurationRegistry,\n} from \"../../config/runtime\";\nimport { createHash } from \"node:crypto\";\nimport { Logger, Producer } from \"../../commons\";\nimport { getSourceFileFromStack } from \"../utils/stackTrace\";\n\n/**\n * Represents zero, one, or many values of type T.\n * Used for flexible return types in transformations where a single input\n * can produce no output, one output, or multiple outputs.\n *\n * @template T The type of the value(s)\n * @example\n * ```typescript\n * // Can return a single value\n * const single: ZeroOrMany<string> = \"hello\";\n *\n * // Can return an array\n * const multiple: ZeroOrMany<string> = [\"hello\", \"world\"];\n *\n * // Can return null/undefined to filter out\n * const filtered: ZeroOrMany<string> = null;\n * ```\n */\nexport type ZeroOrMany<T> = T | T[] | undefined | null;\n\n/**\n * Function type for transforming records from one type to another.\n * Supports both synchronous and asynchronous transformations.\n *\n * @template T The input record type\n * @template U The output record type\n * @param record The input record to transform\n * @returns The transformed record(s), or null/undefined to filter out\n *\n * @example\n * ```typescript\n * const transform: SyncOrAsyncTransform<InputType, OutputType> = (record) => {\n * return { ...record, processed: true };\n * };\n * ```\n */\nexport type SyncOrAsyncTransform<T, U> = (\n record: T,\n) => ZeroOrMany<U> | Promise<ZeroOrMany<U>>;\n\n/**\n * Function type for consuming records without producing output.\n * Used for side effects like logging, external API calls, or database writes.\n *\n * @template T The record type to consume\n * @param record The record to process\n * @returns Promise<void> or void\n *\n * @example\n * ```typescript\n * const consumer: Consumer<UserEvent> = async (event) => {\n * await sendToAnalytics(event);\n * };\n * ```\n */\nexport type Consumer<T> = (record: T) => Promise<void> | void;\n\n/**\n * Configuration options for stream transformations.\n *\n * @template T The type of records being transformed\n */\nexport interface TransformConfig<T> {\n /**\n * Optional version identifier for this transformation.\n * Multiple transformations to the same destination can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional metadata for documentation and tracking purposes.\n */\n metadata?: { description?: string };\n\n /**\n * Optional dead letter queue for handling transformation failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is 
provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this transform was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\n/**\n * Configuration options for stream consumers.\n *\n * @template T The type of records being consumed\n */\nexport interface ConsumerConfig<T> {\n /**\n * Optional version identifier for this consumer.\n * Multiple consumers can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional dead letter queue for handling consumer failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this consumer was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\nexport type SchemaRegistryEncoding = \"JSON\" | \"AVRO\" | \"PROTOBUF\";\n\nexport type SchemaRegistryReference =\n | { id: number }\n | { subjectLatest: string }\n | { subject: string; version: number };\n\nexport interface KafkaSchemaConfig {\n kind: SchemaRegistryEncoding;\n reference: SchemaRegistryReference;\n}\n\n/**\n * Represents a message routed to a specific destination stream.\n * Used internally by the multi-transform functionality to specify\n * where transformed messages should be sent.\n *\n * @internal\n */\nclass RoutedMessage {\n /** The destination stream for the message */\n destination: Stream<any>;\n\n /** The message value(s) to send */\n values: ZeroOrMany<any>;\n\n /**\n * Creates a new routed message.\n *\n * @param destination The target stream\n * @param values The message(s) to route\n */\n constructor(destination: Stream<any>, values: ZeroOrMany<any>) {\n this.destination = destination;\n this.values = values;\n }\n}\n\n/**\n * Configuration options for a data stream (e.g., a Redpanda topic).\n * @template T The data type of the messages in the stream.\n */\nexport interface StreamConfig<T> {\n /**\n * Specifies the number of partitions for the stream. Affects parallelism and throughput.\n */\n parallelism?: number;\n /**\n * Specifies the data retention period for the stream in seconds. Messages older than this may be deleted.\n */\n retentionPeriod?: number;\n /**\n * An optional destination OLAP table where messages from this stream should be automatically ingested.\n */\n destination?: OlapTable<T>;\n /**\n * An optional version string for this configuration. Can be used for tracking changes or managing deployments.\n */\n version?: string;\n metadata?: { description?: string };\n lifeCycle?: LifeCycle;\n\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n\n /** Optional Schema Registry configuration for this stream */\n schemaConfig?: KafkaSchemaConfig;\n}\n\n/**\n * Represents a data stream, typically corresponding to a Redpanda topic.\n * Provides a typed interface for producing to and consuming from the stream, and defining transformations.\n *\n * @template T The data type of the messages flowing through the stream. 
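Putting `StreamConfig` together with the Schema Registry options, a stream wired to a destination table might be declared like this (the sizing, subject name, and the `events` table from the earlier sketch are illustrative; `send()` below only accepts `kind: "JSON"` so far):

```typescript
const clickStream = new Stream<ClickEvent>("click_events", {
  parallelism: 4,
  retentionPeriod: 60 * 60 * 24 * 7, // seconds
  destination: events,
  schemaConfig: {
    kind: "JSON",
    reference: { subjectLatest: "click_events-value" },
  },
});
```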
The structure of T defines the message schema.\n */\nexport class Stream<T> extends TypedBase<T, StreamConfig<T>> {\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n /** @internal Memoized KafkaJS producer for reusing connections across sends */\n private _memoizedProducer?: Producer;\n /** @internal Hash of the configuration used to create the memoized Kafka producer */\n private _kafkaConfigHash?: string;\n\n /**\n * Creates a new Stream instance.\n * @param name The name of the stream. This name is used for the underlying Redpanda topic.\n * @param config Optional configuration for the stream.\n */\n constructor(name: string, config?: StreamConfig<T>);\n\n /**\n * @internal\n * Note: `validators` parameter is a positional placeholder (always undefined for Stream).\n * It exists because TypedBase has validators as the 5th param, and we need to pass\n * allowExtraFields as the 6th param. Stream doesn't use validators.\n */\n constructor(\n name: string,\n config: StreamConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: undefined,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: undefined,\n allowExtraFields?: boolean,\n ) {\n super(name, config ?? {}, schema, columns, undefined, allowExtraFields);\n const streams = getMooseInternal().streams;\n if (streams.has(name)) {\n throw new Error(`Stream with name ${name} already exists`);\n }\n streams.set(name, this);\n this.defaultDeadLetterQueue = this.config.defaultDeadLetterQueue;\n }\n\n /**\n * Internal map storing transformation configurations.\n * Maps destination stream names to arrays of transformation functions and their configs.\n *\n * @internal\n */\n _transformations = new Map<\n string,\n [Stream<any>, SyncOrAsyncTransform<T, any>, TransformConfig<T>][]\n >();\n\n /**\n * Internal function for multi-stream transformations.\n * Allows a single transformation to route messages to multiple destinations.\n *\n * @internal\n */\n _multipleTransformations?: (record: T) => [RoutedMessage];\n\n /**\n * Internal array storing consumer configurations.\n *\n * @internal\n */\n _consumers = new Array<{\n consumer: Consumer<T>;\n config: ConsumerConfig<T>;\n }>();\n\n /**\n * Builds the full Kafka topic name including optional namespace and version suffix.\n * Version suffix is appended as _x_y_z where dots in version are replaced with underscores.\n */\n private buildFullTopicName(namespace?: string): string {\n const versionSuffix =\n this.config.version ? 
`_${this.config.version.replace(/\\./g, \"_\")}` : \"\";\n const base = `${this.name}${versionSuffix}`;\n return namespace !== undefined && namespace.length > 0 ?\n `${namespace}.${base}`\n : base;\n }\n\n /**\n * Creates a fast hash string from relevant Kafka configuration fields.\n */\n private createConfigHash(kafkaConfig: RuntimeKafkaConfig): string {\n const configString = [\n kafkaConfig.broker,\n kafkaConfig.messageTimeoutMs,\n kafkaConfig.saslUsername,\n kafkaConfig.saslPassword,\n kafkaConfig.saslMechanism,\n kafkaConfig.securityProtocol,\n kafkaConfig.namespace,\n ].join(\":\");\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized KafkaJS producer using runtime configuration.\n */\n private async getMemoizedProducer(): Promise<{\n producer: Producer;\n kafkaConfig: RuntimeKafkaConfig;\n }> {\n // dynamic import to keep Stream objects browser compatible\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getKafkaProducer } = await import(\"../../commons\");\n\n const kafkaConfig = await (configRegistry as any).getKafkaConfig();\n const currentHash = this.createConfigHash(kafkaConfig);\n\n if (this._memoizedProducer && this._kafkaConfigHash === currentHash) {\n return { producer: this._memoizedProducer, kafkaConfig };\n }\n\n // Close existing producer if config changed\n if (this._memoizedProducer && this._kafkaConfigHash !== currentHash) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n }\n this._memoizedProducer = undefined;\n }\n\n const clientId = `moose-sdk-stream-${this.name}`;\n const logger: Logger = {\n logPrefix: clientId,\n log: (message: string): void => {\n console.log(`${clientId}: ${message}`);\n },\n error: (message: string): void => {\n console.error(`${clientId}: ${message}`);\n },\n warn: (message: string): void => {\n console.warn(`${clientId}: ${message}`);\n },\n };\n\n const producer = await getKafkaProducer(\n {\n clientId,\n broker: kafkaConfig.broker,\n securityProtocol: kafkaConfig.securityProtocol,\n saslUsername: kafkaConfig.saslUsername,\n saslPassword: kafkaConfig.saslPassword,\n saslMechanism: kafkaConfig.saslMechanism,\n },\n logger,\n );\n\n this._memoizedProducer = producer;\n this._kafkaConfigHash = currentHash;\n\n return { producer, kafkaConfig };\n }\n\n /**\n * Closes the memoized Kafka producer if it exists.\n */\n async closeProducer(): Promise<void> {\n if (this._memoizedProducer) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n } finally {\n this._memoizedProducer = undefined;\n this._kafkaConfigHash = undefined;\n }\n }\n }\n\n /**\n * Sends one or more records to this stream's Kafka topic.\n * Values are JSON-serialized as message values.\n */\n async send(values: ZeroOrMany<T>): Promise<void> {\n // Normalize to flat array of records\n const flat: T[] =\n Array.isArray(values) ? values\n : values !== undefined && values !== null ? 
[values as T]\n : [];\n\n if (flat.length === 0) return;\n\n const { producer, kafkaConfig } = await this.getMemoizedProducer();\n const topic = this.buildFullTopicName(kafkaConfig.namespace);\n\n // Use Schema Registry JSON envelope if configured\n const sr = this.config.schemaConfig;\n if (sr && sr.kind === \"JSON\") {\n const schemaRegistryUrl = kafkaConfig.schemaRegistryUrl;\n if (!schemaRegistryUrl) {\n throw new Error(\"Schema Registry URL not configured\");\n }\n\n const {\n default: { SchemaRegistry },\n } = await import(\"@kafkajs/confluent-schema-registry\");\n const registry = new SchemaRegistry({ host: schemaRegistryUrl });\n\n let schemaId: undefined | number = undefined;\n\n if (\"id\" in sr.reference) {\n schemaId = sr.reference.id;\n } else if (\"subjectLatest\" in sr.reference) {\n schemaId = await registry.getLatestSchemaId(sr.reference.subjectLatest);\n } else if (\"subject\" in sr.reference) {\n schemaId = await registry.getRegistryId(\n sr.reference.subject,\n sr.reference.version,\n );\n }\n\n if (schemaId === undefined) {\n throw new Error(\"Malformed schema reference.\");\n }\n\n const encoded = await Promise.all(\n flat.map((v) =>\n registry.encode(schemaId, v as unknown as Record<string, unknown>),\n ),\n );\n await producer.send({\n topic,\n messages: encoded.map((value) => ({ value })),\n });\n return;\n } else if (sr !== undefined) {\n throw new Error(\"Currently only JSON Schema is supported.\");\n }\n\n await producer.send({\n topic,\n messages: flat.map((v) => ({ value: JSON.stringify(v) })),\n });\n }\n\n /**\n * Adds a transformation step that processes messages from this stream and sends the results to a destination stream.\n * Multiple transformations to the same destination stream can be added if they have distinct `version` identifiers in their config.\n *\n * @template U The data type of the messages in the destination stream.\n * @param destination The destination stream for the transformed messages.\n * @param transformation A function that takes a message of type T and returns zero or more messages of type U (or a Promise thereof).\n * Return `null` or `undefined` or an empty array `[]` to filter out a message. Return an array to emit multiple messages.\n * @param config Optional configuration for this specific transformation step, like a version.\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<T, U>,\n config?: TransformConfig<T>,\n ) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const transformConfig: TransformConfig<T> = {\n ...(config ?? 
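Because `send()` takes `ZeroOrMany<T>`, singles, arrays, and empty values all behave predictably (the records shown are illustrative):

```typescript
await clickStream.send({ id: "1", url: "/", occurredAt: new Date() });
await clickStream.send([evtA, evtB]); // one Kafka message per record
await clickStream.send(null);         // no-op; nothing is produced
```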
{}),\n sourceFile,\n };\n if (transformConfig.deadLetterQueue === undefined) {\n transformConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n\n if (this._transformations.has(destination.name)) {\n const existingTransforms = this._transformations.get(destination.name)!;\n const hasVersion = existingTransforms.some(\n ([_, __, cfg]) => cfg.version === transformConfig.version,\n );\n\n if (!hasVersion) {\n existingTransforms.push([destination, transformation, transformConfig]);\n }\n } else {\n this._transformations.set(destination.name, [\n [destination, transformation, transformConfig],\n ]);\n }\n }\n\n /**\n * Adds a consumer function that processes messages from this stream.\n * Multiple consumers can be added if they have distinct `version` identifiers in their config.\n *\n * @param consumer A function that takes a message of type T and performs an action (e.g., side effect, logging). Should return void or Promise<void>.\n * @param config Optional configuration for this specific consumer, like a version.\n */\n addConsumer(consumer: Consumer<T>, config?: ConsumerConfig<T>) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const consumerConfig: ConsumerConfig<T> = {\n ...(config ?? {}),\n sourceFile,\n };\n if (consumerConfig.deadLetterQueue === undefined) {\n consumerConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n const hasVersion = this._consumers.some(\n (existing) => existing.config.version === consumerConfig.version,\n );\n\n if (!hasVersion) {\n this._consumers.push({ consumer, config: consumerConfig });\n }\n }\n\n /**\n * Helper method for `addMultiTransform` to specify the destination and values for a routed message.\n * @param values The value or values to send to this stream.\n * @returns A `RoutedMessage` object associating the values with this stream.\n *\n * @example\n * ```typescript\n * sourceStream.addMultiTransform((record) => [\n * destinationStream1.routed(transformedRecord1),\n * destinationStream2.routed([record2a, record2b])\n * ]);\n * ```\n */\n routed = (values: ZeroOrMany<T>) => new RoutedMessage(this, values);\n\n /**\n * Adds a single transformation function that can route messages to multiple destination streams.\n * This is an alternative to adding multiple individual `addTransform` calls.\n * Only one multi-transform function can be added per stream.\n *\n * @param transformation A function that takes a message of type T and returns an array of `RoutedMessage` objects,\n * each specifying a destination stream and the message(s) to send to it.\n */\n addMultiTransform(transformation: (record: T) => [RoutedMessage]) {\n this._multipleTransformations = transformation;\n }\n}\n\n/**\n * Base model for dead letter queue entries.\n * Contains the original failed record along with error information.\n */\nexport interface DeadLetterModel {\n /** The original record that failed processing */\n originalRecord: Record<string, any>;\n\n /** Human-readable error message describing the failure */\n errorMessage: string;\n\n /** Classification of the error type (e.g., \"ValidationError\", \"TransformError\") */\n errorType: string;\n\n /** Timestamp when the failure occurred */\n failedAt: Date;\n\n /** The source component where the failure occurred */\n source: \"api\" | \"transform\" | \"table\";\n}\n\n/**\n * Enhanced dead letter model with type recovery functionality.\n * Extends the base model with the ability to recover the original typed record.\n *\n * 
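A typical pairing of a versioned transform with a side-effect consumer, assuming an `enrichedStream` destination and an `audit()` helper declared elsewhere (both illustrative):

```typescript
clickStream.addTransform(
  enrichedStream,
  (e) => ({ ...e, day: e.occurredAt.toISOString().slice(0, 10) }),
  { version: "1.0" },
);

clickStream.addConsumer(async (e) => {
  await audit(e); // side effect only; emits nothing downstream
});
```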
@template T The original record type before failure\n */\nexport interface DeadLetter<T> extends DeadLetterModel {\n /**\n * Recovers the original record as its typed form.\n * Useful for reprocessing failed records with proper type safety.\n *\n * @returns The original record cast to type T\n */\n asTyped: () => T;\n}\n\n/**\n * Internal function to attach type guard functionality to dead letter records.\n *\n * @internal\n * @template T The original record type\n * @param dl The dead letter model to enhance\n * @param typeGuard Function to validate and cast the original record\n */\nfunction attachTypeGuard<T>(\n dl: DeadLetterModel,\n typeGuard: (input: any) => T,\n): asserts dl is DeadLetter<T> {\n (dl as any).asTyped = () => typeGuard(dl.originalRecord);\n}\n\n/**\n * Specialized stream for handling failed records (dead letters).\n * Provides type-safe access to failed records for reprocessing or analysis.\n *\n * @template T The original record type that failed processing\n *\n * @example\n * ```typescript\n * const dlq = new DeadLetterQueue<UserEvent>(\"user-events-dlq\");\n *\n * dlq.addConsumer(async (deadLetter) => {\n * const originalEvent = deadLetter.asTyped();\n * console.log(`Failed event: ${deadLetter.errorMessage}`);\n * // Potentially reprocess or alert\n * });\n * ```\n */\nexport class DeadLetterQueue<T> extends Stream<DeadLetterModel> {\n /**\n * Creates a new DeadLetterQueue instance.\n * @param name The name of the dead letter queue stream\n * @param config Optional configuration for the stream. The metadata property is always present and includes stackTrace.\n */\n constructor(name: string, config?: StreamConfig<DeadLetterModel>);\n\n /** @internal **/\n constructor(\n name: string,\n config: StreamConfig<DeadLetterModel>,\n validate: (originalRecord: any) => T,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<DeadLetterModel>,\n typeGuard?: (originalRecord: any) => T,\n ) {\n if (typeGuard === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n super(name, config ?? 
{}, dlqSchema, dlqColumns, undefined, false);\n this.typeGuard = typeGuard;\n getMooseInternal().streams.set(name, this);\n }\n\n /**\n * Internal type guard function for validating and casting original records.\n *\n * @internal\n */\n private typeGuard: (originalRecord: any) => T;\n\n /**\n * Adds a transformation step for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @template U The output type for the transformation\n * @param destination The destination stream for transformed messages\n * @param transformation Function to transform dead letter records\n * @param config Optional transformation configuration\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<DeadLetter<T>, U>,\n config?: TransformConfig<DeadLetterModel>,\n ) {\n const withValidate: SyncOrAsyncTransform<DeadLetterModel, U> = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addTransform(destination, withValidate, config);\n }\n\n /**\n * Adds a consumer for dead letter records.\n * The consumer function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param consumer Function to process dead letter records\n * @param config Optional consumer configuration\n */\n addConsumer(\n consumer: Consumer<DeadLetter<T>>,\n config?: ConsumerConfig<DeadLetterModel>,\n ) {\n const withValidate: Consumer<DeadLetterModel> = (deadLetter) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return consumer(deadLetter);\n };\n super.addConsumer(withValidate, config);\n }\n\n /**\n * Adds a multi-stream transformation for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param transformation Function to route dead letter records to multiple destinations\n */\n addMultiTransform(\n transformation: (record: DeadLetter<T>) => [RoutedMessage],\n ) {\n const withValidate: (record: DeadLetterModel) => [RoutedMessage] = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addMultiTransform(withValidate);\n }\n}\n","import { getMooseInternal } from \"../internal\";\n\n/**\n * Context passed to task handlers. Single param to future-proof API changes.\n *\n * - state: shared mutable state for the task and its lifecycle hooks\n * - input: optional typed input for the task (undefined when task has no input)\n */\n/**\n * Task handler context. If the task declares an input type (T != null),\n * `input` is required and strongly typed. For no-input tasks (T = null),\n * `input` is omitted/optional.\n */\nexport type TaskContext<TInput> =\n TInput extends null ? 
{ state: any; input?: null }\n : { state: any; input: TInput };\n\n/**\n * Configuration options for defining a task within a workflow.\n *\n * @template T - The input type for the task\n * @template R - The return type for the task\n */\nexport interface TaskConfig<T, R> {\n /** The main function that executes the task logic */\n run: (context: TaskContext<T>) => Promise<R>;\n\n /**\n * Optional array of tasks to execute after this task completes successfully.\n * Supports all combinations of input types (real type or null) and output types (real type or void).\n * When this task returns void, onComplete tasks expect null as input.\n * When this task returns a real type, onComplete tasks expect that type as input.\n */\n onComplete?: (\n | Task<R extends void ? null : R, any>\n | Task<R extends void ? null : R, void>\n )[];\n\n /** Optional function that is called when the task is cancelled. */\n onCancel?: (context: TaskContext<T>) => Promise<void>;\n\n /** Optional timeout duration for the task execution (e.g., \"30s\", \"5m\") */\n timeout?: string;\n\n /** Optional number of retry attempts if the task fails */\n retries?: number;\n}\n\n/**\n * Represents a single task within a workflow system.\n *\n * A Task encapsulates the execution logic, completion handlers, and configuration\n * for a unit of work that can be chained with other tasks in a workflow.\n *\n * @template T - The input type that this task expects\n * @template R - The return type that this task produces\n */\nexport class Task<T, R> {\n /**\n * Creates a new Task instance.\n *\n * @param name - Unique identifier for the task\n * @param config - Configuration object defining the task behavior\n *\n * @example\n * ```typescript\n * // No input, no output\n * const task1 = new Task<null, void>(\"task1\", {\n * run: async () => {\n * console.log(\"No input/output\");\n * }\n * });\n *\n * // No input, but has output\n * const task2 = new Task<null, OutputType>(\"task2\", {\n * run: async () => {\n * return someOutput;\n * }\n * });\n *\n * // Has input, no output\n * const task3 = new Task<InputType, void>(\"task3\", {\n * run: async ({ input }) => {\n * // process input but return nothing\n * }\n * });\n *\n * // Has both input and output\n * const task4 = new Task<InputType, OutputType>(\"task4\", {\n * run: async ({ input }) => {\n * return process(input);\n * }\n * });\n * ```\n */\n constructor(\n readonly name: string,\n readonly config: TaskConfig<T, R>,\n ) {}\n}\n\n/**\n * Configuration options for defining a workflow.\n *\n * A workflow orchestrates the execution of multiple tasks in a defined sequence\n * or pattern, with support for scheduling, retries, and timeouts.\n */\nexport interface WorkflowConfig {\n /**\n * The initial task that begins the workflow execution.\n * Supports all combinations of input types (real type or null) and output types (real type or void):\n * - Task<null, OutputType>: No input, returns a type\n * - Task<null, void>: No input, returns nothing\n * - Task<InputType, OutputType>: Has input, returns a type\n * - Task<InputType, void>: Has input, returns nothing\n */\n startingTask:\n | Task<null, any>\n | Task<null, void>\n | Task<any, any>\n | Task<any, void>;\n\n /** Optional number of retry attempts if the entire workflow fails */\n retries?: number;\n\n /** Optional timeout duration for the entire workflow execution (e.g., \"10m\", \"1h\") */\n timeout?: string;\n\n /** Optional cron-style 
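A sketch of chaining via `onComplete`, where the first task's output type becomes the second task's input type (the names and payloads are illustrative):

```typescript
const load = new Task<string[], void>("load", {
  run: async ({ input }) => {
    console.log(`loading ${input.length} rows`);
  },
});

const extract = new Task<null, string[]>("extract", {
  run: async () => ["a", "b", "c"],
  onComplete: [load], // load receives extract's string[] as input
  retries: 2,
  timeout: "30s",
});
```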
schedule string for automated workflow execution */\n schedule?: string;\n}\n\n/**\n * Represents a complete workflow composed of interconnected tasks.\n *\n * A Workflow manages the execution flow of multiple tasks, handling scheduling,\n * error recovery, and task orchestration. Once created, workflows are automatically\n * registered with the internal Moose system.\n *\n * @example\n * ```typescript\n * const dataProcessingWorkflow = new Workflow(\"dataProcessing\", {\n * startingTask: extractDataTask,\n * schedule: \"0 2 * * *\", // Run daily at 2 AM\n * timeout: \"1h\",\n * retries: 2\n * });\n * ```\n */\nexport class Workflow {\n /**\n * Creates a new Workflow instance and registers it with the Moose system.\n *\n * @param name - Unique identifier for the workflow\n * @param config - Configuration object defining the workflow behavior and task orchestration\n * @throws {Error} When the workflow contains null/undefined tasks or infinite loops\n */\n constructor(\n readonly name: string,\n readonly config: WorkflowConfig,\n ) {\n const workflows = getMooseInternal().workflows;\n if (workflows.has(name)) {\n throw new Error(`Workflow with name ${name} already exists`);\n }\n this.validateTaskGraph(config.startingTask, name);\n workflows.set(name, this);\n }\n\n /**\n * Validates the task graph to ensure there are no null tasks or infinite loops.\n *\n * @private\n * @param startingTask - The starting task to begin validation from\n * @param workflowName - The name of the workflow being validated (for error messages)\n * @throws {Error} When null/undefined tasks are found or infinite loops are detected\n */\n private validateTaskGraph(\n startingTask: Task<any, any> | null | undefined,\n workflowName: string,\n ): void {\n if (startingTask === null || startingTask === undefined) {\n throw new Error(\n `Workflow \"${workflowName}\" has a null or undefined starting task`,\n );\n }\n\n const visited = new Set<string>();\n const recursionStack = new Set<string>();\n\n const validateTask = (\n task: Task<any, any> | null | undefined,\n currentPath: string[],\n ): void => {\n if (task === null || task === undefined) {\n const pathStr =\n currentPath.length > 0 ? 
currentPath.join(\" -> \") + \" -> \" : \"\";\n throw new Error(\n `Workflow \"${workflowName}\" contains a null or undefined task in the task chain: ${pathStr}null`,\n );\n }\n\n const taskName = task.name;\n\n if (recursionStack.has(taskName)) {\n const cycleStartIndex = currentPath.indexOf(taskName);\n const cyclePath =\n cycleStartIndex >= 0 ?\n currentPath.slice(cycleStartIndex).concat(taskName)\n : currentPath.concat(taskName);\n throw new Error(\n `Workflow \"${workflowName}\" contains an infinite loop in task chain: ${cyclePath.join(\" -> \")}`,\n );\n }\n\n if (visited.has(taskName)) {\n // Already processed this task and its children\n return;\n }\n\n visited.add(taskName);\n recursionStack.add(taskName);\n\n if (task.config.onComplete) {\n for (const nextTask of task.config.onComplete) {\n validateTask(nextTask, [...currentPath, taskName]);\n }\n }\n\n recursionStack.delete(taskName);\n };\n\n validateTask(startingTask, []);\n }\n}\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { getMooseInternal } from \"../internal\";\nimport { DeadLetterQueue, Stream } from \"./stream\";\n\n/**\n * @template T The data type of the messages expected by the destination stream.\n */\nexport interface IngestConfig<T> {\n /**\n * The destination stream where the ingested data should be sent.\n */\n destination: Stream<T>;\n\n deadLetterQueue?: DeadLetterQueue<T>;\n /**\n * An optional version string for this configuration.\n */\n version?: string;\n /**\n * An optional custom path for the ingestion endpoint.\n */\n path?: string;\n metadata?: { description?: string };\n}\n\n/**\n * Represents an Ingest API endpoint, used for sending data into a Moose system, typically writing to a Stream.\n * Provides a typed interface for the expected data format.\n *\n * @template T The data type of the records that this API endpoint accepts. The structure of T defines the expected request body schema.\n */\nexport class IngestApi<T> extends TypedBase<T, IngestConfig<T>> {\n /**\n * Creates a new IngestApi instance.\n * @param name The name of the ingest API endpoint.\n * @param config Optional configuration for the ingest API.\n */\n constructor(name: string, config?: IngestConfig<T>);\n\n /**\n * @internal\n * Note: `validators` parameter is a positional placeholder (always undefined for IngestApi).\n * It exists because TypedBase has validators as the 5th param, and we need to pass\n * allowExtraFields as the 6th param. 
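Taken together, `Task` and `Workflow` compose a DAG: each task's `onComplete` list names its successors, and the `Workflow` constructor walks that graph, rejecting null links and cycles at registration time. A minimal sketch of the shape this takes — `ExtractedRow` and the task bodies are illustrative, not part of the library:

```typescript
import { Task, Workflow } from "@514labs/moose-lib";

// Illustrative row type, for the sketch only.
interface ExtractedRow {
  id: string;
  value: number;
}

// Successor task: its input type matches the predecessor's output type.
const transform = new Task<ExtractedRow[], void>("transform", {
  run: async ({ input }) => {
    console.log(`transforming ${input.length} rows`);
  },
});

const extract = new Task<null, ExtractedRow[]>("extract", {
  run: async () => [{ id: "a", value: 1 }],
  onComplete: [transform], // extract's output feeds transform's input
  timeout: "5m",
  retries: 2,
});

// Registers the workflow; a cycle (e.g. transform -> extract) would throw here.
new Workflow("nightly", {
  startingTask: extract,
  schedule: "0 2 * * *", // run daily at 2 AM
  timeout: "1h",
});
```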
IngestApi doesn't use validators.\n */\n constructor(\n name: string,\n config: IngestConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: undefined,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config: IngestConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: undefined,\n allowExtraFields?: boolean,\n ) {\n super(name, config, schema, columns, undefined, allowExtraFields);\n const ingestApis = getMooseInternal().ingestApis;\n if (ingestApis.has(name)) {\n throw new Error(`Ingest API with name ${name} already exists`);\n }\n ingestApis.set(name, this);\n }\n}\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { getMooseInternal } from \"../internal\";\nimport type { ApiUtil } from \"../../consumption-apis/helpers\";\n\n/**\n * Defines the signature for a handler function used by a Consumption API.\n * @template T The expected type of the request parameters or query parameters.\n * @template R The expected type of the response data.\n * @param params An object containing the validated request parameters, matching the structure of T.\n * @param utils Utility functions provided to the handler, e.g., for database access (`runSql`).\n * @returns A Promise resolving to the response data of type R.\n */\ntype ApiHandler<T, R> = (params: T, utils: ApiUtil) => Promise<R>;\n\n/**\n * @template T The data type of the request parameters.\n */\nexport interface ApiConfig<T> {\n /**\n * An optional version string for this configuration.\n */\n version?: string;\n /**\n * An optional custom path for the API endpoint.\n * If not specified, defaults to the API name.\n */\n path?: string;\n metadata?: { description?: string };\n}\n\n/**\n * Represents a Consumption API endpoint (API), used for querying data from a Moose system.\n * Exposes data, often from an OlapTable or derived through a custom handler function.\n *\n * @template T The data type defining the expected structure of the API's query parameters.\n * @template R The data type defining the expected structure of the API's response body. Defaults to `any`.\n */\nexport class Api<T, R = any> extends TypedBase<T, ApiConfig<T>> {\n /** @internal The handler function that processes requests and generates responses. */\n _handler: ApiHandler<T, R>;\n /** @internal The JSON schema definition for the response type R. */\n responseSchema: IJsonSchemaCollection.IV3_1;\n\n /**\n * Creates a new Api instance.\n * @param name The name of the consumption API endpoint.\n * @param handler The function to execute when the endpoint is called. It receives validated query parameters and utility functions.\n * @param config Optional configuration for the consumption API.\n */\n constructor(name: string, handler: ApiHandler<T, R>, config?: ApiConfig<T>);\n\n /** @internal **/\n constructor(\n name: string,\n handler: ApiHandler<T, R>,\n config: ApiConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n responseSchema: IJsonSchemaCollection.IV3_1,\n );\n\n constructor(\n name: string,\n handler: ApiHandler<T, R>,\n config?: ApiConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n responseSchema?: IJsonSchemaCollection.IV3_1,\n ) {\n super(name, config ?? {}, schema, columns);\n this._handler = handler;\n this.responseSchema = responseSchema ?? 
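Putting `IngestConfig` to work is mostly a matter of pointing the endpoint at a stream. A minimal sketch, assuming the Moose compiler plugin injects the schema for `UserEvent` (an illustrative type) and the stream's own config is left at its defaults:

```typescript
import { IngestApi, Stream } from "@514labs/moose-lib";

// Illustrative record shape; not part of the library.
interface UserEvent {
  id: string;
  timestamp: Date;
}

const userEvents = new Stream<UserEvent>("userEvents");

// POST bodies are validated against UserEvent, then written to the stream.
new IngestApi<UserEvent>("userEvents", {
  destination: userEvents,
  version: "1.0.0",
});
```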
{\n version: \"3.1\",\n schemas: [{ type: \"array\", items: { type: \"object\" } }],\n components: { schemas: {} },\n };\n const apis = getMooseInternal().apis;\n const key = `${name}${config?.version ? `:${config.version}` : \"\"}`;\n if (apis.has(key)) {\n throw new Error(\n `Consumption API with name ${name} and version ${config?.version} already exists`,\n );\n }\n apis.set(key, this);\n\n // Also register by custom path if provided\n if (config?.path) {\n if (config.version) {\n // Check if the path already ends with the version\n const pathEndsWithVersion =\n config.path.endsWith(`/${config.version}`) ||\n config.path === config.version ||\n (config.path.endsWith(config.version) &&\n config.path.length > config.version.length &&\n config.path[config.path.length - config.version.length - 1] ===\n \"/\");\n\n if (pathEndsWithVersion) {\n // Path already contains version, register as-is\n if (apis.has(config.path)) {\n const existing = apis.get(config.path)!;\n throw new Error(\n `Cannot register API \"${name}\" with path \"${config.path}\" - this path is already used by API \"${existing.name}\"`,\n );\n }\n apis.set(config.path, this);\n } else {\n // Path doesn't contain version, register with version appended\n const versionedPath = `${config.path.replace(/\\/$/, \"\")}/${config.version}`;\n\n // Check for collision on versioned path\n if (apis.has(versionedPath)) {\n const existing = apis.get(versionedPath)!;\n throw new Error(\n `Cannot register API \"${name}\" with path \"${versionedPath}\" - this path is already used by API \"${existing.name}\"`,\n );\n }\n apis.set(versionedPath, this);\n\n // Also register the unversioned path if not already claimed\n // (This is intentionally more permissive - first API gets the unversioned path)\n if (!apis.has(config.path)) {\n apis.set(config.path, this);\n }\n }\n } else {\n // Unversioned API, check for collision and register\n if (apis.has(config.path)) {\n const existing = apis.get(config.path)!;\n throw new Error(\n `Cannot register API \"${name}\" with custom path \"${config.path}\" - this path is already used by API \"${existing.name}\"`,\n );\n }\n apis.set(config.path, this);\n }\n }\n }\n\n /**\n * Retrieves the handler function associated with this Consumption API.\n * @returns The handler function.\n */\n getHandler = (): ApiHandler<T, R> => {\n return this._handler;\n };\n\n async call(baseUrl: string, queryParams: T): Promise<R> {\n // Construct the API endpoint URL using custom path or default to name\n let path: string;\n if (this.config?.path) {\n // Check if the custom path already contains the version\n if (this.config.version) {\n const pathEndsWithVersion =\n this.config.path.endsWith(`/${this.config.version}`) ||\n this.config.path === this.config.version ||\n (this.config.path.endsWith(this.config.version) &&\n this.config.path.length > this.config.version.length &&\n this.config.path[\n this.config.path.length - this.config.version.length - 1\n ] === \"/\");\n\n if (pathEndsWithVersion) {\n path = this.config.path;\n } else {\n path = `${this.config.path.replace(/\\/$/, \"\")}/${this.config.version}`;\n }\n } else {\n path = this.config.path;\n }\n } else {\n // Default to name with optional version\n path =\n this.config?.version ?\n `${this.name}/${this.config.version}`\n : this.name;\n }\n const url = new URL(`${baseUrl.replace(/\\/$/, \"\")}/api/${path}`);\n\n const searchParams = url.searchParams;\n\n for (const [key, value] of Object.entries(queryParams as any)) {\n if (Array.isArray(value)) {\n // For array 
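The net effect of this registration logic is that a single versioned, custom-path API can be reachable under up to three registry keys. A sketch — the name, path, and `handler` are illustrative:

```typescript
// Assuming `handler` is an ApiHandler defined elsewhere.
new Api<{ day: string }, string[]>("dailyReport", handler, {
  version: "1.1",
  path: "reports/daily",
});
// Registered under:
//   "dailyReport:1.1"   - the name:version key
//   "reports/daily/1.1" - the custom path with the version appended
//   "reports/daily"     - the bare path, if no earlier API already claimed it
```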
values, add each item as a separate query param\n for (const item of value) {\n if (item !== null && item !== undefined) {\n searchParams.append(key, String(item));\n }\n }\n } else if (value !== null && value !== undefined) {\n searchParams.append(key, String(value));\n }\n }\n\n const response = await fetch(url, {\n method: \"GET\",\n headers: {\n Accept: \"application/json\",\n },\n });\n if (!response.ok) {\n throw new Error(`HTTP error! status: ${response.status}`);\n }\n const data = await response.json();\n return data as R;\n }\n}\n\n/** @deprecated Use ApiConfig<T> directly instead. */\nexport type EgressConfig<T> = ApiConfig<T>;\n\n/** @deprecated Use Api directly instead. */\nexport const ConsumptionApi = Api;\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase, TypiaValidators } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport {\n DeadLetterModel,\n DeadLetterQueue,\n Stream,\n StreamConfig,\n} from \"./stream\";\nimport { OlapConfig, OlapTable } from \"./olapTable\";\nimport { IngestApi, IngestConfig } from \"./ingestApi\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport { ClickHouseEngines } from \"../../blocks/helpers\";\n\n/**\n * Configuration options for a complete ingestion pipeline, potentially including an Ingest API, a Stream, and an OLAP Table.\n *\n * @template T The data type of the records being ingested.\n *\n * @example\n * ```typescript\n * // Simple pipeline with all components enabled\n * const pipelineConfig: IngestPipelineConfig<UserData> = {\n * table: true,\n * stream: true,\n * ingestApi: true\n * };\n *\n * // Advanced pipeline with custom configurations\n * const advancedConfig: IngestPipelineConfig<UserData> = {\n * table: { orderByFields: ['timestamp', 'userId'], engine: ClickHouseEngines.ReplacingMergeTree },\n * stream: { parallelism: 4, retentionPeriod: 86400 },\n * ingestApi: true,\n * version: '1.2.0',\n * metadata: { description: 'User data ingestion pipeline' }\n * };\n * ```\n */\nexport type IngestPipelineConfig<T> = {\n /**\n * Configuration for the OLAP table component of the pipeline.\n *\n * - If `true`, a table with default settings is created.\n * - If an `OlapConfig` object is provided, it specifies the table's configuration.\n * - If `false`, no OLAP table is created.\n *\n * @default false\n */\n table: boolean | OlapConfig<T>;\n\n /**\n * Configuration for the stream component of the pipeline.\n *\n * - If `true`, a stream with default settings is created.\n * - Pass a config object to specify the stream's configuration.\n * - The stream's destination will automatically be set to the pipeline's table if one exists.\n * - If `false`, no stream is created.\n *\n * @default false\n */\n stream: boolean | Omit<StreamConfig<T>, \"destination\">;\n\n /**\n * Configuration for the ingest API component of the pipeline.\n *\n * - If `true`, an ingest API with default settings is created.\n * - If a partial `IngestConfig` object (excluding `destination`) is provided, it specifies the API's configuration.\n * - The API's destination will automatically be set to the pipeline's stream if one exists.\n * - If `false`, no ingest API is created.\n *\n * **Note:** Requires a stream to be configured when enabled.\n *\n * @default false\n */\n ingestApi: boolean | Omit<IngestConfig<T>, \"destination\">;\n\n /**\n * @deprecated Use `ingestApi` instead. 
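On the client side, `call` reuses the same path resolution to build a GET request against `{baseUrl}/api/{path}`, serializing each query parameter — and each element of array parameters — with `String(...)`. A sketch with illustrative types and base URL:

```typescript
// Illustrative parameter and response shapes.
interface DailyParams {
  day: string;
  tags?: string[];
}
interface DailyRow {
  day: string;
  count: number;
}

const daily = new Api<DailyParams, DailyRow[]>("daily", async (params, utils) => {
  // Query logic elided; `utils` carries helpers such as SQL access.
  return [];
});

// GET http://localhost:4000/api/daily?day=2024-01-01&tags=a&tags=b
const rows = await daily.call("http://localhost:4000", {
  day: "2024-01-01",
  tags: ["a", "b"],
});
```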
This parameter will be removed in a future version.\n */\n ingest?: boolean | Omit<IngestConfig<T>, \"destination\">;\n\n /**\n * Configuration for the dead letter queue of the pipeline.\n * If `true`, a dead letter queue with default settings is created.\n * If a partial `StreamConfig` object (excluding `destination`) is provided, it specifies the dead letter queue's configuration.\n * The dead letter queue is automatically attached to the pipeline's stream and ingest API, so records that fail processing are routed to it.\n * If `false` or `undefined`, no dead letter queue is created.\n */\n deadLetterQueue?: boolean | StreamConfig<DeadLetterModel>;\n\n /**\n * An optional version string applying to all components (table, stream, ingest) created by this pipeline configuration.\n * This version will be used for schema versioning and component identification.\n *\n * @example \"v1.0.0\", \"2023-12\", \"prod\"\n */\n version?: string;\n\n /**\n * An optional custom path for the ingestion API endpoint.\n * This will be used as the HTTP path for the ingest API if one is created.\n *\n * @example \"pipelines/analytics\", \"data/events\"\n */\n path?: string;\n\n /**\n * Optional metadata for the pipeline.\n */\n metadata?: {\n /** Human-readable description of the pipeline's purpose */\n description?: string;\n };\n\n /** Determines how changes in code will propagate to the resources. */\n lifeCycle?: LifeCycle;\n};\n\n/**\n * Represents a complete ingestion pipeline, potentially combining an Ingest API, a Stream, and an OLAP Table\n * under a single name and configuration. Simplifies the setup of common ingestion patterns.\n *\n * This class provides a high-level abstraction for creating data ingestion workflows that can include:\n * - An HTTP API endpoint for receiving data\n * - A streaming component for real-time data processing\n * - An OLAP table for analytical queries\n *\n * @template T The data type of the records flowing through the pipeline. This type defines the schema for the\n * Ingest API input, the Stream messages, and the OLAP Table rows.\n *\n * @example\n * ```typescript\n * // Create a complete pipeline with all components\n * const userDataPipeline = new IngestPipeline('userData', {\n * table: true,\n * stream: true,\n * ingestApi: true,\n * version: '1.0.0',\n * metadata: { description: 'Pipeline for user registration data' }\n * });\n *\n * // Create a pipeline with only table and stream\n * const analyticsStream = new IngestPipeline('analytics', {\n * table: { orderByFields: ['timestamp'], engine: ClickHouseEngines.ReplacingMergeTree },\n * stream: { parallelism: 8, retentionPeriod: 604800 },\n * ingestApi: false\n * });\n * ```\n */\nexport class IngestPipeline<T> extends TypedBase<T, IngestPipelineConfig<T>> {\n /**\n * The OLAP table component of the pipeline, if configured.\n * Provides analytical query capabilities for the ingested data.\n * Only present when `config.table` is not `false`.\n */\n table?: OlapTable<T>;\n\n /**\n * The stream component of the pipeline, if configured.\n * Handles real-time data flow and processing between components.\n * Only present when `config.stream` is not `false`.\n */\n stream?: Stream<T>;\n\n /**\n * The ingest API component of the pipeline, if configured.\n * Provides HTTP endpoints for data ingestion.\n * Only present when `config.ingestApi` is not `false`.\n */\n ingestApi?: IngestApi<T>;\n\n /** The dead letter queue of the pipeline, if configured. 
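Because `ingest` survives only as a deprecated alias — the constructor warns and copies its value into `ingestApi` when the latter is unset — new code should spell the flag as `ingestApi`. Note that the type declares `ingestApi` as required, so a config that still relies on `ingest` alone only type-checks through a cast. A before/after sketch (the `UserEvent` type and pipeline names are illustrative):

```typescript
// Before (deprecated): logs a warning at construction.
// The cast is needed because `ingestApi` is a required property of the config type.
new IngestPipeline<UserEvent>("eventsLegacy", {
  table: true,
  stream: true,
  ingest: true,
} as any);

// After: the supported spelling.
new IngestPipeline<UserEvent>("events", {
  table: true,
  stream: true,
  ingestApi: true,
});
```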
*/\n deadLetterQueue?: DeadLetterQueue<T>;\n\n /**\n * Creates a new IngestPipeline instance.\n * Based on the configuration, it automatically creates and links the IngestApi, Stream, and OlapTable components.\n *\n * @param name The base name for the pipeline components (e.g., \"userData\" could create \"userData\" table, \"userData\" stream, \"userData\" ingest API).\n * @param config Optional configuration for the ingestion pipeline.\n *\n * @throws {Error} When ingest API is enabled but no stream is configured, since the API requires a stream destination.\n *\n * @example\n * ```typescript\n * const pipeline = new IngestPipeline('events', {\n * table: { orderByFields: ['timestamp'], engine: ClickHouseEngines.ReplacingMergeTree },\n * stream: { parallelism: 2 },\n * ingestApi: true\n * });\n * ```\n */\n constructor(name: string, config: IngestPipelineConfig<T>);\n\n /**\n * Internal constructor used by the framework for advanced initialization.\n *\n * @internal\n * @param name The base name for the pipeline components.\n * @param config Configuration specifying which components to create and their settings.\n * @param schema JSON schema collection for type validation.\n * @param columns Column definitions for the data model.\n * @param validators Typia validation functions.\n * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).\n */\n constructor(\n name: string,\n config: IngestPipelineConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: TypiaValidators<T>,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config: IngestPipelineConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n allowExtraFields?: boolean,\n ) {\n super(name, config, schema, columns, validators, allowExtraFields);\n\n // Handle backwards compatibility for deprecated 'ingest' parameter\n if (config.ingest !== undefined) {\n console.warn(\n \"⚠️ DEPRECATION WARNING: The 'ingest' parameter is deprecated and will be removed in a future version. \" +\n \"Please use 'ingestApi' instead.\",\n );\n // If ingestApi is not explicitly set, use the ingest value\n if (config.ingestApi === undefined) {\n (config as any).ingestApi = config.ingest;\n }\n }\n\n // Create OLAP table if configured\n if (config.table) {\n const tableConfig: OlapConfig<T> =\n typeof config.table === \"object\" ?\n {\n ...config.table,\n lifeCycle: config.table.lifeCycle ?? config.lifeCycle,\n ...(config.version && { version: config.version }),\n }\n : {\n lifeCycle: config.lifeCycle,\n engine: ClickHouseEngines.MergeTree,\n ...(config.version && { version: config.version }),\n };\n this.table = new OlapTable(\n name,\n tableConfig,\n this.schema,\n this.columnArray,\n this.validators,\n );\n }\n\n if (config.deadLetterQueue) {\n const streamConfig = {\n destination: undefined,\n ...(typeof config.deadLetterQueue === \"object\" ?\n {\n ...config.deadLetterQueue,\n lifeCycle: config.deadLetterQueue.lifeCycle ?? 
config.lifeCycle,\n }\n : { lifeCycle: config.lifeCycle }),\n ...(config.version && { version: config.version }),\n };\n this.deadLetterQueue = new DeadLetterQueue<T>(\n `${name}DeadLetterQueue`,\n streamConfig,\n validators!.assert!,\n );\n }\n\n // Create stream if configured, linking it to the table as destination\n if (config.stream) {\n const streamConfig: StreamConfig<T> = {\n destination: this.table,\n defaultDeadLetterQueue: this.deadLetterQueue,\n ...(typeof config.stream === \"object\" ?\n {\n ...config.stream,\n lifeCycle: config.stream.lifeCycle ?? config.lifeCycle,\n }\n : { lifeCycle: config.lifeCycle }),\n ...(config.version && { version: config.version }),\n };\n this.stream = new Stream(\n name,\n streamConfig,\n this.schema,\n this.columnArray,\n undefined,\n this.allowExtraFields,\n );\n // Set pipeline parent reference for internal framework use\n (this.stream as any).pipelineParent = this;\n }\n\n // Create ingest API if configured, requiring a stream as destination\n const effectiveIngestAPI =\n config.ingestApi !== undefined ? config.ingestApi : config.ingest;\n if (effectiveIngestAPI) {\n if (!this.stream) {\n throw new Error(\"Ingest API needs a stream to write to.\");\n }\n\n const ingestConfig = {\n destination: this.stream,\n deadLetterQueue: this.deadLetterQueue,\n ...(typeof effectiveIngestAPI === \"object\" ?\n (effectiveIngestAPI as object)\n : {}),\n ...(config.version && { version: config.version }),\n ...(config.path && { path: config.path }),\n };\n this.ingestApi = new IngestApi(\n name,\n ingestConfig,\n this.schema,\n this.columnArray,\n undefined,\n this.allowExtraFields,\n );\n // Set pipeline parent reference for internal framework use\n (this.ingestApi as any).pipelineParent = this;\n }\n }\n}\n","import { Workflow, Task } from \"./workflow\";\nimport { OlapTable } from \"./olapTable\";\n\ninterface BatchResult<T> {\n items: T[];\n hasMore: boolean;\n}\n\ninterface TransformedResult<U> {\n items: U[];\n}\n\ninterface TaskConfig {\n retries: number;\n timeout: string;\n}\n\ninterface ETLTasks<T, U> {\n extract: Task<null, BatchResult<T>>;\n transform: Task<BatchResult<T>, TransformedResult<U>>;\n load: Task<TransformedResult<U>, void>;\n}\n\nclass InternalBatcher<T> {\n private iterator: AsyncIterator<T>;\n private batchSize: number;\n\n constructor(asyncIterable: AsyncIterable<T>, batchSize = 20) {\n this.iterator = asyncIterable[Symbol.asyncIterator]();\n this.batchSize = batchSize;\n }\n\n async getNextBatch(): Promise<BatchResult<T>> {\n const items: T[] = [];\n\n for (let i = 0; i < this.batchSize; i++) {\n const { value, done } = await this.iterator.next();\n\n if (done) {\n return { items, hasMore: false };\n }\n\n items.push(value);\n }\n\n return { items, hasMore: true };\n }\n}\n\nexport interface ETLPipelineConfig<T, U> {\n extract: AsyncIterable<T> | (() => AsyncIterable<T>);\n transform: (sourceData: T) => Promise<U>;\n load: ((data: U[]) => Promise<void>) | OlapTable<U>;\n}\n\nexport class ETLPipeline<T, U> {\n private batcher!: InternalBatcher<T>;\n\n constructor(\n readonly name: string,\n readonly config: ETLPipelineConfig<T, U>,\n ) {\n this.setupPipeline();\n }\n\n private setupPipeline(): void {\n this.batcher = this.createBatcher();\n const tasks = this.createAllTasks();\n\n tasks.extract.config.onComplete = [tasks.transform];\n tasks.transform.config.onComplete = [tasks.load];\n\n new Workflow(this.name, {\n startingTask: tasks.extract,\n retries: 1,\n timeout: \"30m\",\n });\n }\n\n private createBatcher(): InternalBatcher<T> 
{\n const iterable =\n typeof this.config.extract === \"function\" ?\n this.config.extract()\n : this.config.extract;\n\n return new InternalBatcher(iterable);\n }\n\n private getDefaultTaskConfig(): TaskConfig {\n return {\n retries: 1,\n timeout: \"30m\",\n };\n }\n\n private createAllTasks(): ETLTasks<T, U> {\n const taskConfig = this.getDefaultTaskConfig();\n\n return {\n extract: this.createExtractTask(taskConfig),\n transform: this.createTransformTask(taskConfig),\n load: this.createLoadTask(taskConfig),\n };\n }\n\n private createExtractTask(\n taskConfig: TaskConfig,\n ): Task<null, BatchResult<T>> {\n return new Task<null, BatchResult<T>>(`${this.name}_extract`, {\n run: async ({}) => {\n console.log(`Running extract task for ${this.name}...`);\n const batch = await this.batcher.getNextBatch();\n console.log(`Extract task completed with ${batch.items.length} items`);\n return batch;\n },\n retries: taskConfig.retries,\n timeout: taskConfig.timeout,\n });\n }\n\n private createTransformTask(\n taskConfig: TaskConfig,\n ): Task<BatchResult<T>, TransformedResult<U>> {\n return new Task<BatchResult<T>, TransformedResult<U>>(\n `${this.name}_transform`,\n {\n // Use new single-parameter context API for handlers\n run: async ({ input }) => {\n const batch = input!;\n console.log(\n `Running transform task for ${this.name} with ${batch.items.length} items...`,\n );\n const transformedItems: U[] = [];\n\n for (const item of batch.items) {\n const transformed = await this.config.transform(item);\n transformedItems.push(transformed);\n }\n\n console.log(\n `Transform task completed with ${transformedItems.length} items`,\n );\n return { items: transformedItems };\n },\n retries: taskConfig.retries,\n timeout: taskConfig.timeout,\n },\n );\n }\n\n private createLoadTask(\n taskConfig: TaskConfig,\n ): Task<TransformedResult<U>, void> {\n return new Task<TransformedResult<U>, void>(`${this.name}_load`, {\n run: async ({ input: transformedItems }) => {\n console.log(\n `Running load task for ${this.name} with ${transformedItems.items.length} items...`,\n );\n\n // Handle both function and OlapTable\n if (\"insert\" in this.config.load) {\n // It's an OlapTable - insert entire batch\n await this.config.load.insert(transformedItems.items);\n } else {\n // It's a function - call with entire array\n await this.config.load(transformedItems.items);\n }\n\n console.log(`Load task completed`);\n },\n retries: taskConfig.retries,\n timeout: taskConfig.timeout,\n });\n }\n\n // Execute the entire ETL pipeline\n async run(): Promise<void> {\n console.log(`Starting ETL Pipeline: ${this.name}`);\n\n let batchNumber = 1;\n do {\n console.log(`Processing batch ${batchNumber}...`);\n const batch = await this.batcher.getNextBatch();\n\n if (batch.items.length === 0) {\n break;\n }\n\n // Transform all items in the batch\n const transformedItems: U[] = [];\n for (const extractedData of batch.items) {\n const transformedData = await this.config.transform(extractedData);\n transformedItems.push(transformedData);\n }\n\n // Load the entire batch\n if (\"insert\" in this.config.load) {\n // It's an OlapTable - insert entire batch\n await this.config.load.insert(transformedItems);\n } else {\n // It's a function - call with entire array\n await this.config.load(transformedItems);\n }\n\n console.log(\n `Completed batch ${batchNumber} with ${batch.items.length} items`,\n );\n batchNumber++;\n\n if (!batch.hasMore) {\n break;\n }\n } while (true);\n\n console.log(`Completed ETL Pipeline: ${this.name}`);\n 
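End to end, `ETLPipeline` only needs the three callbacks: `extract` can be any `AsyncIterable` (or a factory returning one; it is consumed in batches of 20 by the internal batcher), and `load` can be either a function or an `OlapTable`, distinguished at runtime by the presence of an `insert` method. A sketch with illustrative types:

```typescript
import { ETLPipeline } from "@514labs/moose-lib";

// Illustrative source and destination shapes.
interface Raw {
  id: number;
}
interface Clean {
  id: string;
}

// Any async generator works as an extract source.
async function* source(): AsyncIterable<Raw> {
  for (let i = 0; i < 50; i++) yield { id: i };
}

const pipeline = new ETLPipeline<Raw, Clean>("cleanup", {
  extract: source, // or a pre-built AsyncIterable instance
  transform: async (row) => ({ id: String(row.id) }),
  load: async (rows) => {
    console.log(`loading ${rows.length} rows`); // or pass an OlapTable<Clean> here
  },
});

// Runs extract -> transform -> load in batches of 20 until the source is drained.
await pipeline.run();
```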
}\n}\n","import { getMooseInternal, isClientOnlyMode } from \"../internal\";\nimport { OlapTable } from \"./olapTable\";\nimport { Sql, toStaticQuery } from \"../../sqlHelpers\";\nimport { getSourceLocationFromStack } from \"../utils/stackTrace\";\n\ntype SqlObject = OlapTable<any> | SqlResource;\n\n/**\n * Represents a generic SQL resource that requires setup and teardown commands.\n * Base class for constructs like Views and Materialized Views. Tracks dependencies.\n */\nexport class SqlResource {\n /** @internal */\n public readonly kind = \"SqlResource\";\n\n /** Array of SQL statements to execute for setting up the resource. */\n setup: readonly string[];\n /** Array of SQL statements to execute for tearing down the resource. */\n teardown: readonly string[];\n /** The name of the SQL resource (e.g., view name, materialized view name). */\n name: string;\n\n /** List of OlapTables or Views that this resource reads data from. */\n pullsDataFrom: SqlObject[];\n /** List of OlapTables or Views that this resource writes data to. */\n pushesDataTo: SqlObject[];\n\n /** @internal Source file path where this resource was defined */\n sourceFile?: string;\n\n /** @internal Source line number where this resource was defined */\n sourceLine?: number;\n\n /** @internal Source column number where this resource was defined */\n sourceColumn?: number;\n\n /**\n * Creates a new SqlResource instance.\n * @param name The name of the resource.\n * @param setup An array of SQL DDL statements to create the resource.\n * @param teardown An array of SQL DDL statements to drop the resource.\n * @param options Optional configuration for specifying data dependencies.\n * @param options.pullsDataFrom Tables/Views this resource reads from.\n * @param options.pushesDataTo Tables/Views this resource writes to.\n */\n constructor(\n name: string,\n setup: readonly (string | Sql)[],\n teardown: readonly (string | Sql)[],\n options?: {\n pullsDataFrom?: SqlObject[];\n pushesDataTo?: SqlObject[];\n },\n ) {\n const sqlResources = getMooseInternal().sqlResources;\n // In client-only mode (MOOSE_CLIENT_ONLY=true), allow duplicate registrations\n // to support Next.js HMR which re-executes modules without clearing the registry\n if (!isClientOnlyMode() && sqlResources.has(name)) {\n throw new Error(`SqlResource with name ${name} already exists`);\n }\n sqlResources.set(name, this);\n\n this.name = name;\n this.setup = setup.map((sql) =>\n typeof sql === \"string\" ? sql : toStaticQuery(sql),\n );\n this.teardown = teardown.map((sql) =>\n typeof sql === \"string\" ? sql : toStaticQuery(sql),\n );\n this.pullsDataFrom = options?.pullsDataFrom ?? [];\n this.pushesDataTo = options?.pushesDataTo ?? 
[];\n\n // Capture source location from stack trace\n const stack = new Error().stack;\n const location = getSourceLocationFromStack(stack);\n\n if (location) {\n this.sourceFile = location.file;\n this.sourceLine = location.line;\n this.sourceColumn = location.column;\n }\n }\n}\n","import {\n ClickHouseEngines,\n createMaterializedView,\n dropView,\n} from \"../../blocks/helpers\";\nimport { Sql, toStaticQuery } from \"../../sqlHelpers\";\nimport { OlapConfig, OlapTable } from \"./olapTable\";\nimport { SqlResource } from \"./sqlResource\";\nimport { View } from \"./view\";\nimport { IJsonSchemaCollection } from \"typia\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\n\n/**\n * Configuration options for creating a Materialized View.\n * @template T The data type of the records stored in the target table of the materialized view.\n */\nexport interface MaterializedViewConfig<T> {\n /** The SQL SELECT statement or `Sql` object defining the data to be materialized. Dynamic SQL (with parameters) is not allowed here. */\n selectStatement: string | Sql;\n /** An array of OlapTable or View objects that the `selectStatement` reads from. */\n selectTables: (OlapTable<any> | View)[];\n\n /** @deprecated See {@link targetTable}\n * The name for the underlying target OlapTable that stores the materialized data. */\n tableName?: string;\n\n /** The name for the ClickHouse MATERIALIZED VIEW object itself. */\n materializedViewName: string;\n\n /** @deprecated See {@link targetTable}\n * Optional ClickHouse engine for the target table (e.g., ReplacingMergeTree). Defaults to MergeTree. */\n engine?: ClickHouseEngines;\n\n targetTable?:\n | OlapTable<T> /** Target table if the OlapTable object is already constructed. */\n | {\n /** The name for the underlying target OlapTable that stores the materialized data. */\n name: string;\n /** Optional ClickHouse engine for the target table (e.g., ReplacingMergeTree). Defaults to MergeTree. */\n engine?: ClickHouseEngines;\n /** Optional ordering fields for the target table. Crucial if using ReplacingMergeTree. */\n orderByFields?: (keyof T & string)[];\n };\n\n /** @deprecated See {@link targetTable}\n * Optional ordering fields for the target table. Crucial if using ReplacingMergeTree. */\n orderByFields?: (keyof T & string)[];\n}\n\nconst requireTargetTableName = (tableName: string | undefined): string => {\n if (typeof tableName === \"string\") {\n return tableName;\n } else {\n throw new Error(\"Name of targetTable is not specified.\");\n }\n};\n\n/**\n * Represents a Materialized View in ClickHouse.\n * This encapsulates both the target OlapTable that stores the data and the MATERIALIZED VIEW definition\n * that populates the table based on inserts into the source tables.\n *\n * @template TargetTable The data type of the records stored in the underlying target OlapTable. The structure of TargetTable defines the target table schema.\n */\nexport class MaterializedView<TargetTable> extends SqlResource {\n /** The target OlapTable instance where the materialized data is stored. 
*/\n targetTable: OlapTable<TargetTable>;\n\n /**\n * Creates a new MaterializedView instance.\n * Requires the `TargetTable` type parameter to be explicitly provided or inferred,\n * as it's needed to define the schema of the underlying target table.\n *\n * @param options Configuration options for the materialized view.\n */\n constructor(options: MaterializedViewConfig<TargetTable>);\n\n /** @internal **/\n constructor(\n options: MaterializedViewConfig<TargetTable>,\n targetSchema: IJsonSchemaCollection.IV3_1,\n targetColumns: Column[],\n );\n constructor(\n options: MaterializedViewConfig<TargetTable>,\n targetSchema?: IJsonSchemaCollection.IV3_1,\n targetColumns?: Column[],\n ) {\n let selectStatement = options.selectStatement;\n if (typeof selectStatement !== \"string\") {\n selectStatement = toStaticQuery(selectStatement);\n }\n\n if (targetSchema === undefined || targetColumns === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n const targetTable =\n options.targetTable instanceof OlapTable ?\n options.targetTable\n : new OlapTable(\n requireTargetTableName(\n options.targetTable?.name ?? options.tableName,\n ),\n {\n orderByFields:\n options.targetTable?.orderByFields ?? options.orderByFields,\n engine:\n options.targetTable?.engine ??\n options.engine ??\n ClickHouseEngines.MergeTree,\n } as OlapConfig<TargetTable>,\n targetSchema,\n targetColumns,\n );\n\n if (targetTable.name === options.materializedViewName) {\n throw new Error(\n \"Materialized view name cannot be the same as the target table name.\",\n );\n }\n\n super(\n options.materializedViewName,\n [\n createMaterializedView({\n name: options.materializedViewName,\n destinationTable: targetTable.name,\n select: selectStatement,\n }),\n // Population is now handled automatically by Rust infrastructure\n // based on table engine type and whether this is a new or updated view\n ],\n [dropView(options.materializedViewName)],\n {\n pullsDataFrom: options.selectTables,\n pushesDataTo: [targetTable],\n },\n );\n\n this.targetTable = targetTable;\n }\n}\n","import { dropView } from \"../../blocks/helpers\";\nimport { Sql, toStaticQuery } from \"../../sqlHelpers\";\nimport { OlapTable } from \"./olapTable\";\nimport { SqlResource } from \"./sqlResource\";\n\n/**\n * Represents a database View, defined by a SQL SELECT statement based on one or more base tables or other views.\n * Inherits from SqlResource, providing setup (CREATE VIEW) and teardown (DROP VIEW) commands.\n */\nexport class View extends SqlResource {\n /**\n * Creates a new View instance.\n * @param name The name of the view to be created.\n * @param selectStatement The SQL SELECT statement that defines the view's logic.\n * @param baseTables An array of OlapTable or View objects that the `selectStatement` reads from. 
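With the non-deprecated `targetTable` form, a typical definition looks like the sketch below. `eventsTable` is assumed to be an `OlapTable` defined elsewhere, `ClickHouseEngines` is assumed to be in scope, and the compiler plugin fills in the schema for the explicit type parameter:

```typescript
// Illustrative target row shape for the materialized data.
interface DailyCount {
  day: string;
  count: number;
}

new MaterializedView<DailyCount>({
  materializedViewName: "daily_counts_mv",
  selectStatement:
    "SELECT toDate(timestamp) AS day, count() AS count FROM events GROUP BY day",
  selectTables: [eventsTable],
  targetTable: {
    name: "daily_counts", // must differ from materializedViewName
    engine: ClickHouseEngines.MergeTree,
    orderByFields: ["day"],
  },
});
```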
Used for dependency tracking.\n */\n constructor(\n name: string,\n selectStatement: string | Sql,\n baseTables: (OlapTable<any> | View)[],\n ) {\n if (typeof selectStatement !== \"string\") {\n selectStatement = toStaticQuery(selectStatement);\n }\n\n super(\n name,\n [\n `CREATE VIEW IF NOT EXISTS ${name} \n AS ${selectStatement}`.trim(),\n ],\n [dropView(name)],\n {\n pullsDataFrom: baseTables,\n },\n );\n }\n}\n","/**\n * Defines how Moose manages the lifecycle of database resources when your code changes.\n *\n * This enum controls the behavior when there are differences between your code definitions\n * and the actual database schema or structure.\n */\nexport enum LifeCycle {\n /**\n * Full automatic management (default behavior).\n * Moose will automatically modify database resources to match your code definitions,\n * including potentially destructive operations like dropping columns or tables.\n */\n FULLY_MANAGED = \"FULLY_MANAGED\",\n\n /**\n * Deletion-protected automatic management.\n * Moose will modify resources to match your code but will avoid destructive actions\n * such as dropping columns or tables. Only additive changes are applied.\n */\n DELETION_PROTECTED = \"DELETION_PROTECTED\",\n\n /**\n * External management - no automatic changes.\n * Moose will not modify the database resources. You are responsible for managing\n * the schema and ensuring it matches your code definitions manually.\n */\n EXTERNALLY_MANAGED = \"EXTERNALLY_MANAGED\",\n}\n","import http from \"http\";\nimport { getMooseInternal } from \"../internal\";\n\nexport type WebAppHandler = (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n) => void | Promise<void>;\n\nexport interface FrameworkApp {\n handle?: (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n next?: (err?: any) => void,\n ) => void;\n callback?: () => WebAppHandler;\n routing?: (req: http.IncomingMessage, res: http.ServerResponse) => void;\n ready?: () => PromiseLike<unknown>; // Fastify's ready method (returns FastifyInstance)\n}\n\nexport interface WebAppConfig {\n mountPath: string;\n metadata?: { description?: string };\n injectMooseUtils?: boolean;\n}\n\nconst RESERVED_MOUNT_PATHS = [\n \"/admin\",\n \"/api\",\n \"/consumption\",\n \"/health\",\n \"/ingest\",\n \"/moose\", // reserved for future use\n \"/ready\",\n \"/workflows\",\n] as const;\n\nexport class WebApp {\n name: string;\n handler: WebAppHandler;\n config: WebAppConfig;\n private _rawApp?: FrameworkApp;\n\n constructor(\n name: string,\n appOrHandler: FrameworkApp | WebAppHandler,\n config: WebAppConfig,\n ) {\n this.name = name;\n this.config = config;\n\n // Validate mountPath - it is required\n if (!this.config.mountPath) {\n throw new Error(\n `mountPath is required. Please specify a mount path for your WebApp (e.g., \"/myapi\").`,\n );\n }\n\n const mountPath = this.config.mountPath;\n\n // Check for root path - not allowed as it would overlap reserved paths\n if (mountPath === \"/\") {\n throw new Error(\n `mountPath cannot be \"/\" as it would allow routes to overlap with reserved paths: ${RESERVED_MOUNT_PATHS.join(\", \")}`,\n );\n }\n\n // Check for trailing slash\n if (mountPath.endsWith(\"/\")) {\n throw new Error(\n `mountPath cannot end with a trailing slash. 
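In practice the enum is passed through resource configs rather than used on its own; for example, guarding a pipeline against destructive migrations might look like this sketch (reusing the illustrative `UserEvent` type from earlier):

```typescript
new IngestPipeline<UserEvent>("guardedEvents", {
  table: true,
  stream: true,
  ingestApi: true,
  // Additive schema changes still apply; column/table drops are skipped.
  lifeCycle: LifeCycle.DELETION_PROTECTED,
});
```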
Remove the '/' from: \"${mountPath}\"`,\n );\n }\n\n // Check for reserved path prefixes\n for (const reserved of RESERVED_MOUNT_PATHS) {\n if (mountPath === reserved || mountPath.startsWith(`${reserved}/`)) {\n throw new Error(\n `mountPath cannot begin with a reserved path: ${RESERVED_MOUNT_PATHS.join(\", \")}. Got: \"${mountPath}\"`,\n );\n }\n }\n\n this.handler = this.toHandler(appOrHandler);\n this._rawApp =\n typeof appOrHandler === \"function\" ? undefined : appOrHandler;\n\n const webApps = getMooseInternal().webApps;\n if (webApps.has(name)) {\n throw new Error(`WebApp with name ${name} already exists`);\n }\n\n // Check for duplicate mountPath\n if (this.config.mountPath) {\n for (const [existingName, existingApp] of webApps) {\n if (existingApp.config.mountPath === this.config.mountPath) {\n throw new Error(\n `WebApp with mountPath \"${this.config.mountPath}\" already exists (used by WebApp \"${existingName}\")`,\n );\n }\n }\n }\n\n webApps.set(name, this);\n }\n\n private toHandler(appOrHandler: FrameworkApp | WebAppHandler): WebAppHandler {\n if (typeof appOrHandler === \"function\") {\n return appOrHandler as WebAppHandler;\n }\n\n const app = appOrHandler as FrameworkApp;\n\n if (typeof app.handle === \"function\") {\n return (req, res) => {\n app.handle!(req, res, (err?: any) => {\n if (err) {\n console.error(\"WebApp handler error:\", err);\n if (!res.headersSent) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Internal Server Error\" }));\n }\n }\n });\n };\n }\n\n if (typeof app.callback === \"function\") {\n return app.callback();\n }\n\n // Fastify: routing is a function that handles requests directly\n // Fastify requires .ready() to be called before routes are available\n if (typeof app.routing === \"function\") {\n // Capture references to avoid TypeScript narrowing issues in closure\n const routing = app.routing;\n const appWithReady = app;\n\n // Use lazy initialization - don't call ready() during module loading\n // This prevents blocking the event loop when streaming functions import the app module\n // The ready() call is deferred to the first actual HTTP request\n let readyPromise: PromiseLike<unknown> | null = null;\n\n return async (req, res) => {\n // Lazy init - only call ready() when first request comes in\n if (readyPromise === null) {\n readyPromise =\n typeof appWithReady.ready === \"function\" ?\n appWithReady.ready()\n : Promise.resolve();\n }\n await readyPromise;\n routing(req, res);\n };\n }\n\n throw new Error(\n `Unable to convert app to handler. The provided object must be:\n - A function (raw Node.js handler)\n - An object with .handle() method (Express, Connect)\n - An object with .callback() method (Koa)\n - An object with .routing function (Fastify)\n \nExamples:\n Express: new WebApp(\"name\", expressApp)\n Koa: new WebApp(\"name\", koaApp)\n Fastify: new WebApp(\"name\", fastifyApp)\n Raw: new WebApp(\"name\", (req, res) => { ... })\n `,\n );\n }\n\n getRawApp(): FrameworkApp | undefined {\n return this._rawApp;\n }\n}\n","/**\n * @module registry\n * Public registry functions for accessing Moose Data Model v2 (dmv2) resources.\n *\n * This module provides functions to retrieve registered resources like tables, streams,\n * APIs, and more. 
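The error message above doubles as a summary of the supported shapes. Mounting an Express app, for instance, goes through its `handle` method; a sketch where the route and mount path are illustrative, and the mount path must avoid the reserved prefixes:

```typescript
import express from "express";
import { WebApp } from "@514labs/moose-lib";

const app = express();
app.get("/hello", (_req, res) => {
  res.json({ ok: true });
});

// "/tools" is safe; "/api" (reserved) or "/tools/" (trailing slash) would throw.
new WebApp("tools", app, { mountPath: "/tools" });
```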
These functions are part of the public API and can be used by\n * user applications to inspect and interact with registered Moose resources.\n */\n\nimport { OlapTable } from \"./sdk/olapTable\";\nimport { Stream } from \"./sdk/stream\";\nimport { IngestApi } from \"./sdk/ingestApi\";\nimport { Api } from \"./sdk/consumptionApi\";\nimport { SqlResource } from \"./sdk/sqlResource\";\nimport { Workflow } from \"./sdk/workflow\";\nimport { WebApp } from \"./sdk/webApp\";\nimport { getMooseInternal } from \"./internal\";\n\n/**\n * Get all registered OLAP tables.\n * @returns A Map of table name to OlapTable instance\n */\nexport function getTables(): Map<string, OlapTable<any>> {\n return getMooseInternal().tables;\n}\n\n/**\n * Get a registered OLAP table by name.\n * @param name - The name of the table\n * @returns The OlapTable instance or undefined if not found\n */\nexport function getTable(name: string): OlapTable<any> | undefined {\n return getMooseInternal().tables.get(name);\n}\n\n/**\n * Get all registered streams.\n * @returns A Map of stream name to Stream instance\n */\nexport function getStreams(): Map<string, Stream<any>> {\n return getMooseInternal().streams;\n}\n\n/**\n * Get a registered stream by name.\n * @param name - The name of the stream\n * @returns The Stream instance or undefined if not found\n */\nexport function getStream(name: string): Stream<any> | undefined {\n return getMooseInternal().streams.get(name);\n}\n\n/**\n * Get all registered ingestion APIs.\n * @returns A Map of API name to IngestApi instance\n */\nexport function getIngestApis(): Map<string, IngestApi<any>> {\n return getMooseInternal().ingestApis;\n}\n\n/**\n * Get a registered ingestion API by name.\n * @param name - The name of the ingestion API\n * @returns The IngestApi instance or undefined if not found\n */\nexport function getIngestApi(name: string): IngestApi<any> | undefined {\n return getMooseInternal().ingestApis.get(name);\n}\n\n/**\n * Get all registered APIs (consumption/egress APIs).\n * @returns A Map of API key to Api instance\n */\nexport function getApis(): Map<string, Api<any>> {\n return getMooseInternal().apis;\n}\n\n/**\n * Get a registered API by name, version, or path.\n *\n * Supports multiple lookup strategies:\n * 1. Direct lookup by full key (name:version or name for unversioned)\n * 2. Lookup by name with automatic version aliasing when only one versioned API exists\n * 3. 
Lookup by custom path (if configured)\n *\n * @param nameOrPath - The name, name:version, or custom path of the API\n * @returns The Api instance or undefined if not found\n */\nexport function getApi(nameOrPath: string): Api<any> | undefined {\n const registry = getMooseInternal();\n\n // Try direct lookup first (full key: name or name:version)\n const directMatch = registry.apis.get(nameOrPath);\n if (directMatch) {\n return directMatch;\n }\n\n // Build alias maps on-demand for unversioned lookups\n const versionedApis = new Map<string, Api<any>[]>();\n const pathMap = new Map<string, Api<any>>();\n\n registry.apis.forEach((api, key) => {\n // Track APIs by base name for aliasing\n const baseName = api.name;\n if (!versionedApis.has(baseName)) {\n versionedApis.set(baseName, []);\n }\n versionedApis.get(baseName)!.push(api);\n\n // Track APIs by custom path\n if (api.config.path) {\n pathMap.set(api.config.path, api);\n }\n });\n\n // Try alias lookup: if there's exactly one API with this base name, return it\n const candidates = versionedApis.get(nameOrPath);\n if (candidates && candidates.length === 1) {\n return candidates[0];\n }\n\n // Try path-based lookup\n return pathMap.get(nameOrPath);\n}\n\n/**\n * Get all registered SQL resources.\n * @returns A Map of resource name to SqlResource instance\n */\nexport function getSqlResources(): Map<string, SqlResource> {\n return getMooseInternal().sqlResources;\n}\n\n/**\n * Get a registered SQL resource by name.\n * @param name - The name of the SQL resource\n * @returns The SqlResource instance or undefined if not found\n */\nexport function getSqlResource(name: string): SqlResource | undefined {\n return getMooseInternal().sqlResources.get(name);\n}\n\n/**\n * Get all registered workflows.\n * @returns A Map of workflow name to Workflow instance\n */\nexport function getWorkflows(): Map<string, Workflow> {\n return getMooseInternal().workflows;\n}\n\n/**\n * Get a registered workflow by name.\n * @param name - The name of the workflow\n * @returns The Workflow instance or undefined if not found\n */\nexport function getWorkflow(name: string): Workflow | undefined {\n return getMooseInternal().workflows.get(name);\n}\n\n/**\n * Get all registered web apps.\n * @returns A Map of web app name to WebApp instance\n */\nexport function getWebApps(): Map<string, WebApp> {\n return getMooseInternal().webApps;\n}\n\n/**\n * Get a registered web app by name.\n * @param name - The name of the web app\n * @returns The WebApp instance or undefined if not found\n */\nexport function getWebApp(name: string): WebApp | undefined {\n return getMooseInternal().webApps.get(name);\n}\n","/**\n * @module dmv2\n * This module defines the core Moose v2 data model constructs, including OlapTable, Stream, IngestApi, Api,\n * IngestPipeline, View, and MaterializedView. 
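The three strategies line up with the registration keys described earlier, so all of the following can resolve the same illustrative API from the registration sketch above:

```typescript
import { getApi } from "@514labs/moose-lib";

getApi("dailyReport:1.1"); // 1. direct key lookup (name:version)
getApi("dailyReport");     // 2. base-name alias, only if exactly one version is registered
getApi("reports/daily");   // 3. custom-path lookup
```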
These classes provide a typed interface for defining and managing\n * data infrastructure components like ClickHouse tables, Redpanda streams, and data processing pipelines.\n */\n\n/**\n * A helper type used potentially for indicating aggregated fields in query results or schemas.\n * Captures the aggregation function name and argument types.\n * (Usage context might be specific to query builders or ORM features).\n *\n * @template AggregationFunction The name of the aggregation function (e.g., 'sum', 'avg', 'count').\n * @template ArgTypes An array type representing the types of the arguments passed to the aggregation function.\n */\nexport type Aggregated<\n AggregationFunction extends string,\n ArgTypes extends any[] = [],\n> = {\n _aggregationFunction?: AggregationFunction;\n _argTypes?: ArgTypes;\n};\n\n/**\n * A helper type for SimpleAggregateFunction in ClickHouse.\n * SimpleAggregateFunction stores the aggregated value directly instead of intermediate states,\n * offering better performance for functions like sum, max, min, any, anyLast, etc.\n *\n * @template AggregationFunction The name of the simple aggregation function (e.g., 'sum', 'max', 'anyLast').\n * @template ArgType The type of the argument (and result) of the aggregation function.\n *\n * @example\n * ```typescript\n * interface Stats {\n * rowCount: number & SimpleAggregated<'sum', number>;\n * maxValue: number & SimpleAggregated<'max', number>;\n * lastStatus: string & SimpleAggregated<'anyLast', string>;\n * }\n * ```\n */\nexport type SimpleAggregated<\n AggregationFunction extends string,\n ArgType = any,\n> = {\n _simpleAggregationFunction?: AggregationFunction;\n _argType?: ArgType;\n};\n\nexport { OlapTable, OlapConfig, S3QueueTableSettings } from \"./sdk/olapTable\";\nexport {\n Stream,\n StreamConfig,\n DeadLetterModel,\n DeadLetter,\n DeadLetterQueue,\n ConsumerConfig,\n TransformConfig,\n} from \"./sdk/stream\";\n\nexport { Workflow, Task } from \"./sdk/workflow\";\nexport type { TaskContext, TaskConfig } from \"./sdk/workflow\";\n\nexport { IngestApi, IngestConfig } from \"./sdk/ingestApi\";\nexport {\n Api,\n ApiConfig,\n EgressConfig,\n ConsumptionApi,\n} from \"./sdk/consumptionApi\";\nexport { IngestPipeline, IngestPipelineConfig } from \"./sdk/ingestPipeline\";\nexport { ETLPipeline, ETLPipelineConfig } from \"./sdk/etlPipeline\";\nexport {\n MaterializedView,\n MaterializedViewConfig,\n} from \"./sdk/materializedView\";\nexport { SqlResource } from \"./sdk/sqlResource\";\nexport { View } from \"./sdk/view\";\nexport { LifeCycle } from \"./sdk/lifeCycle\";\nexport {\n WebApp,\n WebAppConfig,\n WebAppHandler,\n FrameworkApp,\n} from \"./sdk/webApp\";\n\nexport {\n getTables,\n getTable,\n getStreams,\n getStream,\n getIngestApis,\n getIngestApi,\n getApis,\n getApi,\n getSqlResources,\n getSqlResource,\n getWorkflows,\n getWorkflow,\n getWebApps,\n getWebApp,\n} from 
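`Aggregated` follows the same intersection-type pattern as the `SimpleAggregated` example above, but carries a full argument tuple — mirroring ClickHouse `AggregateFunction` columns, which hold intermediate aggregation states rather than final values. A sketch (the interface and its ClickHouse mapping are illustrative):

```typescript
interface DailyStats {
  day: string;
  // Models an AggregateFunction(avg, Float64)-style column (intermediate state).
  avgLatency: number & Aggregated<"avg", [number]>;
  // Models a SimpleAggregateFunction(sum, Float64)-style column (final value).
  totalLatency: number & SimpleAggregated<"sum", number>;
}
```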
\"./registry\";\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkCA,SAAS,oBAAoB,MAAuB;AAClD,SACE,KAAK,SAAS,cAAc;AAAA,EAC5B,KAAK,SAAS,eAAe;AAAA,EAC7B,KAAK,SAAS,kBAAkB;AAAA,EAChC,KAAK,SAAS,SAAS;AAAA,EACvB,KAAK,SAAS,oBAAoB;AAAA,EAClC,KAAK,SAAS,uBAAuB;AAAA,EACrC,KAAK,SAAS,qBAAqB;AAAA,EACnC,KAAK,SAAS,wBAAwB;AAE1C;AAMA,SAAS,eAAe,MAA0C;AAChE,QAAM,QACJ,KAAK,MAAM,sBAAsB,KAAK,KAAK,MAAM,qBAAqB;AACxE,MAAI,SAAS,MAAM,CAAC,GAAG;AACrB,WAAO;AAAA,MACL,MAAM,MAAM,CAAC;AAAA,MACb,MAAM,MAAM,CAAC;AAAA,IACf;AAAA,EACF;AACA,SAAO;AACT;AASO,SAAS,kBAAkB,OAAgC;AAChE,MAAI,CAAC,MAAO,QAAO,CAAC;AACpB,QAAM,QAAQ,MAAM,MAAM,IAAI;AAC9B,aAAW,QAAQ,OAAO;AACxB,QAAI,oBAAoB,IAAI,EAAG;AAC/B,UAAM,OAAO,eAAe,IAAI;AAChC,QAAI,KAAM,QAAO;AAAA,EACnB;AACA,SAAO,CAAC;AACV;AAYO,SAAS,2BACd,OAC4B;AAC5B,MAAI,CAAC,MAAO,QAAO;AAEnB,QAAM,QAAQ,MAAM,MAAM,IAAI;AAG9B,aAAW,QAAQ,MAAM,MAAM,CAAC,GAAG;AAEjC,QAAI,oBAAoB,IAAI,GAAG;AAC7B;AAAA,IACF;AAGA,UAAM,UAAU,KAAK,MAAM,uCAAuC;AAClE,QAAI,SAAS;AACX,aAAO;AAAA,QACL,MAAM,QAAQ,CAAC;AAAA,QACf,MAAM,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,QAC7B,QAAQ,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,MACjC;AAAA,IACF;AAGA,UAAM,UAAU,KAAK,MAAM,0BAA0B;AACrD,QAAI,SAAS;AACX,aAAO;AAAA,QACL,MAAM,QAAQ,CAAC;AAAA,QACf,MAAM,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,QAC7B,QAAQ,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,MACjC;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAUO,SAAS,uBAAuB,OAAoC;AACzE,QAAM,WAAW,2BAA2B,KAAK;AACjD,SAAO,UAAU;AACnB;AA5IA;AAAA;AAAA;AAAA;AAAA;;;ACAA,IAwBa;AAxBb;AAAA;AAAA;AAEA;AAsBO,IAAM,YAAN,MAAsB;AAAA;AAAA,MAE3B;AAAA;AAAA,MAEA;AAAA;AAAA,MAGA;AAAA;AAAA,MAIA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAYA,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,YAAI,WAAW,UAAa,YAAY,QAAW;AACjD,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,aAAK,SAAS;AACd,aAAK,cAAc;AACnB,cAAM,aAAa,CAAC;AACpB,gBAAQ,QAAQ,CAAC,WAAW;AAC1B,qBAAW,OAAO,IAAI,IAAI;AAAA,QAC5B,CAAC;AACD,aAAK,UAAU;AAEf,aAAK,OAAO;AACZ,aAAK,SAAS;AACd,aAAK,aAAa;AAClB,aAAK,mBAAmB,oBAAoB;AAG5C,aAAK,WACF,QAAgB,WAAW,EAAE,GAAI,OAAe,SAAS,IAAI,CAAC;AAEjE,YAAI,CAAC,KAAK,SAAS,QAAQ;AACzB,gBAAM,QAAQ,IAAI,MAAM,EAAE;AAC1B,cAAI,OAAO;AACT,kBAAM,OAAO,kBAAkB,KAAK;AACpC,iBAAK,SAAS,SAAS,EAAE,MAAM,KAAK,MAAM,MAAM,KAAK,KAAK;AAAA,UAC5D;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;ACQO,SAAS,kBACd,IAC2C;AAC3C,SACE,OAAO,OAAO,YACd,OAAO,QACN,GAAiB,gBAAgB,QAClC,OAAQ,GAAiB,gBAAgB,YACxC,GAAiB,YAAY,eAAe,SAAS,KACtD,MAAM,QAAU,GAAiB,YAAuB,OAAO;AAEnE;AAKO,SAAS,aAAa,IAA4B;AACvD,SACE,OAAO,OAAO,YACd,OAAO,QACP,MAAM,QAAS,GAAc,OAAO;AAExC;AApIA;AAAA;AAAA;AAAA;AAAA;;;ACgOO,SAAS,0BACd,gBACA,OACA;AAGA,SAAO,KAAK,cAAc,IAAI,oBAAoB,KAAK,CAAC;AAC1D;AA0BA,SAAS,iBAAiB,OAAmC;AAC3D,SAAO,UAAU,SAAY,KAAK;AACpC;AAnQA,IAUa,iBAqIA,eAUA,SAgEA,uBAyBA;AAlPb;AAAA;AAAA;AAUO,IAAM,kBAAkB,CAAC,SAAyB;AACvD,aAAO,KAAK,WAAW,GAAG,KAAK,KAAK,SAAS,GAAG,IAAI,OAAO,KAAK,IAAI;AAAA,IACtE;AAmIO,IAAM,gBAAgB,CAACA,SAAqB;AACjD,YAAM,CAAC,OAAO,MAAM,IAAI,QAAQA,IAAG;AACnC,UAAI,OAAO,KAAK,MAAM,EAAE,WAAW,GAAG;AACpC,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AACA,aAAO;AAAA,IACT;AAEO,IAAM,UAAU,CAACA,SAA8C;AACpE,YAAM,qBAAqBA,KAAI,OAAO;AAAA,QAAI,CAAC,GAAG,MAC5C,0BAA0B,GAAG,CAAC;AAAA,MAChC;AAEA,YAAM,QAAQA,KAAI,QACf;AAAA,QAAI,CAAC,GAAG,MACP,KAAK,KAAK,GAAG,CAAC,GAAG,iBAAiB,mBAAmB,CAAC,CAAC,CAAC,KAAK;AAAA,MAC/D,EACC,KAAK,EAAE;AAEV,YAAM,eAAeA,KAAI,OAAO;AAAA,QAC9B,CAAC,KAA8B,GAAG,OAAO;AAAA,UACvC,GAAG;AAAA,UACH,CAAC,IAAI,CAAC,EAAE,GAAG,sBAAsB,CAAC;AAAA,QACpC;AAAA,QACA,CAAC;AAAA,MACH;AACA,aAAO,CAAC,OAAO,YAAY;AAAA,IAC7B;AA6CO,IAAM,wBAAwB,CAAC,UAAe;AACnD,UAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,cAAM,CAAC,MAAM,GAAG,IAAI;AACpB,YAAI,SAAS,aAAc,QAAO;AAAA,MACpC;AACA,aAAO;AAAA,IAC
T;AAmBO,IAAM,sBAAsB,CAAC,UAAiB;AACnD,UAAI,OAAO,UAAU,UAAU;AAE7B,eAAO,OAAO,UAAU,KAAK,IAAI,QAAQ;AAAA,MAC3C;AAGA,UAAI,OAAO,UAAU,UAAW,QAAO;AACvC,UAAI,iBAAiB,KAAM,QAAO;AAClC,UAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,cAAM,CAAC,MAAM,CAAC,IAAI;AAClB,eAAO;AAAA,MACT;AACA,aAAO;AAAA,IACT;AAAA;AAAA;;;ACnMO,SAAS,SAAS,MAAsB;AAC7C,SAAO,uBAAuB,gBAAgB,IAAI,CAAC,GAAG,KAAK;AAC7D;AAKO,SAAS,uBACd,SACQ;AACR,SAAO,0CAA0C,gBAAgB,QAAQ,IAAI,CAAC;AAAA,aACnE,gBAAgB,QAAQ,gBAAgB,CAAC;AAAA,aACzC,QAAQ,MAAM,GAAG,KAAK;AACnC;AA1EA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAIA;AA+CA;AAoCA;AAAA;AAAA;;;ACvFA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiBA,SAAS,SAAS,OAAoC;AACpD,MAAI,CAAC,MAAO,QAAO;AACnB,UAAQ,MAAM,KAAK,EAAE,YAAY,GAAG;AAAA,IAClC,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT;AACE,aAAO;AAAA,EACX;AACF;AA4EO,SAAS,UAAU,UAA0B;AAClD,SAAO,SACJ,QAAQ,SAAS,KAAK,EACtB,QAAQ,UAAU,MAAM,EACxB,QAAQ,UAAU,MAAM;AAC7B;AAkBO,SAAS,qBAAqB,iBAA0B;AAC7D,SAAO;AAAA,IACL,SAAS;AAAA,MACP,YAAY;AAAA;AAAA,MACZ,MAAM;AAAA,MACN,OAAO;AAAA,QACL,SAAS;AAAA,QACT,cAAc;AAAA,MAChB;AAAA,IACF;AAAA,IACA,aAAa;AAAA;AAAA,IACb,GAAI,mBAAmB,EAAE,qBAAqB,gBAAgB;AAAA,EAChE;AACF;AAgCA,eAAsB,iBACpB,KACA,QACA,iBACmB;AACnB,QAAM,QAAQ,MAAM,eAAe,KAAK,MAAM;AAE9C,QAAM,WAAW,MAAM,SAAS,qBAAqB,eAAe,CAAC;AACrE,QAAM,SAAS,QAAQ;AACvB,SAAO;AACT;AAtLA,iBACA,eACA,yBAEQ,OA0BK,aAMA,eAGA,aAkBA,qBA4BA,QA0BA,aACA,mBACA,uBAEA,sBACA,uBAEA,MA+BP,mBA6CO,UAWP,iBAwBO;AArOb;AAAA;AAAA;AAAA,kBAAiB;AACjB,oBAA6B;AAC7B,8BAAwB;AAExB,KAAM,EAAE,UAAU;AA0BX,IAAM,cAAc,CAAC,YAAoB;AAC9C,UAAI,CAAC,SAAS,QAAQ,IAAI,2BAA2B,GAAG;AACtD,gBAAQ,IAAI,OAAO;AAAA,MACrB;AAAA,IACF;AAEO,IAAM,gBAAgB,CAACC,UAC5B,GAAGA,KAAI,QAAQ,KAAK,OAAO,EAAE,SAAS,CAAC,SAAS,KAAK,IAAI,CAAC;AAErD,IAAM,cAAc,CAAC,aAAqB;AAC/C,YAAM,QAAQ;AACd,YAAM,UAAU,SAAS,MAAM,KAAK;AACpC,UAAI,WAAW,QAAQ,SAAS,GAAG;AACjC,eAAO,QAAQ,CAAC;AAAA,MAClB;AACA,aAAO;AAAA,IACT;AAWO,IAAM,sBAAsB,CAAC;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,MAAoB;AAClB,YAAM,WACJ,WAAW,OAAO,OAAO,YAAY,MAAM,SAAS,UAAU;AAChE,cAAQ,IAAI,+BAA+B,QAAQ,MAAM,IAAI,IAAI,IAAI,EAAE;AACvE,iBAAO,4BAAa;AAAA,QAClB,KAAK,GAAG,QAAQ,MAAM,IAAI,IAAI,IAAI;AAAA,QAClC;AAAA,QACA;AAAA,QACA;AAAA,QACA,aAAa;AAAA;AAAA;AAAA,MAGf,CAAC;AAAA,IACH;AAQO,IAAM,SAAoC,CAAC,QAAQ;AACxD,YAAM,MAAM,YAAAC,QAAK,QAAQ;AAAA,QACvB,MAAM,SAAS,QAAQ,IAAI,yBAAyB,MAAM;AAAA,QAC1D,QAAQ;AAAA,QACR,MAAM;AAAA,MACR,CAAC;AAED,UAAI,GAAG,SAAS,CAAC,QAAe;AAC9B,gBAAQ,IAAI,SAAS,IAAI,IAAI,qBAAqB,IAAI,OAAO;AAAA,MAC/D,CAAC;AAED,UAAI,MAAM,KAAK,UAAU,EAAE,cAAc,QAAQ,GAAG,IAAI,CAAC,CAAC;AAC1D,UAAI,IAAI;AAAA,IACV;AAaO,IAAM,cAAc;AACpB,IAAM,oBAAoB;AAC1B,IAAM,wBAAwB;AAE9B,IAAM,uBAAuB;AAC7B,IAAM,wBAAwB;AAE9B,IAAM,OAAO;AA+BpB,IAAM,oBAAoB,CAAC,iBACzB,aACG,MAAM,GAAG,EACT,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC,EACnB,OAAO,CAAC,MAAM,EAAE,SAAS,CAAC;AAyCxB,IAAM,WAAW,CAAC,QAAgB,MAAmB;AAC1D,aAAO,MAAM,EAAE,OAAO;AACtB,YAAM,QAAQ,EAAE;AAChB,UAAI,OAAO;AACT,eAAO,MAAM,KAAK;AAAA,MACpB;AAAA,IACF;AAKA,IAAM,kBAAkB,CACtB,QACA,SAC4B;AAC5B,YAAM,YAAY,KAAK,gBAAgB,KAAK,cAAc,YAAY,IAAI;AAC1E,cAAQ,WAAW;AAAA,QACjB,KAAK;AAAA,QACL,KAAK;AAAA,QACL,KAAK;AACH,iBAAO;AAAA,YACL;AAAA,YACA,UAAU,KAAK,gBAAgB;AAAA,YAC/B,UAAU,KAAK,gBAAgB;AAAA,UACjC;AAAA,QACF;AACE,iBAAO,KAAK,+BAA+B,KAAK,aAAa,EAAE;AAC/D,iBAAO;AAAA,MACX;AAAA,IACF;AAMO,IAAM,iBAAiB,OAC5B,KACA,WACmB;AACnB,YAAM,UAAU,kBAAkB,IAAI,UAAU,EAAE;AAClD,UAAI,QAAQ,WAAW,GAAG;AACxB,cAAM,IAAI,MAAM,wCAAwC,IAAI,MAAM,GAAG;AAAA,MACvE;AAEA,aAAO,IAAI,uCAAuC,QAAQ,KAAK,IAAI,CAAC,EAAE;AACtE,aAAO,IAAI,sBAAsB,IAAI,oBAAoB,WAAW,EAAE;AACtE,aAAO,IAAI,cAAc,IAAI,QAAQ,EAAE;AAEvC,YAAM,aAAa,gBAAgB,QAAQ,GA
AG;AAE9C,aAAO,IAAI,MAAM;AAAA,QACf,SAAS;AAAA,UACP,UAAU,IAAI;AAAA,UACd;AAAA,UACA,KAAK,IAAI,qBAAqB;AAAA,UAC9B,GAAI,cAAc,EAAE,MAAM,WAAW;AAAA,UACrC,OAAO;AAAA,YACL,kBAAkB;AAAA,YAClB,cAAc;AAAA,YACd,SAAS;AAAA,UACX;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAAA;AAAA;;;ACjQA;AAAA;AAAA;AAAA;AAAA;;;ACAA,IACAC,gBAMA;AAPA,IAAAC,gBAAA;AAAA;AAAA;AACA,IAAAD,iBAIO;AAEP,yBAAuC;AAGvC;AAEA;AAAA;AAAA;;;ACZA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,yBACA,gBACA;AAFA;AAAA;AAAA;AAAA,0BAAoB;AACpB,qBAAqC;AACrC,0BAAqB;AAAA;AAAA;;;ACFrB,IAGA;AAHA;AAAA;AAAA;AACA;AACA,IAAAE;AACA,WAAsB;AAEtB;AAEA;AAEA;AAAA;AAAA;;;ACTA;AAAA;AAAA;AAAA;AAAA,mBAA8C;AAAA;AAAA;;;ACA9C;AAAA;AAAA;AAAA,IAAAC;AACA;AACA;AAAA;AAAA;;;ACFA;AAAA;AAAA;AAAA;AAAA;;;ACAA,sBAwGa,gBAUA;AAlHb;AAAA;AAAA;AAAA,uBAAsB;AACtB;AAuGO,IAAM,iBAAiB;AAAA,MAC5B,OAAO;AAAA,MACP,KAAK;AAAA,MACL,WAAW;AAAA,MACX,MAAM;AAAA,IACR;AAKO,IAAM,qBAAuC;AAAA,MAClD,WAAW,eAAe;AAAA,MAC1B,SAAS;AAAA,MACT,gBAAgB;AAAA,MAChB,MAAM;AAAA,IACR;AAAA;AAAA;;;ACvHA;AAAA;AAAA;AAEA;AAAA;AAAA;;;ACFA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAAA;AAaA;AACA;AACA;AACA,IAAAC;AACA;AAMA;AAEA;AAEA;AAEA,IAAAA;AAEA;AAEA;AAEA;AACA;AACA;AAAA;AAAA;;;ACrCA,IAaA,gBA4Ca,kBAQP,gBAYA,wBAy+BO,kBAqKA,WA0DA;AArxCb;AAAA;AAAA;AAaA,qBAAoB;AAIpB;AAoBA;AAoBO,IAAM,mBAAmB,MAC9B,eAAAC,QAAQ,IAAI,sBAAsB;AAOpC,IAAM,iBAAiB;AAAA,MACrB,QAAQ,oBAAI,IAA4B;AAAA,MACxC,SAAS,oBAAI,IAAyB;AAAA,MACtC,YAAY,oBAAI,IAA4B;AAAA,MAC5C,MAAM,oBAAI,IAAsB;AAAA,MAChC,cAAc,oBAAI,IAAyB;AAAA,MAC3C,WAAW,oBAAI,IAAsB;AAAA,MACrC,SAAS,oBAAI,IAAoB;AAAA,IACnC;AAIA,IAAM,yBAAyB,KAAK,KAAK,KAAK;AAy+BvC,IAAM,mBAAmB,MAC7B,WAAmB;AAGtB,QAAI,iBAAiB,MAAM,QAAW;AACpC,MAAC,WAAmB,iBAAiB;AAAA,IACvC;AA+JO,IAAM,YAAyC;AAAA,MACpD,SAAS;AAAA,MACT,YAAY;AAAA,QACV,SAAS;AAAA,UACP,iBAAiB;AAAA,YACf,MAAM;AAAA,YACN,YAAY;AAAA,cACV,gBAAgB;AAAA,gBACd,MAAM;AAAA,cACR;AAAA,cACA,cAAc;AAAA,gBACZ,MAAM;AAAA,cACR;AAAA,cACA,WAAW;AAAA,gBACT,MAAM;AAAA,cACR;AAAA,cACA,UAAU;AAAA,gBACR,MAAM;AAAA,gBACN,QAAQ;AAAA,cACV;AAAA,cACA,QAAQ;AAAA,gBACN,OAAO;AAAA,kBACL;AAAA,oBACE,OAAO;AAAA,kBACT;AAAA,kBACA;AAAA,oBACE,OAAO;AAAA,kBACT;AAAA,kBACA;AAAA,oBACE,OAAO;AAAA,kBACT;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,YACA,UAAU;AAAA,cACR;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF;AAAA,UACA,iBAAiB;AAAA,YACf,MAAM;AAAA,YACN,YAAY,CAAC;AAAA,YACb,UAAU,CAAC;AAAA,YACX,aAAa;AAAA,YACb,sBAAsB,CAAC;AAAA,UACzB;AAAA,QACF;AAAA,MACF;AAAA,MACA,SAAS;AAAA,QACP;AAAA,UACE,MAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEO,IAAM,aAAuB;AAAA,MAClC;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA;AAAA;;;ACjxCA,eAAe,eACb,WAAmB,QAAQ,IAAI,GACP;AACxB,QAAM,KAAK,MAAM,OAAO,IAAS;AAEjC,MAAI,aAAa,iBAAAC,QAAK,QAAQ,QAAQ;AAEtC,SAAO,MAAM;AACX,UAAM,aAAa,iBAAAA,QAAK,KAAK,YAAY,mBAAmB;AAC5D,Q
AAI,GAAG,WAAW,UAAU,GAAG;AAC7B,aAAO;AAAA,IACT;AAEA,UAAM,YAAY,iBAAAA,QAAK,QAAQ,UAAU;AACzC,QAAI,cAAc,YAAY;AAE5B;AAAA,IACF;AACA,iBAAa;AAAA,EACf;AAEA,SAAO;AACT;AAKA,eAAsB,oBAA4C;AAChE,QAAM,KAAK,MAAM,OAAO,IAAS;AACjC,QAAM,aAAa,MAAM,eAAe;AACxC,MAAI,CAAC,YAAY;AACf,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,MAAI;AACF,UAAM,gBAAgB,GAAG,aAAa,YAAY,OAAO;AACzD,UAAM,SAAc,WAAM,aAAa;AACvC,WAAO;AAAA,EACT,SAAS,OAAO;AACd,UAAM,IAAI,YAAY,sCAAsC,KAAK,EAAE;AAAA,EACrE;AACF;AAjHA,sBACA,MA2Da;AA5Db;AAAA;AAAA;AAAA,uBAAiB;AACjB,WAAsB;AA2Df,IAAM,cAAN,cAA0B,MAAM;AAAA,MACrC,YAAY,SAAiB;AAC3B,cAAM,OAAO;AACb,aAAK,OAAO;AAAA,MACd;AAAA,IACF;AAAA;AAAA;;;ACjEA;AAAA,IAsBM;AAtBN;AAAA;AAAA;AAAA;AAsBA,IAAM,wBAAN,MAAM,uBAAsB;AAAA,MAC1B,OAAe;AAAA,MACP;AAAA,MACA;AAAA,MAER,OAAO,cAAqC;AAC1C,YAAI,CAAC,uBAAsB,UAAU;AACnC,iCAAsB,WAAW,IAAI,uBAAsB;AAAA,QAC7D;AACA,eAAO,uBAAsB;AAAA,MAC/B;AAAA,MAEA,oBAAoB,QAAuC;AACzD,aAAK,mBAAmB;AAAA,MAC1B;AAAA,MAEA,eAAe,QAAkC;AAC/C,aAAK,cAAc;AAAA,MACrB;AAAA,MAEQ,KAAK,MAAkC;AAC7C,cAAM,QAAQ,QAAQ,IAAI,IAAI;AAC9B,YAAI,UAAU,OAAW,QAAO;AAChC,cAAM,UAAU,MAAM,KAAK;AAC3B,eAAO,QAAQ,SAAS,IAAI,UAAU;AAAA,MACxC;AAAA,MAEQ,WAAW,OAAgD;AACjE,YAAI,UAAU,OAAW,QAAO;AAChC,gBAAQ,MAAM,KAAK,EAAE,YAAY,GAAG;AAAA,UAClC,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AACH,mBAAO;AAAA,UACT,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AACH,mBAAO;AAAA,UACT;AACE,mBAAO;AAAA,QACX;AAAA,MACF;AAAA,MAEA,MAAM,sBAAwD;AAC5D,YAAI,KAAK,kBAAkB;AACzB,iBAAO,KAAK;AAAA,QACd;AAGA,cAAM,gBAAgB,MAAM,kBAAkB;AAC9C,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,UAAU,KAAK,KAAK,oCAAoC;AAC9D,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,cAAc,KAAK,KAAK,mCAAmC;AACjE,cAAM,QAAQ,KAAK,KAAK,kCAAkC;AAC1D,cAAM,YAAY,KAAK;AAAA,UACrB,KAAK,KAAK,kCAAkC;AAAA,QAC9C;AAEA,eAAO;AAAA,UACL,MAAM,WAAW,cAAc,kBAAkB;AAAA,UACjD,MAAM,WAAW,cAAc,kBAAkB,UAAU,SAAS;AAAA,UACpE,UAAU,WAAW,cAAc,kBAAkB;AAAA,UACrD,UAAU,eAAe,cAAc,kBAAkB;AAAA,UACzD,UAAU,SAAS,cAAc,kBAAkB;AAAA,UACnD,QACE,cAAc,SAAY,YACxB,cAAc,kBAAkB,WAAW;AAAA,QAEjD;AAAA,MACF;AAAA,MAEA,MAAM,8BACJ,WACkC;AAClC,YAAI,KAAK,kBAAkB;AACzB,iBAAO,EAAE,GAAG,KAAK,kBAAkB,GAAG,UAAU;AAAA,QAClD;AAEA,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,UAAU,KAAK,KAAK,oCAAoC;AAC9D,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,cAAc,KAAK,KAAK,mCAAmC;AACjE,cAAM,QAAQ,KAAK,KAAK,kCAAkC;AAC1D,cAAM,YAAY,KAAK;AAAA,UACrB,KAAK,KAAK,kCAAkC;AAAA,QAC9C;AAEA,YAAI;AACJ,YAAI;AACF,0BAAgB,MAAM,kBAAkB;AAAA,QAC1C,SAAS,OAAO;AACd,0BAAgB;AAAA,QAClB;AAEA,cAAM,WAAW;AAAA,UACf,MAAM;AAAA,UACN,MAAM;AAAA,UACN,UAAU;AAAA,UACV,UAAU;AAAA,UACV,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,eAAO;AAAA,UACL,MACE,WAAW,QACX,WACA,eAAe,kBAAkB,QACjC,SAAS;AAAA,UACX,MACE,WAAW,QACX,WACA,eAAe,kBAAkB,UAAU,SAAS,KACpD,SAAS;AAAA,UACX,UACE,WAAW,YACX,WACA,eAAe,kBAAkB,QACjC,SAAS;AAAA,UACX,UACE,WAAW,YACX,eACA,eAAe,kBAAkB,YACjC,SAAS;AAAA,UACX,UACE,WAAW,YACX,SACA,eAAe,kBAAkB,WACjC,SAAS;AAAA,UACX,QACE,WAAW,UACX,aACA,eAAe,kBAAkB,WACjC,SAAS;AAAA,QACb;AAAA,MACF;AAAA,MAEA,MAAM,iBAA8C;AAClD,YAAI,KAAK,aAAa;AACpB,iBAAO,KAAK;AAAA,QACd;AAEA,cAAM,gBAAgB,MAAM,kBAAkB;AAE9C,cAAM,YACJ,KAAK,KAAK,+BAA+B,KACzC,KAAK,KAAK,4BAA4B;AACxC,cAAM,gBACJ,KAAK,KAAK,2CAA2C,KACrD,KAAK,KAAK,wCAAwC;AACpD,cAAM,kBACJ,KAAK,KAAK,sCAAsC,KAChD,KAAK,KAAK,mCAAmC;AAC/C,cAAM,kBACJ,KAAK,KAAK,sCAAsC,KAChD,KAAK,KAAK,mCAAmC;AAC/C,cAAM,mBACJ,KAAK,KAAK,uCAAuC,KACjD,KAAK,KAAK,oCAAoC;AAChD,cAAM,sBACJ,KAAK,KAAK,0CAA0C,KACpD,KAAK,KAAK,uCAAuC;AACnD,cAAM,eACJ,KAAK,KAAK,kCAAkC,KAC5C,KAAK,KAAK,+BAA+B;AAC3C,cAAM,uBACJ,KAAK,KAAK,4CAA4C,KACtD,KAAK,KAAK,yCAAyC;AAErD,cAAM,YACJ,cAAc,gBAAgB,cAAc;AAE9C,eAAO;AAAA,UACL,QAAQ,aAAa,WAAW,UAAU;AAAA,UAC1C,kBACE,gBACE,SAAS,eAAe,EAAE,IACzB,WAAW,sBAAsB;AAAA,UACtC,cAAc,mBAAmB,WAAW;AAAA,UAC5C,cAAc,
mBAAmB,WAAW;AAAA,UAC5C,eAAe,oBAAoB,WAAW;AAAA,UAC9C,kBAAkB,uBAAuB,WAAW;AAAA,UACpD,WAAW,gBAAgB,WAAW;AAAA,UACtC,mBAAmB,wBAAwB,WAAW;AAAA,QACxD;AAAA,MACF;AAAA,MAEA,mBAA4B;AAC1B,eAAO,CAAC,CAAC,KAAK,oBAAoB,CAAC,CAAC,KAAK;AAAA,MAC3C;AAAA,IACF;AAEA,IAAC,WAAmB,uBAAuB,sBAAsB,YAAY;AAAA;AAAA;;;ACzN7E,IASA,oBACAC,qBA+mBa;AAznBb;AAAA;AAAA;AACA;AACA;AAKA;AACA;AACA,yBAAyB;AACzB,IAAAA,sBAA2B;AAM3B;AAymBO,IAAM,YAAN,cAA2B,UAA4B;AAAA,MAC5D;AAAA;AAAA,MAGgB,OAAO;AAAA;AAAA,MAGf;AAAA;AAAA,MAEA;AAAA;AAAA,MAEA;AAAA,MAkBR,YACE,MACA,QACA,QACA,SACA,YACA;AAEA,cAAM,iBACJ,SACE,YAAY,SACV,SACA,EAAE,GAAG,QAAQ,oCAAoC,IACnD,EAAE,oCAAoC;AAG1C,cAAM,YACJ,MAAM,QAAS,eAAuB,aAAa,KAClD,eAAuB,cAAc,SAAS;AACjD,cAAM,UACJ,OAAQ,eAAuB,sBAAsB,YACpD,eAAuB,kBAAkB,SAAS;AACrD,YAAI,aAAa,SAAS;AACxB,gBAAM,IAAI;AAAA,YACR,aAAa,IAAI;AAAA,UACnB;AAAA,QACF;AAGA,cAAM,aAAa,OAAQ,eAAuB,YAAY;AAC9D,cAAM,gBACJ,OAAQ,eAAuB,eAAe;AAChD,cAAM,iBACJ,OAAQ,eAAuB,gBAAgB;AAEjD,YAAI,eAAe,iBAAiB,iBAAiB;AACnD,gBAAM,IAAI;AAAA,YACR,aAAa,IAAI;AAAA,UAEnB;AAAA,QACF;AAEA,cAAM,MAAM,gBAAgB,QAAQ,SAAS,UAAU;AACvD,aAAK,OAAO;AAEZ,cAAM,SAAS,iBAAiB,EAAE;AAClC,cAAM,cACJ,KAAK,OAAO,UAAU,GAAG,IAAI,IAAI,KAAK,OAAO,OAAO,KAAK;AAG3D,YAAI,CAAC,iBAAiB,KAAK,OAAO,IAAI,WAAW,GAAG;AAClD,gBAAM,IAAI;AAAA,YACR,uBAAuB,IAAI,gBAAgB,QAAQ,WAAW,aAAa;AAAA,UAC7E;AAAA,QACF;AACA,eAAO,IAAI,aAAa,IAAI;AAAA,MAC9B;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,oBAA4B;AAElC,YAAI,KAAK,kBAAkB;AACzB,iBAAO,KAAK;AAAA,QACd;AAEA,cAAM,eAAe,KAAK,OAAO;AACjC,YAAI,CAAC,cAAc;AACjB,eAAK,mBAAmB,KAAK;AAAA,QAC/B,OAAO;AACL,gBAAM,gBAAgB,aAAa,QAAQ,OAAO,GAAG;AACrD,eAAK,mBAAmB,GAAG,KAAK,IAAI,IAAI,aAAa;AAAA,QACvD;AAEA,eAAO,KAAK;AAAA,MACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQQ,iBAAiB,kBAA+B;AAEtD,cAAM,oBAAoB,KAAK,OAAO,YAAY,iBAAiB;AACnE,cAAM,eAAe,GAAG,iBAAiB,IAAI,IAAI,iBAAiB,IAAI,IAAI,iBAAiB,QAAQ,IAAI,iBAAiB,QAAQ,IAAI,iBAAiB,IAAI,iBAAiB,MAAM;AAChL,mBAAO,gCAAW,QAAQ,EACvB,OAAO,YAAY,EACnB,OAAO,KAAK,EACZ,UAAU,GAAG,EAAE;AAAA,MACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,MAAc,oBAGX;AACD,cAAM;AACN,cAAM,iBAAkB,WACrB;AACH,cAAM,EAAE,qBAAAC,qBAAoB,IAAI,MAAM;AAEtC,cAAM,mBAAmB,MAAM,eAAe,oBAAoB;AAClE,cAAM,oBAAoB,KAAK,iBAAiB,gBAAgB;AAGhE,YAAI,KAAK,mBAAmB,KAAK,gBAAgB,mBAAmB;AAClE,iBAAO,EAAE,QAAQ,KAAK,iBAAiB,QAAQ,iBAAiB;AAAA,QAClE;AAGA,YAAI,KAAK,mBAAmB,KAAK,gBAAgB,mBAAmB;AAClE,cAAI;AACF,kBAAM,KAAK,gBAAgB,MAAM;AAAA,UACnC,SAAS,OAAO;AAAA,UAEhB;AAAA,QACF;AAIA,cAAM,oBAAoB,KAAK,OAAO,YAAY,iBAAiB;AACnE,cAAM,SAASA,qBAAoB;AAAA,UACjC,UAAU,iBAAiB;AAAA,UAC3B,UAAU,iBAAiB;AAAA,UAC3B,UAAU;AAAA,UACV,QAAQ,iBAAiB,SAAS,SAAS;AAAA,UAC3C,MAAM,iBAAiB;AAAA,UACvB,MAAM,iBAAiB;AAAA,QACzB,CAAC;AAGD,aAAK,kBAAkB;AACvB,aAAK,cAAc;AAEnB,eAAO,EAAE,QAAQ,QAAQ,iBAAiB;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA,MAAM,cAA6B;AACjC,YAAI,KAAK,iBAAiB;AACxB,cAAI;AACF,kBAAM,KAAK,gBAAgB,MAAM;AAAA,UACnC,SAAS,OAAO;AAAA,UAEhB,UAAE;AACA,iBAAK,kBAAkB;AACvB,iBAAK,cAAc;AAAA,UACrB;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,eAAe,QAIb;AAEA,YAAI,KAAK,YAAY,UAAU;AAC7B,cAAI;AACF,kBAAM,SAAS,KAAK,WAAW,SAAS,MAAM;AAC9C,mBAAO;AAAA,cACL,SAAS,OAAO;AAAA,cAChB,MAAM,OAAO;AAAA,cACb,QAAQ,OAAO,QAAQ;AAAA,gBAAI,CAAC,QAC1B,OAAO,QAAQ,WAAW,MAAM,KAAK,UAAU,GAAG;AAAA,cACpD;AAAA,YACF;AAAA,UACF,SAAS,OAAO;AACd,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,QAAQ,CAAC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,YACjE;AAAA,UACF;AAAA,QACF;AAEA,cAAM,IAAI,MAAM,0BAA0B;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,cAAc,QAA8B;AAC1C,YAAI,KAAK,YAAY,IAAI;AACvB,iBAAO,KAAK,WAAW,GAAG,MAAM;AAAA,QAClC;AAEA,cAAM,IAAI,MAAM,0BAA0B;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAUA,kBAAkB,QAAoB;AACpC,YAAI,KAAK,YAAY,QAAQ;AAC3B,iBAAO,KAAK,WAAW,OAAO,MAAM;AAAA,
QACtC;AAEA,cAAM,IAAI,MAAM,0BAA0B;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,MAAM,gBAAgB,MAA+C;AACnE,cAAM,QAAa,CAAC;AACpB,cAAM,UAA6B,CAAC;AAGpC,cAAM,SAAS;AACf,gBAAQ,SAAS;AAGjB,cAAM,aAAa,KAAK;AACxB,iBAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,gBAAM,SAAS,KAAK,CAAC;AAErB,cAAI;AAEF,gBAAI,KAAK,cAAc,MAAM,GAAG;AAC9B,oBAAM,KAAK,KAAK,sBAAsB,MAAM,CAAC;AAAA,YAC/C,OAAO;AAEL,oBAAM,SAAS,KAAK,eAAe,MAAM;AACzC,kBAAI,OAAO,SAAS;AAClB,sBAAM,KAAK,KAAK,sBAAsB,MAAM,CAAC;AAAA,cAC/C,OAAO;AACL,wBAAQ,KAAK;AAAA,kBACX;AAAA,kBACA,OAAO,OAAO,QAAQ,KAAK,IAAI,KAAK;AAAA,kBACpC,OAAO;AAAA,kBACP,MAAM;AAAA,gBACR,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF,SAAS,OAAO;AACd,oBAAQ,KAAK;AAAA,cACX;AAAA,cACA,OAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,cAC5D,OAAO;AAAA,cACP,MAAM;AAAA,YACR,CAAC;AAAA,UACH;AAAA,QACF;AAEA,eAAO;AAAA,UACL;AAAA,UACA;AAAA,UACA,OAAO;AAAA,QACT;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,MAAc,uBACZ,QACA,WACA,SACyD;AACzD,cAAM,aAAkB,CAAC;AACzB,cAAM,SAA4B,CAAC;AAGnC,cAAM,mBAAmB;AACzB,cAAM,eAAe,QAAQ;AAE7B,iBAAS,IAAI,GAAG,IAAI,cAAc,KAAK,kBAAkB;AACvD,gBAAM,WAAW,KAAK,IAAI,IAAI,kBAAkB,YAAY;AAC5D,gBAAM,QAAQ,QAAQ,MAAM,GAAG,QAAQ;AAEvC,cAAI;AACF,kBAAM,OAAO,OAAO;AAAA,cAClB,OAAO,gBAAgB,SAAS;AAAA,cAChC,QAAQ;AAAA,cACR,QAAQ;AAAA,cACR,qBAAqB;AAAA,gBACnB,wBAAwB;AAAA;AAAA,gBAExB,uBAAuB;AAAA,gBACvB,gBAAgB;AAAA,cAClB;AAAA,YACF,CAAC;AACD,uBAAW,KAAK,GAAG,KAAK;AAAA,UAC1B,SAAS,YAAY;AAEnB,qBAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,oBAAM,SAAS,MAAM,CAAC;AACtB,kBAAI;AACF,sBAAM,OAAO,OAAO;AAAA,kBAClB,OAAO,gBAAgB,SAAS;AAAA,kBAChC,QAAQ,CAAC,MAAM;AAAA,kBACf,QAAQ;AAAA,kBACR,qBAAqB;AAAA,oBACnB,wBAAwB;AAAA,kBAC1B;AAAA,gBACF,CAAC;AACD,2BAAW,KAAK,MAAM;AAAA,cACxB,SAAS,OAAO;AACd,uBAAO,KAAK;AAAA,kBACV;AAAA,kBACA,OAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,kBAC5D,OAAO,IAAI;AAAA,gBACb,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAEA,eAAO,EAAE,YAAY,OAAO;AAAA,MAC9B;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,yBACN,MACA,SACkE;AAClE,cAAM,WAAW,gBAAgB;AACjC,cAAM,WAAW,SAAS,YAAY;AACtC,cAAM,iBAAiB,SAAS,aAAa;AAG7C,YAAI,YAAY,aAAa,WAAW;AACtC,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAGA,YAAI,YAAY,gBAAgB;AAC9B,kBAAQ;AAAA,YACN;AAAA,UACF;AAAA,QACF;AAEA,eAAO,EAAE,UAAU,UAAU,eAAe;AAAA,MAC9C;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,gBACN,MACA,UACwB;AACxB,YAAI,YAAY,CAAC,MAAM;AACrB,iBAAO;AAAA,YACL,YAAY;AAAA,YACZ,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAEA,YAAI,CAAC,aAAa,CAAC,QAAS,KAAa,WAAW,IAAI;AACtD,iBAAO;AAAA,YACL,YAAY;AAAA,YACZ,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAEA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAc,8BACZ,MACA,gBACA,UACA,SACsE;AACtE,YAAI,CAAC,gBAAgB;AACnB,iBAAO,EAAE,eAAe,MAAM,kBAAkB,CAAC,EAAE;AAAA,QACrD;AAEA,YAAI;AACF,gBAAM,mBAAmB,MAAM,KAAK,gBAAgB,IAAiB;AACrE,gBAAM,gBAAgB,iBAAiB;AACvC,gBAAM,mBAAmB,iBAAiB;AAE1C,cAAI,iBAAiB,SAAS,GAAG;AAC/B,iBAAK,uBAAuB,kBAAkB,UAAU,MAAM,OAAO;AAGrE,oBAAQ,UAAU;AAAA,cAChB,KAAK;AACH,uBAAO,EAAE,eAAe,iBAAiB;AAAA,cAC3C,KAAK;AACH,uBAAO,EAAE,eAAe,MAAM,iBAAiB;AAAA,cACjD;AACE,uBAAO,EAAE,eAAe,iBAAiB;AAAA,YAC7C;AAAA,UACF;AAEA,iBAAO,EAAE,eAAe,iBAAiB;AAAA,QAC3C,SAAS,iBAAiB;AACxB,cAAI,aAAa,aAAa;AAC5B,kBAAM;AAAA,UACR;AACA,kBAAQ,KAAK,qBAAqB,eAAe;AACjD,iBAAO,EAAE,eAAe,MAAM,kBAAkB,CAAC,EAAE;AAAA,QACrD;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,uBACN,kBACA,UACA,MACA,SACM;AACN,gBAAQ,UAAU;AAAA,UAChB,KAAK;AACH,kBAAM,aAAa,iBAAiB,CAAC;AACrC,kBAAM,IAAI;AAAA,cACR,yCAAyC,WAAW,KAAK,KAAK,WAAW,KAAK;AAAA,YAChF;AAAA,UAEF,KAAK;AACH,iBAAK,0BAA0B,kBAAkB,KAAK,QAAQ,OAAO;AACrE;AAAA,UAEF,KAAK;AAEH;AAAA,QACJ;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,0BACN,kBACA,cACA,SACM;AACN,cAAM,wBAAwB,iBAAiB;AAC/C,cAAM,wBAAwB,wBAAwB;AAEtD,YACE,SAAS,gBAAgB,UACzB,wBAAwB,QAAQ,aAChC;AACA,gBAAM,IAAI;AAAA,YACR,iCAAiC,q
BAAqB,MAAM,QAAQ,WAAW,aAAa,iBAAiB,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UAC7I;AAAA,QACF;AAEA,YACE,SAAS,qBAAqB,UAC9B,wBAAwB,QAAQ,kBAChC;AACA,gBAAM,IAAI;AAAA,YACR,sCAAsC,sBAAsB,QAAQ,CAAC,CAAC,MAAM,QAAQ,gBAAgB,aAAa,iBAAiB,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UAClK;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,qBACN,WACA,MACA,eACA,UACA,UACA,SACK;AACL,cAAM,gBAAqB;AAAA,UACzB,OAAO,gBAAgB,SAAS;AAAA,UAChC,QAAQ;AAAA,UACR,qBAAqB;AAAA,YACnB,wBAAwB;AAAA,YACxB,mBAAmB;AAAA;AAAA;AAAA,YAEnB,uBACE,WAAW,MAAS,KAAK,IAAI,cAAc,QAAQ,GAAM;AAAA,YAC3D,gBAAgB;AAAA;AAAA,YAEhB,cAAc,cAAc,SAAS,MAAO,IAAI;AAAA,YAChD,uBAAuB;AAAA;AAAA,UACzB;AAAA,QACF;AAGA,YAAI,UAAU;AACZ,wBAAc,SAAS;AAAA,QACzB,OAAO;AACL,wBAAc,SAAS;AAAA,QACzB;AAGA,YACE,aAAa,cACZ,SAAS,gBAAgB,UACxB,SAAS,qBAAqB,SAChC;AACA,cAAI,QAAQ,gBAAgB,QAAW;AACrC,0BAAc,oBAAoB,gCAChC,QAAQ;AAAA,UACZ;AAEA,cAAI,QAAQ,qBAAqB,QAAW;AAC1C,0BAAc,oBAAoB,kCAChC,QAAQ;AAAA,UACZ;AAAA,QACF;AAEA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,oBACN,MACA,eACA,kBACA,UACA,gBACA,UACiB;AACjB,YAAI,UAAU;AACZ,iBAAO;AAAA,YACL,YAAY;AAAA;AAAA,YACZ,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAEA,cAAM,gBAAgB,cAAc;AACpC,cAAM,iBACJ,iBAAkB,KAAa,SAAS;AAE1C,cAAM,SAA0B;AAAA,UAC9B,YAAY;AAAA,UACZ,QAAQ,iBAAiB,iBAAiB,SAAS;AAAA,UACnD,OAAO;AAAA,QACT;AAGA,YACE,kBACA,iBAAiB,SAAS,KAC1B,aAAa,WACb;AACA,iBAAO,gBAAgB,iBAAiB,IAAI,CAAC,QAAQ;AAAA,YACnD,QAAQ,GAAG;AAAA,YACX,OAAO,qBAAqB,GAAG,KAAK;AAAA,YACpC,OAAO,GAAG;AAAA,UACZ,EAAE;AAAA,QACJ;AAEA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAc,qBACZ,YACA,UACA,WACA,MACA,eACA,kBACA,UACA,gBACA,SAC0B;AAC1B,gBAAQ,UAAU;AAAA,UAChB,KAAK;AACH,kBAAM,IAAI;AAAA,cACR,oCAAoC,SAAS,KAAK,UAAU;AAAA,YAC9D;AAAA,UAEF,KAAK;AACH,kBAAM,IAAI;AAAA,cACR,4CAA4C,SAAS,+BAA+B,UAAU;AAAA,YAChG;AAAA,UAEF,KAAK;AACH,mBAAO,MAAM,KAAK;AAAA,cAChB;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UAEF;AACE,kBAAM,IAAI,MAAM,2BAA2B,QAAQ,EAAE;AAAA,QACzD;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAc,sBACZ,YACA,WACA,MACA,eACA,kBACA,UACA,gBACA,SAC0B;AAC1B,YAAI,UAAU;AACZ,gBAAM,IAAI;AAAA,YACR,wDAAwD,UAAU;AAAA,UACpE;AAAA,QACF;AAEA,YAAI;AACF,gBAAM,EAAE,OAAO,IAAI,MAAM,KAAK,kBAAkB;AAChD,gBAAM,wBAAwB,SAAS,yBAAyB;AAChE,gBAAM,YAAY,wBAAyB,OAAe;AAE1D,gBAAM,EAAE,YAAY,OAAO,IAAI,MAAM,KAAK;AAAA,YACxC;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAGA,gBAAM,mBAAsC;AAAA;AAAA,YAE1C,GAAI,kBAAkB,CAAC,wBACrB,iBAAiB,IAAI,CAAC,QAAQ;AAAA,cAC5B,QAAQ,GAAG;AAAA,cACX,OAAO,qBAAqB,GAAG,KAAK;AAAA,cACpC,OAAO,GAAG;AAAA,YACZ,EAAE,IACF,CAAC;AAAA;AAAA,YAEH,GAAG;AAAA,UACL;AAEA,eAAK;AAAA,YACH;AAAA,YACC,KAAa;AAAA,YACd;AAAA,UACF;AAEA,iBAAO;AAAA,YACL,YAAY,WAAW;AAAA,YACvB,QAAQ,iBAAiB;AAAA,YACzB,OAAQ,KAAa;AAAA,YACrB,eAAe;AAAA,UACjB;AAAA,QACF,SAAS,gBAAgB;AACvB,gBAAM,IAAI;AAAA,YACR,oCAAoC,SAAS,6BAA6B,cAAc;AAAA,UAC1F;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,yBACN,eACA,cACA,SACM;AACN,cAAM,cAAc,cAAc;AAClC,cAAM,cAAc,cAAc;AAElC,YACE,SAAS,gBAAgB,UACzB,cAAc,QAAQ,aACtB;AACA,gBAAM,IAAI;AAAA,YACR,4BAA4B,WAAW,MAAM,QAAQ,WAAW,qBAAqB,cAAc,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UACnI;AAAA,QACF;AAEA,YACE,SAAS,qBAAqB,UAC9B,cAAc,QAAQ,kBACtB;AACA,gBAAM,IAAI;AAAA,YACR,iCAAiC,YAAY,QAAQ,CAAC,CAAC,MAAM,QAAQ,gBAAgB,qBAAqB,cAAc,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UACxJ;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAaQ,sBACN,QACA,UAAoB,KAAK,aACpB;AACL,cAAM,SAAS,EAAE,GAAG,OAAO;AAC3B,mBAAW,OAAO,SAAS;AACzB,gBAAM,QAAQ,OAAO,IAAI,IAAI;AAC7B,gBAAM,KAAK,IAAI;AAEf,cAAI,kBAAkB,EAAE,GAAG;AAEzB,gBACE,MAAM,QAAQ,KAAK,MAClB,MAAM,WAAW,KAAK,OAAO,MAAM,CAAC,MAAM,WAC3C;AACA,qBAAO,IAAI,IAAI,IAAI,MAAM,IAAI,CAAC,
SAAS;AAAA,gBACrC,KAAK,sBAAsB,MAAM,GAAG,YAAY,OAAO;AAAA,cACzD,CAAC;AAAA,YACH;AAAA,UACF,WAAW,aAAa,EAAE,GAAG;AAE3B,gBAAI,SAAS,OAAO,UAAU,UAAU;AACtC,qBAAO,IAAI,IAAI,IAAI,KAAK,sBAAsB,OAAO,GAAG,OAAO;AAAA,YACjE;AAAA,UACF;AAAA,QAEF;AACA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MA2DA,MAAM,OACJ,MACA,SAC0B;AAE1B,cAAM,EAAE,UAAU,UAAU,eAAe,IACzC,KAAK,yBAAyB,MAAM,OAAO;AAG7C,cAAM,cAAc,KAAK,gBAAgB,MAAM,QAAQ;AACvD,YAAI,aAAa;AACf,iBAAO;AAAA,QACT;AAGA,YAAI,gBAAqB,CAAC;AAC1B,YAAI,mBAAsC,CAAC;AAE3C,YAAI,CAAC,YAAY,gBAAgB;AAC/B,gBAAM,mBAAmB,MAAM,KAAK;AAAA,YAClC;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AACA,0BAAgB,iBAAiB;AACjC,6BAAmB,iBAAiB;AAAA,QACtC,OAAO;AAEL,0BAAgB,WAAW,CAAC,IAAK;AAAA,QACnC;AAGA,cAAM,EAAE,OAAO,IAAI,MAAM,KAAK,kBAAkB;AAChD,cAAM,YAAY,KAAK,kBAAkB;AAEzC,YAAI;AAEF,gBAAM,gBAAgB,KAAK;AAAA,YACzB;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAEA,gBAAM,OAAO,OAAO,aAAa;AAGjC,iBAAO,KAAK;AAAA,YACV;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,SAAS,YAAY;AAEnB,iBAAO,MAAM,KAAK;AAAA,YAChB;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAAA,MAGF;AAAA;AAAA;AAAA;AAAA,IAKF;AAAA;AAAA;;;AC98BA,SAAS,gBACP,IACA,WAC6B;AAC7B,EAAC,GAAW,UAAU,MAAM,UAAU,GAAG,cAAc;AACzD;AAtmBA,IAsBAC,qBA2IM,eAuDO,QAiaA;AAznBb;AAAA;AAAA;AAaA;AAEA;AAOA,IAAAA,sBAA2B;AAE3B;AAyIA,IAAM,gBAAN,MAAoB;AAAA;AAAA,MAElB;AAAA;AAAA,MAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,YAAY,aAA0B,QAAyB;AAC7D,aAAK,cAAc;AACnB,aAAK,SAAS;AAAA,MAChB;AAAA,IACF;AAsCO,IAAM,SAAN,cAAwB,UAA8B;AAAA,MAC3D;AAAA;AAAA,MAEQ;AAAA;AAAA,MAEA;AAAA,MAwBR,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,cAAM,MAAM,UAAU,CAAC,GAAG,QAAQ,SAAS,QAAW,gBAAgB;AACtE,cAAM,UAAU,iBAAiB,EAAE;AACnC,YAAI,QAAQ,IAAI,IAAI,GAAG;AACrB,gBAAM,IAAI,MAAM,oBAAoB,IAAI,iBAAiB;AAAA,QAC3D;AACA,gBAAQ,IAAI,MAAM,IAAI;AACtB,aAAK,yBAAyB,KAAK,OAAO;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,mBAAmB,oBAAI,IAGrB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA,aAAa,IAAI,MAGd;AAAA;AAAA;AAAA;AAAA;AAAA,MAMK,mBAAmB,WAA4B;AACrD,cAAM,gBACJ,KAAK,OAAO,UAAU,IAAI,KAAK,OAAO,QAAQ,QAAQ,OAAO,GAAG,CAAC,KAAK;AACxE,cAAM,OAAO,GAAG,KAAK,IAAI,GAAG,aAAa;AACzC,eAAO,cAAc,UAAa,UAAU,SAAS,IACjD,GAAG,SAAS,IAAI,IAAI,KACpB;AAAA,MACN;AAAA;AAAA;AAAA;AAAA,MAKQ,iBAAiB,aAAyC;AAChE,cAAM,eAAe;AAAA,UACnB,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,QACd,EAAE,KAAK,GAAG;AACV,mBAAO,gCAAW,QAAQ,EACvB,OAAO,YAAY,EACnB,OAAO,KAAK,EACZ,UAAU,GAAG,EAAE;AAAA,MACpB;AAAA;AAAA;AAAA;AAAA,MAKA,MAAc,sBAGX;AAED,cAAM;AACN,cAAM,iBAAkB,WACrB;AACH,cAAM,EAAE,kBAAAC,kBAAiB,IAAI,MAAM;AAEnC,cAAM,cAAc,MAAO,eAAuB,eAAe;AACjE,cAAM,cAAc,KAAK,iBAAiB,WAAW;AAErD,YAAI,KAAK,qBAAqB,KAAK,qBAAqB,aAAa;AACnE,iBAAO,EAAE,UAAU,KAAK,mBAAmB,YAAY;AAAA,QACzD;AAGA,YAAI,KAAK,qBAAqB,KAAK,qBAAqB,aAAa;AACnE,cAAI;AACF,kBAAM,KAAK,kBAAkB,WAAW;AAAA,UAC1C,QAAQ;AAAA,UAER;AACA,eAAK,oBAAoB;AAAA,QAC3B;AAEA,cAAM,WAAW,oBAAoB,KAAK,IAAI;AAC9C,cAAM,SAAiB;AAAA,UACrB,WAAW;AAAA,UACX,KAAK,CAAC,YAA0B;AAC9B,oBAAQ,IAAI,GAAG,QAAQ,KAAK,OAAO,EAAE;AAAA,UACvC;AAAA,UACA,OAAO,CAAC,YAA0B;AAChC,oBAAQ,MAAM,GAAG,QAAQ,KAAK,OAAO,EAAE;AAAA,UACzC;AAAA,UACA,MAAM,CAAC,YAA0B;AAC/B,oBAAQ,KAAK,GAAG,QAAQ,KAAK,OAAO,EAAE;AAAA,UACxC;AAAA,QACF;AAEA,cAAM,WAAW,MAAMA;AAAA,UACrB;AAAA,YACE;AAAA,YACA,QAAQ,YAAY;AAAA,YACpB,kBAAkB,YAAY;AAAA,YAC9B,cAAc
,YAAY;AAAA,YAC1B,cAAc,YAAY;AAAA,YAC1B,eAAe,YAAY;AAAA,UAC7B;AAAA,UACA;AAAA,QACF;AAEA,aAAK,oBAAoB;AACzB,aAAK,mBAAmB;AAExB,eAAO,EAAE,UAAU,YAAY;AAAA,MACjC;AAAA;AAAA;AAAA;AAAA,MAKA,MAAM,gBAA+B;AACnC,YAAI,KAAK,mBAAmB;AAC1B,cAAI;AACF,kBAAM,KAAK,kBAAkB,WAAW;AAAA,UAC1C,QAAQ;AAAA,UAER,UAAE;AACA,iBAAK,oBAAoB;AACzB,iBAAK,mBAAmB;AAAA,UAC1B;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAM,KAAK,QAAsC;AAE/C,cAAM,OACJ,MAAM,QAAQ,MAAM,IAAI,SACtB,WAAW,UAAa,WAAW,OAAO,CAAC,MAAW,IACtD,CAAC;AAEL,YAAI,KAAK,WAAW,EAAG;AAEvB,cAAM,EAAE,UAAU,YAAY,IAAI,MAAM,KAAK,oBAAoB;AACjE,cAAM,QAAQ,KAAK,mBAAmB,YAAY,SAAS;AAG3D,cAAM,KAAK,KAAK,OAAO;AACvB,YAAI,MAAM,GAAG,SAAS,QAAQ;AAC5B,gBAAM,oBAAoB,YAAY;AACtC,cAAI,CAAC,mBAAmB;AACtB,kBAAM,IAAI,MAAM,oCAAoC;AAAA,UACtD;AAEA,gBAAM;AAAA,YACJ,SAAS,EAAE,eAAe;AAAA,UAC5B,IAAI,MAAM,OAAO,oCAAoC;AACrD,gBAAM,WAAW,IAAI,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAE/D,cAAI,WAA+B;AAEnC,cAAI,QAAQ,GAAG,WAAW;AACxB,uBAAW,GAAG,UAAU;AAAA,UAC1B,WAAW,mBAAmB,GAAG,WAAW;AAC1C,uBAAW,MAAM,SAAS,kBAAkB,GAAG,UAAU,aAAa;AAAA,UACxE,WAAW,aAAa,GAAG,WAAW;AACpC,uBAAW,MAAM,SAAS;AAAA,cACxB,GAAG,UAAU;AAAA,cACb,GAAG,UAAU;AAAA,YACf;AAAA,UACF;AAEA,cAAI,aAAa,QAAW;AAC1B,kBAAM,IAAI,MAAM,6BAA6B;AAAA,UAC/C;AAEA,gBAAM,UAAU,MAAM,QAAQ;AAAA,YAC5B,KAAK;AAAA,cAAI,CAAC,MACR,SAAS,OAAO,UAAU,CAAuC;AAAA,YACnE;AAAA,UACF;AACA,gBAAM,SAAS,KAAK;AAAA,YAClB;AAAA,YACA,UAAU,QAAQ,IAAI,CAAC,WAAW,EAAE,MAAM,EAAE;AAAA,UAC9C,CAAC;AACD;AAAA,QACF,WAAW,OAAO,QAAW;AAC3B,gBAAM,IAAI,MAAM,0CAA0C;AAAA,QAC5D;AAEA,cAAM,SAAS,KAAK;AAAA,UAClB;AAAA,UACA,UAAU,KAAK,IAAI,CAAC,OAAO,EAAE,OAAO,KAAK,UAAU,CAAC,EAAE,EAAE;AAAA,QAC1D,CAAC;AAAA,MACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAYA,aACE,aACA,gBACA,QACA;AAEA,cAAM,aAAa,uBAAuB,IAAI,MAAM,EAAE,KAAK;AAE3D,cAAM,kBAAsC;AAAA,UAC1C,GAAI,UAAU,CAAC;AAAA,UACf;AAAA,QACF;AACA,YAAI,gBAAgB,oBAAoB,QAAW;AACjD,0BAAgB,kBAAkB,KAAK;AAAA,QACzC;AAEA,YAAI,KAAK,iBAAiB,IAAI,YAAY,IAAI,GAAG;AAC/C,gBAAM,qBAAqB,KAAK,iBAAiB,IAAI,YAAY,IAAI;AACrE,gBAAM,aAAa,mBAAmB;AAAA,YACpC,CAAC,CAAC,GAAG,IAAI,GAAG,MAAM,IAAI,YAAY,gBAAgB;AAAA,UACpD;AAEA,cAAI,CAAC,YAAY;AACf,+BAAmB,KAAK,CAAC,aAAa,gBAAgB,eAAe,CAAC;AAAA,UACxE;AAAA,QACF,OAAO;AACL,eAAK,iBAAiB,IAAI,YAAY,MAAM;AAAA,YAC1C,CAAC,aAAa,gBAAgB,eAAe;AAAA,UAC/C,CAAC;AAAA,QACH;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,YAAY,UAAuB,QAA4B;AAE7D,cAAM,aAAa,uBAAuB,IAAI,MAAM,EAAE,KAAK;AAE3D,cAAM,iBAAoC;AAAA,UACxC,GAAI,UAAU,CAAC;AAAA,UACf;AAAA,QACF;AACA,YAAI,eAAe,oBAAoB,QAAW;AAChD,yBAAe,kBAAkB,KAAK;AAAA,QACxC;AACA,cAAM,aAAa,KAAK,WAAW;AAAA,UACjC,CAAC,aAAa,SAAS,OAAO,YAAY,eAAe;AAAA,QAC3D;AAEA,YAAI,CAAC,YAAY;AACf,eAAK,WAAW,KAAK,EAAE,UAAU,QAAQ,eAAe,CAAC;AAAA,QAC3D;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAeA,SAAS,CAAC,WAA0B,IAAI,cAAc,MAAM,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAUlE,kBAAkB,gBAAgD;AAChE,aAAK,2BAA2B;AAAA,MAClC;AAAA,IACF;AAuEO,IAAM,kBAAN,cAAiC,OAAwB;AAAA,MAe9D,YACE,MACA,QACA,WACA;AACA,YAAI,cAAc,QAAW;AAC3B,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,cAAM,MAAM,UAAU,CAAC,GAAG,WAAW,YAAY,QAAW,KAAK;AACjE,aAAK,YAAY;AACjB,yBAAiB,EAAE,QAAQ,IAAI,MAAM,IAAI;AAAA,MAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAWR,aACE,aACA,gBACA,QACA;AACA,cAAM,eAAyD,CAC7D,eACG;AACH,0BAAmB,YAAY,KAAK,SAAS;AAC7C,iBAAO,eAAe,UAAU;AAAA,QAClC;AACA,cAAM,aAAa,aAAa,cAAc,MAAM;AAAA,MACtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,YACE,UACA,QACA;AACA,cAAM,eAA0C,CAAC,eAAe;AAC9D,0BAAmB,YAAY,KAAK,SAAS;AAC7C,iBAAO,SAAS,UAAU;AAAA,QAC5B;AACA,cAAM,YAAY,cAAc,MAAM;AAAA,MACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,kBACE,gBACA;AACA,cAAM,eAA6D,CACjE,eACG;AACH,0BAAmB,Y
AAY,KAAK,SAAS;AAC7C,iBAAO,eAAe,UAAU;AAAA,QAClC;AACA,cAAM,kBAAkB,YAAY;AAAA,MACtC;AAAA,IACF;AAAA;AAAA;;;ACztBA,IA4Da,MA4FA;AAxJb;AAAA;AAAA;AAAA;AA4DO,IAAM,OAAN,MAAiB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAsCtB,YACW,MACA,QACT;AAFS;AACA;AAAA,MACR;AAAA,IACL;AAkDO,IAAM,WAAN,MAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQpB,YACW,MACA,QACT;AAFS;AACA;AAET,cAAM,YAAY,iBAAiB,EAAE;AACrC,YAAI,UAAU,IAAI,IAAI,GAAG;AACvB,gBAAM,IAAI,MAAM,sBAAsB,IAAI,iBAAiB;AAAA,QAC7D;AACA,aAAK,kBAAkB,OAAO,cAAc,IAAI;AAChD,kBAAU,IAAI,MAAM,IAAI;AAAA,MAC1B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAUQ,kBACN,cACA,cACM;AACN,YAAI,iBAAiB,QAAQ,iBAAiB,QAAW;AACvD,gBAAM,IAAI;AAAA,YACR,aAAa,YAAY;AAAA,UAC3B;AAAA,QACF;AAEA,cAAM,UAAU,oBAAI,IAAY;AAChC,cAAM,iBAAiB,oBAAI,IAAY;AAEvC,cAAM,eAAe,CACnB,MACA,gBACS;AACT,cAAI,SAAS,QAAQ,SAAS,QAAW;AACvC,kBAAM,UACJ,YAAY,SAAS,IAAI,YAAY,KAAK,MAAM,IAAI,SAAS;AAC/D,kBAAM,IAAI;AAAA,cACR,aAAa,YAAY,0DAA0D,OAAO;AAAA,YAC5F;AAAA,UACF;AAEA,gBAAM,WAAW,KAAK;AAEtB,cAAI,eAAe,IAAI,QAAQ,GAAG;AAChC,kBAAM,kBAAkB,YAAY,QAAQ,QAAQ;AACpD,kBAAM,YACJ,mBAAmB,IACjB,YAAY,MAAM,eAAe,EAAE,OAAO,QAAQ,IAClD,YAAY,OAAO,QAAQ;AAC/B,kBAAM,IAAI;AAAA,cACR,aAAa,YAAY,8CAA8C,UAAU,KAAK,MAAM,CAAC;AAAA,YAC/F;AAAA,UACF;AAEA,cAAI,QAAQ,IAAI,QAAQ,GAAG;AAEzB;AAAA,UACF;AAEA,kBAAQ,IAAI,QAAQ;AACpB,yBAAe,IAAI,QAAQ;AAE3B,cAAI,KAAK,OAAO,YAAY;AAC1B,uBAAW,YAAY,KAAK,OAAO,YAAY;AAC7C,2BAAa,UAAU,CAAC,GAAG,aAAa,QAAQ,CAAC;AAAA,YACnD;AAAA,UACF;AAEA,yBAAe,OAAO,QAAQ;AAAA,QAChC;AAEA,qBAAa,cAAc,CAAC,CAAC;AAAA,MAC/B;AAAA,IACF;AAAA;AAAA;;;AC7OA,IAiCa;AAjCb;AAAA;AAAA;AACA;AAEA;AA8BO,IAAM,YAAN,cAA2B,UAA8B;AAAA,MAuB9D,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,cAAM,MAAM,QAAQ,QAAQ,SAAS,QAAW,gBAAgB;AAChE,cAAM,aAAa,iBAAiB,EAAE;AACtC,YAAI,WAAW,IAAI,IAAI,GAAG;AACxB,gBAAM,IAAI,MAAM,wBAAwB,IAAI,iBAAiB;AAAA,QAC/D;AACA,mBAAW,IAAI,MAAM,IAAI;AAAA,MAC3B;AAAA,IACF;AAAA;AAAA;;;ACvEA,IAuCa,KA8KA;AArNb;AAAA;AAAA;AACA;AAEA;AAoCO,IAAM,MAAN,cAA8B,UAA2B;AAAA;AAAA,MAE9D;AAAA;AAAA,MAEA;AAAA,MAoBA,YACE,MACA,SACA,QACA,QACA,SACA,gBACA;AACA,cAAM,MAAM,UAAU,CAAC,GAAG,QAAQ,OAAO;AACzC,aAAK,WAAW;AAChB,aAAK,iBAAiB,kBAAkB;AAAA,UACtC,SAAS;AAAA,UACT,SAAS,CAAC,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,SAAS,EAAE,CAAC;AAAA,UACtD,YAAY,EAAE,SAAS,CAAC,EAAE;AAAA,QAC5B;AACA,cAAM,OAAO,iBAAiB,EAAE;AAChC,cAAM,MAAM,GAAG,IAAI,GAAG,QAAQ,UAAU,IAAI,OAAO,OAAO,KAAK,EAAE;AACjE,YAAI,KAAK,IAAI,GAAG,GAAG;AACjB,gBAAM,IAAI;AAAA,YACR,6BAA6B,IAAI,gBAAgB,QAAQ,OAAO;AAAA,UAClE;AAAA,QACF;AACA,aAAK,IAAI,KAAK,IAAI;AAGlB,YAAI,QAAQ,MAAM;AAChB,cAAI,OAAO,SAAS;AAElB,kBAAM,sBACJ,OAAO,KAAK,SAAS,IAAI,OAAO,OAAO,EAAE,KACzC,OAAO,SAAS,OAAO,WACtB,OAAO,KAAK,SAAS,OAAO,OAAO,KAClC,OAAO,KAAK,SAAS,OAAO,QAAQ,UACpC,OAAO,KAAK,OAAO,KAAK,SAAS,OAAO,QAAQ,SAAS,CAAC,MACxD;AAEN,gBAAI,qBAAqB;AAEvB,kBAAI,KAAK,IAAI,OAAO,IAAI,GAAG;AACzB,sBAAM,WAAW,KAAK,IAAI,OAAO,IAAI;AACrC,sBAAM,IAAI;AAAA,kBACR,wBAAwB,IAAI,gBAAgB,OAAO,IAAI,yCAAyC,SAAS,IAAI;AAAA,gBAC/G;AAAA,cACF;AACA,mBAAK,IAAI,OAAO,MAAM,IAAI;AAAA,YAC5B,OAAO;AAEL,oBAAM,gBAAgB,GAAG,OAAO,KAAK,QAAQ,OAAO,EAAE,CAAC,IAAI,OAAO,OAAO;AAGzE,kBAAI,KAAK,IAAI,aAAa,GAAG;AAC3B,sBAAM,WAAW,KAAK,IAAI,aAAa;AACvC,sBAAM,IAAI;AAAA,kBACR,wBAAwB,IAAI,gBAAgB,aAAa,yCAAyC,SAAS,IAAI;AAAA,gBACjH;AAAA,cACF;AACA,mBAAK,IAAI,eAAe,IAAI;AAI5B,kBAAI,CAAC,KAAK,IAAI,OAAO,IAAI,GAAG;AAC1B,qBAAK,IAAI,OAAO,MAAM,IAAI;AAAA,cAC5B;AAAA,YACF;AAAA,UACF,OAAO;AAEL,gBAAI,KAAK,IAAI,OAAO,IAAI,GAAG;AACzB,oBAAM,WAAW,KAAK,IAAI,OAAO,IAAI;AACrC,oBAAM,IAAI;AAAA,gBACR,wBAAwB,IAAI,uBAAuB,OAAO,IAAI,yCAAyC,SAAS,IAAI;AAAA,cACtH;AAAA,YACF;AACA,iBAAK,IA
AI,OAAO,MAAM,IAAI;AAAA,UAC5B;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,aAAa,MAAwB;AACnC,eAAO,KAAK;AAAA,MACd;AAAA,MAEA,MAAM,KAAK,SAAiB,aAA4B;AAEtD,YAAIC;AACJ,YAAI,KAAK,QAAQ,MAAM;AAErB,cAAI,KAAK,OAAO,SAAS;AACvB,kBAAM,sBACJ,KAAK,OAAO,KAAK,SAAS,IAAI,KAAK,OAAO,OAAO,EAAE,KACnD,KAAK,OAAO,SAAS,KAAK,OAAO,WAChC,KAAK,OAAO,KAAK,SAAS,KAAK,OAAO,OAAO,KAC5C,KAAK,OAAO,KAAK,SAAS,KAAK,OAAO,QAAQ,UAC9C,KAAK,OAAO,KACV,KAAK,OAAO,KAAK,SAAS,KAAK,OAAO,QAAQ,SAAS,CACzD,MAAM;AAEV,gBAAI,qBAAqB;AACvB,cAAAA,QAAO,KAAK,OAAO;AAAA,YACrB,OAAO;AACL,cAAAA,QAAO,GAAG,KAAK,OAAO,KAAK,QAAQ,OAAO,EAAE,CAAC,IAAI,KAAK,OAAO,OAAO;AAAA,YACtE;AAAA,UACF,OAAO;AACL,YAAAA,QAAO,KAAK,OAAO;AAAA,UACrB;AAAA,QACF,OAAO;AAEL,UAAAA,QACE,KAAK,QAAQ,UACX,GAAG,KAAK,IAAI,IAAI,KAAK,OAAO,OAAO,KACnC,KAAK;AAAA,QACX;AACA,cAAM,MAAM,IAAI,IAAI,GAAG,QAAQ,QAAQ,OAAO,EAAE,CAAC,QAAQA,KAAI,EAAE;AAE/D,cAAM,eAAe,IAAI;AAEzB,mBAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,WAAkB,GAAG;AAC7D,cAAI,MAAM,QAAQ,KAAK,GAAG;AAExB,uBAAW,QAAQ,OAAO;AACxB,kBAAI,SAAS,QAAQ,SAAS,QAAW;AACvC,6BAAa,OAAO,KAAK,OAAO,IAAI,CAAC;AAAA,cACvC;AAAA,YACF;AAAA,UACF,WAAW,UAAU,QAAQ,UAAU,QAAW;AAChD,yBAAa,OAAO,KAAK,OAAO,KAAK,CAAC;AAAA,UACxC;AAAA,QACF;AAEA,cAAM,WAAW,MAAM,MAAM,KAAK;AAAA,UAChC,QAAQ;AAAA,UACR,SAAS;AAAA,YACP,QAAQ;AAAA,UACV;AAAA,QACF,CAAC;AACD,YAAI,CAAC,SAAS,IAAI;AAChB,gBAAM,IAAI,MAAM,uBAAuB,SAAS,MAAM,EAAE;AAAA,QAC1D;AACA,cAAM,OAAO,MAAM,SAAS,KAAK;AACjC,eAAO;AAAA,MACT;AAAA,IACF;AAMO,IAAM,iBAAiB;AAAA;AAAA;;;ACrN9B,IAqJa;AArJb;AAAA;AAAA;AACA;AAEA;AAMA;AACA;AAEA;AAyIO,IAAM,iBAAN,cAAgC,UAAsC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAM3E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA;AAAA;AAAA,MAGA;AAAA,MA0CA,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,cAAM,MAAM,QAAQ,QAAQ,SAAS,YAAY,gBAAgB;AAGjE,YAAI,OAAO,WAAW,QAAW;AAC/B,kBAAQ;AAAA,YACN;AAAA,UAEF;AAEA,cAAI,OAAO,cAAc,QAAW;AAClC,YAAC,OAAe,YAAY,OAAO;AAAA,UACrC;AAAA,QACF;AAGA,YAAI,OAAO,OAAO;AAChB,gBAAM,cACJ,OAAO,OAAO,UAAU,WACtB;AAAA,YACE,GAAG,OAAO;AAAA,YACV,WAAW,OAAO,MAAM,aAAa,OAAO;AAAA,YAC5C,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD,IACA;AAAA,YACE,WAAW,OAAO;AAAA,YAClB;AAAA,YACA,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD;AACJ,eAAK,QAAQ,IAAI;AAAA,YACf;AAAA,YACA;AAAA,YACA,KAAK;AAAA,YACL,KAAK;AAAA,YACL,KAAK;AAAA,UACP;AAAA,QACF;AAEA,YAAI,OAAO,iBAAiB;AAC1B,gBAAM,eAAe;AAAA,YACnB,aAAa;AAAA,YACb,GAAI,OAAO,OAAO,oBAAoB,WACpC;AAAA,cACE,GAAG,OAAO;AAAA,cACV,WAAW,OAAO,gBAAgB,aAAa,OAAO;AAAA,YACxD,IACA,EAAE,WAAW,OAAO,UAAU;AAAA,YAChC,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD;AACA,eAAK,kBAAkB,IAAI;AAAA,YACzB,GAAG,IAAI;AAAA,YACP;AAAA,YACA,WAAY;AAAA,UACd;AAAA,QACF;AAGA,YAAI,OAAO,QAAQ;AACjB,gBAAM,eAAgC;AAAA,YACpC,aAAa,KAAK;AAAA,YAClB,wBAAwB,KAAK;AAAA,YAC7B,GAAI,OAAO,OAAO,WAAW,WAC3B;AAAA,cACE,GAAG,OAAO;AAAA,cACV,WAAW,OAAO,OAAO,aAAa,OAAO;AAAA,YAC/C,IACA,EAAE,WAAW,OAAO,UAAU;AAAA,YAChC,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD;AACA,eAAK,SAAS,IAAI;AAAA,YAChB;AAAA,YACA;AAAA,YACA,KAAK;AAAA,YACL,KAAK;AAAA,YACL;AAAA,YACA,KAAK;AAAA,UACP;AAEA,UAAC,KAAK,OAAe,iBAAiB;AAAA,QACxC;AAGA,cAAM,qBACJ,OAAO,cAAc,SAAY,OAAO,YAAY,OAAO;AAC7D,YAAI,oBAAoB;AACtB,cAAI,CAAC,KAAK,QAAQ;AAChB,kBAAM,IAAI,MAAM,wCAAwC;AAAA,UAC1D;AAEA,gBAAM,eAAe;AAAA,YACnB,aAAa,KAAK;AAAA,YAClB,iBAAiB,KAAK;AAAA,YACtB,GAAI,OAAO,uBAAuB,WAC/B,qBACD,CAAC;AAAA,YACH,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,YAChD,GAAI,OAAO,QAAQ,EAAE,MAAM,OAAO,KAAK;AAAA,UACzC;AACA,eAAK,YAAY,IAAI;AAAA,YACnB;AAAA,YACA;AAAA,YACA,KAAK;AAAA,YACL,KAAK;AAAA,YACL;AAAA,YACA,KAAK;AAAA,UACP;AAEA,UAAC,KAAK,UAAkB,iBAAiB;AAAA,QAC3C;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;AC3UA,IAuBM,iBAgCO;AAvDb;AAAA;AAAA;AAAA;AAuBA,IAAM,kBAAN,MAAyB;AAAA,MACf;AAAA,MACA;AAAA,MAER,YAA
Y,eAAiC,YAAY,IAAI;AAC3D,aAAK,WAAW,cAAc,OAAO,aAAa,EAAE;AACpD,aAAK,YAAY;AAAA,MACnB;AAAA,MAEA,MAAM,eAAwC;AAC5C,cAAM,QAAa,CAAC;AAEpB,iBAAS,IAAI,GAAG,IAAI,KAAK,WAAW,KAAK;AACvC,gBAAM,EAAE,OAAO,KAAK,IAAI,MAAM,KAAK,SAAS,KAAK;AAEjD,cAAI,MAAM;AACR,mBAAO,EAAE,OAAO,SAAS,MAAM;AAAA,UACjC;AAEA,gBAAM,KAAK,KAAK;AAAA,QAClB;AAEA,eAAO,EAAE,OAAO,SAAS,KAAK;AAAA,MAChC;AAAA,IACF;AAQO,IAAM,cAAN,MAAwB;AAAA,MAG7B,YACW,MACA,QACT;AAFS;AACA;AAET,aAAK,cAAc;AAAA,MACrB;AAAA,MAPQ;AAAA,MASA,gBAAsB;AAC5B,aAAK,UAAU,KAAK,cAAc;AAClC,cAAM,QAAQ,KAAK,eAAe;AAElC,cAAM,QAAQ,OAAO,aAAa,CAAC,MAAM,SAAS;AAClD,cAAM,UAAU,OAAO,aAAa,CAAC,MAAM,IAAI;AAE/C,YAAI,SAAS,KAAK,MAAM;AAAA,UACtB,cAAc,MAAM;AAAA,UACpB,SAAS;AAAA,UACT,SAAS;AAAA,QACX,CAAC;AAAA,MACH;AAAA,MAEQ,gBAAoC;AAC1C,cAAM,WACJ,OAAO,KAAK,OAAO,YAAY,aAC7B,KAAK,OAAO,QAAQ,IACpB,KAAK,OAAO;AAEhB,eAAO,IAAI,gBAAgB,QAAQ;AAAA,MACrC;AAAA,MAEQ,uBAAmC;AACzC,eAAO;AAAA,UACL,SAAS;AAAA,UACT,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MAEQ,iBAAiC;AACvC,cAAM,aAAa,KAAK,qBAAqB;AAE7C,eAAO;AAAA,UACL,SAAS,KAAK,kBAAkB,UAAU;AAAA,UAC1C,WAAW,KAAK,oBAAoB,UAAU;AAAA,UAC9C,MAAM,KAAK,eAAe,UAAU;AAAA,QACtC;AAAA,MACF;AAAA,MAEQ,kBACN,YAC4B;AAC5B,eAAO,IAAI,KAA2B,GAAG,KAAK,IAAI,YAAY;AAAA,UAC5D,KAAK,OAAO,CAAC,MAAM;AACjB,oBAAQ,IAAI,4BAA4B,KAAK,IAAI,KAAK;AACtD,kBAAM,QAAQ,MAAM,KAAK,QAAQ,aAAa;AAC9C,oBAAQ,IAAI,+BAA+B,MAAM,MAAM,MAAM,QAAQ;AACrE,mBAAO;AAAA,UACT;AAAA,UACA,SAAS,WAAW;AAAA,UACpB,SAAS,WAAW;AAAA,QACtB,CAAC;AAAA,MACH;AAAA,MAEQ,oBACN,YAC4C;AAC5C,eAAO,IAAI;AAAA,UACT,GAAG,KAAK,IAAI;AAAA,UACZ;AAAA;AAAA,YAEE,KAAK,OAAO,EAAE,MAAM,MAAM;AACxB,oBAAM,QAAQ;AACd,sBAAQ;AAAA,gBACN,8BAA8B,KAAK,IAAI,SAAS,MAAM,MAAM,MAAM;AAAA,cACpE;AACA,oBAAM,mBAAwB,CAAC;AAE/B,yBAAW,QAAQ,MAAM,OAAO;AAC9B,sBAAM,cAAc,MAAM,KAAK,OAAO,UAAU,IAAI;AACpD,iCAAiB,KAAK,WAAW;AAAA,cACnC;AAEA,sBAAQ;AAAA,gBACN,iCAAiC,iBAAiB,MAAM;AAAA,cAC1D;AACA,qBAAO,EAAE,OAAO,iBAAiB;AAAA,YACnC;AAAA,YACA,SAAS,WAAW;AAAA,YACpB,SAAS,WAAW;AAAA,UACtB;AAAA,QACF;AAAA,MACF;AAAA,MAEQ,eACN,YACkC;AAClC,eAAO,IAAI,KAAiC,GAAG,KAAK,IAAI,SAAS;AAAA,UAC/D,KAAK,OAAO,EAAE,OAAO,iBAAiB,MAAM;AAC1C,oBAAQ;AAAA,cACN,yBAAyB,KAAK,IAAI,SAAS,iBAAiB,MAAM,MAAM;AAAA,YAC1E;AAGA,gBAAI,YAAY,KAAK,OAAO,MAAM;AAEhC,oBAAM,KAAK,OAAO,KAAK,OAAO,iBAAiB,KAAK;AAAA,YACtD,OAAO;AAEL,oBAAM,KAAK,OAAO,KAAK,iBAAiB,KAAK;AAAA,YAC/C;AAEA,oBAAQ,IAAI,qBAAqB;AAAA,UACnC;AAAA,UACA,SAAS,WAAW;AAAA,UACpB,SAAS,WAAW;AAAA,QACtB,CAAC;AAAA,MACH;AAAA;AAAA,MAGA,MAAM,MAAqB;AACzB,gBAAQ,IAAI,0BAA0B,KAAK,IAAI,EAAE;AAEjD,YAAI,cAAc;AAClB,WAAG;AACD,kBAAQ,IAAI,oBAAoB,WAAW,KAAK;AAChD,gBAAM,QAAQ,MAAM,KAAK,QAAQ,aAAa;AAE9C,cAAI,MAAM,MAAM,WAAW,GAAG;AAC5B;AAAA,UACF;AAGA,gBAAM,mBAAwB,CAAC;AAC/B,qBAAW,iBAAiB,MAAM,OAAO;AACvC,kBAAM,kBAAkB,MAAM,KAAK,OAAO,UAAU,aAAa;AACjE,6BAAiB,KAAK,eAAe;AAAA,UACvC;AAGA,cAAI,YAAY,KAAK,OAAO,MAAM;AAEhC,kBAAM,KAAK,OAAO,KAAK,OAAO,gBAAgB;AAAA,UAChD,OAAO;AAEL,kBAAM,KAAK,OAAO,KAAK,gBAAgB;AAAA,UACzC;AAEA,kBAAQ;AAAA,YACN,mBAAmB,WAAW,SAAS,MAAM,MAAM,MAAM;AAAA,UAC3D;AACA;AAEA,cAAI,CAAC,MAAM,SAAS;AAClB;AAAA,UACF;AAAA,QACF,SAAS;AAET,gBAAQ,IAAI,2BAA2B,KAAK,IAAI,EAAE;AAAA,MACpD;AAAA,IACF;AAAA;AAAA;;;ACxNA,IAWa;AAXb;AAAA;AAAA;AAAA;AAEA;AACA;AAQO,IAAM,cAAN,MAAkB;AAAA;AAAA,MAEP,OAAO;AAAA;AAAA,MAGvB;AAAA;AAAA,MAEA;AAAA;AAAA,MAEA;AAAA;AAAA,MAGA;AAAA;AAAA,MAEA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAWA,YACE,MACA,OACA,UACA,SAIA;AACA,cAAM,eAAe,iBAAiB,EAAE;AAGxC,YAAI,CAAC,iBAAiB,KAAK,aAAa,IAAI,IAAI,GAAG;AACjD,gBAAM,IAAI,MAAM,yBAAyB,IAAI,iBAAiB;AAAA,QAChE;AACA,qBAAa,IAAI,MAAM,IAAI;AAE3B,aAAK,OAAO;AACZ,aAAK,QAAQ,MAAM;AAAA,UAAI,CAACC,SACtB,OAAOA,SAAQ,WAAWA,OAAM,cAAcA,IAAG;AAAA,QACnD;AACA,aAAK,WAAW,SAAS;AAAA,UAAI,CAACA,SAC5B,OAAOA,SAAQ,WAA
WA,OAAM,cAAcA,IAAG;AAAA,QACnD;AACA,aAAK,gBAAgB,SAAS,iBAAiB,CAAC;AAChD,aAAK,eAAe,SAAS,gBAAgB,CAAC;AAG9C,cAAM,QAAQ,IAAI,MAAM,EAAE;AAC1B,cAAM,WAAW,2BAA2B,KAAK;AAEjD,YAAI,UAAU;AACZ,eAAK,aAAa,SAAS;AAC3B,eAAK,aAAa,SAAS;AAC3B,eAAK,eAAe,SAAS;AAAA,QAC/B;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;AClFA,IAiDM,wBAeO;AAhEb;AAAA;AAAA;AAAA;AAKA;AACA;AACA;AA0CA,IAAM,yBAAyB,CAAC,cAA0C;AACxE,UAAI,OAAO,cAAc,UAAU;AACjC,eAAO;AAAA,MACT,OAAO;AACL,cAAM,IAAI,MAAM,uCAAuC;AAAA,MACzD;AAAA,IACF;AASO,IAAM,mBAAN,cAA4C,YAAY;AAAA;AAAA,MAE7D;AAAA,MAiBA,YACE,SACA,cACA,eACA;AACA,YAAI,kBAAkB,QAAQ;AAC9B,YAAI,OAAO,oBAAoB,UAAU;AACvC,4BAAkB,cAAc,eAAe;AAAA,QACjD;AAEA,YAAI,iBAAiB,UAAa,kBAAkB,QAAW;AAC7D,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cACJ,QAAQ,uBAAuB,YAC7B,QAAQ,cACR,IAAI;AAAA,UACF;AAAA,YACE,QAAQ,aAAa,QAAQ,QAAQ;AAAA,UACvC;AAAA,UACA;AAAA,YACE,eACE,QAAQ,aAAa,iBAAiB,QAAQ;AAAA,YAChD,QACE,QAAQ,aAAa,UACrB,QAAQ;AAAA,UAEZ;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAEJ,YAAI,YAAY,SAAS,QAAQ,sBAAsB;AACrD,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA;AAAA,UACE,QAAQ;AAAA,UACR;AAAA,YACE,uBAAuB;AAAA,cACrB,MAAM,QAAQ;AAAA,cACd,kBAAkB,YAAY;AAAA,cAC9B,QAAQ;AAAA,YACV,CAAC;AAAA;AAAA;AAAA,UAGH;AAAA,UACA,CAAC,SAAS,QAAQ,oBAAoB,CAAC;AAAA,UACvC;AAAA,YACE,eAAe,QAAQ;AAAA,YACvB,cAAc,CAAC,WAAW;AAAA,UAC5B;AAAA,QACF;AAEA,aAAK,cAAc;AAAA,MACrB;AAAA,IACF;AAAA;AAAA;;;AChJA,IASa;AATb;AAAA;AAAA;AAAA;AACA;AAEA;AAMO,IAAM,OAAN,cAAmB,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOpC,YACE,MACA,iBACA,YACA;AACA,YAAI,OAAO,oBAAoB,UAAU;AACvC,4BAAkB,cAAc,eAAe;AAAA,QACjD;AAEA;AAAA,UACE;AAAA,UACA;AAAA,YACE,6BAA6B,IAAI;AAAA,eAC1B,eAAe,GAAG,KAAK;AAAA,UAChC;AAAA,UACA,CAAC,SAAS,IAAI,CAAC;AAAA,UACf;AAAA,YACE,eAAe;AAAA,UACjB;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;ACrCA,IAMY;AANZ;AAAA;AAAA;AAMO,IAAK,YAAL,kBAAKC,eAAL;AAML,MAAAA,WAAA,mBAAgB;AAOhB,MAAAA,WAAA,wBAAqB;AAOrB,MAAAA,WAAA,wBAAqB;AApBX,aAAAA;AAAA,OAAA;AAAA;AAAA;;;ACNZ,IAyBM,sBAWO;AApCb;AAAA;AAAA;AACA;AAwBA,IAAM,uBAAuB;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEO,IAAM,SAAN,MAAa;AAAA,MAClB;AAAA,MACA;AAAA,MACA;AAAA,MACQ;AAAA,MAER,YACE,MACA,cACA,QACA;AACA,aAAK,OAAO;AACZ,aAAK,SAAS;AAGd,YAAI,CAAC,KAAK,OAAO,WAAW;AAC1B,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,cAAM,YAAY,KAAK,OAAO;AAG9B,YAAI,cAAc,KAAK;AACrB,gBAAM,IAAI;AAAA,YACR,oFAAoF,qBAAqB,KAAK,IAAI,CAAC;AAAA,UACrH;AAAA,QACF;AAGA,YAAI,UAAU,SAAS,GAAG,GAAG;AAC3B,gBAAM,IAAI;AAAA,YACR,qEAAqE,SAAS;AAAA,UAChF;AAAA,QACF;AAGA,mBAAW,YAAY,sBAAsB;AAC3C,cAAI,cAAc,YAAY,UAAU,WAAW,GAAG,QAAQ,GAAG,GAAG;AAClE,kBAAM,IAAI;AAAA,cACR,gDAAgD,qBAAqB,KAAK,IAAI,CAAC,WAAW,SAAS;AAAA,YACrG;AAAA,UACF;AAAA,QACF;AAEA,aAAK,UAAU,KAAK,UAAU,YAAY;AAC1C,aAAK,UACH,OAAO,iBAAiB,aAAa,SAAY;AAEnD,cAAM,UAAU,iBAAiB,EAAE;AACnC,YAAI,QAAQ,IAAI,IAAI,GAAG;AACrB,gBAAM,IAAI,MAAM,oBAAoB,IAAI,iBAAiB;AAAA,QAC3D;AAGA,YAAI,KAAK,OAAO,WAAW;AACzB,qBAAW,CAAC,cAAc,WAAW,KAAK,SAAS;AACjD,gBAAI,YAAY,OAAO,cAAc,KAAK,OAAO,WAAW;AAC1D,oBAAM,IAAI;AAAA,gBACR,0BAA0B,KAAK,OAAO,SAAS,qCAAqC,YAAY;AAAA,cAClG;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAEA,gBAAQ,IAAI,MAAM,IAAI;AAAA,MACxB;AAAA,MAEQ,UAAU,cAA2D;AAC3E,YAAI,OAAO,iBAAiB,YAAY;AACtC,iBAAO;AAAA,QACT;AAEA,cAAM,MAAM;AAEZ,YAAI,OAAO,IAAI,WAAW,YAAY;AACpC,iBAAO,CAAC,KAAK,QAAQ;AACnB,gBAAI,OAAQ,KAAK,KAAK,CAAC,QAAc;AACnC,kBAAI,KAAK;AACP,wBAAQ,MAAM,yBAAyB,GAAG;AAC1C,oBAAI,CAAC,IAAI,aAAa;AACpB,sBAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,sBAAI,IAAI,KAAK,UAAU,EAAE,OAAO,wBAAwB,CAAC,CAAC;AAAA,gBAC5D;AAAA,cACF;AAAA,YACF,CAAC;AAAA,UACH;AAAA,QACF;AAEA,YAAI,OAAO,IAAI,aAAa,YAAY;AACtC,iBAAO,IAAI,SAAS;AAAA,QACtB;AAIA,YAAI,OAAO,IAAI,YAAY,YAAY;AAErC,gBAAM,UAAU,IAAI;AACpB,gBAAM,eAAe;AAKrB,cAAI,e
AA4C;AAEhD,iBAAO,OAAO,KAAK,QAAQ;AAEzB,gBAAI,iBAAiB,MAAM;AACzB,6BACE,OAAO,aAAa,UAAU,aAC5B,aAAa,MAAM,IACnB,QAAQ,QAAQ;AAAA,YACtB;AACA,kBAAM;AACN,oBAAQ,KAAK,GAAG;AAAA,UAClB;AAAA,QACF;AAEA,cAAM,IAAI;AAAA,UACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAYF;AAAA,MACF;AAAA,MAEA,YAAsC;AACpC,eAAO,KAAK;AAAA,MACd;AAAA,IACF;AAAA;AAAA;;;ACxJO,SAAS,YAAyC;AACvD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,SAAS,MAA0C;AACjE,SAAO,iBAAiB,EAAE,OAAO,IAAI,IAAI;AAC3C;AAMO,SAAS,aAAuC;AACrD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,UAAU,MAAuC;AAC/D,SAAO,iBAAiB,EAAE,QAAQ,IAAI,IAAI;AAC5C;AAMO,SAAS,gBAA6C;AAC3D,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,aAAa,MAA0C;AACrE,SAAO,iBAAiB,EAAE,WAAW,IAAI,IAAI;AAC/C;AAMO,SAAS,UAAiC;AAC/C,SAAO,iBAAiB,EAAE;AAC5B;AAaO,SAAS,OAAO,YAA0C;AAC/D,QAAM,WAAW,iBAAiB;AAGlC,QAAM,cAAc,SAAS,KAAK,IAAI,UAAU;AAChD,MAAI,aAAa;AACf,WAAO;AAAA,EACT;AAGA,QAAM,gBAAgB,oBAAI,IAAwB;AAClD,QAAM,UAAU,oBAAI,IAAsB;AAE1C,WAAS,KAAK,QAAQ,CAAC,KAAK,QAAQ;AAElC,UAAM,WAAW,IAAI;AACrB,QAAI,CAAC,cAAc,IAAI,QAAQ,GAAG;AAChC,oBAAc,IAAI,UAAU,CAAC,CAAC;AAAA,IAChC;AACA,kBAAc,IAAI,QAAQ,EAAG,KAAK,GAAG;AAGrC,QAAI,IAAI,OAAO,MAAM;AACnB,cAAQ,IAAI,IAAI,OAAO,MAAM,GAAG;AAAA,IAClC;AAAA,EACF,CAAC;AAGD,QAAM,aAAa,cAAc,IAAI,UAAU;AAC/C,MAAI,cAAc,WAAW,WAAW,GAAG;AACzC,WAAO,WAAW,CAAC;AAAA,EACrB;AAGA,SAAO,QAAQ,IAAI,UAAU;AAC/B;AAMO,SAAS,kBAA4C;AAC1D,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,eAAe,MAAuC;AACpE,SAAO,iBAAiB,EAAE,aAAa,IAAI,IAAI;AACjD;AAMO,SAAS,eAAsC;AACpD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,YAAY,MAAoC;AAC9D,SAAO,iBAAiB,EAAE,UAAU,IAAI,IAAI;AAC9C;AAMO,SAAS,aAAkC;AAChD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,UAAU,MAAkC;AAC1D,SAAO,iBAAiB,EAAE,QAAQ,IAAI,IAAI;AAC5C;AA9KA;AAAA;AAAA;AAgBA;AAAA;AAAA;;;AChBA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgDA;AACA;AAUA;AAGA;AACA;AAMA;AACA;AACA;AAIA;AACA;AACA;AACA;AAOA;AAAA;AAAA;","names":["sql","path","http","import_client","init_helpers","init_helpers","init_helpers","init_helpers","process","path","import_node_crypto","getClickhouseClient","import_node_crypto","getKafkaProducer","path","sql","LifeCycle"]}
+ {"version":3,"sources":["../../src/dmv2/utils/stackTrace.ts","../../src/dmv2/typedBase.ts","../../src/dataModels/dataModelTypes.ts","../../src/sqlHelpers.ts","../../src/blocks/helpers.ts","../../src/dataModels/types.ts","../../src/browserCompatible.ts","../../src/commons.ts","../../src/secrets.ts","../../src/consumption-apis/helpers.ts","../../src/consumption-apis/webAppHelpers.ts","../../src/scripts/task.ts","../../src/cluster-utils.ts","../../src/consumption-apis/runner.ts","../../src/clients/redisClient.ts","../../src/consumption-apis/standalone.ts","../../src/utilities/json.ts","../../src/utilities/dataParser.ts","../../src/utilities/index.ts","../../src/connectors/dataSource.ts","../../src/index.ts","../../src/dmv2/internal.ts","../../src/config/configFile.ts","../../src/config/runtime.ts","../../src/dmv2/sdk/olapTable.ts","../../src/dmv2/sdk/stream.ts","../../src/dmv2/sdk/workflow.ts","../../src/dmv2/sdk/ingestApi.ts","../../src/dmv2/sdk/consumptionApi.ts","../../src/dmv2/sdk/ingestPipeline.ts","../../src/dmv2/sdk/etlPipeline.ts","../../src/dmv2/sdk/sqlResource.ts","../../src/dmv2/sdk/materializedView.ts","../../src/dmv2/sdk/view.ts","../../src/dmv2/sdk/lifeCycle.ts","../../src/dmv2/sdk/webApp.ts","../../src/dmv2/registry.ts","../../src/dmv2/index.ts"],"sourcesContent":["/**\n * Stack trace utilities for extracting source file information.\n *\n * This module provides functions for parsing stack traces to determine\n * where user code is located, filtering out internal library paths.\n */\n\n/**\n * Information extracted from a stack trace about source file location.\n */\nexport interface SourceFileInfo {\n /** The file path */\n file?: string;\n /** The line number (as a string) */\n line?: string;\n}\n\n/**\n * Source location with file, line, and column information.\n * Used for precise error location tracking.\n */\nexport interface SourceLocation {\n /** The file path */\n file: string;\n /** The line number */\n line: number;\n /** The column number (optional - may not always be available from stack trace) */\n column?: number;\n}\n\n/**\n * Check if a stack trace line should be skipped (internal/library code).\n * @internal\n */\nfunction shouldSkipStackLine(line: string): boolean {\n return (\n line.includes(\"node_modules\") || // Skip npm installed packages (prod)\n line.includes(\"node:internal\") || // Skip Node.js internals (modern format)\n line.includes(\"internal/modules\") || // Skip Node.js internals (older format)\n line.includes(\"ts-node\") || // Skip TypeScript execution\n line.includes(\"/ts-moose-lib/src/\") || // Skip dev/linked moose-lib src (Unix)\n line.includes(\"\\\\ts-moose-lib\\\\src\\\\\") || // Skip dev/linked moose-lib src (Windows)\n line.includes(\"/ts-moose-lib/dist/\") || // Skip dev/linked moose-lib dist (Unix)\n line.includes(\"\\\\ts-moose-lib\\\\dist\\\\\") // Skip dev/linked moose-lib dist (Windows)\n );\n}\n\n/**\n * Extract file path and line number from a stack trace line.\n * @internal\n */\nfunction parseStackLine(line: string): SourceFileInfo | undefined {\n const match =\n line.match(/\\((.*):(\\d+):(\\d+)\\)/) || line.match(/at (.*):(\\d+):(\\d+)/);\n if (match && match[1]) {\n return {\n file: match[1],\n line: match[2],\n };\n }\n return undefined;\n}\n\n/**\n * Extract source file information from a stack trace.\n * Works in both development (npm link) and production (npm install) environments.\n *\n * @param stack - The stack trace string from an Error object\n * @returns Object with file path and line number, or empty object 
if not found\n */\nexport function getSourceFileInfo(stack?: string): SourceFileInfo {\n if (!stack) return {};\n const lines = stack.split(\"\\n\");\n for (const line of lines) {\n if (shouldSkipStackLine(line)) continue;\n const info = parseStackLine(line);\n if (info) return info;\n }\n return {};\n}\n\n/**\n * Extracts source location (file, line, column) from a stack trace.\n *\n * Stack trace formats vary by environment:\n * - V8 (Node/Chrome): \" at Function (file.ts:10:15)\"\n * - SpiderMonkey (Firefox): \"Function@file.ts:10:15\"\n *\n * @param stack - Error stack trace string\n * @returns SourceLocation or undefined if parsing fails\n */\nexport function getSourceLocationFromStack(\n stack: string | undefined,\n): SourceLocation | undefined {\n if (!stack) return undefined;\n\n const lines = stack.split(\"\\n\");\n\n // Skip first line (error message) and internal frames\n for (const line of lines.slice(1)) {\n // Skip node_modules and internal moose-lib frames\n if (shouldSkipStackLine(line)) {\n continue;\n }\n\n // V8 format: \" at Function (file.ts:10:15)\" or \" at file.ts:10:15\"\n const v8Match = line.match(/at\\s+(?:.*?\\s+\\()?(.+):(\\d+):(\\d+)\\)?/);\n if (v8Match) {\n return {\n file: v8Match[1],\n line: parseInt(v8Match[2], 10),\n column: parseInt(v8Match[3], 10),\n };\n }\n\n // SpiderMonkey format: \"Function@file.ts:10:15\"\n const smMatch = line.match(/(?:.*@)?(.+):(\\d+):(\\d+)/);\n if (smMatch) {\n return {\n file: smMatch[1],\n line: parseInt(smMatch[2], 10),\n column: parseInt(smMatch[3], 10),\n };\n }\n }\n\n return undefined;\n}\n\n/**\n * Extract the first file path outside moose-lib internals from a stack trace.\n * Works in both development (npm link) and production (npm install) environments.\n *\n * @deprecated Use getSourceLocationFromStack instead\n * @param stack - The stack trace string from an Error object\n * @returns The first user-code file path, or undefined if not found\n */\nexport function getSourceFileFromStack(stack?: string): string | undefined {\n const location = getSourceLocationFromStack(stack);\n return location?.file;\n}\n","import { IJsonSchemaCollection } from \"typia/src/schemas/json/IJsonSchemaCollection\";\nimport { Column } from \"../dataModels/dataModelTypes\";\nimport { getSourceFileInfo } from \"./utils/stackTrace\";\n\n/**\n * Type definition for typia validation functions\n */\nexport interface TypiaValidators<T> {\n /** Typia validator function: returns { success: boolean, data?: T, errors?: any[] } */\n validate?: (data: unknown) => { success: boolean; data?: T; errors?: any[] };\n /** Typia assert function: throws on validation failure, returns T on success */\n assert?: (data: unknown) => T;\n /** Typia is function: returns boolean indicating if data matches type T */\n is?: (data: unknown) => data is T;\n}\n\n/**\n * Base class for all typed Moose dmv2 resources (OlapTable, Stream, etc.).\n * Handles the storage and injection of schema information (JSON schema and Column array)\n * provided by the Moose compiler plugin.\n *\n * @template T The data type (interface or type alias) defining the schema of the resource.\n * @template C The specific configuration type for the resource (e.g., OlapConfig, StreamConfig).\n */\nexport class TypedBase<T, C> {\n /** The JSON schema representation of type T. Injected by the compiler plugin. */\n schema: IJsonSchemaCollection.IV3_1;\n /** The name assigned to this resource instance. 
*/\n name: string;\n\n /** A dictionary mapping column names (keys of T) to their Column definitions. */\n columns: {\n [columnName in keyof Required<T>]: Column;\n };\n /** An array containing the Column definitions for this resource. Injected by the compiler plugin. */\n columnArray: Column[];\n\n /** The configuration object specific to this resource type. */\n config: C;\n\n /** Typia validation functions for type T. Injected by the compiler plugin for OlapTable. */\n validators?: TypiaValidators<T>;\n\n /** Optional metadata for the resource, always present as an object. */\n metadata!: { [key: string]: any };\n\n /**\n * Whether this resource allows extra fields beyond the defined columns.\n * When true, extra fields in payloads are passed through to streaming functions.\n * Injected by the compiler plugin when the type has an index signature.\n */\n allowExtraFields: boolean;\n\n /**\n * @internal Constructor intended for internal use by subclasses and the compiler plugin.\n * It expects the schema and columns to be provided, typically injected by the compiler.\n *\n * @param name The name for the resource instance.\n * @param config The configuration object for the resource.\n * @param schema The JSON schema for the resource's data type T (injected).\n * @param columns The array of Column definitions for T (injected).\n * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).\n */\n constructor(\n name: string,\n config: C,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n allowExtraFields?: boolean,\n ) {\n if (schema === undefined || columns === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n this.schema = schema;\n this.columnArray = columns;\n const columnsObj = {} as any;\n columns.forEach((column) => {\n columnsObj[column.name] = column;\n });\n this.columns = columnsObj;\n\n this.name = name;\n this.config = config;\n this.validators = validators;\n this.allowExtraFields = allowExtraFields ?? false;\n\n // Always ensure metadata is an object and attach stackTrace (last 10 lines only)\n this.metadata =\n (config as any)?.metadata ? 
{ ...(config as any).metadata } : {};\n\n if (!this.metadata.source) {\n const stack = new Error().stack;\n if (stack) {\n const info = getSourceFileInfo(stack);\n this.metadata.source = { file: info.file, line: info.line };\n }\n }\n }\n}\n","import ts from \"typescript\";\nimport { IdentifierBrandedString } from \"../sqlHelpers\";\n\nexport type EnumValues =\n | { name: string; value: { Int: number } }[]\n | { name: string; value: { String: string } }[];\nexport type DataEnum = { name: string; values: EnumValues };\nexport type Nested = { name: string; columns: Column[]; jwt: boolean };\nexport type ArrayType = { elementType: DataType; elementNullable: boolean };\nexport type NamedTupleType = { fields: Array<[string, DataType]> };\nexport type MapType = { keyType: DataType; valueType: DataType };\nexport type JsonOptions = {\n max_dynamic_paths?: number;\n max_dynamic_types?: number;\n typed_paths?: Array<[string, DataType]>;\n skip_paths?: string[];\n skip_regexps?: string[];\n};\nexport type DataType =\n | string\n | DataEnum\n | ArrayType\n | Nested\n | NamedTupleType\n | MapType\n | JsonOptions\n | { nullable: DataType };\nexport interface Column {\n name: IdentifierBrandedString;\n data_type: DataType;\n required: boolean;\n unique: false; // what is this for?\n primary_key: boolean;\n default: string | null;\n materialized: string | null;\n ttl: string | null;\n codec: string | null;\n annotations: [string, any][];\n comment: string | null;\n}\n\nexport interface DataModel {\n columns: Column[];\n name: string;\n}\n\nexport class UnknownType extends Error {\n t: ts.Type;\n fieldName: string;\n typeName: string;\n constructor(t: ts.Type, fieldName: string, typeName: string) {\n super();\n this.t = t;\n this.fieldName = fieldName;\n this.typeName = typeName;\n }\n}\n\nexport class NullType extends Error {\n fieldName: string;\n typeName: string;\n constructor(fieldName: string, typeName: string) {\n super();\n this.fieldName = fieldName;\n this.typeName = typeName;\n }\n}\n\nexport class UnsupportedEnum extends Error {\n enumName: string;\n constructor(enumName: string) {\n super();\n this.enumName = enumName;\n }\n}\n\nexport class UnsupportedFeature extends Error {\n featureName: string;\n constructor(featureName: string) {\n super();\n this.featureName = featureName;\n }\n}\n\nexport class IndexType extends Error {\n typeName: string;\n indexSignatures: string[];\n\n constructor(typeName: string, indexSignatures: string[]) {\n const explanation =\n \"Index signatures (e.g. 
[key: string]: value) are not supported in data models.\";\n\n const suggestion =\n \"Consider splitting this into separate types or using a single Record<K, V> type.\";\n\n const signatures = `Found index signatures: ${indexSignatures.join(\", \")}`;\n\n super(\n `${explanation}\\n\\nType: ${typeName}\\n\\n${signatures}\\n\\nSuggestion: ${suggestion}`,\n );\n\n this.typeName = typeName;\n this.indexSignatures = indexSignatures;\n }\n}\n\n/**\n * Type guard: is this DataType an Array(Nested(...))?\n * Uses the ArrayType and Nested types for type safety.\n */\nexport function isArrayNestedType(\n dt: DataType,\n): dt is ArrayType & { elementType: Nested } {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n (dt as ArrayType).elementType !== null &&\n typeof (dt as ArrayType).elementType === \"object\" &&\n (dt as ArrayType).elementType.hasOwnProperty(\"columns\") &&\n Array.isArray(((dt as ArrayType).elementType as Nested).columns)\n );\n}\n\n/**\n * Type guard: is this DataType a Nested struct (not array)?\n */\nexport function isNestedType(dt: DataType): dt is Nested {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n Array.isArray((dt as Nested).columns)\n );\n}\n","// source https://github.com/blakeembrey/sql-template-tag/blob/main/src/index.ts\nimport { Column } from \"./dataModels/dataModelTypes\";\nimport { OlapTable } from \"./dmv2\";\n\nimport { AggregationFunction } from \"./dataModels/typeConvert\";\n\n/**\n * Quote a ClickHouse identifier with backticks if not already quoted.\n * Backticks allow special characters (e.g., hyphens) in identifiers.\n */\nexport const quoteIdentifier = (name: string): string => {\n return name.startsWith(\"`\") && name.endsWith(\"`\") ? name : `\\`${name}\\``;\n};\n\nconst isTable = (\n value: RawValue | Column | OlapTable<any>,\n): value is OlapTable<any> =>\n typeof value === \"object\" &&\n value !== null &&\n \"kind\" in value &&\n value.kind === \"OlapTable\";\n\nexport type IdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\nexport type NonIdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\n\n/**\n * Values supported by SQL engine.\n */\nexport type Value =\n | NonIdentifierBrandedString\n | number\n | boolean\n | Date\n | [string, string];\n\n/**\n * Supported value or SQL instance.\n */\nexport type RawValue = Value | Sql;\n\nconst isColumn = (value: RawValue | Column | OlapTable<any>): value is Column =>\n typeof value === \"object\" && \"name\" in value && \"annotations\" in value;\n\nexport function sql(\n strings: readonly string[],\n ...values: readonly (RawValue | Column | OlapTable<any>)[]\n) {\n return new Sql(strings, values);\n}\n\nconst instanceofSql = (\n value: RawValue | Column | OlapTable<any>,\n): value is Sql =>\n typeof value === \"object\" && \"values\" in value && \"strings\" in value;\n\n/**\n * A SQL instance can be nested within each other to build SQL strings.\n */\nexport class Sql {\n readonly values: Value[];\n readonly strings: string[];\n\n constructor(\n rawStrings: readonly string[],\n rawValues: readonly (RawValue | Column | OlapTable<any>)[],\n ) {\n if (rawStrings.length - 1 !== rawValues.length) {\n if (rawStrings.length === 0) {\n throw new TypeError(\"Expected at least 1 string\");\n }\n\n throw new TypeError(\n `Expected ${rawStrings.length} strings to have ${\n rawStrings.length - 1\n } values`,\n );\n }\n\n const valuesLength = rawValues.reduce<number>(\n (len: number, value: RawValue | Column | OlapTable<any>) 
=>\n len +\n (instanceofSql(value) ? value.values.length\n : isColumn(value) || isTable(value) ? 0\n : 1),\n 0,\n );\n\n this.values = new Array(valuesLength);\n this.strings = new Array(valuesLength + 1);\n\n this.strings[0] = rawStrings[0];\n\n // Iterate over raw values, strings, and children. The value is always\n // positioned between two strings, e.g. `index + 1`.\n let i = 0,\n pos = 0;\n while (i < rawValues.length) {\n const child = rawValues[i++];\n const rawString = rawStrings[i];\n\n // Check for nested `sql` queries.\n if (instanceofSql(child)) {\n // Append child prefix text to current string.\n this.strings[pos] += child.strings[0];\n\n let childIndex = 0;\n while (childIndex < child.values.length) {\n this.values[pos++] = child.values[childIndex++];\n this.strings[pos] = child.strings[childIndex];\n }\n\n // Append raw string to current string.\n this.strings[pos] += rawString;\n } else if (isColumn(child)) {\n const aggregationFunction = child.annotations.find(\n ([k, _]) => k === \"aggregationFunction\",\n );\n if (aggregationFunction !== undefined) {\n this.strings[pos] +=\n `${(aggregationFunction[1] as AggregationFunction).functionName}Merge(\\`${child.name}\\`)`;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else if (isTable(child)) {\n if (child.config.database) {\n this.strings[pos] += `\\`${child.config.database}\\`.\\`${child.name}\\``;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else {\n this.values[pos++] = child;\n this.strings[pos] = rawString;\n }\n }\n }\n}\n\nexport const toStaticQuery = (sql: Sql): string => {\n const [query, params] = toQuery(sql);\n if (Object.keys(params).length !== 0) {\n throw new Error(\n \"Dynamic SQL is not allowed in the select statement in view creation.\",\n );\n }\n return query;\n};\n\nexport const toQuery = (sql: Sql): [string, { [pN: string]: any }] => {\n const parameterizedStubs = sql.values.map((v, i) =>\n createClickhouseParameter(i, v),\n );\n\n const query = sql.strings\n .map((s, i) =>\n s != \"\" ? `${s}${emptyIfUndefined(parameterizedStubs[i])}` : \"\",\n )\n .join(\"\");\n\n const query_params = sql.values.reduce(\n (acc: Record<string, unknown>, v, i) => ({\n ...acc,\n [`p${i}`]: getValueFromParameter(v),\n }),\n {},\n );\n return [query, query_params];\n};\n\n/**\n * Build a display-only SQL string with values inlined for logging/debugging.\n * Does not alter execution behavior; use toQuery for actual execution.\n */\nexport const toQueryPreview = (sql: Sql): string => {\n try {\n const formatValue = (v: Value): string => {\n // Unwrap identifiers: [\"Identifier\", name]\n if (Array.isArray(v)) {\n const [type, val] = v as unknown as [string, any];\n if (type === \"Identifier\") {\n // Quote identifiers with backticks like other helpers\n return `\\`${String(val)}\\``;\n }\n // Fallback for unexpected arrays\n return `[${(v as unknown as any[]).map((x) => formatValue(x as Value)).join(\", \")}]`;\n }\n if (v === null || v === undefined) return \"NULL\";\n if (typeof v === \"string\") return `'${v.replace(/'/g, \"''\")}'`;\n if (typeof v === \"number\") return String(v);\n if (typeof v === \"boolean\") return v ? \"true\" : \"false\";\n if (v instanceof Date)\n return `'${v.toISOString().replace(\"T\", \" \").slice(0, 19)}'`;\n try {\n return JSON.stringify(v as unknown as any);\n } catch {\n return String(v);\n }\n };\n\n let out = sql.strings[0] ?? 
\"\";\n for (let i = 0; i < sql.values.length; i++) {\n const val = getValueFromParameter(sql.values[i] as any);\n out += formatValue(val as Value);\n out += sql.strings[i + 1] ?? \"\";\n }\n return out.replace(/\\s+/g, \" \").trim();\n } catch (error) {\n console.log(`toQueryPreview error: ${error}`);\n return \"/* query preview unavailable */\";\n }\n};\n\nexport const getValueFromParameter = (value: any) => {\n if (Array.isArray(value)) {\n const [type, val] = value;\n if (type === \"Identifier\") return val;\n }\n return value;\n};\nexport function createClickhouseParameter(\n parameterIndex: number,\n value: Value,\n) {\n // ClickHouse use {name:type} be a placeholder, so if we only use number string as name e.g: {1:Unit8}\n // it will face issue when converting to the query params => {1: value1}, because the key is value not string type, so here add prefix \"p\" to avoid this issue.\n return `{p${parameterIndex}:${mapToClickHouseType(value)}}`;\n}\n\n/**\n * Convert the JS type (source is JSON format by API query parameter) to the corresponding ClickHouse type for generating named placeholder of parameterized query.\n * Only support to convert number to Int or Float, boolean to Bool, string to String, other types will convert to String.\n * If exist complex type e.g: object, Array, null, undefined, Date, Record.. etc, just convert to string type by ClickHouse function in SQL.\n * ClickHouse support converting string to other types function.\n * Please see Each section of the https://clickhouse.com/docs/en/sql-reference/functions and https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions\n * @param value\n * @returns 'Float', 'Int', 'Bool', 'String'\n */\nexport const mapToClickHouseType = (value: Value) => {\n if (typeof value === \"number\") {\n // infer the float or int according to exist remainder or not\n return Number.isInteger(value) ? \"Int\" : \"Float\";\n }\n // When define column type or query result with parameterized query, The Bool or Boolean type both supported.\n // But the column type of query result only return Bool, so we only support Bool type for safety.\n if (typeof value === \"boolean\") return \"Bool\";\n if (value instanceof Date) return \"DateTime\";\n if (Array.isArray(value)) {\n const [type, _] = value;\n return type;\n }\n return \"String\";\n};\nfunction emptyIfUndefined(value: string | undefined): string {\n return value === undefined ? 
\"\" : value;\n}\n","import { quoteIdentifier } from \"../sqlHelpers\";\n\ninterface AggregationCreateOptions {\n tableCreateOptions: TableCreateOptions;\n materializedViewName: string;\n select: string;\n}\n\ninterface AggregationDropOptions {\n viewName: string;\n tableName: string;\n}\n\ninterface MaterializedViewCreateOptions {\n name: string;\n destinationTable: string;\n select: string;\n}\n\ninterface PopulateTableOptions {\n destinationTable: string;\n select: string;\n}\n\ninterface TableCreateOptions {\n name: string;\n columns: Record<string, string>;\n engine?: ClickHouseEngines;\n orderBy?: string;\n}\n\nexport interface Blocks {\n setup: string[];\n teardown: string[];\n}\n\nexport enum ClickHouseEngines {\n MergeTree = \"MergeTree\",\n ReplacingMergeTree = \"ReplacingMergeTree\",\n SummingMergeTree = \"SummingMergeTree\",\n AggregatingMergeTree = \"AggregatingMergeTree\",\n CollapsingMergeTree = \"CollapsingMergeTree\",\n VersionedCollapsingMergeTree = \"VersionedCollapsingMergeTree\",\n GraphiteMergeTree = \"GraphiteMergeTree\",\n S3Queue = \"S3Queue\",\n S3 = \"S3\",\n Buffer = \"Buffer\",\n Distributed = \"Distributed\",\n IcebergS3 = \"IcebergS3\",\n Kafka = \"Kafka\",\n ReplicatedMergeTree = \"ReplicatedMergeTree\",\n ReplicatedReplacingMergeTree = \"ReplicatedReplacingMergeTree\",\n ReplicatedAggregatingMergeTree = \"ReplicatedAggregatingMergeTree\",\n ReplicatedSummingMergeTree = \"ReplicatedSummingMergeTree\",\n ReplicatedCollapsingMergeTree = \"ReplicatedCollapsingMergeTree\",\n ReplicatedVersionedCollapsingMergeTree = \"ReplicatedVersionedCollapsingMergeTree\",\n}\n\n/**\n * Drops an existing view if it exists.\n */\nexport function dropView(name: string): string {\n return `DROP VIEW IF EXISTS ${quoteIdentifier(name)}`.trim();\n}\n\n/**\n * Creates a materialized view.\n */\nexport function createMaterializedView(\n options: MaterializedViewCreateOptions,\n): string {\n return `CREATE MATERIALIZED VIEW IF NOT EXISTS ${quoteIdentifier(options.name)}\n TO ${quoteIdentifier(options.destinationTable)}\n AS ${options.select}`.trim();\n}\n\n/**\n * @deprecated Population of tables is now handled automatically by the Rust infrastructure.\n * This function is kept for backwards compatibility but will be ignored.\n * The framework now intelligently determines when to populate based on:\n * - Whether the materialized view is new or being replaced\n * - Whether the source is an S3Queue table (which doesn't support SELECT)\n *\n * Populates a table with data.\n */\nexport function populateTable(options: PopulateTableOptions): string {\n return `INSERT INTO ${quoteIdentifier(options.destinationTable)}\n ${options.select}`.trim();\n}\n","import { Pattern, TagBase } from \"typia/lib/tags\";\nimport { tags } from \"typia\";\n\nexport type ClickHousePrecision<P extends number> = {\n _clickhouse_precision?: P;\n};\n\nexport const DecimalRegex: \"^-?\\\\d+(\\\\.\\\\d+)?$\" = \"^-?\\\\d+(\\\\.\\\\d+)?$\";\n\nexport type ClickHouseDecimal<P extends number, S extends number> = {\n _clickhouse_precision?: P;\n _clickhouse_scale?: S;\n} & Pattern<typeof DecimalRegex>;\n\nexport type ClickHouseFixedStringSize<N extends number> = {\n _clickhouse_fixed_string_size?: N;\n};\n\n/**\n * FixedString(N) - Fixed-length string of exactly N bytes.\n *\n * ClickHouse stores exactly N bytes, padding shorter values with null bytes.\n * Values exceeding N bytes will throw an exception.\n *\n * Use for binary data: hashes, IP addresses, UUIDs, MAC addresses.\n *\n * @example\n * interface BinaryData {\n 
* md5_hash: string & FixedString<16>; // 16-byte MD5\n * sha256_hash: string & FixedString<32>; // 32-byte SHA256\n * }\n */\nexport type FixedString<N extends number> = string &\n ClickHouseFixedStringSize<N>;\n\nexport type ClickHouseByteSize<N extends number> = {\n _clickhouse_byte_size?: N;\n};\n\nexport type LowCardinality = {\n _LowCardinality?: true;\n};\n\n// ClickHouse-friendly helper aliases for clarity in user schemas\n// These are erased at compile time but guide the ClickHouse mapping logic.\nexport type DateTime = Date;\nexport type DateTime64<P extends number> = Date & ClickHousePrecision<P>;\n\nexport type DateTimeString = string & tags.Format<\"date-time\">;\n/**\n * JS Date objects cannot hold microsecond precision.\n * Use string as the runtime type to avoid losing information.\n */\nexport type DateTime64String<P extends number> = string &\n tags.Format<\"date-time\"> &\n ClickHousePrecision<P>;\n\n// Numeric convenience tags mirroring ClickHouse integer and float families\nexport type Float32 = number & ClickHouseFloat<\"float32\">;\nexport type Float64 = number & ClickHouseFloat<\"float64\">;\n\nexport type Int8 = number & ClickHouseInt<\"int8\">;\nexport type Int16 = number & ClickHouseInt<\"int16\">;\nexport type Int32 = number & ClickHouseInt<\"int32\">;\nexport type Int64 = number & ClickHouseInt<\"int64\">;\n\nexport type UInt8 = number & ClickHouseInt<\"uint8\">;\nexport type UInt16 = number & ClickHouseInt<\"uint16\">;\nexport type UInt32 = number & ClickHouseInt<\"uint32\">;\nexport type UInt64 = number & ClickHouseInt<\"uint64\">;\n\n// Decimal(P, S) annotation\nexport type Decimal<P extends number, S extends number> = string &\n ClickHouseDecimal<P, S>;\n\n/**\n * Attach compression codec to a column type.\n *\n * Any valid ClickHouse codec expression is allowed. ClickHouse validates the codec at runtime.\n *\n * @template T The base data type\n * @template CodecExpr The codec expression (single codec or chain)\n *\n * @example\n * interface Metrics {\n * // Single codec\n * log_blob: string & ClickHouseCodec<\"ZSTD(3)\">;\n *\n * // Codec chain (processed left-to-right)\n * timestamp: Date & ClickHouseCodec<\"Delta, LZ4\">;\n * temperature: number & ClickHouseCodec<\"Gorilla, ZSTD\">;\n *\n * // Specialized codecs\n * counter: number & ClickHouseCodec<\"DoubleDelta\">;\n *\n * // Can combine with other annotations\n * count: UInt64 & ClickHouseCodec<\"DoubleDelta, LZ4\">;\n * }\n */\nexport type ClickHouseCodec<CodecExpr extends string> = {\n _clickhouse_codec?: CodecExpr;\n};\n\nexport type ClickHouseFloat<Value extends \"float32\" | \"float64\"> = tags.Type<\n Value extends \"float32\" ? \"float\" : \"double\"\n>;\n\nexport type ClickHouseInt<\n Value extends\n | \"int8\"\n | \"int16\"\n | \"int32\"\n | \"int64\"\n // | \"int128\"\n // | \"int256\"\n | \"uint8\"\n | \"uint16\"\n | \"uint32\"\n | \"uint64\",\n // | \"uint128\"\n // | \"uint256\",\n> =\n Value extends \"int32\" | \"int64\" | \"uint32\" | \"uint64\" ? tags.Type<Value>\n : TagBase<{\n target: \"number\";\n kind: \"type\";\n value: Value;\n validate: Value extends \"int8\" ? \"-128 <= $input && $input <= 127\"\n : Value extends \"int16\" ? \"-32768 <= $input && $input <= 32767\"\n : Value extends \"uint8\" ? \"0 <= $input && $input <= 255\"\n : Value extends \"uint16\" ? 
\"0 <= $input && $input <= 65535\"\n : never;\n exclusive: true;\n schema: {\n type: \"integer\";\n };\n }>;\n\n/**\n * By default, nested objects map to the `Nested` type in clickhouse.\n * Write `nestedObject: AnotherInterfaceType & ClickHouseNamedTuple`\n * to map AnotherInterfaceType to the named tuple type.\n */\nexport type ClickHouseNamedTuple = {\n _clickhouse_mapped_type?: \"namedTuple\";\n};\n\nexport type ClickHouseJson<\n maxDynamicPaths extends number | undefined = undefined,\n maxDynamicTypes extends number | undefined = undefined,\n skipPaths extends string[] = [],\n skipRegexes extends string[] = [],\n> = {\n _clickhouse_mapped_type?: \"JSON\";\n _clickhouse_json_settings?: {\n maxDynamicPaths?: maxDynamicPaths;\n maxDynamicTypes?: maxDynamicTypes;\n skipPaths?: skipPaths;\n skipRegexes?: skipRegexes;\n };\n};\n\n// Geometry helper types\nexport type ClickHousePoint = [number, number] & {\n _clickhouse_mapped_type?: \"Point\";\n};\nexport type ClickHouseRing = ClickHousePoint[] & {\n _clickhouse_mapped_type?: \"Ring\";\n};\nexport type ClickHouseLineString = ClickHousePoint[] & {\n _clickhouse_mapped_type?: \"LineString\";\n};\nexport type ClickHouseMultiLineString = ClickHouseLineString[] & {\n _clickhouse_mapped_type?: \"MultiLineString\";\n};\nexport type ClickHousePolygon = ClickHouseRing[] & {\n _clickhouse_mapped_type?: \"Polygon\";\n};\nexport type ClickHouseMultiPolygon = ClickHousePolygon[] & {\n _clickhouse_mapped_type?: \"MultiPolygon\";\n};\n\n/**\n * typia may have trouble handling this type.\n * In which case, use {@link WithDefault} as a workaround\n *\n * @example\n * { field: number & ClickHouseDefault<\"0\"> }\n */\nexport type ClickHouseDefault<SqlExpression extends string> = {\n _clickhouse_default?: SqlExpression;\n};\n\n/**\n * @example\n * {\n * ...\n * timestamp: Date;\n * debugMessage: string & ClickHouseTTL<\"timestamp + INTERVAL 1 WEEK\">;\n * }\n */\nexport type ClickHouseTTL<SqlExpression extends string> = {\n _clickhouse_ttl?: SqlExpression;\n};\n\n/**\n * ClickHouse MATERIALIZED column annotation.\n * The column value is computed at INSERT time and physically stored.\n * Cannot be explicitly inserted by users.\n *\n * @example\n * interface Events {\n * eventTime: DateTime;\n * // Extract date component - computed and stored at insert time\n * eventDate: Date & ClickHouseMaterialized<\"toDate(event_time)\">;\n *\n * userId: string;\n * // Precompute hash for fast lookups\n * userHash: UInt64 & ClickHouseMaterialized<\"cityHash64(userId)\">;\n * }\n *\n * @remarks\n * - MATERIALIZED and DEFAULT are mutually exclusive\n * - Can be combined with ClickHouseCodec for compression\n * - Changing the expression modifies the column in-place (existing values preserved)\n */\nexport type ClickHouseMaterialized<SqlExpression extends string> = {\n _clickhouse_materialized?: SqlExpression;\n};\n\n/**\n * See also {@link ClickHouseDefault}\n *\n * @example{ updated_at: WithDefault<Date, \"now()\"> }\n */\nexport type WithDefault<T, _SqlExpression extends string> = T;\n","export type Key<T extends string | number | Date> = T;\n\nexport type JWT<T extends object> = T;\n\nexport {\n Aggregated,\n SimpleAggregated,\n OlapTable,\n OlapConfig,\n S3QueueTableSettings,\n Stream,\n StreamConfig,\n DeadLetterModel,\n DeadLetter,\n DeadLetterQueue,\n IngestApi,\n IngestConfig,\n Api,\n ApiConfig,\n ConsumptionApi,\n EgressConfig,\n IngestPipeline,\n SqlResource,\n View,\n MaterializedView,\n Task,\n Workflow,\n ETLPipeline,\n ETLPipelineConfig,\n LifeCycle,\n WebApp,\n 
WebAppConfig,\n WebAppHandler,\n FrameworkApp,\n // Registry functions\n getTables,\n getTable,\n getStreams,\n getStream,\n getIngestApis,\n getIngestApi,\n getApis,\n getApi,\n getSqlResources,\n getSqlResource,\n getWorkflows,\n getWorkflow,\n getWebApps,\n getWebApp,\n} from \"./dmv2\";\n\nexport {\n ClickHousePrecision,\n ClickHouseDecimal,\n ClickHouseByteSize,\n ClickHouseFixedStringSize,\n ClickHouseFloat,\n ClickHouseInt,\n ClickHouseJson,\n LowCardinality,\n ClickHouseNamedTuple,\n ClickHouseDefault,\n ClickHouseTTL,\n ClickHouseMaterialized,\n WithDefault,\n ClickHouseCodec,\n // Added friendly aliases and numeric helpers\n DateTime,\n DateTime64,\n DateTimeString,\n DateTime64String,\n FixedString,\n Float32,\n Float64,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n Decimal,\n} from \"./dataModels/types\";\n\nexport type { ApiUtil, ConsumptionUtil } from \"./consumption-apis/helpers\";\n\nexport * from \"./sqlHelpers\";\n","import http from \"http\";\nimport { createClient } from \"@clickhouse/client\";\nimport { KafkaJS } from \"@514labs/kafka-javascript\";\nimport { SASLOptions } from \"@514labs/kafka-javascript/types/kafkajs\";\nconst { Kafka } = KafkaJS;\ntype Kafka = KafkaJS.Kafka;\ntype Consumer = KafkaJS.Consumer;\nexport type Producer = KafkaJS.Producer;\n\n/**\n * Utility function for compiler-related logging that can be disabled via environment variable.\n * Set MOOSE_DISABLE_COMPILER_LOGS=true to suppress these logs (useful for testing environments).\n */\n\n/**\n * Returns true if the value is a common truthy string: \"1\", \"true\", \"yes\", \"on\" (case-insensitive).\n */\nfunction isTruthy(value: string | undefined): boolean {\n if (!value) return false;\n switch (value.trim().toLowerCase()) {\n case \"1\":\n case \"true\":\n case \"yes\":\n case \"on\":\n return true;\n default:\n return false;\n }\n}\n\nexport const compilerLog = (message: string) => {\n if (!isTruthy(process.env.MOOSE_DISABLE_COMPILER_LOGS)) {\n console.log(message);\n }\n};\n\nexport const antiCachePath = (path: string) =>\n `${path}?num=${Math.random().toString()}&time=${Date.now()}`;\n\nexport const getFileName = (filePath: string) => {\n const regex = /\\/([^\\/]+)\\.ts/;\n const matches = filePath.match(regex);\n if (matches && matches.length > 1) {\n return matches[1];\n }\n return \"\";\n};\n\ninterface ClientConfig {\n username: string;\n password: string;\n database: string;\n useSSL: string;\n host: string;\n port: string;\n}\n\nexport const getClickhouseClient = ({\n username,\n password,\n database,\n useSSL,\n host,\n port,\n}: ClientConfig) => {\n const protocol =\n useSSL === \"1\" || useSSL.toLowerCase() === \"true\" ? \"https\" : \"http\";\n console.log(`Connecting to Clickhouse at ${protocol}://${host}:${port}`);\n return createClient({\n url: `${protocol}://${host}:${port}`,\n username: username,\n password: password,\n database: database,\n application: \"moose\",\n // Note: wait_end_of_query is configured per operation type, not globally\n // to preserve SELECT query performance while ensuring INSERT/DDL reliability\n });\n};\n\nexport type CliLogData = {\n message_type?: \"Info\" | \"Success\" | \"Error\" | \"Highlight\";\n action: string;\n message: string;\n};\n\nexport const cliLog: (log: CliLogData) => void = (log) => {\n const req = http.request({\n port: parseInt(process.env.MOOSE_MANAGEMENT_PORT ?? 
\"5001\"),\n method: \"POST\",\n path: \"/logs\",\n });\n\n req.on(\"error\", (err: Error) => {\n console.log(`Error ${err.name} sending CLI log.`, err.message);\n });\n\n req.write(JSON.stringify({ message_type: \"Info\", ...log }));\n req.end();\n};\n\n/**\n * Method to change .ts, .cts, and .mts to .js, .cjs, and .mjs\n * This is needed because 'import' does not support .ts, .cts, and .mts\n */\nexport function mapTstoJs(filePath: string): string {\n return filePath\n .replace(/\\.ts$/, \".js\")\n .replace(/\\.cts$/, \".cjs\")\n .replace(/\\.mts$/, \".mjs\");\n}\n\nexport const MAX_RETRIES = 150;\nexport const MAX_RETRY_TIME_MS = 1000;\nexport const RETRY_INITIAL_TIME_MS = 100;\n\nexport const MAX_RETRIES_PRODUCER = 150;\nexport const RETRY_FACTOR_PRODUCER = 0.2;\n// Means all replicas need to acknowledge the message\nexport const ACKs = -1;\n\n/**\n * Creates the base producer configuration for Kafka.\n * Used by both the SDK stream publishing and streaming function workers.\n *\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n * @returns Producer configuration object for the Confluent Kafka client\n */\nexport function createProducerConfig(maxMessageBytes?: number) {\n return {\n kafkaJS: {\n idempotent: false, // Not needed for at-least-once delivery\n acks: ACKs,\n retry: {\n retries: MAX_RETRIES_PRODUCER,\n maxRetryTime: MAX_RETRY_TIME_MS,\n },\n },\n \"linger.ms\": 0, // This is to make sure at least once delivery with immediate feedback on the send\n ...(maxMessageBytes && { \"message.max.bytes\": maxMessageBytes }),\n };\n}\n\n/**\n * Parses a comma-separated broker string into an array of valid broker addresses.\n * Handles whitespace trimming and filters out empty elements.\n *\n * @param brokerString - Comma-separated broker addresses (e.g., \"broker1:9092, broker2:9092, , broker3:9092\")\n * @returns Array of trimmed, non-empty broker addresses\n */\nconst parseBrokerString = (brokerString: string): string[] =>\n brokerString\n .split(\",\")\n .map((b) => b.trim())\n .filter((b) => b.length > 0);\n\nexport type KafkaClientConfig = {\n clientId: string;\n broker: string;\n securityProtocol?: string; // e.g. \"SASL_SSL\" or \"PLAINTEXT\"\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string; // e.g. 
\"scram-sha-256\", \"plain\"\n};\n\n/**\n * Dynamically creates and connects a KafkaJS producer using the provided configuration.\n * Returns a connected producer instance.\n *\n * @param cfg - Kafka client configuration\n * @param logger - Logger instance\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n */\nexport async function getKafkaProducer(\n cfg: KafkaClientConfig,\n logger: Logger,\n maxMessageBytes?: number,\n): Promise<Producer> {\n const kafka = await getKafkaClient(cfg, logger);\n\n const producer = kafka.producer(createProducerConfig(maxMessageBytes));\n await producer.connect();\n return producer;\n}\n\n/**\n * Interface for logging functionality\n */\nexport interface Logger {\n logPrefix: string;\n log: (message: string) => void;\n error: (message: string) => void;\n warn: (message: string) => void;\n}\n\nexport const logError = (logger: Logger, e: Error): void => {\n logger.error(e.message);\n const stack = e.stack;\n if (stack) {\n logger.error(stack);\n }\n};\n\n/**\n * Builds SASL configuration for Kafka client authentication\n */\nconst buildSaslConfig = (\n logger: Logger,\n args: KafkaClientConfig,\n): SASLOptions | undefined => {\n const mechanism = args.saslMechanism ? args.saslMechanism.toLowerCase() : \"\";\n switch (mechanism) {\n case \"plain\":\n case \"scram-sha-256\":\n case \"scram-sha-512\":\n return {\n mechanism: mechanism,\n username: args.saslUsername || \"\",\n password: args.saslPassword || \"\",\n };\n default:\n logger.warn(`Unsupported SASL mechanism: ${args.saslMechanism}`);\n return undefined;\n }\n};\n\n/**\n * Dynamically creates a KafkaJS client configured with provided settings.\n * Use this to construct producers/consumers with custom options.\n */\nexport const getKafkaClient = async (\n cfg: KafkaClientConfig,\n logger: Logger,\n): Promise<Kafka> => {\n const brokers = parseBrokerString(cfg.broker || \"\");\n if (brokers.length === 0) {\n throw new Error(`No valid broker addresses found in: \"${cfg.broker}\"`);\n }\n\n logger.log(`Creating Kafka client with brokers: ${brokers.join(\", \")}`);\n logger.log(`Security protocol: ${cfg.securityProtocol || \"plaintext\"}`);\n logger.log(`Client ID: ${cfg.clientId}`);\n\n const saslConfig = buildSaslConfig(logger, cfg);\n\n return new Kafka({\n kafkaJS: {\n clientId: cfg.clientId,\n brokers,\n ssl: cfg.securityProtocol === \"SASL_SSL\",\n ...(saslConfig && { sasl: saslConfig }),\n retry: {\n initialRetryTime: RETRY_INITIAL_TIME_MS,\n maxRetryTime: MAX_RETRY_TIME_MS,\n retries: MAX_RETRIES,\n },\n },\n });\n};\n","/**\n * @module secrets\n * Utilities for runtime environment variable resolution.\n *\n * This module provides functionality to mark values that should be resolved\n * from environment variables at runtime by the Moose CLI, rather than being\n * embedded at build time.\n *\n * @example\n * ```typescript\n * import { S3QueueEngine, mooseRuntimeEnv } from 'moose-lib';\n *\n * const table = OlapTable<MyData>(\n * \"MyTable\",\n * OlapConfig({\n * engine: S3QueueEngine({\n * s3_path: \"s3://bucket/data/*.json\",\n * format: \"JSONEachRow\",\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\"),\n * awsSecretAccessKey: mooseRuntimeEnv.get(\"AWS_SECRET_ACCESS_KEY\")\n * })\n * })\n * );\n * ```\n */\n\n/**\n * Prefix used to mark values for runtime environment variable resolution.\n * @internal\n */\nexport const MOOSE_RUNTIME_ENV_PREFIX = \"__MOOSE_RUNTIME_ENV__:\";\n\n/**\n * Utilities for marking values to be resolved from environment 
variables at runtime.\n *\n * When you use `mooseRuntimeEnv.get()`, the behavior depends on the context:\n * - During infrastructure map loading: Returns a marker string for later resolution\n * - During function/workflow execution: Returns the actual environment variable value\n *\n * This is useful for:\n * - Credentials that should never be embedded in Docker images\n * - Configuration that can be rotated without rebuilding\n * - Different values for different environments (dev, staging, prod)\n * - Any runtime configuration in infrastructure elements (Tables, Topics, etc.)\n */\nexport const mooseRuntimeEnv = {\n /**\n * Gets a value from an environment variable, with behavior depending on context.\n *\n * When IS_LOADING_INFRA_MAP=true (infrastructure loading):\n * Returns a marker string that Moose CLI will resolve later\n *\n * When IS_LOADING_INFRA_MAP is unset (function/workflow runtime):\n * Returns the actual value from the environment variable\n *\n * @param envVarName - Name of the environment variable to resolve\n * @returns Either a marker string or the actual environment variable value\n * @throws {Error} If the environment variable name is empty\n * @throws {Error} If the environment variable is not set (runtime mode only)\n *\n * @example\n * ```typescript\n * // Instead of this (evaluated at build time):\n * awsAccessKeyId: process.env.AWS_ACCESS_KEY_ID\n *\n * // Use this (evaluated at runtime):\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\")\n * ```\n */\n get(envVarName: string): string {\n if (!envVarName || envVarName.trim() === \"\") {\n throw new Error(\"Environment variable name cannot be empty\");\n }\n\n // Check if we're loading infrastructure map\n const isLoadingInfraMap = process.env.IS_LOADING_INFRA_MAP === \"true\";\n\n if (isLoadingInfraMap) {\n // Return marker string for later resolution by Moose CLI\n return `${MOOSE_RUNTIME_ENV_PREFIX}${envVarName}`;\n } else {\n // Return actual value from environment for runtime execution\n const value = process.env[envVarName];\n if (value === undefined) {\n throw new Error(\n `Environment variable '${envVarName}' is not set. 
` +\n `This is required for runtime execution of functions/workflows.`,\n );\n }\n return value;\n }\n },\n};\n\n// Legacy export for backwards compatibility\n/** @deprecated Use mooseRuntimeEnv instead */\nexport const mooseEnvSecrets = mooseRuntimeEnv;\n","import { ClickHouseClient, CommandResult, ResultSet } from \"@clickhouse/client\";\nimport {\n Client as TemporalClient,\n Connection,\n ConnectionOptions,\n} from \"@temporalio/client\";\nimport { StringValue } from \"@temporalio/common\";\nimport { createHash, randomUUID } from \"node:crypto\";\nimport { performance } from \"perf_hooks\";\nimport * as fs from \"fs\";\nimport { getWorkflows } from \"../dmv2/internal\";\nimport { JWTPayload } from \"jose\";\nimport { Sql, sql, RawValue, toQuery, toQueryPreview } from \"../sqlHelpers\";\n\n/**\n * Format elapsed milliseconds into a human-readable string.\n * Matches Python's format_timespan behavior.\n */\nfunction formatElapsedTime(ms: number): string {\n if (ms < 1000) {\n return `${Math.round(ms)} ms`;\n }\n const seconds = ms / 1000;\n if (seconds < 60) {\n return `${seconds.toFixed(2)} seconds`;\n }\n const minutes = Math.floor(seconds / 60);\n const remainingSeconds = seconds % 60;\n return `${minutes} minutes and ${remainingSeconds.toFixed(2)} seconds`;\n}\n\n/**\n * Utilities provided by getMooseUtils() for database access and SQL queries.\n * Works in both Moose runtime and standalone contexts.\n */\nexport interface MooseUtils {\n client: MooseClient;\n sql: typeof sql;\n jwt?: JWTPayload;\n}\n\n/**\n * @deprecated Use MooseUtils instead. ApiUtil is now a type alias to MooseUtils\n * and will be removed in a future version.\n *\n * Migration: Replace `ApiUtil` with `MooseUtils` in your type annotations.\n */\nexport type ApiUtil = MooseUtils;\n\n/** @deprecated Use MooseUtils instead. 
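The two modes of `mooseRuntimeEnv.get` are worth seeing side by side; `IS_LOADING_INFRA_MAP` is the switch. A sketch, assuming `mooseRuntimeEnv` and the prefix constant are re-exported from the package root (both are defined in `src/secrets.ts`):

```ts
// Assumed import path; defined in src/secrets.ts.
import { mooseRuntimeEnv, MOOSE_RUNTIME_ENV_PREFIX } from "@514labs/moose-lib";

// Infra-map loading: the CLI sets IS_LOADING_INFRA_MAP=true, so get()
// returns a marker string for later resolution.
process.env.IS_LOADING_INFRA_MAP = "true";
console.log(mooseRuntimeEnv.get("AWS_ACCESS_KEY_ID"));
// -> "__MOOSE_RUNTIME_ENV__:AWS_ACCESS_KEY_ID" (MOOSE_RUNTIME_ENV_PREFIX + name)

// Function/workflow runtime: the flag is unset, so the real value is read,
// and a missing variable throws.
delete process.env.IS_LOADING_INFRA_MAP;
process.env.AWS_ACCESS_KEY_ID = "AKIA..."; // placeholder
console.log(mooseRuntimeEnv.get("AWS_ACCESS_KEY_ID")); // -> "AKIA..."
```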
*/\nexport type ConsumptionUtil = MooseUtils;\n\nexport class MooseClient {\n query: QueryClient;\n workflow: WorkflowClient;\n\n constructor(queryClient: QueryClient, temporalClient?: TemporalClient) {\n this.query = queryClient;\n this.workflow = new WorkflowClient(temporalClient);\n }\n}\n\nexport class QueryClient {\n client: ClickHouseClient;\n query_id_prefix: string;\n constructor(client: ClickHouseClient, query_id_prefix: string) {\n this.client = client;\n this.query_id_prefix = query_id_prefix;\n }\n\n async execute<T = any>(\n sql: Sql,\n ): Promise<ResultSet<\"JSONEachRow\"> & { __query_result_t?: T[] }> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Query: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.query({\n query,\n query_params,\n format: \"JSONEachRow\",\n query_id: this.query_id_prefix + randomUUID(),\n // Note: wait_end_of_query deliberately NOT set here as this is used for SELECT queries\n // where response buffering would harm streaming performance and concurrency\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Query completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n\n async command(sql: Sql): Promise<CommandResult> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Command: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.command({\n query,\n query_params,\n query_id: this.query_id_prefix + randomUUID(),\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Command completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n}\n\nexport class WorkflowClient {\n client: TemporalClient | undefined;\n\n constructor(temporalClient?: TemporalClient) {\n this.client = temporalClient;\n }\n\n async execute(name: string, input_data: any) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. Is the feature flag enabled?`,\n };\n }\n\n // Get workflow configuration\n const config = await this.getWorkflowConfig(name);\n\n // Process input data and generate workflow ID\n const [processedInput, workflowId] = this.processInputData(\n name,\n input_data,\n );\n\n console.log(\n `WorkflowClient - starting workflow: ${name} with config ${JSON.stringify(config)} and input_data ${JSON.stringify(processedInput)}`,\n );\n\n const handle = await this.client.workflow.start(\"ScriptWorkflow\", {\n args: [\n { workflow_name: name, execution_mode: \"start\" as const },\n processedInput,\n ],\n taskQueue: \"typescript-script-queue\",\n workflowId,\n workflowIdConflictPolicy: \"FAIL\",\n workflowIdReusePolicy: \"ALLOW_DUPLICATE\",\n retry: {\n maximumAttempts: config.retries,\n },\n workflowRunTimeout: config.timeout as StringValue,\n });\n\n return {\n status: 200,\n body: `Workflow started: ${name}. View it in the Temporal dashboard: http://localhost:8080/namespaces/default/workflows/${workflowId}/${handle.firstExecutionRunId}/history`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error starting workflow: ${error}`,\n };\n }\n }\n\n async terminate(workflowId: string) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. 
Is the feature flag enabled?`,\n };\n }\n\n const handle = this.client.workflow.getHandle(workflowId);\n await handle.terminate();\n\n return {\n status: 200,\n body: `Workflow terminated: ${workflowId}`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error terminating workflow: ${error}`,\n };\n }\n }\n\n private async getWorkflowConfig(\n name: string,\n ): Promise<{ retries: number; timeout: string }> {\n const workflows = await getWorkflows();\n const dmv2Workflow = workflows.get(name);\n if (dmv2Workflow) {\n return {\n retries: dmv2Workflow.config.retries || 3,\n timeout: dmv2Workflow.config.timeout || \"1h\",\n };\n }\n\n throw new Error(`Workflow config not found for ${name}`);\n }\n\n private processInputData(name: string, input_data: any): [any, string] {\n let workflowId = name;\n if (input_data) {\n const hash = createHash(\"sha256\")\n .update(JSON.stringify(input_data))\n .digest(\"hex\")\n .slice(0, 16);\n workflowId = `${name}-${hash}`;\n }\n return [input_data, workflowId];\n }\n}\n\n/**\n * This looks similar to the client in runner.ts which is a worker.\n * Temporal SDK uses similar looking connection options & client,\n * but there are different libraries for a worker & client like this one\n * that triggers workflows.\n */\nexport async function getTemporalClient(\n temporalUrl: string,\n namespace: string,\n clientCert: string,\n clientKey: string,\n apiKey: string,\n): Promise<TemporalClient | undefined> {\n try {\n console.info(\n `<api> Using temporal_url: ${temporalUrl} and namespace: ${namespace}`,\n );\n\n let connectionOptions: ConnectionOptions = {\n address: temporalUrl,\n connectTimeout: \"3s\",\n };\n\n if (clientCert && clientKey) {\n // URL with mTLS uses gRPC namespace endpoint which is what temporalUrl already is\n console.log(\"Using TLS for secure Temporal\");\n const cert = await fs.readFileSync(clientCert);\n const key = await fs.readFileSync(clientKey);\n\n connectionOptions.tls = {\n clientCertPair: { crt: cert, key: key },\n };\n } else if (apiKey) {\n console.log(\"Using API key for secure Temporal\");\n // URL with API key uses gRPC regional endpoint\n connectionOptions.address = \"us-west1.gcp.api.temporal.io:7233\";\n connectionOptions.apiKey = apiKey;\n connectionOptions.tls = {};\n connectionOptions.metadata = {\n \"temporal-namespace\": namespace,\n };\n }\n\n console.log(`<api> Connecting to Temporal at ${connectionOptions.address}`);\n const connection = await Connection.connect(connectionOptions);\n const client = new TemporalClient({ connection, namespace });\n console.log(\"<api> Connected to Temporal server\");\n\n return client;\n } catch (error) {\n console.warn(`Failed to connect to Temporal. Is the feature flag enabled?`);\n console.warn(error);\n return undefined;\n }\n}\n\nexport const ApiHelpers = {\n column: (value: string) => [\"Identifier\", value] as [string, string],\n table: (value: string) => [\"Identifier\", value] as [string, string],\n};\n\n/** @deprecated Use ApiHelpers instead. 
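In practice `MooseClient` is consumed through the injected utils: `client.query` is the `QueryClient` above and `client.workflow` wraps Temporal. A handler sketch using the `ApiUtil` alias (exported from the package index); the table and workflow names are placeholders, and note that `workflow.execute` reports failures via `{ status, body }` rather than throwing:

```ts
import type { ApiUtil } from "@514labs/moose-lib"; // alias of MooseUtils

// Sketch of an API handler; "daily_counts" and "refresh-counts" are placeholders.
export default async function handler(
  { limit }: { limit: number },
  { client, sql }: ApiUtil,
) {
  // QueryClient.execute logs a preview, runs the parameterized query,
  // and returns a JSONEachRow ResultSet:
  const result = await client.query.execute<{ day: string; n: number }>(
    sql`SELECT day, n FROM daily_counts ORDER BY day DESC LIMIT ${limit}`,
  );
  const rows = await result.json();

  // WorkflowClient returns { status, body } instead of throwing:
  const started = await client.workflow.execute("refresh-counts", { limit });

  return { rows, workflow: started.body };
}
```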
*/\nexport const ConsumptionHelpers = ApiHelpers;\n\nexport function joinQueries({\n values,\n separator = \",\",\n prefix = \"\",\n suffix = \"\",\n}: {\n values: readonly RawValue[];\n separator?: string;\n prefix?: string;\n suffix?: string;\n}) {\n if (values.length === 0) {\n throw new TypeError(\n \"Expected `join([])` to be called with an array of multiple elements, but got an empty array\",\n );\n }\n\n return new Sql(\n [prefix, ...Array(values.length - 1).fill(separator), suffix],\n values,\n );\n}\n","import http from \"http\";\nimport type { MooseUtils } from \"./helpers\";\n\n/**\n * @deprecated Use `getMooseUtils()` from '@514labs/moose-lib' instead.\n *\n * This synchronous function extracts MooseUtils from a request object that was\n * injected by Moose runtime middleware. It returns undefined if not running\n * in a Moose-managed context.\n *\n * Migration: Replace with the async version:\n * ```typescript\n * // Old (sync, deprecated):\n * import { getMooseUtilsFromRequest } from '@514labs/moose-lib';\n * const moose = getMooseUtilsFromRequest(req);\n *\n * // New (async, recommended):\n * import { getMooseUtils } from '@514labs/moose-lib';\n * const moose = await getMooseUtils();\n * ```\n *\n * @param req - The HTTP request object containing injected moose utilities\n * @returns MooseUtils if available on the request, undefined otherwise\n */\nexport function getMooseUtilsFromRequest(\n req: http.IncomingMessage | any,\n): MooseUtils | undefined {\n console.warn(\n \"[DEPRECATED] getMooseUtilsFromRequest() is deprecated. \" +\n \"Import getMooseUtils from '@514labs/moose-lib' and call it without parameters: \" +\n \"const { client, sql } = await getMooseUtils();\",\n );\n return (req as any).moose;\n}\n\n/**\n * @deprecated Use `getMooseUtils()` from '@514labs/moose-lib' instead.\n *\n * This is a legacy alias for getMooseUtilsFromRequest. The main getMooseUtils\n * export from '@514labs/moose-lib' is now async and does not require a request parameter.\n *\n * BREAKING CHANGE WARNING: The new getMooseUtils() returns Promise<MooseUtils>,\n * not MooseUtils | undefined. You must await the result:\n * ```typescript\n * const moose = await getMooseUtils(); // New async API\n * ```\n */\nexport const getLegacyMooseUtils = getMooseUtilsFromRequest;\n\n/**\n * @deprecated No longer needed. Use getMooseUtils() directly instead.\n * Moose now handles utility injection automatically when injectMooseUtils is true.\n */\nexport function expressMiddleware() {\n console.warn(\n \"[DEPRECATED] expressMiddleware() is deprecated. 
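`joinQueries` builds one `Sql` from N values with separators, which composes with the `sql` tag for IN-lists. A sketch of the resulting parameterized text; the package-root import of `joinQueries` is an assumption (it is defined in `src/consumption-apis/helpers.ts`):

```ts
// joinQueries lives in src/consumption-apis/helpers.ts; the package-root
// import is an assumption.
import { joinQueries, sql, toQuery } from "@514labs/moose-lib";

const ids = [1, 2, 3];
const inList = joinQueries({
  values: ids,
  separator: ", ",
  prefix: "(",
  suffix: ")",
});

const [text] = toQuery(sql`SELECT * FROM users WHERE id IN ${inList}`);
// "SELECT * FROM users WHERE id IN ({p0:Int}, {p1:Int}, {p2:Int})"
// Note: joinQueries throws a TypeError on an empty values array.
```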
\" +\n \"Use getMooseUtils() directly or rely on injectMooseUtils config.\",\n );\n return (req: any, res: any, next: any) => {\n // Maintain backwards compat: copy req.raw.moose to req.moose if present\n if (!req.moose && req.raw && (req.raw as any).moose) {\n req.moose = (req.raw as any).moose;\n }\n next();\n };\n}\n\n/**\n * @deprecated Use MooseUtils from helpers.ts instead.\n */\nexport interface ExpressRequestWithMoose {\n moose?: MooseUtils;\n}\n","export interface TaskFunction {\n (input?: any): Promise<{ task: string; data: any }>;\n}\n\nexport interface TaskConfig {\n retries: number;\n}\n\nexport interface TaskDefinition {\n task: TaskFunction;\n config?: TaskConfig;\n}\n","import cluster from \"node:cluster\";\nimport { availableParallelism } from \"node:os\";\nimport { exit } from \"node:process\";\nimport { Worker } from \"node:cluster\";\n\nconst DEFAULT_MAX_CPU_USAGE_RATIO = 0.7;\n// Time to restart the worker when it exits unexpectedly\n// This value is not too high to avoid the worker to be stuck in a bad state\n// but also not too low to avoid restarting the worker too often\nconst RESTART_TIME_MS = 10000;\nconst SIGTERM = \"SIGTERM\";\nconst SIGINT = \"SIGINT\";\nconst SHUTDOWN_WORKERS_INTERVAL = 500;\n\n/**\n * Manages a cluster of worker processes, handling their lifecycle including startup,\n * shutdown, and error handling.\n *\n * @typeParam C - The type of output produced during worker startup\n */\nexport class Cluster<C> {\n // Tracks if shutdown is currently in progress\n private shutdownInProgress: boolean = false;\n // Tracks if workers exited cleanly during shutdown\n private hasCleanWorkerExit: boolean = true;\n\n // String identifying if this is primary or worker process\n private processStr = `${cluster.isPrimary ? 
\"primary\" : \"worker\"} process ${process.pid}`;\n\n // Functions for starting and stopping workers\n private workerStart: (w: Worker, paralelism: number) => Promise<C>;\n private workerStop: (c: C) => Promise<void>;\n\n // Result from starting worker, needed for cleanup\n private startOutput: C | undefined;\n private maxCpuUsageRatio: number;\n private usedCpuCount: number;\n\n /**\n * Creates a new cluster manager instance.\n *\n * @param options - Configuration options for the cluster\n * @param options.workerStart - Async function to execute when starting a worker\n * @param options.workerStop - Async function to execute when stopping a worker\n * @param options.maxCpuUsageRatio - Maximum ratio of CPU cores to utilize (0-1)\n * @param options.maxWorkerCount - Maximum number of workers to spawn\n * @throws {Error} If maxCpuUsageRatio is not between 0 and 1\n */\n constructor(options: {\n workerStart: (w: Worker, paralelism: number) => Promise<C>;\n workerStop: (c: C) => Promise<void>;\n maxCpuUsageRatio?: number;\n maxWorkerCount?: number;\n }) {\n this.workerStart = options.workerStart;\n this.workerStop = options.workerStop;\n if (\n options.maxCpuUsageRatio &&\n (options.maxCpuUsageRatio > 1 || options.maxCpuUsageRatio < 0)\n ) {\n throw new Error(\"maxCpuUsageRatio must be between 0 and 1\");\n }\n this.maxCpuUsageRatio =\n options.maxCpuUsageRatio || DEFAULT_MAX_CPU_USAGE_RATIO;\n this.usedCpuCount = this.computeCPUUsageCount(\n this.maxCpuUsageRatio,\n options.maxWorkerCount,\n );\n }\n\n /**\n * Calculates the number of CPU cores to utilize based on available parallelism and constraints.\n *\n * @param cpuUsageRatio - Ratio of CPU cores to use (0-1)\n * @param maxWorkerCount - Optional maximum number of workers\n * @returns The number of CPU cores to utilize\n */\n computeCPUUsageCount(cpuUsageRatio: number, maxWorkerCount?: number) {\n const cpuCount = availableParallelism();\n const maxWorkers = maxWorkerCount || cpuCount;\n return Math.min(\n maxWorkers,\n Math.max(1, Math.floor(cpuCount * cpuUsageRatio)),\n );\n }\n\n /**\n * Initializes the cluster by spawning worker processes and setting up signal handlers.\n * For the primary process, spawns workers and monitors parent process.\n * For worker processes, executes the worker startup function.\n *\n * @throws {Error} If worker is undefined in worker process\n */\n async start() {\n process.on(SIGTERM, this.gracefulClusterShutdown(SIGTERM));\n process.on(SIGINT, this.gracefulClusterShutdown(SIGINT));\n\n if (cluster.isPrimary) {\n const parentPid = process.ppid;\n\n setInterval(() => {\n try {\n process.kill(parentPid, 0);\n } catch (e) {\n console.log(\"Parent process has exited.\");\n this.gracefulClusterShutdown(SIGTERM)();\n }\n }, 1000);\n\n await this.bootWorkers(this.usedCpuCount);\n } else {\n if (!cluster.worker) {\n throw new Error(\n \"Worker is not defined, it should be defined in worker process\",\n );\n }\n\n this.startOutput = await this.workerStart(\n cluster.worker,\n this.usedCpuCount,\n );\n }\n }\n\n /**\n * Spawns worker processes and configures their lifecycle event handlers.\n * Handles worker online, exit and disconnect events.\n * Automatically restarts failed workers during normal operation.\n *\n * @param numWorkers - Number of worker processes to spawn\n */\n bootWorkers = async (numWorkers: number) => {\n console.info(`Setting ${numWorkers} workers...`);\n\n for (let i = 0; i < numWorkers; i++) {\n cluster.fork();\n }\n\n cluster.on(\"online\", (worker) => {\n console.info(`worker process 
${worker.process.pid} is online`);\n });\n\n cluster.on(\"exit\", (worker, code, signal) => {\n console.info(\n `worker ${worker.process.pid} exited with code ${code} and signal ${signal}`,\n );\n\n if (!this.shutdownInProgress) {\n setTimeout(() => cluster.fork(), RESTART_TIME_MS);\n }\n\n if (this.shutdownInProgress && code != 0) {\n this.hasCleanWorkerExit = false;\n }\n });\n\n cluster.on(\"disconnect\", (worker) => {\n console.info(`worker process ${worker.process.pid} has disconnected`);\n });\n };\n\n /**\n * Creates a handler function for graceful shutdown on receipt of a signal.\n * Ensures only one shutdown can occur at a time.\n * Handles shutdown differently for primary and worker processes.\n *\n * @param signal - The signal triggering the shutdown (e.g. SIGTERM)\n * @returns An async function that performs the shutdown\n */\n gracefulClusterShutdown = (signal: NodeJS.Signals) => async () => {\n if (this.shutdownInProgress) {\n return;\n }\n\n this.shutdownInProgress = true;\n this.hasCleanWorkerExit = true;\n\n console.info(\n `Got ${signal} on ${this.processStr}. Graceful shutdown start at ${new Date().toISOString()}`,\n );\n\n try {\n if (cluster.isPrimary) {\n await this.shutdownWorkers(signal);\n console.info(`${this.processStr} - worker shutdown successful`);\n exit(0);\n } else {\n // Only attempt to stop if the worker has finished starting\n if (this.startOutput) {\n await this.workerStop(this.startOutput);\n } else {\n console.info(\n `${this.processStr} - shutdown before worker fully started`,\n );\n }\n console.info(`${this.processStr} shutdown successful`);\n this.hasCleanWorkerExit ? exit(0) : exit(1);\n }\n } catch (e) {\n console.error(`${this.processStr} - shutdown failed`, e);\n exit(1);\n }\n };\n\n /**\n * Gracefully terminates all worker processes.\n * Monitors workers until they all exit or timeout occurs.\n * Only relevant for the primary process.\n *\n * @param signal - The signal to send to worker processes\n * @returns A promise that resolves when all workers have terminated\n */\n shutdownWorkers = (signal: NodeJS.Signals) => {\n return new Promise<void>((resolve, reject) => {\n if (!cluster.isPrimary) {\n return resolve();\n }\n\n if (!cluster.workers) {\n return resolve();\n }\n\n const workerIds = Object.keys(cluster.workers);\n if (workerIds.length == 0) {\n return resolve();\n }\n\n let workersAlive = 0;\n let funcRun = 0;\n\n const cleanWorkers = () => {\n ++funcRun;\n workersAlive = 0;\n\n Object.values(cluster.workers || {})\n .filter((worker) => !!worker)\n .forEach((worker) => {\n if (worker && !worker.isDead()) {\n ++workersAlive;\n if (funcRun == 1) {\n worker.kill(signal);\n }\n }\n });\n\n console.info(workersAlive + \" workers alive\");\n if (workersAlive == 0) {\n clearInterval(interval);\n return resolve();\n }\n };\n\n const interval = setInterval(cleanWorkers, SHUTDOWN_WORKERS_INTERVAL);\n });\n };\n}\n","import http from \"http\";\nimport { getClickhouseClient } from \"../commons\";\nimport { MooseClient, QueryClient, getTemporalClient } from \"./helpers\";\nimport * as jose from \"jose\";\nimport { ClickHouseClient } from \"@clickhouse/client\";\nimport { Cluster } from \"../cluster-utils\";\nimport { ApiUtil } from \"../index\";\nimport { sql } from \"../sqlHelpers\";\nimport { Client as TemporalClient } from \"@temporalio/client\";\nimport { getApis, getWebApps } from \"../dmv2/internal\";\n\ninterface ClickhouseConfig {\n database: string;\n host: string;\n port: string;\n username: string;\n password: string;\n useSSL: 
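A usage sketch for the `Cluster` class above: each worker runs `workerStart`, and the value it returns is handed back to `workerStop` during graceful shutdown. The package-root import is an assumption (`Cluster` is defined in `src/cluster-utils.ts`); note the constructor throws if `maxCpuUsageRatio` falls outside 0..1:

```ts
import http from "http";
// Assumed import path; Cluster is defined in src/cluster-utils.ts.
import { Cluster } from "@514labs/moose-lib";

const serverCluster = new Cluster<http.Server>({
  maxCpuUsageRatio: 0.5, // use at most half the available cores
  workerStart: async (_worker, parallelism) => {
    const server = http.createServer((_req, res) => res.end("ok"));
    server.listen(0); // placeholder port
    console.log(`worker up (parallelism: ${parallelism})`);
    return server; // passed to workerStop on shutdown
  },
  workerStop: (server) =>
    new Promise<void>((resolve) => server.close(() => resolve())),
});

await serverCluster.start();
```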
boolean;\n}\n\ninterface JwtConfig {\n secret?: string;\n issuer: string;\n audience: string;\n}\n\ninterface TemporalConfig {\n url: string;\n namespace: string;\n clientCert: string;\n clientKey: string;\n apiKey: string;\n}\n\ninterface ApisConfig {\n apisDir: string;\n clickhouseConfig: ClickhouseConfig;\n jwtConfig?: JwtConfig;\n temporalConfig?: TemporalConfig;\n enforceAuth: boolean;\n isDmv2: boolean;\n proxyPort?: number;\n workerCount?: number;\n}\n\n// Convert our config to Clickhouse client config\nconst toClientConfig = (config: ClickhouseConfig) => ({\n ...config,\n useSSL: config.useSSL ? \"true\" : \"false\",\n});\n\nconst createPath = (apisDir: string, path: string) => `${apisDir}${path}.ts`;\n\nconst httpLogger = (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n startMs: number,\n) => {\n console.log(\n `${req.method} ${req.url} ${res.statusCode} ${Date.now() - startMs}ms`,\n );\n};\n\nconst modulesCache = new Map<string, any>();\n\nexport function createApi<T extends object, R = any>(\n _handler: (params: T, utils: ApiUtil) => Promise<R>,\n): (\n rawParams: Record<string, string[] | string>,\n utils: ApiUtil,\n) => Promise<R> {\n throw new Error(\n \"This should be compiled-time replaced by compiler plugins to add parsing.\",\n );\n}\n\n/** @deprecated Use `Api` from \"dmv2/sdk/consumptionApi\" instead. */\nexport const createConsumptionApi = createApi;\n\nconst apiHandler = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apis = isDmv2 ? await getApis() : new Map();\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n try {\n const url = new URL(req.url || \"\", \"http://localhost\");\n const fileName = url.pathname;\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1]; // Bearer <token>\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed\");\n if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n\n const pathName = createPath(apisDir, fileName);\n const paramsObject = Array.from(url.searchParams.entries()).reduce(\n (obj: { [key: string]: string[] | string }, [key, value]) => {\n const existingValue = obj[key];\n if (existingValue) {\n if (Array.isArray(existingValue)) {\n existingValue.push(value);\n } else {\n obj[key] = [existingValue, value];\n }\n } else {\n obj[key] = value;\n }\n return obj;\n },\n {},\n );\n\n let userFuncModule = modulesCache.get(pathName);\n if (userFuncModule === undefined) {\n if (isDmv2) {\n let apiName = fileName.replace(/^\\/+|\\/+$/g, \"\");\n let version: string | null = null;\n\n // First, try to find the API by the full path (for custom 
paths)\n userFuncModule = apis.get(apiName);\n\n if (!userFuncModule) {\n // Fall back to the old name:version parsing\n version = url.searchParams.get(\"version\");\n\n // Check if version is in the path (e.g., /bar/1)\n if (!version && apiName.includes(\"/\")) {\n const pathParts = apiName.split(\"/\");\n if (pathParts.length >= 2) {\n // Try the full path first (it might be a custom path)\n userFuncModule = apis.get(apiName);\n if (!userFuncModule) {\n // If not found, treat it as name/version\n apiName = pathParts[0];\n version = pathParts.slice(1).join(\"/\");\n }\n }\n }\n\n // Only do versioned lookup if we still haven't found it\n if (!userFuncModule) {\n if (version) {\n const versionedKey = `${apiName}:${version}`;\n userFuncModule = apis.get(versionedKey);\n } else {\n userFuncModule = apis.get(apiName);\n }\n }\n }\n\n if (!userFuncModule) {\n const availableApis = Array.from(apis.keys()).map((key) =>\n key.replace(\":\", \"/\"),\n );\n const errorMessage =\n version ?\n `API ${apiName} with version ${version} not found. Available APIs: ${availableApis.join(\", \")}`\n : `API ${apiName} not found. Available APIs: ${availableApis.join(\", \")}`;\n throw new Error(errorMessage);\n }\n\n modulesCache.set(pathName, userFuncModule);\n console.log(`[API] | Executing API: ${apiName}`);\n } else {\n userFuncModule = require(pathName);\n modulesCache.set(pathName, userFuncModule);\n }\n }\n\n const queryClient = new QueryClient(clickhouseClient, fileName);\n let result =\n isDmv2 ?\n await userFuncModule(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n })\n : await userFuncModule.default(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n });\n\n let body: string;\n let status: number | undefined;\n\n // TODO investigate why these prototypes are different\n if (Object.getPrototypeOf(result).constructor.name === \"ResultSet\") {\n body = JSON.stringify(await result.json());\n } else {\n if (\"body\" in result && \"status\" in result) {\n body = JSON.stringify(result.body);\n status = result.status;\n } else {\n body = JSON.stringify(result);\n }\n }\n\n if (status) {\n res.writeHead(status, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n } else {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n }\n\n res.end(body);\n } catch (error: any) {\n console.log(\"error in path \", req.url, error);\n // todo: same workaround as ResultSet\n if (Object.getPrototypeOf(error).constructor.name === \"TypeGuardError\") {\n res.writeHead(400, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n }\n if (error instanceof Error) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n } else {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end();\n httpLogger(req, res, start);\n }\n }\n };\n};\n\nconst createMainRouter = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apiRequestHandler = await apiHandler(\n publicKey,\n clickhouseClient,\n temporalClient,\n apisDir,\n enforceAuth,\n isDmv2,\n jwtConfig,\n );\n\n const webApps = isDmv2 ? 
await getWebApps() : new Map();\n\n const sortedWebApps = Array.from(webApps.values()).sort((a, b) => {\n const pathA = a.config.mountPath || \"/\";\n const pathB = b.config.mountPath || \"/\";\n return pathB.length - pathA.length;\n });\n\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n const url = new URL(req.url || \"\", \"http://localhost\");\n const pathname = url.pathname;\n\n // Health check - checked before all other routes\n if (pathname === \"/_moose_internal/health\") {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n res.end(\n JSON.stringify({\n status: \"healthy\",\n timestamp: new Date().toISOString(),\n }),\n );\n return;\n }\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1];\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed for WebApp route\");\n }\n }\n }\n\n for (const webApp of sortedWebApps) {\n const mountPath = webApp.config.mountPath || \"/\";\n const normalizedMount =\n mountPath.endsWith(\"/\") && mountPath !== \"/\" ?\n mountPath.slice(0, -1)\n : mountPath;\n\n const matches =\n pathname === normalizedMount ||\n pathname.startsWith(normalizedMount + \"/\");\n\n if (matches) {\n if (webApp.config.injectMooseUtils !== false) {\n // Import getMooseUtils dynamically to avoid circular deps\n const { getMooseUtils } = await import(\"./standalone\");\n (req as any).moose = await getMooseUtils();\n }\n\n let proxiedUrl = req.url;\n if (normalizedMount !== \"/\") {\n const pathWithoutMount =\n pathname.substring(normalizedMount.length) || \"/\";\n proxiedUrl = pathWithoutMount + url.search;\n }\n\n try {\n // Create a modified request preserving all properties including headers\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: proxiedUrl,\n },\n );\n await webApp.handler(modifiedReq, res);\n return;\n } catch (error) {\n console.error(`Error in WebApp ${webApp.name}:`, error);\n if (!res.headersSent) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Internal Server Error\" }));\n }\n return;\n }\n }\n }\n\n // If no WebApp matched, check if it's an Api request\n // Strip /api or /consumption prefix for Api routing\n let apiPath = pathname;\n if (pathname.startsWith(\"/api/\")) {\n apiPath = pathname.substring(4); // Remove \"/api\"\n } else if (pathname.startsWith(\"/consumption/\")) {\n apiPath = pathname.substring(13); // Remove \"/consumption\"\n }\n\n // If we stripped a prefix, it's an Api request\n if (apiPath !== pathname) {\n // Create a modified request with the rewritten URL for the apiHandler\n // Preserve all properties including headers by using Object.assign with prototype chain\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: apiPath + url.search,\n },\n );\n await apiRequestHandler(modifiedReq as http.IncomingMessage, res);\n return;\n }\n\n res.writeHead(404, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Not Found\" }));\n 
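The mount matching and URL rewriting in the router above can be read as one small pure function: trim the trailing slash from the mount, match on equality or `mount + "/"`, then strip the mount from the proxied URL. This sketch is a transcription for illustration, not an export of the package:

```ts
// Illustration only (not a package export): the mount matching and URL
// rewrite performed by the router above, as a pure function.
function rewriteForMount(
  pathname: string,
  search: string,
  mountPath: string,
): string | undefined {
  const normalizedMount =
    mountPath.endsWith("/") && mountPath !== "/"
      ? mountPath.slice(0, -1)
      : mountPath;
  const matches =
    pathname === normalizedMount || pathname.startsWith(normalizedMount + "/");
  if (!matches) return undefined; // router falls through to the next WebApp
  if (normalizedMount === "/") return pathname + search;
  return (pathname.substring(normalizedMount.length) || "/") + search;
}

rewriteForMount("/admin/users", "?x=1", "/admin/"); // -> "/users?x=1"
rewriteForMount("/admin", "", "/admin"); // -> "/"
rewriteForMount("/other", "", "/admin"); // -> undefined
```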
httpLogger(req, res, start);\n };\n};\n\nexport const runApis = async (config: ApisConfig) => {\n const apisCluster = new Cluster({\n maxWorkerCount:\n (config.workerCount ?? 0) > 0 ? config.workerCount : undefined,\n workerStart: async () => {\n let temporalClient: TemporalClient | undefined;\n if (config.temporalConfig) {\n temporalClient = await getTemporalClient(\n config.temporalConfig.url,\n config.temporalConfig.namespace,\n config.temporalConfig.clientCert,\n config.temporalConfig.clientKey,\n config.temporalConfig.apiKey,\n );\n }\n const clickhouseClient = getClickhouseClient(\n toClientConfig(config.clickhouseConfig),\n );\n let publicKey: jose.KeyLike | undefined;\n if (config.jwtConfig?.secret) {\n console.log(\"Importing JWT public key...\");\n publicKey = await jose.importSPKI(config.jwtConfig.secret, \"RS256\");\n }\n\n // Set runtime context for getMooseUtils() to detect\n const runtimeQueryClient = new QueryClient(clickhouseClient, \"runtime\");\n (globalThis as any)._mooseRuntimeContext = {\n client: new MooseClient(runtimeQueryClient, temporalClient),\n };\n\n const server = http.createServer(\n await createMainRouter(\n publicKey,\n clickhouseClient,\n temporalClient,\n config.apisDir,\n config.enforceAuth,\n config.isDmv2,\n config.jwtConfig,\n ),\n );\n // port is now passed via config.proxyPort or defaults to 4001\n const port = config.proxyPort !== undefined ? config.proxyPort : 4001;\n server.listen(port, \"localhost\", () => {\n console.log(`Server running on port ${port}`);\n });\n\n return server;\n },\n workerStop: async (server) => {\n return new Promise<void>((resolve) => {\n server.close(() => resolve());\n });\n },\n });\n\n apisCluster.start();\n};\n","import { createClient, RedisClientType } from \"redis\";\n\n// Module-level singleton instance and initialization promise\nlet instance: MooseCache | null = null;\nlet initPromise: Promise<MooseCache> | null = null;\n\ntype SupportedTypes = string | object;\n\nexport class MooseCache {\n private client: RedisClientType;\n private isConnected: boolean = false;\n private readonly keyPrefix: string;\n private disconnectTimer: NodeJS.Timeout | null = null;\n private readonly idleTimeout: number;\n private connectPromise: Promise<void> | null = null;\n\n private constructor() {\n const redisUrl =\n process.env.MOOSE_REDIS_CONFIG__URL || \"redis://127.0.0.1:6379\";\n const prefix = process.env.MOOSE_REDIS_CONFIG__KEY_PREFIX || \"MS\";\n // 30 seconds of inactivity before disconnecting\n this.idleTimeout =\n parseInt(process.env.MOOSE_REDIS_CONFIG__IDLE_TIMEOUT || \"30\", 10) * 1000;\n this.keyPrefix = `${prefix}::moosecache::`;\n\n this.client = createClient({\n url: redisUrl,\n });\n\n process.on(\"SIGTERM\", this.gracefulShutdown);\n process.on(\"SIGINT\", this.gracefulShutdown);\n\n this.client.on(\"error\", async (err: Error) => {\n console.error(\"TS Redis client error:\", err);\n await this.disconnect();\n });\n\n this.client.on(\"connect\", () => {\n this.isConnected = true;\n console.log(\"TS Redis client connected\");\n });\n\n this.client.on(\"end\", () => {\n this.isConnected = false;\n console.log(\"TS Redis client disconnected\");\n this.clearDisconnectTimer();\n });\n }\n\n private clearDisconnectTimer(): void {\n if (this.disconnectTimer) {\n clearTimeout(this.disconnectTimer);\n this.disconnectTimer = null;\n }\n }\n\n private resetDisconnectTimer(): void {\n this.clearDisconnectTimer();\n this.disconnectTimer = setTimeout(async () => {\n if (this.isConnected) {\n console.log(\"TS Redis client 
disconnecting due to inactivity\");\n await this.disconnect();\n }\n }, this.idleTimeout);\n }\n\n private async ensureConnected(): Promise<void> {\n if (!this.isConnected) {\n await this.connect();\n }\n this.resetDisconnectTimer();\n }\n\n private async connect(): Promise<void> {\n // If already connected, return immediately\n if (this.isConnected) {\n return;\n }\n\n // If connection is in progress, wait for it\n // This prevents race conditions when multiple callers try to reconnect\n // simultaneously after a disconnection\n if (this.connectPromise) {\n return this.connectPromise;\n }\n\n // Start connection\n this.connectPromise = (async () => {\n try {\n await this.client.connect();\n this.resetDisconnectTimer();\n } catch (error) {\n // Reset the promise on error so retries can work\n this.connectPromise = null;\n throw error;\n }\n })();\n\n return this.connectPromise;\n }\n\n private async gracefulShutdown(): Promise<void> {\n if (this.isConnected) {\n await this.disconnect();\n }\n process.exit(0);\n }\n\n private getPrefixedKey(key: string): string {\n return `${this.keyPrefix}${key}`;\n }\n\n /**\n * Gets the singleton instance of MooseCache. Creates a new instance if one doesn't exist.\n * The client will automatically connect to Redis and handle reconnection if needed.\n *\n * @returns Promise<MooseCache> The singleton instance of MooseCache\n * @example\n * const cache = await MooseCache.get();\n */\n public static async get(): Promise<MooseCache> {\n // If we already have an instance, return it immediately\n if (instance) {\n return instance;\n }\n\n // If initialization is already in progress, wait for it\n // This prevents race conditions where multiple concurrent calls to get()\n // would each create their own instance and connection\n //\n // A simple singleton pattern (just checking if instance exists) isn't enough\n // because multiple async calls can check \"if (!instance)\" simultaneously,\n // find it's null, and each try to create their own instance before any\n // of them finish setting the instance variable\n if (initPromise) {\n return initPromise;\n }\n\n // Start initialization\n // We store the promise immediately so that any concurrent calls\n // will wait for this same initialization instead of starting their own\n initPromise = (async () => {\n try {\n const newInstance = new MooseCache();\n await newInstance.connect();\n instance = newInstance;\n return newInstance;\n } catch (error) {\n // Reset the promise on error so retries can work\n initPromise = null;\n throw error;\n }\n })();\n\n return initPromise;\n }\n\n /**\n * Sets a value in the cache. Objects are automatically JSON stringified.\n *\n * @param key - The key to store the value under\n * @param value - The value to store. Can be a string or any object (will be JSON stringified)\n * @param ttlSeconds - Optional time-to-live in seconds. If not provided, defaults to 1 hour (3600 seconds).\n * Must be a non-negative number. 
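Both connect() and the module-level get() above memoize the in-flight promise so concurrent callers share a single initialization, and failures reset the memo so retries work. A generic sketch of the same pattern (names are illustrative, and the initialized value is assumed non-null):

```typescript
// Promise-memoizing once-initializer, mirroring the initPromise pattern above.
function once<T>(init: () => Promise<T>): () => Promise<T> {
  let value: T | null = null;
  let pending: Promise<T> | null = null;
  return () => {
    if (value !== null) return Promise.resolve(value);
    if (pending) return pending; // concurrent callers await the same initialization
    pending = init()
      .then((v) => (value = v))
      .catch((err) => {
        pending = null; // reset on failure so later calls can retry
        throw err;
      });
    return pending;
  };
}

const getConnection = once(async () => ({ connectedAt: Date.now() }));
// Both calls resolve to the same object, even when started concurrently.
void Promise.all([getConnection(), getConnection()]).then(([a, b]) =>
  console.log(a === b), // true
);
```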
If 0, the key will expire immediately.\n * @example\n * // Store a string\n * await cache.set(\"foo\", \"bar\");\n *\n * // Store an object with custom TTL\n * await cache.set(\"foo:config\", { baz: 123, qux: true }, 60); // expires in 1 minute\n *\n * // This is essentially a get-set, which returns the previous value if it exists.\n * // You can create logic to only do work for the first time.\n * const value = await cache.set(\"testSessionId\", \"true\");\n * if (value) {\n * // Cache was set before, return\n * } else {\n * // Cache was set for first time, do work\n * }\n */\n public async set(\n key: string,\n value: string | object,\n ttlSeconds?: number,\n ): Promise<string | null> {\n try {\n // Validate TTL\n if (ttlSeconds !== undefined && ttlSeconds < 0) {\n throw new Error(\"ttlSeconds must be a non-negative number\");\n }\n\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const stringValue =\n typeof value === \"object\" ? JSON.stringify(value) : value;\n\n // Use provided TTL or default to 1 hour\n const ttl = ttlSeconds ?? 3600;\n return await this.client.set(prefixedKey, stringValue, {\n EX: ttl,\n GET: true,\n });\n } catch (error) {\n console.error(`Error setting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Retrieves a value from the cache. Attempts to parse the value as JSON if possible.\n *\n * @param key - The key to retrieve\n * @returns Promise<T | null> The value, parsed as type T if it was JSON, or as string if not. Returns null if key doesn't exist\n * @example\n * // Get a string\n * const value = await cache.get(\"foo\");\n *\n * // Get and parse an object with type safety\n * interface Config { baz: number; qux: boolean; }\n * const config = await cache.get<Config>(\"foo:config\");\n */\n public async get<T extends SupportedTypes = string>(\n key: string,\n ): Promise<T | null> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const value = await this.client.get(prefixedKey);\n\n if (value === null) return null;\n\n // Note: We can't check if T is string at runtime because TypeScript types are erased.\n // Instead, we try to parse as JSON and return the original string if that fails.\n try {\n const parsed = JSON.parse(value);\n // Only return parsed value if it's an object\n if (typeof parsed === \"object\" && parsed !== null) {\n return parsed as T;\n }\n // If parsed value isn't an object, return as string\n return value as T;\n } catch {\n // If JSON parse fails, return as string\n return value as T;\n }\n } catch (error) {\n console.error(`Error getting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @example\n * await cache.delete(\"foo\");\n */\n public async delete(key: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n await this.client.del(prefixedKey);\n } catch (error) {\n console.error(`Error deleting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes all keys that start with the given prefix.\n *\n * @param keyPrefix - The prefix of keys to delete\n * @example\n * // Delete all keys starting with \"foo\"\n * await cache.clearKeys(\"foo\");\n */\n public async clearKeys(keyPrefix: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(keyPrefix);\n const keys = await this.client.keys(`${prefixedKey}*`);\n if (keys.length > 0) {\n 
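Because set() sends GET: true and returns the previous value, it can serve as an atomic test-and-set, as the docstring above notes. A hedged usage sketch; the key name and TTL are arbitrary:

```typescript
import { MooseCache } from "@514labs/moose-lib";

async function runOnce(sessionId: string): Promise<void> {
  const cache = await MooseCache.get();

  // set() returns the previous value (GET: true), so null means this
  // caller won the race and should do the one-time work.
  const previous = await cache.set(`session:${sessionId}`, "seen", 300);
  if (previous === null) {
    console.log("first time seeing this session; doing the work");
  } else {
    console.log("already processed; skipping");
  }
}

void runOnce("abc-123");
```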
await this.client.del(keys);\n }\n } catch (error) {\n console.error(\n `Error clearing cache keys with prefix ${keyPrefix}:`,\n error,\n );\n throw error;\n }\n }\n\n /**\n * Deletes all keys in the cache\n *\n * @example\n * await cache.clear();\n */\n public async clear(): Promise<void> {\n try {\n await this.ensureConnected();\n const keys = await this.client.keys(`${this.keyPrefix}*`);\n if (keys.length > 0) {\n await this.client.del(keys);\n }\n } catch (error) {\n console.error(\"Error clearing cache:\", error);\n throw error;\n }\n }\n\n /**\n * Manually disconnects the Redis client. The client will automatically reconnect\n * when the next operation is performed.\n *\n * @example\n * await cache.disconnect();\n */\n public async disconnect(): Promise<void> {\n this.clearDisconnectTimer();\n this.connectPromise = null;\n if (this.isConnected) {\n await this.client.quit();\n }\n }\n}\n","import { MooseClient, QueryClient, MooseUtils } from \"./helpers\";\nimport { getClickhouseClient } from \"../commons\";\nimport { sql } from \"../sqlHelpers\";\nimport type { RuntimeClickHouseConfig } from \"../config/runtime\";\n\n// Cached utilities and initialization promise for standalone mode\nlet standaloneUtils: MooseUtils | null = null;\nlet initPromise: Promise<MooseUtils> | null = null;\n\n// Convert config to client config format\nconst toClientConfig = (config: {\n host: string;\n port: string;\n username: string;\n password: string;\n database: string;\n useSSL: boolean;\n}) => ({\n ...config,\n useSSL: config.useSSL ? \"true\" : \"false\",\n});\n\n/**\n * Get Moose utilities for database access and SQL queries.\n * Works in both Moose runtime and standalone contexts.\n *\n * **IMPORTANT**: This function is async and returns a Promise. You must await the result:\n * ```typescript\n * const moose = await getMooseUtils(); // Correct\n * const moose = getMooseUtils(); // WRONG - returns Promise, not MooseUtils!\n * ```\n *\n * **Breaking Change from v1.x**: This function signature changed from sync to async.\n * If you were using the old sync API that extracted utils from a request object,\n * use `getMooseUtilsFromRequest(req)` for backward compatibility (deprecated).\n *\n * @param req - DEPRECATED: Request parameter is no longer needed and will be ignored.\n * If you need to extract moose from a request, use getMooseUtilsFromRequest().\n * @returns Promise resolving to MooseUtils with client and sql utilities.\n *\n * @example\n * ```typescript\n * const { client, sql } = await getMooseUtils();\n * const result = await client.query.execute(sql`SELECT * FROM table`);\n * ```\n */\nexport async function getMooseUtils(req?: any): Promise<MooseUtils> {\n // Deprecation warning if req passed\n if (req !== undefined) {\n console.warn(\n \"[DEPRECATED] getMooseUtils(req) no longer requires a request parameter. 
\" +\n \"Use getMooseUtils() instead.\",\n );\n }\n\n // Check if running in Moose runtime\n const runtimeContext = (globalThis as any)._mooseRuntimeContext;\n\n if (runtimeContext) {\n // In Moose runtime - use existing connections\n return {\n client: runtimeContext.client,\n sql: sql,\n jwt: runtimeContext.jwt,\n };\n }\n\n // Standalone mode - use cached client or create new one\n if (standaloneUtils) {\n return standaloneUtils;\n }\n\n // If initialization is in progress, wait for it\n if (initPromise) {\n return initPromise;\n }\n\n // Start initialization\n initPromise = (async () => {\n await import(\"../config/runtime\");\n const configRegistry = (globalThis as any)._mooseConfigRegistry;\n\n if (!configRegistry) {\n throw new Error(\n \"Moose not initialized. Ensure you're running within a Moose app \" +\n \"or have proper configuration set up.\",\n );\n }\n\n const clickhouseConfig =\n await configRegistry.getStandaloneClickhouseConfig();\n\n const clickhouseClient = getClickhouseClient(\n toClientConfig(clickhouseConfig),\n );\n const queryClient = new QueryClient(clickhouseClient, \"standalone\");\n const mooseClient = new MooseClient(queryClient);\n\n standaloneUtils = {\n client: mooseClient,\n sql: sql,\n jwt: undefined,\n };\n return standaloneUtils;\n })();\n\n try {\n return await initPromise;\n } finally {\n initPromise = null;\n }\n}\n\n/**\n * @deprecated Use getMooseUtils() instead.\n * Creates a Moose client for database access.\n */\nexport async function getMooseClients(\n config?: Partial<RuntimeClickHouseConfig>,\n): Promise<{ client: MooseClient }> {\n console.warn(\n \"[DEPRECATED] getMooseClients() is deprecated. Use getMooseUtils() instead.\",\n );\n\n // If custom config provided, create a one-off client (don't cache)\n if (config && Object.keys(config).length > 0) {\n await import(\"../config/runtime\");\n const configRegistry = (globalThis as any)._mooseConfigRegistry;\n\n if (!configRegistry) {\n throw new Error(\n \"Configuration registry not initialized. 
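A standalone usage sketch for getMooseUtils(), echoing the docstring's warning that the call must be awaited; the table name is hypothetical:

```typescript
import { getMooseUtils } from "@514labs/moose-lib";

async function main(): Promise<void> {
  // Must be awaited: calling without await yields a Promise, not MooseUtils.
  const { client, sql } = await getMooseUtils();

  // `events` is a hypothetical table name for illustration.
  const result = await client.query.execute(sql`SELECT count(*) AS n FROM events`);
  console.log(result);
}

main().catch((err) => {
  // Thrown when neither a Moose runtime context nor a config registry exists.
  console.error(err);
  process.exit(1);
});
```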
Ensure the Moose framework is properly set up.\",\n );\n }\n\n const clickhouseConfig =\n await configRegistry.getStandaloneClickhouseConfig(config);\n\n const clickhouseClient = getClickhouseClient(\n toClientConfig(clickhouseConfig),\n );\n const queryClient = new QueryClient(clickhouseClient, \"standalone\");\n const mooseClient = new MooseClient(queryClient);\n\n return { client: mooseClient };\n }\n\n // No custom config - delegate to getMooseUtils\n const utils = await getMooseUtils();\n return { client: utils.client };\n}\n","import type {\n Column,\n DataType,\n Nested,\n ArrayType,\n} from \"../dataModels/dataModelTypes\";\n\n/**\n * Annotation key used to mark DateTime fields that should remain as strings\n * rather than being parsed into Date objects at runtime.\n */\nexport const STRING_DATE_ANNOTATION = \"stringDate\";\n\n/**\n * Type guard to check if a DataType is a nullable wrapper\n */\nfunction isNullableType(dt: DataType): dt is { nullable: DataType } {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"nullable\" in dt &&\n typeof dt.nullable !== \"undefined\"\n );\n}\n\n/**\n * Type guard to check if a DataType is a Nested type\n */\nfunction isNestedType(dt: DataType): dt is Nested {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"columns\" in dt &&\n Array.isArray(dt.columns)\n );\n}\n\n/**\n * Type guard to check if a DataType is an ArrayType\n */\nfunction isArrayType(dt: DataType): dt is ArrayType {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"elementType\" in dt &&\n typeof dt.elementType !== \"undefined\"\n );\n}\n\n/**\n * Revives ISO 8601 date strings into Date objects during JSON parsing\n * This is useful for automatically converting date strings to Date objects\n */\nexport function jsonDateReviver(key: string, value: unknown): unknown {\n const iso8601Format =\n /^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)$/;\n\n if (typeof value === \"string\" && iso8601Format.test(value)) {\n return new Date(value);\n }\n\n return value;\n}\n\n/**\n * Checks if a DataType represents a datetime column (not just date)\n * AND if the column should be parsed from string to Date at runtime\n *\n * Note: Date and Date16 are date-only types and should remain as strings.\n * Only DateTime types are candidates for parsing to JavaScript Date objects.\n */\nfunction isDateType(dataType: DataType, annotations: [string, any][]): boolean {\n // Check if this is marked as a string-based date (from typia.tags.Format)\n // If so, it should remain as a string, not be parsed to Date\n if (\n annotations.some(\n ([key, value]) => key === STRING_DATE_ANNOTATION && value === true,\n )\n ) {\n return false;\n }\n\n if (typeof dataType === \"string\") {\n // Only DateTime types should be parsed to Date objects\n // Date and Date16 are date-only and should stay as strings\n return dataType === \"DateTime\" || dataType.startsWith(\"DateTime(\");\n }\n // Handle nullable wrapper\n if (isNullableType(dataType)) {\n return isDateType(dataType.nullable, annotations);\n }\n return false;\n}\n\n/**\n * Type of mutation to apply to a field during parsing\n */\nexport type Mutation = \"parseDate\"; // | \"parseBigInt\" - to be added later\n\n/**\n * Recursive tuple array structure representing field 
mutation operations\n * Each entry is [fieldName, mutation]:\n * - mutation is Mutation[] for leaf fields that need operations applied\n * - mutation is FieldMutations for nested objects/arrays (auto-applies to array elements)\n */\nexport type FieldMutations = [string, Mutation[] | FieldMutations][];\n\n/**\n * Recursively builds field mutations from column definitions\n *\n * @param columns - Array of Column definitions\n * @returns Tuple array of field mutations\n */\nfunction buildFieldMutations(columns: Column[]): FieldMutations {\n const mutations: FieldMutations = [];\n\n for (const column of columns) {\n const dataType = column.data_type;\n\n // Check if this is a date field that should be converted\n if (isDateType(dataType, column.annotations)) {\n mutations.push([column.name, [\"parseDate\"]]);\n continue;\n }\n\n // Handle nested structures\n if (typeof dataType === \"object\" && dataType !== null) {\n // Handle nullable wrapper\n let unwrappedType: DataType = dataType;\n if (isNullableType(dataType)) {\n unwrappedType = dataType.nullable;\n }\n\n // Handle nested objects\n if (isNestedType(unwrappedType)) {\n const nestedMutations = buildFieldMutations(unwrappedType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n\n // Handle arrays with nested columns\n // The mutations will be auto-applied to each array element at runtime\n if (isArrayType(unwrappedType)) {\n const elementType = unwrappedType.elementType;\n if (isNestedType(elementType)) {\n const nestedMutations = buildFieldMutations(elementType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n }\n }\n }\n\n return mutations;\n}\n\n/**\n * Applies a mutation operation to a field value\n *\n * @param value - The value to handle\n * @param mutation - The mutation operation to apply\n * @returns The handled value\n */\nfunction applyMutation(value: any, mutation: Mutation): any {\n if (mutation === \"parseDate\") {\n if (typeof value === \"string\") {\n try {\n const date = new Date(value);\n return !isNaN(date.getTime()) ? 
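To make the recursive tuple shape concrete, here is the FieldMutations value the builder above would produce for a record with one DateTime field and a nested array, written out by hand (the local type aliases restate the definitions above, since they may not be re-exported from the package root):

```typescript
// Local restatement of the types defined above, for a self-contained sketch.
type Mutation = "parseDate";
type FieldMutations = [string, Mutation[] | FieldMutations][];

// For a schema shaped like:
//   { createdAt: DateTime; items: Array<{ shippedAt: DateTime }> }
// the builder yields one leaf mutation and one nested branch:
const mutations: FieldMutations = [
  ["createdAt", ["parseDate"]],
  ["items", [["shippedAt", ["parseDate"]]]],
];

// Leaf entries carry Mutation[]; nested entries recurse, and are applied
// element-wise when the field value is an array.
console.log(JSON.stringify(mutations));
```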
date : value;\n } catch {\n return value;\n }\n }\n }\n return value;\n}\n\n/**\n * Recursively mutates an object by applying field mutations\n *\n * @param obj - The object to mutate\n * @param mutations - The field mutations to apply\n */\nfunction applyFieldMutations(obj: any, mutations: FieldMutations): void {\n if (!obj || typeof obj !== \"object\") {\n return;\n }\n\n for (const [fieldName, mutation] of mutations) {\n if (!(fieldName in obj)) {\n continue;\n }\n\n if (Array.isArray(mutation)) {\n // Check if it's Mutation[] (leaf) or FieldMutations (nested)\n if (mutation.length > 0 && typeof mutation[0] === \"string\") {\n // It's Mutation[] - apply operations to this field\n const operations = mutation as Mutation[];\n for (const operation of operations) {\n obj[fieldName] = applyMutation(obj[fieldName], operation);\n }\n } else {\n // It's FieldMutations - recurse into nested structure\n const nestedMutations = mutation as FieldMutations;\n const fieldValue = obj[fieldName];\n\n if (Array.isArray(fieldValue)) {\n // Auto-apply to each array element\n for (const item of fieldValue) {\n applyFieldMutations(item, nestedMutations);\n }\n } else if (fieldValue && typeof fieldValue === \"object\") {\n // Apply to nested object\n applyFieldMutations(fieldValue, nestedMutations);\n }\n }\n }\n }\n}\n\n/**\n * Pre-builds field mutations from column schema for efficient reuse\n *\n * @param columns - Column definitions from the Stream schema\n * @returns Field mutations tuple array, or undefined if no columns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * // Reuse fieldMutations for every message\n * ```\n */\nexport function buildFieldMutationsFromColumns(\n columns: Column[] | undefined,\n): FieldMutations | undefined {\n if (!columns || columns.length === 0) {\n return undefined;\n }\n const mutations = buildFieldMutations(columns);\n return mutations.length > 0 ? 
mutations : undefined;\n}\n\n/**\n * Applies field mutations to parsed data\n * Mutates the object in place for performance\n *\n * @param data - The parsed JSON object to mutate\n * @param fieldMutations - Pre-built field mutations from buildFieldMutationsFromColumns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * const data = JSON.parse(jsonString);\n * mutateParsedJson(data, fieldMutations);\n * // data now has transformations applied per the field mutations\n * ```\n */\nexport function mutateParsedJson(\n data: any,\n fieldMutations: FieldMutations | undefined,\n): void {\n if (!fieldMutations || !data) {\n return;\n }\n\n applyFieldMutations(data, fieldMutations);\n}\n","import { parse } from \"csv-parse\";\nimport { jsonDateReviver } from \"./json\";\n\n/**\n * Configuration for CSV parsing options\n */\nexport interface CSVParsingConfig {\n /** CSV delimiter character */\n delimiter: string;\n /** Whether to treat first row as headers */\n columns?: boolean;\n /** Whether to skip empty lines */\n skipEmptyLines?: boolean;\n /** Whether to trim whitespace from values */\n trim?: boolean;\n}\n\n/**\n * Configuration for JSON parsing options\n */\nexport interface JSONParsingConfig {\n /** Custom reviver function for JSON.parse */\n reviver?: (key: string, value: any) => any;\n}\n\n/**\n * Parses CSV content into an array of objects\n *\n * @param content - The CSV content as a string\n * @param config - CSV parsing configuration\n * @returns Promise resolving to an array of parsed objects\n */\nexport function parseCSV<T = Record<string, any>>(\n content: string,\n config: CSVParsingConfig,\n): Promise<T[]> {\n return new Promise((resolve, reject) => {\n const results: T[] = [];\n\n parse(content, {\n delimiter: config.delimiter,\n columns: config.columns ?? true,\n skip_empty_lines: config.skipEmptyLines ?? true,\n trim: config.trim ?? true,\n })\n .on(\"data\", (row) => {\n results.push(row as T);\n })\n .on(\"end\", () => {\n resolve(results);\n })\n .on(\"error\", (error) => {\n reject(error);\n });\n });\n}\n\n/**\n * Parses JSON content into an array of objects\n *\n * @param content - The JSON content as a string\n * @param config - JSON parsing configuration\n * @returns Array of parsed objects\n */\nexport function parseJSON<T = any>(\n content: string,\n config: JSONParsingConfig = {},\n): T[] {\n try {\n const parsed = JSON.parse(content, config.reviver);\n\n // Handle both array and single object cases\n if (Array.isArray(parsed)) {\n return parsed as T[];\n } else {\n return [parsed as T];\n }\n } catch (error) {\n throw new Error(\n `Failed to parse JSON: ${error instanceof Error ? 
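A runnable usage sketch for parseCSV with the default configuration exported below:

```typescript
import { parseCSV, DEFAULT_CSV_CONFIG } from "@514labs/moose-lib";

interface Row {
  id: string;
  name: string;
}

const csv = "id,name\n1,alpha\n2,beta\n";

// columns: true (the default) turns the header row into object keys.
parseCSV<Row>(csv, DEFAULT_CSV_CONFIG).then((rows) => {
  console.log(rows); // [{ id: "1", name: "alpha" }, { id: "2", name: "beta" }]
});
```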
error.message : \"Unknown error\"}`,\n );\n }\n}\n\n/**\n * Parses JSON content with automatic date revival\n *\n * @param content - The JSON content as a string\n * @returns Array of parsed objects with Date objects for ISO 8601 strings\n */\nexport function parseJSONWithDates<T = any>(content: string): T[] {\n return parseJSON<T>(content, { reviver: jsonDateReviver });\n}\n\n/**\n * Type guard to check if a value is a valid CSV delimiter\n */\nexport function isValidCSVDelimiter(delimiter: string): boolean {\n return delimiter.length === 1 && !/\\s/.test(delimiter);\n}\n\n/**\n * Common CSV delimiters\n */\nexport const CSV_DELIMITERS = {\n COMMA: \",\",\n TAB: \"\\t\",\n SEMICOLON: \";\",\n PIPE: \"|\",\n} as const;\n\n/**\n * Default CSV parsing configuration\n */\nexport const DEFAULT_CSV_CONFIG: CSVParsingConfig = {\n delimiter: CSV_DELIMITERS.COMMA,\n columns: true,\n skipEmptyLines: true,\n trim: true,\n};\n\n/**\n * Default JSON parsing configuration with date revival\n */\nexport const DEFAULT_JSON_CONFIG: JSONParsingConfig = {\n reviver: jsonDateReviver,\n};\n","import { IsTuple } from \"typia/lib/typings/IsTuple\";\n\nexport * from \"./dataParser\";\n\ntype HasFunctionField<T> =\n T extends object ?\n {\n [K in keyof T]: T[K] extends Function ? true : false;\n }[keyof T] extends false ?\n false\n : true\n : false;\n\n/**\n * `Date & ...` is considered \"nonsensible intersection\" by typia,\n * causing JSON schema to fail.\n * This helper type recursively cleans up the intersection type tagging.\n */\nexport type StripDateIntersection<T> =\n T extends Date ?\n Date extends T ?\n Date\n : T\n : T extends ReadonlyArray<unknown> ?\n IsTuple<T> extends true ? StripDateFromTuple<T>\n : T extends ReadonlyArray<infer U> ?\n ReadonlyArray<U> extends T ?\n ReadonlyArray<StripDateIntersection<U>>\n : Array<StripDateIntersection<U>>\n : T extends Array<infer U> ? Array<StripDateIntersection<U>>\n : T // this catchall should be unreachable\n : // do not touch other classes\n true extends HasFunctionField<T> ? T\n : T extends object ? 
{ [K in keyof T]: StripDateIntersection<T[K]> }\n : T;\n\n// infer fails in a recursive definition if an intersection type tag is present\ntype StripDateFromTuple<T extends readonly any[]> =\n T extends (\n [\n infer T1,\n infer T2,\n infer T3,\n infer T4,\n infer T5,\n infer T6,\n infer T7,\n infer T8,\n infer T9,\n infer T10,\n ]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n StripDateIntersection<T8>,\n StripDateIntersection<T9>,\n StripDateIntersection<T10>,\n ]\n : T extends (\n [\n infer T1,\n infer T2,\n infer T3,\n infer T4,\n infer T5,\n infer T6,\n infer T7,\n infer T8,\n infer T9,\n ]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n StripDateIntersection<T8>,\n StripDateIntersection<T9>,\n ]\n : T extends (\n [\n infer T1,\n infer T2,\n infer T3,\n infer T4,\n infer T5,\n infer T6,\n infer T7,\n infer T8,\n ]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n StripDateIntersection<T8>,\n ]\n : T extends (\n [infer T1, infer T2, infer T3, infer T4, infer T5, infer T6, infer T7]\n ) ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n StripDateIntersection<T7>,\n ]\n : T extends [infer T1, infer T2, infer T3, infer T4, infer T5, infer T6] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n StripDateIntersection<T6>,\n ]\n : T extends [infer T1, infer T2, infer T3, infer T4, infer T5] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n StripDateIntersection<T5>,\n ]\n : T extends [infer T1, infer T2, infer T3, infer T4] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n StripDateIntersection<T4>,\n ]\n : T extends [infer T1, infer T2, infer T3] ?\n [\n StripDateIntersection<T1>,\n StripDateIntersection<T2>,\n StripDateIntersection<T3>,\n ]\n : T extends [infer T1, infer T2] ?\n [StripDateIntersection<T1>, StripDateIntersection<T2>]\n : T extends [infer T1] ? [StripDateIntersection<T1>]\n : [];\n","import { Readable } from \"node:stream\";\n\n/**\n * Configuration for a data source\n */\nexport interface DataSourceConfig {\n name: string;\n supportsIncremental?: boolean;\n}\n\n/**\n * DataSource is an abstract class that defines the interface for all data sources.\n * It is used to extract data from a source and test the connection to the source.\n */\nexport abstract class DataSource<T = any, ItemType = any> {\n protected name: string;\n protected supportsIncremental: boolean;\n\n constructor(config: DataSourceConfig) {\n this.name = config.name;\n this.supportsIncremental = config.supportsIncremental ?? 
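A type-level sketch of StripDateIntersection in use: tagged Date intersections collapse back to plain Date, recursively through objects and tuples. The Tagged alias is a hypothetical stand-in for a typia-style tag, modeled with an optional property so that `Date extends Tagged` holds:

```typescript
import type { StripDateIntersection } from "@514labs/moose-lib";

// Hypothetical stand-in for a typia-style tag on a Date field.
type Tagged = Date & { __format?: "date-time" };

interface Event {
  at: Tagged;
  window: [Tagged, Tagged];
}

// Every `Date & ...` intersection collapses back to plain Date, recursively:
// Cleaned = { at: Date; window: [Date, Date] }
type Cleaned = StripDateIntersection<Event>;

const cleaned: Cleaned = { at: new Date(), window: [new Date(), new Date()] };
console.log(cleaned.at instanceof Date); // true
```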
false;\n }\n\n /**\n * Extract data from the source\n * Returns either ItemType (for single requests) or Readable (for paginated requests)\n */\n abstract extract(): Promise<ItemType | Readable>;\n\n /**\n * Test connection to the source\n */\n abstract testConnection(): Promise<{ success: boolean; message?: string }>;\n}\n\n/**\n * Result returned from extraction\n * For single requests: data is of type T\n * For paginated requests: data is a Readable stream yielding items of type T\n */\nexport interface ExtractionResult<T = any> {\n data: T | Readable;\n metadata: Record<string, any>;\n}\n","export * from \"./browserCompatible\";\n\nexport type DataModelConfig<T> = Partial<{\n ingestion: true;\n storage: {\n enabled?: boolean;\n order_by_fields?: (keyof T)[];\n deduplicate?: boolean;\n name?: string;\n };\n parallelism?: number;\n}>;\n\nexport * from \"./blocks/helpers\";\nexport * from \"./commons\";\nexport * from \"./secrets\";\nexport * from \"./consumption-apis/helpers\";\nexport {\n expressMiddleware,\n ExpressRequestWithMoose,\n getMooseUtilsFromRequest,\n getLegacyMooseUtils,\n} from \"./consumption-apis/webAppHelpers\";\nexport * from \"./scripts/task\";\n\nexport { createApi, createConsumptionApi } from \"./consumption-apis/runner\";\n\nexport { MooseCache } from \"./clients/redisClient\";\n\nexport { ApiUtil, ConsumptionUtil } from \"./consumption-apis/helpers\";\n\nexport { getMooseUtils, getMooseClients } from \"./consumption-apis/standalone\";\nexport type { MooseUtils } from \"./consumption-apis/helpers\";\nexport { sql } from \"./sqlHelpers\";\n\nexport * from \"./utilities\";\nexport * from \"./connectors/dataSource\";\nexport {\n ClickHouseByteSize,\n ClickHouseInt,\n LowCardinality,\n ClickHouseNamedTuple,\n ClickHousePoint,\n ClickHouseRing,\n ClickHouseLineString,\n ClickHouseMultiLineString,\n ClickHousePolygon,\n ClickHouseMultiPolygon,\n} from \"./dataModels/types\";\n","/**\n * @module internal\n * Internal implementation details for the Moose v2 data model (dmv2).\n *\n * This module manages the registration of user-defined dmv2 resources (Tables, Streams, APIs, etc.)\n * and provides functions to serialize these resources into a JSON format (`InfrastructureMap`)\n * expected by the Moose infrastructure management system. 
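A minimal concrete DataSource filling in the two abstract methods; the endpoint and payload shape are hypothetical, and global fetch assumes Node 18+:

```typescript
import { DataSource } from "@514labs/moose-lib";

interface Quote {
  symbol: string;
  price: number;
}

// Hypothetical single-request source; a paginated source would return a Readable.
class QuoteSource extends DataSource<Quote, Quote> {
  constructor() {
    super({ name: "quotes", supportsIncremental: false });
  }

  async extract(): Promise<Quote> {
    const res = await fetch("https://example.com/quote"); // hypothetical endpoint
    return (await res.json()) as Quote;
  }

  async testConnection(): Promise<{ success: boolean; message?: string }> {
    try {
      await fetch("https://example.com/quote", { method: "HEAD" });
      return { success: true };
    } catch (err) {
      return { success: false, message: String(err) };
    }
  }
}

void new QuoteSource().testConnection().then((r) => console.log(r.success));
```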
It also includes helper functions\n * to retrieve registered handler functions (for streams and APIs) and the base class\n * (`TypedBase`) used by dmv2 resource classes.\n *\n * @internal This module is intended for internal use by the Moose library and compiler plugin.\n * Its API might change without notice.\n */\nimport process from \"process\";\nimport { Api, IngestApi, SqlResource, Task, Workflow } from \"./index\";\nimport { IJsonSchemaCollection } from \"typia/src/schemas/json/IJsonSchemaCollection\";\nimport { Column } from \"../dataModels/dataModelTypes\";\nimport { ClickHouseEngines, ApiUtil } from \"../index\";\nimport {\n OlapTable,\n OlapConfig,\n ReplacingMergeTreeConfig,\n SummingMergeTreeConfig,\n ReplicatedMergeTreeConfig,\n ReplicatedReplacingMergeTreeConfig,\n ReplicatedAggregatingMergeTreeConfig,\n ReplicatedSummingMergeTreeConfig,\n ReplicatedCollapsingMergeTreeConfig,\n ReplicatedVersionedCollapsingMergeTreeConfig,\n S3QueueConfig,\n} from \"./sdk/olapTable\";\nimport {\n ConsumerConfig,\n KafkaSchemaConfig,\n Stream,\n TransformConfig,\n} from \"./sdk/stream\";\nimport { compilerLog } from \"../commons\";\nimport { WebApp } from \"./sdk/webApp\";\n\n/**\n * Gets the source directory from environment variable or defaults to \"app\"\n */\nfunction getSourceDir(): string {\n return process.env.MOOSE_SOURCE_DIR || \"app\";\n}\n\n/**\n * Client-only mode check. When true, resource registration is permissive\n * (duplicates overwrite silently instead of throwing).\n * Set via MOOSE_CLIENT_ONLY=true environment variable.\n *\n * This enables Next.js apps to import OlapTable definitions for type-safe\n * queries without the Moose runtime, avoiding \"already exists\" errors on HMR.\n *\n * @returns true if MOOSE_CLIENT_ONLY environment variable is set to \"true\"\n */\nexport const isClientOnlyMode = (): boolean =>\n process.env.MOOSE_CLIENT_ONLY === \"true\";\n\n/**\n * Internal registry holding all defined Moose dmv2 resources.\n * Populated by the constructors of OlapTable, Stream, IngestApi, etc.\n * Accessed via `getMooseInternal()`.\n */\nconst moose_internal = {\n tables: new Map<string, OlapTable<any>>(),\n streams: new Map<string, Stream<any>>(),\n ingestApis: new Map<string, IngestApi<any>>(),\n apis: new Map<string, Api<any>>(),\n sqlResources: new Map<string, SqlResource>(),\n workflows: new Map<string, Workflow>(),\n webApps: new Map<string, WebApp>(),\n};\n/**\n * Default retention period for streams if not specified (7 days in seconds).\n */\nconst defaultRetentionPeriod = 60 * 60 * 24 * 7;\n\n/**\n * Engine-specific configuration types using discriminated union pattern\n */\ninterface MergeTreeEngineConfig {\n engine: \"MergeTree\";\n}\n\ninterface ReplacingMergeTreeEngineConfig {\n engine: \"ReplacingMergeTree\";\n ver?: string;\n isDeleted?: string;\n}\n\ninterface AggregatingMergeTreeEngineConfig {\n engine: \"AggregatingMergeTree\";\n}\n\ninterface SummingMergeTreeEngineConfig {\n engine: \"SummingMergeTree\";\n columns?: string[];\n}\n\ninterface CollapsingMergeTreeEngineConfig {\n engine: \"CollapsingMergeTree\";\n sign: string;\n}\n\ninterface VersionedCollapsingMergeTreeEngineConfig {\n engine: \"VersionedCollapsingMergeTree\";\n sign: string;\n ver: string;\n}\n\ninterface ReplicatedMergeTreeEngineConfig {\n engine: \"ReplicatedMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedReplacingMergeTreeEngineConfig {\n engine: \"ReplicatedReplacingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n ver?: string;\n 
isDeleted?: string;\n}\n\ninterface ReplicatedAggregatingMergeTreeEngineConfig {\n engine: \"ReplicatedAggregatingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedSummingMergeTreeEngineConfig {\n engine: \"ReplicatedSummingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n columns?: string[];\n}\n\ninterface ReplicatedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n}\n\ninterface ReplicatedVersionedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedVersionedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n ver: string;\n}\n\ninterface S3QueueEngineConfig {\n engine: \"S3Queue\";\n s3Path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n headers?: { [key: string]: string };\n}\n\ninterface S3EngineConfig {\n engine: \"S3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n partitionStrategy?: string;\n partitionColumnsInDataFile?: string;\n}\n\ninterface BufferEngineConfig {\n engine: \"Buffer\";\n targetDatabase: string;\n targetTable: string;\n numLayers: number;\n minTime: number;\n maxTime: number;\n minRows: number;\n maxRows: number;\n minBytes: number;\n maxBytes: number;\n flushTime?: number;\n flushRows?: number;\n flushBytes?: number;\n}\n\ninterface DistributedEngineConfig {\n engine: \"Distributed\";\n cluster: string;\n targetDatabase: string;\n targetTable: string;\n shardingKey?: string;\n policyName?: string;\n}\n\ninterface IcebergS3EngineConfig {\n engine: \"IcebergS3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n}\n\ninterface KafkaEngineConfig {\n engine: \"Kafka\";\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n}\n\n/**\n * Union type for all supported engine configurations\n */\ntype EngineConfig =\n | MergeTreeEngineConfig\n | ReplacingMergeTreeEngineConfig\n | AggregatingMergeTreeEngineConfig\n | SummingMergeTreeEngineConfig\n | CollapsingMergeTreeEngineConfig\n | VersionedCollapsingMergeTreeEngineConfig\n | ReplicatedMergeTreeEngineConfig\n | ReplicatedReplacingMergeTreeEngineConfig\n | ReplicatedAggregatingMergeTreeEngineConfig\n | ReplicatedSummingMergeTreeEngineConfig\n | ReplicatedCollapsingMergeTreeEngineConfig\n | ReplicatedVersionedCollapsingMergeTreeEngineConfig\n | S3QueueEngineConfig\n | S3EngineConfig\n | BufferEngineConfig\n | DistributedEngineConfig\n | IcebergS3EngineConfig\n | KafkaEngineConfig;\n\n/**\n * JSON representation of an OLAP table configuration.\n */\ninterface TableJson {\n /** The name of the table. */\n name: string;\n /** Array defining the table's columns and their types. */\n columns: Column[];\n /** ORDER BY clause: either array of column names or a single ClickHouse expression. */\n orderBy: string[] | string;\n /** The column name used for the PARTITION BY clause. */\n partitionBy?: string;\n /** SAMPLE BY expression for approximate query processing. */\n sampleByExpression?: string;\n /** PRIMARY KEY expression (overrides column-level primary_key flags when specified). */\n primaryKeyExpression?: string;\n /** Engine configuration with type-safe, engine-specific parameters */\n engineConfig?: EngineConfig;\n /** Optional version string for the table configuration. 
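These engine interfaces form a discriminated union on the engine literal, so a switch narrows each branch to its own fields. A self-contained sketch with a two-member subset (the internal union above is not exported):

```typescript
// Local two-member subset mirroring the internal EngineConfig union above.
type EngineConfig =
  | { engine: "ReplacingMergeTree"; ver?: string; isDeleted?: string }
  | { engine: "S3Queue"; s3Path: string; format: string };

function describe(config: EngineConfig): string {
  switch (config.engine) {
    case "ReplacingMergeTree":
      // Narrowed: `ver` is available here, `s3Path` is not.
      return `ReplacingMergeTree(ver=${config.ver ?? "none"})`;
    case "S3Queue":
      return `S3Queue(${config.s3Path}, ${config.format})`;
  }
}

console.log(describe({ engine: "S3Queue", s3Path: "s3://bucket/*", format: "JSONEachRow" }));
```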
*/\n version?: string;\n /** Optional metadata for the table (e.g., description). */\n metadata?: { description?: string };\n /** Lifecycle management setting for the table. */\n lifeCycle?: string;\n /** Optional table-level settings that can be modified with ALTER TABLE MODIFY SETTING. */\n tableSettings?: { [key: string]: string };\n /** Optional table indexes */\n indexes?: {\n name: string;\n expression: string;\n type: string;\n arguments: string[];\n granularity: number;\n }[];\n /** Optional table projections */\n projections?: {\n name: string;\n select: string[] | string;\n orderBy?: string[] | string; // Optional: only for non-aggregate projections (camelCase for Rust serde)\n groupBy?: string[] | string; // Optional: only for aggregate projections (camelCase for Rust serde)\n where_clause?: string;\n }[];\n /** Optional table-level TTL expression (without leading 'TTL'). */\n ttl?: string;\n /** Optional database name for multi-database support. */\n database?: string;\n /** Optional cluster name for ON CLUSTER support. */\n cluster?: string;\n}\n/**\n * Represents a target destination for data flow, typically a stream.\n */\ninterface Target {\n /** The name of the target resource (e.g., stream name). */\n name: string;\n /** The kind of the target resource. */\n kind: \"stream\"; // may add `| \"table\"` in the future\n /** Optional version string of the target resource's configuration. */\n version?: string;\n /** Optional metadata for the target (e.g., description for function processes). */\n metadata?: { description?: string };\n /** Optional source file path where this transform was declared. */\n sourceFile?: string;\n}\n\n/**\n * Represents a consumer attached to a stream.\n */\ninterface Consumer {\n /** Optional version string for the consumer configuration. */\n version?: string;\n /** Optional source file path where this consumer was declared. */\n sourceFile?: string;\n}\n\n/**\n * JSON representation of a Stream/Topic configuration.\n */\ninterface StreamJson {\n /** The name of the stream/topic. */\n name: string;\n /** Array defining the message schema (columns/fields). */\n columns: Column[];\n /** Data retention period in seconds. */\n retentionPeriod: number;\n /** Number of partitions for the stream/topic. */\n partitionCount: number;\n /** Optional name of the OLAP table this stream automatically syncs to. */\n targetTable?: string;\n /** Optional version of the target OLAP table configuration. */\n targetTableVersion?: string;\n /** Optional version string for the stream configuration. */\n version?: string;\n /** List of target streams this stream transforms data into. */\n transformationTargets: Target[];\n /** Flag indicating if a multi-transform function (`_multipleTransformations`) is defined. */\n hasMultiTransform: boolean;\n /** List of consumers attached to this stream. */\n consumers: Consumer[];\n /** Optional description for the stream. */\n metadata?: { description?: string };\n /** Lifecycle management setting for the stream. */\n lifeCycle?: string;\n /** Optional Schema Registry config */\n schemaConfig?: KafkaSchemaConfig;\n}\n/**\n * JSON representation of an Ingest API configuration.\n */\ninterface IngestApiJson {\n /** The name of the Ingest API endpoint. */\n name: string;\n /** Array defining the expected input schema (columns/fields). */\n columns: Column[];\n\n /** The target stream where ingested data is written. */\n writeTo: Target;\n /** The DLQ if the data does not fit the schema. 
*/\n deadLetterQueue?: string;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the ingestion endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n /** JSON schema */\n schema: IJsonSchemaCollection.IV3_1;\n /**\n * Whether this API allows extra fields beyond the defined columns.\n * When true, extra fields in payloads are passed through to streaming functions.\n */\n allowExtraFields?: boolean;\n}\n\n/**\n * JSON representation of an API configuration.\n */\ninterface ApiJson {\n /** The name of the API endpoint. */\n name: string;\n /** Array defining the expected query parameters schema. */\n queryParams: Column[];\n /** JSON schema definition of the API's response body. */\n responseSchema: IJsonSchemaCollection.IV3_1;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the API endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n}\n\n/**\n * Represents the unique signature of an infrastructure component (Table, Topic, etc.).\n * Used for defining dependencies between SQL resources.\n */\ninterface InfrastructureSignatureJson {\n /** A unique identifier for the resource instance (often name + version). */\n id: string;\n /** The kind/type of the infrastructure component. */\n kind:\n | \"Table\"\n | \"Topic\"\n | \"ApiEndpoint\"\n | \"TopicToTableSyncProcess\"\n | \"View\"\n | \"SqlResource\";\n}\n\ninterface WorkflowJson {\n name: string;\n retries?: number;\n timeout?: string;\n schedule?: string;\n}\n\ninterface WebAppJson {\n name: string;\n mountPath: string;\n metadata?: { description?: string };\n}\n\ninterface SqlResourceJson {\n /** The name of the SQL resource. */\n name: string;\n /** Array of SQL DDL statements required to create the resource. */\n setup: readonly string[];\n /** Array of SQL DDL statements required to drop the resource. */\n teardown: readonly string[];\n\n /** List of infrastructure components (by signature) that this resource reads from. */\n pullsDataFrom: InfrastructureSignatureJson[];\n /** List of infrastructure components (by signature) that this resource writes to. */\n pushesDataTo: InfrastructureSignatureJson[];\n /** Optional source file path where this resource is defined. */\n sourceFile?: string;\n /** Optional source line number where this resource is defined. */\n sourceLine?: number;\n /** Optional source column number where this resource is defined. 
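For concreteness, a hand-written sketch of what a simple SQL view could serialize to under these interfaces; the names and DDL are hypothetical, and the dependency id falls back to the bare table name because no version is set, matching the id convention in toInfraMap below:

```typescript
// Hypothetical serialized form of a view reading from an unversioned table.
const sqlResourceJson = {
  name: "daily_totals",
  setup: [
    "CREATE VIEW daily_totals AS SELECT day, count() AS n FROM events GROUP BY day",
  ],
  teardown: ["DROP VIEW IF EXISTS daily_totals"],
  pullsDataFrom: [{ id: "events", kind: "Table" }],
  pushesDataTo: [],
};

console.log(JSON.stringify(sqlResourceJson, null, 2));
```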
*/\n sourceColumn?: number;\n}\n\n/**\n * Type guard: Check if config is S3QueueConfig\n */\nfunction isS3QueueConfig(\n config: OlapConfig<any>,\n): config is S3QueueConfig<any> {\n return \"engine\" in config && config.engine === ClickHouseEngines.S3Queue;\n}\n\n/**\n * Type guard: Check if config has a replicated engine\n * Checks if the engine value is one of the replicated engine types\n */\nfunction hasReplicatedEngine(\n config: OlapConfig<any>,\n): config is\n | ReplicatedMergeTreeConfig<any>\n | ReplicatedReplacingMergeTreeConfig<any>\n | ReplicatedAggregatingMergeTreeConfig<any>\n | ReplicatedSummingMergeTreeConfig<any>\n | ReplicatedCollapsingMergeTreeConfig<any>\n | ReplicatedVersionedCollapsingMergeTreeConfig<any> {\n if (!(\"engine\" in config)) {\n return false;\n }\n\n const engine = config.engine as ClickHouseEngines;\n // Check if engine is one of the replicated engine types\n return (\n engine === ClickHouseEngines.ReplicatedMergeTree ||\n engine === ClickHouseEngines.ReplicatedReplacingMergeTree ||\n engine === ClickHouseEngines.ReplicatedAggregatingMergeTree ||\n engine === ClickHouseEngines.ReplicatedSummingMergeTree ||\n engine === ClickHouseEngines.ReplicatedCollapsingMergeTree ||\n engine === ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree\n );\n}\n\n/**\n * Extract engine value from table config, handling both legacy and new formats\n */\nfunction extractEngineValue(config: OlapConfig<any>): ClickHouseEngines {\n // Legacy config without engine property defaults to MergeTree\n if (!(\"engine\" in config)) {\n return ClickHouseEngines.MergeTree;\n }\n\n // All engines (replicated and non-replicated) have engine as direct value\n return config.engine as ClickHouseEngines;\n}\n\n/**\n * Convert engine config for basic MergeTree engines\n */\nfunction convertBasicEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n switch (engine) {\n case ClickHouseEngines.MergeTree:\n return { engine: \"MergeTree\" };\n\n case ClickHouseEngines.AggregatingMergeTree:\n return { engine: \"AggregatingMergeTree\" };\n\n case ClickHouseEngines.ReplacingMergeTree: {\n const replacingConfig = config as ReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplacingMergeTree\",\n ver: replacingConfig.ver,\n isDeleted: replacingConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.SummingMergeTree: {\n const summingConfig = config as SummingMergeTreeConfig<any>;\n return {\n engine: \"SummingMergeTree\",\n columns: summingConfig.columns,\n };\n }\n\n case ClickHouseEngines.CollapsingMergeTree: {\n const collapsingConfig = config as any; // CollapsingMergeTreeConfig<any>\n return {\n engine: \"CollapsingMergeTree\",\n sign: collapsingConfig.sign,\n };\n }\n\n case ClickHouseEngines.VersionedCollapsingMergeTree: {\n const versionedConfig = config as any; // VersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"VersionedCollapsingMergeTree\",\n sign: versionedConfig.sign,\n ver: versionedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert engine config for replicated MergeTree engines\n */\nfunction convertReplicatedEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n // First check if this is a replicated engine config\n if (!hasReplicatedEngine(config)) {\n return undefined;\n }\n\n switch (engine) {\n case ClickHouseEngines.ReplicatedMergeTree: {\n const replicatedConfig = config as ReplicatedMergeTreeConfig<any>;\n return {\n engine: 
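The guards above rely on an `"engine" in config` check to narrow a union whose members may lack the property entirely. A self-contained sketch of that narrowing pattern with hypothetical types:

```typescript
// Hypothetical union: the first member has no `engine` property at all.
type Config =
  | { orderByFields?: string[] }
  | { engine: "S3Queue"; s3Path: string };

function isS3Queue(c: Config): c is Extract<Config, { engine: "S3Queue" }> {
  // `"engine" in c` first narrows away members without the property.
  return "engine" in c && c.engine === "S3Queue";
}

const c: Config = { engine: "S3Queue", s3Path: "s3://bucket/*" };
if (isS3Queue(c)) {
  console.log(c.s3Path); // safely narrowed
}
```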
\"ReplicatedMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedReplacingMergeTree: {\n const replicatedConfig =\n config as ReplicatedReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedReplacingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n ver: replicatedConfig.ver,\n isDeleted: replicatedConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.ReplicatedAggregatingMergeTree: {\n const replicatedConfig =\n config as ReplicatedAggregatingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedAggregatingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedSummingMergeTree: {\n const replicatedConfig = config as ReplicatedSummingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedSummingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n columns: replicatedConfig.columns,\n };\n }\n\n case ClickHouseEngines.ReplicatedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n };\n }\n\n case ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedVersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedVersionedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n ver: replicatedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert S3Queue engine config\n * Uses type guard for fully type-safe property access\n */\nfunction convertS3QueueEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!isS3QueueConfig(config)) {\n return undefined;\n }\n\n return {\n engine: \"S3Queue\",\n s3Path: config.s3Path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n headers: config.headers,\n };\n}\n\n/**\n * Convert S3 engine config\n */\nfunction convertS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.S3) {\n return undefined;\n }\n\n return {\n engine: \"S3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n partitionStrategy: config.partitionStrategy,\n partitionColumnsInDataFile: config.partitionColumnsInDataFile,\n };\n}\n\n/**\n * Convert Buffer engine config\n */\nfunction convertBufferEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Buffer) {\n return undefined;\n }\n\n return {\n engine: \"Buffer\",\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n numLayers: config.numLayers,\n minTime: config.minTime,\n maxTime: config.maxTime,\n minRows: config.minRows,\n maxRows: config.maxRows,\n minBytes: config.minBytes,\n maxBytes: config.maxBytes,\n flushTime: config.flushTime,\n flushRows: config.flushRows,\n flushBytes: 
config.flushBytes,\n };\n}\n\n/**\n * Convert Distributed engine config\n */\nfunction convertDistributedEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (\n !(\"engine\" in config) ||\n config.engine !== ClickHouseEngines.Distributed\n ) {\n return undefined;\n }\n\n return {\n engine: \"Distributed\",\n cluster: config.cluster,\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n shardingKey: config.shardingKey,\n policyName: config.policyName,\n };\n}\n\n/**\n * Convert IcebergS3 engine config\n */\nfunction convertIcebergS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.IcebergS3) {\n return undefined;\n }\n\n return {\n engine: \"IcebergS3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n };\n}\n\n/**\n * Convert Kafka engine configuration\n */\nfunction convertKafkaEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Kafka) {\n return undefined;\n }\n\n return {\n engine: \"Kafka\",\n brokerList: config.brokerList,\n topicList: config.topicList,\n groupName: config.groupName,\n format: config.format,\n };\n}\n\n/**\n * Convert table configuration to engine config\n */\nfunction convertTableConfigToEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n const engine = extractEngineValue(config);\n\n // Try basic engines first\n const basicConfig = convertBasicEngineConfig(engine, config);\n if (basicConfig) {\n return basicConfig;\n }\n\n // Try replicated engines\n const replicatedConfig = convertReplicatedEngineConfig(engine, config);\n if (replicatedConfig) {\n return replicatedConfig;\n }\n\n // Handle S3Queue\n if (engine === ClickHouseEngines.S3Queue) {\n return convertS3QueueEngineConfig(config);\n }\n\n // Handle S3\n if (engine === ClickHouseEngines.S3) {\n return convertS3EngineConfig(config);\n }\n\n // Handle Buffer\n if (engine === ClickHouseEngines.Buffer) {\n return convertBufferEngineConfig(config);\n }\n\n // Handle Distributed\n if (engine === ClickHouseEngines.Distributed) {\n return convertDistributedEngineConfig(config);\n }\n\n // Handle IcebergS3\n if (engine === ClickHouseEngines.IcebergS3) {\n return convertIcebergS3EngineConfig(config);\n }\n\n // Handle Kafka\n if (engine === ClickHouseEngines.Kafka) {\n return convertKafkaEngineConfig(config);\n }\n\n return undefined;\n}\n\nexport const toInfraMap = (registry: typeof moose_internal) => {\n const tables: { [key: string]: TableJson } = {};\n const topics: { [key: string]: StreamJson } = {};\n const ingestApis: { [key: string]: IngestApiJson } = {};\n const apis: { [key: string]: ApiJson } = {};\n const sqlResources: { [key: string]: SqlResourceJson } = {};\n const workflows: { [key: string]: WorkflowJson } = {};\n const webApps: { [key: string]: WebAppJson } = {};\n\n registry.tables.forEach((table) => {\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n // If the table is part of an IngestPipeline, inherit metadata if not set\n let metadata = (table as any).metadata;\n if (!metadata && table.config && (table as any).pipelineParent) {\n metadata = (table as any).pipelineParent.metadata;\n }\n // Create type-safe engine configuration\n const engineConfig: EngineConfig | undefined =\n 
convertTableConfigToEngineConfig(table.config);\n\n // Get table settings, applying defaults for S3Queue\n let tableSettings: { [key: string]: string } | undefined = undefined;\n\n if (table.config.settings) {\n // Convert all settings to strings, filtering out undefined values\n tableSettings = Object.entries(table.config.settings).reduce(\n (acc, [key, value]) => {\n if (value !== undefined) {\n acc[key] = String(value);\n }\n return acc;\n },\n {} as { [key: string]: string },\n );\n }\n\n // Apply default settings for S3Queue if not already specified\n if (engineConfig?.engine === \"S3Queue\") {\n if (!tableSettings) {\n tableSettings = {};\n }\n // Set default mode to 'unordered' if not specified\n if (!tableSettings.mode) {\n tableSettings.mode = \"unordered\";\n }\n }\n\n // Determine ORDER BY from config\n // Note: engines like Buffer and Distributed don't support orderBy/partitionBy/sampleBy\n const hasOrderByFields =\n \"orderByFields\" in table.config &&\n Array.isArray(table.config.orderByFields) &&\n table.config.orderByFields.length > 0;\n const hasOrderByExpression =\n \"orderByExpression\" in table.config &&\n typeof table.config.orderByExpression === \"string\" &&\n table.config.orderByExpression.length > 0;\n if (hasOrderByFields && hasOrderByExpression) {\n throw new Error(\n `Table ${table.name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n const orderBy: string[] | string =\n hasOrderByExpression && \"orderByExpression\" in table.config ?\n (table.config.orderByExpression ?? \"\")\n : \"orderByFields\" in table.config ? (table.config.orderByFields ?? [])\n : [];\n\n tables[id] = {\n name: table.name,\n columns: table.columnArray,\n orderBy,\n partitionBy:\n \"partitionBy\" in table.config ? table.config.partitionBy : undefined,\n sampleByExpression:\n \"sampleByExpression\" in table.config ?\n table.config.sampleByExpression\n : undefined,\n primaryKeyExpression:\n \"primaryKeyExpression\" in table.config ?\n table.config.primaryKeyExpression\n : undefined,\n engineConfig,\n version: table.config.version,\n metadata,\n lifeCycle: table.config.lifeCycle,\n // Map 'settings' to 'tableSettings' for internal use\n tableSettings:\n tableSettings && Object.keys(tableSettings).length > 0 ?\n tableSettings\n : undefined,\n indexes:\n table.config.indexes?.map((i) => ({\n ...i,\n granularity: i.granularity === undefined ? 1 : i.granularity,\n arguments: i.arguments === undefined ? 
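As the check above enforces, a table config may set orderByFields or orderByExpression, never both. A hedged sketch of the two valid spellings, assuming OlapTable is importable from the package root and that the Moose compiler plugin supplies the schema:

```typescript
import { OlapTable } from "@514labs/moose-lib";

interface Event {
  id: string;
  at: Date;
}

// Either list the columns...
const byFields = new OlapTable<Event>("events_by_fields", {
  orderByFields: ["id", "at"],
});

// ...or give a single ClickHouse expression. Setting both makes
// serialization throw "Provide either orderByFields or orderByExpression".
const byExpression = new OlapTable<Event>("events_by_expr", {
  orderByExpression: "(id, toDate(at))",
});
```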
[] : i.arguments,\n })) || [],\n projections:\n table.config.projections?.map((p) => ({\n name: p.name,\n select: p.select,\n ...(\"orderBy\" in p && p.orderBy !== undefined ?\n { orderBy: p.orderBy }\n : {}),\n ...(\"groupBy\" in p && p.groupBy !== undefined ?\n { groupBy: p.groupBy }\n : {}),\n })) || [],\n ttl: table.config.ttl,\n database: table.config.database,\n cluster: table.config.cluster,\n };\n });\n\n registry.streams.forEach((stream) => {\n // If the stream is part of an IngestPipeline, inherit metadata if not set\n let metadata = stream.metadata;\n if (!metadata && stream.config && (stream as any).pipelineParent) {\n metadata = (stream as any).pipelineParent.metadata;\n }\n const transformationTargets: Target[] = [];\n const consumers: Consumer[] = [];\n\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([destination, _, config]) => {\n transformationTargets.push({\n kind: \"stream\",\n name: destinationName,\n version: config.version,\n metadata: config.metadata,\n sourceFile: config.sourceFile,\n });\n });\n });\n\n stream._consumers.forEach((consumer) => {\n consumers.push({\n version: consumer.config.version,\n sourceFile: consumer.config.sourceFile,\n });\n });\n\n topics[stream.name] = {\n name: stream.name,\n columns: stream.columnArray,\n targetTable: stream.config.destination?.name,\n targetTableVersion: stream.config.destination?.config.version,\n retentionPeriod: stream.config.retentionPeriod ?? defaultRetentionPeriod,\n partitionCount: stream.config.parallelism ?? 1,\n version: stream.config.version,\n transformationTargets,\n hasMultiTransform: stream._multipleTransformations === undefined,\n consumers,\n metadata,\n lifeCycle: stream.config.lifeCycle,\n schemaConfig: stream.config.schemaConfig,\n };\n });\n\n registry.ingestApis.forEach((api) => {\n // If the ingestApi is part of an IngestPipeline, inherit metadata if not set\n let metadata = api.metadata;\n if (!metadata && api.config && (api as any).pipelineParent) {\n metadata = (api as any).pipelineParent.metadata;\n }\n ingestApis[api.name] = {\n name: api.name,\n columns: api.columnArray,\n version: api.config.version,\n path: api.config.path,\n writeTo: {\n kind: \"stream\",\n name: api.config.destination.name,\n },\n deadLetterQueue: api.config.deadLetterQueue?.name,\n metadata,\n schema: api.schema,\n allowExtraFields: api.allowExtraFields,\n };\n });\n\n registry.apis.forEach((api, key) => {\n const rustKey =\n api.config.version ? 
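Two id conventions appear in this serialization: tables key on `${name}_${version}` while APIs key on `${name}:${version}`, both falling back to the bare name when no version is set. A tiny sketch:

```typescript
// Id conventions used by toInfraMap (sketch, not exported helpers).
const tableId = (name: string, version?: string) =>
  version ? `${name}_${version}` : name;
const apiKey = (name: string, version?: string) =>
  version ? `${name}:${version}` : name;

console.log(tableId("events", "1.0")); // "events_1.0"
console.log(apiKey("getEvents")); // "getEvents"
```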
`${api.name}:${api.config.version}` : api.name;\n apis[rustKey] = {\n name: api.name,\n queryParams: api.columnArray,\n responseSchema: api.responseSchema,\n version: api.config.version,\n path: api.config.path,\n metadata: api.metadata,\n };\n });\n\n registry.sqlResources.forEach((sqlResource) => {\n sqlResources[sqlResource.name] = {\n name: sqlResource.name,\n setup: sqlResource.setup,\n teardown: sqlResource.teardown,\n sourceFile: sqlResource.sourceFile,\n sourceLine: sqlResource.sourceLine,\n sourceColumn: sqlResource.sourceColumn,\n\n pullsDataFrom: sqlResource.pullsDataFrom.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n pushesDataTo: sqlResource.pushesDataTo.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n };\n });\n\n registry.workflows.forEach((workflow) => {\n workflows[workflow.name] = {\n name: workflow.name,\n retries: workflow.config.retries,\n timeout: workflow.config.timeout,\n schedule: workflow.config.schedule,\n };\n });\n\n registry.webApps.forEach((webApp) => {\n webApps[webApp.name] = {\n name: webApp.name,\n mountPath: webApp.config.mountPath || \"/\",\n metadata: webApp.config.metadata,\n };\n });\n\n return {\n topics,\n tables,\n ingestApis,\n apis,\n sqlResources,\n workflows,\n webApps,\n };\n};\n\n/**\n * Retrieves the global internal Moose resource registry.\n * Uses `globalThis` to ensure a single registry instance.\n *\n * @returns The internal Moose resource registry.\n */\nexport const getMooseInternal = (): typeof moose_internal =>\n (globalThis as any).moose_internal;\n\n// work around for variable visibility in compiler output\nif (getMooseInternal() === undefined) {\n (globalThis as any).moose_internal = moose_internal;\n}\n\n/**\n * Loads the user's application entry point (`app/index.ts`) to register resources,\n * then generates and prints the infrastructure map as JSON.\n *\n * This function is the main entry point used by the Moose infrastructure system\n * to discover the defined resources.\n * It prints the JSON map surrounded by specific delimiters (`___MOOSE_STUFF___start`\n * and `end___MOOSE_STUFF___`) for easy extraction by the calling process.\n */\nexport const dumpMooseInternal = async () => {\n loadIndex();\n\n console.log(\n \"___MOOSE_STUFF___start\",\n JSON.stringify(toInfraMap(getMooseInternal())),\n \"end___MOOSE_STUFF___\",\n );\n};\n\nconst loadIndex = () => {\n // Clear the registry before loading to support hot reloading\n const registry = getMooseInternal();\n registry.tables.clear();\n registry.streams.clear();\n registry.ingestApis.clear();\n registry.apis.clear();\n registry.sqlResources.clear();\n registry.workflows.clear();\n registry.webApps.clear();\n\n // Clear require cache for app directory to pick up changes\n const appDir 
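/* Hot-reload sketch (paths hypothetical): with process.cwd() === "/srv/project" and\n getSourceDir() === "app", appDir is "/srv/project/app", so every require-cached module\n under that prefix (e.g. "/srv/project/app/tables.ts") is evicted before index.ts is\n re-required and edits take effect without restarting the process. */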
= `${process.cwd()}/${getSourceDir()}`;\n Object.keys(require.cache).forEach((key) => {\n if (key.startsWith(appDir)) {\n delete require.cache[key];\n }\n });\n\n try {\n require(`${process.cwd()}/${getSourceDir()}/index.ts`);\n } catch (error) {\n let hint: string | undefined;\n const details = error instanceof Error ? error.message : String(error);\n if (details.includes(\"ERR_REQUIRE_ESM\") || details.includes(\"ES Module\")) {\n hint =\n \"The file or its dependencies are ESM-only. Switch to packages that dual-support CJS & ESM, or upgrade to Node 22.12+. \" +\n \"If you must use Node 20, you may try Node 20.19\\n\\n\";\n }\n\n const errorMsg = `${hint ?? \"\"}${details}`;\n const cause = error instanceof Error ? error : undefined;\n throw new Error(errorMsg, { cause });\n }\n};\n\n/**\n * Loads the user's application entry point and extracts all registered stream\n * transformation and consumer functions.\n *\n * @returns A Map where keys are unique identifiers for transformations/consumers\n * (e.g., \"sourceStream_destStream_version\", \"sourceStream_<no-target>_version\")\n * and values are tuples containing: [handler function, config, source stream columns]\n */\nexport const getStreamingFunctions = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n const transformFunctions = new Map<\n string,\n [\n (data: unknown) => unknown,\n TransformConfig<any> | ConsumerConfig<any>,\n Column[],\n ]\n >();\n\n registry.streams.forEach((stream) => {\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([_, transform, config]) => {\n const transformFunctionKey = `${stream.name}_${destinationName}${config.version ? `_${config.version}` : \"\"}`;\n compilerLog(`getStreamingFunctions: ${transformFunctionKey}`);\n transformFunctions.set(transformFunctionKey, [\n transform,\n config,\n stream.columnArray,\n ]);\n });\n });\n\n stream._consumers.forEach((consumer) => {\n const consumerFunctionKey = `${stream.name}_<no-target>${consumer.config.version ? `_${consumer.config.version}` : \"\"}`;\n transformFunctions.set(consumerFunctionKey, [\n consumer.consumer,\n consumer.config,\n stream.columnArray,\n ]);\n });\n });\n\n return transformFunctions;\n};\n\n/**\n * Loads the user's application entry point and extracts all registered\n * API handler functions.\n *\n * @returns A Map where keys are the names of the APIs and values\n * are their corresponding handler functions.\n */\nexport const getApis = async () => {\n loadIndex();\n const apiFunctions = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n const registry = getMooseInternal();\n // Single pass: store full keys, track aliasing decisions\n const versionCountByName = new Map<string, number>();\n const nameToSoleVersionHandler = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n registry.apis.forEach((api, key) => {\n const handler = api.getHandler();\n apiFunctions.set(key, handler);\n\n if (!api.config.version) {\n // Explicit unversioned takes precedence for alias\n if (!apiFunctions.has(api.name)) {\n apiFunctions.set(api.name, handler);\n }\n nameToSoleVersionHandler.delete(api.name);\n versionCountByName.delete(api.name);\n } else if (!apiFunctions.has(api.name)) {\n // Only track versioned for alias if no explicit unversioned present\n const count = (versionCountByName.get(api.name) ?? 
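/* Alias sketch (names illustrative): if only "stats" version "2.0" is registered, both\n "stats:2.0" and the bare alias "stats" resolve to its handler; adding a second version\n ("stats:3.0") or an explicit unversioned "stats" stops the bare alias from being\n inferred from a sole version. */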
0) + 1;\n versionCountByName.set(api.name, count);\n if (count === 1) {\n nameToSoleVersionHandler.set(api.name, handler);\n } else {\n nameToSoleVersionHandler.delete(api.name);\n }\n }\n });\n\n // Finalize aliases for names that have exactly one versioned API and no unversioned\n nameToSoleVersionHandler.forEach((handler, name) => {\n if (!apiFunctions.has(name)) {\n apiFunctions.set(name, handler);\n }\n });\n\n return apiFunctions;\n};\n\nexport const dlqSchema: IJsonSchemaCollection.IV3_1 = {\n version: \"3.1\",\n components: {\n schemas: {\n DeadLetterModel: {\n type: \"object\",\n properties: {\n originalRecord: {\n $ref: \"#/components/schemas/Recordstringany\",\n },\n errorMessage: {\n type: \"string\",\n },\n errorType: {\n type: \"string\",\n },\n failedAt: {\n type: \"string\",\n format: \"date-time\",\n },\n source: {\n oneOf: [\n {\n const: \"api\",\n },\n {\n const: \"transform\",\n },\n {\n const: \"table\",\n },\n ],\n },\n },\n required: [\n \"originalRecord\",\n \"errorMessage\",\n \"errorType\",\n \"failedAt\",\n \"source\",\n ],\n },\n Recordstringany: {\n type: \"object\",\n properties: {},\n required: [],\n description: \"Construct a type with a set of properties K of type T\",\n additionalProperties: {},\n },\n },\n },\n schemas: [\n {\n $ref: \"#/components/schemas/DeadLetterModel\",\n },\n ],\n};\n\nexport const dlqColumns: Column[] = [\n {\n name: \"originalRecord\",\n data_type: \"Json\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorMessage\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorType\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"failedAt\",\n data_type: \"DateTime\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"source\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n];\n\nexport const getWorkflows = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n return registry.workflows;\n};\n\nfunction findTaskInTree(\n task: Task<any, any>,\n targetName: string,\n): Task<any, any> | undefined {\n if (task.name === targetName) {\n return task;\n }\n\n if (task.config.onComplete?.length) {\n for (const childTask of task.config.onComplete) {\n const found = findTaskInTree(childTask, targetName);\n if (found) {\n return found;\n }\n }\n }\n\n return undefined;\n}\n\nexport const getTaskForWorkflow = async (\n workflowName: string,\n taskName: string,\n): Promise<Task<any, any>> => {\n const workflows = await getWorkflows();\n const workflow = workflows.get(workflowName);\n if (!workflow) {\n throw new Error(`Workflow ${workflowName} not found`);\n }\n\n const task = findTaskInTree(\n workflow.config.startingTask as Task<any, any>,\n taskName,\n );\n if (!task) {\n throw new Error(`Task ${taskName} not found in workflow ${workflowName}`);\n }\n\n return task;\n};\n\nexport const getWebApps = async () => {\n 
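/* (Re getTaskForWorkflow above, names illustrative: getTaskForWorkflow("daily_etl", "extract")\n resolves the workflow from the registry, then findTaskInTree walks startingTask and each\n task's onComplete children depth-first, throwing if either name is unknown.) */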
loadIndex();\n return getMooseInternal().webApps;\n};\n","import path from \"node:path\";\nimport * as toml from \"toml\";\n\n/**\n * ClickHouse configuration from moose.config.toml\n */\nexport interface ClickHouseConfig {\n host: string;\n host_port: number;\n user: string;\n password: string;\n db_name: string;\n use_ssl?: boolean;\n native_port?: number;\n}\n\n/**\n * Redpanda/Kafka configuration from moose.config.toml\n */\nexport interface KafkaConfig {\n /** Broker connection string (e.g., \"host:port\" or comma-separated list) */\n broker: string;\n /** Message timeout in milliseconds */\n message_timeout_ms: number;\n /** Default retention period in milliseconds */\n retention_ms: number;\n /** Topic replication factor */\n replication_factor?: number;\n /** SASL username for authentication, if required */\n sasl_username?: string;\n /** SASL password for authentication, if required */\n sasl_password?: string;\n /** SASL mechanism (e.g., \"PLAIN\", \"SCRAM-SHA-256\") */\n sasl_mechanism?: string;\n /** Security protocol (e.g., \"SASL_SSL\", \"PLAINTEXT\") */\n security_protocol?: string;\n /** Optional namespace used as a prefix for topics */\n namespace?: string;\n /** Optional Confluent Schema Registry URL */\n schema_registry_url?: string;\n}\n\n/**\n * Project configuration from moose.config.toml\n */\nexport interface ProjectConfig {\n language: string;\n clickhouse_config: ClickHouseConfig;\n redpanda_config?: KafkaConfig;\n /**\n * Redpanda/Kafka configuration. Previously named `redpanda_config` in some places.\n * Prefer `kafka_config` but support both for backward compatibility.\n */\n kafka_config?: KafkaConfig;\n}\n\n/**\n * Error thrown when configuration cannot be found or parsed\n */\nexport class ConfigError extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"ConfigError\";\n }\n}\n\n/**\n * Walks up the directory tree to find moose.config.toml\n */\nasync function findConfigFile(\n startDir: string = process.cwd(),\n): Promise<string | null> {\n const fs = await import(\"node:fs\");\n\n let currentDir = path.resolve(startDir);\n\n while (true) {\n const configPath = path.join(currentDir, \"moose.config.toml\");\n if (fs.existsSync(configPath)) {\n return configPath;\n }\n\n const parentDir = path.dirname(currentDir);\n if (parentDir === currentDir) {\n // Reached root directory\n break;\n }\n currentDir = parentDir;\n }\n\n return null;\n}\n\n/**\n * Reads and parses the project configuration from moose.config.toml\n */\nexport async function readProjectConfig(): Promise<ProjectConfig> {\n const fs = await import(\"node:fs\");\n const configPath = await findConfigFile();\n if (!configPath) {\n throw new ConfigError(\n \"moose.config.toml not found in current directory or any parent directory\",\n );\n }\n\n try {\n const configContent = fs.readFileSync(configPath, \"utf-8\");\n const config = toml.parse(configContent) as ProjectConfig;\n return config;\n } catch (error) {\n throw new ConfigError(`Failed to parse moose.config.toml: ${error}`);\n }\n}\n","import { readProjectConfig } from \"./configFile\";\n\ninterface RuntimeClickHouseConfig {\n host: string;\n port: string;\n username: string;\n password: string;\n database: string;\n useSSL: boolean;\n}\n\ninterface RuntimeKafkaConfig {\n broker: string;\n messageTimeoutMs: number;\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string;\n securityProtocol?: string;\n namespace?: string;\n schemaRegistryUrl?: string;\n}\n\nclass ConfigurationRegistry {\n private 
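/* Discovery sketch (paths hypothetical): starting from /srv/project/app, findConfigFile\n probes /srv/project/app/moose.config.toml, then /srv/project/moose.config.toml, and so\n on up to the filesystem root; readProjectConfig throws ConfigError if nothing is found. */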
static instance: ConfigurationRegistry;\n private clickhouseConfig?: RuntimeClickHouseConfig;\n private kafkaConfig?: RuntimeKafkaConfig;\n\n static getInstance(): ConfigurationRegistry {\n if (!ConfigurationRegistry.instance) {\n ConfigurationRegistry.instance = new ConfigurationRegistry();\n }\n return ConfigurationRegistry.instance;\n }\n\n setClickHouseConfig(config: RuntimeClickHouseConfig): void {\n this.clickhouseConfig = config;\n }\n\n setKafkaConfig(config: RuntimeKafkaConfig): void {\n this.kafkaConfig = config;\n }\n\n private _env(name: string): string | undefined {\n const value = process.env[name];\n if (value === undefined) return undefined;\n const trimmed = value.trim();\n return trimmed.length > 0 ? trimmed : undefined;\n }\n\n private _parseBool(value: string | undefined): boolean | undefined {\n if (value === undefined) return undefined;\n switch (value.trim().toLowerCase()) {\n case \"1\":\n case \"true\":\n case \"yes\":\n case \"on\":\n return true;\n case \"0\":\n case \"false\":\n case \"no\":\n case \"off\":\n return false;\n default:\n return undefined;\n }\n }\n\n async getClickHouseConfig(): Promise<RuntimeClickHouseConfig> {\n if (this.clickhouseConfig) {\n return this.clickhouseConfig;\n }\n\n // Fallback to reading from config file for backward compatibility\n const projectConfig = await readProjectConfig();\n const envHost = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST\");\n const envPort = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST_PORT\");\n const envUser = this._env(\"MOOSE_CLICKHOUSE_CONFIG__USER\");\n const envPassword = this._env(\"MOOSE_CLICKHOUSE_CONFIG__PASSWORD\");\n const envDb = this._env(\"MOOSE_CLICKHOUSE_CONFIG__DB_NAME\");\n const envUseSSL = this._parseBool(\n this._env(\"MOOSE_CLICKHOUSE_CONFIG__USE_SSL\"),\n );\n\n return {\n host: envHost ?? projectConfig.clickhouse_config.host,\n port: envPort ?? projectConfig.clickhouse_config.host_port.toString(),\n username: envUser ?? projectConfig.clickhouse_config.user,\n password: envPassword ?? projectConfig.clickhouse_config.password,\n database: envDb ?? projectConfig.clickhouse_config.db_name,\n useSSL:\n envUseSSL !== undefined ? 
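/* Precedence sketch (value illustrative): MOOSE_CLICKHOUSE_CONFIG__HOST=ch.internal beats\n clickhouse_config.host from moose.config.toml; _env treats blank values as unset, and\n _parseBool accepts 1/true/yes/on and 0/false/no/off for USE_SSL. */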
envUseSSL : (\n projectConfig.clickhouse_config.use_ssl || false\n ),\n };\n }\n\n async getStandaloneClickhouseConfig(\n overrides?: Partial<RuntimeClickHouseConfig>,\n ): Promise<RuntimeClickHouseConfig> {\n if (this.clickhouseConfig) {\n return { ...this.clickhouseConfig, ...overrides };\n }\n\n const envHost = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST\");\n const envPort = this._env(\"MOOSE_CLICKHOUSE_CONFIG__HOST_PORT\");\n const envUser = this._env(\"MOOSE_CLICKHOUSE_CONFIG__USER\");\n const envPassword = this._env(\"MOOSE_CLICKHOUSE_CONFIG__PASSWORD\");\n const envDb = this._env(\"MOOSE_CLICKHOUSE_CONFIG__DB_NAME\");\n const envUseSSL = this._parseBool(\n this._env(\"MOOSE_CLICKHOUSE_CONFIG__USE_SSL\"),\n );\n\n let projectConfig;\n try {\n projectConfig = await readProjectConfig();\n } catch (error) {\n projectConfig = null;\n }\n\n const defaults = {\n host: \"localhost\",\n port: \"18123\",\n username: \"default\",\n password: \"\",\n database: \"local\",\n useSSL: false,\n };\n\n return {\n host:\n overrides?.host ??\n envHost ??\n projectConfig?.clickhouse_config.host ??\n defaults.host,\n port:\n overrides?.port ??\n envPort ??\n projectConfig?.clickhouse_config.host_port.toString() ??\n defaults.port,\n username:\n overrides?.username ??\n envUser ??\n projectConfig?.clickhouse_config.user ??\n defaults.username,\n password:\n overrides?.password ??\n envPassword ??\n projectConfig?.clickhouse_config.password ??\n defaults.password,\n database:\n overrides?.database ??\n envDb ??\n projectConfig?.clickhouse_config.db_name ??\n defaults.database,\n useSSL:\n overrides?.useSSL ??\n envUseSSL ??\n projectConfig?.clickhouse_config.use_ssl ??\n defaults.useSSL,\n };\n }\n\n async getKafkaConfig(): Promise<RuntimeKafkaConfig> {\n if (this.kafkaConfig) {\n return this.kafkaConfig;\n }\n\n const projectConfig = await readProjectConfig();\n\n const envBroker =\n this._env(\"MOOSE_REDPANDA_CONFIG__BROKER\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__BROKER\");\n const envMsgTimeout =\n this._env(\"MOOSE_REDPANDA_CONFIG__MESSAGE_TIMEOUT_MS\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__MESSAGE_TIMEOUT_MS\");\n const envSaslUsername =\n this._env(\"MOOSE_REDPANDA_CONFIG__SASL_USERNAME\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SASL_USERNAME\");\n const envSaslPassword =\n this._env(\"MOOSE_REDPANDA_CONFIG__SASL_PASSWORD\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SASL_PASSWORD\");\n const envSaslMechanism =\n this._env(\"MOOSE_REDPANDA_CONFIG__SASL_MECHANISM\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SASL_MECHANISM\");\n const envSecurityProtocol =\n this._env(\"MOOSE_REDPANDA_CONFIG__SECURITY_PROTOCOL\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SECURITY_PROTOCOL\");\n const envNamespace =\n this._env(\"MOOSE_REDPANDA_CONFIG__NAMESPACE\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__NAMESPACE\");\n const envSchemaRegistryUrl =\n this._env(\"MOOSE_REDPANDA_CONFIG__SCHEMA_REGISTRY_URL\") ??\n this._env(\"MOOSE_KAFKA_CONFIG__SCHEMA_REGISTRY_URL\");\n\n const fileKafka =\n projectConfig.kafka_config ?? projectConfig.redpanda_config;\n\n return {\n broker: envBroker ?? fileKafka?.broker ?? \"localhost:19092\",\n messageTimeoutMs:\n envMsgTimeout ?\n parseInt(envMsgTimeout, 10)\n : (fileKafka?.message_timeout_ms ?? 1000),\n saslUsername: envSaslUsername ?? fileKafka?.sasl_username,\n saslPassword: envSaslPassword ?? fileKafka?.sasl_password,\n saslMechanism: envSaslMechanism ?? fileKafka?.sasl_mechanism,\n securityProtocol: envSecurityProtocol ?? fileKafka?.security_protocol,\n namespace: envNamespace ?? 
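/* Fallback sketch: each field resolves MOOSE_REDPANDA_CONFIG__* first, then\n MOOSE_KAFKA_CONFIG__*, then kafka_config (or the legacy redpanda_config) from\n moose.config.toml, then a default - e.g. broker ends up "localhost:19092" when nothing\n else is set. */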
fileKafka?.namespace,\n schemaRegistryUrl: envSchemaRegistryUrl ?? fileKafka?.schema_registry_url,\n };\n }\n\n hasRuntimeConfig(): boolean {\n return !!this.clickhouseConfig || !!this.kafkaConfig;\n }\n}\n\n(globalThis as any)._mooseConfigRegistry = ConfigurationRegistry.getInstance();\nexport type {\n ConfigurationRegistry,\n RuntimeClickHouseConfig,\n RuntimeKafkaConfig,\n};\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase, TypiaValidators } from \"../typedBase\";\nimport {\n Column,\n isArrayNestedType,\n isNestedType,\n} from \"../../dataModels/dataModelTypes\";\nimport { ClickHouseEngines } from \"../../blocks/helpers\";\nimport { getMooseInternal, isClientOnlyMode } from \"../internal\";\nimport { Readable } from \"node:stream\";\nimport { createHash } from \"node:crypto\";\nimport type {\n ConfigurationRegistry,\n RuntimeClickHouseConfig,\n} from \"../../config/runtime\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport { IdentifierBrandedString, quoteIdentifier } from \"../../sqlHelpers\";\nimport type { NodeClickHouseClient } from \"@clickhouse/client/dist/client\";\n\nexport interface TableIndex {\n name: string;\n expression: string;\n type: string;\n arguments?: string[];\n granularity?: number;\n}\n\n/**\n * Defines a ClickHouse projection for optimizing specific query patterns.\n * Projections duplicate data with different sort orders or pre-computed aggregations.\n *\n * ClickHouse rules:\n * - Non-aggregate projections: Must have ORDER BY\n * - Aggregate projections (with GROUP BY): Cannot have ORDER BY (ordering is implicit from GROUP BY)\n */\nexport type TableProjection<T = any> = {\n /** Unique name for the projection */\n name: string;\n} & (\n | {\n /** List of column names to include in non-aggregate projection */\n select: (keyof T & string)[];\n /** Columns to order by (required for non-aggregate projections) */\n orderBy: (keyof T & string)[];\n groupBy?: never;\n }\n | {\n /** List of column names to include in aggregate projection */\n select: (keyof T & string)[];\n /** GROUP BY columns (for aggregate projections). ORDER BY is implicit from this. */\n groupBy: (keyof T & string)[];\n orderBy?: never;\n }\n | {\n /** SQL expression for SELECT clause (non-aggregate) */\n select: string;\n /** SQL expression for ORDER BY clause (required for non-aggregate projections) */\n orderBy: string;\n groupBy?: never;\n }\n | {\n /** SQL expression for SELECT clause (aggregate) */\n select: string;\n /** SQL expression for GROUP BY clause (for aggregate projections). ORDER BY is implicit from this. 
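* An illustrative aggregate projection in this SQL-expression form:\n * ```typescript\n * { name: "daily_totals", select: "toDate(ts) AS day, sum(amount) AS total", groupBy: "day" }\n * ```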
*/\n groupBy: string;\n orderBy?: never;\n }\n);\n\n/**\n * Represents a failed record during insertion with error details\n */\nexport interface FailedRecord<T> {\n /** The original record that failed to insert */\n record: T;\n /** The error message describing why the insertion failed */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n}\n\n/**\n * Result of an insert operation with detailed success/failure information\n */\nexport interface InsertResult<T> {\n /** Number of records successfully inserted */\n successful: number;\n /** Number of records that failed to insert */\n failed: number;\n /** Total number of records processed */\n total: number;\n /** Detailed information about failed records (if record isolation was used) */\n failedRecords?: FailedRecord<T>[];\n}\n\n/**\n * Error handling strategy for insert operations\n */\nexport type ErrorStrategy =\n | \"fail-fast\" // Fail immediately on any error (default)\n | \"discard\" // Discard bad records and continue with good ones\n | \"isolate\"; // Retry individual records to isolate failures\n\n/**\n * Options for insert operations\n */\nexport interface InsertOptions {\n /** Maximum number of bad records to tolerate before failing */\n allowErrors?: number;\n /** Maximum ratio of bad records to tolerate (0.0 to 1.0) before failing */\n allowErrorsRatio?: number;\n /** Error handling strategy */\n strategy?: ErrorStrategy;\n /** Whether to enable dead letter queue for failed records (future feature) */\n deadLetterQueue?: boolean;\n /** Whether to validate data against schema before insertion (default: true) */\n validate?: boolean;\n /** Whether to skip validation for individual records during 'isolate' strategy retries (default: false) */\n skipValidationOnRetry?: boolean;\n}\n\n/**\n * Validation result for a record with detailed error information\n */\nexport interface ValidationError {\n /** The original record that failed validation */\n record: any;\n /** Detailed validation error message */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n /** The path to the field that failed validation */\n path?: string;\n}\n\n/**\n * Result of data validation with success/failure breakdown\n */\nexport interface ValidationResult<T> {\n /** Records that passed validation */\n valid: T[];\n /** Records that failed validation with detailed error information */\n invalid: ValidationError[];\n /** Total number of records processed */\n total: number;\n}\n\n/**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING\n * Note: Since ClickHouse 24.7, settings no longer require the 's3queue_' prefix\n */\nexport interface S3QueueTableSettings {\n /** Processing mode: \"ordered\" for sequential or \"unordered\" for parallel processing */\n mode?: \"ordered\" | \"unordered\";\n /** What to do with files after processing: 'keep' or 'delete' */\n after_processing?: \"keep\" | \"delete\";\n /** ZooKeeper/Keeper path for coordination between replicas */\n keeper_path?: string;\n /** Number of retry attempts for failed files */\n loading_retries?: string;\n /** Number of threads for parallel processing */\n processing_threads_num?: string;\n /** Enable parallel inserts */\n parallel_inserts?: string;\n /** Enable logging to system.s3queue_log table */\n enable_logging_to_queue_log?: string;\n /** Last processed file path (for ordered mode) */\n last_processed_path?: string;\n /** Maximum number of tracked files 
in ZooKeeper */\n tracked_files_limit?: string;\n /** TTL for tracked files in seconds */\n tracked_file_ttl_sec?: string;\n /** Minimum polling timeout in milliseconds */\n polling_min_timeout_ms?: string;\n /** Maximum polling timeout in milliseconds */\n polling_max_timeout_ms?: string;\n /** Polling backoff in milliseconds */\n polling_backoff_ms?: string;\n /** Minimum cleanup interval in milliseconds */\n cleanup_interval_min_ms?: string;\n /** Maximum cleanup interval in milliseconds */\n cleanup_interval_max_ms?: string;\n /** Number of buckets for sharding (0 = disabled) */\n buckets?: string;\n /** Batch size for listing objects */\n list_objects_batch_size?: string;\n /** Enable hash ring filtering for distributed processing */\n enable_hash_ring_filtering?: string;\n /** Maximum files to process before committing */\n max_processed_files_before_commit?: string;\n /** Maximum rows to process before committing */\n max_processed_rows_before_commit?: string;\n /** Maximum bytes to process before committing */\n max_processed_bytes_before_commit?: string;\n /** Maximum processing time in seconds before committing */\n max_processing_time_sec_before_commit?: string;\n /** Use persistent processing nodes (available from 25.8) */\n use_persistent_processing_nodes?: string;\n /** TTL for persistent processing nodes in seconds */\n persistent_processing_nodes_ttl_seconds?: string;\n /** Additional settings */\n [key: string]: string | undefined;\n}\n\n/**\n * Base configuration shared by all table engines\n * @template T The data type of the records stored in the table.\n */\n\nexport type BaseOlapConfig<T> = (\n | {\n /**\n * Specifies the fields to use for ordering data within the ClickHouse table.\n * This is crucial for optimizing query performance.\n */\n orderByFields: (keyof T & string)[];\n orderByExpression?: undefined;\n }\n | {\n orderByFields?: undefined;\n /**\n * An arbitrary ClickHouse SQL expression for the order by clause.\n *\n * `orderByExpression: \"(id, name)\"` is equivalent to `orderByFields: [\"id\", \"name\"]`\n * `orderByExpression: \"tuple()\"` means no sorting\n */\n orderByExpression: string;\n }\n // specify either or leave both unspecified\n | { orderByFields?: undefined; orderByExpression?: undefined }\n) & {\n partitionBy?: string;\n /**\n * SAMPLE BY expression for approximate query processing.\n *\n * Examples:\n * ```typescript\n * // Single unsigned integer field\n * sampleByExpression: \"userId\"\n *\n * // Hash function on any field type\n * sampleByExpression: \"cityHash64(id)\"\n *\n * // Multiple fields with hash\n * sampleByExpression: \"cityHash64(userId, timestamp)\"\n * ```\n *\n * Requirements:\n * - Expression must evaluate to an unsigned integer (UInt8/16/32/64)\n * - Expression must be present in the ORDER BY clause\n * - If using hash functions, the same expression must appear in orderByExpression\n */\n sampleByExpression?: string;\n /**\n * Optional PRIMARY KEY expression.\n * When specified, this overrides the primary key inferred from Key<T> column annotations.\n *\n * This allows for:\n * - Complex primary keys using functions (e.g., \"cityHash64(id)\")\n * - Different column ordering in primary key vs schema definition\n * - Primary keys that differ from ORDER BY\n *\n * Example: primaryKeyExpression: \"(userId, cityHash64(eventId))\"\n *\n * Note: When this is set, any Key<T> annotations on columns are ignored for PRIMARY KEY generation.\n */\n primaryKeyExpression?: string;\n version?: string;\n lifeCycle?: LifeCycle;\n 
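/**\n * Optional ClickHouse table-level settings as string key/value pairs, surfaced\n * internally as `tableSettings` when the infra map is built (e.g.\n * settings: { index_granularity: "8192" }, value illustrative).\n */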
settings?: { [key: string]: string };\n /**\n * Optional TTL configuration for the table.\n * e.g., \"TTL timestamp + INTERVAL 90 DAY DELETE\"\n *\n * Use the {@link ClickHouseTTL} type to configure column level TTL\n */\n ttl?: string;\n /** Optional secondary/data-skipping indexes */\n indexes?: TableIndex[];\n /**\n * Optional projections for optimizing specific query patterns.\n * Projections store duplicate data with different sort orders or pre-computed aggregations.\n */\n projections?: TableProjection<T>[];\n /**\n * Optional database name for multi-database support.\n * When not specified, uses the global ClickHouse config database.\n */\n database?: string;\n /**\n * Optional cluster name for ON CLUSTER support.\n * Use this to enable replicated tables across ClickHouse clusters.\n * The cluster must be defined in config.toml (dev environment only).\n * Example: cluster: \"prod_cluster\"\n */\n cluster?: string;\n};\n\n/**\n * Configuration for MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type MergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.MergeTree;\n};\n\n/**\n * Configuration for ReplacingMergeTree engine (deduplication)\n * @template T The data type of the records stored in the table.\n */\nexport type ReplacingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.ReplacingMergeTree;\n ver?: keyof T & string; // Optional version column\n isDeleted?: keyof T & string; // Optional is_deleted column\n};\n\n/**\n * Configuration for AggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type AggregatingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.AggregatingMergeTree;\n};\n\n/**\n * Configuration for SummingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type SummingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.SummingMergeTree;\n columns?: string[];\n};\n\n/**\n * Configuration for CollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type CollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.CollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n};\n\n/**\n * Configuration for VersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type VersionedCollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.VersionedCollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n ver: keyof T & string; // Version column for ordering state changes\n};\n\ninterface ReplicatedEngineProperties {\n keeperPath?: string;\n replicaName?: string;\n}\n\n/**\n * Configuration for ReplicatedMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. 
For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedMergeTreeConfig<T> = Omit<MergeTreeConfig<T>, \"engine\"> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedMergeTree;\n };\n\n/**\n * Configuration for ReplicatedReplacingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedReplacingMergeTreeConfig<T> = Omit<\n ReplacingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedReplacingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedAggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedAggregatingMergeTreeConfig<T> = Omit<\n AggregatingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedAggregatingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedSummingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedSummingMergeTreeConfig<T> = Omit<\n SummingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedSummingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedCollapsingMergeTreeConfig<T> = Omit<\n CollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedCollapsingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedVersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedVersionedCollapsingMergeTreeConfig<T> = Omit<\n VersionedCollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree;\n };\n\n/**\n * Configuration for S3Queue engine - only non-alterable constructor parameters.\n * S3Queue-specific settings like 'mode', 'keeper_path', etc. 
should be specified\n * in the settings field, not here.\n * @template T The data type of the records stored in the table.\n */\nexport type S3QueueConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"settings\" | \"orderByFields\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.S3Queue;\n /** S3 bucket path with wildcards (e.g., 's3://bucket/data/*.json') */\n s3Path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd') */\n compression?: string;\n /** Custom HTTP headers */\n headers?: { [key: string]: string };\n /**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING.\n * These settings control the behavior of the S3Queue engine.\n */\n settings?: S3QueueTableSettings;\n};\n\n/**\n * Configuration for S3 engine\n * Note: S3 engine supports ORDER BY clause, unlike S3Queue, Buffer, and Distributed engines\n * @template T The data type of the records stored in the table.\n */\nexport type S3Config<T> = Omit<BaseOlapConfig<T>, \"sampleByExpression\"> & {\n engine: ClickHouseEngines.S3;\n /** S3 path (e.g., 's3://bucket/path/file.json') */\n path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd', 'auto') */\n compression?: string;\n /** Partition strategy (optional) */\n partitionStrategy?: string;\n /** Partition columns in data file (optional) */\n partitionColumnsInDataFile?: string;\n};\n\n/**\n * Configuration for Buffer engine\n * @template T The data type of the records stored in the table.\n */\nexport type BufferConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Buffer;\n /** Target database name for the destination table */\n targetDatabase: string;\n /** Target table name where data will be flushed */\n targetTable: string;\n /** Number of buffer layers (typically 16) */\n numLayers: number;\n /** Minimum time in seconds before flushing */\n minTime: number;\n /** Maximum time in seconds before flushing */\n maxTime: number;\n /** Minimum number of rows before flushing */\n minRows: number;\n /** Maximum number of rows before flushing */\n maxRows: number;\n /** Minimum bytes before flushing */\n minBytes: number;\n /** Maximum bytes before flushing */\n maxBytes: number;\n /** Optional: Flush time in seconds */\n flushTime?: number;\n /** Optional: Flush number of rows */\n flushRows?: number;\n /** Optional: Flush number of bytes */\n flushBytes?: number;\n};\n\n/**\n * Configuration for Distributed engine\n * @template T The data type of the records stored in the table.\n */\nexport type DistributedConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Distributed;\n /** Cluster name from the ClickHouse configuration */\n cluster: string;\n /** Database name on the cluster */\n targetDatabase: string;\n /** Table name on the cluster */\n targetTable: string;\n /** Optional: Sharding key expression for data distribution */\n shardingKey?: 
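/* Sketch (values illustrative): a Distributed table over cluster "prod_cluster" might set\n shardingKey to "cityHash64(userId)" so all rows for a given user land on one shard. */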
string;\n /** Optional: Policy name for data distribution */\n policyName?: string;\n};\n\n/** Kafka table settings. See: https://clickhouse.com/docs/engines/table-engines/integrations/kafka */\nexport interface KafkaTableSettings {\n kafka_security_protocol?: \"PLAINTEXT\" | \"SSL\" | \"SASL_PLAINTEXT\" | \"SASL_SSL\";\n kafka_sasl_mechanism?:\n | \"GSSAPI\"\n | \"PLAIN\"\n | \"SCRAM-SHA-256\"\n | \"SCRAM-SHA-512\"\n | \"OAUTHBEARER\";\n kafka_sasl_username?: string;\n kafka_sasl_password?: string;\n kafka_schema?: string;\n kafka_num_consumers?: string;\n kafka_max_block_size?: string;\n kafka_skip_broken_messages?: string;\n kafka_commit_every_batch?: string;\n kafka_client_id?: string;\n kafka_poll_timeout_ms?: string;\n kafka_poll_max_batch_size?: string;\n kafka_flush_interval_ms?: string;\n kafka_consumer_reschedule_ms?: string;\n kafka_thread_per_consumer?: string;\n kafka_handle_error_mode?: \"default\" | \"stream\";\n kafka_commit_on_select?: string;\n kafka_max_rows_per_message?: string;\n kafka_compression_codec?: string;\n kafka_compression_level?: string;\n}\n\n/** Kafka engine for streaming data from Kafka topics. Additional settings go in `settings`. */\nexport type KafkaConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Kafka;\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n settings?: KafkaTableSettings;\n};\n\n/**\n * Configuration for IcebergS3 engine - read-only Iceberg table access\n *\n * Provides direct querying of Apache Iceberg tables stored on S3.\n * Data is not copied; queries stream directly from Parquet/ORC files.\n *\n * @template T The data type of the records stored in the table.\n *\n * @example\n * ```typescript\n * const lakeEvents = new OlapTable<Event>(\"lake_events\", {\n * engine: ClickHouseEngines.IcebergS3,\n * path: \"s3://datalake/events/\",\n * format: \"Parquet\",\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\"),\n * awsSecretAccessKey: mooseRuntimeEnv.get(\"AWS_SECRET_ACCESS_KEY\")\n * });\n * ```\n *\n * @remarks\n * - IcebergS3 engine is read-only\n * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses\n * - Queries always see the latest Iceberg snapshot (with metadata cache)\n */\nexport type IcebergS3Config<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.IcebergS3;\n /** S3 path to Iceberg table root (e.g., 's3://bucket/warehouse/events/') */\n path: string;\n /** Data format - 'Parquet' or 'ORC' */\n format: \"Parquet\" | \"ORC\";\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key (optional) */\n awsSecretAccessKey?: string;\n /** Compression type (optional: 'gzip', 'zstd', 'auto') */\n compression?: string;\n};\n\n/**\n * Legacy configuration (backward compatibility) - defaults to MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type LegacyOlapConfig<T> = BaseOlapConfig<T>;\n\ntype EngineConfig<T> =\n | MergeTreeConfig<T>\n | ReplacingMergeTreeConfig<T>\n | AggregatingMergeTreeConfig<T>\n | SummingMergeTreeConfig<T>\n | CollapsingMergeTreeConfig<T>\n | VersionedCollapsingMergeTreeConfig<T>\n | ReplicatedMergeTreeConfig<T>\n | ReplicatedReplacingMergeTreeConfig<T>\n | ReplicatedAggregatingMergeTreeConfig<T>\n | 
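/* Selection sketch: any of these engine configs may be passed to OlapTable; a config with\n no engine at all (LegacyOlapConfig, e.g. { orderByFields: ["id"] }) is resolved to\n MergeTree by the OlapTable constructor below. */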
ReplicatedSummingMergeTreeConfig<T>\n | ReplicatedCollapsingMergeTreeConfig<T>\n | ReplicatedVersionedCollapsingMergeTreeConfig<T>\n | S3QueueConfig<T>\n | S3Config<T>\n | BufferConfig<T>\n | DistributedConfig<T>\n | IcebergS3Config<T>\n | KafkaConfig<T>;\n\n/**\n * Union of all engine-specific configurations (new API)\n * @template T The data type of the records stored in the table.\n */\nexport type OlapConfig<T> = EngineConfig<T> | LegacyOlapConfig<T>;\n\n/**\n * Represents an OLAP (Online Analytical Processing) table, typically corresponding to a ClickHouse table.\n * Provides a typed interface for interacting with the table.\n *\n * @template T The data type of the records stored in the table. The structure of T defines the table schema.\n */\nexport class OlapTable<T> extends TypedBase<T, OlapConfig<T>> {\n name: IdentifierBrandedString;\n\n /** @internal */\n public readonly kind = \"OlapTable\";\n\n /** @internal Memoized ClickHouse client for reusing connections across insert calls */\n private _memoizedClient?: any;\n /** @internal Hash of the configuration used to create the memoized client */\n private _configHash?: string;\n /** @internal Cached table name to avoid repeated generation */\n private _cachedTableName?: string;\n\n /**\n * Creates a new OlapTable instance.\n * @param name The name of the table. This name is used for the underlying ClickHouse table.\n * @param config Optional configuration for the OLAP table.\n */\n constructor(name: string, config?: OlapConfig<T>);\n\n /** @internal **/\n constructor(\n name: string,\n config: OlapConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators?: TypiaValidators<T>,\n );\n\n constructor(\n name: string,\n config?: OlapConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n ) {\n // Handle legacy configuration by defaulting to MergeTree when no engine is specified\n const resolvedConfig =\n config ?\n \"engine\" in config ?\n config\n : { ...config, engine: ClickHouseEngines.MergeTree }\n : { engine: ClickHouseEngines.MergeTree };\n\n // Enforce mutual exclusivity at runtime as well\n const hasFields =\n Array.isArray((resolvedConfig as any).orderByFields) &&\n (resolvedConfig as any).orderByFields.length > 0;\n const hasExpr =\n typeof (resolvedConfig as any).orderByExpression === \"string\" &&\n (resolvedConfig as any).orderByExpression.length > 0;\n if (hasFields && hasExpr) {\n throw new Error(\n `OlapTable ${name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n\n // Validate cluster and explicit replication params are not both specified\n const hasCluster = typeof (resolvedConfig as any).cluster === \"string\";\n const hasKeeperPath =\n typeof (resolvedConfig as any).keeperPath === \"string\";\n const hasReplicaName =\n typeof (resolvedConfig as any).replicaName === \"string\";\n\n if (hasCluster && (hasKeeperPath || hasReplicaName)) {\n throw new Error(\n `OlapTable ${name}: Cannot specify both 'cluster' and explicit replication params ('keeperPath' or 'replicaName'). ` +\n `Use 'cluster' for auto-injected params, or use explicit 'keeperPath' and 'replicaName' without 'cluster'.`,\n );\n }\n\n super(name, resolvedConfig, schema, columns, validators);\n this.name = name;\n\n const tables = getMooseInternal().tables;\n const registryKey =\n this.config.version ? 
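/* Registry-key sketch (type name illustrative): new OlapTable<Foo>("foo", { version: "1.2" })\n is stored under "foo_1.2"; re-registering the same name and version throws unless\n MOOSE_CLIENT_ONLY=true, which tolerates Next.js HMR re-executing modules. */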
`${name}_${this.config.version}` : name;\n // In client-only mode (MOOSE_CLIENT_ONLY=true), allow duplicate registrations\n // to support Next.js HMR which re-executes modules without clearing the registry\n if (!isClientOnlyMode() && tables.has(registryKey)) {\n throw new Error(\n `OlapTable with name ${name} and version ${config?.version ?? \"unversioned\"} already exists`,\n );\n }\n tables.set(registryKey, this);\n }\n\n /**\n * Generates the versioned table name following Moose's naming convention\n * Format: {tableName}_{version_with_dots_replaced_by_underscores}\n */\n private generateTableName(): string {\n // Cache the table name since version rarely changes\n if (this._cachedTableName) {\n return this._cachedTableName;\n }\n\n const tableVersion = this.config.version;\n if (!tableVersion) {\n this._cachedTableName = this.name;\n } else {\n const versionSuffix = tableVersion.replace(/\\./g, \"_\");\n this._cachedTableName = `${this.name}_${versionSuffix}`;\n }\n\n return this._cachedTableName;\n }\n\n /**\n * Creates a fast hash of the ClickHouse configuration.\n * Uses crypto.createHash for better performance than JSON.stringify.\n *\n * @private\n */\n private createConfigHash(clickhouseConfig: any): string {\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const configString = `${clickhouseConfig.host}:${clickhouseConfig.port}:${clickhouseConfig.username}:${clickhouseConfig.password}:${effectiveDatabase}:${clickhouseConfig.useSSL}`;\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized ClickHouse client.\n * The client is cached and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be created.\n *\n * @private\n */\n private async getMemoizedClient(): Promise<{\n client: NodeClickHouseClient;\n config: RuntimeClickHouseConfig;\n }> {\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getClickhouseClient } = await import(\"../../commons\");\n\n const clickhouseConfig = await configRegistry.getClickHouseConfig();\n const currentConfigHash = this.createConfigHash(clickhouseConfig);\n\n // If we have a cached client and the config hasn't changed, reuse it\n if (this._memoizedClient && this._configHash === currentConfigHash) {\n return { client: this._memoizedClient, config: clickhouseConfig };\n }\n\n // Close existing client if config changed\n if (this._memoizedClient && this._configHash !== currentConfigHash) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing old client\n }\n }\n\n // Create new client with standard configuration\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const client = getClickhouseClient({\n username: clickhouseConfig.username,\n password: clickhouseConfig.password,\n database: effectiveDatabase,\n useSSL: clickhouseConfig.useSSL ? 
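/* Memoization sketch: the first insert builds a client keyed by a truncated sha256 of\n host:port:username:password:database:useSSL; later inserts reuse it until any of those\n values change, at which point the stale client is closed and replaced. */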
\"true\" : \"false\",\n host: clickhouseConfig.host,\n port: clickhouseConfig.port,\n });\n\n // Cache the new client and config hash\n this._memoizedClient = client;\n this._configHash = currentConfigHash;\n\n return { client, config: clickhouseConfig };\n }\n\n /**\n * Closes the memoized ClickHouse client if it exists.\n * This is useful for cleaning up connections when the table instance is no longer needed.\n * The client will be automatically recreated on the next insert call if needed.\n */\n async closeClient(): Promise<void> {\n if (this._memoizedClient) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing\n } finally {\n this._memoizedClient = undefined;\n this._configHash = undefined;\n }\n }\n }\n\n /**\n * Validates a single record using typia's comprehensive type checking.\n * This provides the most accurate validation as it uses the exact TypeScript type information.\n *\n * @param record The record to validate\n * @returns Validation result with detailed error information\n */\n validateRecord(record: unknown): {\n success: boolean;\n data?: T;\n errors?: string[];\n } {\n // Use injected typia validator if available\n if (this.validators?.validate) {\n try {\n const result = this.validators.validate(record);\n return {\n success: result.success,\n data: result.data,\n errors: result.errors?.map((err) =>\n typeof err === \"string\" ? err : JSON.stringify(err),\n ),\n };\n } catch (error) {\n return {\n success: false,\n errors: [error instanceof Error ? error.message : String(error)],\n };\n }\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Type guard function using typia's is() function.\n * Provides compile-time type narrowing for TypeScript.\n *\n * @param record The record to check\n * @returns True if record matches type T, with type narrowing\n */\n isValidRecord(record: unknown): record is T {\n if (this.validators?.is) {\n return this.validators.is(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Assert that a record matches type T, throwing detailed errors if not.\n * Uses typia's assert() function for the most detailed error reporting.\n *\n * @param record The record to assert\n * @returns The validated and typed record\n * @throws Detailed validation error if record doesn't match type T\n */\n assertValidRecord(record: unknown): T {\n if (this.validators?.assert) {\n return this.validators.assert(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Validates an array of records with comprehensive error reporting.\n * Uses the most appropriate validation method available (typia or basic).\n *\n * @param data Array of records to validate\n * @returns Detailed validation results\n */\n async validateRecords(data: unknown[]): Promise<ValidationResult<T>> {\n const valid: T[] = [];\n const invalid: ValidationError[] = [];\n\n // Pre-allocate arrays with estimated sizes to reduce reallocations\n valid.length = 0;\n invalid.length = 0;\n\n // Use for loop instead of forEach for better performance\n const dataLength = data.length;\n for (let i = 0; i < dataLength; i++) {\n const record = data[i];\n\n try {\n // Fast path: use typia's is() function first for type checking\n if (this.isValidRecord(record)) {\n valid.push(this.mapToClickhouseRecord(record));\n } else {\n // Only use expensive validateRecord for detailed errors when needed\n const result = this.validateRecord(record);\n if (result.success) {\n 
valid.push(this.mapToClickhouseRecord(record));\n } else {\n invalid.push({\n record,\n error: result.errors?.join(\", \") || \"Validation failed\",\n index: i,\n path: \"root\",\n });\n }\n }\n } catch (error) {\n invalid.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i,\n path: \"root\",\n });\n }\n }\n\n return {\n valid,\n invalid,\n total: dataLength,\n };\n }\n\n /**\n * Optimized batch retry that minimizes individual insert operations.\n * Groups records into smaller batches to reduce round trips while still isolating failures.\n *\n * @private\n */\n private async retryIndividualRecords(\n client: any,\n tableName: string,\n records: T[],\n ): Promise<{ successful: T[]; failed: FailedRecord<T>[] }> {\n const successful: T[] = [];\n const failed: FailedRecord<T>[] = [];\n\n // Instead of individual inserts, try smaller batches first (batches of 10)\n const RETRY_BATCH_SIZE = 10;\n const totalRecords = records.length;\n\n for (let i = 0; i < totalRecords; i += RETRY_BATCH_SIZE) {\n const batchEnd = Math.min(i + RETRY_BATCH_SIZE, totalRecords);\n const batch = records.slice(i, batchEnd);\n\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: batch,\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n // Add performance settings for retries\n max_insert_block_size: RETRY_BATCH_SIZE,\n max_block_size: RETRY_BATCH_SIZE,\n },\n });\n successful.push(...batch);\n } catch (batchError) {\n // If small batch fails, fall back to individual records\n for (let j = 0; j < batch.length; j++) {\n const record = batch[j];\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: [record],\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n },\n });\n successful.push(record);\n } catch (error) {\n failed.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i + j,\n });\n }\n }\n }\n }\n\n return { successful, failed };\n }\n\n /**\n * Validates input parameters and strategy compatibility\n * @private\n */\n private validateInsertParameters(\n data: T[] | Readable,\n options?: InsertOptions,\n ): { isStream: boolean; strategy: string; shouldValidate: boolean } {\n const isStream = data instanceof Readable;\n const strategy = options?.strategy || \"fail-fast\";\n const shouldValidate = options?.validate !== false;\n\n // Validate strategy compatibility with streams\n if (isStream && strategy === \"isolate\") {\n throw new Error(\n \"The 'isolate' error strategy is not supported with stream input. Use 'fail-fast' or 'discard' instead.\",\n );\n }\n\n // Validate that validation is not attempted on streams\n if (isStream && shouldValidate) {\n console.warn(\n \"Validation is not supported with stream input. 
Validation will be skipped.\",\n );\n }\n\n return { isStream, strategy, shouldValidate };\n }\n\n /**\n * Handles early return cases for empty data\n * @private\n */\n private handleEmptyData(\n data: T[] | Readable,\n isStream: boolean,\n ): InsertResult<T> | null {\n if (isStream && !data) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n if (!isStream && (!data || (data as T[]).length === 0)) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n return null;\n }\n\n /**\n * Performs pre-insertion validation for array data\n * @private\n */\n private async performPreInsertionValidation(\n data: T[],\n shouldValidate: boolean,\n strategy: string,\n options?: InsertOptions,\n ): Promise<{ validatedData: T[]; validationErrors: ValidationError[] }> {\n if (!shouldValidate) {\n return { validatedData: data, validationErrors: [] };\n }\n\n try {\n const validationResult = await this.validateRecords(data as unknown[]);\n const validatedData = validationResult.valid;\n const validationErrors = validationResult.invalid;\n\n if (validationErrors.length > 0) {\n this.handleValidationErrors(validationErrors, strategy, data, options);\n\n // Return appropriate data based on strategy\n switch (strategy) {\n case \"discard\":\n return { validatedData, validationErrors };\n case \"isolate\":\n return { validatedData: data, validationErrors };\n default:\n return { validatedData, validationErrors };\n }\n }\n\n return { validatedData, validationErrors };\n } catch (validationError) {\n if (strategy === \"fail-fast\") {\n throw validationError;\n }\n console.warn(\"Validation error:\", validationError);\n return { validatedData: data, validationErrors: [] };\n }\n }\n\n /**\n * Handles validation errors based on the specified strategy\n * @private\n */\n private handleValidationErrors(\n validationErrors: ValidationError[],\n strategy: string,\n data: T[],\n options?: InsertOptions,\n ): void {\n switch (strategy) {\n case \"fail-fast\":\n const firstError = validationErrors[0];\n throw new Error(\n `Validation failed for record at index ${firstError.index}: ${firstError.error}`,\n );\n\n case \"discard\":\n this.checkValidationThresholds(validationErrors, data.length, options);\n break;\n\n case \"isolate\":\n // For isolate strategy, validation errors will be handled in the final result\n break;\n }\n }\n\n /**\n * Checks if validation errors exceed configured thresholds\n * @private\n */\n private checkValidationThresholds(\n validationErrors: ValidationError[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const validationFailedCount = validationErrors.length;\n const validationFailedRatio = validationFailedCount / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n validationFailedCount > options.allowErrors\n ) {\n throw new Error(\n `Too many validation failures: ${validationFailedCount} > ${options.allowErrors}. Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n validationFailedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Validation failure ratio too high: ${validationFailedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Optimized insert options preparation with better memory management\n * @private\n */\n private prepareInsertOptions(\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n isStream: boolean,\n strategy: string,\n options?: InsertOptions,\n ): any {\n const insertOptions: any = {\n table: quoteIdentifier(tableName),\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n wait_end_of_query: 1, // Ensure at least once delivery for INSERT operations\n // Performance optimizations\n max_insert_block_size:\n isStream ? 100000 : Math.min(validatedData.length, 100000),\n max_block_size: 65536,\n // Use async inserts for better performance with large datasets\n async_insert: validatedData.length > 1000 ? 1 : 0,\n wait_for_async_insert: 1, // For at least once delivery\n },\n };\n\n // Handle stream vs array input\n if (isStream) {\n insertOptions.values = data;\n } else {\n insertOptions.values = validatedData;\n }\n\n // For discard strategy, add optimized ClickHouse error tolerance settings\n if (\n strategy === \"discard\" &&\n (options?.allowErrors !== undefined ||\n options?.allowErrorsRatio !== undefined)\n ) {\n if (options.allowErrors !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_num =\n options.allowErrors;\n }\n\n if (options.allowErrorsRatio !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_ratio =\n options.allowErrorsRatio;\n }\n }\n\n return insertOptions;\n }\n\n /**\n * Creates success result for completed insertions\n * @private\n */\n private createSuccessResult(\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n strategy: string,\n ): InsertResult<T> {\n if (isStream) {\n return {\n successful: -1, // -1 indicates stream mode where count is unknown\n failed: 0,\n total: -1,\n };\n }\n\n const insertedCount = validatedData.length;\n const totalProcessed =\n shouldValidate ? (data as T[]).length : insertedCount;\n\n const result: InsertResult<T> = {\n successful: insertedCount,\n failed: shouldValidate ? validationErrors.length : 0,\n total: totalProcessed,\n };\n\n // Add failed records if there are validation errors and using discard strategy\n if (\n shouldValidate &&\n validationErrors.length > 0 &&\n strategy === \"discard\"\n ) {\n result.failedRecords = validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }));\n }\n\n return result;\n }\n\n /**\n * Handles insertion errors based on the specified strategy\n * @private\n */\n private async handleInsertionError(\n batchError: any,\n strategy: string,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n switch (strategy) {\n case \"fail-fast\":\n throw new Error(\n `Failed to insert data into table ${tableName}: ${batchError}`,\n );\n\n case \"discard\":\n throw new Error(\n `Too many errors during insert into table ${tableName}. 
Error threshold exceeded: ${batchError}`,\n );\n\n case \"isolate\":\n return await this.handleIsolateStrategy(\n batchError,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n\n default:\n throw new Error(`Unknown error strategy: ${strategy}`);\n }\n }\n\n /**\n * Handles the isolate strategy for insertion errors\n * @private\n */\n private async handleIsolateStrategy(\n batchError: any,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n if (isStream) {\n throw new Error(\n `Isolate strategy is not supported with stream input: ${batchError}`,\n );\n }\n\n try {\n const { client } = await this.getMemoizedClient();\n const skipValidationOnRetry = options?.skipValidationOnRetry || false;\n const retryData = skipValidationOnRetry ? (data as T[]) : validatedData;\n\n const { successful, failed } = await this.retryIndividualRecords(\n client,\n tableName,\n retryData,\n );\n\n // Combine validation errors with insertion errors\n const allFailedRecords: FailedRecord<T>[] = [\n // Validation errors (if any and not skipping validation on retry)\n ...(shouldValidate && !skipValidationOnRetry ?\n validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }))\n : []),\n // Insertion errors\n ...failed,\n ];\n\n this.checkInsertionThresholds(\n allFailedRecords,\n (data as T[]).length,\n options,\n );\n\n return {\n successful: successful.length,\n failed: allFailedRecords.length,\n total: (data as T[]).length,\n failedRecords: allFailedRecords,\n };\n } catch (isolationError) {\n throw new Error(\n `Failed to insert data into table ${tableName} during record isolation: ${isolationError}`,\n );\n }\n }\n\n /**\n * Checks if insertion errors exceed configured thresholds\n * @private\n */\n private checkInsertionThresholds(\n failedRecords: FailedRecord<T>[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const totalFailed = failedRecords.length;\n const failedRatio = totalFailed / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n totalFailed > options.allowErrors\n ) {\n throw new Error(\n `Too many failed records: ${totalFailed} > ${options.allowErrors}. Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n failedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Failed record ratio too high: ${failedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Recursively transforms a record to match ClickHouse's JSONEachRow requirements\n *\n * - For every Array(Nested(...)) field at any depth, each item is wrapped in its own array and recursively processed.\n * - For every Nested struct (not array), it recurses into the struct.\n * - This ensures compatibility with kafka_clickhouse_sync\n *\n * @param record The input record to transform (may be deeply nested)\n * @param columns The schema columns for this level (defaults to this.columnArray at the top level)\n * @returns The transformed record, ready for ClickHouse JSONEachRow insertion\n */\n private mapToClickhouseRecord(\n record: any,\n columns: Column[] = this.columnArray,\n ): any {\n const result = { ...record };\n for (const col of columns) {\n const value = record[col.name];\n const dt = col.data_type;\n\n if (isArrayNestedType(dt)) {\n // For Array(Nested(...)), wrap each item in its own array and recurse\n if (\n Array.isArray(value) &&\n (value.length === 0 || typeof value[0] === \"object\")\n ) {\n result[col.name] = value.map((item) => [\n this.mapToClickhouseRecord(item, dt.elementType.columns),\n ]);\n }\n } else if (isNestedType(dt)) {\n // For Nested struct (not array), recurse into it\n if (value && typeof value === \"object\") {\n result[col.name] = this.mapToClickhouseRecord(value, dt.columns);\n }\n }\n // All other types: leave as is for now\n }\n return result;\n }\n\n /**\n * Inserts data directly into the ClickHouse table with enhanced error handling and validation.\n * This method establishes a direct connection to ClickHouse using the project configuration\n * and inserts the provided data into the versioned table.\n *\n * PERFORMANCE OPTIMIZATIONS:\n * - Memoized client connections with fast config hashing\n * - Single-pass validation with pre-allocated arrays\n * - Batch-optimized retry strategy (batches of 10, then individual)\n * - Optimized ClickHouse settings for large datasets\n * - Reduced memory allocations and object creation\n *\n * Uses advanced typia validation when available for comprehensive type checking,\n * with fallback to basic validation for compatibility.\n *\n * The ClickHouse client is memoized and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be automatically created.\n *\n * @param data Array of objects conforming to the table schema, or a Node.js Readable stream\n * @param options Optional configuration for error handling, validation, and insertion behavior\n * @returns Promise resolving to detailed insertion results\n * @throws {ConfigError} When configuration cannot be read or parsed\n * @throws {ClickHouseError} When insertion fails based on the error strategy\n * @throws {ValidationError} When validation fails and strategy is 'fail-fast'\n *\n * @example\n * ```typescript\n * // Create an OlapTable instance (typia validators auto-injected)\n * const userTable = new OlapTable<User>('users');\n *\n * // Insert with comprehensive typia validation\n * const result1 = await userTable.insert([\n * { id: 1, name: 'John', email: 'john@example.com' },\n * { id: 2, name: 'Jane', email: 'jane@example.com' }\n * ]);\n *\n * // Insert data with stream input (validation not available for streams)\n * const dataStream = new Readable({\n * objectMode: true,\n * read() { // Stream implementation }\n * });\n * const result2 = await userTable.insert(dataStream, { strategy: 'fail-fast' });\n *\n * 
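// A sketch of the 'discard' strategy: invalid rows are dropped, within the configured thresholds\n * const resultD = await userTable.insert(mixedData, { strategy: 'discard', allowErrorsRatio: 0.1 });\n *\n * 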
// Insert with validation disabled for performance\n * const result3 = await userTable.insert(data, { validate: false });\n *\n * // Insert with error handling strategies\n * const result4 = await userTable.insert(mixedData, {\n * strategy: 'isolate',\n * allowErrorsRatio: 0.1,\n * validate: true // Use typia validation (default)\n * });\n *\n * // Optional: Clean up connection when completely done\n * await userTable.closeClient();\n * ```\n */\n async insert(\n data: T[] | Readable,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n // Validate input parameters and strategy compatibility\n const { isStream, strategy, shouldValidate } =\n this.validateInsertParameters(data, options);\n\n // Handle early return cases for empty data\n const emptyResult = this.handleEmptyData(data, isStream);\n if (emptyResult) {\n return emptyResult;\n }\n\n // Pre-insertion validation for arrays (optimized single-pass)\n let validatedData: T[] = [];\n let validationErrors: ValidationError[] = [];\n\n if (!isStream && shouldValidate) {\n const validationResult = await this.performPreInsertionValidation(\n data as T[],\n shouldValidate,\n strategy,\n options,\n );\n validatedData = validationResult.validatedData;\n validationErrors = validationResult.validationErrors;\n } else {\n // No validation or stream input\n validatedData = isStream ? [] : (data as T[]);\n }\n\n // Get memoized client and generate cached table name\n const { client } = await this.getMemoizedClient();\n const tableName = this.generateTableName();\n\n try {\n // Prepare and execute insertion with optimized settings\n const insertOptions = this.prepareInsertOptions(\n tableName,\n data,\n validatedData,\n isStream,\n strategy,\n options,\n );\n\n await client.insert(insertOptions);\n\n // Return success result\n return this.createSuccessResult(\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n strategy,\n );\n } catch (batchError) {\n // Handle insertion failure based on strategy with optimized retry\n return await this.handleInsertionError(\n batchError,\n strategy,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n }\n // Note: We don't close the client here since it's memoized for reuse\n // Use closeClient() method if you need to explicitly close the connection\n }\n\n // Note: Static factory methods (withS3Queue, withReplacingMergeTree, withMergeTree)\n // were removed in ENG-856. 
Use direct configuration instead, e.g.:\n // new OlapTable(name, { engine: ClickHouseEngines.ReplacingMergeTree, orderByFields: [\"id\"], ver: \"updated_at\" })\n}\n","/**\n * @fileoverview Stream SDK for data streaming operations in Moose.\n *\n * This module provides the core streaming functionality including:\n * - Stream creation and configuration\n * - Message transformations between streams\n * - Consumer registration for message processing\n * - Dead letter queue handling for error recovery\n *\n * @module Stream\n */\n\nimport { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { dlqColumns, dlqSchema, getMooseInternal } from \"../internal\";\nimport { OlapTable } from \"./olapTable\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport type {\n RuntimeKafkaConfig,\n ConfigurationRegistry,\n} from \"../../config/runtime\";\nimport { createHash } from \"node:crypto\";\nimport { Logger, Producer } from \"../../commons\";\nimport { getSourceFileFromStack } from \"../utils/stackTrace\";\n\n/**\n * Represents zero, one, or many values of type T.\n * Used for flexible return types in transformations where a single input\n * can produce no output, one output, or multiple outputs.\n *\n * @template T The type of the value(s)\n * @example\n * ```typescript\n * // Can return a single value\n * const single: ZeroOrMany<string> = \"hello\";\n *\n * // Can return an array\n * const multiple: ZeroOrMany<string> = [\"hello\", \"world\"];\n *\n * // Can return null/undefined to filter out\n * const filtered: ZeroOrMany<string> = null;\n * ```\n */\nexport type ZeroOrMany<T> = T | T[] | undefined | null;\n\n/**\n * Function type for transforming records from one type to another.\n * Supports both synchronous and asynchronous transformations.\n *\n * @template T The input record type\n * @template U The output record type\n * @param record The input record to transform\n * @returns The transformed record(s), or null/undefined to filter out\n *\n * @example\n * ```typescript\n * const transform: SyncOrAsyncTransform<InputType, OutputType> = (record) => {\n * return { ...record, processed: true };\n * };\n * ```\n */\nexport type SyncOrAsyncTransform<T, U> = (\n record: T,\n) => ZeroOrMany<U> | Promise<ZeroOrMany<U>>;\n\n/**\n * Function type for consuming records without producing output.\n * Used for side effects like logging, external API calls, or database writes.\n *\n * @template T The record type to consume\n * @param record The record to process\n * @returns Promise<void> or void\n *\n * @example\n * ```typescript\n * const consumer: Consumer<UserEvent> = async (event) => {\n * await sendToAnalytics(event);\n * };\n * ```\n */\nexport type Consumer<T> = (record: T) => Promise<void> | void;\n\n/**\n * Configuration options for stream transformations.\n *\n * @template T The type of records being transformed\n */\nexport interface TransformConfig<T> {\n /**\n * Optional version identifier for this transformation.\n * Multiple transformations to the same destination can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional metadata for documentation and tracking purposes.\n */\n metadata?: { description?: string };\n\n /**\n * Optional dead letter queue for handling transformation failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is 
provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this transform was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\n/**\n * Configuration options for stream consumers.\n *\n * @template T The type of records being consumed\n */\nexport interface ConsumerConfig<T> {\n /**\n * Optional version identifier for this consumer.\n * Multiple consumers can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional dead letter queue for handling consumer failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this consumer was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\nexport type SchemaRegistryEncoding = \"JSON\" | \"AVRO\" | \"PROTOBUF\";\n\nexport type SchemaRegistryReference =\n | { id: number }\n | { subjectLatest: string }\n | { subject: string; version: number };\n\nexport interface KafkaSchemaConfig {\n kind: SchemaRegistryEncoding;\n reference: SchemaRegistryReference;\n}\n\n/**\n * Represents a message routed to a specific destination stream.\n * Used internally by the multi-transform functionality to specify\n * where transformed messages should be sent.\n *\n * @internal\n */\nclass RoutedMessage {\n /** The destination stream for the message */\n destination: Stream<any>;\n\n /** The message value(s) to send */\n values: ZeroOrMany<any>;\n\n /**\n * Creates a new routed message.\n *\n * @param destination The target stream\n * @param values The message(s) to route\n */\n constructor(destination: Stream<any>, values: ZeroOrMany<any>) {\n this.destination = destination;\n this.values = values;\n }\n}\n\n/**\n * Configuration options for a data stream (e.g., a Redpanda topic).\n * @template T The data type of the messages in the stream.\n */\nexport interface StreamConfig<T> {\n /**\n * Specifies the number of partitions for the stream. Affects parallelism and throughput.\n */\n parallelism?: number;\n /**\n * Specifies the data retention period for the stream in seconds. Messages older than this may be deleted.\n */\n retentionPeriod?: number;\n /**\n * An optional destination OLAP table where messages from this stream should be automatically ingested.\n */\n destination?: OlapTable<T>;\n /**\n * An optional version string for this configuration. Can be used for tracking changes or managing deployments.\n */\n version?: string;\n metadata?: { description?: string };\n lifeCycle?: LifeCycle;\n\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n\n /** Optional Schema Registry configuration for this stream */\n schemaConfig?: KafkaSchemaConfig;\n}\n\n/**\n * Represents a data stream, typically corresponding to a Redpanda topic.\n * Provides a typed interface for producing to and consuming from the stream, and defining transformations.\n *\n * @template T The data type of the messages flowing through the stream. 
The structure of T defines the message schema.\n */\nexport class Stream<T> extends TypedBase<T, StreamConfig<T>> {\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n /** @internal Memoized KafkaJS producer for reusing connections across sends */\n private _memoizedProducer?: Producer;\n /** @internal Hash of the configuration used to create the memoized Kafka producer */\n private _kafkaConfigHash?: string;\n\n /**\n * Creates a new Stream instance.\n * @param name The name of the stream. This name is used for the underlying Redpanda topic.\n * @param config Optional configuration for the stream.\n */\n constructor(name: string, config?: StreamConfig<T>);\n\n /**\n * @internal\n * Note: `validators` parameter is a positional placeholder (always undefined for Stream).\n * It exists because TypedBase has validators as the 5th param, and we need to pass\n * allowExtraFields as the 6th param. Stream doesn't use validators.\n */\n constructor(\n name: string,\n config: StreamConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: undefined,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: undefined,\n allowExtraFields?: boolean,\n ) {\n super(name, config ?? {}, schema, columns, undefined, allowExtraFields);\n const streams = getMooseInternal().streams;\n if (streams.has(name)) {\n throw new Error(`Stream with name ${name} already exists`);\n }\n streams.set(name, this);\n this.defaultDeadLetterQueue = this.config.defaultDeadLetterQueue;\n }\n\n /**\n * Internal map storing transformation configurations.\n * Maps destination stream names to arrays of transformation functions and their configs.\n *\n * @internal\n */\n _transformations = new Map<\n string,\n [Stream<any>, SyncOrAsyncTransform<T, any>, TransformConfig<T>][]\n >();\n\n /**\n * Internal function for multi-stream transformations.\n * Allows a single transformation to route messages to multiple destinations.\n *\n * @internal\n */\n _multipleTransformations?: (record: T) => [RoutedMessage];\n\n /**\n * Internal array storing consumer configurations.\n *\n * @internal\n */\n _consumers = new Array<{\n consumer: Consumer<T>;\n config: ConsumerConfig<T>;\n }>();\n\n /**\n * Builds the full Kafka topic name including optional namespace and version suffix.\n * Version suffix is appended as _x_y_z where dots in version are replaced with underscores.\n */\n private buildFullTopicName(namespace?: string): string {\n const versionSuffix =\n this.config.version ? 
`_${this.config.version.replace(/\\./g, \"_\")}` : \"\";\n const base = `${this.name}${versionSuffix}`;\n return namespace !== undefined && namespace.length > 0 ?\n `${namespace}.${base}`\n : base;\n }\n\n /**\n * Creates a fast hash string from relevant Kafka configuration fields.\n */\n private createConfigHash(kafkaConfig: RuntimeKafkaConfig): string {\n const configString = [\n kafkaConfig.broker,\n kafkaConfig.messageTimeoutMs,\n kafkaConfig.saslUsername,\n kafkaConfig.saslPassword,\n kafkaConfig.saslMechanism,\n kafkaConfig.securityProtocol,\n kafkaConfig.namespace,\n ].join(\":\");\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized KafkaJS producer using runtime configuration.\n */\n private async getMemoizedProducer(): Promise<{\n producer: Producer;\n kafkaConfig: RuntimeKafkaConfig;\n }> {\n // dynamic import to keep Stream objects browser compatible\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getKafkaProducer } = await import(\"../../commons\");\n\n const kafkaConfig = await (configRegistry as any).getKafkaConfig();\n const currentHash = this.createConfigHash(kafkaConfig);\n\n if (this._memoizedProducer && this._kafkaConfigHash === currentHash) {\n return { producer: this._memoizedProducer, kafkaConfig };\n }\n\n // Close existing producer if config changed\n if (this._memoizedProducer && this._kafkaConfigHash !== currentHash) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n }\n this._memoizedProducer = undefined;\n }\n\n const clientId = `moose-sdk-stream-${this.name}`;\n const logger: Logger = {\n logPrefix: clientId,\n log: (message: string): void => {\n console.log(`${clientId}: ${message}`);\n },\n error: (message: string): void => {\n console.error(`${clientId}: ${message}`);\n },\n warn: (message: string): void => {\n console.warn(`${clientId}: ${message}`);\n },\n };\n\n const producer = await getKafkaProducer(\n {\n clientId,\n broker: kafkaConfig.broker,\n securityProtocol: kafkaConfig.securityProtocol,\n saslUsername: kafkaConfig.saslUsername,\n saslPassword: kafkaConfig.saslPassword,\n saslMechanism: kafkaConfig.saslMechanism,\n },\n logger,\n );\n\n this._memoizedProducer = producer;\n this._kafkaConfigHash = currentHash;\n\n return { producer, kafkaConfig };\n }\n\n /**\n * Closes the memoized Kafka producer if it exists.\n */\n async closeProducer(): Promise<void> {\n if (this._memoizedProducer) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n } finally {\n this._memoizedProducer = undefined;\n this._kafkaConfigHash = undefined;\n }\n }\n }\n\n /**\n * Sends one or more records to this stream's Kafka topic.\n * Values are JSON-serialized as message values.\n */\n async send(values: ZeroOrMany<T>): Promise<void> {\n // Normalize to flat array of records\n const flat: T[] =\n Array.isArray(values) ? values\n : values !== undefined && values !== null ? 
[values as T]\n : [];\n\n if (flat.length === 0) return;\n\n const { producer, kafkaConfig } = await this.getMemoizedProducer();\n const topic = this.buildFullTopicName(kafkaConfig.namespace);\n\n // Use Schema Registry JSON envelope if configured\n const sr = this.config.schemaConfig;\n if (sr && sr.kind === \"JSON\") {\n const schemaRegistryUrl = kafkaConfig.schemaRegistryUrl;\n if (!schemaRegistryUrl) {\n throw new Error(\"Schema Registry URL not configured\");\n }\n\n const {\n default: { SchemaRegistry },\n } = await import(\"@kafkajs/confluent-schema-registry\");\n const registry = new SchemaRegistry({ host: schemaRegistryUrl });\n\n let schemaId: undefined | number = undefined;\n\n if (\"id\" in sr.reference) {\n schemaId = sr.reference.id;\n } else if (\"subjectLatest\" in sr.reference) {\n schemaId = await registry.getLatestSchemaId(sr.reference.subjectLatest);\n } else if (\"subject\" in sr.reference) {\n schemaId = await registry.getRegistryId(\n sr.reference.subject,\n sr.reference.version,\n );\n }\n\n if (schemaId === undefined) {\n throw new Error(\"Malformed schema reference.\");\n }\n\n const encoded = await Promise.all(\n flat.map((v) =>\n registry.encode(schemaId, v as unknown as Record<string, unknown>),\n ),\n );\n await producer.send({\n topic,\n messages: encoded.map((value) => ({ value })),\n });\n return;\n } else if (sr !== undefined) {\n throw new Error(\"Currently only JSON Schema is supported.\");\n }\n\n await producer.send({\n topic,\n messages: flat.map((v) => ({ value: JSON.stringify(v) })),\n });\n }\n\n /**\n * Adds a transformation step that processes messages from this stream and sends the results to a destination stream.\n * Multiple transformations to the same destination stream can be added if they have distinct `version` identifiers in their config.\n *\n * @template U The data type of the messages in the destination stream.\n * @param destination The destination stream for the transformed messages.\n * @param transformation A function that takes a message of type T and returns zero or more messages of type U (or a Promise thereof).\n * Return `null` or `undefined` or an empty array `[]` to filter out a message. Return an array to emit multiple messages.\n * @param config Optional configuration for this specific transformation step, like a version.\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<T, U>,\n config?: TransformConfig<T>,\n ) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const transformConfig: TransformConfig<T> = {\n ...(config ?? 
{}),\n sourceFile,\n };\n if (transformConfig.deadLetterQueue === undefined) {\n transformConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n\n if (this._transformations.has(destination.name)) {\n const existingTransforms = this._transformations.get(destination.name)!;\n const hasVersion = existingTransforms.some(\n ([_, __, cfg]) => cfg.version === transformConfig.version,\n );\n\n if (!hasVersion) {\n existingTransforms.push([destination, transformation, transformConfig]);\n }\n } else {\n this._transformations.set(destination.name, [\n [destination, transformation, transformConfig],\n ]);\n }\n }\n\n /**\n * Adds a consumer function that processes messages from this stream.\n * Multiple consumers can be added if they have distinct `version` identifiers in their config.\n *\n * @param consumer A function that takes a message of type T and performs an action (e.g., side effect, logging). Should return void or Promise<void>.\n * @param config Optional configuration for this specific consumer, like a version.\n */\n addConsumer(consumer: Consumer<T>, config?: ConsumerConfig<T>) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const consumerConfig: ConsumerConfig<T> = {\n ...(config ?? {}),\n sourceFile,\n };\n if (consumerConfig.deadLetterQueue === undefined) {\n consumerConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n const hasVersion = this._consumers.some(\n (existing) => existing.config.version === consumerConfig.version,\n );\n\n if (!hasVersion) {\n this._consumers.push({ consumer, config: consumerConfig });\n }\n }\n\n /**\n * Helper method for `addMultiTransform` to specify the destination and values for a routed message.\n * @param values The value or values to send to this stream.\n * @returns A `RoutedMessage` object associating the values with this stream.\n *\n * @example\n * ```typescript\n * sourceStream.addMultiTransform((record) => [\n * destinationStream1.routed(transformedRecord1),\n * destinationStream2.routed([record2a, record2b])\n * ]);\n * ```\n */\n routed = (values: ZeroOrMany<T>) => new RoutedMessage(this, values);\n\n /**\n * Adds a single transformation function that can route messages to multiple destination streams.\n * This is an alternative to adding multiple individual `addTransform` calls.\n * Only one multi-transform function can be added per stream.\n *\n * @param transformation A function that takes a message of type T and returns an array of `RoutedMessage` objects,\n * each specifying a destination stream and the message(s) to send to it.\n */\n addMultiTransform(transformation: (record: T) => [RoutedMessage]) {\n this._multipleTransformations = transformation;\n }\n}\n\n/**\n * Base model for dead letter queue entries.\n * Contains the original failed record along with error information.\n */\nexport interface DeadLetterModel {\n /** The original record that failed processing */\n originalRecord: Record<string, any>;\n\n /** Human-readable error message describing the failure */\n errorMessage: string;\n\n /** Classification of the error type (e.g., \"ValidationError\", \"TransformError\") */\n errorType: string;\n\n /** Timestamp when the failure occurred */\n failedAt: Date;\n\n /** The source component where the failure occurred */\n source: \"api\" | \"transform\" | \"table\";\n}\n\n/**\n * Enhanced dead letter model with type recovery functionality.\n * Extends the base model with the ability to recover the original typed record.\n *\n * 
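A minimal sketch of typed reprocessing (\`retryStream\` is a hypothetical destination stream):\n *\n * @example\n * \`\`\`typescript\n * dlq.addTransform(retryStream, (dl) => dl.asTyped()); // re-emit the original, typed record\n * \`\`\`\n *\n * 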
@template T The original record type before failure\n */\nexport interface DeadLetter<T> extends DeadLetterModel {\n /**\n * Recovers the original record as its typed form.\n * Useful for reprocessing failed records with proper type safety.\n *\n * @returns The original record cast to type T\n */\n asTyped: () => T;\n}\n\n/**\n * Internal function to attach type guard functionality to dead letter records.\n *\n * @internal\n * @template T The original record type\n * @param dl The dead letter model to enhance\n * @param typeGuard Function to validate and cast the original record\n */\nfunction attachTypeGuard<T>(\n dl: DeadLetterModel,\n typeGuard: (input: any) => T,\n): asserts dl is DeadLetter<T> {\n (dl as any).asTyped = () => typeGuard(dl.originalRecord);\n}\n\n/**\n * Specialized stream for handling failed records (dead letters).\n * Provides type-safe access to failed records for reprocessing or analysis.\n *\n * @template T The original record type that failed processing\n *\n * @example\n * ```typescript\n * const dlq = new DeadLetterQueue<UserEvent>(\"user-events-dlq\");\n *\n * dlq.addConsumer(async (deadLetter) => {\n * const originalEvent = deadLetter.asTyped();\n * console.log(`Failed event: ${deadLetter.errorMessage}`);\n * // Potentially reprocess or alert\n * });\n * ```\n */\nexport class DeadLetterQueue<T> extends Stream<DeadLetterModel> {\n /**\n * Creates a new DeadLetterQueue instance.\n * @param name The name of the dead letter queue stream\n * @param config Optional configuration for the stream. The metadata property is always present and includes stackTrace.\n */\n constructor(name: string, config?: StreamConfig<DeadLetterModel>);\n\n /** @internal **/\n constructor(\n name: string,\n config: StreamConfig<DeadLetterModel>,\n validate: (originalRecord: any) => T,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<DeadLetterModel>,\n typeGuard?: (originalRecord: any) => T,\n ) {\n if (typeGuard === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n super(name, config ?? 
{}, dlqSchema, dlqColumns, undefined, false);\n this.typeGuard = typeGuard;\n getMooseInternal().streams.set(name, this);\n }\n\n /**\n * Internal type guard function for validating and casting original records.\n *\n * @internal\n */\n private typeGuard: (originalRecord: any) => T;\n\n /**\n * Adds a transformation step for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @template U The output type for the transformation\n * @param destination The destination stream for transformed messages\n * @param transformation Function to transform dead letter records\n * @param config Optional transformation configuration\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<DeadLetter<T>, U>,\n config?: TransformConfig<DeadLetterModel>,\n ) {\n const withValidate: SyncOrAsyncTransform<DeadLetterModel, U> = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addTransform(destination, withValidate, config);\n }\n\n /**\n * Adds a consumer for dead letter records.\n * The consumer function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param consumer Function to process dead letter records\n * @param config Optional consumer configuration\n */\n addConsumer(\n consumer: Consumer<DeadLetter<T>>,\n config?: ConsumerConfig<DeadLetterModel>,\n ) {\n const withValidate: Consumer<DeadLetterModel> = (deadLetter) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return consumer(deadLetter);\n };\n super.addConsumer(withValidate, config);\n }\n\n /**\n * Adds a multi-stream transformation for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param transformation Function to route dead letter records to multiple destinations\n */\n addMultiTransform(\n transformation: (record: DeadLetter<T>) => [RoutedMessage],\n ) {\n const withValidate: (record: DeadLetterModel) => [RoutedMessage] = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addMultiTransform(withValidate);\n }\n}\n","import { getMooseInternal } from \"../internal\";\n\n/**\n * Context passed to task handlers. Single param to future-proof API changes.\n *\n * - state: shared mutable state for the task and its lifecycle hooks\n * - input: optional typed input for the task (undefined when task has no input)\n *\n * If the task declares an input type (T != null), \`input\` is required and\n * strongly typed. For no-input tasks (T = null), \`input\` is omitted/optional.\n */\nexport type TaskContext<TInput> =\n TInput extends null ? 
{ state: any; input?: null }\n : { state: any; input: TInput };\n\n/**\n * Configuration options for defining a task within a workflow.\n *\n * @template T - The input type for the task\n * @template R - The return type for the task\n */\nexport interface TaskConfig<T, R> {\n /** The main function that executes the task logic */\n run: (context: TaskContext<T>) => Promise<R>;\n\n /**\n * Optional array of tasks to execute after this task completes successfully.\n * Supports all combinations of input types (real type or null) and output types (real type or void).\n * When this task returns void, onComplete tasks expect null as input.\n * When this task returns a real type, onComplete tasks expect that type as input.\n */\n onComplete?: (\n | Task<R extends void ? null : R, any>\n | Task<R extends void ? null : R, void>\n )[];\n\n /** Optional function that is called when the task is cancelled. */\n onCancel?: (context: TaskContext<T>) => Promise<void>;\n\n /** Optional timeout duration for the task execution (e.g., \"30s\", \"5m\") */\n timeout?: string;\n\n /** Optional number of retry attempts if the task fails */\n retries?: number;\n}\n\n/**\n * Represents a single task within a workflow system.\n *\n * A Task encapsulates the execution logic, completion handlers, and configuration\n * for a unit of work that can be chained with other tasks in a workflow.\n *\n * @template T - The input type that this task expects\n * @template R - The return type that this task produces\n */\nexport class Task<T, R> {\n /**\n * Creates a new Task instance.\n *\n * @param name - Unique identifier for the task\n * @param config - Configuration object defining the task behavior\n *\n * @example\n * \`\`\`typescript\n * // No input, no output\n * const task1 = new Task<null, void>(\"task1\", {\n * run: async () => {\n * console.log(\"No input/output\");\n * }\n * });\n *\n * // No input, but has output\n * const task2 = new Task<null, OutputType>(\"task2\", {\n * run: async () => {\n * return someOutput;\n * }\n * });\n *\n * // Has input, no output (the handler receives a TaskContext, so destructure \`input\`)\n * const task3 = new Task<InputType, void>(\"task3\", {\n * run: async ({ input }) => {\n * // process input but return nothing\n * }\n * });\n *\n * // Has both input and output\n * const task4 = new Task<InputType, OutputType>(\"task4\", {\n * run: async ({ input }) => {\n * return process(input);\n * }\n * });\n * \`\`\`\n */\n constructor(\n readonly name: string,\n readonly config: TaskConfig<T, R>,\n ) {}\n}\n\n/**\n * Configuration options for defining a workflow.\n *\n * A workflow orchestrates the execution of multiple tasks in a defined sequence\n * or pattern, with support for scheduling, retries, and timeouts.\n */\nexport interface WorkflowConfig {\n /**\n * The initial task that begins the workflow execution.\n * Supports all combinations of input types (real type or null) and output types (real type or void):\n * - Task<null, OutputType>: No input, returns a type\n * - Task<null, void>: No input, returns nothing\n * - Task<InputType, OutputType>: Has input, returns a type\n * - Task<InputType, void>: Has input, returns nothing\n */\n startingTask:\n | Task<null, any>\n | Task<null, void>\n | Task<any, any>\n | Task<any, void>;\n\n /** Optional number of retry attempts if the entire workflow fails */\n retries?: number;\n\n /** Optional timeout duration for the entire workflow execution (e.g., \"10m\", \"1h\") */\n timeout?: string;\n\n /** Optional cron-style 
schedule string for automated workflow execution */\n schedule?: string;\n}\n\n/**\n * Represents a complete workflow composed of interconnected tasks.\n *\n * A Workflow manages the execution flow of multiple tasks, handling scheduling,\n * error recovery, and task orchestration. Once created, workflows are automatically\n * registered with the internal Moose system.\n *\n * @example\n * ```typescript\n * const dataProcessingWorkflow = new Workflow(\"dataProcessing\", {\n * startingTask: extractDataTask,\n * schedule: \"0 2 * * *\", // Run daily at 2 AM\n * timeout: \"1h\",\n * retries: 2\n * });\n * ```\n */\nexport class Workflow {\n /**\n * Creates a new Workflow instance and registers it with the Moose system.\n *\n * @param name - Unique identifier for the workflow\n * @param config - Configuration object defining the workflow behavior and task orchestration\n * @throws {Error} When the workflow contains null/undefined tasks or infinite loops\n */\n constructor(\n readonly name: string,\n readonly config: WorkflowConfig,\n ) {\n const workflows = getMooseInternal().workflows;\n if (workflows.has(name)) {\n throw new Error(`Workflow with name ${name} already exists`);\n }\n this.validateTaskGraph(config.startingTask, name);\n workflows.set(name, this);\n }\n\n /**\n * Validates the task graph to ensure there are no null tasks or infinite loops.\n *\n * @private\n * @param startingTask - The starting task to begin validation from\n * @param workflowName - The name of the workflow being validated (for error messages)\n * @throws {Error} When null/undefined tasks are found or infinite loops are detected\n */\n private validateTaskGraph(\n startingTask: Task<any, any> | null | undefined,\n workflowName: string,\n ): void {\n if (startingTask === null || startingTask === undefined) {\n throw new Error(\n `Workflow \"${workflowName}\" has a null or undefined starting task`,\n );\n }\n\n const visited = new Set<string>();\n const recursionStack = new Set<string>();\n\n const validateTask = (\n task: Task<any, any> | null | undefined,\n currentPath: string[],\n ): void => {\n if (task === null || task === undefined) {\n const pathStr =\n currentPath.length > 0 ? 
currentPath.join(\" -> \") + \" -> \" : \"\";\n throw new Error(\n `Workflow \"${workflowName}\" contains a null or undefined task in the task chain: ${pathStr}null`,\n );\n }\n\n const taskName = task.name;\n\n if (recursionStack.has(taskName)) {\n const cycleStartIndex = currentPath.indexOf(taskName);\n const cyclePath =\n cycleStartIndex >= 0 ?\n currentPath.slice(cycleStartIndex).concat(taskName)\n : currentPath.concat(taskName);\n throw new Error(\n `Workflow \"${workflowName}\" contains an infinite loop in task chain: ${cyclePath.join(\" -> \")}`,\n );\n }\n\n if (visited.has(taskName)) {\n // Already processed this task and its children\n return;\n }\n\n visited.add(taskName);\n recursionStack.add(taskName);\n\n if (task.config.onComplete) {\n for (const nextTask of task.config.onComplete) {\n validateTask(nextTask, [...currentPath, taskName]);\n }\n }\n\n recursionStack.delete(taskName);\n };\n\n validateTask(startingTask, []);\n }\n}\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { getMooseInternal } from \"../internal\";\nimport { DeadLetterQueue, Stream } from \"./stream\";\n\n/**\n * @template T The data type of the messages expected by the destination stream.\n */\nexport interface IngestConfig<T> {\n /**\n * The destination stream where the ingested data should be sent.\n */\n destination: Stream<T>;\n\n deadLetterQueue?: DeadLetterQueue<T>;\n /**\n * An optional version string for this configuration.\n */\n version?: string;\n /**\n * An optional custom path for the ingestion endpoint.\n */\n path?: string;\n metadata?: { description?: string };\n}\n\n/**\n * Represents an Ingest API endpoint, used for sending data into a Moose system, typically writing to a Stream.\n * Provides a typed interface for the expected data format.\n *\n * @template T The data type of the records that this API endpoint accepts. The structure of T defines the expected request body schema.\n */\nexport class IngestApi<T> extends TypedBase<T, IngestConfig<T>> {\n /**\n * Creates a new IngestApi instance.\n * @param name The name of the ingest API endpoint.\n * @param config Optional configuration for the ingest API.\n */\n constructor(name: string, config?: IngestConfig<T>);\n\n /**\n * @internal\n * Note: `validators` parameter is a positional placeholder (always undefined for IngestApi).\n * It exists because TypedBase has validators as the 5th param, and we need to pass\n * allowExtraFields as the 6th param. 
IngestApi doesn't use validators.\n */\n constructor(\n name: string,\n config: IngestConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: undefined,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config: IngestConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: undefined,\n allowExtraFields?: boolean,\n ) {\n super(name, config, schema, columns, undefined, allowExtraFields);\n const ingestApis = getMooseInternal().ingestApis;\n if (ingestApis.has(name)) {\n throw new Error(`Ingest API with name ${name} already exists`);\n }\n ingestApis.set(name, this);\n }\n}\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { getMooseInternal } from \"../internal\";\nimport type { ApiUtil } from \"../../consumption-apis/helpers\";\n\n/**\n * Defines the signature for a handler function used by a Consumption API.\n * @template T The expected type of the request parameters or query parameters.\n * @template R The expected type of the response data.\n * @param params An object containing the validated request parameters, matching the structure of T.\n * @param utils Utility functions provided to the handler, e.g., for database access (`runSql`).\n * @returns A Promise resolving to the response data of type R.\n */\ntype ApiHandler<T, R> = (params: T, utils: ApiUtil) => Promise<R>;\n\n/**\n * @template T The data type of the request parameters.\n */\nexport interface ApiConfig<T> {\n /**\n * An optional version string for this configuration.\n */\n version?: string;\n /**\n * An optional custom path for the API endpoint.\n * If not specified, defaults to the API name.\n */\n path?: string;\n metadata?: { description?: string };\n}\n\n/**\n * Represents a Consumption API endpoint (API), used for querying data from a Moose system.\n * Exposes data, often from an OlapTable or derived through a custom handler function.\n *\n * @template T The data type defining the expected structure of the API's query parameters.\n * @template R The data type defining the expected structure of the API's response body. Defaults to `any`.\n */\nexport class Api<T, R = any> extends TypedBase<T, ApiConfig<T>> {\n /** @internal The handler function that processes requests and generates responses. */\n _handler: ApiHandler<T, R>;\n /** @internal The JSON schema definition for the response type R. */\n responseSchema: IJsonSchemaCollection.IV3_1;\n\n /**\n * Creates a new Api instance.\n * @param name The name of the consumption API endpoint.\n * @param handler The function to execute when the endpoint is called. It receives validated query parameters and utility functions.\n * @param config Optional configuration for the consumption API.\n */\n constructor(name: string, handler: ApiHandler<T, R>, config?: {});\n\n /** @internal **/\n constructor(\n name: string,\n handler: ApiHandler<T, R>,\n config: ApiConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n responseSchema: IJsonSchemaCollection.IV3_1,\n );\n\n constructor(\n name: string,\n handler: ApiHandler<T, R>,\n config?: ApiConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n responseSchema?: IJsonSchemaCollection.IV3_1,\n ) {\n super(name, config ?? {}, schema, columns);\n this._handler = handler;\n this.responseSchema = responseSchema ?? 
{\n version: \"3.1\",\n schemas: [{ type: \"array\", items: { type: \"object\" } }],\n components: { schemas: {} },\n };\n const apis = getMooseInternal().apis;\n const key = `${name}${config?.version ? `:${config.version}` : \"\"}`;\n if (apis.has(key)) {\n throw new Error(\n `Consumption API with name ${name} and version ${config?.version} already exists`,\n );\n }\n apis.set(key, this);\n\n // Also register by custom path if provided\n if (config?.path) {\n if (config.version) {\n // Check if the path already ends with the version\n const pathEndsWithVersion =\n config.path.endsWith(`/${config.version}`) ||\n config.path === config.version ||\n (config.path.endsWith(config.version) &&\n config.path.length > config.version.length &&\n config.path[config.path.length - config.version.length - 1] ===\n \"/\");\n\n if (pathEndsWithVersion) {\n // Path already contains version, register as-is\n if (apis.has(config.path)) {\n const existing = apis.get(config.path)!;\n throw new Error(\n `Cannot register API \"${name}\" with path \"${config.path}\" - this path is already used by API \"${existing.name}\"`,\n );\n }\n apis.set(config.path, this);\n } else {\n // Path doesn't contain version, register with version appended\n const versionedPath = `${config.path.replace(/\\/$/, \"\")}/${config.version}`;\n\n // Check for collision on versioned path\n if (apis.has(versionedPath)) {\n const existing = apis.get(versionedPath)!;\n throw new Error(\n `Cannot register API \"${name}\" with path \"${versionedPath}\" - this path is already used by API \"${existing.name}\"`,\n );\n }\n apis.set(versionedPath, this);\n\n // Also register the unversioned path if not already claimed\n // (This is intentionally more permissive - first API gets the unversioned path)\n if (!apis.has(config.path)) {\n apis.set(config.path, this);\n }\n }\n } else {\n // Unversioned API, check for collision and register\n if (apis.has(config.path)) {\n const existing = apis.get(config.path)!;\n throw new Error(\n `Cannot register API \"${name}\" with custom path \"${config.path}\" - this path is already used by API \"${existing.name}\"`,\n );\n }\n apis.set(config.path, this);\n }\n }\n }\n\n /**\n * Retrieves the handler function associated with this Consumption API.\n * @returns The handler function.\n */\n getHandler = (): ApiHandler<T, R> => {\n return this._handler;\n };\n\n async call(baseUrl: string, queryParams: T): Promise<R> {\n // Construct the API endpoint URL using custom path or default to name\n let path: string;\n if (this.config?.path) {\n // Check if the custom path already contains the version\n if (this.config.version) {\n const pathEndsWithVersion =\n this.config.path.endsWith(`/${this.config.version}`) ||\n this.config.path === this.config.version ||\n (this.config.path.endsWith(this.config.version) &&\n this.config.path.length > this.config.version.length &&\n this.config.path[\n this.config.path.length - this.config.version.length - 1\n ] === \"/\");\n\n if (pathEndsWithVersion) {\n path = this.config.path;\n } else {\n path = `${this.config.path.replace(/\\/$/, \"\")}/${this.config.version}`;\n }\n } else {\n path = this.config.path;\n }\n } else {\n // Default to name with optional version\n path =\n this.config?.version ?\n `${this.name}/${this.config.version}`\n : this.name;\n }\n const url = new URL(`${baseUrl.replace(/\\/$/, \"\")}/api/${path}`);\n\n const searchParams = url.searchParams;\n\n for (const [key, value] of Object.entries(queryParams as any)) {\n if (Array.isArray(value)) {\n // For array 
values, add each item as a separate query param\n for (const item of value) {\n if (item !== null && item !== undefined) {\n searchParams.append(key, String(item));\n }\n }\n } else if (value !== null && value !== undefined) {\n searchParams.append(key, String(value));\n }\n }\n\n const response = await fetch(url, {\n method: \"GET\",\n headers: {\n Accept: \"application/json\",\n },\n });\n if (!response.ok) {\n throw new Error(`HTTP error! status: ${response.status}`);\n }\n const data = await response.json();\n return data as R;\n }\n}\n\n/** @deprecated Use ApiConfig<T> directly instead. */\nexport type EgressConfig<T> = ApiConfig<T>;\n\n/** @deprecated Use Api directly instead. */\nexport const ConsumptionApi = Api;\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase, TypiaValidators } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport {\n DeadLetterModel,\n DeadLetterQueue,\n Stream,\n StreamConfig,\n} from \"./stream\";\nimport { OlapConfig, OlapTable } from \"./olapTable\";\nimport { IngestApi, IngestConfig } from \"./ingestApi\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport { ClickHouseEngines } from \"../../blocks/helpers\";\n\n/**\n * Configuration options for a complete ingestion pipeline, potentially including an Ingest API, a Stream, and an OLAP Table.\n *\n * @template T The data type of the records being ingested.\n *\n * @example\n * ```typescript\n * // Simple pipeline with all components enabled\n * const pipelineConfig: IngestPipelineConfig<UserData> = {\n * table: true,\n * stream: true,\n * ingestApi: true\n * };\n *\n * // Advanced pipeline with custom configurations\n * const advancedConfig: IngestPipelineConfig<UserData> = {\n * table: { orderByFields: ['timestamp', 'userId'], engine: ClickHouseEngines.ReplacingMergeTree },\n * stream: { parallelism: 4, retentionPeriod: 86400 },\n * ingestApi: true,\n * version: '1.2.0',\n * metadata: { description: 'User data ingestion pipeline' }\n * };\n * ```\n */\nexport type IngestPipelineConfig<T> = {\n /**\n * Configuration for the OLAP table component of the pipeline.\n *\n * - If `true`, a table with default settings is created.\n * - If an `OlapConfig` object is provided, it specifies the table's configuration.\n * - If `false`, no OLAP table is created.\n *\n * @default false\n */\n table: boolean | OlapConfig<T>;\n\n /**\n * Configuration for the stream component of the pipeline.\n *\n * - If `true`, a stream with default settings is created.\n * - Pass a config object to specify the stream's configuration.\n * - The stream's destination will automatically be set to the pipeline's table if one exists.\n * - If `false`, no stream is created.\n *\n * @default false\n */\n stream: boolean | Omit<StreamConfig<T>, \"destination\">;\n\n /**\n * Configuration for the ingest API component of the pipeline.\n *\n * - If `true`, an ingest API with default settings is created.\n * - If a partial `IngestConfig` object (excluding `destination`) is provided, it specifies the API's configuration.\n * - The API's destination will automatically be set to the pipeline's stream if one exists.\n * - If `false`, no ingest API is created.\n *\n * **Note:** Requires a stream to be configured when enabled.\n *\n * @default false\n */\n ingestApi: boolean | Omit<IngestConfig<T>, \"destination\">;\n\n /**\n * @deprecated Use `ingestApi` instead. 
This parameter will be removed in a future version.\n */\n ingest?: boolean | Omit<IngestConfig<T>, \"destination\">;\n\n /**\n * Configuration for the dead letter queue of the pipeline.\n * If \`true\`, a dead letter queue with default settings is created.\n * If a partial \`StreamConfig\` object (excluding \`destination\`) is provided, it specifies the dead letter queue's configuration.\n * It becomes the default dead letter queue for the pipeline's stream and ingest API.\n * If \`false\` or \`undefined\`, no dead letter queue is created.\n */\n deadLetterQueue?: boolean | StreamConfig<DeadLetterModel>;\n\n /**\n * An optional version string applying to all components (table, stream, ingest) created by this pipeline configuration.\n * This version will be used for schema versioning and component identification.\n *\n * @example \"v1.0.0\", \"2023-12\", \"prod\"\n */\n version?: string;\n\n /**\n * An optional custom path for the ingestion API endpoint.\n * This will be used as the HTTP path for the ingest API if one is created.\n *\n * @example \"pipelines/analytics\", \"data/events\"\n */\n path?: string;\n\n /**\n * Optional metadata for the pipeline.\n */\n metadata?: {\n /** Human-readable description of the pipeline's purpose */\n description?: string;\n };\n\n /** Determines how changes in code will propagate to the resources. */\n lifeCycle?: LifeCycle;\n};\n\n/**\n * Represents a complete ingestion pipeline, potentially combining an Ingest API, a Stream, and an OLAP Table\n * under a single name and configuration. Simplifies the setup of common ingestion patterns.\n *\n * This class provides a high-level abstraction for creating data ingestion workflows that can include:\n * - An HTTP API endpoint for receiving data\n * - A streaming component for real-time data processing\n * - An OLAP table for analytical queries\n *\n * @template T The data type of the records flowing through the pipeline. This type defines the schema for the\n * Ingest API input, the Stream messages, and the OLAP Table rows.\n *\n * @example\n * \`\`\`typescript\n * // Create a complete pipeline with all components\n * const userDataPipeline = new IngestPipeline('userData', {\n * table: true,\n * stream: true,\n * ingestApi: true,\n * version: '1.0.0',\n * metadata: { description: 'Pipeline for user registration data' }\n * });\n *\n * // Create a pipeline with only table and stream\n * const analyticsStream = new IngestPipeline('analytics', {\n * table: { orderByFields: ['timestamp'], engine: ClickHouseEngines.ReplacingMergeTree },\n * stream: { parallelism: 8, retentionPeriod: 604800 },\n * ingestApi: false\n * });\n * \`\`\`\n */\nexport class IngestPipeline<T> extends TypedBase<T, IngestPipelineConfig<T>> {\n /**\n * The OLAP table component of the pipeline, if configured.\n * Provides analytical query capabilities for the ingested data.\n * Only present when \`config.table\` is not \`false\`.\n */\n table?: OlapTable<T>;\n\n /**\n * The stream component of the pipeline, if configured.\n * Handles real-time data flow and processing between components.\n * Only present when \`config.stream\` is not \`false\`.\n */\n stream?: Stream<T>;\n\n /**\n * The ingest API component of the pipeline, if configured.\n * Provides HTTP endpoints for data ingestion.\n * Only present when \`config.ingestApi\` is not \`false\`.\n */\n ingestApi?: IngestApi<T>;\n\n /** The dead letter queue of the pipeline, if configured. 
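Only present when \`config.deadLetterQueue\` is enabled. 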
*/\n deadLetterQueue?: DeadLetterQueue<T>;\n\n /**\n * Creates a new IngestPipeline instance.\n * Based on the configuration, it automatically creates and links the IngestApi, Stream, and OlapTable components.\n *\n * @param name The base name for the pipeline components (e.g., \"userData\" could create \"userData\" table, \"userData\" stream, \"userData\" ingest API).\n * @param config Optional configuration for the ingestion pipeline.\n *\n * @throws {Error} When ingest API is enabled but no stream is configured, since the API requires a stream destination.\n *\n * @example\n * ```typescript\n * const pipeline = new IngestPipeline('events', {\n * table: { orderByFields: ['timestamp'], engine: ClickHouseEngines.ReplacingMergeTree },\n * stream: { parallelism: 2 },\n * ingestApi: true\n * });\n * ```\n */\n constructor(name: string, config: IngestPipelineConfig<T>);\n\n /**\n * Internal constructor used by the framework for advanced initialization.\n *\n * @internal\n * @param name The base name for the pipeline components.\n * @param config Configuration specifying which components to create and their settings.\n * @param schema JSON schema collection for type validation.\n * @param columns Column definitions for the data model.\n * @param validators Typia validation functions.\n * @param allowExtraFields Whether extra fields are allowed (injected when type has index signature).\n */\n constructor(\n name: string,\n config: IngestPipelineConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: TypiaValidators<T>,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config: IngestPipelineConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n allowExtraFields?: boolean,\n ) {\n super(name, config, schema, columns, validators, allowExtraFields);\n\n // Handle backwards compatibility for deprecated 'ingest' parameter\n if (config.ingest !== undefined) {\n console.warn(\n \"⚠️ DEPRECATION WARNING: The 'ingest' parameter is deprecated and will be removed in a future version. \" +\n \"Please use 'ingestApi' instead.\",\n );\n // If ingestApi is not explicitly set, use the ingest value\n if (config.ingestApi === undefined) {\n (config as any).ingestApi = config.ingest;\n }\n }\n\n // Create OLAP table if configured\n if (config.table) {\n const tableConfig: OlapConfig<T> =\n typeof config.table === \"object\" ?\n {\n ...config.table,\n lifeCycle: config.table.lifeCycle ?? config.lifeCycle,\n ...(config.version && { version: config.version }),\n }\n : {\n lifeCycle: config.lifeCycle,\n engine: ClickHouseEngines.MergeTree,\n ...(config.version && { version: config.version }),\n };\n this.table = new OlapTable(\n name,\n tableConfig,\n this.schema,\n this.columnArray,\n this.validators,\n );\n }\n\n if (config.deadLetterQueue) {\n const streamConfig = {\n destination: undefined,\n ...(typeof config.deadLetterQueue === \"object\" ?\n {\n ...config.deadLetterQueue,\n lifeCycle: config.deadLetterQueue.lifeCycle ?? 
config.lifeCycle,\n }\n : { lifeCycle: config.lifeCycle }),\n ...(config.version && { version: config.version }),\n };\n this.deadLetterQueue = new DeadLetterQueue<T>(\n `${name}DeadLetterQueue`,\n streamConfig,\n validators!.assert!,\n );\n }\n\n // Create stream if configured, linking it to the table as destination\n if (config.stream) {\n const streamConfig: StreamConfig<T> = {\n destination: this.table,\n defaultDeadLetterQueue: this.deadLetterQueue,\n ...(typeof config.stream === \"object\" ?\n {\n ...config.stream,\n lifeCycle: config.stream.lifeCycle ?? config.lifeCycle,\n }\n : { lifeCycle: config.lifeCycle }),\n ...(config.version && { version: config.version }),\n };\n this.stream = new Stream(\n name,\n streamConfig,\n this.schema,\n this.columnArray,\n undefined,\n this.allowExtraFields,\n );\n // Set pipeline parent reference for internal framework use\n (this.stream as any).pipelineParent = this;\n }\n\n // Create ingest API if configured, requiring a stream as destination\n const effectiveIngestAPI =\n config.ingestApi !== undefined ? config.ingestApi : config.ingest;\n if (effectiveIngestAPI) {\n if (!this.stream) {\n throw new Error(\"Ingest API needs a stream to write to.\");\n }\n\n const ingestConfig = {\n destination: this.stream,\n deadLetterQueue: this.deadLetterQueue,\n ...(typeof effectiveIngestAPI === \"object\" ?\n (effectiveIngestAPI as object)\n : {}),\n ...(config.version && { version: config.version }),\n ...(config.path && { path: config.path }),\n };\n this.ingestApi = new IngestApi(\n name,\n ingestConfig,\n this.schema,\n this.columnArray,\n undefined,\n this.allowExtraFields,\n );\n // Set pipeline parent reference for internal framework use\n (this.ingestApi as any).pipelineParent = this;\n }\n }\n}\n","import { Workflow, Task } from \"./workflow\";\nimport { OlapTable } from \"./olapTable\";\n\ninterface BatchResult<T> {\n items: T[];\n hasMore: boolean;\n}\n\ninterface TransformedResult<U> {\n items: U[];\n}\n\ninterface TaskConfig {\n retries: number;\n timeout: string;\n}\n\ninterface ETLTasks<T, U> {\n extract: Task<null, BatchResult<T>>;\n transform: Task<BatchResult<T>, TransformedResult<U>>;\n load: Task<TransformedResult<U>, void>;\n}\n\nclass InternalBatcher<T> {\n private iterator: AsyncIterator<T>;\n private batchSize: number;\n\n constructor(asyncIterable: AsyncIterable<T>, batchSize = 20) {\n this.iterator = asyncIterable[Symbol.asyncIterator]();\n this.batchSize = batchSize;\n }\n\n async getNextBatch(): Promise<BatchResult<T>> {\n const items: T[] = [];\n\n for (let i = 0; i < this.batchSize; i++) {\n const { value, done } = await this.iterator.next();\n\n if (done) {\n return { items, hasMore: false };\n }\n\n items.push(value);\n }\n\n return { items, hasMore: true };\n }\n}\n\nexport interface ETLPipelineConfig<T, U> {\n extract: AsyncIterable<T> | (() => AsyncIterable<T>);\n transform: (sourceData: T) => Promise<U>;\n load: ((data: U[]) => Promise<void>) | OlapTable<U>;\n}\n\nexport class ETLPipeline<T, U> {\n private batcher!: InternalBatcher<T>;\n\n constructor(\n readonly name: string,\n readonly config: ETLPipelineConfig<T, U>,\n ) {\n this.setupPipeline();\n }\n\n private setupPipeline(): void {\n this.batcher = this.createBatcher();\n const tasks = this.createAllTasks();\n\n tasks.extract.config.onComplete = [tasks.transform];\n tasks.transform.config.onComplete = [tasks.load];\n\n new Workflow(this.name, {\n startingTask: tasks.extract,\n retries: 1,\n timeout: \"30m\",\n });\n }\n\n private createBatcher(): InternalBatcher<T> 
{\n const iterable =\n typeof this.config.extract === \"function\" ?\n this.config.extract()\n : this.config.extract;\n\n return new InternalBatcher(iterable);\n }\n\n private getDefaultTaskConfig(): TaskConfig {\n return {\n retries: 1,\n timeout: \"30m\",\n };\n }\n\n private createAllTasks(): ETLTasks<T, U> {\n const taskConfig = this.getDefaultTaskConfig();\n\n return {\n extract: this.createExtractTask(taskConfig),\n transform: this.createTransformTask(taskConfig),\n load: this.createLoadTask(taskConfig),\n };\n }\n\n private createExtractTask(\n taskConfig: TaskConfig,\n ): Task<null, BatchResult<T>> {\n return new Task<null, BatchResult<T>>(`${this.name}_extract`, {\n run: async ({}) => {\n console.log(`Running extract task for ${this.name}...`);\n const batch = await this.batcher.getNextBatch();\n console.log(`Extract task completed with ${batch.items.length} items`);\n return batch;\n },\n retries: taskConfig.retries,\n timeout: taskConfig.timeout,\n });\n }\n\n private createTransformTask(\n taskConfig: TaskConfig,\n ): Task<BatchResult<T>, TransformedResult<U>> {\n return new Task<BatchResult<T>, TransformedResult<U>>(\n `${this.name}_transform`,\n {\n // Use new single-parameter context API for handlers\n run: async ({ input }) => {\n const batch = input!;\n console.log(\n `Running transform task for ${this.name} with ${batch.items.length} items...`,\n );\n const transformedItems: U[] = [];\n\n for (const item of batch.items) {\n const transformed = await this.config.transform(item);\n transformedItems.push(transformed);\n }\n\n console.log(\n `Transform task completed with ${transformedItems.length} items`,\n );\n return { items: transformedItems };\n },\n retries: taskConfig.retries,\n timeout: taskConfig.timeout,\n },\n );\n }\n\n private createLoadTask(\n taskConfig: TaskConfig,\n ): Task<TransformedResult<U>, void> {\n return new Task<TransformedResult<U>, void>(`${this.name}_load`, {\n run: async ({ input: transformedItems }) => {\n console.log(\n `Running load task for ${this.name} with ${transformedItems.items.length} items...`,\n );\n\n // Handle both function and OlapTable\n if (\"insert\" in this.config.load) {\n // It's an OlapTable - insert entire batch\n await this.config.load.insert(transformedItems.items);\n } else {\n // It's a function - call with entire array\n await this.config.load(transformedItems.items);\n }\n\n console.log(`Load task completed`);\n },\n retries: taskConfig.retries,\n timeout: taskConfig.timeout,\n });\n }\n\n // Execute the entire ETL pipeline\n async run(): Promise<void> {\n console.log(`Starting ETL Pipeline: ${this.name}`);\n\n let batchNumber = 1;\n do {\n console.log(`Processing batch ${batchNumber}...`);\n const batch = await this.batcher.getNextBatch();\n\n if (batch.items.length === 0) {\n break;\n }\n\n // Transform all items in the batch\n const transformedItems: U[] = [];\n for (const extractedData of batch.items) {\n const transformedData = await this.config.transform(extractedData);\n transformedItems.push(transformedData);\n }\n\n // Load the entire batch\n if (\"insert\" in this.config.load) {\n // It's an OlapTable - insert entire batch\n await this.config.load.insert(transformedItems);\n } else {\n // It's a function - call with entire array\n await this.config.load(transformedItems);\n }\n\n console.log(\n `Completed batch ${batchNumber} with ${batch.items.length} items`,\n );\n batchNumber++;\n\n if (!batch.hasMore) {\n break;\n }\n } while (true);\n\n console.log(`Completed ETL Pipeline: ${this.name}`);\n 
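
The `ETLPipeline` defined here batches an async source (default batch size 20, per `InternalBatcher`), transforms items one at a time, and loads each batch through either a function sink or an `OlapTable`. A minimal usage sketch, assuming `ETLPipeline` is importable from the package root; the generator and sink are illustrative:

```typescript
import { ETLPipeline } from "@514labs/moose-lib";

// Hypothetical source: yields rows one at a time.
async function* extractRows(): AsyncIterable<{ name: string }> {
  yield { name: "ada" };
  yield { name: "grace" };
}

const etl = new ETLPipeline("namesEtl", {
  extract: extractRows, // an AsyncIterable or a factory returning one
  transform: async (row) => ({ name: row.name.toUpperCase() }),
  load: async (rows) => {
    console.log(`loaded ${rows.length} rows`); // or pass an OlapTable<U> here
  },
});

// Batches flow extract -> transform -> load until the source is exhausted.
await etl.run();
```
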
}\n}\n","import { getMooseInternal, isClientOnlyMode } from \"../internal\";\nimport { OlapTable } from \"./olapTable\";\nimport { Sql, toStaticQuery } from \"../../sqlHelpers\";\nimport { getSourceLocationFromStack } from \"../utils/stackTrace\";\n\ntype SqlObject = OlapTable<any> | SqlResource;\n\n/**\n * Represents a generic SQL resource that requires setup and teardown commands.\n * Base class for constructs like Views and Materialized Views. Tracks dependencies.\n */\nexport class SqlResource {\n /** @internal */\n public readonly kind = \"SqlResource\";\n\n /** Array of SQL statements to execute for setting up the resource. */\n setup: readonly string[];\n /** Array of SQL statements to execute for tearing down the resource. */\n teardown: readonly string[];\n /** The name of the SQL resource (e.g., view name, materialized view name). */\n name: string;\n\n /** List of OlapTables or Views that this resource reads data from. */\n pullsDataFrom: SqlObject[];\n /** List of OlapTables or Views that this resource writes data to. */\n pushesDataTo: SqlObject[];\n\n /** @internal Source file path where this resource was defined */\n sourceFile?: string;\n\n /** @internal Source line number where this resource was defined */\n sourceLine?: number;\n\n /** @internal Source column number where this resource was defined */\n sourceColumn?: number;\n\n /**\n * Creates a new SqlResource instance.\n * @param name The name of the resource.\n * @param setup An array of SQL DDL statements to create the resource.\n * @param teardown An array of SQL DDL statements to drop the resource.\n * @param options Optional configuration for specifying data dependencies.\n * @param options.pullsDataFrom Tables/Views this resource reads from.\n * @param options.pushesDataTo Tables/Views this resource writes to.\n */\n constructor(\n name: string,\n setup: readonly (string | Sql)[],\n teardown: readonly (string | Sql)[],\n options?: {\n pullsDataFrom?: SqlObject[];\n pushesDataTo?: SqlObject[];\n },\n ) {\n const sqlResources = getMooseInternal().sqlResources;\n // In client-only mode (MOOSE_CLIENT_ONLY=true), allow duplicate registrations\n // to support Next.js HMR which re-executes modules without clearing the registry\n if (!isClientOnlyMode() && sqlResources.has(name)) {\n throw new Error(`SqlResource with name ${name} already exists`);\n }\n sqlResources.set(name, this);\n\n this.name = name;\n this.setup = setup.map((sql) =>\n typeof sql === \"string\" ? sql : toStaticQuery(sql),\n );\n this.teardown = teardown.map((sql) =>\n typeof sql === \"string\" ? sql : toStaticQuery(sql),\n );\n this.pullsDataFrom = options?.pullsDataFrom ?? [];\n this.pushesDataTo = options?.pushesDataTo ?? 
[];\n\n // Capture source location from stack trace\n const stack = new Error().stack;\n const location = getSourceLocationFromStack(stack);\n\n if (location) {\n this.sourceFile = location.file;\n this.sourceLine = location.line;\n this.sourceColumn = location.column;\n }\n }\n}\n","import {\n ClickHouseEngines,\n createMaterializedView,\n dropView,\n} from \"../../blocks/helpers\";\nimport { Sql, toStaticQuery } from \"../../sqlHelpers\";\nimport { OlapConfig, OlapTable } from \"./olapTable\";\nimport { SqlResource } from \"./sqlResource\";\nimport { View } from \"./view\";\nimport { IJsonSchemaCollection } from \"typia\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\n\n/**\n * Configuration options for creating a Materialized View.\n * @template T The data type of the records stored in the target table of the materialized view.\n */\nexport interface MaterializedViewConfig<T> {\n /** The SQL SELECT statement or `Sql` object defining the data to be materialized. Dynamic SQL (with parameters) is not allowed here. */\n selectStatement: string | Sql;\n /** An array of OlapTable or View objects that the `selectStatement` reads from. */\n selectTables: (OlapTable<any> | View)[];\n\n /** @deprecated See {@link targetTable}\n * The name for the underlying target OlapTable that stores the materialized data. */\n tableName?: string;\n\n /** The name for the ClickHouse MATERIALIZED VIEW object itself. */\n materializedViewName: string;\n\n /** @deprecated See {@link targetTable}\n * Optional ClickHouse engine for the target table (e.g., ReplacingMergeTree). Defaults to MergeTree. */\n engine?: ClickHouseEngines;\n\n targetTable?:\n | OlapTable<T> /** Target table if the OlapTable object is already constructed. */\n | {\n /** The name for the underlying target OlapTable that stores the materialized data. */\n name: string;\n /** Optional ClickHouse engine for the target table (e.g., ReplacingMergeTree). Defaults to MergeTree. */\n engine?: ClickHouseEngines;\n /** Optional ordering fields for the target table. Crucial if using ReplacingMergeTree. */\n orderByFields?: (keyof T & string)[];\n };\n\n /** @deprecated See {@link targetTable}\n * Optional ordering fields for the target table. Crucial if using ReplacingMergeTree. */\n orderByFields?: (keyof T & string)[];\n}\n\nconst requireTargetTableName = (tableName: string | undefined): string => {\n if (typeof tableName === \"string\") {\n return tableName;\n } else {\n throw new Error(\"Name of targetTable is not specified.\");\n }\n};\n\n/**\n * Represents a Materialized View in ClickHouse.\n * This encapsulates both the target OlapTable that stores the data and the MATERIALIZED VIEW definition\n * that populates the table based on inserts into the source tables.\n *\n * @template TargetTable The data type of the records stored in the underlying target OlapTable. The structure of T defines the target table schema.\n */\nexport class MaterializedView<TargetTable> extends SqlResource {\n /** The target OlapTable instance where the materialized data is stored. 
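
Since `SqlResource` accepts arbitrary setup/teardown statements (strings pass through; `Sql` objects are rendered with `toStaticQuery`) and records its definition site from the stack trace, it can manage DDL that has no dedicated construct. A hedged sketch; the resource name and the `events` table it touches are hypothetical:

```typescript
import { SqlResource } from "@514labs/moose-lib";

// Hypothetical standalone resource: a table comment managed as a DDL pair.
const auditComment = new SqlResource(
  "events_table_comment",
  ["ALTER TABLE events MODIFY COMMENT 'managed by moose'"],
  ["ALTER TABLE events MODIFY COMMENT ''"],
);
```
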
*/\n targetTable: OlapTable<TargetTable>;\n\n /**\n * Creates a new MaterializedView instance.\n * Requires the `TargetTable` type parameter to be explicitly provided or inferred,\n * as it's needed to define the schema of the underlying target table.\n *\n * @param options Configuration options for the materialized view.\n */\n constructor(options: MaterializedViewConfig<TargetTable>);\n\n /** @internal **/\n constructor(\n options: MaterializedViewConfig<TargetTable>,\n targetSchema: IJsonSchemaCollection.IV3_1,\n targetColumns: Column[],\n );\n constructor(\n options: MaterializedViewConfig<TargetTable>,\n targetSchema?: IJsonSchemaCollection.IV3_1,\n targetColumns?: Column[],\n ) {\n let selectStatement = options.selectStatement;\n if (typeof selectStatement !== \"string\") {\n selectStatement = toStaticQuery(selectStatement);\n }\n\n if (targetSchema === undefined || targetColumns === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n const targetTable =\n options.targetTable instanceof OlapTable ?\n options.targetTable\n : new OlapTable(\n requireTargetTableName(\n options.targetTable?.name ?? options.tableName,\n ),\n {\n orderByFields:\n options.targetTable?.orderByFields ?? options.orderByFields,\n engine:\n options.targetTable?.engine ??\n options.engine ??\n ClickHouseEngines.MergeTree,\n } as OlapConfig<TargetTable>,\n targetSchema,\n targetColumns,\n );\n\n if (targetTable.name === options.materializedViewName) {\n throw new Error(\n \"Materialized view name cannot be the same as the target table name.\",\n );\n }\n\n super(\n options.materializedViewName,\n [\n createMaterializedView({\n name: options.materializedViewName,\n destinationTable: targetTable.name,\n select: selectStatement,\n }),\n // Population is now handled automatically by Rust infrastructure\n // based on table engine type and whether this is a new or updated view\n ],\n [dropView(options.materializedViewName)],\n {\n pullsDataFrom: options.selectTables,\n pushesDataTo: [targetTable],\n },\n );\n\n this.targetTable = targetTable;\n }\n}\n","import { dropView } from \"../../blocks/helpers\";\nimport { Sql, toStaticQuery } from \"../../sqlHelpers\";\nimport { OlapTable } from \"./olapTable\";\nimport { SqlResource } from \"./sqlResource\";\n\n/**\n * Represents a database View, defined by a SQL SELECT statement based on one or more base tables or other views.\n * Inherits from SqlResource, providing setup (CREATE VIEW) and teardown (DROP VIEW) commands.\n */\nexport class View extends SqlResource {\n /**\n * Creates a new View instance.\n * @param name The name of the view to be created.\n * @param selectStatement The SQL SELECT statement that defines the view's logic.\n * @param baseTables An array of OlapTable or View objects that the `selectStatement` reads from. 
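
A usage sketch for the `MaterializedView` constructor above, using the non-deprecated `targetTable` shape. Here `events` is assumed to be an existing `OlapTable`, the `DailyCount` model is hypothetical, and the compiler plugin is assumed to supply the target schema from the type parameter:

```typescript
interface DailyCount {
  day: Date;
  count: number;
}

const dailyCounts = new MaterializedView<DailyCount>({
  materializedViewName: "daily_counts_mv",
  selectStatement:
    "SELECT toDate(timestamp) AS day, count() AS count FROM events GROUP BY day",
  selectTables: [events],
  targetTable: {
    name: "daily_counts", // must differ from materializedViewName, as enforced above
    orderByFields: ["day"],
  },
});
```
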
Used for dependency tracking.\n */\n constructor(\n name: string,\n selectStatement: string | Sql,\n baseTables: (OlapTable<any> | View)[],\n ) {\n if (typeof selectStatement !== \"string\") {\n selectStatement = toStaticQuery(selectStatement);\n }\n\n super(\n name,\n [\n `CREATE VIEW IF NOT EXISTS ${name} \n AS ${selectStatement}`.trim(),\n ],\n [dropView(name)],\n {\n pullsDataFrom: baseTables,\n },\n );\n }\n}\n","/**\n * Defines how Moose manages the lifecycle of database resources when your code changes.\n *\n * This enum controls the behavior when there are differences between your code definitions\n * and the actual database schema or structure.\n */\nexport enum LifeCycle {\n /**\n * Full automatic management (default behavior).\n * Moose will automatically modify database resources to match your code definitions,\n * including potentially destructive operations like dropping columns or tables.\n */\n FULLY_MANAGED = \"FULLY_MANAGED\",\n\n /**\n * Deletion-protected automatic management.\n * Moose will modify resources to match your code but will avoid destructive actions\n * such as dropping columns, or tables. Only additive changes are applied.\n */\n DELETION_PROTECTED = \"DELETION_PROTECTED\",\n\n /**\n * External management - no automatic changes.\n * Moose will not modify the database resources. You are responsible for managing\n * the schema and ensuring it matches your code definitions manually.\n */\n EXTERNALLY_MANAGED = \"EXTERNALLY_MANAGED\",\n}\n","import http from \"http\";\nimport { getMooseInternal } from \"../internal\";\n\nexport type WebAppHandler = (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n) => void | Promise<void>;\n\nexport interface FrameworkApp {\n handle?: (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n next?: (err?: any) => void,\n ) => void;\n callback?: () => WebAppHandler;\n routing?: (req: http.IncomingMessage, res: http.ServerResponse) => void;\n ready?: () => PromiseLike<unknown>; // Fastify's ready method (returns FastifyInstance)\n}\n\nexport interface WebAppConfig {\n mountPath: string;\n metadata?: { description?: string };\n injectMooseUtils?: boolean;\n}\n\nconst RESERVED_MOUNT_PATHS = [\n \"/admin\",\n \"/api\",\n \"/consumption\",\n \"/health\",\n \"/ingest\",\n \"/moose\", // reserved for future use\n \"/ready\",\n \"/workflows\",\n] as const;\n\nexport class WebApp {\n name: string;\n handler: WebAppHandler;\n config: WebAppConfig;\n private _rawApp?: FrameworkApp;\n\n constructor(\n name: string,\n appOrHandler: FrameworkApp | WebAppHandler,\n config: WebAppConfig,\n ) {\n this.name = name;\n this.config = config;\n\n // Validate mountPath - it is required\n if (!this.config.mountPath) {\n throw new Error(\n `mountPath is required. Please specify a mount path for your WebApp (e.g., \"/myapi\").`,\n );\n }\n\n const mountPath = this.config.mountPath;\n\n // Check for root path - not allowed as it would overlap reserved paths\n if (mountPath === \"/\") {\n throw new Error(\n `mountPath cannot be \"/\" as it would allow routes to overlap with reserved paths: ${RESERVED_MOUNT_PATHS.join(\", \")}`,\n );\n }\n\n // Check for trailing slash\n if (mountPath.endsWith(\"/\")) {\n throw new Error(\n `mountPath cannot end with a trailing slash. 
Remove the '/' from: \"${mountPath}\"`,\n );\n }\n\n // Check for reserved path prefixes\n for (const reserved of RESERVED_MOUNT_PATHS) {\n if (mountPath === reserved || mountPath.startsWith(`${reserved}/`)) {\n throw new Error(\n `mountPath cannot begin with a reserved path: ${RESERVED_MOUNT_PATHS.join(\", \")}. Got: \"${mountPath}\"`,\n );\n }\n }\n\n this.handler = this.toHandler(appOrHandler);\n this._rawApp =\n typeof appOrHandler === \"function\" ? undefined : appOrHandler;\n\n const webApps = getMooseInternal().webApps;\n if (webApps.has(name)) {\n throw new Error(`WebApp with name ${name} already exists`);\n }\n\n // Check for duplicate mountPath\n if (this.config.mountPath) {\n for (const [existingName, existingApp] of webApps) {\n if (existingApp.config.mountPath === this.config.mountPath) {\n throw new Error(\n `WebApp with mountPath \"${this.config.mountPath}\" already exists (used by WebApp \"${existingName}\")`,\n );\n }\n }\n }\n\n webApps.set(name, this);\n }\n\n private toHandler(appOrHandler: FrameworkApp | WebAppHandler): WebAppHandler {\n if (typeof appOrHandler === \"function\") {\n return appOrHandler as WebAppHandler;\n }\n\n const app = appOrHandler as FrameworkApp;\n\n if (typeof app.handle === \"function\") {\n return (req, res) => {\n app.handle!(req, res, (err?: any) => {\n if (err) {\n console.error(\"WebApp handler error:\", err);\n if (!res.headersSent) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Internal Server Error\" }));\n }\n }\n });\n };\n }\n\n if (typeof app.callback === \"function\") {\n return app.callback();\n }\n\n // Fastify: routing is a function that handles requests directly\n // Fastify requires .ready() to be called before routes are available\n if (typeof app.routing === \"function\") {\n // Capture references to avoid TypeScript narrowing issues in closure\n const routing = app.routing;\n const appWithReady = app;\n\n // Use lazy initialization - don't call ready() during module loading\n // This prevents blocking the event loop when streaming functions import the app module\n // The ready() call is deferred to the first actual HTTP request\n let readyPromise: PromiseLike<unknown> | null = null;\n\n return async (req, res) => {\n // Lazy init - only call ready() when first request comes in\n if (readyPromise === null) {\n readyPromise =\n typeof appWithReady.ready === \"function\" ?\n appWithReady.ready()\n : Promise.resolve();\n }\n await readyPromise;\n routing(req, res);\n };\n }\n\n throw new Error(\n `Unable to convert app to handler. The provided object must be:\n - A function (raw Node.js handler)\n - An object with .handle() method (Express, Connect)\n - An object with .callback() method (Koa)\n - An object with .routing function (Fastify)\n \nExamples:\n Express: new WebApp(\"name\", expressApp)\n Koa: new WebApp(\"name\", koaApp)\n Fastify: new WebApp(\"name\", fastifyApp)\n Raw: new WebApp(\"name\", (req, res) => { ... })\n `,\n );\n }\n\n getRawApp(): FrameworkApp | undefined {\n return this._rawApp;\n }\n}\n","/**\n * @module registry\n * Public registry functions for accessing Moose Data Model v2 (dmv2) resources.\n *\n * This module provides functions to retrieve registered resources like tables, streams,\n * APIs, and more. 
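
Because Express apps expose `.handle()`, they are adapted directly by the `toHandler` logic above; Koa goes through `.callback()` and Fastify through `.routing` with lazy `ready()`. A sketch of the Express case, assuming root-level exports (the app and routes are illustrative):

```typescript
import express from "express";
import { WebApp } from "@514labs/moose-lib";

const app = express();
app.get("/hello", (_req, res) => {
  res.json({ ok: true });
});

// Reserved prefixes (/api, /ingest, /health, ...) and trailing slashes in
// mountPath are rejected at construction time, per the checks above.
new WebApp("helloApp", app, { mountPath: "/myapi" });
```
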
These functions are part of the public API and can be used by\n * user applications to inspect and interact with registered Moose resources.\n */\n\nimport { OlapTable } from \"./sdk/olapTable\";\nimport { Stream } from \"./sdk/stream\";\nimport { IngestApi } from \"./sdk/ingestApi\";\nimport { Api } from \"./sdk/consumptionApi\";\nimport { SqlResource } from \"./sdk/sqlResource\";\nimport { Workflow } from \"./sdk/workflow\";\nimport { WebApp } from \"./sdk/webApp\";\nimport { getMooseInternal } from \"./internal\";\n\n/**\n * Get all registered OLAP tables.\n * @returns A Map of table name to OlapTable instance\n */\nexport function getTables(): Map<string, OlapTable<any>> {\n return getMooseInternal().tables;\n}\n\n/**\n * Get a registered OLAP table by name.\n * @param name - The name of the table\n * @returns The OlapTable instance or undefined if not found\n */\nexport function getTable(name: string): OlapTable<any> | undefined {\n return getMooseInternal().tables.get(name);\n}\n\n/**\n * Get all registered streams.\n * @returns A Map of stream name to Stream instance\n */\nexport function getStreams(): Map<string, Stream<any>> {\n return getMooseInternal().streams;\n}\n\n/**\n * Get a registered stream by name.\n * @param name - The name of the stream\n * @returns The Stream instance or undefined if not found\n */\nexport function getStream(name: string): Stream<any> | undefined {\n return getMooseInternal().streams.get(name);\n}\n\n/**\n * Get all registered ingestion APIs.\n * @returns A Map of API name to IngestApi instance\n */\nexport function getIngestApis(): Map<string, IngestApi<any>> {\n return getMooseInternal().ingestApis;\n}\n\n/**\n * Get a registered ingestion API by name.\n * @param name - The name of the ingestion API\n * @returns The IngestApi instance or undefined if not found\n */\nexport function getIngestApi(name: string): IngestApi<any> | undefined {\n return getMooseInternal().ingestApis.get(name);\n}\n\n/**\n * Get all registered APIs (consumption/egress APIs).\n * @returns A Map of API key to Api instance\n */\nexport function getApis(): Map<string, Api<any>> {\n return getMooseInternal().apis;\n}\n\n/**\n * Get a registered API by name, version, or path.\n *\n * Supports multiple lookup strategies:\n * 1. Direct lookup by full key (name:version or name for unversioned)\n * 2. Lookup by name with automatic version aliasing when only one versioned API exists\n * 3. 
Lookup by custom path (if configured)\n *\n * @param nameOrPath - The name, name:version, or custom path of the API\n * @returns The Api instance or undefined if not found\n */\nexport function getApi(nameOrPath: string): Api<any> | undefined {\n const registry = getMooseInternal();\n\n // Try direct lookup first (full key: name or name:version)\n const directMatch = registry.apis.get(nameOrPath);\n if (directMatch) {\n return directMatch;\n }\n\n // Build alias maps on-demand for unversioned lookups\n const versionedApis = new Map<string, Api<any>[]>();\n const pathMap = new Map<string, Api<any>>();\n\n registry.apis.forEach((api, key) => {\n // Track APIs by base name for aliasing\n const baseName = api.name;\n if (!versionedApis.has(baseName)) {\n versionedApis.set(baseName, []);\n }\n versionedApis.get(baseName)!.push(api);\n\n // Track APIs by custom path\n if (api.config.path) {\n pathMap.set(api.config.path, api);\n }\n });\n\n // Try alias lookup: if there's exactly one API with this base name, return it\n const candidates = versionedApis.get(nameOrPath);\n if (candidates && candidates.length === 1) {\n return candidates[0];\n }\n\n // Try path-based lookup\n return pathMap.get(nameOrPath);\n}\n\n/**\n * Get all registered SQL resources.\n * @returns A Map of resource name to SqlResource instance\n */\nexport function getSqlResources(): Map<string, SqlResource> {\n return getMooseInternal().sqlResources;\n}\n\n/**\n * Get a registered SQL resource by name.\n * @param name - The name of the SQL resource\n * @returns The SqlResource instance or undefined if not found\n */\nexport function getSqlResource(name: string): SqlResource | undefined {\n return getMooseInternal().sqlResources.get(name);\n}\n\n/**\n * Get all registered workflows.\n * @returns A Map of workflow name to Workflow instance\n */\nexport function getWorkflows(): Map<string, Workflow> {\n return getMooseInternal().workflows;\n}\n\n/**\n * Get a registered workflow by name.\n * @param name - The name of the workflow\n * @returns The Workflow instance or undefined if not found\n */\nexport function getWorkflow(name: string): Workflow | undefined {\n return getMooseInternal().workflows.get(name);\n}\n\n/**\n * Get all registered web apps.\n * @returns A Map of web app name to WebApp instance\n */\nexport function getWebApps(): Map<string, WebApp> {\n return getMooseInternal().webApps;\n}\n\n/**\n * Get a registered web app by name.\n * @param name - The name of the web app\n * @returns The WebApp instance or undefined if not found\n */\nexport function getWebApp(name: string): WebApp | undefined {\n return getMooseInternal().webApps.get(name);\n}\n","/**\n * @module dmv2\n * This module defines the core Moose v2 data model constructs, including OlapTable, Stream, IngestApi, Api,\n * IngestPipeline, View, and MaterializedView. 
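
The three `getApi` lookup strategies documented above can be exercised like this (the API names and path are illustrative):

```typescript
import { getApi } from "@514labs/moose-lib";

const byKey = getApi("dailyStats:1.0.0"); // 1. direct key: "name" or "name:version"
const byName = getApi("dailyStats"); // 2. alias, when exactly one version is registered
const byPath = getApi("stats/daily"); // 3. custom `path` from the API's config
```
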
These classes provide a typed interface for defining and managing\n * data infrastructure components like ClickHouse tables, Redpanda streams, and data processing pipelines.\n */\n\n/**\n * A helper type used potentially for indicating aggregated fields in query results or schemas.\n * Captures the aggregation function name and argument types.\n * (Usage context might be specific to query builders or ORM features).\n *\n * @template AggregationFunction The name of the aggregation function (e.g., 'sum', 'avg', 'count').\n * @template ArgTypes An array type representing the types of the arguments passed to the aggregation function.\n */\nexport type Aggregated<\n AggregationFunction extends string,\n ArgTypes extends any[] = [],\n> = {\n _aggregationFunction?: AggregationFunction;\n _argTypes?: ArgTypes;\n};\n\n/**\n * A helper type for SimpleAggregateFunction in ClickHouse.\n * SimpleAggregateFunction stores the aggregated value directly instead of intermediate states,\n * offering better performance for functions like sum, max, min, any, anyLast, etc.\n *\n * @template AggregationFunction The name of the simple aggregation function (e.g., 'sum', 'max', 'anyLast').\n * @template ArgType The type of the argument (and result) of the aggregation function.\n *\n * @example\n * ```typescript\n * interface Stats {\n * rowCount: number & SimpleAggregated<'sum', number>;\n * maxValue: number & SimpleAggregated<'max', number>;\n * lastStatus: string & SimpleAggregated<'anyLast', string>;\n * }\n * ```\n */\nexport type SimpleAggregated<\n AggregationFunction extends string,\n ArgType = any,\n> = {\n _simpleAggregationFunction?: AggregationFunction;\n _argType?: ArgType;\n};\n\nexport { OlapTable, OlapConfig, S3QueueTableSettings } from \"./sdk/olapTable\";\nexport {\n Stream,\n StreamConfig,\n DeadLetterModel,\n DeadLetter,\n DeadLetterQueue,\n ConsumerConfig,\n TransformConfig,\n} from \"./sdk/stream\";\n\nexport { Workflow, Task } from \"./sdk/workflow\";\nexport type { TaskContext, TaskConfig } from \"./sdk/workflow\";\n\nexport { IngestApi, IngestConfig } from \"./sdk/ingestApi\";\nexport {\n Api,\n ApiConfig,\n EgressConfig,\n ConsumptionApi,\n} from \"./sdk/consumptionApi\";\nexport { IngestPipeline, IngestPipelineConfig } from \"./sdk/ingestPipeline\";\nexport { ETLPipeline, ETLPipelineConfig } from \"./sdk/etlPipeline\";\nexport {\n MaterializedView,\n MaterializedViewConfig,\n} from \"./sdk/materializedView\";\nexport { SqlResource } from \"./sdk/sqlResource\";\nexport { View } from \"./sdk/view\";\nexport { LifeCycle } from \"./sdk/lifeCycle\";\nexport {\n WebApp,\n WebAppConfig,\n WebAppHandler,\n FrameworkApp,\n} from \"./sdk/webApp\";\n\nexport {\n getTables,\n getTable,\n getStreams,\n getStream,\n getIngestApis,\n getIngestApi,\n getApis,\n getApi,\n getSqlResources,\n getSqlResource,\n getWorkflows,\n getWorkflow,\n getWebApps,\n getWebApp,\n} from 
\"./registry\";\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkCA,SAAS,oBAAoB,MAAuB;AAClD,SACE,KAAK,SAAS,cAAc;AAAA,EAC5B,KAAK,SAAS,eAAe;AAAA,EAC7B,KAAK,SAAS,kBAAkB;AAAA,EAChC,KAAK,SAAS,SAAS;AAAA,EACvB,KAAK,SAAS,oBAAoB;AAAA,EAClC,KAAK,SAAS,uBAAuB;AAAA,EACrC,KAAK,SAAS,qBAAqB;AAAA,EACnC,KAAK,SAAS,wBAAwB;AAE1C;AAMA,SAAS,eAAe,MAA0C;AAChE,QAAM,QACJ,KAAK,MAAM,sBAAsB,KAAK,KAAK,MAAM,qBAAqB;AACxE,MAAI,SAAS,MAAM,CAAC,GAAG;AACrB,WAAO;AAAA,MACL,MAAM,MAAM,CAAC;AAAA,MACb,MAAM,MAAM,CAAC;AAAA,IACf;AAAA,EACF;AACA,SAAO;AACT;AASO,SAAS,kBAAkB,OAAgC;AAChE,MAAI,CAAC,MAAO,QAAO,CAAC;AACpB,QAAM,QAAQ,MAAM,MAAM,IAAI;AAC9B,aAAW,QAAQ,OAAO;AACxB,QAAI,oBAAoB,IAAI,EAAG;AAC/B,UAAM,OAAO,eAAe,IAAI;AAChC,QAAI,KAAM,QAAO;AAAA,EACnB;AACA,SAAO,CAAC;AACV;AAYO,SAAS,2BACd,OAC4B;AAC5B,MAAI,CAAC,MAAO,QAAO;AAEnB,QAAM,QAAQ,MAAM,MAAM,IAAI;AAG9B,aAAW,QAAQ,MAAM,MAAM,CAAC,GAAG;AAEjC,QAAI,oBAAoB,IAAI,GAAG;AAC7B;AAAA,IACF;AAGA,UAAM,UAAU,KAAK,MAAM,uCAAuC;AAClE,QAAI,SAAS;AACX,aAAO;AAAA,QACL,MAAM,QAAQ,CAAC;AAAA,QACf,MAAM,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,QAC7B,QAAQ,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,MACjC;AAAA,IACF;AAGA,UAAM,UAAU,KAAK,MAAM,0BAA0B;AACrD,QAAI,SAAS;AACX,aAAO;AAAA,QACL,MAAM,QAAQ,CAAC;AAAA,QACf,MAAM,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,QAC7B,QAAQ,SAAS,QAAQ,CAAC,GAAG,EAAE;AAAA,MACjC;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAUO,SAAS,uBAAuB,OAAoC;AACzE,QAAM,WAAW,2BAA2B,KAAK;AACjD,SAAO,UAAU;AACnB;AA5IA;AAAA;AAAA;AAAA;AAAA;;;ACAA,IAwBa;AAxBb;AAAA;AAAA;AAEA;AAsBO,IAAM,YAAN,MAAsB;AAAA;AAAA,MAE3B;AAAA;AAAA,MAEA;AAAA;AAAA,MAGA;AAAA;AAAA,MAIA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAYA,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,YAAI,WAAW,UAAa,YAAY,QAAW;AACjD,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,aAAK,SAAS;AACd,aAAK,cAAc;AACnB,cAAM,aAAa,CAAC;AACpB,gBAAQ,QAAQ,CAAC,WAAW;AAC1B,qBAAW,OAAO,IAAI,IAAI;AAAA,QAC5B,CAAC;AACD,aAAK,UAAU;AAEf,aAAK,OAAO;AACZ,aAAK,SAAS;AACd,aAAK,aAAa;AAClB,aAAK,mBAAmB,oBAAoB;AAG5C,aAAK,WACF,QAAgB,WAAW,EAAE,GAAI,OAAe,SAAS,IAAI,CAAC;AAEjE,YAAI,CAAC,KAAK,SAAS,QAAQ;AACzB,gBAAM,QAAQ,IAAI,MAAM,EAAE;AAC1B,cAAI,OAAO;AACT,kBAAM,OAAO,kBAAkB,KAAK;AACpC,iBAAK,SAAS,SAAS,EAAE,MAAM,KAAK,MAAM,MAAM,KAAK,KAAK;AAAA,UAC5D;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;ACQO,SAAS,kBACd,IAC2C;AAC3C,SACE,OAAO,OAAO,YACd,OAAO,QACN,GAAiB,gBAAgB,QAClC,OAAQ,GAAiB,gBAAgB,YACxC,GAAiB,YAAY,eAAe,SAAS,KACtD,MAAM,QAAU,GAAiB,YAAuB,OAAO;AAEnE;AAKO,SAAS,aAAa,IAA4B;AACvD,SACE,OAAO,OAAO,YACd,OAAO,QACP,MAAM,QAAS,GAAc,OAAO;AAExC;AApIA;AAAA;AAAA;AAAA;AAAA;;;ACgOO,SAAS,0BACd,gBACA,OACA;AAGA,SAAO,KAAK,cAAc,IAAI,oBAAoB,KAAK,CAAC;AAC1D;AA0BA,SAAS,iBAAiB,OAAmC;AAC3D,SAAO,UAAU,SAAY,KAAK;AACpC;AAnQA,IAUa,iBAqIA,eAUA,SAgEA,uBAyBA;AAlPb;AAAA;AAAA;AAUO,IAAM,kBAAkB,CAAC,SAAyB;AACvD,aAAO,KAAK,WAAW,GAAG,KAAK,KAAK,SAAS,GAAG,IAAI,OAAO,KAAK,IAAI;AAAA,IACtE;AAmIO,IAAM,gBAAgB,CAACA,SAAqB;AACjD,YAAM,CAAC,OAAO,MAAM,IAAI,QAAQA,IAAG;AACnC,UAAI,OAAO,KAAK,MAAM,EAAE,WAAW,GAAG;AACpC,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AACA,aAAO;AAAA,IACT;AAEO,IAAM,UAAU,CAACA,SAA8C;AACpE,YAAM,qBAAqBA,KAAI,OAAO;AAAA,QAAI,CAAC,GAAG,MAC5C,0BAA0B,GAAG,CAAC;AAAA,MAChC;AAEA,YAAM,QAAQA,KAAI,QACf;AAAA,QAAI,CAAC,GAAG,MACP,KAAK,KAAK,GAAG,CAAC,GAAG,iBAAiB,mBAAmB,CAAC,CAAC,CAAC,KAAK;AAAA,MAC/D,EACC,KAAK,EAAE;AAEV,YAAM,eAAeA,KAAI,OAAO;AAAA,QAC9B,CAAC,KAA8B,GAAG,OAAO;AAAA,UACvC,GAAG;AAAA,UACH,CAAC,IAAI,CAAC,EAAE,GAAG,sBAAsB,CAAC;AAAA,QACpC;AAAA,QACA,CAAC;AAAA,MACH;AACA,aAAO,CAAC,OAAO,YAAY;AAAA,IAC7B;AA6CO,IAAM,wBAAwB,CAAC,UAAe;AACnD,UAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,cAAM,CAAC,MAAM,GAAG,IAAI;AACpB,YAAI,SAAS,aAAc,QAAO;AAAA,MACpC;AACA,aAAO;AAAA,IAC
T;AAmBO,IAAM,sBAAsB,CAAC,UAAiB;AACnD,UAAI,OAAO,UAAU,UAAU;AAE7B,eAAO,OAAO,UAAU,KAAK,IAAI,QAAQ;AAAA,MAC3C;AAGA,UAAI,OAAO,UAAU,UAAW,QAAO;AACvC,UAAI,iBAAiB,KAAM,QAAO;AAClC,UAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,cAAM,CAAC,MAAM,CAAC,IAAI;AAClB,eAAO;AAAA,MACT;AACA,aAAO;AAAA,IACT;AAAA;AAAA;;;ACnMO,SAAS,SAAS,MAAsB;AAC7C,SAAO,uBAAuB,gBAAgB,IAAI,CAAC,GAAG,KAAK;AAC7D;AAKO,SAAS,uBACd,SACQ;AACR,SAAO,0CAA0C,gBAAgB,QAAQ,IAAI,CAAC;AAAA,aACnE,gBAAgB,QAAQ,gBAAgB,CAAC;AAAA,aACzC,QAAQ,MAAM,GAAG,KAAK;AACnC;AA1EA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAIA;AA+CA;AAoCA;AAAA;AAAA;;;ACvFA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiBA,SAAS,SAAS,OAAoC;AACpD,MAAI,CAAC,MAAO,QAAO;AACnB,UAAQ,MAAM,KAAK,EAAE,YAAY,GAAG;AAAA,IAClC,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT;AACE,aAAO;AAAA,EACX;AACF;AA4EO,SAAS,UAAU,UAA0B;AAClD,SAAO,SACJ,QAAQ,SAAS,KAAK,EACtB,QAAQ,UAAU,MAAM,EACxB,QAAQ,UAAU,MAAM;AAC7B;AAkBO,SAAS,qBAAqB,iBAA0B;AAC7D,SAAO;AAAA,IACL,SAAS;AAAA,MACP,YAAY;AAAA;AAAA,MACZ,MAAM;AAAA,MACN,OAAO;AAAA,QACL,SAAS;AAAA,QACT,cAAc;AAAA,MAChB;AAAA,IACF;AAAA,IACA,aAAa;AAAA;AAAA,IACb,GAAI,mBAAmB,EAAE,qBAAqB,gBAAgB;AAAA,EAChE;AACF;AAgCA,eAAsB,iBACpB,KACA,QACA,iBACmB;AACnB,QAAM,QAAQ,MAAM,eAAe,KAAK,MAAM;AAE9C,QAAM,WAAW,MAAM,SAAS,qBAAqB,eAAe,CAAC;AACrE,QAAM,SAAS,QAAQ;AACvB,SAAO;AACT;AAtLA,iBACA,eACA,yBAEQ,OA0BK,aAMA,eAGA,aAkBA,qBA4BA,QA0BA,aACA,mBACA,uBAEA,sBACA,uBAEA,MA+BP,mBA6CO,UAWP,iBAwBO;AArOb;AAAA;AAAA;AAAA,kBAAiB;AACjB,oBAA6B;AAC7B,8BAAwB;AAExB,KAAM,EAAE,UAAU;AA0BX,IAAM,cAAc,CAAC,YAAoB;AAC9C,UAAI,CAAC,SAAS,QAAQ,IAAI,2BAA2B,GAAG;AACtD,gBAAQ,IAAI,OAAO;AAAA,MACrB;AAAA,IACF;AAEO,IAAM,gBAAgB,CAACC,UAC5B,GAAGA,KAAI,QAAQ,KAAK,OAAO,EAAE,SAAS,CAAC,SAAS,KAAK,IAAI,CAAC;AAErD,IAAM,cAAc,CAAC,aAAqB;AAC/C,YAAM,QAAQ;AACd,YAAM,UAAU,SAAS,MAAM,KAAK;AACpC,UAAI,WAAW,QAAQ,SAAS,GAAG;AACjC,eAAO,QAAQ,CAAC;AAAA,MAClB;AACA,aAAO;AAAA,IACT;AAWO,IAAM,sBAAsB,CAAC;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,MAAoB;AAClB,YAAM,WACJ,WAAW,OAAO,OAAO,YAAY,MAAM,SAAS,UAAU;AAChE,cAAQ,IAAI,+BAA+B,QAAQ,MAAM,IAAI,IAAI,IAAI,EAAE;AACvE,iBAAO,4BAAa;AAAA,QAClB,KAAK,GAAG,QAAQ,MAAM,IAAI,IAAI,IAAI;AAAA,QAClC;AAAA,QACA;AAAA,QACA;AAAA,QACA,aAAa;AAAA;AAAA;AAAA,MAGf,CAAC;AAAA,IACH;AAQO,IAAM,SAAoC,CAAC,QAAQ;AACxD,YAAM,MAAM,YAAAC,QAAK,QAAQ;AAAA,QACvB,MAAM,SAAS,QAAQ,IAAI,yBAAyB,MAAM;AAAA,QAC1D,QAAQ;AAAA,QACR,MAAM;AAAA,MACR,CAAC;AAED,UAAI,GAAG,SAAS,CAAC,QAAe;AAC9B,gBAAQ,IAAI,SAAS,IAAI,IAAI,qBAAqB,IAAI,OAAO;AAAA,MAC/D,CAAC;AAED,UAAI,MAAM,KAAK,UAAU,EAAE,cAAc,QAAQ,GAAG,IAAI,CAAC,CAAC;AAC1D,UAAI,IAAI;AAAA,IACV;AAaO,IAAM,cAAc;AACpB,IAAM,oBAAoB;AAC1B,IAAM,wBAAwB;AAE9B,IAAM,uBAAuB;AAC7B,IAAM,wBAAwB;AAE9B,IAAM,OAAO;AA+BpB,IAAM,oBAAoB,CAAC,iBACzB,aACG,MAAM,GAAG,EACT,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC,EACnB,OAAO,CAAC,MAAM,EAAE,SAAS,CAAC;AAyCxB,IAAM,WAAW,CAAC,QAAgB,MAAmB;AAC1D,aAAO,MAAM,EAAE,OAAO;AACtB,YAAM,QAAQ,EAAE;AAChB,UAAI,OAAO;AACT,eAAO,MAAM,KAAK;AAAA,MACpB;AAAA,IACF;AAKA,IAAM,kBAAkB,CACtB,QACA,SAC4B;AAC5B,YAAM,YAAY,KAAK,gBAAgB,KAAK,cAAc,YAAY,IAAI;AAC1E,cAAQ,WAAW;AAAA,QACjB,KAAK;AAAA,QACL,KAAK;AAAA,QACL,KAAK;AACH,iBAAO;AAAA,YACL;AAAA,YACA,UAAU,KAAK,gBAAgB;AAAA,YAC/B,UAAU,KAAK,gBAAgB;AAAA,UACjC;AAAA,QACF;AACE,iBAAO,KAAK,+BAA+B,KAAK,aAAa,EAAE;AAC/D,iBAAO;AAAA,MACX;AAAA,IACF;AAMO,IAAM,iBAAiB,OAC5B,KACA,WACmB;AACnB,YAAM,UAAU,kBAAkB,IAAI,UAAU,EAAE;AAClD,UAAI,QAAQ,WAAW,GAAG;AACxB,cAAM,IAAI,MAAM,wCAAwC,IAAI,MAAM,GAAG;AAAA,MACvE;AAEA,aAAO,IAAI,uCAAuC,QAAQ,KAAK,IAAI,CAAC,EAAE;AACtE,aAAO,IAAI,sBAAsB,IAAI,oBAAoB,WAAW,EAAE;AACtE,aAAO,IAAI,cAAc,IAAI,QAAQ,EAAE;AAEvC,YAAM,aAAa,gBAAgB,QAAQ,GA
AG;AAE9C,aAAO,IAAI,MAAM;AAAA,QACf,SAAS;AAAA,UACP,UAAU,IAAI;AAAA,UACd;AAAA,UACA,KAAK,IAAI,qBAAqB;AAAA,UAC9B,GAAI,cAAc,EAAE,MAAM,WAAW;AAAA,UACrC,OAAO;AAAA,YACL,kBAAkB;AAAA,YAClB,cAAc;AAAA,YACd,SAAS;AAAA,UACX;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAAA;AAAA;;;ACjQA;AAAA;AAAA;AAAA;AAAA;;;ACAA,IACAC,gBAMA;AAPA,IAAAC,gBAAA;AAAA;AAAA;AACA,IAAAD,iBAIO;AAEP,yBAAuC;AAGvC;AAEA;AAAA;AAAA;;;ACZA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,yBACA,gBACA;AAFA;AAAA;AAAA;AAAA,0BAAoB;AACpB,qBAAqC;AACrC,0BAAqB;AAAA;AAAA;;;ACFrB,IAGA;AAHA;AAAA;AAAA;AACA;AACA,IAAAE;AACA,WAAsB;AAEtB;AAEA;AAEA;AAAA;AAAA;;;ACTA;AAAA;AAAA;AAAA;AAAA,mBAA8C;AAAA;AAAA;;;ACA9C;AAAA;AAAA;AAAA,IAAAC;AACA;AACA;AAAA;AAAA;;;ACFA;AAAA;AAAA;AAAA;AAAA;;;ACAA,sBAwGa,gBAUA;AAlHb;AAAA;AAAA;AAAA,uBAAsB;AACtB;AAuGO,IAAM,iBAAiB;AAAA,MAC5B,OAAO;AAAA,MACP,KAAK;AAAA,MACL,WAAW;AAAA,MACX,MAAM;AAAA,IACR;AAKO,IAAM,qBAAuC;AAAA,MAClD,WAAW,eAAe;AAAA,MAC1B,SAAS;AAAA,MACT,gBAAgB;AAAA,MAChB,MAAM;AAAA,IACR;AAAA;AAAA;;;ACvHA;AAAA;AAAA;AAEA;AAAA;AAAA;;;ACFA;AAAA;AAAA;AAAA;AAAA;;;ACAA;AAAA;AAAA;AAAA;AAaA;AACA;AACA;AACA,IAAAC;AACA;AAMA;AAEA;AAEA;AAEA,IAAAA;AAEA;AAEA;AAEA;AACA;AACA;AAAA;AAAA;;;ACrCA,IAaA,gBA4Ca,kBAQP,gBAYA,wBA4/BO,kBAqKA,WA0DA;AAxyCb;AAAA;AAAA;AAaA,qBAAoB;AAIpB;AAoBA;AAoBO,IAAM,mBAAmB,MAC9B,eAAAC,QAAQ,IAAI,sBAAsB;AAOpC,IAAM,iBAAiB;AAAA,MACrB,QAAQ,oBAAI,IAA4B;AAAA,MACxC,SAAS,oBAAI,IAAyB;AAAA,MACtC,YAAY,oBAAI,IAA4B;AAAA,MAC5C,MAAM,oBAAI,IAAsB;AAAA,MAChC,cAAc,oBAAI,IAAyB;AAAA,MAC3C,WAAW,oBAAI,IAAsB;AAAA,MACrC,SAAS,oBAAI,IAAoB;AAAA,IACnC;AAIA,IAAM,yBAAyB,KAAK,KAAK,KAAK;AA4/BvC,IAAM,mBAAmB,MAC7B,WAAmB;AAGtB,QAAI,iBAAiB,MAAM,QAAW;AACpC,MAAC,WAAmB,iBAAiB;AAAA,IACvC;AA+JO,IAAM,YAAyC;AAAA,MACpD,SAAS;AAAA,MACT,YAAY;AAAA,QACV,SAAS;AAAA,UACP,iBAAiB;AAAA,YACf,MAAM;AAAA,YACN,YAAY;AAAA,cACV,gBAAgB;AAAA,gBACd,MAAM;AAAA,cACR;AAAA,cACA,cAAc;AAAA,gBACZ,MAAM;AAAA,cACR;AAAA,cACA,WAAW;AAAA,gBACT,MAAM;AAAA,cACR;AAAA,cACA,UAAU;AAAA,gBACR,MAAM;AAAA,gBACN,QAAQ;AAAA,cACV;AAAA,cACA,QAAQ;AAAA,gBACN,OAAO;AAAA,kBACL;AAAA,oBACE,OAAO;AAAA,kBACT;AAAA,kBACA;AAAA,oBACE,OAAO;AAAA,kBACT;AAAA,kBACA;AAAA,oBACE,OAAO;AAAA,kBACT;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,YACA,UAAU;AAAA,cACR;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF;AAAA,UACA,iBAAiB;AAAA,YACf,MAAM;AAAA,YACN,YAAY,CAAC;AAAA,YACb,UAAU,CAAC;AAAA,YACX,aAAa;AAAA,YACb,sBAAsB,CAAC;AAAA,UACzB;AAAA,QACF;AAAA,MACF;AAAA,MACA,SAAS;AAAA,QACP;AAAA,UACE,MAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEO,IAAM,aAAuB;AAAA,MAClC;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,MACA;AAAA,QACE,MAAM;AAAA,QACN,WAAW;AAAA,QACX,aAAa;AAAA,QACb,UAAU;AAAA,QACV,QAAQ;AAAA,QACR,SAAS;AAAA,QACT,aAAa,CAAC;AAAA,QACd,KAAK;AAAA,QACL,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA;AAAA;;;ACpyCA,eAAe,eACb,WAAmB,QAAQ,IAAI,GACP;AACxB,QAAM,KAAK,MAAM,OAAO,IAAS;AAEjC,MAAI,aAAa,iBAAAC,QAAK,QAAQ,QAAQ;AAEtC,SAAO,MAAM;AACX,UAAM,aAAa,iBAAAA,QAAK,KAAK,YAAY,mBAAmB;AAC5D,Q
AAI,GAAG,WAAW,UAAU,GAAG;AAC7B,aAAO;AAAA,IACT;AAEA,UAAM,YAAY,iBAAAA,QAAK,QAAQ,UAAU;AACzC,QAAI,cAAc,YAAY;AAE5B;AAAA,IACF;AACA,iBAAa;AAAA,EACf;AAEA,SAAO;AACT;AAKA,eAAsB,oBAA4C;AAChE,QAAM,KAAK,MAAM,OAAO,IAAS;AACjC,QAAM,aAAa,MAAM,eAAe;AACxC,MAAI,CAAC,YAAY;AACf,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,MAAI;AACF,UAAM,gBAAgB,GAAG,aAAa,YAAY,OAAO;AACzD,UAAM,SAAc,WAAM,aAAa;AACvC,WAAO;AAAA,EACT,SAAS,OAAO;AACd,UAAM,IAAI,YAAY,sCAAsC,KAAK,EAAE;AAAA,EACrE;AACF;AAjHA,sBACA,MA2Da;AA5Db;AAAA;AAAA;AAAA,uBAAiB;AACjB,WAAsB;AA2Df,IAAM,cAAN,cAA0B,MAAM;AAAA,MACrC,YAAY,SAAiB;AAC3B,cAAM,OAAO;AACb,aAAK,OAAO;AAAA,MACd;AAAA,IACF;AAAA;AAAA;;;ACjEA;AAAA,IAsBM;AAtBN;AAAA;AAAA;AAAA;AAsBA,IAAM,wBAAN,MAAM,uBAAsB;AAAA,MAC1B,OAAe;AAAA,MACP;AAAA,MACA;AAAA,MAER,OAAO,cAAqC;AAC1C,YAAI,CAAC,uBAAsB,UAAU;AACnC,iCAAsB,WAAW,IAAI,uBAAsB;AAAA,QAC7D;AACA,eAAO,uBAAsB;AAAA,MAC/B;AAAA,MAEA,oBAAoB,QAAuC;AACzD,aAAK,mBAAmB;AAAA,MAC1B;AAAA,MAEA,eAAe,QAAkC;AAC/C,aAAK,cAAc;AAAA,MACrB;AAAA,MAEQ,KAAK,MAAkC;AAC7C,cAAM,QAAQ,QAAQ,IAAI,IAAI;AAC9B,YAAI,UAAU,OAAW,QAAO;AAChC,cAAM,UAAU,MAAM,KAAK;AAC3B,eAAO,QAAQ,SAAS,IAAI,UAAU;AAAA,MACxC;AAAA,MAEQ,WAAW,OAAgD;AACjE,YAAI,UAAU,OAAW,QAAO;AAChC,gBAAQ,MAAM,KAAK,EAAE,YAAY,GAAG;AAAA,UAClC,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AACH,mBAAO;AAAA,UACT,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AACH,mBAAO;AAAA,UACT;AACE,mBAAO;AAAA,QACX;AAAA,MACF;AAAA,MAEA,MAAM,sBAAwD;AAC5D,YAAI,KAAK,kBAAkB;AACzB,iBAAO,KAAK;AAAA,QACd;AAGA,cAAM,gBAAgB,MAAM,kBAAkB;AAC9C,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,UAAU,KAAK,KAAK,oCAAoC;AAC9D,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,cAAc,KAAK,KAAK,mCAAmC;AACjE,cAAM,QAAQ,KAAK,KAAK,kCAAkC;AAC1D,cAAM,YAAY,KAAK;AAAA,UACrB,KAAK,KAAK,kCAAkC;AAAA,QAC9C;AAEA,eAAO;AAAA,UACL,MAAM,WAAW,cAAc,kBAAkB;AAAA,UACjD,MAAM,WAAW,cAAc,kBAAkB,UAAU,SAAS;AAAA,UACpE,UAAU,WAAW,cAAc,kBAAkB;AAAA,UACrD,UAAU,eAAe,cAAc,kBAAkB;AAAA,UACzD,UAAU,SAAS,cAAc,kBAAkB;AAAA,UACnD,QACE,cAAc,SAAY,YACxB,cAAc,kBAAkB,WAAW;AAAA,QAEjD;AAAA,MACF;AAAA,MAEA,MAAM,8BACJ,WACkC;AAClC,YAAI,KAAK,kBAAkB;AACzB,iBAAO,EAAE,GAAG,KAAK,kBAAkB,GAAG,UAAU;AAAA,QAClD;AAEA,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,UAAU,KAAK,KAAK,oCAAoC;AAC9D,cAAM,UAAU,KAAK,KAAK,+BAA+B;AACzD,cAAM,cAAc,KAAK,KAAK,mCAAmC;AACjE,cAAM,QAAQ,KAAK,KAAK,kCAAkC;AAC1D,cAAM,YAAY,KAAK;AAAA,UACrB,KAAK,KAAK,kCAAkC;AAAA,QAC9C;AAEA,YAAI;AACJ,YAAI;AACF,0BAAgB,MAAM,kBAAkB;AAAA,QAC1C,SAAS,OAAO;AACd,0BAAgB;AAAA,QAClB;AAEA,cAAM,WAAW;AAAA,UACf,MAAM;AAAA,UACN,MAAM;AAAA,UACN,UAAU;AAAA,UACV,UAAU;AAAA,UACV,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,eAAO;AAAA,UACL,MACE,WAAW,QACX,WACA,eAAe,kBAAkB,QACjC,SAAS;AAAA,UACX,MACE,WAAW,QACX,WACA,eAAe,kBAAkB,UAAU,SAAS,KACpD,SAAS;AAAA,UACX,UACE,WAAW,YACX,WACA,eAAe,kBAAkB,QACjC,SAAS;AAAA,UACX,UACE,WAAW,YACX,eACA,eAAe,kBAAkB,YACjC,SAAS;AAAA,UACX,UACE,WAAW,YACX,SACA,eAAe,kBAAkB,WACjC,SAAS;AAAA,UACX,QACE,WAAW,UACX,aACA,eAAe,kBAAkB,WACjC,SAAS;AAAA,QACb;AAAA,MACF;AAAA,MAEA,MAAM,iBAA8C;AAClD,YAAI,KAAK,aAAa;AACpB,iBAAO,KAAK;AAAA,QACd;AAEA,cAAM,gBAAgB,MAAM,kBAAkB;AAE9C,cAAM,YACJ,KAAK,KAAK,+BAA+B,KACzC,KAAK,KAAK,4BAA4B;AACxC,cAAM,gBACJ,KAAK,KAAK,2CAA2C,KACrD,KAAK,KAAK,wCAAwC;AACpD,cAAM,kBACJ,KAAK,KAAK,sCAAsC,KAChD,KAAK,KAAK,mCAAmC;AAC/C,cAAM,kBACJ,KAAK,KAAK,sCAAsC,KAChD,KAAK,KAAK,mCAAmC;AAC/C,cAAM,mBACJ,KAAK,KAAK,uCAAuC,KACjD,KAAK,KAAK,oCAAoC;AAChD,cAAM,sBACJ,KAAK,KAAK,0CAA0C,KACpD,KAAK,KAAK,uCAAuC;AACnD,cAAM,eACJ,KAAK,KAAK,kCAAkC,KAC5C,KAAK,KAAK,+BAA+B;AAC3C,cAAM,uBACJ,KAAK,KAAK,4CAA4C,KACtD,KAAK,KAAK,yCAAyC;AAErD,cAAM,YACJ,cAAc,gBAAgB,cAAc;AAE9C,eAAO;AAAA,UACL,QAAQ,aAAa,WAAW,UAAU;AAAA,UAC1C,kBACE,gBACE,SAAS,eAAe,EAAE,IACzB,WAAW,sBAAsB;AAAA,UACtC,cAAc,mBAAmB,WAAW;AAAA,UAC5C,cAAc,
mBAAmB,WAAW;AAAA,UAC5C,eAAe,oBAAoB,WAAW;AAAA,UAC9C,kBAAkB,uBAAuB,WAAW;AAAA,UACpD,WAAW,gBAAgB,WAAW;AAAA,UACtC,mBAAmB,wBAAwB,WAAW;AAAA,QACxD;AAAA,MACF;AAAA,MAEA,mBAA4B;AAC1B,eAAO,CAAC,CAAC,KAAK,oBAAoB,CAAC,CAAC,KAAK;AAAA,MAC3C;AAAA,IACF;AAEA,IAAC,WAAmB,uBAAuB,sBAAsB,YAAY;AAAA;AAAA;;;ACzN7E,IASA,oBACAC,qBA8pBa;AAxqBb;AAAA;AAAA;AACA;AACA;AAKA;AACA;AACA,yBAAyB;AACzB,IAAAA,sBAA2B;AAM3B;AAwpBO,IAAM,YAAN,cAA2B,UAA4B;AAAA,MAC5D;AAAA;AAAA,MAGgB,OAAO;AAAA;AAAA,MAGf;AAAA;AAAA,MAEA;AAAA;AAAA,MAEA;AAAA,MAkBR,YACE,MACA,QACA,QACA,SACA,YACA;AAEA,cAAM,iBACJ,SACE,YAAY,SACV,SACA,EAAE,GAAG,QAAQ,oCAAoC,IACnD,EAAE,oCAAoC;AAG1C,cAAM,YACJ,MAAM,QAAS,eAAuB,aAAa,KAClD,eAAuB,cAAc,SAAS;AACjD,cAAM,UACJ,OAAQ,eAAuB,sBAAsB,YACpD,eAAuB,kBAAkB,SAAS;AACrD,YAAI,aAAa,SAAS;AACxB,gBAAM,IAAI;AAAA,YACR,aAAa,IAAI;AAAA,UACnB;AAAA,QACF;AAGA,cAAM,aAAa,OAAQ,eAAuB,YAAY;AAC9D,cAAM,gBACJ,OAAQ,eAAuB,eAAe;AAChD,cAAM,iBACJ,OAAQ,eAAuB,gBAAgB;AAEjD,YAAI,eAAe,iBAAiB,iBAAiB;AACnD,gBAAM,IAAI;AAAA,YACR,aAAa,IAAI;AAAA,UAEnB;AAAA,QACF;AAEA,cAAM,MAAM,gBAAgB,QAAQ,SAAS,UAAU;AACvD,aAAK,OAAO;AAEZ,cAAM,SAAS,iBAAiB,EAAE;AAClC,cAAM,cACJ,KAAK,OAAO,UAAU,GAAG,IAAI,IAAI,KAAK,OAAO,OAAO,KAAK;AAG3D,YAAI,CAAC,iBAAiB,KAAK,OAAO,IAAI,WAAW,GAAG;AAClD,gBAAM,IAAI;AAAA,YACR,uBAAuB,IAAI,gBAAgB,QAAQ,WAAW,aAAa;AAAA,UAC7E;AAAA,QACF;AACA,eAAO,IAAI,aAAa,IAAI;AAAA,MAC9B;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,oBAA4B;AAElC,YAAI,KAAK,kBAAkB;AACzB,iBAAO,KAAK;AAAA,QACd;AAEA,cAAM,eAAe,KAAK,OAAO;AACjC,YAAI,CAAC,cAAc;AACjB,eAAK,mBAAmB,KAAK;AAAA,QAC/B,OAAO;AACL,gBAAM,gBAAgB,aAAa,QAAQ,OAAO,GAAG;AACrD,eAAK,mBAAmB,GAAG,KAAK,IAAI,IAAI,aAAa;AAAA,QACvD;AAEA,eAAO,KAAK;AAAA,MACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQQ,iBAAiB,kBAA+B;AAEtD,cAAM,oBAAoB,KAAK,OAAO,YAAY,iBAAiB;AACnE,cAAM,eAAe,GAAG,iBAAiB,IAAI,IAAI,iBAAiB,IAAI,IAAI,iBAAiB,QAAQ,IAAI,iBAAiB,QAAQ,IAAI,iBAAiB,IAAI,iBAAiB,MAAM;AAChL,mBAAO,gCAAW,QAAQ,EACvB,OAAO,YAAY,EACnB,OAAO,KAAK,EACZ,UAAU,GAAG,EAAE;AAAA,MACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,MAAc,oBAGX;AACD,cAAM;AACN,cAAM,iBAAkB,WACrB;AACH,cAAM,EAAE,qBAAAC,qBAAoB,IAAI,MAAM;AAEtC,cAAM,mBAAmB,MAAM,eAAe,oBAAoB;AAClE,cAAM,oBAAoB,KAAK,iBAAiB,gBAAgB;AAGhE,YAAI,KAAK,mBAAmB,KAAK,gBAAgB,mBAAmB;AAClE,iBAAO,EAAE,QAAQ,KAAK,iBAAiB,QAAQ,iBAAiB;AAAA,QAClE;AAGA,YAAI,KAAK,mBAAmB,KAAK,gBAAgB,mBAAmB;AAClE,cAAI;AACF,kBAAM,KAAK,gBAAgB,MAAM;AAAA,UACnC,SAAS,OAAO;AAAA,UAEhB;AAAA,QACF;AAIA,cAAM,oBAAoB,KAAK,OAAO,YAAY,iBAAiB;AACnE,cAAM,SAASA,qBAAoB;AAAA,UACjC,UAAU,iBAAiB;AAAA,UAC3B,UAAU,iBAAiB;AAAA,UAC3B,UAAU;AAAA,UACV,QAAQ,iBAAiB,SAAS,SAAS;AAAA,UAC3C,MAAM,iBAAiB;AAAA,UACvB,MAAM,iBAAiB;AAAA,QACzB,CAAC;AAGD,aAAK,kBAAkB;AACvB,aAAK,cAAc;AAEnB,eAAO,EAAE,QAAQ,QAAQ,iBAAiB;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA,MAAM,cAA6B;AACjC,YAAI,KAAK,iBAAiB;AACxB,cAAI;AACF,kBAAM,KAAK,gBAAgB,MAAM;AAAA,UACnC,SAAS,OAAO;AAAA,UAEhB,UAAE;AACA,iBAAK,kBAAkB;AACvB,iBAAK,cAAc;AAAA,UACrB;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,eAAe,QAIb;AAEA,YAAI,KAAK,YAAY,UAAU;AAC7B,cAAI;AACF,kBAAM,SAAS,KAAK,WAAW,SAAS,MAAM;AAC9C,mBAAO;AAAA,cACL,SAAS,OAAO;AAAA,cAChB,MAAM,OAAO;AAAA,cACb,QAAQ,OAAO,QAAQ;AAAA,gBAAI,CAAC,QAC1B,OAAO,QAAQ,WAAW,MAAM,KAAK,UAAU,GAAG;AAAA,cACpD;AAAA,YACF;AAAA,UACF,SAAS,OAAO;AACd,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,QAAQ,CAAC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,YACjE;AAAA,UACF;AAAA,QACF;AAEA,cAAM,IAAI,MAAM,0BAA0B;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,cAAc,QAA8B;AAC1C,YAAI,KAAK,YAAY,IAAI;AACvB,iBAAO,KAAK,WAAW,GAAG,MAAM;AAAA,QAClC;AAEA,cAAM,IAAI,MAAM,0BAA0B;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAUA,kBAAkB,QAAoB;AACpC,YAAI,KAAK,YAAY,QAAQ;AAC3B,iBAAO,KAAK,WAAW,OAAO,MAAM;AAAA,
QACtC;AAEA,cAAM,IAAI,MAAM,0BAA0B;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,MAAM,gBAAgB,MAA+C;AACnE,cAAM,QAAa,CAAC;AACpB,cAAM,UAA6B,CAAC;AAGpC,cAAM,SAAS;AACf,gBAAQ,SAAS;AAGjB,cAAM,aAAa,KAAK;AACxB,iBAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,gBAAM,SAAS,KAAK,CAAC;AAErB,cAAI;AAEF,gBAAI,KAAK,cAAc,MAAM,GAAG;AAC9B,oBAAM,KAAK,KAAK,sBAAsB,MAAM,CAAC;AAAA,YAC/C,OAAO;AAEL,oBAAM,SAAS,KAAK,eAAe,MAAM;AACzC,kBAAI,OAAO,SAAS;AAClB,sBAAM,KAAK,KAAK,sBAAsB,MAAM,CAAC;AAAA,cAC/C,OAAO;AACL,wBAAQ,KAAK;AAAA,kBACX;AAAA,kBACA,OAAO,OAAO,QAAQ,KAAK,IAAI,KAAK;AAAA,kBACpC,OAAO;AAAA,kBACP,MAAM;AAAA,gBACR,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF,SAAS,OAAO;AACd,oBAAQ,KAAK;AAAA,cACX;AAAA,cACA,OAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,cAC5D,OAAO;AAAA,cACP,MAAM;AAAA,YACR,CAAC;AAAA,UACH;AAAA,QACF;AAEA,eAAO;AAAA,UACL;AAAA,UACA;AAAA,UACA,OAAO;AAAA,QACT;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,MAAc,uBACZ,QACA,WACA,SACyD;AACzD,cAAM,aAAkB,CAAC;AACzB,cAAM,SAA4B,CAAC;AAGnC,cAAM,mBAAmB;AACzB,cAAM,eAAe,QAAQ;AAE7B,iBAAS,IAAI,GAAG,IAAI,cAAc,KAAK,kBAAkB;AACvD,gBAAM,WAAW,KAAK,IAAI,IAAI,kBAAkB,YAAY;AAC5D,gBAAM,QAAQ,QAAQ,MAAM,GAAG,QAAQ;AAEvC,cAAI;AACF,kBAAM,OAAO,OAAO;AAAA,cAClB,OAAO,gBAAgB,SAAS;AAAA,cAChC,QAAQ;AAAA,cACR,QAAQ;AAAA,cACR,qBAAqB;AAAA,gBACnB,wBAAwB;AAAA;AAAA,gBAExB,uBAAuB;AAAA,gBACvB,gBAAgB;AAAA,cAClB;AAAA,YACF,CAAC;AACD,uBAAW,KAAK,GAAG,KAAK;AAAA,UAC1B,SAAS,YAAY;AAEnB,qBAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,oBAAM,SAAS,MAAM,CAAC;AACtB,kBAAI;AACF,sBAAM,OAAO,OAAO;AAAA,kBAClB,OAAO,gBAAgB,SAAS;AAAA,kBAChC,QAAQ,CAAC,MAAM;AAAA,kBACf,QAAQ;AAAA,kBACR,qBAAqB;AAAA,oBACnB,wBAAwB;AAAA,kBAC1B;AAAA,gBACF,CAAC;AACD,2BAAW,KAAK,MAAM;AAAA,cACxB,SAAS,OAAO;AACd,uBAAO,KAAK;AAAA,kBACV;AAAA,kBACA,OAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,kBAC5D,OAAO,IAAI;AAAA,gBACb,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAEA,eAAO,EAAE,YAAY,OAAO;AAAA,MAC9B;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,yBACN,MACA,SACkE;AAClE,cAAM,WAAW,gBAAgB;AACjC,cAAM,WAAW,SAAS,YAAY;AACtC,cAAM,iBAAiB,SAAS,aAAa;AAG7C,YAAI,YAAY,aAAa,WAAW;AACtC,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAGA,YAAI,YAAY,gBAAgB;AAC9B,kBAAQ;AAAA,YACN;AAAA,UACF;AAAA,QACF;AAEA,eAAO,EAAE,UAAU,UAAU,eAAe;AAAA,MAC9C;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,gBACN,MACA,UACwB;AACxB,YAAI,YAAY,CAAC,MAAM;AACrB,iBAAO;AAAA,YACL,YAAY;AAAA,YACZ,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAEA,YAAI,CAAC,aAAa,CAAC,QAAS,KAAa,WAAW,IAAI;AACtD,iBAAO;AAAA,YACL,YAAY;AAAA,YACZ,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAEA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAc,8BACZ,MACA,gBACA,UACA,SACsE;AACtE,YAAI,CAAC,gBAAgB;AACnB,iBAAO,EAAE,eAAe,MAAM,kBAAkB,CAAC,EAAE;AAAA,QACrD;AAEA,YAAI;AACF,gBAAM,mBAAmB,MAAM,KAAK,gBAAgB,IAAiB;AACrE,gBAAM,gBAAgB,iBAAiB;AACvC,gBAAM,mBAAmB,iBAAiB;AAE1C,cAAI,iBAAiB,SAAS,GAAG;AAC/B,iBAAK,uBAAuB,kBAAkB,UAAU,MAAM,OAAO;AAGrE,oBAAQ,UAAU;AAAA,cAChB,KAAK;AACH,uBAAO,EAAE,eAAe,iBAAiB;AAAA,cAC3C,KAAK;AACH,uBAAO,EAAE,eAAe,MAAM,iBAAiB;AAAA,cACjD;AACE,uBAAO,EAAE,eAAe,iBAAiB;AAAA,YAC7C;AAAA,UACF;AAEA,iBAAO,EAAE,eAAe,iBAAiB;AAAA,QAC3C,SAAS,iBAAiB;AACxB,cAAI,aAAa,aAAa;AAC5B,kBAAM;AAAA,UACR;AACA,kBAAQ,KAAK,qBAAqB,eAAe;AACjD,iBAAO,EAAE,eAAe,MAAM,kBAAkB,CAAC,EAAE;AAAA,QACrD;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,uBACN,kBACA,UACA,MACA,SACM;AACN,gBAAQ,UAAU;AAAA,UAChB,KAAK;AACH,kBAAM,aAAa,iBAAiB,CAAC;AACrC,kBAAM,IAAI;AAAA,cACR,yCAAyC,WAAW,KAAK,KAAK,WAAW,KAAK;AAAA,YAChF;AAAA,UAEF,KAAK;AACH,iBAAK,0BAA0B,kBAAkB,KAAK,QAAQ,OAAO;AACrE;AAAA,UAEF,KAAK;AAEH;AAAA,QACJ;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,0BACN,kBACA,cACA,SACM;AACN,cAAM,wBAAwB,iBAAiB;AAC/C,cAAM,wBAAwB,wBAAwB;AAEtD,YACE,SAAS,gBAAgB,UACzB,wBAAwB,QAAQ,aAChC;AACA,gBAAM,IAAI;AAAA,YACR,iCAAiC,q
BAAqB,MAAM,QAAQ,WAAW,aAAa,iBAAiB,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UAC7I;AAAA,QACF;AAEA,YACE,SAAS,qBAAqB,UAC9B,wBAAwB,QAAQ,kBAChC;AACA,gBAAM,IAAI;AAAA,YACR,sCAAsC,sBAAsB,QAAQ,CAAC,CAAC,MAAM,QAAQ,gBAAgB,aAAa,iBAAiB,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UAClK;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,qBACN,WACA,MACA,eACA,UACA,UACA,SACK;AACL,cAAM,gBAAqB;AAAA,UACzB,OAAO,gBAAgB,SAAS;AAAA,UAChC,QAAQ;AAAA,UACR,qBAAqB;AAAA,YACnB,wBAAwB;AAAA,YACxB,mBAAmB;AAAA;AAAA;AAAA,YAEnB,uBACE,WAAW,MAAS,KAAK,IAAI,cAAc,QAAQ,GAAM;AAAA,YAC3D,gBAAgB;AAAA;AAAA,YAEhB,cAAc,cAAc,SAAS,MAAO,IAAI;AAAA,YAChD,uBAAuB;AAAA;AAAA,UACzB;AAAA,QACF;AAGA,YAAI,UAAU;AACZ,wBAAc,SAAS;AAAA,QACzB,OAAO;AACL,wBAAc,SAAS;AAAA,QACzB;AAGA,YACE,aAAa,cACZ,SAAS,gBAAgB,UACxB,SAAS,qBAAqB,SAChC;AACA,cAAI,QAAQ,gBAAgB,QAAW;AACrC,0BAAc,oBAAoB,gCAChC,QAAQ;AAAA,UACZ;AAEA,cAAI,QAAQ,qBAAqB,QAAW;AAC1C,0BAAc,oBAAoB,kCAChC,QAAQ;AAAA,UACZ;AAAA,QACF;AAEA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,oBACN,MACA,eACA,kBACA,UACA,gBACA,UACiB;AACjB,YAAI,UAAU;AACZ,iBAAO;AAAA,YACL,YAAY;AAAA;AAAA,YACZ,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAEA,cAAM,gBAAgB,cAAc;AACpC,cAAM,iBACJ,iBAAkB,KAAa,SAAS;AAE1C,cAAM,SAA0B;AAAA,UAC9B,YAAY;AAAA,UACZ,QAAQ,iBAAiB,iBAAiB,SAAS;AAAA,UACnD,OAAO;AAAA,QACT;AAGA,YACE,kBACA,iBAAiB,SAAS,KAC1B,aAAa,WACb;AACA,iBAAO,gBAAgB,iBAAiB,IAAI,CAAC,QAAQ;AAAA,YACnD,QAAQ,GAAG;AAAA,YACX,OAAO,qBAAqB,GAAG,KAAK;AAAA,YACpC,OAAO,GAAG;AAAA,UACZ,EAAE;AAAA,QACJ;AAEA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAc,qBACZ,YACA,UACA,WACA,MACA,eACA,kBACA,UACA,gBACA,SAC0B;AAC1B,gBAAQ,UAAU;AAAA,UAChB,KAAK;AACH,kBAAM,IAAI;AAAA,cACR,oCAAoC,SAAS,KAAK,UAAU;AAAA,YAC9D;AAAA,UAEF,KAAK;AACH,kBAAM,IAAI;AAAA,cACR,4CAA4C,SAAS,+BAA+B,UAAU;AAAA,YAChG;AAAA,UAEF,KAAK;AACH,mBAAO,MAAM,KAAK;AAAA,cAChB;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UAEF;AACE,kBAAM,IAAI,MAAM,2BAA2B,QAAQ,EAAE;AAAA,QACzD;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAc,sBACZ,YACA,WACA,MACA,eACA,kBACA,UACA,gBACA,SAC0B;AAC1B,YAAI,UAAU;AACZ,gBAAM,IAAI;AAAA,YACR,wDAAwD,UAAU;AAAA,UACpE;AAAA,QACF;AAEA,YAAI;AACF,gBAAM,EAAE,OAAO,IAAI,MAAM,KAAK,kBAAkB;AAChD,gBAAM,wBAAwB,SAAS,yBAAyB;AAChE,gBAAM,YAAY,wBAAyB,OAAe;AAE1D,gBAAM,EAAE,YAAY,OAAO,IAAI,MAAM,KAAK;AAAA,YACxC;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAGA,gBAAM,mBAAsC;AAAA;AAAA,YAE1C,GAAI,kBAAkB,CAAC,wBACrB,iBAAiB,IAAI,CAAC,QAAQ;AAAA,cAC5B,QAAQ,GAAG;AAAA,cACX,OAAO,qBAAqB,GAAG,KAAK;AAAA,cACpC,OAAO,GAAG;AAAA,YACZ,EAAE,IACF,CAAC;AAAA;AAAA,YAEH,GAAG;AAAA,UACL;AAEA,eAAK;AAAA,YACH;AAAA,YACC,KAAa;AAAA,YACd;AAAA,UACF;AAEA,iBAAO;AAAA,YACL,YAAY,WAAW;AAAA,YACvB,QAAQ,iBAAiB;AAAA,YACzB,OAAQ,KAAa;AAAA,YACrB,eAAe;AAAA,UACjB;AAAA,QACF,SAAS,gBAAgB;AACvB,gBAAM,IAAI;AAAA,YACR,oCAAoC,SAAS,6BAA6B,cAAc;AAAA,UAC1F;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMQ,yBACN,eACA,cACA,SACM;AACN,cAAM,cAAc,cAAc;AAClC,cAAM,cAAc,cAAc;AAElC,YACE,SAAS,gBAAgB,UACzB,cAAc,QAAQ,aACtB;AACA,gBAAM,IAAI;AAAA,YACR,4BAA4B,WAAW,MAAM,QAAQ,WAAW,qBAAqB,cAAc,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UACnI;AAAA,QACF;AAEA,YACE,SAAS,qBAAqB,UAC9B,cAAc,QAAQ,kBACtB;AACA,gBAAM,IAAI;AAAA,YACR,iCAAiC,YAAY,QAAQ,CAAC,CAAC,MAAM,QAAQ,gBAAgB,qBAAqB,cAAc,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,KAAK,IAAI,CAAC;AAAA,UACxJ;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAaQ,sBACN,QACA,UAAoB,KAAK,aACpB;AACL,cAAM,SAAS,EAAE,GAAG,OAAO;AAC3B,mBAAW,OAAO,SAAS;AACzB,gBAAM,QAAQ,OAAO,IAAI,IAAI;AAC7B,gBAAM,KAAK,IAAI;AAEf,cAAI,kBAAkB,EAAE,GAAG;AAEzB,gBACE,MAAM,QAAQ,KAAK,MAClB,MAAM,WAAW,KAAK,OAAO,MAAM,CAAC,MAAM,WAC3C;AACA,qBAAO,IAAI,IAAI,IAAI,MAAM,IAAI,CAAC,
SAAS;AAAA,gBACrC,KAAK,sBAAsB,MAAM,GAAG,YAAY,OAAO;AAAA,cACzD,CAAC;AAAA,YACH;AAAA,UACF,WAAW,aAAa,EAAE,GAAG;AAE3B,gBAAI,SAAS,OAAO,UAAU,UAAU;AACtC,qBAAO,IAAI,IAAI,IAAI,KAAK,sBAAsB,OAAO,GAAG,OAAO;AAAA,YACjE;AAAA,UACF;AAAA,QAEF;AACA,eAAO;AAAA,MACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MA2DA,MAAM,OACJ,MACA,SAC0B;AAE1B,cAAM,EAAE,UAAU,UAAU,eAAe,IACzC,KAAK,yBAAyB,MAAM,OAAO;AAG7C,cAAM,cAAc,KAAK,gBAAgB,MAAM,QAAQ;AACvD,YAAI,aAAa;AACf,iBAAO;AAAA,QACT;AAGA,YAAI,gBAAqB,CAAC;AAC1B,YAAI,mBAAsC,CAAC;AAE3C,YAAI,CAAC,YAAY,gBAAgB;AAC/B,gBAAM,mBAAmB,MAAM,KAAK;AAAA,YAClC;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AACA,0BAAgB,iBAAiB;AACjC,6BAAmB,iBAAiB;AAAA,QACtC,OAAO;AAEL,0BAAgB,WAAW,CAAC,IAAK;AAAA,QACnC;AAGA,cAAM,EAAE,OAAO,IAAI,MAAM,KAAK,kBAAkB;AAChD,cAAM,YAAY,KAAK,kBAAkB;AAEzC,YAAI;AAEF,gBAAM,gBAAgB,KAAK;AAAA,YACzB;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAEA,gBAAM,OAAO,OAAO,aAAa;AAGjC,iBAAO,KAAK;AAAA,YACV;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,SAAS,YAAY;AAEnB,iBAAO,MAAM,KAAK;AAAA,YAChB;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAAA,MAGF;AAAA;AAAA;AAAA;AAAA,IAKF;AAAA;AAAA;;;AC7/BA,SAAS,gBACP,IACA,WAC6B;AAC7B,EAAC,GAAW,UAAU,MAAM,UAAU,GAAG,cAAc;AACzD;AAtmBA,IAsBAC,qBA2IM,eAuDO,QAiaA;AAznBb;AAAA;AAAA;AAaA;AAEA;AAOA,IAAAA,sBAA2B;AAE3B;AAyIA,IAAM,gBAAN,MAAoB;AAAA;AAAA,MAElB;AAAA;AAAA,MAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,YAAY,aAA0B,QAAyB;AAC7D,aAAK,cAAc;AACnB,aAAK,SAAS;AAAA,MAChB;AAAA,IACF;AAsCO,IAAM,SAAN,cAAwB,UAA8B;AAAA,MAC3D;AAAA;AAAA,MAEQ;AAAA;AAAA,MAEA;AAAA,MAwBR,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,cAAM,MAAM,UAAU,CAAC,GAAG,QAAQ,SAAS,QAAW,gBAAgB;AACtE,cAAM,UAAU,iBAAiB,EAAE;AACnC,YAAI,QAAQ,IAAI,IAAI,GAAG;AACrB,gBAAM,IAAI,MAAM,oBAAoB,IAAI,iBAAiB;AAAA,QAC3D;AACA,gBAAQ,IAAI,MAAM,IAAI;AACtB,aAAK,yBAAyB,KAAK,OAAO;AAAA,MAC5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,mBAAmB,oBAAI,IAGrB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA,aAAa,IAAI,MAGd;AAAA;AAAA;AAAA;AAAA;AAAA,MAMK,mBAAmB,WAA4B;AACrD,cAAM,gBACJ,KAAK,OAAO,UAAU,IAAI,KAAK,OAAO,QAAQ,QAAQ,OAAO,GAAG,CAAC,KAAK;AACxE,cAAM,OAAO,GAAG,KAAK,IAAI,GAAG,aAAa;AACzC,eAAO,cAAc,UAAa,UAAU,SAAS,IACjD,GAAG,SAAS,IAAI,IAAI,KACpB;AAAA,MACN;AAAA;AAAA;AAAA;AAAA,MAKQ,iBAAiB,aAAyC;AAChE,cAAM,eAAe;AAAA,UACnB,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,UACZ,YAAY;AAAA,QACd,EAAE,KAAK,GAAG;AACV,mBAAO,gCAAW,QAAQ,EACvB,OAAO,YAAY,EACnB,OAAO,KAAK,EACZ,UAAU,GAAG,EAAE;AAAA,MACpB;AAAA;AAAA;AAAA;AAAA,MAKA,MAAc,sBAGX;AAED,cAAM;AACN,cAAM,iBAAkB,WACrB;AACH,cAAM,EAAE,kBAAAC,kBAAiB,IAAI,MAAM;AAEnC,cAAM,cAAc,MAAO,eAAuB,eAAe;AACjE,cAAM,cAAc,KAAK,iBAAiB,WAAW;AAErD,YAAI,KAAK,qBAAqB,KAAK,qBAAqB,aAAa;AACnE,iBAAO,EAAE,UAAU,KAAK,mBAAmB,YAAY;AAAA,QACzD;AAGA,YAAI,KAAK,qBAAqB,KAAK,qBAAqB,aAAa;AACnE,cAAI;AACF,kBAAM,KAAK,kBAAkB,WAAW;AAAA,UAC1C,QAAQ;AAAA,UAER;AACA,eAAK,oBAAoB;AAAA,QAC3B;AAEA,cAAM,WAAW,oBAAoB,KAAK,IAAI;AAC9C,cAAM,SAAiB;AAAA,UACrB,WAAW;AAAA,UACX,KAAK,CAAC,YAA0B;AAC9B,oBAAQ,IAAI,GAAG,QAAQ,KAAK,OAAO,EAAE;AAAA,UACvC;AAAA,UACA,OAAO,CAAC,YAA0B;AAChC,oBAAQ,MAAM,GAAG,QAAQ,KAAK,OAAO,EAAE;AAAA,UACzC;AAAA,UACA,MAAM,CAAC,YAA0B;AAC/B,oBAAQ,KAAK,GAAG,QAAQ,KAAK,OAAO,EAAE;AAAA,UACxC;AAAA,QACF;AAEA,cAAM,WAAW,MAAMA;AAAA,UACrB;AAAA,YACE;AAAA,YACA,QAAQ,YAAY;AAAA,YACpB,kBAAkB,YAAY;AAAA,YAC9B,cAAc
,YAAY;AAAA,YAC1B,cAAc,YAAY;AAAA,YAC1B,eAAe,YAAY;AAAA,UAC7B;AAAA,UACA;AAAA,QACF;AAEA,aAAK,oBAAoB;AACzB,aAAK,mBAAmB;AAExB,eAAO,EAAE,UAAU,YAAY;AAAA,MACjC;AAAA;AAAA;AAAA;AAAA,MAKA,MAAM,gBAA+B;AACnC,YAAI,KAAK,mBAAmB;AAC1B,cAAI;AACF,kBAAM,KAAK,kBAAkB,WAAW;AAAA,UAC1C,QAAQ;AAAA,UAER,UAAE;AACA,iBAAK,oBAAoB;AACzB,iBAAK,mBAAmB;AAAA,UAC1B;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,MAAM,KAAK,QAAsC;AAE/C,cAAM,OACJ,MAAM,QAAQ,MAAM,IAAI,SACtB,WAAW,UAAa,WAAW,OAAO,CAAC,MAAW,IACtD,CAAC;AAEL,YAAI,KAAK,WAAW,EAAG;AAEvB,cAAM,EAAE,UAAU,YAAY,IAAI,MAAM,KAAK,oBAAoB;AACjE,cAAM,QAAQ,KAAK,mBAAmB,YAAY,SAAS;AAG3D,cAAM,KAAK,KAAK,OAAO;AACvB,YAAI,MAAM,GAAG,SAAS,QAAQ;AAC5B,gBAAM,oBAAoB,YAAY;AACtC,cAAI,CAAC,mBAAmB;AACtB,kBAAM,IAAI,MAAM,oCAAoC;AAAA,UACtD;AAEA,gBAAM;AAAA,YACJ,SAAS,EAAE,eAAe;AAAA,UAC5B,IAAI,MAAM,OAAO,oCAAoC;AACrD,gBAAM,WAAW,IAAI,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAE/D,cAAI,WAA+B;AAEnC,cAAI,QAAQ,GAAG,WAAW;AACxB,uBAAW,GAAG,UAAU;AAAA,UAC1B,WAAW,mBAAmB,GAAG,WAAW;AAC1C,uBAAW,MAAM,SAAS,kBAAkB,GAAG,UAAU,aAAa;AAAA,UACxE,WAAW,aAAa,GAAG,WAAW;AACpC,uBAAW,MAAM,SAAS;AAAA,cACxB,GAAG,UAAU;AAAA,cACb,GAAG,UAAU;AAAA,YACf;AAAA,UACF;AAEA,cAAI,aAAa,QAAW;AAC1B,kBAAM,IAAI,MAAM,6BAA6B;AAAA,UAC/C;AAEA,gBAAM,UAAU,MAAM,QAAQ;AAAA,YAC5B,KAAK;AAAA,cAAI,CAAC,MACR,SAAS,OAAO,UAAU,CAAuC;AAAA,YACnE;AAAA,UACF;AACA,gBAAM,SAAS,KAAK;AAAA,YAClB;AAAA,YACA,UAAU,QAAQ,IAAI,CAAC,WAAW,EAAE,MAAM,EAAE;AAAA,UAC9C,CAAC;AACD;AAAA,QACF,WAAW,OAAO,QAAW;AAC3B,gBAAM,IAAI,MAAM,0CAA0C;AAAA,QAC5D;AAEA,cAAM,SAAS,KAAK;AAAA,UAClB;AAAA,UACA,UAAU,KAAK,IAAI,CAAC,OAAO,EAAE,OAAO,KAAK,UAAU,CAAC,EAAE,EAAE;AAAA,QAC1D,CAAC;AAAA,MACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAYA,aACE,aACA,gBACA,QACA;AAEA,cAAM,aAAa,uBAAuB,IAAI,MAAM,EAAE,KAAK;AAE3D,cAAM,kBAAsC;AAAA,UAC1C,GAAI,UAAU,CAAC;AAAA,UACf;AAAA,QACF;AACA,YAAI,gBAAgB,oBAAoB,QAAW;AACjD,0BAAgB,kBAAkB,KAAK;AAAA,QACzC;AAEA,YAAI,KAAK,iBAAiB,IAAI,YAAY,IAAI,GAAG;AAC/C,gBAAM,qBAAqB,KAAK,iBAAiB,IAAI,YAAY,IAAI;AACrE,gBAAM,aAAa,mBAAmB;AAAA,YACpC,CAAC,CAAC,GAAG,IAAI,GAAG,MAAM,IAAI,YAAY,gBAAgB;AAAA,UACpD;AAEA,cAAI,CAAC,YAAY;AACf,+BAAmB,KAAK,CAAC,aAAa,gBAAgB,eAAe,CAAC;AAAA,UACxE;AAAA,QACF,OAAO;AACL,eAAK,iBAAiB,IAAI,YAAY,MAAM;AAAA,YAC1C,CAAC,aAAa,gBAAgB,eAAe;AAAA,UAC/C,CAAC;AAAA,QACH;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,YAAY,UAAuB,QAA4B;AAE7D,cAAM,aAAa,uBAAuB,IAAI,MAAM,EAAE,KAAK;AAE3D,cAAM,iBAAoC;AAAA,UACxC,GAAI,UAAU,CAAC;AAAA,UACf;AAAA,QACF;AACA,YAAI,eAAe,oBAAoB,QAAW;AAChD,yBAAe,kBAAkB,KAAK;AAAA,QACxC;AACA,cAAM,aAAa,KAAK,WAAW;AAAA,UACjC,CAAC,aAAa,SAAS,OAAO,YAAY,eAAe;AAAA,QAC3D;AAEA,YAAI,CAAC,YAAY;AACf,eAAK,WAAW,KAAK,EAAE,UAAU,QAAQ,eAAe,CAAC;AAAA,QAC3D;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAeA,SAAS,CAAC,WAA0B,IAAI,cAAc,MAAM,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAUlE,kBAAkB,gBAAgD;AAChE,aAAK,2BAA2B;AAAA,MAClC;AAAA,IACF;AAuEO,IAAM,kBAAN,cAAiC,OAAwB;AAAA,MAe9D,YACE,MACA,QACA,WACA;AACA,YAAI,cAAc,QAAW;AAC3B,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,cAAM,MAAM,UAAU,CAAC,GAAG,WAAW,YAAY,QAAW,KAAK;AACjE,aAAK,YAAY;AACjB,yBAAiB,EAAE,QAAQ,IAAI,MAAM,IAAI;AAAA,MAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAWR,aACE,aACA,gBACA,QACA;AACA,cAAM,eAAyD,CAC7D,eACG;AACH,0BAAmB,YAAY,KAAK,SAAS;AAC7C,iBAAO,eAAe,UAAU;AAAA,QAClC;AACA,cAAM,aAAa,aAAa,cAAc,MAAM;AAAA,MACtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASA,YACE,UACA,QACA;AACA,cAAM,eAA0C,CAAC,eAAe;AAC9D,0BAAmB,YAAY,KAAK,SAAS;AAC7C,iBAAO,SAAS,UAAU;AAAA,QAC5B;AACA,cAAM,YAAY,cAAc,MAAM;AAAA,MACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQA,kBACE,gBACA;AACA,cAAM,eAA6D,CACjE,eACG;AACH,0BAAmB,Y
AAY,KAAK,SAAS;AAC7C,iBAAO,eAAe,UAAU;AAAA,QAClC;AACA,cAAM,kBAAkB,YAAY;AAAA,MACtC;AAAA,IACF;AAAA;AAAA;;;ACztBA,IA4Da,MA4FA;AAxJb;AAAA;AAAA;AAAA;AA4DO,IAAM,OAAN,MAAiB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAsCtB,YACW,MACA,QACT;AAFS;AACA;AAAA,MACR;AAAA,IACL;AAkDO,IAAM,WAAN,MAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQpB,YACW,MACA,QACT;AAFS;AACA;AAET,cAAM,YAAY,iBAAiB,EAAE;AACrC,YAAI,UAAU,IAAI,IAAI,GAAG;AACvB,gBAAM,IAAI,MAAM,sBAAsB,IAAI,iBAAiB;AAAA,QAC7D;AACA,aAAK,kBAAkB,OAAO,cAAc,IAAI;AAChD,kBAAU,IAAI,MAAM,IAAI;AAAA,MAC1B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAUQ,kBACN,cACA,cACM;AACN,YAAI,iBAAiB,QAAQ,iBAAiB,QAAW;AACvD,gBAAM,IAAI;AAAA,YACR,aAAa,YAAY;AAAA,UAC3B;AAAA,QACF;AAEA,cAAM,UAAU,oBAAI,IAAY;AAChC,cAAM,iBAAiB,oBAAI,IAAY;AAEvC,cAAM,eAAe,CACnB,MACA,gBACS;AACT,cAAI,SAAS,QAAQ,SAAS,QAAW;AACvC,kBAAM,UACJ,YAAY,SAAS,IAAI,YAAY,KAAK,MAAM,IAAI,SAAS;AAC/D,kBAAM,IAAI;AAAA,cACR,aAAa,YAAY,0DAA0D,OAAO;AAAA,YAC5F;AAAA,UACF;AAEA,gBAAM,WAAW,KAAK;AAEtB,cAAI,eAAe,IAAI,QAAQ,GAAG;AAChC,kBAAM,kBAAkB,YAAY,QAAQ,QAAQ;AACpD,kBAAM,YACJ,mBAAmB,IACjB,YAAY,MAAM,eAAe,EAAE,OAAO,QAAQ,IAClD,YAAY,OAAO,QAAQ;AAC/B,kBAAM,IAAI;AAAA,cACR,aAAa,YAAY,8CAA8C,UAAU,KAAK,MAAM,CAAC;AAAA,YAC/F;AAAA,UACF;AAEA,cAAI,QAAQ,IAAI,QAAQ,GAAG;AAEzB;AAAA,UACF;AAEA,kBAAQ,IAAI,QAAQ;AACpB,yBAAe,IAAI,QAAQ;AAE3B,cAAI,KAAK,OAAO,YAAY;AAC1B,uBAAW,YAAY,KAAK,OAAO,YAAY;AAC7C,2BAAa,UAAU,CAAC,GAAG,aAAa,QAAQ,CAAC;AAAA,YACnD;AAAA,UACF;AAEA,yBAAe,OAAO,QAAQ;AAAA,QAChC;AAEA,qBAAa,cAAc,CAAC,CAAC;AAAA,MAC/B;AAAA,IACF;AAAA;AAAA;;;AC7OA,IAiCa;AAjCb;AAAA;AAAA;AACA;AAEA;AA8BO,IAAM,YAAN,cAA2B,UAA8B;AAAA,MAuB9D,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,cAAM,MAAM,QAAQ,QAAQ,SAAS,QAAW,gBAAgB;AAChE,cAAM,aAAa,iBAAiB,EAAE;AACtC,YAAI,WAAW,IAAI,IAAI,GAAG;AACxB,gBAAM,IAAI,MAAM,wBAAwB,IAAI,iBAAiB;AAAA,QAC/D;AACA,mBAAW,IAAI,MAAM,IAAI;AAAA,MAC3B;AAAA,IACF;AAAA;AAAA;;;ACvEA,IAuCa,KA8KA;AArNb;AAAA;AAAA;AACA;AAEA;AAoCO,IAAM,MAAN,cAA8B,UAA2B;AAAA;AAAA,MAE9D;AAAA;AAAA,MAEA;AAAA,MAoBA,YACE,MACA,SACA,QACA,QACA,SACA,gBACA;AACA,cAAM,MAAM,UAAU,CAAC,GAAG,QAAQ,OAAO;AACzC,aAAK,WAAW;AAChB,aAAK,iBAAiB,kBAAkB;AAAA,UACtC,SAAS;AAAA,UACT,SAAS,CAAC,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,SAAS,EAAE,CAAC;AAAA,UACtD,YAAY,EAAE,SAAS,CAAC,EAAE;AAAA,QAC5B;AACA,cAAM,OAAO,iBAAiB,EAAE;AAChC,cAAM,MAAM,GAAG,IAAI,GAAG,QAAQ,UAAU,IAAI,OAAO,OAAO,KAAK,EAAE;AACjE,YAAI,KAAK,IAAI,GAAG,GAAG;AACjB,gBAAM,IAAI;AAAA,YACR,6BAA6B,IAAI,gBAAgB,QAAQ,OAAO;AAAA,UAClE;AAAA,QACF;AACA,aAAK,IAAI,KAAK,IAAI;AAGlB,YAAI,QAAQ,MAAM;AAChB,cAAI,OAAO,SAAS;AAElB,kBAAM,sBACJ,OAAO,KAAK,SAAS,IAAI,OAAO,OAAO,EAAE,KACzC,OAAO,SAAS,OAAO,WACtB,OAAO,KAAK,SAAS,OAAO,OAAO,KAClC,OAAO,KAAK,SAAS,OAAO,QAAQ,UACpC,OAAO,KAAK,OAAO,KAAK,SAAS,OAAO,QAAQ,SAAS,CAAC,MACxD;AAEN,gBAAI,qBAAqB;AAEvB,kBAAI,KAAK,IAAI,OAAO,IAAI,GAAG;AACzB,sBAAM,WAAW,KAAK,IAAI,OAAO,IAAI;AACrC,sBAAM,IAAI;AAAA,kBACR,wBAAwB,IAAI,gBAAgB,OAAO,IAAI,yCAAyC,SAAS,IAAI;AAAA,gBAC/G;AAAA,cACF;AACA,mBAAK,IAAI,OAAO,MAAM,IAAI;AAAA,YAC5B,OAAO;AAEL,oBAAM,gBAAgB,GAAG,OAAO,KAAK,QAAQ,OAAO,EAAE,CAAC,IAAI,OAAO,OAAO;AAGzE,kBAAI,KAAK,IAAI,aAAa,GAAG;AAC3B,sBAAM,WAAW,KAAK,IAAI,aAAa;AACvC,sBAAM,IAAI;AAAA,kBACR,wBAAwB,IAAI,gBAAgB,aAAa,yCAAyC,SAAS,IAAI;AAAA,gBACjH;AAAA,cACF;AACA,mBAAK,IAAI,eAAe,IAAI;AAI5B,kBAAI,CAAC,KAAK,IAAI,OAAO,IAAI,GAAG;AAC1B,qBAAK,IAAI,OAAO,MAAM,IAAI;AAAA,cAC5B;AAAA,YACF;AAAA,UACF,OAAO;AAEL,gBAAI,KAAK,IAAI,OAAO,IAAI,GAAG;AACzB,oBAAM,WAAW,KAAK,IAAI,OAAO,IAAI;AACrC,oBAAM,IAAI;AAAA,gBACR,wBAAwB,IAAI,uBAAuB,OAAO,IAAI,yCAAyC,SAAS,IAAI;AAAA,cACtH;AAAA,YACF;AACA,iBAAK,IA
AI,OAAO,MAAM,IAAI;AAAA,UAC5B;AAAA,QACF;AAAA,MACF;AAAA;AAAA;AAAA;AAAA;AAAA,MAMA,aAAa,MAAwB;AACnC,eAAO,KAAK;AAAA,MACd;AAAA,MAEA,MAAM,KAAK,SAAiB,aAA4B;AAEtD,YAAIC;AACJ,YAAI,KAAK,QAAQ,MAAM;AAErB,cAAI,KAAK,OAAO,SAAS;AACvB,kBAAM,sBACJ,KAAK,OAAO,KAAK,SAAS,IAAI,KAAK,OAAO,OAAO,EAAE,KACnD,KAAK,OAAO,SAAS,KAAK,OAAO,WAChC,KAAK,OAAO,KAAK,SAAS,KAAK,OAAO,OAAO,KAC5C,KAAK,OAAO,KAAK,SAAS,KAAK,OAAO,QAAQ,UAC9C,KAAK,OAAO,KACV,KAAK,OAAO,KAAK,SAAS,KAAK,OAAO,QAAQ,SAAS,CACzD,MAAM;AAEV,gBAAI,qBAAqB;AACvB,cAAAA,QAAO,KAAK,OAAO;AAAA,YACrB,OAAO;AACL,cAAAA,QAAO,GAAG,KAAK,OAAO,KAAK,QAAQ,OAAO,EAAE,CAAC,IAAI,KAAK,OAAO,OAAO;AAAA,YACtE;AAAA,UACF,OAAO;AACL,YAAAA,QAAO,KAAK,OAAO;AAAA,UACrB;AAAA,QACF,OAAO;AAEL,UAAAA,QACE,KAAK,QAAQ,UACX,GAAG,KAAK,IAAI,IAAI,KAAK,OAAO,OAAO,KACnC,KAAK;AAAA,QACX;AACA,cAAM,MAAM,IAAI,IAAI,GAAG,QAAQ,QAAQ,OAAO,EAAE,CAAC,QAAQA,KAAI,EAAE;AAE/D,cAAM,eAAe,IAAI;AAEzB,mBAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,WAAkB,GAAG;AAC7D,cAAI,MAAM,QAAQ,KAAK,GAAG;AAExB,uBAAW,QAAQ,OAAO;AACxB,kBAAI,SAAS,QAAQ,SAAS,QAAW;AACvC,6BAAa,OAAO,KAAK,OAAO,IAAI,CAAC;AAAA,cACvC;AAAA,YACF;AAAA,UACF,WAAW,UAAU,QAAQ,UAAU,QAAW;AAChD,yBAAa,OAAO,KAAK,OAAO,KAAK,CAAC;AAAA,UACxC;AAAA,QACF;AAEA,cAAM,WAAW,MAAM,MAAM,KAAK;AAAA,UAChC,QAAQ;AAAA,UACR,SAAS;AAAA,YACP,QAAQ;AAAA,UACV;AAAA,QACF,CAAC;AACD,YAAI,CAAC,SAAS,IAAI;AAChB,gBAAM,IAAI,MAAM,uBAAuB,SAAS,MAAM,EAAE;AAAA,QAC1D;AACA,cAAM,OAAO,MAAM,SAAS,KAAK;AACjC,eAAO;AAAA,MACT;AAAA,IACF;AAMO,IAAM,iBAAiB;AAAA;AAAA;;;ACrN9B,IAqJa;AArJb;AAAA;AAAA;AACA;AAEA;AAMA;AACA;AAEA;AAyIO,IAAM,iBAAN,cAAgC,UAAsC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAM3E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOA;AAAA;AAAA,MAGA;AAAA,MA0CA,YACE,MACA,QACA,QACA,SACA,YACA,kBACA;AACA,cAAM,MAAM,QAAQ,QAAQ,SAAS,YAAY,gBAAgB;AAGjE,YAAI,OAAO,WAAW,QAAW;AAC/B,kBAAQ;AAAA,YACN;AAAA,UAEF;AAEA,cAAI,OAAO,cAAc,QAAW;AAClC,YAAC,OAAe,YAAY,OAAO;AAAA,UACrC;AAAA,QACF;AAGA,YAAI,OAAO,OAAO;AAChB,gBAAM,cACJ,OAAO,OAAO,UAAU,WACtB;AAAA,YACE,GAAG,OAAO;AAAA,YACV,WAAW,OAAO,MAAM,aAAa,OAAO;AAAA,YAC5C,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD,IACA;AAAA,YACE,WAAW,OAAO;AAAA,YAClB;AAAA,YACA,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD;AACJ,eAAK,QAAQ,IAAI;AAAA,YACf;AAAA,YACA;AAAA,YACA,KAAK;AAAA,YACL,KAAK;AAAA,YACL,KAAK;AAAA,UACP;AAAA,QACF;AAEA,YAAI,OAAO,iBAAiB;AAC1B,gBAAM,eAAe;AAAA,YACnB,aAAa;AAAA,YACb,GAAI,OAAO,OAAO,oBAAoB,WACpC;AAAA,cACE,GAAG,OAAO;AAAA,cACV,WAAW,OAAO,gBAAgB,aAAa,OAAO;AAAA,YACxD,IACA,EAAE,WAAW,OAAO,UAAU;AAAA,YAChC,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD;AACA,eAAK,kBAAkB,IAAI;AAAA,YACzB,GAAG,IAAI;AAAA,YACP;AAAA,YACA,WAAY;AAAA,UACd;AAAA,QACF;AAGA,YAAI,OAAO,QAAQ;AACjB,gBAAM,eAAgC;AAAA,YACpC,aAAa,KAAK;AAAA,YAClB,wBAAwB,KAAK;AAAA,YAC7B,GAAI,OAAO,OAAO,WAAW,WAC3B;AAAA,cACE,GAAG,OAAO;AAAA,cACV,WAAW,OAAO,OAAO,aAAa,OAAO;AAAA,YAC/C,IACA,EAAE,WAAW,OAAO,UAAU;AAAA,YAChC,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,UAClD;AACA,eAAK,SAAS,IAAI;AAAA,YAChB;AAAA,YACA;AAAA,YACA,KAAK;AAAA,YACL,KAAK;AAAA,YACL;AAAA,YACA,KAAK;AAAA,UACP;AAEA,UAAC,KAAK,OAAe,iBAAiB;AAAA,QACxC;AAGA,cAAM,qBACJ,OAAO,cAAc,SAAY,OAAO,YAAY,OAAO;AAC7D,YAAI,oBAAoB;AACtB,cAAI,CAAC,KAAK,QAAQ;AAChB,kBAAM,IAAI,MAAM,wCAAwC;AAAA,UAC1D;AAEA,gBAAM,eAAe;AAAA,YACnB,aAAa,KAAK;AAAA,YAClB,iBAAiB,KAAK;AAAA,YACtB,GAAI,OAAO,uBAAuB,WAC/B,qBACD,CAAC;AAAA,YACH,GAAI,OAAO,WAAW,EAAE,SAAS,OAAO,QAAQ;AAAA,YAChD,GAAI,OAAO,QAAQ,EAAE,MAAM,OAAO,KAAK;AAAA,UACzC;AACA,eAAK,YAAY,IAAI;AAAA,YACnB;AAAA,YACA;AAAA,YACA,KAAK;AAAA,YACL,KAAK;AAAA,YACL;AAAA,YACA,KAAK;AAAA,UACP;AAEA,UAAC,KAAK,UAAkB,iBAAiB;AAAA,QAC3C;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;AC3UA,IAuBM,iBAgCO;AAvDb;AAAA;AAAA;AAAA;AAuBA,IAAM,kBAAN,MAAyB;AAAA,MACf;AAAA,MACA;AAAA,MAER,YAA
Y,eAAiC,YAAY,IAAI;AAC3D,aAAK,WAAW,cAAc,OAAO,aAAa,EAAE;AACpD,aAAK,YAAY;AAAA,MACnB;AAAA,MAEA,MAAM,eAAwC;AAC5C,cAAM,QAAa,CAAC;AAEpB,iBAAS,IAAI,GAAG,IAAI,KAAK,WAAW,KAAK;AACvC,gBAAM,EAAE,OAAO,KAAK,IAAI,MAAM,KAAK,SAAS,KAAK;AAEjD,cAAI,MAAM;AACR,mBAAO,EAAE,OAAO,SAAS,MAAM;AAAA,UACjC;AAEA,gBAAM,KAAK,KAAK;AAAA,QAClB;AAEA,eAAO,EAAE,OAAO,SAAS,KAAK;AAAA,MAChC;AAAA,IACF;AAQO,IAAM,cAAN,MAAwB;AAAA,MAG7B,YACW,MACA,QACT;AAFS;AACA;AAET,aAAK,cAAc;AAAA,MACrB;AAAA,MAPQ;AAAA,MASA,gBAAsB;AAC5B,aAAK,UAAU,KAAK,cAAc;AAClC,cAAM,QAAQ,KAAK,eAAe;AAElC,cAAM,QAAQ,OAAO,aAAa,CAAC,MAAM,SAAS;AAClD,cAAM,UAAU,OAAO,aAAa,CAAC,MAAM,IAAI;AAE/C,YAAI,SAAS,KAAK,MAAM;AAAA,UACtB,cAAc,MAAM;AAAA,UACpB,SAAS;AAAA,UACT,SAAS;AAAA,QACX,CAAC;AAAA,MACH;AAAA,MAEQ,gBAAoC;AAC1C,cAAM,WACJ,OAAO,KAAK,OAAO,YAAY,aAC7B,KAAK,OAAO,QAAQ,IACpB,KAAK,OAAO;AAEhB,eAAO,IAAI,gBAAgB,QAAQ;AAAA,MACrC;AAAA,MAEQ,uBAAmC;AACzC,eAAO;AAAA,UACL,SAAS;AAAA,UACT,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MAEQ,iBAAiC;AACvC,cAAM,aAAa,KAAK,qBAAqB;AAE7C,eAAO;AAAA,UACL,SAAS,KAAK,kBAAkB,UAAU;AAAA,UAC1C,WAAW,KAAK,oBAAoB,UAAU;AAAA,UAC9C,MAAM,KAAK,eAAe,UAAU;AAAA,QACtC;AAAA,MACF;AAAA,MAEQ,kBACN,YAC4B;AAC5B,eAAO,IAAI,KAA2B,GAAG,KAAK,IAAI,YAAY;AAAA,UAC5D,KAAK,OAAO,CAAC,MAAM;AACjB,oBAAQ,IAAI,4BAA4B,KAAK,IAAI,KAAK;AACtD,kBAAM,QAAQ,MAAM,KAAK,QAAQ,aAAa;AAC9C,oBAAQ,IAAI,+BAA+B,MAAM,MAAM,MAAM,QAAQ;AACrE,mBAAO;AAAA,UACT;AAAA,UACA,SAAS,WAAW;AAAA,UACpB,SAAS,WAAW;AAAA,QACtB,CAAC;AAAA,MACH;AAAA,MAEQ,oBACN,YAC4C;AAC5C,eAAO,IAAI;AAAA,UACT,GAAG,KAAK,IAAI;AAAA,UACZ;AAAA;AAAA,YAEE,KAAK,OAAO,EAAE,MAAM,MAAM;AACxB,oBAAM,QAAQ;AACd,sBAAQ;AAAA,gBACN,8BAA8B,KAAK,IAAI,SAAS,MAAM,MAAM,MAAM;AAAA,cACpE;AACA,oBAAM,mBAAwB,CAAC;AAE/B,yBAAW,QAAQ,MAAM,OAAO;AAC9B,sBAAM,cAAc,MAAM,KAAK,OAAO,UAAU,IAAI;AACpD,iCAAiB,KAAK,WAAW;AAAA,cACnC;AAEA,sBAAQ;AAAA,gBACN,iCAAiC,iBAAiB,MAAM;AAAA,cAC1D;AACA,qBAAO,EAAE,OAAO,iBAAiB;AAAA,YACnC;AAAA,YACA,SAAS,WAAW;AAAA,YACpB,SAAS,WAAW;AAAA,UACtB;AAAA,QACF;AAAA,MACF;AAAA,MAEQ,eACN,YACkC;AAClC,eAAO,IAAI,KAAiC,GAAG,KAAK,IAAI,SAAS;AAAA,UAC/D,KAAK,OAAO,EAAE,OAAO,iBAAiB,MAAM;AAC1C,oBAAQ;AAAA,cACN,yBAAyB,KAAK,IAAI,SAAS,iBAAiB,MAAM,MAAM;AAAA,YAC1E;AAGA,gBAAI,YAAY,KAAK,OAAO,MAAM;AAEhC,oBAAM,KAAK,OAAO,KAAK,OAAO,iBAAiB,KAAK;AAAA,YACtD,OAAO;AAEL,oBAAM,KAAK,OAAO,KAAK,iBAAiB,KAAK;AAAA,YAC/C;AAEA,oBAAQ,IAAI,qBAAqB;AAAA,UACnC;AAAA,UACA,SAAS,WAAW;AAAA,UACpB,SAAS,WAAW;AAAA,QACtB,CAAC;AAAA,MACH;AAAA;AAAA,MAGA,MAAM,MAAqB;AACzB,gBAAQ,IAAI,0BAA0B,KAAK,IAAI,EAAE;AAEjD,YAAI,cAAc;AAClB,WAAG;AACD,kBAAQ,IAAI,oBAAoB,WAAW,KAAK;AAChD,gBAAM,QAAQ,MAAM,KAAK,QAAQ,aAAa;AAE9C,cAAI,MAAM,MAAM,WAAW,GAAG;AAC5B;AAAA,UACF;AAGA,gBAAM,mBAAwB,CAAC;AAC/B,qBAAW,iBAAiB,MAAM,OAAO;AACvC,kBAAM,kBAAkB,MAAM,KAAK,OAAO,UAAU,aAAa;AACjE,6BAAiB,KAAK,eAAe;AAAA,UACvC;AAGA,cAAI,YAAY,KAAK,OAAO,MAAM;AAEhC,kBAAM,KAAK,OAAO,KAAK,OAAO,gBAAgB;AAAA,UAChD,OAAO;AAEL,kBAAM,KAAK,OAAO,KAAK,gBAAgB;AAAA,UACzC;AAEA,kBAAQ;AAAA,YACN,mBAAmB,WAAW,SAAS,MAAM,MAAM,MAAM;AAAA,UAC3D;AACA;AAEA,cAAI,CAAC,MAAM,SAAS;AAClB;AAAA,UACF;AAAA,QACF,SAAS;AAET,gBAAQ,IAAI,2BAA2B,KAAK,IAAI,EAAE;AAAA,MACpD;AAAA,IACF;AAAA;AAAA;;;ACxNA,IAWa;AAXb;AAAA;AAAA;AAAA;AAEA;AACA;AAQO,IAAM,cAAN,MAAkB;AAAA;AAAA,MAEP,OAAO;AAAA;AAAA,MAGvB;AAAA;AAAA,MAEA;AAAA;AAAA,MAEA;AAAA;AAAA,MAGA;AAAA;AAAA,MAEA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA,MAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAWA,YACE,MACA,OACA,UACA,SAIA;AACA,cAAM,eAAe,iBAAiB,EAAE;AAGxC,YAAI,CAAC,iBAAiB,KAAK,aAAa,IAAI,IAAI,GAAG;AACjD,gBAAM,IAAI,MAAM,yBAAyB,IAAI,iBAAiB;AAAA,QAChE;AACA,qBAAa,IAAI,MAAM,IAAI;AAE3B,aAAK,OAAO;AACZ,aAAK,QAAQ,MAAM;AAAA,UAAI,CAACC,SACtB,OAAOA,SAAQ,WAAWA,OAAM,cAAcA,IAAG;AAAA,QACnD;AACA,aAAK,WAAW,SAAS;AAAA,UAAI,CAACA,SAC5B,OAAOA,SAAQ,WAA
WA,OAAM,cAAcA,IAAG;AAAA,QACnD;AACA,aAAK,gBAAgB,SAAS,iBAAiB,CAAC;AAChD,aAAK,eAAe,SAAS,gBAAgB,CAAC;AAG9C,cAAM,QAAQ,IAAI,MAAM,EAAE;AAC1B,cAAM,WAAW,2BAA2B,KAAK;AAEjD,YAAI,UAAU;AACZ,eAAK,aAAa,SAAS;AAC3B,eAAK,aAAa,SAAS;AAC3B,eAAK,eAAe,SAAS;AAAA,QAC/B;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;AClFA,IAiDM,wBAeO;AAhEb;AAAA;AAAA;AAAA;AAKA;AACA;AACA;AA0CA,IAAM,yBAAyB,CAAC,cAA0C;AACxE,UAAI,OAAO,cAAc,UAAU;AACjC,eAAO;AAAA,MACT,OAAO;AACL,cAAM,IAAI,MAAM,uCAAuC;AAAA,MACzD;AAAA,IACF;AASO,IAAM,mBAAN,cAA4C,YAAY;AAAA;AAAA,MAE7D;AAAA,MAiBA,YACE,SACA,cACA,eACA;AACA,YAAI,kBAAkB,QAAQ;AAC9B,YAAI,OAAO,oBAAoB,UAAU;AACvC,4BAAkB,cAAc,eAAe;AAAA,QACjD;AAEA,YAAI,iBAAiB,UAAa,kBAAkB,QAAW;AAC7D,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cACJ,QAAQ,uBAAuB,YAC7B,QAAQ,cACR,IAAI;AAAA,UACF;AAAA,YACE,QAAQ,aAAa,QAAQ,QAAQ;AAAA,UACvC;AAAA,UACA;AAAA,YACE,eACE,QAAQ,aAAa,iBAAiB,QAAQ;AAAA,YAChD,QACE,QAAQ,aAAa,UACrB,QAAQ;AAAA,UAEZ;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAEJ,YAAI,YAAY,SAAS,QAAQ,sBAAsB;AACrD,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA;AAAA,UACE,QAAQ;AAAA,UACR;AAAA,YACE,uBAAuB;AAAA,cACrB,MAAM,QAAQ;AAAA,cACd,kBAAkB,YAAY;AAAA,cAC9B,QAAQ;AAAA,YACV,CAAC;AAAA;AAAA;AAAA,UAGH;AAAA,UACA,CAAC,SAAS,QAAQ,oBAAoB,CAAC;AAAA,UACvC;AAAA,YACE,eAAe,QAAQ;AAAA,YACvB,cAAc,CAAC,WAAW;AAAA,UAC5B;AAAA,QACF;AAEA,aAAK,cAAc;AAAA,MACrB;AAAA,IACF;AAAA;AAAA;;;AChJA,IASa;AATb;AAAA;AAAA;AAAA;AACA;AAEA;AAMO,IAAM,OAAN,cAAmB,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOpC,YACE,MACA,iBACA,YACA;AACA,YAAI,OAAO,oBAAoB,UAAU;AACvC,4BAAkB,cAAc,eAAe;AAAA,QACjD;AAEA;AAAA,UACE;AAAA,UACA;AAAA,YACE,6BAA6B,IAAI;AAAA,eAC1B,eAAe,GAAG,KAAK;AAAA,UAChC;AAAA,UACA,CAAC,SAAS,IAAI,CAAC;AAAA,UACf;AAAA,YACE,eAAe;AAAA,UACjB;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA;AAAA;;;ACrCA,IAMY;AANZ;AAAA;AAAA;AAMO,IAAK,YAAL,kBAAKC,eAAL;AAML,MAAAA,WAAA,mBAAgB;AAOhB,MAAAA,WAAA,wBAAqB;AAOrB,MAAAA,WAAA,wBAAqB;AApBX,aAAAA;AAAA,OAAA;AAAA;AAAA;;;ACNZ,IAyBM,sBAWO;AApCb;AAAA;AAAA;AACA;AAwBA,IAAM,uBAAuB;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEO,IAAM,SAAN,MAAa;AAAA,MAClB;AAAA,MACA;AAAA,MACA;AAAA,MACQ;AAAA,MAER,YACE,MACA,cACA,QACA;AACA,aAAK,OAAO;AACZ,aAAK,SAAS;AAGd,YAAI,CAAC,KAAK,OAAO,WAAW;AAC1B,gBAAM,IAAI;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAEA,cAAM,YAAY,KAAK,OAAO;AAG9B,YAAI,cAAc,KAAK;AACrB,gBAAM,IAAI;AAAA,YACR,oFAAoF,qBAAqB,KAAK,IAAI,CAAC;AAAA,UACrH;AAAA,QACF;AAGA,YAAI,UAAU,SAAS,GAAG,GAAG;AAC3B,gBAAM,IAAI;AAAA,YACR,qEAAqE,SAAS;AAAA,UAChF;AAAA,QACF;AAGA,mBAAW,YAAY,sBAAsB;AAC3C,cAAI,cAAc,YAAY,UAAU,WAAW,GAAG,QAAQ,GAAG,GAAG;AAClE,kBAAM,IAAI;AAAA,cACR,gDAAgD,qBAAqB,KAAK,IAAI,CAAC,WAAW,SAAS;AAAA,YACrG;AAAA,UACF;AAAA,QACF;AAEA,aAAK,UAAU,KAAK,UAAU,YAAY;AAC1C,aAAK,UACH,OAAO,iBAAiB,aAAa,SAAY;AAEnD,cAAM,UAAU,iBAAiB,EAAE;AACnC,YAAI,QAAQ,IAAI,IAAI,GAAG;AACrB,gBAAM,IAAI,MAAM,oBAAoB,IAAI,iBAAiB;AAAA,QAC3D;AAGA,YAAI,KAAK,OAAO,WAAW;AACzB,qBAAW,CAAC,cAAc,WAAW,KAAK,SAAS;AACjD,gBAAI,YAAY,OAAO,cAAc,KAAK,OAAO,WAAW;AAC1D,oBAAM,IAAI;AAAA,gBACR,0BAA0B,KAAK,OAAO,SAAS,qCAAqC,YAAY;AAAA,cAClG;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAEA,gBAAQ,IAAI,MAAM,IAAI;AAAA,MACxB;AAAA,MAEQ,UAAU,cAA2D;AAC3E,YAAI,OAAO,iBAAiB,YAAY;AACtC,iBAAO;AAAA,QACT;AAEA,cAAM,MAAM;AAEZ,YAAI,OAAO,IAAI,WAAW,YAAY;AACpC,iBAAO,CAAC,KAAK,QAAQ;AACnB,gBAAI,OAAQ,KAAK,KAAK,CAAC,QAAc;AACnC,kBAAI,KAAK;AACP,wBAAQ,MAAM,yBAAyB,GAAG;AAC1C,oBAAI,CAAC,IAAI,aAAa;AACpB,sBAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,sBAAI,IAAI,KAAK,UAAU,EAAE,OAAO,wBAAwB,CAAC,CAAC;AAAA,gBAC5D;AAAA,cACF;AAAA,YACF,CAAC;AAAA,UACH;AAAA,QACF;AAEA,YAAI,OAAO,IAAI,aAAa,YAAY;AACtC,iBAAO,IAAI,SAAS;AAAA,QACtB;AAIA,YAAI,OAAO,IAAI,YAAY,YAAY;AAErC,gBAAM,UAAU,IAAI;AACpB,gBAAM,eAAe;AAKrB,cAAI,e
AA4C;AAEhD,iBAAO,OAAO,KAAK,QAAQ;AAEzB,gBAAI,iBAAiB,MAAM;AACzB,6BACE,OAAO,aAAa,UAAU,aAC5B,aAAa,MAAM,IACnB,QAAQ,QAAQ;AAAA,YACtB;AACA,kBAAM;AACN,oBAAQ,KAAK,GAAG;AAAA,UAClB;AAAA,QACF;AAEA,cAAM,IAAI;AAAA,UACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAYF;AAAA,MACF;AAAA,MAEA,YAAsC;AACpC,eAAO,KAAK;AAAA,MACd;AAAA,IACF;AAAA;AAAA;;;ACxJO,SAAS,YAAyC;AACvD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,SAAS,MAA0C;AACjE,SAAO,iBAAiB,EAAE,OAAO,IAAI,IAAI;AAC3C;AAMO,SAAS,aAAuC;AACrD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,UAAU,MAAuC;AAC/D,SAAO,iBAAiB,EAAE,QAAQ,IAAI,IAAI;AAC5C;AAMO,SAAS,gBAA6C;AAC3D,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,aAAa,MAA0C;AACrE,SAAO,iBAAiB,EAAE,WAAW,IAAI,IAAI;AAC/C;AAMO,SAAS,UAAiC;AAC/C,SAAO,iBAAiB,EAAE;AAC5B;AAaO,SAAS,OAAO,YAA0C;AAC/D,QAAM,WAAW,iBAAiB;AAGlC,QAAM,cAAc,SAAS,KAAK,IAAI,UAAU;AAChD,MAAI,aAAa;AACf,WAAO;AAAA,EACT;AAGA,QAAM,gBAAgB,oBAAI,IAAwB;AAClD,QAAM,UAAU,oBAAI,IAAsB;AAE1C,WAAS,KAAK,QAAQ,CAAC,KAAK,QAAQ;AAElC,UAAM,WAAW,IAAI;AACrB,QAAI,CAAC,cAAc,IAAI,QAAQ,GAAG;AAChC,oBAAc,IAAI,UAAU,CAAC,CAAC;AAAA,IAChC;AACA,kBAAc,IAAI,QAAQ,EAAG,KAAK,GAAG;AAGrC,QAAI,IAAI,OAAO,MAAM;AACnB,cAAQ,IAAI,IAAI,OAAO,MAAM,GAAG;AAAA,IAClC;AAAA,EACF,CAAC;AAGD,QAAM,aAAa,cAAc,IAAI,UAAU;AAC/C,MAAI,cAAc,WAAW,WAAW,GAAG;AACzC,WAAO,WAAW,CAAC;AAAA,EACrB;AAGA,SAAO,QAAQ,IAAI,UAAU;AAC/B;AAMO,SAAS,kBAA4C;AAC1D,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,eAAe,MAAuC;AACpE,SAAO,iBAAiB,EAAE,aAAa,IAAI,IAAI;AACjD;AAMO,SAAS,eAAsC;AACpD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,YAAY,MAAoC;AAC9D,SAAO,iBAAiB,EAAE,UAAU,IAAI,IAAI;AAC9C;AAMO,SAAS,aAAkC;AAChD,SAAO,iBAAiB,EAAE;AAC5B;AAOO,SAAS,UAAU,MAAkC;AAC1D,SAAO,iBAAiB,EAAE,QAAQ,IAAI,IAAI;AAC5C;AA9KA;AAAA;AAAA;AAgBA;AAAA;AAAA;;;AChBA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgDA;AACA;AAUA;AAGA;AACA;AAMA;AACA;AACA;AAIA;AACA;AACA;AACA;AAOA;AAAA;AAAA;","names":["sql","path","http","import_client","init_helpers","init_helpers","init_helpers","init_helpers","process","path","import_node_crypto","getClickhouseClient","import_node_crypto","getKafkaProducer","path","sql","LifeCycle"]}