@514labs/moose-lib 0.6.295-ci-20-gbe187727 → 0.6.295

This diff shows the contents of two package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
@@ -1 +1 @@
- {"version":3,"sources":["../src/commons.ts","../src/moose-runner.ts","../src/dmv2/internal.ts","../src/sqlHelpers.ts","../src/dmv2/sdk/olapTable.ts","../src/dmv2/sdk/stream.ts","../src/index.ts","../src/consumption-apis/helpers.ts","../src/consumption-apis/runner.ts","../src/cluster-utils.ts","../src/clients/redisClient.ts","../src/consumption-apis/standalone.ts","../src/utilities/dataParser.ts","../src/utilities/json.ts","../src/blocks/runner.ts","../src/streaming-functions/runner.ts","../src/moduleExportSerializer.ts","../src/consumption-apis/exportTypeSerializer.ts","../src/scripts/runner.ts","../src/scripts/activity.ts","../src/scripts/logger.ts"],"sourcesContent":["import http from \"http\";\nimport { createClient } from \"@clickhouse/client\";\nimport { KafkaJS } from \"@514labs/kafka-javascript\";\nimport { SASLOptions } from \"@514labs/kafka-javascript/types/kafkajs\";\nconst { Kafka } = KafkaJS;\ntype Kafka = KafkaJS.Kafka;\ntype Consumer = KafkaJS.Consumer;\nexport type Producer = KafkaJS.Producer;\n\n/**\n * Utility function for compiler-related logging that can be disabled via environment variable.\n * Set MOOSE_DISABLE_COMPILER_LOGS=true to suppress these logs (useful for testing environments).\n */\n\n/**\n * Returns true if the value is a common truthy string: \"1\", \"true\", \"yes\", \"on\" (case-insensitive).\n */\nfunction isTruthy(value: string | undefined): boolean {\n if (!value) return false;\n switch (value.trim().toLowerCase()) {\n case \"1\":\n case \"true\":\n case \"yes\":\n case \"on\":\n return true;\n default:\n return false;\n }\n}\n\nexport const compilerLog = (message: string) => {\n if (!isTruthy(process.env.MOOSE_DISABLE_COMPILER_LOGS)) {\n console.log(message);\n }\n};\n\nexport const antiCachePath = (path: string) =>\n `${path}?num=${Math.random().toString()}&time=${Date.now()}`;\n\nexport const getFileName = (filePath: string) => {\n const regex = /\\/([^\\/]+)\\.ts/;\n const matches = filePath.match(regex);\n if (matches && matches.length > 1) {\n return matches[1];\n }\n return \"\";\n};\n\ninterface ClientConfig {\n username: string;\n password: string;\n database: string;\n useSSL: string;\n host: string;\n port: string;\n}\n\nexport const getClickhouseClient = ({\n username,\n password,\n database,\n useSSL,\n host,\n port,\n}: ClientConfig) => {\n const protocol =\n useSSL === \"1\" || useSSL.toLowerCase() === \"true\" ? \"https\" : \"http\";\n console.log(`Connecting to Clickhouse at ${protocol}://${host}:${port}`);\n return createClient({\n url: `${protocol}://${host}:${port}`,\n username: username,\n password: password,\n database: database,\n application: \"moose\",\n // Note: wait_end_of_query is configured per operation type, not globally\n // to preserve SELECT query performance while ensuring INSERT/DDL reliability\n });\n};\n\nexport type CliLogData = {\n message_type?: \"Info\" | \"Success\" | \"Error\" | \"Highlight\";\n action: string;\n message: string;\n};\n\nexport const cliLog: (log: CliLogData) => void = (log) => {\n const req = http.request({\n port: parseInt(process.env.MOOSE_MANAGEMENT_PORT ?? 
\"5001\"),\n method: \"POST\",\n path: \"/logs\",\n });\n\n req.on(\"error\", (err: Error) => {\n console.log(`Error ${err.name} sending CLI log.`, err.message);\n });\n\n req.write(JSON.stringify({ message_type: \"Info\", ...log }));\n req.end();\n};\n\n/**\n * Method to change .ts, .cts, and .mts to .js, .cjs, and .mjs\n * This is needed because 'import' does not support .ts, .cts, and .mts\n */\nexport function mapTstoJs(filePath: string): string {\n return filePath\n .replace(/\\.ts$/, \".js\")\n .replace(/\\.cts$/, \".cjs\")\n .replace(/\\.mts$/, \".mjs\");\n}\n\nexport const MAX_RETRIES = 150;\nexport const MAX_RETRY_TIME_MS = 1000;\nexport const RETRY_INITIAL_TIME_MS = 100;\n\nexport const MAX_RETRIES_PRODUCER = 150;\nexport const RETRY_FACTOR_PRODUCER = 0.2;\n// Means all replicas need to acknowledge the message\nexport const ACKs = -1;\n\n/**\n * Creates the base producer configuration for Kafka.\n * Used by both the SDK stream publishing and streaming function workers.\n *\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n * @returns Producer configuration object for the Confluent Kafka client\n */\nexport function createProducerConfig(maxMessageBytes?: number) {\n return {\n kafkaJS: {\n idempotent: false, // Not needed for at-least-once delivery\n acks: ACKs,\n retry: {\n retries: MAX_RETRIES_PRODUCER,\n maxRetryTime: MAX_RETRY_TIME_MS,\n },\n },\n \"linger.ms\": 0, // This is to make sure at least once delivery with immediate feedback on the send\n ...(maxMessageBytes && { \"message.max.bytes\": maxMessageBytes }),\n };\n}\n\n/**\n * Parses a comma-separated broker string into an array of valid broker addresses.\n * Handles whitespace trimming and filters out empty elements.\n *\n * @param brokerString - Comma-separated broker addresses (e.g., \"broker1:9092, broker2:9092, , broker3:9092\")\n * @returns Array of trimmed, non-empty broker addresses\n */\nconst parseBrokerString = (brokerString: string): string[] =>\n brokerString\n .split(\",\")\n .map((b) => b.trim())\n .filter((b) => b.length > 0);\n\nexport type KafkaClientConfig = {\n clientId: string;\n broker: string;\n securityProtocol?: string; // e.g. \"SASL_SSL\" or \"PLAINTEXT\"\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string; // e.g. 
\"scram-sha-256\", \"plain\"\n};\n\n/**\n * Dynamically creates and connects a KafkaJS producer using the provided configuration.\n * Returns a connected producer instance.\n *\n * @param cfg - Kafka client configuration\n * @param logger - Logger instance\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n */\nexport async function getKafkaProducer(\n cfg: KafkaClientConfig,\n logger: Logger,\n maxMessageBytes?: number,\n): Promise<Producer> {\n const kafka = await getKafkaClient(cfg, logger);\n\n const producer = kafka.producer(createProducerConfig(maxMessageBytes));\n await producer.connect();\n return producer;\n}\n\n/**\n * Interface for logging functionality\n */\nexport interface Logger {\n logPrefix: string;\n log: (message: string) => void;\n error: (message: string) => void;\n warn: (message: string) => void;\n}\n\nexport const logError = (logger: Logger, e: Error): void => {\n logger.error(e.message);\n const stack = e.stack;\n if (stack) {\n logger.error(stack);\n }\n};\n\n/**\n * Builds SASL configuration for Kafka client authentication\n */\nconst buildSaslConfig = (\n logger: Logger,\n args: KafkaClientConfig,\n): SASLOptions | undefined => {\n const mechanism = args.saslMechanism ? args.saslMechanism.toLowerCase() : \"\";\n switch (mechanism) {\n case \"plain\":\n case \"scram-sha-256\":\n case \"scram-sha-512\":\n return {\n mechanism: mechanism,\n username: args.saslUsername || \"\",\n password: args.saslPassword || \"\",\n };\n default:\n logger.warn(`Unsupported SASL mechanism: ${args.saslMechanism}`);\n return undefined;\n }\n};\n\n/**\n * Dynamically creates a KafkaJS client configured with provided settings.\n * Use this to construct producers/consumers with custom options.\n */\nexport const getKafkaClient = async (\n cfg: KafkaClientConfig,\n logger: Logger,\n): Promise<Kafka> => {\n const brokers = parseBrokerString(cfg.broker || \"\");\n if (brokers.length === 0) {\n throw new Error(`No valid broker addresses found in: \"${cfg.broker}\"`);\n }\n\n logger.log(`Creating Kafka client with brokers: ${brokers.join(\", \")}`);\n logger.log(`Security protocol: ${cfg.securityProtocol || \"plaintext\"}`);\n logger.log(`Client ID: ${cfg.clientId}`);\n\n const saslConfig = buildSaslConfig(logger, cfg);\n\n return new Kafka({\n kafkaJS: {\n clientId: cfg.clientId,\n brokers,\n ssl: cfg.securityProtocol === \"SASL_SSL\",\n ...(saslConfig && { sasl: saslConfig }),\n retry: {\n initialRetryTime: RETRY_INITIAL_TIME_MS,\n maxRetryTime: MAX_RETRY_TIME_MS,\n retries: MAX_RETRIES,\n },\n },\n });\n};\n","#!/usr/bin/env node\n\n// This file is use to run the proper runners for moose based on the\n// the arguments passed to the file.\n// It registers ts-node to be able to interpret user code.\n\nimport { register } from \"ts-node\";\n\n// We register ts-node to be able to interpret TS user code.\nif (\n process.argv[2] == \"consumption-apis\" ||\n process.argv[2] == \"consumption-type-serializer\" ||\n process.argv[2] == \"dmv2-serializer\" ||\n // Streaming functions for dmv2 need to load moose internals\n process.argv[2] == \"streaming-functions\" ||\n process.argv[2] == \"scripts\"\n) {\n register({\n require: [\"tsconfig-paths/register\"],\n esm: true,\n experimentalTsImportSpecifiers: true,\n compiler: \"ts-patch/compiler\",\n compilerOptions: {\n plugins: [\n {\n transform: `./node_modules/@514labs/moose-lib/dist/compilerPlugin.js`,\n transformProgram: true,\n },\n {\n transform: \"typia/lib/transform\",\n },\n ],\n 
experimentalDecorators: true,\n },\n });\n} else {\n register({\n esm: true,\n experimentalTsImportSpecifiers: true,\n });\n}\n\nimport { dumpMooseInternal } from \"./dmv2/internal\";\nimport { runBlocks } from \"./blocks/runner\";\nimport { runApis } from \"./consumption-apis/runner\";\nimport { runStreamingFunctions } from \"./streaming-functions/runner\";\nimport { runExportSerializer } from \"./moduleExportSerializer\";\nimport { runApiTypeSerializer } from \"./consumption-apis/exportTypeSerializer\";\nimport { runScripts } from \"./scripts/runner\";\nimport process from \"process\";\n\nimport { Command } from \"commander\";\n\n// Import the StreamingFunctionArgs type\nimport type { StreamingFunctionArgs } from \"./streaming-functions/runner\";\n\nconst program = new Command();\n\nprogram\n .name(\"moose-runner\")\n .description(\"Moose runner for various operations\")\n .version(\"1.0.0\");\n\nprogram\n .command(\"dmv2-serializer\")\n .description(\"Load DMv2 index\")\n .action(() => {\n dumpMooseInternal();\n });\n\nprogram\n .command(\"export-serializer\")\n .description(\"Run export serializer\")\n .argument(\"<target-model>\", \"Target model to serialize\")\n .action((targetModel) => {\n runExportSerializer(targetModel);\n });\n\nprogram\n .command(\"blocks\")\n .description(\"Run blocks\")\n .argument(\"<blocks-dir>\", \"Directory containing blocks\")\n .argument(\"<clickhouse-db>\", \"Clickhouse database name\")\n .argument(\"<clickhouse-host>\", \"Clickhouse host\")\n .argument(\"<clickhouse-port>\", \"Clickhouse port\")\n .argument(\"<clickhouse-username>\", \"Clickhouse username\")\n .argument(\"<clickhouse-password>\", \"Clickhouse password\")\n .option(\"--clickhouse-use-ssl\", \"Use SSL for Clickhouse connection\", false)\n .action(\n (\n blocksDir,\n clickhouseDb,\n clickhouseHost,\n clickhousePort,\n clickhouseUsername,\n clickhousePassword,\n options,\n ) => {\n runBlocks({\n blocksDir,\n clickhouseConfig: {\n database: clickhouseDb,\n host: clickhouseHost,\n port: clickhousePort,\n username: clickhouseUsername,\n password: clickhousePassword,\n useSSL: options.clickhouseUseSsl,\n },\n });\n },\n );\n\nprogram\n .command(\"consumption-apis\")\n .description(\"Run consumption APIs\")\n .argument(\"<consumption-dir>\", \"Directory containing consumption APIs\")\n .argument(\"<clickhouse-db>\", \"Clickhouse database name\")\n .argument(\"<clickhouse-host>\", \"Clickhouse host\")\n .argument(\"<clickhouse-port>\", \"Clickhouse port\")\n .argument(\"<clickhouse-username>\", \"Clickhouse username\")\n .argument(\"<clickhouse-password>\", \"Clickhouse password\")\n .option(\"--clickhouse-use-ssl\", \"Use SSL for Clickhouse connection\", false)\n .option(\"--jwt-secret <secret>\", \"JWT public key for verification\")\n .option(\"--jwt-issuer <issuer>\", \"Expected JWT issuer\")\n .option(\"--jwt-audience <audience>\", \"Expected JWT audience\")\n .option(\n \"--enforce-auth\",\n \"Enforce authentication on all consumption APIs\",\n false,\n )\n .option(\"--temporal-url <url>\", \"Temporal server URL\")\n .option(\"--temporal-namespace <namespace>\", \"Temporal namespace\")\n .option(\"--client-cert <path>\", \"Path to client certificate\")\n .option(\"--client-key <path>\", \"Path to client key\")\n .option(\"--api-key <key>\", \"API key for authentication\")\n .option(\"--is-dmv2\", \"Whether this is a DMv2 consumption\", false)\n .option(\"--proxy-port <port>\", \"Port to run the proxy server on\", parseInt)\n .option(\n \"--worker-count <count>\",\n \"Number of worker 
processes for the consumption API cluster\",\n parseInt,\n )\n .action(\n (\n apisDir,\n clickhouseDb,\n clickhouseHost,\n clickhousePort,\n clickhouseUsername,\n clickhousePassword,\n options,\n ) => {\n runApis({\n apisDir,\n clickhouseConfig: {\n database: clickhouseDb,\n host: clickhouseHost,\n port: clickhousePort,\n username: clickhouseUsername,\n password: clickhousePassword,\n useSSL: options.clickhouseUseSsl,\n },\n jwtConfig: {\n secret: options.jwtSecret,\n issuer: options.jwtIssuer,\n audience: options.jwtAudience,\n },\n temporalConfig: {\n url: options.temporalUrl,\n namespace: options.temporalNamespace,\n clientCert: options.clientCert,\n clientKey: options.clientKey,\n apiKey: options.apiKey,\n },\n enforceAuth: options.enforceAuth,\n isDmv2: options.isDmv2,\n proxyPort: options.proxyPort,\n workerCount: options.workerCount,\n });\n },\n );\n\nprogram\n .command(\"streaming-functions\")\n .description(\"Run streaming functions\")\n .argument(\"<source-topic>\", \"Source topic configuration as JSON\")\n .argument(\"<function-file-path>\", \"Path to the function file\")\n .argument(\n \"<broker>\",\n \"Kafka broker address(es) - comma-separated for multiple brokers (e.g., 'broker1:9092, broker2:9092'). Whitespace around commas is automatically trimmed.\",\n )\n .argument(\"<max-subscriber-count>\", \"Maximum number of subscribers\")\n .option(\"--target-topic <target-topic>\", \"Target topic configuration as JSON\")\n .option(\"--sasl-username <username>\", \"SASL username\")\n .option(\"--sasl-password <password>\", \"SASL password\")\n .option(\"--sasl-mechanism <mechanism>\", \"SASL mechanism\")\n .option(\"--security-protocol <protocol>\", \"Security protocol\")\n .option(\"--is-dmv2\", \"Whether this is a DMv2 function\", false)\n .action(\n (sourceTopic, functionFilePath, broker, maxSubscriberCount, options) => {\n const config: StreamingFunctionArgs = {\n sourceTopic: JSON.parse(sourceTopic),\n targetTopic:\n options.targetTopic ? 
JSON.parse(options.targetTopic) : undefined,\n functionFilePath,\n broker,\n maxSubscriberCount: parseInt(maxSubscriberCount),\n isDmv2: options.isDmv2,\n saslUsername: options.saslUsername,\n saslPassword: options.saslPassword,\n saslMechanism: options.saslMechanism,\n securityProtocol: options.securityProtocol,\n };\n runStreamingFunctions(config);\n },\n );\n\nprogram\n .command(\"consumption-type-serializer\")\n .description(\"Run consumption type serializer\")\n .argument(\"<target-model>\", \"Target model to serialize\")\n .action((targetModel) => {\n runApiTypeSerializer(targetModel);\n });\n\nprogram\n .command(\"scripts\")\n .description(\"Run scripts\")\n .option(\"--temporal-url <url>\", \"Temporal server URL\")\n .option(\"--temporal-namespace <namespace>\", \"Temporal namespace\")\n .option(\"--client-cert <path>\", \"Path to client certificate\")\n .option(\"--client-key <path>\", \"Path to client key\")\n .option(\"--api-key <key>\", \"API key for authentication\")\n .action((options) => {\n runScripts({\n temporalConfig: {\n url: options.temporalUrl,\n namespace: options.temporalNamespace,\n clientCert: options.clientCert,\n clientKey: options.clientKey,\n apiKey: options.apiKey,\n },\n });\n });\n\nprogram.parse();\n","/**\n * @module internal\n * Internal implementation details for the Moose v2 data model (dmv2).\n *\n * This module manages the registration of user-defined dmv2 resources (Tables, Streams, APIs, etc.)\n * and provides functions to serialize these resources into a JSON format (`InfrastructureMap`)\n * expected by the Moose infrastructure management system. It also includes helper functions\n * to retrieve registered handler functions (for streams and APIs) and the base class\n * (`TypedBase`) used by dmv2 resource classes.\n *\n * @internal This module is intended for internal use by the Moose library and compiler plugin.\n * Its API might change without notice.\n */\nimport process from \"process\";\nimport { Api, IngestApi, SqlResource, Task, Workflow } from \"./index\";\nimport { IJsonSchemaCollection } from \"typia/src/schemas/json/IJsonSchemaCollection\";\nimport { Column } from \"../dataModels/dataModelTypes\";\nimport { ClickHouseEngines, ApiUtil } from \"../index\";\nimport {\n OlapTable,\n OlapConfig,\n ReplacingMergeTreeConfig,\n SummingMergeTreeConfig,\n ReplicatedMergeTreeConfig,\n ReplicatedReplacingMergeTreeConfig,\n ReplicatedAggregatingMergeTreeConfig,\n ReplicatedSummingMergeTreeConfig,\n ReplicatedCollapsingMergeTreeConfig,\n ReplicatedVersionedCollapsingMergeTreeConfig,\n S3QueueConfig,\n} from \"./sdk/olapTable\";\nimport {\n ConsumerConfig,\n KafkaSchemaConfig,\n Stream,\n TransformConfig,\n} from \"./sdk/stream\";\nimport { compilerLog } from \"../commons\";\nimport { WebApp } from \"./sdk/webApp\";\nimport { MaterializedView } from \"./sdk/materializedView\";\nimport { View } from \"./sdk/view\";\n\n/**\n * Gets the source directory from environment variable or defaults to \"app\"\n */\nfunction getSourceDir(): string {\n return process.env.MOOSE_SOURCE_DIR || \"app\";\n}\n\n/**\n * Client-only mode check. 
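
The `streaming-functions` action above assembles a `StreamingFunctionArgs` value from its CLI arguments; a sketch of an equivalent programmatic call (the topic JSON and file path are placeholders, and the topic shape is whatever the caller serializes):

```ts
const args: StreamingFunctionArgs = {
  sourceTopic: JSON.parse('{"name":"userEvents"}'), // placeholder topic config
  targetTopic: undefined,       // optional; omitted for a consumer-only run
  functionFilePath: "./app/functions/transform.ts", // placeholder path
  broker: "localhost:19092",
  maxSubscriberCount: 1,
  isDmv2: true,
  saslUsername: undefined,
  saslPassword: undefined,
  saslMechanism: undefined,
  securityProtocol: undefined,
};
runStreamingFunctions(args);
```
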
When true, resource registration is permissive\n * (duplicates overwrite silently instead of throwing).\n * Set via MOOSE_CLIENT_ONLY=true environment variable.\n *\n * This enables Next.js apps to import OlapTable definitions for type-safe\n * queries without the Moose runtime, avoiding \"already exists\" errors on HMR.\n *\n * @returns true if MOOSE_CLIENT_ONLY environment variable is set to \"true\"\n */\nexport const isClientOnlyMode = (): boolean =>\n process.env.MOOSE_CLIENT_ONLY === \"true\";\n\n/**\n * Internal registry holding all defined Moose dmv2 resources.\n * Populated by the constructors of OlapTable, Stream, IngestApi, etc.\n * Accessed via `getMooseInternal()`.\n */\nconst moose_internal = {\n tables: new Map<string, OlapTable<any>>(),\n streams: new Map<string, Stream<any>>(),\n ingestApis: new Map<string, IngestApi<any>>(),\n apis: new Map<string, Api<any>>(),\n sqlResources: new Map<string, SqlResource>(),\n workflows: new Map<string, Workflow>(),\n webApps: new Map<string, WebApp>(),\n materializedViews: new Map<string, MaterializedView<any>>(),\n customViews: new Map<string, View>(),\n};\n/**\n * Default retention period for streams if not specified (7 days in seconds).\n */\nconst defaultRetentionPeriod = 60 * 60 * 24 * 7;\n\n/**\n * Engine-specific configuration types using discriminated union pattern\n */\ninterface MergeTreeEngineConfig {\n engine: \"MergeTree\";\n}\n\ninterface ReplacingMergeTreeEngineConfig {\n engine: \"ReplacingMergeTree\";\n ver?: string;\n isDeleted?: string;\n}\n\ninterface AggregatingMergeTreeEngineConfig {\n engine: \"AggregatingMergeTree\";\n}\n\ninterface SummingMergeTreeEngineConfig {\n engine: \"SummingMergeTree\";\n columns?: string[];\n}\n\ninterface CollapsingMergeTreeEngineConfig {\n engine: \"CollapsingMergeTree\";\n sign: string;\n}\n\ninterface VersionedCollapsingMergeTreeEngineConfig {\n engine: \"VersionedCollapsingMergeTree\";\n sign: string;\n ver: string;\n}\n\ninterface ReplicatedMergeTreeEngineConfig {\n engine: \"ReplicatedMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedReplacingMergeTreeEngineConfig {\n engine: \"ReplicatedReplacingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n ver?: string;\n isDeleted?: string;\n}\n\ninterface ReplicatedAggregatingMergeTreeEngineConfig {\n engine: \"ReplicatedAggregatingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedSummingMergeTreeEngineConfig {\n engine: \"ReplicatedSummingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n columns?: string[];\n}\n\ninterface ReplicatedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n}\n\ninterface ReplicatedVersionedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedVersionedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n ver: string;\n}\n\ninterface S3QueueEngineConfig {\n engine: \"S3Queue\";\n s3Path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n headers?: { [key: string]: string };\n}\n\ninterface S3EngineConfig {\n engine: \"S3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n partitionStrategy?: string;\n partitionColumnsInDataFile?: string;\n}\n\ninterface BufferEngineConfig {\n engine: \"Buffer\";\n targetDatabase: string;\n targetTable: string;\n numLayers: number;\n 
minTime: number;\n maxTime: number;\n minRows: number;\n maxRows: number;\n minBytes: number;\n maxBytes: number;\n flushTime?: number;\n flushRows?: number;\n flushBytes?: number;\n}\n\ninterface DistributedEngineConfig {\n engine: \"Distributed\";\n cluster: string;\n targetDatabase: string;\n targetTable: string;\n shardingKey?: string;\n policyName?: string;\n}\n\ninterface IcebergS3EngineConfig {\n engine: \"IcebergS3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n}\n\ninterface KafkaEngineConfig {\n engine: \"Kafka\";\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n}\n\n/**\n * Union type for all supported engine configurations\n */\ntype EngineConfig =\n | MergeTreeEngineConfig\n | ReplacingMergeTreeEngineConfig\n | AggregatingMergeTreeEngineConfig\n | SummingMergeTreeEngineConfig\n | CollapsingMergeTreeEngineConfig\n | VersionedCollapsingMergeTreeEngineConfig\n | ReplicatedMergeTreeEngineConfig\n | ReplicatedReplacingMergeTreeEngineConfig\n | ReplicatedAggregatingMergeTreeEngineConfig\n | ReplicatedSummingMergeTreeEngineConfig\n | ReplicatedCollapsingMergeTreeEngineConfig\n | ReplicatedVersionedCollapsingMergeTreeEngineConfig\n | S3QueueEngineConfig\n | S3EngineConfig\n | BufferEngineConfig\n | DistributedEngineConfig\n | IcebergS3EngineConfig\n | KafkaEngineConfig;\n\n/**\n * JSON representation of an OLAP table configuration.\n */\ninterface TableJson {\n /** The name of the table. */\n name: string;\n /** Array defining the table's columns and their types. */\n columns: Column[];\n /** ORDER BY clause: either array of column names or a single ClickHouse expression. */\n orderBy: string[] | string;\n /** The column name used for the PARTITION BY clause. */\n partitionBy?: string;\n /** SAMPLE BY expression for approximate query processing. */\n sampleByExpression?: string;\n /** PRIMARY KEY expression (overrides column-level primary_key flags when specified). */\n primaryKeyExpression?: string;\n /** Engine configuration with type-safe, engine-specific parameters */\n engineConfig?: EngineConfig;\n /** Optional version string for the table configuration. */\n version?: string;\n /** Optional metadata for the table (e.g., description). */\n metadata?: { description?: string };\n /** Lifecycle management setting for the table. */\n lifeCycle?: string;\n /** Optional table-level settings that can be modified with ALTER TABLE MODIFY SETTING. */\n tableSettings?: { [key: string]: string };\n /** Optional table indexes */\n indexes?: {\n name: string;\n expression: string;\n type: string;\n arguments: string[];\n granularity: number;\n }[];\n /** Optional table-level TTL expression (without leading 'TTL'). */\n ttl?: string;\n /** Optional database name for multi-database support. */\n database?: string;\n /** Optional cluster name for ON CLUSTER support. */\n cluster?: string;\n}\n/**\n * Represents a target destination for data flow, typically a stream.\n */\ninterface Target {\n /** The name of the target resource (e.g., stream name). */\n name: string;\n /** The kind of the target resource. */\n kind: \"stream\"; // may add `| \"table\"` in the future\n /** Optional version string of the target resource's configuration. */\n version?: string;\n /** Optional metadata for the target (e.g., description for function processes). */\n metadata?: { description?: string };\n /** Optional source file path where this transform was declared. 
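
The `engine` field discriminates the `EngineConfig` union declared above, so a `switch` narrows each branch to its engine-specific parameters. A sketch, assuming access within this module (the union type is not exported):

```ts
function describeEngine(cfg: EngineConfig): string {
  switch (cfg.engine) {
    case "ReplacingMergeTree":
      // Narrowed: ver / isDeleted are available here.
      return `ReplacingMergeTree(ver=${cfg.ver ?? "-"})`;
    case "S3Queue":
      // Narrowed: s3Path / format are available here.
      return `S3Queue(${cfg.s3Path}, format=${cfg.format})`;
    case "Buffer":
      return `Buffer(${cfg.targetDatabase}.${cfg.targetTable}, layers=${cfg.numLayers})`;
    default:
      return cfg.engine;
  }
}
```
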
*/\n sourceFile?: string;\n}\n\n/**\n * Represents a consumer attached to a stream.\n */\ninterface Consumer {\n /** Optional version string for the consumer configuration. */\n version?: string;\n /** Optional source file path where this consumer was declared. */\n sourceFile?: string;\n}\n\n/**\n * JSON representation of a Stream/Topic configuration.\n */\ninterface StreamJson {\n /** The name of the stream/topic. */\n name: string;\n /** Array defining the message schema (columns/fields). */\n columns: Column[];\n /** Data retention period in seconds. */\n retentionPeriod: number;\n /** Number of partitions for the stream/topic. */\n partitionCount: number;\n /** Optional name of the OLAP table this stream automatically syncs to. */\n targetTable?: string;\n /** Optional version of the target OLAP table configuration. */\n targetTableVersion?: string;\n /** Optional version string for the stream configuration. */\n version?: string;\n /** List of target streams this stream transforms data into. */\n transformationTargets: Target[];\n /** Flag indicating if a multi-transform function (`_multipleTransformations`) is defined. */\n hasMultiTransform: boolean;\n /** List of consumers attached to this stream. */\n consumers: Consumer[];\n /** Optional description for the stream. */\n metadata?: { description?: string };\n /** Lifecycle management setting for the stream. */\n lifeCycle?: string;\n /** Optional Schema Registry config */\n schemaConfig?: KafkaSchemaConfig;\n}\n/**\n * JSON representation of an Ingest API configuration.\n */\ninterface IngestApiJson {\n /** The name of the Ingest API endpoint. */\n name: string;\n /** Array defining the expected input schema (columns/fields). */\n columns: Column[];\n\n /** The target stream where ingested data is written. */\n writeTo: Target;\n /** The DLQ if the data does not fit the schema. */\n deadLetterQueue?: string;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the ingestion endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n /** JSON schema */\n schema: IJsonSchemaCollection.IV3_1;\n /**\n * Whether this API allows extra fields beyond the defined columns.\n * When true, extra fields in payloads are passed through to streaming functions.\n */\n allowExtraFields?: boolean;\n}\n\n/**\n * JSON representation of an API configuration.\n */\ninterface ApiJson {\n /** The name of the API endpoint. */\n name: string;\n /** Array defining the expected query parameters schema. */\n queryParams: Column[];\n /** JSON schema definition of the API's response body. */\n responseSchema: IJsonSchemaCollection.IV3_1;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the API endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n}\n\n/**\n * Represents the unique signature of an infrastructure component (Table, Topic, etc.).\n * Used for defining dependencies between SQL resources.\n */\ninterface InfrastructureSignatureJson {\n /** A unique identifier for the resource instance (often name + version). */\n id: string;\n /** The kind/type of the infrastructure component. 
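
A serialized `TableJson` entry, per the interface above, looks roughly like the following (illustrative values; the `Column` literal mirrors the shape of the `dlqColumns` entries later in this file):

```ts
const exampleTable: TableJson = {
  name: "events",
  columns: [
    {
      name: "id",
      data_type: "String",
      primary_key: true,
      required: true,
      unique: false,
      default: null,
      annotations: [],
      ttl: null,
      codec: null,
      materialized: null,
      comment: null,
    },
  ],
  orderBy: ["id"],                       // or a single expression string
  engineConfig: { engine: "MergeTree" },
};
```
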
*/\n kind:\n | \"Table\"\n | \"Topic\"\n | \"ApiEndpoint\"\n | \"TopicToTableSyncProcess\"\n | \"View\"\n | \"SqlResource\";\n}\n\ninterface WorkflowJson {\n name: string;\n retries?: number;\n timeout?: string;\n schedule?: string;\n}\n\ninterface WebAppJson {\n name: string;\n mountPath: string;\n metadata?: { description?: string };\n}\n\ninterface SqlResourceJson {\n /** The name of the SQL resource. */\n name: string;\n /** Array of SQL DDL statements required to create the resource. */\n setup: readonly string[];\n /** Array of SQL DDL statements required to drop the resource. */\n teardown: readonly string[];\n\n /** List of infrastructure components (by signature) that this resource reads from. */\n pullsDataFrom: InfrastructureSignatureJson[];\n /** List of infrastructure components (by signature) that this resource writes to. */\n pushesDataTo: InfrastructureSignatureJson[];\n /** Optional source file path where this resource is defined. */\n sourceFile?: string;\n /** Optional source line number where this resource is defined. */\n sourceLine?: number;\n /** Optional source column number where this resource is defined. */\n sourceColumn?: number;\n}\n\n/**\n * JSON representation of a structured Materialized View.\n */\ninterface MaterializedViewJson {\n /** Name of the materialized view */\n name: string;\n /** Database where the MV is created (optional, uses default if not set) */\n database?: string;\n /** The SELECT SQL statement */\n selectSql: string;\n /** Source tables that the SELECT reads from */\n sourceTables: string[];\n /** Target table where transformed data is written */\n targetTable: string;\n /** Target table database (optional) */\n targetDatabase?: string;\n /** Optional source file path where this MV is defined */\n sourceFile?: string;\n}\n\n/**\n * JSON representation of a structured Custom View.\n */\ninterface CustomViewJson {\n /** Name of the view */\n name: string;\n /** Database where the view is created (optional, uses default if not set) */\n database?: string;\n /** The SELECT SQL statement */\n selectSql: string;\n /** Source tables that the SELECT reads from */\n sourceTables: string[];\n /** Optional source file path where this view is defined */\n sourceFile?: string;\n}\n\n/**\n * Type guard: Check if config is S3QueueConfig\n */\nfunction isS3QueueConfig(\n config: OlapConfig<any>,\n): config is S3QueueConfig<any> {\n return \"engine\" in config && config.engine === ClickHouseEngines.S3Queue;\n}\n\n/**\n * Type guard: Check if config has a replicated engine\n * Checks if the engine value is one of the replicated engine types\n */\nfunction hasReplicatedEngine(\n config: OlapConfig<any>,\n): config is\n | ReplicatedMergeTreeConfig<any>\n | ReplicatedReplacingMergeTreeConfig<any>\n | ReplicatedAggregatingMergeTreeConfig<any>\n | ReplicatedSummingMergeTreeConfig<any>\n | ReplicatedCollapsingMergeTreeConfig<any>\n | ReplicatedVersionedCollapsingMergeTreeConfig<any> {\n if (!(\"engine\" in config)) {\n return false;\n }\n\n const engine = config.engine as ClickHouseEngines;\n // Check if engine is one of the replicated engine types\n return (\n engine === ClickHouseEngines.ReplicatedMergeTree ||\n engine === ClickHouseEngines.ReplicatedReplacingMergeTree ||\n engine === ClickHouseEngines.ReplicatedAggregatingMergeTree ||\n engine === ClickHouseEngines.ReplicatedSummingMergeTree ||\n engine === ClickHouseEngines.ReplicatedCollapsingMergeTree ||\n engine === ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree\n );\n}\n\n/**\n * Extract engine 
value from table config, handling both legacy and new formats\n */\nfunction extractEngineValue(config: OlapConfig<any>): ClickHouseEngines {\n // Legacy config without engine property defaults to MergeTree\n if (!(\"engine\" in config)) {\n return ClickHouseEngines.MergeTree;\n }\n\n // All engines (replicated and non-replicated) have engine as direct value\n return config.engine as ClickHouseEngines;\n}\n\n/**\n * Convert engine config for basic MergeTree engines\n */\nfunction convertBasicEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n switch (engine) {\n case ClickHouseEngines.MergeTree:\n return { engine: \"MergeTree\" };\n\n case ClickHouseEngines.AggregatingMergeTree:\n return { engine: \"AggregatingMergeTree\" };\n\n case ClickHouseEngines.ReplacingMergeTree: {\n const replacingConfig = config as ReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplacingMergeTree\",\n ver: replacingConfig.ver,\n isDeleted: replacingConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.SummingMergeTree: {\n const summingConfig = config as SummingMergeTreeConfig<any>;\n return {\n engine: \"SummingMergeTree\",\n columns: summingConfig.columns,\n };\n }\n\n case ClickHouseEngines.CollapsingMergeTree: {\n const collapsingConfig = config as any; // CollapsingMergeTreeConfig<any>\n return {\n engine: \"CollapsingMergeTree\",\n sign: collapsingConfig.sign,\n };\n }\n\n case ClickHouseEngines.VersionedCollapsingMergeTree: {\n const versionedConfig = config as any; // VersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"VersionedCollapsingMergeTree\",\n sign: versionedConfig.sign,\n ver: versionedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert engine config for replicated MergeTree engines\n */\nfunction convertReplicatedEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n // First check if this is a replicated engine config\n if (!hasReplicatedEngine(config)) {\n return undefined;\n }\n\n switch (engine) {\n case ClickHouseEngines.ReplicatedMergeTree: {\n const replicatedConfig = config as ReplicatedMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedReplacingMergeTree: {\n const replicatedConfig =\n config as ReplicatedReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedReplacingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n ver: replicatedConfig.ver,\n isDeleted: replicatedConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.ReplicatedAggregatingMergeTree: {\n const replicatedConfig =\n config as ReplicatedAggregatingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedAggregatingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedSummingMergeTree: {\n const replicatedConfig = config as ReplicatedSummingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedSummingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n columns: replicatedConfig.columns,\n };\n }\n\n case ClickHouseEngines.ReplicatedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedCollapsingMergeTree\",\n keeperPath: 
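
`extractEngineValue` defaults legacy configs (no `engine` key) to MergeTree, and `convertBasicEngineConfig` maps each non-replicated engine to its JSON form. Two illustrative calls (the casts stand in for real `OlapConfig` values):

```ts
// Legacy config without an engine key -> MergeTree
extractEngineValue({ orderByFields: ["id"] } as OlapConfig<any>);
// -> ClickHouseEngines.MergeTree

convertBasicEngineConfig(ClickHouseEngines.ReplacingMergeTree, {
  engine: ClickHouseEngines.ReplacingMergeTree,
  ver: "updatedAt",
} as OlapConfig<any>);
// -> { engine: "ReplacingMergeTree", ver: "updatedAt", isDeleted: undefined }
```
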
replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n };\n }\n\n case ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedVersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedVersionedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n ver: replicatedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert S3Queue engine config\n * Uses type guard for fully type-safe property access\n */\nfunction convertS3QueueEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!isS3QueueConfig(config)) {\n return undefined;\n }\n\n return {\n engine: \"S3Queue\",\n s3Path: config.s3Path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n headers: config.headers,\n };\n}\n\n/**\n * Convert S3 engine config\n */\nfunction convertS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.S3) {\n return undefined;\n }\n\n return {\n engine: \"S3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n partitionStrategy: config.partitionStrategy,\n partitionColumnsInDataFile: config.partitionColumnsInDataFile,\n };\n}\n\n/**\n * Convert Buffer engine config\n */\nfunction convertBufferEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Buffer) {\n return undefined;\n }\n\n return {\n engine: \"Buffer\",\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n numLayers: config.numLayers,\n minTime: config.minTime,\n maxTime: config.maxTime,\n minRows: config.minRows,\n maxRows: config.maxRows,\n minBytes: config.minBytes,\n maxBytes: config.maxBytes,\n flushTime: config.flushTime,\n flushRows: config.flushRows,\n flushBytes: config.flushBytes,\n };\n}\n\n/**\n * Convert Distributed engine config\n */\nfunction convertDistributedEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (\n !(\"engine\" in config) ||\n config.engine !== ClickHouseEngines.Distributed\n ) {\n return undefined;\n }\n\n return {\n engine: \"Distributed\",\n cluster: config.cluster,\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n shardingKey: config.shardingKey,\n policyName: config.policyName,\n };\n}\n\n/**\n * Convert IcebergS3 engine config\n */\nfunction convertIcebergS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.IcebergS3) {\n return undefined;\n }\n\n return {\n engine: \"IcebergS3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n };\n}\n\n/**\n * Convert Kafka engine configuration\n */\nfunction convertKafkaEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Kafka) {\n return undefined;\n }\n\n return {\n engine: \"Kafka\",\n brokerList: config.brokerList,\n topicList: config.topicList,\n 
groupName: config.groupName,\n format: config.format,\n };\n}\n\n/**\n * Convert table configuration to engine config\n */\nfunction convertTableConfigToEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n const engine = extractEngineValue(config);\n\n // Try basic engines first\n const basicConfig = convertBasicEngineConfig(engine, config);\n if (basicConfig) {\n return basicConfig;\n }\n\n // Try replicated engines\n const replicatedConfig = convertReplicatedEngineConfig(engine, config);\n if (replicatedConfig) {\n return replicatedConfig;\n }\n\n // Handle S3Queue\n if (engine === ClickHouseEngines.S3Queue) {\n return convertS3QueueEngineConfig(config);\n }\n\n // Handle S3\n if (engine === ClickHouseEngines.S3) {\n return convertS3EngineConfig(config);\n }\n\n // Handle Buffer\n if (engine === ClickHouseEngines.Buffer) {\n return convertBufferEngineConfig(config);\n }\n\n // Handle Distributed\n if (engine === ClickHouseEngines.Distributed) {\n return convertDistributedEngineConfig(config);\n }\n\n // Handle IcebergS3\n if (engine === ClickHouseEngines.IcebergS3) {\n return convertIcebergS3EngineConfig(config);\n }\n\n // Handle Kafka\n if (engine === ClickHouseEngines.Kafka) {\n return convertKafkaEngineConfig(config);\n }\n\n return undefined;\n}\n\nexport const toInfraMap = (registry: typeof moose_internal) => {\n const tables: { [key: string]: TableJson } = {};\n const topics: { [key: string]: StreamJson } = {};\n const ingestApis: { [key: string]: IngestApiJson } = {};\n const apis: { [key: string]: ApiJson } = {};\n const sqlResources: { [key: string]: SqlResourceJson } = {};\n const workflows: { [key: string]: WorkflowJson } = {};\n const webApps: { [key: string]: WebAppJson } = {};\n const materializedViews: { [key: string]: MaterializedViewJson } = {};\n const customViews: { [key: string]: CustomViewJson } = {};\n\n registry.tables.forEach((table) => {\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n // If the table is part of an IngestPipeline, inherit metadata if not set\n let metadata = (table as any).metadata;\n if (!metadata && table.config && (table as any).pipelineParent) {\n metadata = (table as any).pipelineParent.metadata;\n }\n // Create type-safe engine configuration\n const engineConfig: EngineConfig | undefined =\n convertTableConfigToEngineConfig(table.config);\n\n // Get table settings, applying defaults for S3Queue\n let tableSettings: { [key: string]: string } | undefined = undefined;\n\n if (table.config.settings) {\n // Convert all settings to strings, filtering out undefined values\n tableSettings = Object.entries(table.config.settings).reduce(\n (acc, [key, value]) => {\n if (value !== undefined) {\n acc[key] = String(value);\n }\n return acc;\n },\n {} as { [key: string]: string },\n );\n }\n\n // Apply default settings for S3Queue if not already specified\n if (engineConfig?.engine === \"S3Queue\") {\n if (!tableSettings) {\n tableSettings = {};\n }\n // Set default mode to 'unordered' if not specified\n if (!tableSettings.mode) {\n tableSettings.mode = \"unordered\";\n }\n }\n\n // Determine ORDER BY from config\n // Note: engines like Buffer and Distributed don't support orderBy/partitionBy/sampleBy\n const hasOrderByFields =\n \"orderByFields\" in table.config &&\n Array.isArray(table.config.orderByFields) &&\n table.config.orderByFields.length > 0;\n const hasOrderByExpression =\n \"orderByExpression\" in table.config &&\n typeof table.config.orderByExpression === \"string\" &&\n 
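
`convertTableConfigToEngineConfig` tries the basic family, then the replicated family, then falls through to each special-purpose engine. One call through the S3Queue branch (path and format are placeholders); note that `toInfraMap` below additionally defaults `tableSettings.mode` to `"unordered"` for S3Queue tables:

```ts
convertTableConfigToEngineConfig({
  engine: ClickHouseEngines.S3Queue,
  s3Path: "https://bucket.s3.amazonaws.com/data/*.json",
  format: "JSONEachRow",
} as OlapConfig<any>);
// -> { engine: "S3Queue",
//      s3Path: "https://bucket.s3.amazonaws.com/data/*.json",
//      format: "JSONEachRow",
//      awsAccessKeyId: undefined, awsSecretAccessKey: undefined,
//      compression: undefined, headers: undefined }
```
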
table.config.orderByExpression.length > 0;\n if (hasOrderByFields && hasOrderByExpression) {\n throw new Error(\n `Table ${table.name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n const orderBy: string[] | string =\n hasOrderByExpression && \"orderByExpression\" in table.config ?\n (table.config.orderByExpression ?? \"\")\n : \"orderByFields\" in table.config ? (table.config.orderByFields ?? [])\n : [];\n\n tables[id] = {\n name: table.name,\n columns: table.columnArray,\n orderBy,\n partitionBy:\n \"partitionBy\" in table.config ? table.config.partitionBy : undefined,\n sampleByExpression:\n \"sampleByExpression\" in table.config ?\n table.config.sampleByExpression\n : undefined,\n primaryKeyExpression:\n \"primaryKeyExpression\" in table.config ?\n table.config.primaryKeyExpression\n : undefined,\n engineConfig,\n version: table.config.version,\n metadata,\n lifeCycle: table.config.lifeCycle,\n // Map 'settings' to 'tableSettings' for internal use\n tableSettings:\n tableSettings && Object.keys(tableSettings).length > 0 ?\n tableSettings\n : undefined,\n indexes:\n table.config.indexes?.map((i) => ({\n ...i,\n granularity: i.granularity === undefined ? 1 : i.granularity,\n arguments: i.arguments === undefined ? [] : i.arguments,\n })) || [],\n ttl: table.config.ttl,\n database: table.config.database,\n cluster: table.config.cluster,\n };\n });\n\n registry.streams.forEach((stream) => {\n // If the stream is part of an IngestPipeline, inherit metadata if not set\n let metadata = stream.metadata;\n if (!metadata && stream.config && (stream as any).pipelineParent) {\n metadata = (stream as any).pipelineParent.metadata;\n }\n const transformationTargets: Target[] = [];\n const consumers: Consumer[] = [];\n\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([destination, _, config]) => {\n transformationTargets.push({\n kind: \"stream\",\n name: destinationName,\n version: config.version,\n metadata: config.metadata,\n sourceFile: config.sourceFile,\n });\n });\n });\n\n stream._consumers.forEach((consumer) => {\n consumers.push({\n version: consumer.config.version,\n sourceFile: consumer.config.sourceFile,\n });\n });\n\n topics[stream.name] = {\n name: stream.name,\n columns: stream.columnArray,\n targetTable: stream.config.destination?.name,\n targetTableVersion: stream.config.destination?.config.version,\n retentionPeriod: stream.config.retentionPeriod ?? defaultRetentionPeriod,\n partitionCount: stream.config.parallelism ?? 1,\n version: stream.config.version,\n transformationTargets,\n hasMultiTransform: stream._multipleTransformations === undefined,\n consumers,\n metadata,\n lifeCycle: stream.config.lifeCycle,\n schemaConfig: stream.config.schemaConfig,\n };\n });\n\n registry.ingestApis.forEach((api) => {\n // If the ingestApi is part of an IngestPipeline, inherit metadata if not set\n let metadata = api.metadata;\n if (!metadata && api.config && (api as any).pipelineParent) {\n metadata = (api as any).pipelineParent.metadata;\n }\n ingestApis[api.name] = {\n name: api.name,\n columns: api.columnArray,\n version: api.config.version,\n path: api.config.path,\n writeTo: {\n kind: \"stream\",\n name: api.config.destination.name,\n },\n deadLetterQueue: api.config.deadLetterQueue?.name,\n metadata,\n schema: api.schema,\n allowExtraFields: api.allowExtraFields,\n };\n });\n\n registry.apis.forEach((api, key) => {\n const rustKey =\n api.config.version ? 
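
The ORDER BY resolution just above accepts `orderByFields` or `orderByExpression`, never both. The three cases, with illustrative config literals:

```ts
// Fields form -> serialized as string[]
const byFields = { orderByFields: ["timestamp", "id"] };
// Expression form -> serialized as one ClickHouse expression string
const byExpression = { orderByExpression: "(timestamp, cityHash64(id))" };
// Both set -> toInfraMap throws:
// "Table events: Provide either orderByFields or orderByExpression, not both."
```
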
`${api.name}:${api.config.version}` : api.name;\n apis[rustKey] = {\n name: api.name,\n queryParams: api.columnArray,\n responseSchema: api.responseSchema,\n version: api.config.version,\n path: api.config.path,\n metadata: api.metadata,\n };\n });\n\n registry.sqlResources.forEach((sqlResource) => {\n sqlResources[sqlResource.name] = {\n name: sqlResource.name,\n setup: sqlResource.setup,\n teardown: sqlResource.teardown,\n sourceFile: sqlResource.sourceFile,\n sourceLine: sqlResource.sourceLine,\n sourceColumn: sqlResource.sourceColumn,\n\n pullsDataFrom: sqlResource.pullsDataFrom.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n pushesDataTo: sqlResource.pushesDataTo.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n };\n });\n\n registry.workflows.forEach((workflow) => {\n workflows[workflow.name] = {\n name: workflow.name,\n retries: workflow.config.retries,\n timeout: workflow.config.timeout,\n schedule: workflow.config.schedule,\n };\n });\n\n registry.webApps.forEach((webApp) => {\n webApps[webApp.name] = {\n name: webApp.name,\n mountPath: webApp.config.mountPath || \"/\",\n metadata: webApp.config.metadata,\n };\n });\n\n // Serialize materialized views with structured data\n registry.materializedViews.forEach((mv) => {\n materializedViews[mv.name] = {\n name: mv.name,\n selectSql: mv.selectSql,\n sourceTables: mv.sourceTables,\n targetTable: mv.targetTable.name,\n targetDatabase: mv.targetTable.config.database,\n sourceFile: mv.sourceFile,\n };\n });\n\n // Serialize custom views with structured data\n registry.customViews.forEach((view) => {\n customViews[view.name] = {\n name: view.name,\n selectSql: view.selectSql,\n sourceTables: view.sourceTables,\n sourceFile: view.sourceFile,\n };\n });\n\n return {\n topics,\n tables,\n ingestApis,\n apis,\n sqlResources,\n workflows,\n webApps,\n materializedViews,\n customViews,\n };\n};\n\n/**\n * Retrieves the global internal Moose resource registry.\n * Uses `globalThis` to ensure a single registry instance.\n *\n * @returns The internal Moose resource registry.\n */\nexport const getMooseInternal = (): typeof moose_internal =>\n (globalThis as any).moose_internal;\n\n// work around for variable visibility in compiler output\nif (getMooseInternal() === undefined) {\n (globalThis as any).moose_internal = moose_internal;\n}\n\n/**\n * Loads the user's application entry point (`app/index.ts`) to register resources,\n * then generates and prints the infrastructure map as JSON.\n *\n * This function is the main entry point used by the Moose infrastructure system\n * to discover the defined resources.\n * It prints the JSON map surrounded by specific delimiters (`___MOOSE_STUFF___start`\n * and `end___MOOSE_STUFF___`) for easy extraction by the calling 
process.\n */\nexport const dumpMooseInternal = async () => {\n loadIndex();\n\n console.log(\n \"___MOOSE_STUFF___start\",\n JSON.stringify(toInfraMap(getMooseInternal())),\n \"end___MOOSE_STUFF___\",\n );\n};\n\nconst loadIndex = () => {\n // Clear the registry before loading to support hot reloading\n const registry = getMooseInternal();\n registry.tables.clear();\n registry.streams.clear();\n registry.ingestApis.clear();\n registry.apis.clear();\n registry.sqlResources.clear();\n registry.workflows.clear();\n registry.webApps.clear();\n registry.materializedViews.clear();\n registry.customViews.clear();\n\n // Clear require cache for app directory to pick up changes\n const appDir = `${process.cwd()}/${getSourceDir()}`;\n Object.keys(require.cache).forEach((key) => {\n if (key.startsWith(appDir)) {\n delete require.cache[key];\n }\n });\n\n try {\n require(`${process.cwd()}/${getSourceDir()}/index.ts`);\n } catch (error) {\n let hint: string | undefined;\n const details = error instanceof Error ? error.message : String(error);\n if (details.includes(\"ERR_REQUIRE_ESM\") || details.includes(\"ES Module\")) {\n hint =\n \"The file or its dependencies are ESM-only. Switch to packages that dual-support CJS & ESM, or upgrade to Node 22.12+. \" +\n \"If you must use Node 20, you may try Node 20.19\\n\\n\";\n }\n\n const errorMsg = `${hint ?? \"\"}${details}`;\n const cause = error instanceof Error ? error : undefined;\n throw new Error(errorMsg, { cause });\n }\n};\n\n/**\n * Loads the user's application entry point and extracts all registered stream\n * transformation and consumer functions.\n *\n * @returns A Map where keys are unique identifiers for transformations/consumers\n * (e.g., \"sourceStream_destStream_version\", \"sourceStream_<no-target>_version\")\n * and values are tuples containing: [handler function, config, source stream columns]\n */\nexport const getStreamingFunctions = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n const transformFunctions = new Map<\n string,\n [\n (data: unknown) => unknown,\n TransformConfig<any> | ConsumerConfig<any>,\n Column[],\n ]\n >();\n\n registry.streams.forEach((stream) => {\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([_, transform, config]) => {\n const transformFunctionKey = `${stream.name}_${destinationName}${config.version ? `_${config.version}` : \"\"}`;\n compilerLog(`getStreamingFunctions: ${transformFunctionKey}`);\n transformFunctions.set(transformFunctionKey, [\n transform,\n config,\n stream.columnArray,\n ]);\n });\n });\n\n stream._consumers.forEach((consumer) => {\n const consumerFunctionKey = `${stream.name}_<no-target>${consumer.config.version ? 
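
`dumpMooseInternal` prints the infrastructure map between `___MOOSE_STUFF___start` and `end___MOOSE_STUFF___`; since `console.log` joins its arguments with spaces, the JSON sits between the two markers. A hedged caller-side sketch for extracting it from captured stdout:

```ts
function extractInfraMap(stdout: string): unknown {
  const startMarker = "___MOOSE_STUFF___start";
  const endMarker = "end___MOOSE_STUFF___";
  const start = stdout.indexOf(startMarker);
  const end = stdout.indexOf(endMarker);
  if (start === -1 || end === -1 || end < start) {
    throw new Error("no infrastructure map found in runner output");
  }
  return JSON.parse(stdout.slice(start + startMarker.length, end).trim());
}
```
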
`_${consumer.config.version}` : \"\"}`;\n transformFunctions.set(consumerFunctionKey, [\n consumer.consumer,\n consumer.config,\n stream.columnArray,\n ]);\n });\n });\n\n return transformFunctions;\n};\n\n/**\n * Loads the user's application entry point and extracts all registered\n * API handler functions.\n *\n * @returns A Map where keys are the names of the APIs and values\n * are their corresponding handler functions.\n */\nexport const getApis = async () => {\n loadIndex();\n const apiFunctions = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n const registry = getMooseInternal();\n // Single pass: store full keys, track aliasing decisions\n const versionCountByName = new Map<string, number>();\n const nameToSoleVersionHandler = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n registry.apis.forEach((api, key) => {\n const handler = api.getHandler();\n apiFunctions.set(key, handler);\n\n if (!api.config.version) {\n // Explicit unversioned takes precedence for alias\n if (!apiFunctions.has(api.name)) {\n apiFunctions.set(api.name, handler);\n }\n nameToSoleVersionHandler.delete(api.name);\n versionCountByName.delete(api.name);\n } else if (!apiFunctions.has(api.name)) {\n // Only track versioned for alias if no explicit unversioned present\n const count = (versionCountByName.get(api.name) ?? 0) + 1;\n versionCountByName.set(api.name, count);\n if (count === 1) {\n nameToSoleVersionHandler.set(api.name, handler);\n } else {\n nameToSoleVersionHandler.delete(api.name);\n }\n }\n });\n\n // Finalize aliases for names that have exactly one versioned API and no unversioned\n nameToSoleVersionHandler.forEach((handler, name) => {\n if (!apiFunctions.has(name)) {\n apiFunctions.set(name, handler);\n }\n });\n\n return apiFunctions;\n};\n\nexport const dlqSchema: IJsonSchemaCollection.IV3_1 = {\n version: \"3.1\",\n components: {\n schemas: {\n DeadLetterModel: {\n type: \"object\",\n properties: {\n originalRecord: {\n $ref: \"#/components/schemas/Recordstringany\",\n },\n errorMessage: {\n type: \"string\",\n },\n errorType: {\n type: \"string\",\n },\n failedAt: {\n type: \"string\",\n format: \"date-time\",\n },\n source: {\n oneOf: [\n {\n const: \"api\",\n },\n {\n const: \"transform\",\n },\n {\n const: \"table\",\n },\n ],\n },\n },\n required: [\n \"originalRecord\",\n \"errorMessage\",\n \"errorType\",\n \"failedAt\",\n \"source\",\n ],\n },\n Recordstringany: {\n type: \"object\",\n properties: {},\n required: [],\n description: \"Construct a type with a set of properties K of type T\",\n additionalProperties: {},\n },\n },\n },\n schemas: [\n {\n $ref: \"#/components/schemas/DeadLetterModel\",\n },\n ],\n};\n\nexport const dlqColumns: Column[] = [\n {\n name: \"originalRecord\",\n data_type: \"Json\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorMessage\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorType\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"failedAt\",\n data_type: \"DateTime\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n 
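
`dlqSchema` and `dlqColumns` above pin down the dead-letter record shape; a record conforming to it looks like this (values are illustrative):

```ts
const deadLetter = {
  originalRecord: { userId: 42, event: "click" }, // the payload that failed
  errorMessage: "expected string at $.eventTime",
  errorType: "ValidationError",
  failedAt: "2024-05-01T12:00:00Z",  // date-time string per the JSON schema
  source: "api" as const,            // "api" | "transform" | "table"
};
```
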
annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"source\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n];\n\nexport const getWorkflows = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n return registry.workflows;\n};\n\nfunction findTaskInTree(\n task: Task<any, any>,\n targetName: string,\n): Task<any, any> | undefined {\n if (task.name === targetName) {\n return task;\n }\n\n if (task.config.onComplete?.length) {\n for (const childTask of task.config.onComplete) {\n const found = findTaskInTree(childTask, targetName);\n if (found) {\n return found;\n }\n }\n }\n\n return undefined;\n}\n\nexport const getTaskForWorkflow = async (\n workflowName: string,\n taskName: string,\n): Promise<Task<any, any>> => {\n const workflows = await getWorkflows();\n const workflow = workflows.get(workflowName);\n if (!workflow) {\n throw new Error(`Workflow ${workflowName} not found`);\n }\n\n const task = findTaskInTree(\n workflow.config.startingTask as Task<any, any>,\n taskName,\n );\n if (!task) {\n throw new Error(`Task ${taskName} not found in workflow ${workflowName}`);\n }\n\n return task;\n};\n\nexport const getWebApps = async () => {\n loadIndex();\n return getMooseInternal().webApps;\n};\n","// source https://github.com/blakeembrey/sql-template-tag/blob/main/src/index.ts\nimport { Column } from \"./dataModels/dataModelTypes\";\nimport { OlapTable } from \"./dmv2\";\n\nimport { AggregationFunction } from \"./dataModels/typeConvert\";\n\n/**\n * Quote a ClickHouse identifier with backticks if not already quoted.\n * Backticks allow special characters (e.g., hyphens) in identifiers.\n */\nexport const quoteIdentifier = (name: string): string => {\n return name.startsWith(\"`\") && name.endsWith(\"`\") ? 
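
`quoteIdentifier` backtick-quotes a ClickHouse identifier unless it is already quoted, which lets names with special characters (hyphens, spaces) pass through SQL safely:

```ts
quoteIdentifier("my-table");   // "`my-table`"   (backticks added)
quoteIdentifier("`my-table`"); // "`my-table`"   (already quoted, unchanged)
```
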
name : `\\`${name}\\``;\n};\n\nconst isTable = (\n value: RawValue | Column | OlapTable<any>,\n): value is OlapTable<any> =>\n typeof value === \"object\" &&\n value !== null &&\n \"kind\" in value &&\n value.kind === \"OlapTable\";\n\nexport type IdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\nexport type NonIdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\n\n/**\n * Values supported by SQL engine.\n */\nexport type Value =\n | NonIdentifierBrandedString\n | number\n | boolean\n | Date\n | [string, string];\n\n/**\n * Supported value or SQL instance.\n */\nexport type RawValue = Value | Sql;\n\nconst isColumn = (value: RawValue | Column | OlapTable<any>): value is Column =>\n typeof value === \"object\" && \"name\" in value && \"annotations\" in value;\n\nexport function sql(\n strings: readonly string[],\n ...values: readonly (RawValue | Column | OlapTable<any>)[]\n) {\n return new Sql(strings, values);\n}\n\nconst instanceofSql = (\n value: RawValue | Column | OlapTable<any>,\n): value is Sql =>\n typeof value === \"object\" && \"values\" in value && \"strings\" in value;\n\n/**\n * A SQL instance can be nested within each other to build SQL strings.\n */\nexport class Sql {\n readonly values: Value[];\n readonly strings: string[];\n\n constructor(\n rawStrings: readonly string[],\n rawValues: readonly (RawValue | Column | OlapTable<any>)[],\n ) {\n if (rawStrings.length - 1 !== rawValues.length) {\n if (rawStrings.length === 0) {\n throw new TypeError(\"Expected at least 1 string\");\n }\n\n throw new TypeError(\n `Expected ${rawStrings.length} strings to have ${\n rawStrings.length - 1\n } values`,\n );\n }\n\n const valuesLength = rawValues.reduce<number>(\n (len: number, value: RawValue | Column | OlapTable<any>) =>\n len +\n (instanceofSql(value) ? value.values.length\n : isColumn(value) || isTable(value) ? 0\n : 1),\n 0,\n );\n\n this.values = new Array(valuesLength);\n this.strings = new Array(valuesLength + 1);\n\n this.strings[0] = rawStrings[0];\n\n // Iterate over raw values, strings, and children. The value is always\n // positioned between two strings, e.g. 
`index + 1`.\n let i = 0,\n pos = 0;\n while (i < rawValues.length) {\n const child = rawValues[i++];\n const rawString = rawStrings[i];\n\n // Check for nested `sql` queries.\n if (instanceofSql(child)) {\n // Append child prefix text to current string.\n this.strings[pos] += child.strings[0];\n\n let childIndex = 0;\n while (childIndex < child.values.length) {\n this.values[pos++] = child.values[childIndex++];\n this.strings[pos] = child.strings[childIndex];\n }\n\n // Append raw string to current string.\n this.strings[pos] += rawString;\n } else if (isColumn(child)) {\n const aggregationFunction = child.annotations.find(\n ([k, _]) => k === \"aggregationFunction\",\n );\n if (aggregationFunction !== undefined) {\n this.strings[pos] +=\n `${(aggregationFunction[1] as AggregationFunction).functionName}Merge(\\`${child.name}\\`)`;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else if (isTable(child)) {\n if (child.config.database) {\n this.strings[pos] += `\\`${child.config.database}\\`.\\`${child.name}\\``;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else {\n this.values[pos++] = child;\n this.strings[pos] = rawString;\n }\n }\n }\n}\n\nexport const toStaticQuery = (sql: Sql): string => {\n const [query, params] = toQuery(sql);\n if (Object.keys(params).length !== 0) {\n throw new Error(\n \"Dynamic SQL is not allowed in the select statement in view creation.\",\n );\n }\n return query;\n};\n\nexport const toQuery = (sql: Sql): [string, { [pN: string]: any }] => {\n const parameterizedStubs = sql.values.map((v, i) =>\n createClickhouseParameter(i, v),\n );\n\n const query = sql.strings\n .map((s, i) =>\n s != \"\" ? `${s}${emptyIfUndefined(parameterizedStubs[i])}` : \"\",\n )\n .join(\"\");\n\n const query_params = sql.values.reduce(\n (acc: Record<string, unknown>, v, i) => ({\n ...acc,\n [`p${i}`]: getValueFromParameter(v),\n }),\n {},\n );\n return [query, query_params];\n};\n\n/**\n * Build a display-only SQL string with values inlined for logging/debugging.\n * Does not alter execution behavior; use toQuery for actual execution.\n */\nexport const toQueryPreview = (sql: Sql): string => {\n try {\n const formatValue = (v: Value): string => {\n // Unwrap identifiers: [\"Identifier\", name]\n if (Array.isArray(v)) {\n const [type, val] = v as unknown as [string, any];\n if (type === \"Identifier\") {\n // Quote identifiers with backticks like other helpers\n return `\\`${String(val)}\\``;\n }\n // Fallback for unexpected arrays\n return `[${(v as unknown as any[]).map((x) => formatValue(x as Value)).join(\", \")}]`;\n }\n if (v === null || v === undefined) return \"NULL\";\n if (typeof v === \"string\") return `'${v.replace(/'/g, \"''\")}'`;\n if (typeof v === \"number\") return String(v);\n if (typeof v === \"boolean\") return v ? \"true\" : \"false\";\n if (v instanceof Date)\n return `'${v.toISOString().replace(\"T\", \" \").slice(0, 19)}'`;\n try {\n return JSON.stringify(v as unknown as any);\n } catch {\n return String(v);\n }\n };\n\n let out = sql.strings[0] ?? \"\";\n for (let i = 0; i < sql.values.length; i++) {\n const val = getValueFromParameter(sql.values[i] as any);\n out += formatValue(val as Value);\n out += sql.strings[i + 1] ?? 
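// ---------------------------------------------------------------------------
// Editor's note: the Sql constructor above splices nested Sql fragments,
// Column references, and OlapTable references into the string parts, so only
// plain values remain as parameters. A sketch of what toQuery then yields,
// assuming a hypothetical OlapTable instance `events`:
// ```typescript
// const q = sql`SELECT count() FROM ${events} WHERE id = ${42} AND name = ${'a'}`;
// const [text, params] = toQuery(q);
// // text   => "SELECT count() FROM `events` WHERE id = {p0:Int} AND name = {p1:String}"
// // params => { p0: 42, p1: 'a' }
// ```
// ---------------------------------------------------------------------------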
\"\";\n }\n return out.replace(/\\s+/g, \" \").trim();\n } catch (error) {\n console.log(`toQueryPreview error: ${error}`);\n return \"/* query preview unavailable */\";\n }\n};\n\nexport const getValueFromParameter = (value: any) => {\n if (Array.isArray(value)) {\n const [type, val] = value;\n if (type === \"Identifier\") return val;\n }\n return value;\n};\nexport function createClickhouseParameter(\n parameterIndex: number,\n value: Value,\n) {\n // ClickHouse use {name:type} be a placeholder, so if we only use number string as name e.g: {1:Unit8}\n // it will face issue when converting to the query params => {1: value1}, because the key is value not string type, so here add prefix \"p\" to avoid this issue.\n return `{p${parameterIndex}:${mapToClickHouseType(value)}}`;\n}\n\n/**\n * Convert the JS type (source is JSON format by API query parameter) to the corresponding ClickHouse type for generating named placeholder of parameterized query.\n * Only support to convert number to Int or Float, boolean to Bool, string to String, other types will convert to String.\n * If exist complex type e.g: object, Array, null, undefined, Date, Record.. etc, just convert to string type by ClickHouse function in SQL.\n * ClickHouse support converting string to other types function.\n * Please see Each section of the https://clickhouse.com/docs/en/sql-reference/functions and https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions\n * @param value\n * @returns 'Float', 'Int', 'Bool', 'String'\n */\nexport const mapToClickHouseType = (value: Value) => {\n if (typeof value === \"number\") {\n // infer the float or int according to exist remainder or not\n return Number.isInteger(value) ? \"Int\" : \"Float\";\n }\n // When define column type or query result with parameterized query, The Bool or Boolean type both supported.\n // But the column type of query result only return Bool, so we only support Bool type for safety.\n if (typeof value === \"boolean\") return \"Bool\";\n if (value instanceof Date) return \"DateTime\";\n if (Array.isArray(value)) {\n const [type, _] = value;\n return type;\n }\n return \"String\";\n};\nfunction emptyIfUndefined(value: string | undefined): string {\n return value === undefined ? 
\"\" : value;\n}\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase, TypiaValidators } from \"../typedBase\";\nimport {\n Column,\n isArrayNestedType,\n isNestedType,\n} from \"../../dataModels/dataModelTypes\";\nimport { ClickHouseEngines } from \"../../blocks/helpers\";\nimport { getMooseInternal, isClientOnlyMode } from \"../internal\";\nimport { Readable } from \"node:stream\";\nimport { createHash } from \"node:crypto\";\nimport type {\n ConfigurationRegistry,\n RuntimeClickHouseConfig,\n} from \"../../config/runtime\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport { IdentifierBrandedString, quoteIdentifier } from \"../../sqlHelpers\";\nimport type { NodeClickHouseClient } from \"@clickhouse/client/dist/client\";\n\nexport interface TableIndex {\n name: string;\n expression: string;\n type: string;\n arguments?: string[];\n granularity?: number;\n}\n\n/**\n * Represents a failed record during insertion with error details\n */\nexport interface FailedRecord<T> {\n /** The original record that failed to insert */\n record: T;\n /** The error message describing why the insertion failed */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n}\n\n/**\n * Result of an insert operation with detailed success/failure information\n */\nexport interface InsertResult<T> {\n /** Number of records successfully inserted */\n successful: number;\n /** Number of records that failed to insert */\n failed: number;\n /** Total number of records processed */\n total: number;\n /** Detailed information about failed records (if record isolation was used) */\n failedRecords?: FailedRecord<T>[];\n}\n\n/**\n * Error handling strategy for insert operations\n */\nexport type ErrorStrategy =\n | \"fail-fast\" // Fail immediately on any error (default)\n | \"discard\" // Discard bad records and continue with good ones\n | \"isolate\"; // Retry individual records to isolate failures\n\n/**\n * Options for insert operations\n */\nexport interface InsertOptions {\n /** Maximum number of bad records to tolerate before failing */\n allowErrors?: number;\n /** Maximum ratio of bad records to tolerate (0.0 to 1.0) before failing */\n allowErrorsRatio?: number;\n /** Error handling strategy */\n strategy?: ErrorStrategy;\n /** Whether to enable dead letter queue for failed records (future feature) */\n deadLetterQueue?: boolean;\n /** Whether to validate data against schema before insertion (default: true) */\n validate?: boolean;\n /** Whether to skip validation for individual records during 'isolate' strategy retries (default: false) */\n skipValidationOnRetry?: boolean;\n}\n\n/**\n * Validation result for a record with detailed error information\n */\nexport interface ValidationError {\n /** The original record that failed validation */\n record: any;\n /** Detailed validation error message */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n /** The path to the field that failed validation */\n path?: string;\n}\n\n/**\n * Result of data validation with success/failure breakdown\n */\nexport interface ValidationResult<T> {\n /** Records that passed validation */\n valid: T[];\n /** Records that failed validation with detailed error information */\n invalid: ValidationError[];\n /** Total number of records processed */\n total: number;\n}\n\n/**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING\n * Note: Since ClickHouse 24.7, settings no longer 
require the 's3queue_' prefix\n */\nexport interface S3QueueTableSettings {\n /** Processing mode: \"ordered\" for sequential or \"unordered\" for parallel processing */\n mode?: \"ordered\" | \"unordered\";\n /** What to do with files after processing: 'keep' or 'delete' */\n after_processing?: \"keep\" | \"delete\";\n /** ZooKeeper/Keeper path for coordination between replicas */\n keeper_path?: string;\n /** Number of retry attempts for failed files */\n loading_retries?: string;\n /** Number of threads for parallel processing */\n processing_threads_num?: string;\n /** Enable parallel inserts */\n parallel_inserts?: string;\n /** Enable logging to system.s3queue_log table */\n enable_logging_to_queue_log?: string;\n /** Last processed file path (for ordered mode) */\n last_processed_path?: string;\n /** Maximum number of tracked files in ZooKeeper */\n tracked_files_limit?: string;\n /** TTL for tracked files in seconds */\n tracked_file_ttl_sec?: string;\n /** Minimum polling timeout in milliseconds */\n polling_min_timeout_ms?: string;\n /** Maximum polling timeout in milliseconds */\n polling_max_timeout_ms?: string;\n /** Polling backoff in milliseconds */\n polling_backoff_ms?: string;\n /** Minimum cleanup interval in milliseconds */\n cleanup_interval_min_ms?: string;\n /** Maximum cleanup interval in milliseconds */\n cleanup_interval_max_ms?: string;\n /** Number of buckets for sharding (0 = disabled) */\n buckets?: string;\n /** Batch size for listing objects */\n list_objects_batch_size?: string;\n /** Enable hash ring filtering for distributed processing */\n enable_hash_ring_filtering?: string;\n /** Maximum files to process before committing */\n max_processed_files_before_commit?: string;\n /** Maximum rows to process before committing */\n max_processed_rows_before_commit?: string;\n /** Maximum bytes to process before committing */\n max_processed_bytes_before_commit?: string;\n /** Maximum processing time in seconds before committing */\n max_processing_time_sec_before_commit?: string;\n /** Use persistent processing nodes (available from 25.8) */\n use_persistent_processing_nodes?: string;\n /** TTL for persistent processing nodes in seconds */\n persistent_processing_nodes_ttl_seconds?: string;\n /** Additional settings */\n [key: string]: string | undefined;\n}\n\n/**\n * Base configuration shared by all table engines\n * @template T The data type of the records stored in the table.\n */\n\nexport type BaseOlapConfig<T> = (\n | {\n /**\n * Specifies the fields to use for ordering data within the ClickHouse table.\n * This is crucial for optimizing query performance.\n */\n orderByFields: (keyof T & string)[];\n orderByExpression?: undefined;\n }\n | {\n orderByFields?: undefined;\n /**\n * An arbitrary ClickHouse SQL expression for the order by clause.\n *\n * `orderByExpression: \"(id, name)\"` is equivalent to `orderByFields: [\"id\", \"name\"]`\n * `orderByExpression: \"tuple()\"` means no sorting\n */\n orderByExpression: string;\n }\n // specify either or leave both unspecified\n | { orderByFields?: undefined; orderByExpression?: undefined }\n) & {\n partitionBy?: string;\n /**\n * SAMPLE BY expression for approximate query processing.\n *\n * Examples:\n * ```typescript\n * // Single unsigned integer field\n * sampleByExpression: \"userId\"\n *\n * // Hash function on any field type\n * sampleByExpression: \"cityHash64(id)\"\n *\n * // Multiple fields with hash\n * sampleByExpression: \"cityHash64(userId, timestamp)\"\n * ```\n *\n * Requirements:\n * - 
Expression must evaluate to an unsigned integer (UInt8/16/32/64)\n * - Expression must be present in the ORDER BY clause\n * - If using hash functions, the same expression must appear in orderByExpression\n */\n sampleByExpression?: string;\n /**\n * Optional PRIMARY KEY expression.\n * When specified, this overrides the primary key inferred from Key<T> column annotations.\n *\n * This allows for:\n * - Complex primary keys using functions (e.g., \"cityHash64(id)\")\n * - Different column ordering in primary key vs schema definition\n * - Primary keys that differ from ORDER BY\n *\n * Example: primaryKeyExpression: \"(userId, cityHash64(eventId))\"\n *\n * Note: When this is set, any Key<T> annotations on columns are ignored for PRIMARY KEY generation.\n */\n primaryKeyExpression?: string;\n version?: string;\n lifeCycle?: LifeCycle;\n settings?: { [key: string]: string };\n /**\n * Optional TTL configuration for the table.\n * e.g., \"TTL timestamp + INTERVAL 90 DAY DELETE\"\n *\n * Use the {@link ClickHouseTTL} type to configure column level TTL\n */\n ttl?: string;\n /** Optional secondary/data-skipping indexes */\n indexes?: TableIndex[];\n /**\n * Optional database name for multi-database support.\n * When not specified, uses the global ClickHouse config database.\n */\n database?: string;\n /**\n * Optional cluster name for ON CLUSTER support.\n * Use this to enable replicated tables across ClickHouse clusters.\n * The cluster must be defined in config.toml (dev environment only).\n * Example: cluster: \"prod_cluster\"\n */\n cluster?: string;\n};\n\n/**\n * Configuration for MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type MergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.MergeTree;\n};\n\n/**\n * Configuration for ReplacingMergeTree engine (deduplication)\n * @template T The data type of the records stored in the table.\n */\nexport type ReplacingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.ReplacingMergeTree;\n ver?: keyof T & string; // Optional version column\n isDeleted?: keyof T & string; // Optional is_deleted column\n};\n\n/**\n * Configuration for AggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type AggregatingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.AggregatingMergeTree;\n};\n\n/**\n * Configuration for SummingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type SummingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.SummingMergeTree;\n columns?: string[];\n};\n\n/**\n * Configuration for CollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type CollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.CollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n};\n\n/**\n * Configuration for VersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type VersionedCollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.VersionedCollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n ver: keyof T & string; // Version column for ordering state changes\n};\n\ninterface ReplicatedEngineProperties {\n keeperPath?: string;\n replicaName?: string;\n}\n\n/**\n * Configuration for ReplicatedMergeTree engine\n * 
@template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedMergeTreeConfig<T> = Omit<MergeTreeConfig<T>, \"engine\"> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedMergeTree;\n };\n\n/**\n * Configuration for ReplicatedReplacingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedReplacingMergeTreeConfig<T> = Omit<\n ReplacingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedReplacingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedAggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedAggregatingMergeTreeConfig<T> = Omit<\n AggregatingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedAggregatingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedSummingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedSummingMergeTreeConfig<T> = Omit<\n SummingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedSummingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedCollapsingMergeTreeConfig<T> = Omit<\n CollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedCollapsingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedVersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedVersionedCollapsingMergeTreeConfig<T> = Omit<\n VersionedCollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree;\n };\n\n/**\n * Configuration for S3Queue engine - only non-alterable constructor parameters.\n * S3Queue-specific settings like 'mode', 'keeper_path', etc. 
should be specified\n * in the settings field, not here.\n * @template T The data type of the records stored in the table.\n */\nexport type S3QueueConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"settings\" | \"orderByFields\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.S3Queue;\n /** S3 bucket path with wildcards (e.g., 's3://bucket/data/*.json') */\n s3Path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd') */\n compression?: string;\n /** Custom HTTP headers */\n headers?: { [key: string]: string };\n /**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING.\n * These settings control the behavior of the S3Queue engine.\n */\n settings?: S3QueueTableSettings;\n};\n\n/**\n * Configuration for S3 engine\n * Note: S3 engine supports ORDER BY clause, unlike S3Queue, Buffer, and Distributed engines\n * @template T The data type of the records stored in the table.\n */\nexport type S3Config<T> = Omit<BaseOlapConfig<T>, \"sampleByExpression\"> & {\n engine: ClickHouseEngines.S3;\n /** S3 path (e.g., 's3://bucket/path/file.json') */\n path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd', 'auto') */\n compression?: string;\n /** Partition strategy (optional) */\n partitionStrategy?: string;\n /** Partition columns in data file (optional) */\n partitionColumnsInDataFile?: string;\n};\n\n/**\n * Configuration for Buffer engine\n * @template T The data type of the records stored in the table.\n */\nexport type BufferConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Buffer;\n /** Target database name for the destination table */\n targetDatabase: string;\n /** Target table name where data will be flushed */\n targetTable: string;\n /** Number of buffer layers (typically 16) */\n numLayers: number;\n /** Minimum time in seconds before flushing */\n minTime: number;\n /** Maximum time in seconds before flushing */\n maxTime: number;\n /** Minimum number of rows before flushing */\n minRows: number;\n /** Maximum number of rows before flushing */\n maxRows: number;\n /** Minimum bytes before flushing */\n minBytes: number;\n /** Maximum bytes before flushing */\n maxBytes: number;\n /** Optional: Flush time in seconds */\n flushTime?: number;\n /** Optional: Flush number of rows */\n flushRows?: number;\n /** Optional: Flush number of bytes */\n flushBytes?: number;\n};\n\n/**\n * Configuration for Distributed engine\n * @template T The data type of the records stored in the table.\n */\nexport type DistributedConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Distributed;\n /** Cluster name from the ClickHouse configuration */\n cluster: string;\n /** Database name on the cluster */\n targetDatabase: string;\n /** Table name on the cluster */\n targetTable: string;\n /** Optional: Sharding key expression for data distribution */\n shardingKey?: 
string;\n /** Optional: Policy name for data distribution */\n policyName?: string;\n};\n\n/** Kafka table settings. See: https://clickhouse.com/docs/engines/table-engines/integrations/kafka */\nexport interface KafkaTableSettings {\n kafka_security_protocol?: \"PLAINTEXT\" | \"SSL\" | \"SASL_PLAINTEXT\" | \"SASL_SSL\";\n kafka_sasl_mechanism?:\n | \"GSSAPI\"\n | \"PLAIN\"\n | \"SCRAM-SHA-256\"\n | \"SCRAM-SHA-512\"\n | \"OAUTHBEARER\";\n kafka_sasl_username?: string;\n kafka_sasl_password?: string;\n kafka_schema?: string;\n kafka_num_consumers?: string;\n kafka_max_block_size?: string;\n kafka_skip_broken_messages?: string;\n kafka_commit_every_batch?: string;\n kafka_client_id?: string;\n kafka_poll_timeout_ms?: string;\n kafka_poll_max_batch_size?: string;\n kafka_flush_interval_ms?: string;\n kafka_consumer_reschedule_ms?: string;\n kafka_thread_per_consumer?: string;\n kafka_handle_error_mode?: \"default\" | \"stream\";\n kafka_commit_on_select?: string;\n kafka_max_rows_per_message?: string;\n kafka_compression_codec?: string;\n kafka_compression_level?: string;\n}\n\n/** Kafka engine for streaming data from Kafka topics. Additional settings go in `settings`. */\nexport type KafkaConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Kafka;\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n settings?: KafkaTableSettings;\n};\n\n/**\n * Configuration for IcebergS3 engine - read-only Iceberg table access\n *\n * Provides direct querying of Apache Iceberg tables stored on S3.\n * Data is not copied; queries stream directly from Parquet/ORC files.\n *\n * @template T The data type of the records stored in the table.\n *\n * @example\n * ```typescript\n * const lakeEvents = new OlapTable<Event>(\"lake_events\", {\n * engine: ClickHouseEngines.IcebergS3,\n * path: \"s3://datalake/events/\",\n * format: \"Parquet\",\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\"),\n * awsSecretAccessKey: mooseRuntimeEnv.get(\"AWS_SECRET_ACCESS_KEY\")\n * });\n * ```\n *\n * @remarks\n * - IcebergS3 engine is read-only\n * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses\n * - Queries always see the latest Iceberg snapshot (with metadata cache)\n */\nexport type IcebergS3Config<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.IcebergS3;\n /** S3 path to Iceberg table root (e.g., 's3://bucket/warehouse/events/') */\n path: string;\n /** Data format - 'Parquet' or 'ORC' */\n format: \"Parquet\" | \"ORC\";\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key (optional) */\n awsSecretAccessKey?: string;\n /** Compression type (optional: 'gzip', 'zstd', 'auto') */\n compression?: string;\n};\n\n/**\n * Legacy configuration (backward compatibility) - defaults to MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type LegacyOlapConfig<T> = BaseOlapConfig<T>;\n\ntype EngineConfig<T> =\n | MergeTreeConfig<T>\n | ReplacingMergeTreeConfig<T>\n | AggregatingMergeTreeConfig<T>\n | SummingMergeTreeConfig<T>\n | CollapsingMergeTreeConfig<T>\n | VersionedCollapsingMergeTreeConfig<T>\n | ReplicatedMergeTreeConfig<T>\n | ReplicatedReplacingMergeTreeConfig<T>\n | ReplicatedAggregatingMergeTreeConfig<T>\n | 
ReplicatedSummingMergeTreeConfig<T>\n | ReplicatedCollapsingMergeTreeConfig<T>\n | ReplicatedVersionedCollapsingMergeTreeConfig<T>\n | S3QueueConfig<T>\n | S3Config<T>\n | BufferConfig<T>\n | DistributedConfig<T>\n | IcebergS3Config<T>\n | KafkaConfig<T>;\n\n/**\n * Union of all engine-specific configurations (new API)\n * @template T The data type of the records stored in the table.\n */\nexport type OlapConfig<T> = EngineConfig<T> | LegacyOlapConfig<T>;\n\n/**\n * Represents an OLAP (Online Analytical Processing) table, typically corresponding to a ClickHouse table.\n * Provides a typed interface for interacting with the table.\n *\n * @template T The data type of the records stored in the table. The structure of T defines the table schema.\n */\nexport class OlapTable<T> extends TypedBase<T, OlapConfig<T>> {\n name: IdentifierBrandedString;\n\n /** @internal */\n public readonly kind = \"OlapTable\";\n\n /** @internal Memoized ClickHouse client for reusing connections across insert calls */\n private _memoizedClient?: any;\n /** @internal Hash of the configuration used to create the memoized client */\n private _configHash?: string;\n /** @internal Cached table name to avoid repeated generation */\n private _cachedTableName?: string;\n\n /**\n * Creates a new OlapTable instance.\n * @param name The name of the table. This name is used for the underlying ClickHouse table.\n * @param config Optional configuration for the OLAP table.\n */\n constructor(name: string, config?: OlapConfig<T>);\n\n /** @internal **/\n constructor(\n name: string,\n config: OlapConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators?: TypiaValidators<T>,\n );\n\n constructor(\n name: string,\n config?: OlapConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n ) {\n // Handle legacy configuration by defaulting to MergeTree when no engine is specified\n const resolvedConfig =\n config ?\n \"engine\" in config ?\n config\n : { ...config, engine: ClickHouseEngines.MergeTree }\n : { engine: ClickHouseEngines.MergeTree };\n\n // Enforce mutual exclusivity at runtime as well\n const hasFields =\n Array.isArray((resolvedConfig as any).orderByFields) &&\n (resolvedConfig as any).orderByFields.length > 0;\n const hasExpr =\n typeof (resolvedConfig as any).orderByExpression === \"string\" &&\n (resolvedConfig as any).orderByExpression.length > 0;\n if (hasFields && hasExpr) {\n throw new Error(\n `OlapTable ${name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n\n // Validate cluster and explicit replication params are not both specified\n const hasCluster = typeof (resolvedConfig as any).cluster === \"string\";\n const hasKeeperPath =\n typeof (resolvedConfig as any).keeperPath === \"string\";\n const hasReplicaName =\n typeof (resolvedConfig as any).replicaName === \"string\";\n\n if (hasCluster && (hasKeeperPath || hasReplicaName)) {\n throw new Error(\n `OlapTable ${name}: Cannot specify both 'cluster' and explicit replication params ('keeperPath' or 'replicaName'). ` +\n `Use 'cluster' for auto-injected params, or use explicit 'keeperPath' and 'replicaName' without 'cluster'.`,\n );\n }\n\n super(name, resolvedConfig, schema, columns, validators);\n this.name = name;\n\n const tables = getMooseInternal().tables;\n const registryKey =\n this.config.version ? 
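// ---------------------------------------------------------------------------
// Editor's note: the OlapTable constructor below defaults legacy configs (no
// `engine` field) to MergeTree and enforces at runtime that orderByFields and
// orderByExpression are mutually exclusive. A sketch, assuming a hypothetical
// record type Event with `id` and `updated_at` fields:
// ```typescript
// // OK: explicit engine with a single ordering mechanism
// const events = new OlapTable<Event>('events', {
//   engine: ClickHouseEngines.ReplacingMergeTree,
//   orderByFields: ['id'],
//   ver: 'updated_at',
// });
//
// // Throws "Provide either orderByFields or orderByExpression, not both."
// // (the union type already rejects this at compile time, hence the cast)
// new OlapTable<Event>('bad', {
//   orderByFields: ['id'],
//   orderByExpression: '(id)',
// } as any);
// ```
// ---------------------------------------------------------------------------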
`${name}_${this.config.version}` : name;\n // In client-only mode (MOOSE_CLIENT_ONLY=true), allow duplicate registrations\n // to support Next.js HMR which re-executes modules without clearing the registry\n if (!isClientOnlyMode() && tables.has(registryKey)) {\n throw new Error(\n `OlapTable with name ${name} and version ${config?.version ?? \"unversioned\"} already exists`,\n );\n }\n tables.set(registryKey, this);\n }\n\n /**\n * Generates the versioned table name following Moose's naming convention\n * Format: {tableName}_{version_with_dots_replaced_by_underscores}\n */\n private generateTableName(): string {\n // Cache the table name since version rarely changes\n if (this._cachedTableName) {\n return this._cachedTableName;\n }\n\n const tableVersion = this.config.version;\n if (!tableVersion) {\n this._cachedTableName = this.name;\n } else {\n const versionSuffix = tableVersion.replace(/\\./g, \"_\");\n this._cachedTableName = `${this.name}_${versionSuffix}`;\n }\n\n return this._cachedTableName;\n }\n\n /**\n * Creates a fast hash of the ClickHouse configuration.\n * Uses crypto.createHash for better performance than JSON.stringify.\n *\n * @private\n */\n private createConfigHash(clickhouseConfig: any): string {\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const configString = `${clickhouseConfig.host}:${clickhouseConfig.port}:${clickhouseConfig.username}:${clickhouseConfig.password}:${effectiveDatabase}:${clickhouseConfig.useSSL}`;\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized ClickHouse client.\n * The client is cached and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be created.\n *\n * @private\n */\n private async getMemoizedClient(): Promise<{\n client: NodeClickHouseClient;\n config: RuntimeClickHouseConfig;\n }> {\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getClickhouseClient } = await import(\"../../commons\");\n\n const clickhouseConfig = await configRegistry.getClickHouseConfig();\n const currentConfigHash = this.createConfigHash(clickhouseConfig);\n\n // If we have a cached client and the config hasn't changed, reuse it\n if (this._memoizedClient && this._configHash === currentConfigHash) {\n return { client: this._memoizedClient, config: clickhouseConfig };\n }\n\n // Close existing client if config changed\n if (this._memoizedClient && this._configHash !== currentConfigHash) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing old client\n }\n }\n\n // Create new client with standard configuration\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const client = getClickhouseClient({\n username: clickhouseConfig.username,\n password: clickhouseConfig.password,\n database: effectiveDatabase,\n useSSL: clickhouseConfig.useSSL ? 
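// ---------------------------------------------------------------------------
// Editor's note: generateTableName above derives the physical ClickHouse table
// name from the declared name plus the version with dots replaced by
// underscores, while the registry key keeps the dots. Expected naming for a
// hypothetical declaration:
// ```typescript
// const t = new OlapTable<Event>('events', { version: '1.2' });
// // registry key:   'events_1.2'
// // physical table: 'events_1_2'
// // no version set: both are just 'events'
// ```
// ---------------------------------------------------------------------------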
\"true\" : \"false\",\n host: clickhouseConfig.host,\n port: clickhouseConfig.port,\n });\n\n // Cache the new client and config hash\n this._memoizedClient = client;\n this._configHash = currentConfigHash;\n\n return { client, config: clickhouseConfig };\n }\n\n /**\n * Closes the memoized ClickHouse client if it exists.\n * This is useful for cleaning up connections when the table instance is no longer needed.\n * The client will be automatically recreated on the next insert call if needed.\n */\n async closeClient(): Promise<void> {\n if (this._memoizedClient) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing\n } finally {\n this._memoizedClient = undefined;\n this._configHash = undefined;\n }\n }\n }\n\n /**\n * Validates a single record using typia's comprehensive type checking.\n * This provides the most accurate validation as it uses the exact TypeScript type information.\n *\n * @param record The record to validate\n * @returns Validation result with detailed error information\n */\n validateRecord(record: unknown): {\n success: boolean;\n data?: T;\n errors?: string[];\n } {\n // Use injected typia validator if available\n if (this.validators?.validate) {\n try {\n const result = this.validators.validate(record);\n return {\n success: result.success,\n data: result.data,\n errors: result.errors?.map((err) =>\n typeof err === \"string\" ? err : JSON.stringify(err),\n ),\n };\n } catch (error) {\n return {\n success: false,\n errors: [error instanceof Error ? error.message : String(error)],\n };\n }\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Type guard function using typia's is() function.\n * Provides compile-time type narrowing for TypeScript.\n *\n * @param record The record to check\n * @returns True if record matches type T, with type narrowing\n */\n isValidRecord(record: unknown): record is T {\n if (this.validators?.is) {\n return this.validators.is(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Assert that a record matches type T, throwing detailed errors if not.\n * Uses typia's assert() function for the most detailed error reporting.\n *\n * @param record The record to assert\n * @returns The validated and typed record\n * @throws Detailed validation error if record doesn't match type T\n */\n assertValidRecord(record: unknown): T {\n if (this.validators?.assert) {\n return this.validators.assert(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Validates an array of records with comprehensive error reporting.\n * Uses the most appropriate validation method available (typia or basic).\n *\n * @param data Array of records to validate\n * @returns Detailed validation results\n */\n async validateRecords(data: unknown[]): Promise<ValidationResult<T>> {\n const valid: T[] = [];\n const invalid: ValidationError[] = [];\n\n // Pre-allocate arrays with estimated sizes to reduce reallocations\n valid.length = 0;\n invalid.length = 0;\n\n // Use for loop instead of forEach for better performance\n const dataLength = data.length;\n for (let i = 0; i < dataLength; i++) {\n const record = data[i];\n\n try {\n // Fast path: use typia's is() function first for type checking\n if (this.isValidRecord(record)) {\n valid.push(this.mapToClickhouseRecord(record));\n } else {\n // Only use expensive validateRecord for detailed errors when needed\n const result = this.validateRecord(record);\n if (result.success) {\n 
valid.push(this.mapToClickhouseRecord(record));\n } else {\n invalid.push({\n record,\n error: result.errors?.join(\", \") || \"Validation failed\",\n index: i,\n path: \"root\",\n });\n }\n }\n } catch (error) {\n invalid.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i,\n path: \"root\",\n });\n }\n }\n\n return {\n valid,\n invalid,\n total: dataLength,\n };\n }\n\n /**\n * Optimized batch retry that minimizes individual insert operations.\n * Groups records into smaller batches to reduce round trips while still isolating failures.\n *\n * @private\n */\n private async retryIndividualRecords(\n client: any,\n tableName: string,\n records: T[],\n ): Promise<{ successful: T[]; failed: FailedRecord<T>[] }> {\n const successful: T[] = [];\n const failed: FailedRecord<T>[] = [];\n\n // Instead of individual inserts, try smaller batches first (batches of 10)\n const RETRY_BATCH_SIZE = 10;\n const totalRecords = records.length;\n\n for (let i = 0; i < totalRecords; i += RETRY_BATCH_SIZE) {\n const batchEnd = Math.min(i + RETRY_BATCH_SIZE, totalRecords);\n const batch = records.slice(i, batchEnd);\n\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: batch,\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n // Add performance settings for retries\n max_insert_block_size: RETRY_BATCH_SIZE,\n max_block_size: RETRY_BATCH_SIZE,\n },\n });\n successful.push(...batch);\n } catch (batchError) {\n // If small batch fails, fall back to individual records\n for (let j = 0; j < batch.length; j++) {\n const record = batch[j];\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: [record],\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n },\n });\n successful.push(record);\n } catch (error) {\n failed.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i + j,\n });\n }\n }\n }\n }\n\n return { successful, failed };\n }\n\n /**\n * Validates input parameters and strategy compatibility\n * @private\n */\n private validateInsertParameters(\n data: T[] | Readable,\n options?: InsertOptions,\n ): { isStream: boolean; strategy: string; shouldValidate: boolean } {\n const isStream = data instanceof Readable;\n const strategy = options?.strategy || \"fail-fast\";\n const shouldValidate = options?.validate !== false;\n\n // Validate strategy compatibility with streams\n if (isStream && strategy === \"isolate\") {\n throw new Error(\n \"The 'isolate' error strategy is not supported with stream input. Use 'fail-fast' or 'discard' instead.\",\n );\n }\n\n // Validate that validation is not attempted on streams\n if (isStream && shouldValidate) {\n console.warn(\n \"Validation is not supported with stream input. 
Validation will be skipped.\",\n );\n }\n\n return { isStream, strategy, shouldValidate };\n }\n\n /**\n * Handles early return cases for empty data\n * @private\n */\n private handleEmptyData(\n data: T[] | Readable,\n isStream: boolean,\n ): InsertResult<T> | null {\n if (isStream && !data) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n if (!isStream && (!data || (data as T[]).length === 0)) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n return null;\n }\n\n /**\n * Performs pre-insertion validation for array data\n * @private\n */\n private async performPreInsertionValidation(\n data: T[],\n shouldValidate: boolean,\n strategy: string,\n options?: InsertOptions,\n ): Promise<{ validatedData: T[]; validationErrors: ValidationError[] }> {\n if (!shouldValidate) {\n return { validatedData: data, validationErrors: [] };\n }\n\n try {\n const validationResult = await this.validateRecords(data as unknown[]);\n const validatedData = validationResult.valid;\n const validationErrors = validationResult.invalid;\n\n if (validationErrors.length > 0) {\n this.handleValidationErrors(validationErrors, strategy, data, options);\n\n // Return appropriate data based on strategy\n switch (strategy) {\n case \"discard\":\n return { validatedData, validationErrors };\n case \"isolate\":\n return { validatedData: data, validationErrors };\n default:\n return { validatedData, validationErrors };\n }\n }\n\n return { validatedData, validationErrors };\n } catch (validationError) {\n if (strategy === \"fail-fast\") {\n throw validationError;\n }\n console.warn(\"Validation error:\", validationError);\n return { validatedData: data, validationErrors: [] };\n }\n }\n\n /**\n * Handles validation errors based on the specified strategy\n * @private\n */\n private handleValidationErrors(\n validationErrors: ValidationError[],\n strategy: string,\n data: T[],\n options?: InsertOptions,\n ): void {\n switch (strategy) {\n case \"fail-fast\":\n const firstError = validationErrors[0];\n throw new Error(\n `Validation failed for record at index ${firstError.index}: ${firstError.error}`,\n );\n\n case \"discard\":\n this.checkValidationThresholds(validationErrors, data.length, options);\n break;\n\n case \"isolate\":\n // For isolate strategy, validation errors will be handled in the final result\n break;\n }\n }\n\n /**\n * Checks if validation errors exceed configured thresholds\n * @private\n */\n private checkValidationThresholds(\n validationErrors: ValidationError[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const validationFailedCount = validationErrors.length;\n const validationFailedRatio = validationFailedCount / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n validationFailedCount > options.allowErrors\n ) {\n throw new Error(\n `Too many validation failures: ${validationFailedCount} > ${options.allowErrors}. Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n validationFailedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Validation failure ratio too high: ${validationFailedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Optimized insert options preparation with better memory management\n * @private\n */\n private prepareInsertOptions(\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n isStream: boolean,\n strategy: string,\n options?: InsertOptions,\n ): any {\n const insertOptions: any = {\n table: quoteIdentifier(tableName),\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n wait_end_of_query: 1, // Ensure at least once delivery for INSERT operations\n // Performance optimizations\n max_insert_block_size:\n isStream ? 100000 : Math.min(validatedData.length, 100000),\n max_block_size: 65536,\n // Use async inserts for better performance with large datasets\n async_insert: validatedData.length > 1000 ? 1 : 0,\n wait_for_async_insert: 1, // For at least once delivery\n },\n };\n\n // Handle stream vs array input\n if (isStream) {\n insertOptions.values = data;\n } else {\n insertOptions.values = validatedData;\n }\n\n // For discard strategy, add optimized ClickHouse error tolerance settings\n if (\n strategy === \"discard\" &&\n (options?.allowErrors !== undefined ||\n options?.allowErrorsRatio !== undefined)\n ) {\n if (options.allowErrors !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_num =\n options.allowErrors;\n }\n\n if (options.allowErrorsRatio !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_ratio =\n options.allowErrorsRatio;\n }\n }\n\n return insertOptions;\n }\n\n /**\n * Creates success result for completed insertions\n * @private\n */\n private createSuccessResult(\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n strategy: string,\n ): InsertResult<T> {\n if (isStream) {\n return {\n successful: -1, // -1 indicates stream mode where count is unknown\n failed: 0,\n total: -1,\n };\n }\n\n const insertedCount = validatedData.length;\n const totalProcessed =\n shouldValidate ? (data as T[]).length : insertedCount;\n\n const result: InsertResult<T> = {\n successful: insertedCount,\n failed: shouldValidate ? validationErrors.length : 0,\n total: totalProcessed,\n };\n\n // Add failed records if there are validation errors and using discard strategy\n if (\n shouldValidate &&\n validationErrors.length > 0 &&\n strategy === \"discard\"\n ) {\n result.failedRecords = validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }));\n }\n\n return result;\n }\n\n /**\n * Handles insertion errors based on the specified strategy\n * @private\n */\n private async handleInsertionError(\n batchError: any,\n strategy: string,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n switch (strategy) {\n case \"fail-fast\":\n throw new Error(\n `Failed to insert data into table ${tableName}: ${batchError}`,\n );\n\n case \"discard\":\n throw new Error(\n `Too many errors during insert into table ${tableName}. 
Error threshold exceeded: ${batchError}`,\n );\n\n case \"isolate\":\n return await this.handleIsolateStrategy(\n batchError,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n\n default:\n throw new Error(`Unknown error strategy: ${strategy}`);\n }\n }\n\n /**\n * Handles the isolate strategy for insertion errors\n * @private\n */\n private async handleIsolateStrategy(\n batchError: any,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n if (isStream) {\n throw new Error(\n `Isolate strategy is not supported with stream input: ${batchError}`,\n );\n }\n\n try {\n const { client } = await this.getMemoizedClient();\n const skipValidationOnRetry = options?.skipValidationOnRetry || false;\n const retryData = skipValidationOnRetry ? (data as T[]) : validatedData;\n\n const { successful, failed } = await this.retryIndividualRecords(\n client,\n tableName,\n retryData,\n );\n\n // Combine validation errors with insertion errors\n const allFailedRecords: FailedRecord<T>[] = [\n // Validation errors (if any and not skipping validation on retry)\n ...(shouldValidate && !skipValidationOnRetry ?\n validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }))\n : []),\n // Insertion errors\n ...failed,\n ];\n\n this.checkInsertionThresholds(\n allFailedRecords,\n (data as T[]).length,\n options,\n );\n\n return {\n successful: successful.length,\n failed: allFailedRecords.length,\n total: (data as T[]).length,\n failedRecords: allFailedRecords,\n };\n } catch (isolationError) {\n throw new Error(\n `Failed to insert data into table ${tableName} during record isolation: ${isolationError}`,\n );\n }\n }\n\n /**\n * Checks if insertion errors exceed configured thresholds\n * @private\n */\n private checkInsertionThresholds(\n failedRecords: FailedRecord<T>[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const totalFailed = failedRecords.length;\n const failedRatio = totalFailed / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n totalFailed > options.allowErrors\n ) {\n throw new Error(\n `Too many failed records: ${totalFailed} > ${options.allowErrors}. Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n failedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Failed record ratio too high: ${failedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Recursively transforms a record to match ClickHouse's JSONEachRow requirements\n *\n * - For every Array(Nested(...)) field at any depth, each item is wrapped in its own array and recursively processed.\n * - For every Nested struct (not array), it recurses into the struct.\n * - This ensures compatibility with kafka_clickhouse_sync\n *\n * @param record The input record to transform (may be deeply nested)\n * @param columns The schema columns for this level (defaults to this.columnArray at the top level)\n * @returns The transformed record, ready for ClickHouse JSONEachRow insertion\n */\n private mapToClickhouseRecord(\n record: any,\n columns: Column[] = this.columnArray,\n ): any {\n const result = { ...record };\n for (const col of columns) {\n const value = record[col.name];\n const dt = col.data_type;\n\n if (isArrayNestedType(dt)) {\n // For Array(Nested(...)), wrap each item in its own array and recurse\n if (\n Array.isArray(value) &&\n (value.length === 0 || typeof value[0] === \"object\")\n ) {\n result[col.name] = value.map((item) => [\n this.mapToClickhouseRecord(item, dt.elementType.columns),\n ]);\n }\n } else if (isNestedType(dt)) {\n // For Nested struct (not array), recurse into it\n if (value && typeof value === \"object\") {\n result[col.name] = this.mapToClickhouseRecord(value, dt.columns);\n }\n }\n // All other types: leave as is for now\n }\n return result;\n }\n\n /**\n * Inserts data directly into the ClickHouse table with enhanced error handling and validation.\n * This method establishes a direct connection to ClickHouse using the project configuration\n * and inserts the provided data into the versioned table.\n *\n * PERFORMANCE OPTIMIZATIONS:\n * - Memoized client connections with fast config hashing\n * - Single-pass validation with pre-allocated arrays\n * - Batch-optimized retry strategy (batches of 10, then individual)\n * - Optimized ClickHouse settings for large datasets\n * - Reduced memory allocations and object creation\n *\n * Uses advanced typia validation when available for comprehensive type checking,\n * with fallback to basic validation for compatibility.\n *\n * The ClickHouse client is memoized and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be automatically created.\n *\n * @param data Array of objects conforming to the table schema, or a Node.js Readable stream\n * @param options Optional configuration for error handling, validation, and insertion behavior\n * @returns Promise resolving to detailed insertion results\n * @throws {ConfigError} When configuration cannot be read or parsed\n * @throws {ClickHouseError} When insertion fails based on the error strategy\n * @throws {ValidationError} When validation fails and strategy is 'fail-fast'\n *\n * @example\n * ```typescript\n * // Create an OlapTable instance (typia validators auto-injected)\n * const userTable = new OlapTable<User>('users');\n *\n * // Insert with comprehensive typia validation\n * const result1 = await userTable.insert([\n * { id: 1, name: 'John', email: 'john@example.com' },\n * { id: 2, name: 'Jane', email: 'jane@example.com' }\n * ]);\n *\n * // Insert data with stream input (validation not available for streams)\n * const dataStream = new Readable({\n * objectMode: true,\n * read() { // Stream implementation }\n * });\n * const result2 = await userTable.insert(dataStream, { strategy: 'fail-fast' });\n *\n * 
// Insert with validation disabled for performance\n * const result3 = await userTable.insert(data, { validate: false });\n *\n * // Insert with error handling strategies\n * const result4 = await userTable.insert(mixedData, {\n * strategy: 'isolate',\n * allowErrorsRatio: 0.1,\n * validate: true // Use typia validation (default)\n * });\n *\n * // Optional: Clean up connection when completely done\n * await userTable.closeClient();\n * ```\n */\n async insert(\n data: T[] | Readable,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n // Validate input parameters and strategy compatibility\n const { isStream, strategy, shouldValidate } =\n this.validateInsertParameters(data, options);\n\n // Handle early return cases for empty data\n const emptyResult = this.handleEmptyData(data, isStream);\n if (emptyResult) {\n return emptyResult;\n }\n\n // Pre-insertion validation for arrays (optimized single-pass)\n let validatedData: T[] = [];\n let validationErrors: ValidationError[] = [];\n\n if (!isStream && shouldValidate) {\n const validationResult = await this.performPreInsertionValidation(\n data as T[],\n shouldValidate,\n strategy,\n options,\n );\n validatedData = validationResult.validatedData;\n validationErrors = validationResult.validationErrors;\n } else {\n // No validation or stream input\n validatedData = isStream ? [] : (data as T[]);\n }\n\n // Get memoized client and generate cached table name\n const { client } = await this.getMemoizedClient();\n const tableName = this.generateTableName();\n\n try {\n // Prepare and execute insertion with optimized settings\n const insertOptions = this.prepareInsertOptions(\n tableName,\n data,\n validatedData,\n isStream,\n strategy,\n options,\n );\n\n await client.insert(insertOptions);\n\n // Return success result\n return this.createSuccessResult(\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n strategy,\n );\n } catch (batchError) {\n // Handle insertion failure based on strategy with optimized retry\n return await this.handleInsertionError(\n batchError,\n strategy,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n }\n // Note: We don't close the client here since it's memoized for reuse\n // Use closeClient() method if you need to explicitly close the connection\n }\n\n // Note: Static factory methods (withS3Queue, withReplacingMergeTree, withMergeTree)\n // were removed in ENG-856. 
Use direct configuration instead, e.g.:\n // new OlapTable(name, { engine: ClickHouseEngines.ReplacingMergeTree, orderByFields: [\"id\"], ver: \"updated_at\" })\n}\n","/**\n * @fileoverview Stream SDK for data streaming operations in Moose.\n *\n * This module provides the core streaming functionality including:\n * - Stream creation and configuration\n * - Message transformations between streams\n * - Consumer registration for message processing\n * - Dead letter queue handling for error recovery\n *\n * @module Stream\n */\n\nimport { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { dlqColumns, dlqSchema, getMooseInternal } from \"../internal\";\nimport { OlapTable } from \"./olapTable\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport type {\n RuntimeKafkaConfig,\n ConfigurationRegistry,\n} from \"../../config/runtime\";\nimport { createHash } from \"node:crypto\";\nimport { Logger, Producer } from \"../../commons\";\nimport { getSourceFileFromStack } from \"../utils/stackTrace\";\n\n/**\n * Represents zero, one, or many values of type T.\n * Used for flexible return types in transformations where a single input\n * can produce no output, one output, or multiple outputs.\n *\n * @template T The type of the value(s)\n * @example\n * ```typescript\n * // Can return a single value\n * const single: ZeroOrMany<string> = \"hello\";\n *\n * // Can return an array\n * const multiple: ZeroOrMany<string> = [\"hello\", \"world\"];\n *\n * // Can return null/undefined to filter out\n * const filtered: ZeroOrMany<string> = null;\n * ```\n */\nexport type ZeroOrMany<T> = T | T[] | undefined | null;\n\n/**\n * Function type for transforming records from one type to another.\n * Supports both synchronous and asynchronous transformations.\n *\n * @template T The input record type\n * @template U The output record type\n * @param record The input record to transform\n * @returns The transformed record(s), or null/undefined to filter out\n *\n * @example\n * ```typescript\n * const transform: SyncOrAsyncTransform<InputType, OutputType> = (record) => {\n * return { ...record, processed: true };\n * };\n * ```\n */\nexport type SyncOrAsyncTransform<T, U> = (\n record: T,\n) => ZeroOrMany<U> | Promise<ZeroOrMany<U>>;\n\n/**\n * Function type for consuming records without producing output.\n * Used for side effects like logging, external API calls, or database writes.\n *\n * @template T The record type to consume\n * @param record The record to process\n * @returns Promise<void> or void\n *\n * @example\n * ```typescript\n * const consumer: Consumer<UserEvent> = async (event) => {\n * await sendToAnalytics(event);\n * };\n * ```\n */\nexport type Consumer<T> = (record: T) => Promise<void> | void;\n\n/**\n * Configuration options for stream transformations.\n *\n * @template T The type of records being transformed\n */\nexport interface TransformConfig<T> {\n /**\n * Optional version identifier for this transformation.\n * Multiple transformations to the same destination can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional metadata for documentation and tracking purposes.\n */\n metadata?: { description?: string };\n\n /**\n * Optional dead letter queue for handling transformation failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is 
provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this transform was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\n/**\n * Configuration options for stream consumers.\n *\n * @template T The type of records being consumed\n */\nexport interface ConsumerConfig<T> {\n /**\n * Optional version identifier for this consumer.\n * Multiple consumers can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional dead letter queue for handling consumer failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this consumer was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\nexport type SchemaRegistryEncoding = \"JSON\" | \"AVRO\" | \"PROTOBUF\";\n\nexport type SchemaRegistryReference =\n | { id: number }\n | { subjectLatest: string }\n | { subject: string; version: number };\n\nexport interface KafkaSchemaConfig {\n kind: SchemaRegistryEncoding;\n reference: SchemaRegistryReference;\n}\n\n/**\n * Represents a message routed to a specific destination stream.\n * Used internally by the multi-transform functionality to specify\n * where transformed messages should be sent.\n *\n * @internal\n */\nclass RoutedMessage {\n /** The destination stream for the message */\n destination: Stream<any>;\n\n /** The message value(s) to send */\n values: ZeroOrMany<any>;\n\n /**\n * Creates a new routed message.\n *\n * @param destination The target stream\n * @param values The message(s) to route\n */\n constructor(destination: Stream<any>, values: ZeroOrMany<any>) {\n this.destination = destination;\n this.values = values;\n }\n}\n\n/**\n * Configuration options for a data stream (e.g., a Redpanda topic).\n * @template T The data type of the messages in the stream.\n */\nexport interface StreamConfig<T> {\n /**\n * Specifies the number of partitions for the stream. Affects parallelism and throughput.\n */\n parallelism?: number;\n /**\n * Specifies the data retention period for the stream in seconds. Messages older than this may be deleted.\n */\n retentionPeriod?: number;\n /**\n * An optional destination OLAP table where messages from this stream should be automatically ingested.\n */\n destination?: OlapTable<T>;\n /**\n * An optional version string for this configuration. Can be used for tracking changes or managing deployments.\n */\n version?: string;\n metadata?: { description?: string };\n lifeCycle?: LifeCycle;\n\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n\n /** Optional Schema Registry configuration for this stream */\n schemaConfig?: KafkaSchemaConfig;\n}\n\n/**\n * Represents a data stream, typically corresponding to a Redpanda topic.\n * Provides a typed interface for producing to and consuming from the stream, and defining transformations.\n *\n * @template T The data type of the messages flowing through the stream. 
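(For illustration: with interface PageView { url: string; viewedAt: Date }, Stream<PageView> is a typed topic of page-view events.) 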
The structure of T defines the message schema.\n */\nexport class Stream<T> extends TypedBase<T, StreamConfig<T>> {\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n /** @internal Memoized KafkaJS producer for reusing connections across sends */\n private _memoizedProducer?: Producer;\n /** @internal Hash of the configuration used to create the memoized Kafka producer */\n private _kafkaConfigHash?: string;\n\n /**\n * Creates a new Stream instance.\n * @param name The name of the stream. This name is used for the underlying Redpanda topic.\n * @param config Optional configuration for the stream.\n */\n constructor(name: string, config?: StreamConfig<T>);\n\n /**\n * @internal\n * Note: `validators` parameter is a positional placeholder (always undefined for Stream).\n * It exists because TypedBase has validators as the 5th param, and we need to pass\n * allowExtraFields as the 6th param. Stream doesn't use validators.\n */\n constructor(\n name: string,\n config: StreamConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: undefined,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: undefined,\n allowExtraFields?: boolean,\n ) {\n super(name, config ?? {}, schema, columns, undefined, allowExtraFields);\n const streams = getMooseInternal().streams;\n if (streams.has(name)) {\n throw new Error(`Stream with name ${name} already exists`);\n }\n streams.set(name, this);\n this.defaultDeadLetterQueue = this.config.defaultDeadLetterQueue;\n }\n\n /**\n * Internal map storing transformation configurations.\n * Maps destination stream names to arrays of transformation functions and their configs.\n *\n * @internal\n */\n _transformations = new Map<\n string,\n [Stream<any>, SyncOrAsyncTransform<T, any>, TransformConfig<T>][]\n >();\n\n /**\n * Internal function for multi-stream transformations.\n * Allows a single transformation to route messages to multiple destinations.\n *\n * @internal\n */\n _multipleTransformations?: (record: T) => [RoutedMessage];\n\n /**\n * Internal array storing consumer configurations.\n *\n * @internal\n */\n _consumers = new Array<{\n consumer: Consumer<T>;\n config: ConsumerConfig<T>;\n }>();\n\n /**\n * Builds the full Kafka topic name including optional namespace and version suffix.\n * Version suffix is appended as _x_y_z where dots in version are replaced with underscores.\n */\n private buildFullTopicName(namespace?: string): string {\n const versionSuffix =\n this.config.version ? 
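/* e.g. version \"1.2.3\" becomes suffix \"_1_2_3\" */ 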
`_${this.config.version.replace(/\\./g, \"_\")}` : \"\";\n const base = `${this.name}${versionSuffix}`;\n return namespace !== undefined && namespace.length > 0 ?\n `${namespace}.${base}`\n : base;\n }\n\n /**\n * Creates a fast hash string from relevant Kafka configuration fields.\n */\n private createConfigHash(kafkaConfig: RuntimeKafkaConfig): string {\n const configString = [\n kafkaConfig.broker,\n kafkaConfig.messageTimeoutMs,\n kafkaConfig.saslUsername,\n kafkaConfig.saslPassword,\n kafkaConfig.saslMechanism,\n kafkaConfig.securityProtocol,\n kafkaConfig.namespace,\n ].join(\":\");\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized KafkaJS producer using runtime configuration.\n */\n private async getMemoizedProducer(): Promise<{\n producer: Producer;\n kafkaConfig: RuntimeKafkaConfig;\n }> {\n // dynamic import to keep Stream objects browser compatible\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getKafkaProducer } = await import(\"../../commons\");\n\n const kafkaConfig = await (configRegistry as any).getKafkaConfig();\n const currentHash = this.createConfigHash(kafkaConfig);\n\n if (this._memoizedProducer && this._kafkaConfigHash === currentHash) {\n return { producer: this._memoizedProducer, kafkaConfig };\n }\n\n // Close existing producer if config changed\n if (this._memoizedProducer && this._kafkaConfigHash !== currentHash) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n }\n this._memoizedProducer = undefined;\n }\n\n const clientId = `moose-sdk-stream-${this.name}`;\n const logger: Logger = {\n logPrefix: clientId,\n log: (message: string): void => {\n console.log(`${clientId}: ${message}`);\n },\n error: (message: string): void => {\n console.error(`${clientId}: ${message}`);\n },\n warn: (message: string): void => {\n console.warn(`${clientId}: ${message}`);\n },\n };\n\n const producer = await getKafkaProducer(\n {\n clientId,\n broker: kafkaConfig.broker,\n securityProtocol: kafkaConfig.securityProtocol,\n saslUsername: kafkaConfig.saslUsername,\n saslPassword: kafkaConfig.saslPassword,\n saslMechanism: kafkaConfig.saslMechanism,\n },\n logger,\n );\n\n this._memoizedProducer = producer;\n this._kafkaConfigHash = currentHash;\n\n return { producer, kafkaConfig };\n }\n\n /**\n * Closes the memoized Kafka producer if it exists.\n */\n async closeProducer(): Promise<void> {\n if (this._memoizedProducer) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n } finally {\n this._memoizedProducer = undefined;\n this._kafkaConfigHash = undefined;\n }\n }\n }\n\n /**\n * Sends one or more records to this stream's Kafka topic.\n * Values are JSON-serialized as message values.\n */\n async send(values: ZeroOrMany<T>): Promise<void> {\n // Normalize to flat array of records\n const flat: T[] =\n Array.isArray(values) ? values\n : values !== undefined && values !== null ? 
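/* a single record becomes a one-element batch */ 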
[values as T]\n : [];\n\n if (flat.length === 0) return;\n\n const { producer, kafkaConfig } = await this.getMemoizedProducer();\n const topic = this.buildFullTopicName(kafkaConfig.namespace);\n\n // Use Schema Registry JSON envelope if configured\n const sr = this.config.schemaConfig;\n if (sr && sr.kind === \"JSON\") {\n const schemaRegistryUrl = kafkaConfig.schemaRegistryUrl;\n if (!schemaRegistryUrl) {\n throw new Error(\"Schema Registry URL not configured\");\n }\n\n const {\n default: { SchemaRegistry },\n } = await import(\"@kafkajs/confluent-schema-registry\");\n const registry = new SchemaRegistry({ host: schemaRegistryUrl });\n\n let schemaId: undefined | number = undefined;\n\n if (\"id\" in sr.reference) {\n schemaId = sr.reference.id;\n } else if (\"subjectLatest\" in sr.reference) {\n schemaId = await registry.getLatestSchemaId(sr.reference.subjectLatest);\n } else if (\"subject\" in sr.reference) {\n schemaId = await registry.getRegistryId(\n sr.reference.subject,\n sr.reference.version,\n );\n }\n\n if (schemaId === undefined) {\n throw new Error(\"Malformed schema reference.\");\n }\n\n const encoded = await Promise.all(\n flat.map((v) =>\n registry.encode(schemaId, v as unknown as Record<string, unknown>),\n ),\n );\n await producer.send({\n topic,\n messages: encoded.map((value) => ({ value })),\n });\n return;\n } else if (sr !== undefined) {\n throw new Error(\"Currently only JSON Schema is supported.\");\n }\n\n await producer.send({\n topic,\n messages: flat.map((v) => ({ value: JSON.stringify(v) })),\n });\n }\n\n /**\n * Adds a transformation step that processes messages from this stream and sends the results to a destination stream.\n * Multiple transformations to the same destination stream can be added if they have distinct `version` identifiers in their config.\n *\n * @template U The data type of the messages in the destination stream.\n * @param destination The destination stream for the transformed messages.\n * @param transformation A function that takes a message of type T and returns zero or more messages of type U (or a Promise thereof).\n * Return `null` or `undefined` or an empty array `[]` to filter out a message. Return an array to emit multiple messages.\n * @param config Optional configuration for this specific transformation step, like a version.\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<T, U>,\n config?: TransformConfig<T>,\n ) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const transformConfig: TransformConfig<T> = {\n ...(config ?? 
{}),\n sourceFile,\n };\n if (transformConfig.deadLetterQueue === undefined) {\n transformConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n\n if (this._transformations.has(destination.name)) {\n const existingTransforms = this._transformations.get(destination.name)!;\n const hasVersion = existingTransforms.some(\n ([_, __, cfg]) => cfg.version === transformConfig.version,\n );\n\n if (!hasVersion) {\n existingTransforms.push([destination, transformation, transformConfig]);\n }\n } else {\n this._transformations.set(destination.name, [\n [destination, transformation, transformConfig],\n ]);\n }\n }\n\n /**\n * Adds a consumer function that processes messages from this stream.\n * Multiple consumers can be added if they have distinct `version` identifiers in their config.\n *\n * @param consumer A function that takes a message of type T and performs an action (e.g., side effect, logging). Should return void or Promise<void>.\n * @param config Optional configuration for this specific consumer, like a version.\n */\n addConsumer(consumer: Consumer<T>, config?: ConsumerConfig<T>) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const consumerConfig: ConsumerConfig<T> = {\n ...(config ?? {}),\n sourceFile,\n };\n if (consumerConfig.deadLetterQueue === undefined) {\n consumerConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n const hasVersion = this._consumers.some(\n (existing) => existing.config.version === consumerConfig.version,\n );\n\n if (!hasVersion) {\n this._consumers.push({ consumer, config: consumerConfig });\n }\n }\n\n /**\n * Helper method for `addMultiTransform` to specify the destination and values for a routed message.\n * @param values The value or values to send to this stream.\n * @returns A `RoutedMessage` object associating the values with this stream.\n *\n * @example\n * ```typescript\n * sourceStream.addMultiTransform((record) => [\n * destinationStream1.routed(transformedRecord1),\n * destinationStream2.routed([record2a, record2b])\n * ]);\n * ```\n */\n routed = (values: ZeroOrMany<T>) => new RoutedMessage(this, values);\n\n /**\n * Adds a single transformation function that can route messages to multiple destination streams.\n * This is an alternative to adding multiple individual `addTransform` calls.\n * Only one multi-transform function can be added per stream.\n *\n * @param transformation A function that takes a message of type T and returns an array of `RoutedMessage` objects,\n * each specifying a destination stream and the message(s) to send to it.\n */\n addMultiTransform(transformation: (record: T) => [RoutedMessage]) {\n this._multipleTransformations = transformation;\n }\n}\n\n/**\n * Base model for dead letter queue entries.\n * Contains the original failed record along with error information.\n */\nexport interface DeadLetterModel {\n /** The original record that failed processing */\n originalRecord: Record<string, any>;\n\n /** Human-readable error message describing the failure */\n errorMessage: string;\n\n /** Classification of the error type (e.g., \"ValidationError\", \"TransformError\") */\n errorType: string;\n\n /** Timestamp when the failure occurred */\n failedAt: Date;\n\n /** The source component where the failure occurred */\n source: \"api\" | \"transform\" | \"table\";\n}\n\n/**\n * Enhanced dead letter model with type recovery functionality.\n * Extends the base model with the ability to recover the original typed record.\n *\n * 
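@example\n * // Illustrative sketch ('UserEvent' is an assumed record type)\n * const original: UserEvent = deadLetter.asTyped();\n * console.log(deadLetter.errorType, original);\n *\n * 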
@template T The original record type before failure\n */\nexport interface DeadLetter<T> extends DeadLetterModel {\n /**\n * Recovers the original record as its typed form.\n * Useful for reprocessing failed records with proper type safety.\n *\n * @returns The original record cast to type T\n */\n asTyped: () => T;\n}\n\n/**\n * Internal function to attach type guard functionality to dead letter records.\n *\n * @internal\n * @template T The original record type\n * @param dl The dead letter model to enhance\n * @param typeGuard Function to validate and cast the original record\n */\nfunction attachTypeGuard<T>(\n dl: DeadLetterModel,\n typeGuard: (input: any) => T,\n): asserts dl is DeadLetter<T> {\n (dl as any).asTyped = () => typeGuard(dl.originalRecord);\n}\n\n/**\n * Specialized stream for handling failed records (dead letters).\n * Provides type-safe access to failed records for reprocessing or analysis.\n *\n * @template T The original record type that failed processing\n *\n * @example\n * ```typescript\n * const dlq = new DeadLetterQueue<UserEvent>(\"user-events-dlq\");\n *\n * dlq.addConsumer(async (deadLetter) => {\n * const originalEvent = deadLetter.asTyped();\n * console.log(`Failed event: ${deadLetter.errorMessage}`);\n * // Potentially reprocess or alert\n * });\n * ```\n */\nexport class DeadLetterQueue<T> extends Stream<DeadLetterModel> {\n /**\n * Creates a new DeadLetterQueue instance.\n * @param name The name of the dead letter queue stream\n * @param config Optional configuration for the stream. The metadata property is always present and includes stackTrace.\n */\n constructor(name: string, config?: StreamConfig<DeadLetterModel>);\n\n /** @internal **/\n constructor(\n name: string,\n config: StreamConfig<DeadLetterModel>,\n validate: (originalRecord: any) => T,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<DeadLetterModel>,\n typeGuard?: (originalRecord: any) => T,\n ) {\n if (typeGuard === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n super(name, config ?? 
{}, dlqSchema, dlqColumns, undefined, false);\n this.typeGuard = typeGuard;\n getMooseInternal().streams.set(name, this);\n }\n\n /**\n * Internal type guard function for validating and casting original records.\n *\n * @internal\n */\n private typeGuard: (originalRecord: any) => T;\n\n /**\n * Adds a transformation step for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @template U The output type for the transformation\n * @param destination The destination stream for transformed messages\n * @param transformation Function to transform dead letter records\n * @param config Optional transformation configuration\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<DeadLetter<T>, U>,\n config?: TransformConfig<DeadLetterModel>,\n ) {\n const withValidate: SyncOrAsyncTransform<DeadLetterModel, U> = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addTransform(destination, withValidate, config);\n }\n\n /**\n * Adds a consumer for dead letter records.\n * The consumer function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param consumer Function to process dead letter records\n * @param config Optional consumer configuration\n */\n addConsumer(\n consumer: Consumer<DeadLetter<T>>,\n config?: ConsumerConfig<DeadLetterModel>,\n ) {\n const withValidate: Consumer<DeadLetterModel> = (deadLetter) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return consumer(deadLetter);\n };\n super.addConsumer(withValidate, config);\n }\n\n /**\n * Adds a multi-stream transformation for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param transformation Function to route dead letter records to multiple destinations\n */\n addMultiTransform(\n transformation: (record: DeadLetter<T>) => [RoutedMessage],\n ) {\n const withValidate: (record: DeadLetterModel) => [RoutedMessage] = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addMultiTransform(withValidate);\n }\n}\n","export * from \"./browserCompatible\";\n\nexport type DataModelConfig<T> = Partial<{\n ingestion: true;\n storage: {\n enabled?: boolean;\n order_by_fields?: (keyof T)[];\n deduplicate?: boolean;\n name?: string;\n };\n parallelism?: number;\n}>;\n\nexport * from \"./blocks/helpers\";\nexport * from \"./commons\";\nexport * from \"./secrets\";\nexport * from \"./consumption-apis/helpers\";\nexport * from \"./consumption-apis/webAppHelpers\";\nexport * from \"./scripts/task\";\n\nexport { createApi, createConsumptionApi } from \"./consumption-apis/runner\";\n\nexport { MooseCache } from \"./clients/redisClient\";\n\nexport { ApiUtil, ConsumptionUtil } from \"./consumption-apis/helpers\";\n\nexport { getMooseClients } from \"./consumption-apis/standalone\";\nexport { sql } from \"./sqlHelpers\";\n\nexport * from \"./utilities\";\nexport * from \"./connectors/dataSource\";\nexport {\n ClickHouseByteSize,\n ClickHouseInt,\n LowCardinality,\n ClickHouseNamedTuple,\n ClickHousePoint,\n ClickHouseRing,\n ClickHouseLineString,\n ClickHouseMultiLineString,\n ClickHousePolygon,\n ClickHouseMultiPolygon,\n} from \"./dataModels/types\";\n","import { ClickHouseClient, CommandResult, ResultSet } from \"@clickhouse/client\";\nimport {\n Client as TemporalClient,\n Connection,\n ConnectionOptions,\n} from 
\"@temporalio/client\";\nimport { StringValue } from \"@temporalio/common\";\nimport { createHash, randomUUID } from \"node:crypto\";\nimport { performance } from \"perf_hooks\";\nimport * as fs from \"fs\";\nimport { getWorkflows } from \"../dmv2/internal\";\nimport { JWTPayload } from \"jose\";\nimport { Sql, sql, RawValue, toQuery, toQueryPreview } from \"../sqlHelpers\";\n\n/**\n * Format elapsed milliseconds into a human-readable string.\n * Matches Python's format_timespan behavior.\n */\nfunction formatElapsedTime(ms: number): string {\n if (ms < 1000) {\n return `${Math.round(ms)} ms`;\n }\n const seconds = ms / 1000;\n if (seconds < 60) {\n return `${seconds.toFixed(2)} seconds`;\n }\n const minutes = Math.floor(seconds / 60);\n const remainingSeconds = seconds % 60;\n return `${minutes} minutes and ${remainingSeconds.toFixed(2)} seconds`;\n}\n\nexport interface ApiUtil {\n client: MooseClient;\n\n // SQL interpolator\n sql: typeof sql;\n jwt: JWTPayload | undefined;\n}\n\n/** @deprecated Use ApiUtil instead. */\nexport type ConsumptionUtil = ApiUtil;\n\nexport class MooseClient {\n query: QueryClient;\n workflow: WorkflowClient;\n\n constructor(queryClient: QueryClient, temporalClient?: TemporalClient) {\n this.query = queryClient;\n this.workflow = new WorkflowClient(temporalClient);\n }\n}\n\nexport class QueryClient {\n client: ClickHouseClient;\n query_id_prefix: string;\n constructor(client: ClickHouseClient, query_id_prefix: string) {\n this.client = client;\n this.query_id_prefix = query_id_prefix;\n }\n\n async execute<T = any>(\n sql: Sql,\n ): Promise<ResultSet<\"JSONEachRow\"> & { __query_result_t?: T[] }> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Query: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.query({\n query,\n query_params,\n format: \"JSONEachRow\",\n query_id: this.query_id_prefix + randomUUID(),\n // Note: wait_end_of_query deliberately NOT set here as this is used for SELECT queries\n // where response buffering would harm streaming performance and concurrency\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Query completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n\n async command(sql: Sql): Promise<CommandResult> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Command: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.command({\n query,\n query_params,\n query_id: this.query_id_prefix + randomUUID(),\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Command completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n}\n\nexport class WorkflowClient {\n client: TemporalClient | undefined;\n\n constructor(temporalClient?: TemporalClient) {\n this.client = temporalClient;\n }\n\n async execute(name: string, input_data: any) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. 
Is the feature flag enabled?`,\n };\n }\n\n // Get workflow configuration\n const config = await this.getWorkflowConfig(name);\n\n // Process input data and generate workflow ID\n const [processedInput, workflowId] = this.processInputData(\n name,\n input_data,\n );\n\n console.log(\n `WorkflowClient - starting workflow: ${name} with config ${JSON.stringify(config)} and input_data ${JSON.stringify(processedInput)}`,\n );\n\n const handle = await this.client.workflow.start(\"ScriptWorkflow\", {\n args: [\n { workflow_name: name, execution_mode: \"start\" as const },\n processedInput,\n ],\n taskQueue: \"typescript-script-queue\",\n workflowId,\n workflowIdConflictPolicy: \"FAIL\",\n workflowIdReusePolicy: \"ALLOW_DUPLICATE\",\n retry: {\n maximumAttempts: config.retries,\n },\n workflowRunTimeout: config.timeout as StringValue,\n });\n\n return {\n status: 200,\n body: `Workflow started: ${name}. View it in the Temporal dashboard: http://localhost:8080/namespaces/default/workflows/${workflowId}/${handle.firstExecutionRunId}/history`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error starting workflow: ${error}`,\n };\n }\n }\n\n async terminate(workflowId: string) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. Is the feature flag enabled?`,\n };\n }\n\n const handle = this.client.workflow.getHandle(workflowId);\n await handle.terminate();\n\n return {\n status: 200,\n body: `Workflow terminated: ${workflowId}`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error terminating workflow: ${error}`,\n };\n }\n }\n\n private async getWorkflowConfig(\n name: string,\n ): Promise<{ retries: number; timeout: string }> {\n const workflows = await getWorkflows();\n const dmv2Workflow = workflows.get(name);\n if (dmv2Workflow) {\n return {\n retries: dmv2Workflow.config.retries || 3,\n timeout: dmv2Workflow.config.timeout || \"1h\",\n };\n }\n\n throw new Error(`Workflow config not found for ${name}`);\n }\n\n private processInputData(name: string, input_data: any): [any, string] {\n let workflowId = name;\n if (input_data) {\n const hash = createHash(\"sha256\")\n .update(JSON.stringify(input_data))\n .digest(\"hex\")\n .slice(0, 16);\n workflowId = `${name}-${hash}`;\n }\n return [input_data, workflowId];\n }\n}\n\n/**\n * This looks similar to the client in runner.ts which is a worker.\n * Temporal SDK uses similar looking connection options & client,\n * but there are different libraries for a worker & client like this one\n * that triggers workflows.\n */\nexport async function getTemporalClient(\n temporalUrl: string,\n namespace: string,\n clientCert: string,\n clientKey: string,\n apiKey: string,\n): Promise<TemporalClient | undefined> {\n try {\n console.info(\n `<api> Using temporal_url: ${temporalUrl} and namespace: ${namespace}`,\n );\n\n let connectionOptions: ConnectionOptions = {\n address: temporalUrl,\n connectTimeout: \"3s\",\n };\n\n if (clientCert && clientKey) {\n // URL with mTLS uses gRPC namespace endpoint which is what temporalUrl already is\n console.log(\"Using TLS for secure Temporal\");\n // readFileSync is synchronous, so no await is needed here\n const cert = fs.readFileSync(clientCert);\n const key = fs.readFileSync(clientKey);\n\n connectionOptions.tls = {\n clientCertPair: { crt: cert, key: key },\n };\n } else if (apiKey) {\n console.log(\"Using API key for secure Temporal\");\n // URL with API key uses gRPC regional endpoint\n connectionOptions.address = \"us-west1.gcp.api.temporal.io:7233\";\n connectionOptions.apiKey = apiKey;\n 
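// (Editorial note, assumption) An empty tls options object enables TLS with\n // default settings, as Temporal Cloud expects with API-key authentication.\n 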
connectionOptions.tls = {};\n connectionOptions.metadata = {\n \"temporal-namespace\": namespace,\n };\n }\n\n console.log(`<api> Connecting to Temporal at ${connectionOptions.address}`);\n const connection = await Connection.connect(connectionOptions);\n const client = new TemporalClient({ connection, namespace });\n console.log(\"<api> Connected to Temporal server\");\n\n return client;\n } catch (error) {\n console.warn(`Failed to connect to Temporal. Is the feature flag enabled?`);\n console.warn(error);\n return undefined;\n }\n}\n\nexport const ApiHelpers = {\n column: (value: string) => [\"Identifier\", value] as [string, string],\n table: (value: string) => [\"Identifier\", value] as [string, string],\n};\n\n/** @deprecated Use ApiHelpers instead. */\nexport const ConsumptionHelpers = ApiHelpers;\n\nexport function joinQueries({\n values,\n separator = \",\",\n prefix = \"\",\n suffix = \"\",\n}: {\n values: readonly RawValue[];\n separator?: string;\n prefix?: string;\n suffix?: string;\n}) {\n if (values.length === 0) {\n throw new TypeError(\n \"Expected `join([])` to be called with an array of multiple elements, but got an empty array\",\n );\n }\n\n return new Sql(\n [prefix, ...Array(values.length - 1).fill(separator), suffix],\n values,\n );\n}\n","import http from \"http\";\nimport { getClickhouseClient } from \"../commons\";\nimport { MooseClient, QueryClient, getTemporalClient } from \"./helpers\";\nimport * as jose from \"jose\";\nimport { ClickHouseClient } from \"@clickhouse/client\";\nimport { Cluster } from \"../cluster-utils\";\nimport { ApiUtil } from \"../index\";\nimport { sql } from \"../sqlHelpers\";\nimport { Client as TemporalClient } from \"@temporalio/client\";\nimport { getApis, getWebApps } from \"../dmv2/internal\";\n\ninterface ClickhouseConfig {\n database: string;\n host: string;\n port: string;\n username: string;\n password: string;\n useSSL: boolean;\n}\n\ninterface JwtConfig {\n secret?: string;\n issuer: string;\n audience: string;\n}\n\ninterface TemporalConfig {\n url: string;\n namespace: string;\n clientCert: string;\n clientKey: string;\n apiKey: string;\n}\n\ninterface ApisConfig {\n apisDir: string;\n clickhouseConfig: ClickhouseConfig;\n jwtConfig?: JwtConfig;\n temporalConfig?: TemporalConfig;\n enforceAuth: boolean;\n isDmv2: boolean;\n proxyPort?: number;\n workerCount?: number;\n}\n\n// Convert our config to Clickhouse client config\nconst toClientConfig = (config: ClickhouseConfig) => ({\n ...config,\n useSSL: config.useSSL ? \"true\" : \"false\",\n});\n\nconst createPath = (apisDir: string, path: string) => `${apisDir}${path}.ts`;\n\nconst httpLogger = (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n startMs: number,\n) => {\n console.log(\n `${req.method} ${req.url} ${res.statusCode} ${Date.now() - startMs}ms`,\n );\n};\n\nconst modulesCache = new Map<string, any>();\n\nexport function createApi<T extends object, R = any>(\n _handler: (params: T, utils: ApiUtil) => Promise<R>,\n): (\n rawParams: Record<string, string[] | string>,\n utils: ApiUtil,\n) => Promise<R> {\n throw new Error(\n \"This should be compiled-time replaced by compiler plugins to add parsing.\",\n );\n}\n\n/** @deprecated Use `Api` from \"dmv2/sdk/consumptionApi\" instead. 
*/\nexport const createConsumptionApi = createApi;\n\nconst apiHandler = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apis = isDmv2 ? await getApis() : new Map();\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n try {\n const url = new URL(req.url || \"\", \"http://localhost\");\n const fileName = url.pathname;\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1]; // Bearer <token>\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed\");\n if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n\n const pathName = createPath(apisDir, fileName);\n const paramsObject = Array.from(url.searchParams.entries()).reduce(\n (obj: { [key: string]: string[] | string }, [key, value]) => {\n const existingValue = obj[key];\n if (existingValue) {\n if (Array.isArray(existingValue)) {\n existingValue.push(value);\n } else {\n obj[key] = [existingValue, value];\n }\n } else {\n obj[key] = value;\n }\n return obj;\n },\n {},\n );\n\n let userFuncModule = modulesCache.get(pathName);\n if (userFuncModule === undefined) {\n if (isDmv2) {\n let apiName = fileName.replace(/^\\/+|\\/+$/g, \"\");\n let version: string | null = null;\n\n // First, try to find the API by the full path (for custom paths)\n userFuncModule = apis.get(apiName);\n\n if (!userFuncModule) {\n // Fall back to the old name:version parsing\n version = url.searchParams.get(\"version\");\n\n // Check if version is in the path (e.g., /bar/1)\n if (!version && apiName.includes(\"/\")) {\n const pathParts = apiName.split(\"/\");\n if (pathParts.length >= 2) {\n // Try the full path first (it might be a custom path)\n userFuncModule = apis.get(apiName);\n if (!userFuncModule) {\n // If not found, treat it as name/version\n apiName = pathParts[0];\n version = pathParts.slice(1).join(\"/\");\n }\n }\n }\n\n // Only do versioned lookup if we still haven't found it\n if (!userFuncModule) {\n if (version) {\n const versionedKey = `${apiName}:${version}`;\n userFuncModule = apis.get(versionedKey);\n } else {\n userFuncModule = apis.get(apiName);\n }\n }\n }\n\n if (!userFuncModule) {\n const availableApis = Array.from(apis.keys()).map((key) =>\n key.replace(\":\", \"/\"),\n );\n const errorMessage =\n version ?\n `API ${apiName} with version ${version} not found. Available APIs: ${availableApis.join(\", \")}`\n : `API ${apiName} not found. 
Available APIs: ${availableApis.join(\", \")}`;\n throw new Error(errorMessage);\n }\n\n modulesCache.set(pathName, userFuncModule);\n console.log(`[API] | Executing API: ${apiName}`);\n } else {\n userFuncModule = require(pathName);\n modulesCache.set(pathName, userFuncModule);\n }\n }\n\n const queryClient = new QueryClient(clickhouseClient, fileName);\n let result =\n isDmv2 ?\n await userFuncModule(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n })\n : await userFuncModule.default(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n });\n\n let body: string;\n let status: number | undefined;\n\n // TODO investigate why these prototypes are different\n if (Object.getPrototypeOf(result).constructor.name === \"ResultSet\") {\n body = JSON.stringify(await result.json());\n } else {\n if (\"body\" in result && \"status\" in result) {\n body = JSON.stringify(result.body);\n status = result.status;\n } else {\n body = JSON.stringify(result);\n }\n }\n\n if (status) {\n res.writeHead(status, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n } else {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n }\n\n res.end(body);\n } catch (error: any) {\n console.log(\"error in path \", req.url, error);\n // todo: same workaround as ResultSet\n if (Object.getPrototypeOf(error).constructor.name === \"TypeGuardError\") {\n res.writeHead(400, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n return; // response already sent; avoid writing headers again below\n }\n if (error instanceof Error) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n } else {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end();\n httpLogger(req, res, start);\n }\n }\n };\n};\n\nconst createMainRouter = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apiRequestHandler = await apiHandler(\n publicKey,\n clickhouseClient,\n temporalClient,\n apisDir,\n enforceAuth,\n isDmv2,\n jwtConfig,\n );\n\n const webApps = isDmv2 ? 
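/* WebApps are only registered in dmv2 mode */ 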
await getWebApps() : new Map();\n\n const sortedWebApps = Array.from(webApps.values()).sort((a, b) => {\n const pathA = a.config.mountPath || \"/\";\n const pathB = b.config.mountPath || \"/\";\n return pathB.length - pathA.length;\n });\n\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n const url = new URL(req.url || \"\", \"http://localhost\");\n const pathname = url.pathname;\n\n // Health check - checked before all other routes\n if (pathname === \"/_moose_internal/health\") {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n res.end(\n JSON.stringify({\n status: \"healthy\",\n timestamp: new Date().toISOString(),\n }),\n );\n return;\n }\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1];\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed for WebApp route\");\n }\n }\n }\n\n for (const webApp of sortedWebApps) {\n const mountPath = webApp.config.mountPath || \"/\";\n const normalizedMount =\n mountPath.endsWith(\"/\") && mountPath !== \"/\" ?\n mountPath.slice(0, -1)\n : mountPath;\n\n const matches =\n pathname === normalizedMount ||\n pathname.startsWith(normalizedMount + \"/\");\n\n if (matches) {\n if (webApp.config.injectMooseUtils !== false) {\n const queryClient = new QueryClient(clickhouseClient, pathname);\n (req as any).moose = {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n };\n }\n\n let proxiedUrl = req.url;\n if (normalizedMount !== \"/\") {\n const pathWithoutMount =\n pathname.substring(normalizedMount.length) || \"/\";\n proxiedUrl = pathWithoutMount + url.search;\n }\n\n try {\n // Create a modified request preserving all properties including headers\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: proxiedUrl,\n },\n );\n await webApp.handler(modifiedReq, res);\n return;\n } catch (error) {\n console.error(`Error in WebApp ${webApp.name}:`, error);\n if (!res.headersSent) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Internal Server Error\" }));\n }\n return;\n }\n }\n }\n\n // If no WebApp matched, check if it's an Api request\n // Strip /api or /consumption prefix for Api routing\n let apiPath = pathname;\n if (pathname.startsWith(\"/api/\")) {\n apiPath = pathname.substring(4); // Remove \"/api\"\n } else if (pathname.startsWith(\"/consumption/\")) {\n apiPath = pathname.substring(13); // Remove \"/consumption\"\n }\n\n // If we stripped a prefix, it's an Api request\n if (apiPath !== pathname) {\n // Create a modified request with the rewritten URL for the apiHandler\n // Preserve all properties including headers by using Object.assign with prototype chain\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: apiPath + url.search,\n },\n );\n await apiRequestHandler(modifiedReq as http.IncomingMessage, res);\n return;\n }\n\n res.writeHead(404, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Not 
Found\" }));\n httpLogger(req, res, start);\n };\n};\n\nexport const runApis = async (config: ApisConfig) => {\n const apisCluster = new Cluster({\n maxWorkerCount:\n (config.workerCount ?? 0) > 0 ? config.workerCount : undefined,\n workerStart: async () => {\n let temporalClient: TemporalClient | undefined;\n if (config.temporalConfig) {\n temporalClient = await getTemporalClient(\n config.temporalConfig.url,\n config.temporalConfig.namespace,\n config.temporalConfig.clientCert,\n config.temporalConfig.clientKey,\n config.temporalConfig.apiKey,\n );\n }\n const clickhouseClient = getClickhouseClient(\n toClientConfig(config.clickhouseConfig),\n );\n let publicKey: jose.KeyLike | undefined;\n if (config.jwtConfig?.secret) {\n console.log(\"Importing JWT public key...\");\n publicKey = await jose.importSPKI(config.jwtConfig.secret, \"RS256\");\n }\n\n const server = http.createServer(\n await createMainRouter(\n publicKey,\n clickhouseClient,\n temporalClient,\n config.apisDir,\n config.enforceAuth,\n config.isDmv2,\n config.jwtConfig,\n ),\n );\n // port is now passed via config.proxyPort or defaults to 4001\n const port = config.proxyPort !== undefined ? config.proxyPort : 4001;\n server.listen(port, \"localhost\", () => {\n console.log(`Server running on port ${port}`);\n });\n\n return server;\n },\n workerStop: async (server) => {\n return new Promise<void>((resolve) => {\n server.close(() => resolve());\n });\n },\n });\n\n apisCluster.start();\n};\n","import cluster from \"node:cluster\";\nimport { availableParallelism } from \"node:os\";\nimport { exit } from \"node:process\";\nimport { Worker } from \"node:cluster\";\n\nconst DEFAULT_MAX_CPU_USAGE_RATIO = 0.7;\n// Time to restart the worker when it exits unexpectedly\n// This value is not too high to avoid the worker to be stuck in a bad state\n// but also not too low to avoid restarting the worker too often\nconst RESTART_TIME_MS = 10000;\nconst SIGTERM = \"SIGTERM\";\nconst SIGINT = \"SIGINT\";\nconst SHUTDOWN_WORKERS_INTERVAL = 500;\n\n/**\n * Manages a cluster of worker processes, handling their lifecycle including startup,\n * shutdown, and error handling.\n *\n * @typeParam C - The type of output produced during worker startup\n */\nexport class Cluster<C> {\n // Tracks if shutdown is currently in progress\n private shutdownInProgress: boolean = false;\n // Tracks if workers exited cleanly during shutdown\n private hasCleanWorkerExit: boolean = true;\n\n // String identifying if this is primary or worker process\n private processStr = `${cluster.isPrimary ? 
\"primary\" : \"worker\"} process ${process.pid}`;\n\n // Functions for starting and stopping workers\n private workerStart: (w: Worker, paralelism: number) => Promise<C>;\n private workerStop: (c: C) => Promise<void>;\n\n // Result from starting worker, needed for cleanup\n private startOutput: C | undefined;\n private maxCpuUsageRatio: number;\n private usedCpuCount: number;\n\n /**\n * Creates a new cluster manager instance.\n *\n * @param options - Configuration options for the cluster\n * @param options.workerStart - Async function to execute when starting a worker\n * @param options.workerStop - Async function to execute when stopping a worker\n * @param options.maxCpuUsageRatio - Maximum ratio of CPU cores to utilize (0-1)\n * @param options.maxWorkerCount - Maximum number of workers to spawn\n * @throws {Error} If maxCpuUsageRatio is not between 0 and 1\n */\n constructor(options: {\n workerStart: (w: Worker, paralelism: number) => Promise<C>;\n workerStop: (c: C) => Promise<void>;\n maxCpuUsageRatio?: number;\n maxWorkerCount?: number;\n }) {\n this.workerStart = options.workerStart;\n this.workerStop = options.workerStop;\n if (\n options.maxCpuUsageRatio &&\n (options.maxCpuUsageRatio > 1 || options.maxCpuUsageRatio < 0)\n ) {\n throw new Error(\"maxCpuUsageRatio must be between 0 and 1\");\n }\n this.maxCpuUsageRatio =\n options.maxCpuUsageRatio || DEFAULT_MAX_CPU_USAGE_RATIO;\n this.usedCpuCount = this.computeCPUUsageCount(\n this.maxCpuUsageRatio,\n options.maxWorkerCount,\n );\n }\n\n /**\n * Calculates the number of CPU cores to utilize based on available parallelism and constraints.\n *\n * @param cpuUsageRatio - Ratio of CPU cores to use (0-1)\n * @param maxWorkerCount - Optional maximum number of workers\n * @returns The number of CPU cores to utilize\n */\n computeCPUUsageCount(cpuUsageRatio: number, maxWorkerCount?: number) {\n const cpuCount = availableParallelism();\n const maxWorkers = maxWorkerCount || cpuCount;\n return Math.min(\n maxWorkers,\n Math.max(1, Math.floor(cpuCount * cpuUsageRatio)),\n );\n }\n\n /**\n * Initializes the cluster by spawning worker processes and setting up signal handlers.\n * For the primary process, spawns workers and monitors parent process.\n * For worker processes, executes the worker startup function.\n *\n * @throws {Error} If worker is undefined in worker process\n */\n async start() {\n process.on(SIGTERM, this.gracefulClusterShutdown(SIGTERM));\n process.on(SIGINT, this.gracefulClusterShutdown(SIGINT));\n\n if (cluster.isPrimary) {\n const parentPid = process.ppid;\n\n setInterval(() => {\n try {\n process.kill(parentPid, 0);\n } catch (e) {\n console.log(\"Parent process has exited.\");\n this.gracefulClusterShutdown(SIGTERM)();\n }\n }, 1000);\n\n await this.bootWorkers(this.usedCpuCount);\n } else {\n if (!cluster.worker) {\n throw new Error(\n \"Worker is not defined, it should be defined in worker process\",\n );\n }\n\n this.startOutput = await this.workerStart(\n cluster.worker,\n this.usedCpuCount,\n );\n }\n }\n\n /**\n * Spawns worker processes and configures their lifecycle event handlers.\n * Handles worker online, exit and disconnect events.\n * Automatically restarts failed workers during normal operation.\n *\n * @param numWorkers - Number of worker processes to spawn\n */\n bootWorkers = async (numWorkers: number) => {\n console.info(`Setting ${numWorkers} workers...`);\n\n for (let i = 0; i < numWorkers; i++) {\n cluster.fork();\n }\n\n cluster.on(\"online\", (worker) => {\n console.info(`worker process 
${worker.process.pid} is online`);\n });\n\n cluster.on(\"exit\", (worker, code, signal) => {\n console.info(\n `worker ${worker.process.pid} exited with code ${code} and signal ${signal}`,\n );\n\n if (!this.shutdownInProgress) {\n setTimeout(() => cluster.fork(), RESTART_TIME_MS);\n }\n\n if (this.shutdownInProgress && code != 0) {\n this.hasCleanWorkerExit = false;\n }\n });\n\n cluster.on(\"disconnect\", (worker) => {\n console.info(`worker process ${worker.process.pid} has disconnected`);\n });\n };\n\n /**\n * Creates a handler function for graceful shutdown on receipt of a signal.\n * Ensures only one shutdown can occur at a time.\n * Handles shutdown differently for primary and worker processes.\n *\n * @param signal - The signal triggering the shutdown (e.g. SIGTERM)\n * @returns An async function that performs the shutdown\n */\n gracefulClusterShutdown = (signal: NodeJS.Signals) => async () => {\n if (this.shutdownInProgress) {\n return;\n }\n\n this.shutdownInProgress = true;\n this.hasCleanWorkerExit = true;\n\n console.info(\n `Got ${signal} on ${this.processStr}. Graceful shutdown start at ${new Date().toISOString()}`,\n );\n\n try {\n if (cluster.isPrimary) {\n await this.shutdownWorkers(signal);\n console.info(`${this.processStr} - worker shutdown successful`);\n exit(0);\n } else {\n // Only attempt to stop if the worker has finished starting\n if (this.startOutput) {\n await this.workerStop(this.startOutput);\n } else {\n console.info(\n `${this.processStr} - shutdown before worker fully started`,\n );\n }\n console.info(`${this.processStr} shutdown successful`);\n this.hasCleanWorkerExit ? exit(0) : exit(1);\n }\n } catch (e) {\n console.error(`${this.processStr} - shutdown failed`, e);\n exit(1);\n }\n };\n\n /**\n * Gracefully terminates all worker processes.\n * Monitors workers until they all exit or timeout occurs.\n * Only relevant for the primary process.\n *\n * @param signal - The signal to send to worker processes\n * @returns A promise that resolves when all workers have terminated\n */\n shutdownWorkers = (signal: NodeJS.Signals) => {\n return new Promise<void>((resolve, reject) => {\n if (!cluster.isPrimary) {\n return resolve();\n }\n\n if (!cluster.workers) {\n return resolve();\n }\n\n const workerIds = Object.keys(cluster.workers);\n if (workerIds.length == 0) {\n return resolve();\n }\n\n let workersAlive = 0;\n let funcRun = 0;\n\n const cleanWorkers = () => {\n ++funcRun;\n workersAlive = 0;\n\n Object.values(cluster.workers || {})\n .filter((worker) => !!worker)\n .forEach((worker) => {\n if (worker && !worker.isDead()) {\n ++workersAlive;\n if (funcRun == 1) {\n worker.kill(signal);\n }\n }\n });\n\n console.info(workersAlive + \" workers alive\");\n if (workersAlive == 0) {\n clearInterval(interval);\n return resolve();\n }\n };\n\n const interval = setInterval(cleanWorkers, SHUTDOWN_WORKERS_INTERVAL);\n });\n };\n}\n","import { createClient, RedisClientType } from \"redis\";\n\n// Module-level singleton instance and initialization promise\nlet instance: MooseCache | null = null;\nlet initPromise: Promise<MooseCache> | null = null;\n\ntype SupportedTypes = string | object;\n\nexport class MooseCache {\n private client: RedisClientType;\n private isConnected: boolean = false;\n private readonly keyPrefix: string;\n private disconnectTimer: NodeJS.Timeout | null = null;\n private readonly idleTimeout: number;\n private connectPromise: Promise<void> | null = null;\n\n private constructor() {\n const redisUrl =\n process.env.MOOSE_REDIS_CONFIG__URL || 
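/* falls back to a local Redis */ 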
\"redis://127.0.0.1:6379\";\n const prefix = process.env.MOOSE_REDIS_CONFIG__KEY_PREFIX || \"MS\";\n // 30 seconds of inactivity before disconnecting\n this.idleTimeout =\n parseInt(process.env.MOOSE_REDIS_CONFIG__IDLE_TIMEOUT || \"30\", 10) * 1000;\n this.keyPrefix = `${prefix}::moosecache::`;\n\n this.client = createClient({\n url: redisUrl,\n });\n\n process.on(\"SIGTERM\", this.gracefulShutdown);\n process.on(\"SIGINT\", this.gracefulShutdown);\n\n this.client.on(\"error\", async (err: Error) => {\n console.error(\"TS Redis client error:\", err);\n await this.disconnect();\n });\n\n this.client.on(\"connect\", () => {\n this.isConnected = true;\n console.log(\"TS Redis client connected\");\n });\n\n this.client.on(\"end\", () => {\n this.isConnected = false;\n console.log(\"TS Redis client disconnected\");\n this.clearDisconnectTimer();\n });\n }\n\n private clearDisconnectTimer(): void {\n if (this.disconnectTimer) {\n clearTimeout(this.disconnectTimer);\n this.disconnectTimer = null;\n }\n }\n\n private resetDisconnectTimer(): void {\n this.clearDisconnectTimer();\n this.disconnectTimer = setTimeout(async () => {\n if (this.isConnected) {\n console.log(\"TS Redis client disconnecting due to inactivity\");\n await this.disconnect();\n }\n }, this.idleTimeout);\n }\n\n private async ensureConnected(): Promise<void> {\n if (!this.isConnected) {\n await this.connect();\n }\n this.resetDisconnectTimer();\n }\n\n private async connect(): Promise<void> {\n // If already connected, return immediately\n if (this.isConnected) {\n return;\n }\n\n // If connection is in progress, wait for it\n // This prevents race conditions when multiple callers try to reconnect\n // simultaneously after a disconnection\n if (this.connectPromise) {\n return this.connectPromise;\n }\n\n // Start connection\n this.connectPromise = (async () => {\n try {\n await this.client.connect();\n this.resetDisconnectTimer();\n } catch (error) {\n // Reset the promise on error so retries can work\n this.connectPromise = null;\n throw error;\n }\n })();\n\n return this.connectPromise;\n }\n\n private async gracefulShutdown(): Promise<void> {\n if (this.isConnected) {\n await this.disconnect();\n }\n process.exit(0);\n }\n\n private getPrefixedKey(key: string): string {\n return `${this.keyPrefix}${key}`;\n }\n\n /**\n * Gets the singleton instance of MooseCache. 
Creates a new instance if one doesn't exist.\n * The client will automatically connect to Redis and handle reconnection if needed.\n *\n * @returns Promise<MooseCache> The singleton instance of MooseCache\n * @example\n * const cache = await MooseCache.get();\n */\n public static async get(): Promise<MooseCache> {\n // If we already have an instance, return it immediately\n if (instance) {\n return instance;\n }\n\n // If initialization is already in progress, wait for it\n // This prevents race conditions where multiple concurrent calls to get()\n // would each create their own instance and connection\n //\n // A simple singleton pattern (just checking if instance exists) isn't enough\n // because multiple async calls can check \"if (!instance)\" simultaneously,\n // find it's null, and each try to create their own instance before any\n // of them finish setting the instance variable\n if (initPromise) {\n return initPromise;\n }\n\n // Start initialization\n // We store the promise immediately so that any concurrent calls\n // will wait for this same initialization instead of starting their own\n initPromise = (async () => {\n try {\n const newInstance = new MooseCache();\n await newInstance.connect();\n instance = newInstance;\n return newInstance;\n } catch (error) {\n // Reset the promise on error so retries can work\n initPromise = null;\n throw error;\n }\n })();\n\n return initPromise;\n }\n\n /**\n * Sets a value in the cache. Objects are automatically JSON stringified.\n *\n * @param key - The key to store the value under\n * @param value - The value to store. Can be a string or any object (will be JSON stringified)\n * @param ttlSeconds - Optional time-to-live in seconds. If not provided, defaults to 1 hour (3600 seconds).\n * Must be a non-negative number. If 0, the key will expire immediately.\n * @example\n * // Store a string\n * await cache.set(\"foo\", \"bar\");\n *\n * // Store an object with custom TTL\n * await cache.set(\"foo:config\", { baz: 123, qux: true }, 60); // expires in 1 minute\n *\n * // This is essentially a get-set, which returns the previous value if it exists.\n * // You can create logic to only do work for the first time.\n * const value = await cache.set(\"testSessionId\", \"true\");\n * if (value) {\n * // Cache was set before, return\n * } else {\n * // Cache was set for first time, do work\n * }\n */\n public async set(\n key: string,\n value: string | object,\n ttlSeconds?: number,\n ): Promise<string | null> {\n try {\n // Validate TTL\n if (ttlSeconds !== undefined && ttlSeconds < 0) {\n throw new Error(\"ttlSeconds must be a non-negative number\");\n }\n\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const stringValue =\n typeof value === \"object\" ? JSON.stringify(value) : value;\n\n // Use provided TTL or default to 1 hour\n const ttl = ttlSeconds ?? 3600;\n return await this.client.set(prefixedKey, stringValue, {\n EX: ttl,\n GET: true,\n });\n } catch (error) {\n console.error(`Error setting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Retrieves a value from the cache. Attempts to parse the value as JSON if possible.\n *\n * @param key - The key to retrieve\n * @returns Promise<T | null> The value, parsed as type T if it was JSON, or as string if not. 
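(For example, a value stored as { baz: 123 } comes back as a parsed object, while a plain \"bar\" comes back as a string.) 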
Returns null if key doesn't exist\n * @example\n * // Get a string\n * const value = await cache.get(\"foo\");\n *\n * // Get and parse an object with type safety\n * interface Config { baz: number; qux: boolean; }\n * const config = await cache.get<Config>(\"foo:config\");\n */\n public async get<T extends SupportedTypes = string>(\n key: string,\n ): Promise<T | null> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const value = await this.client.get(prefixedKey);\n\n if (value === null) return null;\n\n // Note: We can't check if T is string at runtime because TypeScript types are erased.\n // Instead, we try to parse as JSON and return the original string if that fails.\n try {\n const parsed = JSON.parse(value);\n // Only return parsed value if it's an object\n if (typeof parsed === \"object\" && parsed !== null) {\n return parsed as T;\n }\n // If parsed value isn't an object, return as string\n return value as T;\n } catch {\n // If JSON parse fails, return as string\n return value as T;\n }\n } catch (error) {\n console.error(`Error getting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @example\n * await cache.delete(\"foo\");\n */\n public async delete(key: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n await this.client.del(prefixedKey);\n } catch (error) {\n console.error(`Error deleting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes all keys that start with the given prefix.\n *\n * @param keyPrefix - The prefix of keys to delete\n * @example\n * // Delete all keys starting with \"foo\"\n * await cache.clearKeys(\"foo\");\n */\n public async clearKeys(keyPrefix: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(keyPrefix);\n const keys = await this.client.keys(`${prefixedKey}*`);\n if (keys.length > 0) {\n await this.client.del(keys);\n }\n } catch (error) {\n console.error(\n `Error clearing cache keys with prefix ${keyPrefix}:`,\n error,\n );\n throw error;\n }\n }\n\n /**\n * Deletes all keys in the cache\n *\n * @example\n * await cache.clear();\n */\n public async clear(): Promise<void> {\n try {\n await this.ensureConnected();\n const keys = await this.client.keys(`${this.keyPrefix}*`);\n if (keys.length > 0) {\n await this.client.del(keys);\n }\n } catch (error) {\n console.error(\"Error clearing cache:\", error);\n throw error;\n }\n }\n\n /**\n * Manually disconnects the Redis client. The client will automatically reconnect\n * when the next operation is performed.\n *\n * @example\n * await cache.disconnect();\n */\n public async disconnect(): Promise<void> {\n this.clearDisconnectTimer();\n this.connectPromise = null;\n if (this.isConnected) {\n await this.client.quit();\n }\n }\n}\n","import { MooseClient, QueryClient } from \"./helpers\";\nimport { getClickhouseClient } from \"../commons\";\nimport type { RuntimeClickHouseConfig } from \"../config/runtime\";\n\nexport async function getMooseClients(\n config?: Partial<RuntimeClickHouseConfig>,\n): Promise<{ client: MooseClient }> {\n await import(\"../config/runtime\");\n const configRegistry = (globalThis as any)._mooseConfigRegistry;\n\n if (!configRegistry) {\n throw new Error(\n \"Configuration registry not initialized. 
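Putting the documented cache API together, a usage sketch; the import path is an assumption (the JSDoc only shows `MooseCache.get()`):

```ts
// Usage sketch for the cache API documented above; import path assumed.
import { MooseCache } from "@514labs/moose-lib";

interface Config { baz: number; qux: boolean; }

async function demo(): Promise<void> {
  const cache = await MooseCache.get();
  await cache.set("foo:config", { baz: 123, qux: true }, 60); // 60s TTL
  const config = await cache.get<Config>("foo:config"); // parsed JSON object
  const missing = await cache.get("foo:absent"); // null when key is absent
  await cache.clearKeys("foo"); // deletes every key starting with "foo"
  await cache.disconnect(); // reconnects automatically on next operation
}
```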
Ensure the Moose framework is properly set up.\",\n );\n }\n\n const clickhouseConfig =\n await configRegistry.getStandaloneClickhouseConfig(config);\n\n const clickhouseClient = getClickhouseClient({\n username: clickhouseConfig.username,\n password: clickhouseConfig.password,\n database: clickhouseConfig.database,\n useSSL: clickhouseConfig.useSSL ? \"true\" : \"false\",\n host: clickhouseConfig.host,\n port: clickhouseConfig.port,\n });\n\n const queryClient = new QueryClient(clickhouseClient, \"standalone\");\n const mooseClient = new MooseClient(queryClient);\n\n return { client: mooseClient };\n}\n","import { parse } from \"csv-parse\";\nimport { jsonDateReviver } from \"./json\";\n\n/**\n * Configuration for CSV parsing options\n */\nexport interface CSVParsingConfig {\n /** CSV delimiter character */\n delimiter: string;\n /** Whether to treat first row as headers */\n columns?: boolean;\n /** Whether to skip empty lines */\n skipEmptyLines?: boolean;\n /** Whether to trim whitespace from values */\n trim?: boolean;\n}\n\n/**\n * Configuration for JSON parsing options\n */\nexport interface JSONParsingConfig {\n /** Custom reviver function for JSON.parse */\n reviver?: (key: string, value: any) => any;\n}\n\n/**\n * Parses CSV content into an array of objects\n *\n * @param content - The CSV content as a string\n * @param config - CSV parsing configuration\n * @returns Promise resolving to an array of parsed objects\n */\nexport function parseCSV<T = Record<string, any>>(\n content: string,\n config: CSVParsingConfig,\n): Promise<T[]> {\n return new Promise((resolve, reject) => {\n const results: T[] = [];\n\n parse(content, {\n delimiter: config.delimiter,\n columns: config.columns ?? true,\n skip_empty_lines: config.skipEmptyLines ?? true,\n trim: config.trim ?? true,\n })\n .on(\"data\", (row) => {\n results.push(row as T);\n })\n .on(\"end\", () => {\n resolve(results);\n })\n .on(\"error\", (error) => {\n reject(error);\n });\n });\n}\n\n/**\n * Parses JSON content into an array of objects\n *\n * @param content - The JSON content as a string\n * @param config - JSON parsing configuration\n * @returns Array of parsed objects\n */\nexport function parseJSON<T = any>(\n content: string,\n config: JSONParsingConfig = {},\n): T[] {\n try {\n const parsed = JSON.parse(content, config.reviver);\n\n // Handle both array and single object cases\n if (Array.isArray(parsed)) {\n return parsed as T[];\n } else {\n return [parsed as T];\n }\n } catch (error) {\n throw new Error(\n `Failed to parse JSON: ${error instanceof Error ? 
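A usage sketch for `parseCSV` as defined below; the import path is an assumption. With `columns: true` (the default here), each row becomes an object keyed by the header row, with all values as strings:

```ts
// Usage sketch for parseCSV with the default configuration; import path assumed.
import { parseCSV, DEFAULT_CSV_CONFIG } from "@514labs/moose-lib";

interface Row { name: string; score: string; }

async function demo(): Promise<void> {
  const rows = await parseCSV<Row>("name,score\nada,10\nalan,9", DEFAULT_CSV_CONFIG);
  // => [{ name: "ada", score: "10" }, { name: "alan", score: "9" }]
}
```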
error.message : \"Unknown error\"}`,\n );\n }\n}\n\n/**\n * Parses JSON content with automatic date revival\n *\n * @param content - The JSON content as a string\n * @returns Array of parsed objects with Date objects for ISO 8601 strings\n */\nexport function parseJSONWithDates<T = any>(content: string): T[] {\n return parseJSON<T>(content, { reviver: jsonDateReviver });\n}\n\n/**\n * Type guard to check if a value is a valid CSV delimiter\n */\nexport function isValidCSVDelimiter(delimiter: string): boolean {\n return delimiter.length === 1 && !/\\s/.test(delimiter);\n}\n\n/**\n * Common CSV delimiters\n */\nexport const CSV_DELIMITERS = {\n COMMA: \",\",\n TAB: \"\\t\",\n SEMICOLON: \";\",\n PIPE: \"|\",\n} as const;\n\n/**\n * Default CSV parsing configuration\n */\nexport const DEFAULT_CSV_CONFIG: CSVParsingConfig = {\n delimiter: CSV_DELIMITERS.COMMA,\n columns: true,\n skipEmptyLines: true,\n trim: true,\n};\n\n/**\n * Default JSON parsing configuration with date revival\n */\nexport const DEFAULT_JSON_CONFIG: JSONParsingConfig = {\n reviver: jsonDateReviver,\n};\n","import type {\n Column,\n DataType,\n Nested,\n ArrayType,\n} from \"../dataModels/dataModelTypes\";\n\n/**\n * Annotation key used to mark DateTime fields that should remain as strings\n * rather than being parsed into Date objects at runtime.\n */\nexport const STRING_DATE_ANNOTATION = \"stringDate\";\n\n/**\n * Type guard to check if a DataType is a nullable wrapper\n */\nfunction isNullableType(dt: DataType): dt is { nullable: DataType } {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"nullable\" in dt &&\n typeof dt.nullable !== \"undefined\"\n );\n}\n\n/**\n * Type guard to check if a DataType is a Nested type\n */\nfunction isNestedType(dt: DataType): dt is Nested {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"columns\" in dt &&\n Array.isArray(dt.columns)\n );\n}\n\n/**\n * Type guard to check if a DataType is an ArrayType\n */\nfunction isArrayType(dt: DataType): dt is ArrayType {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"elementType\" in dt &&\n typeof dt.elementType !== \"undefined\"\n );\n}\n\n/**\n * Revives ISO 8601 date strings into Date objects during JSON parsing\n * This is useful for automatically converting date strings to Date objects\n */\nexport function jsonDateReviver(key: string, value: unknown): unknown {\n const iso8601Format =\n /^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)$/;\n\n if (typeof value === \"string\" && iso8601Format.test(value)) {\n return new Date(value);\n }\n\n return value;\n}\n\n/**\n * Checks if a DataType represents a datetime column (not just date)\n * AND if the column should be parsed from string to Date at runtime\n *\n * Note: Date and Date16 are date-only types and should remain as strings.\n * Only DateTime types are candidates for parsing to JavaScript Date objects.\n */\nfunction isDateType(dataType: DataType, annotations: [string, any][]): boolean {\n // Check if this is marked as a string-based date (from typia.tags.Format)\n // If so, it should remain as a string, not be parsed to Date\n if (\n annotations.some(\n ([key, value]) => key === STRING_DATE_ANNOTATION && value === true,\n )\n ) {\n return false;\n }\n\n if (typeof dataType === \"string\") {\n 
// Only DateTime types should be parsed to Date objects\n // Date and Date16 are date-only and should stay as strings\n return dataType === \"DateTime\" || dataType.startsWith(\"DateTime(\");\n }\n // Handle nullable wrapper\n if (isNullableType(dataType)) {\n return isDateType(dataType.nullable, annotations);\n }\n return false;\n}\n\n/**\n * Type of mutation to apply to a field during parsing\n */\nexport type Mutation = \"parseDate\"; // | \"parseBigInt\" - to be added later\n\n/**\n * Recursive tuple array structure representing field mutation operations\n * Each entry is [fieldName, mutation]:\n * - mutation is Mutation[] for leaf fields that need operations applied\n * - mutation is FieldMutations for nested objects/arrays (auto-applies to array elements)\n */\nexport type FieldMutations = [string, Mutation[] | FieldMutations][];\n\n/**\n * Recursively builds field mutations from column definitions\n *\n * @param columns - Array of Column definitions\n * @returns Tuple array of field mutations\n */\nfunction buildFieldMutations(columns: Column[]): FieldMutations {\n const mutations: FieldMutations = [];\n\n for (const column of columns) {\n const dataType = column.data_type;\n\n // Check if this is a date field that should be converted\n if (isDateType(dataType, column.annotations)) {\n mutations.push([column.name, [\"parseDate\"]]);\n continue;\n }\n\n // Handle nested structures\n if (typeof dataType === \"object\" && dataType !== null) {\n // Handle nullable wrapper\n let unwrappedType: DataType = dataType;\n if (isNullableType(dataType)) {\n unwrappedType = dataType.nullable;\n }\n\n // Handle nested objects\n if (isNestedType(unwrappedType)) {\n const nestedMutations = buildFieldMutations(unwrappedType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n\n // Handle arrays with nested columns\n // The mutations will be auto-applied to each array element at runtime\n if (isArrayType(unwrappedType)) {\n const elementType = unwrappedType.elementType;\n if (isNestedType(elementType)) {\n const nestedMutations = buildFieldMutations(elementType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n }\n }\n }\n\n return mutations;\n}\n\n/**\n * Applies a mutation operation to a field value\n *\n * @param value - The value to handle\n * @param mutation - The mutation operation to apply\n * @returns The handled value\n */\nfunction applyMutation(value: any, mutation: Mutation): any {\n if (mutation === \"parseDate\") {\n if (typeof value === \"string\") {\n try {\n const date = new Date(value);\n return !isNaN(date.getTime()) ? 
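The recursive `FieldMutations` tuple shape above is easiest to read from an example. Hand-written here for illustration, this is what `buildFieldMutations` would produce for a schema with one top-level DateTime and one nested DateTime:

```ts
// Illustrative FieldMutations for a schema shaped like
// { createdAt: DateTime, meta: { updatedAt: DateTime } }.
const mutations: FieldMutations = [
  ["createdAt", ["parseDate"]], // leaf field: a list of Mutation operations
  ["meta", [["updatedAt", ["parseDate"]]]], // nested object: nested tuples
];
```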
date : value;\n } catch {\n return value;\n }\n }\n }\n return value;\n}\n\n/**\n * Recursively mutates an object by applying field mutations\n *\n * @param obj - The object to mutate\n * @param mutations - The field mutations to apply\n */\nfunction applyFieldMutations(obj: any, mutations: FieldMutations): void {\n if (!obj || typeof obj !== \"object\") {\n return;\n }\n\n for (const [fieldName, mutation] of mutations) {\n if (!(fieldName in obj)) {\n continue;\n }\n\n if (Array.isArray(mutation)) {\n // Check if it's Mutation[] (leaf) or FieldMutations (nested)\n if (mutation.length > 0 && typeof mutation[0] === \"string\") {\n // It's Mutation[] - apply operations to this field\n const operations = mutation as Mutation[];\n for (const operation of operations) {\n obj[fieldName] = applyMutation(obj[fieldName], operation);\n }\n } else {\n // It's FieldMutations - recurse into nested structure\n const nestedMutations = mutation as FieldMutations;\n const fieldValue = obj[fieldName];\n\n if (Array.isArray(fieldValue)) {\n // Auto-apply to each array element\n for (const item of fieldValue) {\n applyFieldMutations(item, nestedMutations);\n }\n } else if (fieldValue && typeof fieldValue === \"object\") {\n // Apply to nested object\n applyFieldMutations(fieldValue, nestedMutations);\n }\n }\n }\n }\n}\n\n/**\n * Pre-builds field mutations from column schema for efficient reuse\n *\n * @param columns - Column definitions from the Stream schema\n * @returns Field mutations tuple array, or undefined if no columns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * // Reuse fieldMutations for every message\n * ```\n */\nexport function buildFieldMutationsFromColumns(\n columns: Column[] | undefined,\n): FieldMutations | undefined {\n if (!columns || columns.length === 0) {\n return undefined;\n }\n const mutations = buildFieldMutations(columns);\n return mutations.length > 0 ? 
mutations : undefined;\n}\n\n/**\n * Applies field mutations to parsed data\n * Mutates the object in place for performance\n *\n * @param data - The parsed JSON object to mutate\n * @param fieldMutations - Pre-built field mutations from buildFieldMutationsFromColumns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * const data = JSON.parse(jsonString);\n * mutateParsedJson(data, fieldMutations);\n * // data now has transformations applied per the field mutations\n * ```\n */\nexport function mutateParsedJson(\n data: any,\n fieldMutations: FieldMutations | undefined,\n): void {\n if (!fieldMutations || !data) {\n return;\n }\n\n applyFieldMutations(data, fieldMutations);\n}\n","import { ClickHouseClient } from \"@clickhouse/client\";\nimport fastq, { queueAsPromised } from \"fastq\";\nimport { cliLog, getClickhouseClient } from \"../commons\";\nimport { Blocks } from \"./helpers\";\nimport fs from \"node:fs\";\nimport path from \"node:path\";\n\nconst walkDir = (dir: string, fileExtension: string, fileList: string[]) => {\n const files = fs.readdirSync(dir);\n\n files.forEach((file) => {\n if (fs.statSync(path.join(dir, file)).isDirectory()) {\n fileList = walkDir(path.join(dir, file), fileExtension, fileList);\n } else if (file.endsWith(fileExtension)) {\n fileList.push(path.join(dir, file));\n }\n });\n\n return fileList;\n};\n\ninterface BlocksQueueTask {\n chClient: ClickHouseClient;\n blocks: Blocks;\n retries: number;\n}\n\ninterface ClickhouseConfig {\n database: string;\n host: string;\n port: string;\n username: string;\n password: string;\n useSSL: boolean;\n}\n\ninterface BlocksConfig {\n blocksDir: string;\n clickhouseConfig: ClickhouseConfig;\n}\n\nclass DependencyError extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"DependencyError\";\n }\n}\n\n// Convert our config to Clickhouse client config\nconst toClientConfig = (config: ClickhouseConfig) => ({\n ...config,\n useSSL: config.useSSL ? 
\"true\" : \"false\",\n});\n\nconst createBlocks = async (chClient: ClickHouseClient, blocks: Blocks) => {\n for (const query of blocks.setup) {\n try {\n console.log(`Creating block using query ${query}`);\n await chClient.command({\n query,\n clickhouse_settings: {\n wait_end_of_query: 1, // Ensure at least once delivery and DDL acknowledgment\n },\n });\n } catch (err) {\n cliLog({\n action: \"Blocks\",\n message: `Failed to create blocks: ${err}`,\n message_type: \"Error\",\n });\n if (err && JSON.stringify(err).includes(`UNKNOWN_TABLE`)) {\n throw new DependencyError(err.toString());\n }\n }\n }\n};\n\nconst deleteBlocks = async (chClient: ClickHouseClient, blocks: Blocks) => {\n for (const query of blocks.teardown) {\n try {\n console.log(`Deleting block using query ${query}`);\n await chClient.command({\n query,\n clickhouse_settings: {\n wait_end_of_query: 1, // Ensure at least once delivery and DDL acknowledgment\n },\n });\n } catch (err) {\n cliLog({\n action: \"Blocks\",\n message: `Failed to delete blocks: ${err}`,\n message_type: \"Error\",\n });\n }\n }\n};\n\nconst asyncWorker = async (task: BlocksQueueTask) => {\n await deleteBlocks(task.chClient, task.blocks);\n await createBlocks(task.chClient, task.blocks);\n};\n\nexport const runBlocks = async (config: BlocksConfig) => {\n const chClient = getClickhouseClient(toClientConfig(config.clickhouseConfig));\n console.log(`Connected`);\n\n const blocksFiles = walkDir(config.blocksDir, \".ts\", []);\n const numOfBlockFiles = blocksFiles.length;\n console.log(`Found ${numOfBlockFiles} blocks files`);\n\n const queue: queueAsPromised<BlocksQueueTask> = fastq.promise(asyncWorker, 1);\n\n queue.error((err: Error, task: BlocksQueueTask) => {\n if (err && task.retries > 0) {\n if (err instanceof DependencyError) {\n queue.push({ ...task, retries: task.retries - 1 });\n }\n }\n });\n\n for (const path of blocksFiles) {\n console.log(`Adding to queue: ${path}`);\n\n try {\n const blocks = require(path).default as Blocks;\n queue.push({\n chClient,\n blocks,\n retries: numOfBlockFiles,\n });\n } catch (err) {\n cliLog({\n action: \"Blocks\",\n message: `Failed to import blocks from ${path}: ${err}`,\n message_type: \"Error\",\n });\n }\n }\n\n while (!queue.idle()) {\n await new Promise((resolve) => setTimeout(resolve, 1000));\n }\n};\n","import { Readable } from \"node:stream\";\nimport { KafkaJS } from \"@514labs/kafka-javascript\";\nconst { Kafka } = KafkaJS;\n\ntype Consumer = KafkaJS.Consumer;\ntype Producer = KafkaJS.Producer;\n\ntype KafkaMessage = {\n value: Buffer | string | null;\n key?: Buffer | string | null;\n partition?: number;\n offset?: string;\n timestamp?: string;\n headers?: Record<string, Buffer | string | undefined>;\n};\n\ntype SASLOptions = {\n mechanism: \"plain\" | \"scram-sha-256\" | \"scram-sha-512\";\n username: string;\n password: string;\n};\nimport { Buffer } from \"node:buffer\";\nimport * as process from \"node:process\";\nimport * as http from \"node:http\";\nimport {\n cliLog,\n getKafkaClient,\n createProducerConfig,\n Logger,\n logError,\n} from \"../commons\";\nimport { Cluster } from \"../cluster-utils\";\nimport { getStreamingFunctions } from \"../dmv2/internal\";\nimport type { ConsumerConfig, TransformConfig, DeadLetterQueue } from \"../dmv2\";\nimport {\n buildFieldMutationsFromColumns,\n mutateParsedJson,\n type FieldMutations,\n} from \"../utilities/json\";\nimport type { Column } from \"../dataModels/dataModelTypes\";\n\nconst HOSTNAME = process.env.HOSTNAME;\nconst AUTO_COMMIT_INTERVAL_MS 
= 5000;\nconst PARTITIONS_CONSUMED_CONCURRENTLY = 3;\nconst MAX_RETRIES_CONSUMER = 150;\nconst SESSION_TIMEOUT_CONSUMER = 30000;\nconst HEARTBEAT_INTERVAL_CONSUMER = 3000;\nconst DEFAULT_MAX_STREAMING_CONCURRENCY = 100;\n// Max messages per eachBatch call - Confluent client defaults to 32, increase for throughput\nconst CONSUMER_MAX_BATCH_SIZE = 1000;\n\n/**\n * Data structure for metrics logging containing counts and metadata\n */\ntype MetricsData = {\n count_in: number;\n count_out: number;\n bytes: number;\n function_name: string;\n timestamp: Date;\n};\n\n/**\n * Interface for tracking message processing metrics\n */\ninterface Metrics {\n count_in: number;\n count_out: number;\n bytes: number;\n}\n\n/**\n * Type definition for streaming transformation function\n */\ntype StreamingFunction = (data: unknown) => unknown | Promise<unknown>;\n\n/**\n * Simplified Kafka message type containing only value\n */\ntype KafkaMessageWithLineage = {\n value: string;\n originalValue: object;\n originalMessage: KafkaMessage;\n dlq?: DeadLetterQueue<any>;\n};\n\n/**\n * Configuration interface for Kafka topics including namespace and version support\n */\nexport interface TopicConfig {\n name: string; // Full topic name including namespace if present\n partitions: number;\n retention_ms: number;\n max_message_bytes: number;\n namespace?: string;\n version?: string;\n}\n\n/**\n * Configuration interface for streaming function arguments\n */\nexport interface StreamingFunctionArgs {\n sourceTopic: TopicConfig;\n targetTopic?: TopicConfig;\n functionFilePath: string;\n broker: string; // Comma-separated list of Kafka broker addresses (e.g., \"broker1:9092, broker2:9092\"). Whitespace around commas is automatically trimmed.\n maxSubscriberCount: number;\n isDmv2: boolean;\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string;\n securityProtocol?: string;\n}\n\n/**\n * Maximum number of concurrent streaming operations, configurable via environment\n */\nconst MAX_STREAMING_CONCURRENCY =\n process.env.MAX_STREAMING_CONCURRENCY ?\n parseInt(process.env.MAX_STREAMING_CONCURRENCY, 10)\n : DEFAULT_MAX_STREAMING_CONCURRENCY;\n\n/**\n * Logs metrics data to HTTP endpoint\n */\nexport const metricsLog: (log: MetricsData) => void = (log) => {\n const req = http.request({\n port: parseInt(process.env.MOOSE_MANAGEMENT_PORT ?? 
\"5001\", 10),\n method: \"POST\",\n path: \"/metrics-logs\",\n });\n\n req.on(\"error\", (err: Error) => {\n console.log(\n `Error ${err.name} sending metrics to management port.`,\n err.message,\n );\n });\n\n req.write(JSON.stringify({ ...log }));\n req.end();\n};\n\n/**\n * Initializes and connects Kafka producer\n */\nconst startProducer = async (\n logger: Logger,\n producer: Producer,\n): Promise<void> => {\n try {\n logger.log(\"Connecting producer...\");\n await producer.connect();\n logger.log(\"Producer is running...\");\n } catch (error) {\n logger.error(\"Failed to connect producer:\");\n if (error instanceof Error) {\n logError(logger, error);\n }\n throw error;\n }\n};\n\n/**\n * Disconnects a Kafka producer and logs the shutdown\n *\n * @param logger - Logger instance for outputting producer status\n * @param producer - KafkaJS Producer instance to disconnect\n * @returns Promise that resolves when producer is disconnected\n * @example\n * ```ts\n * await stopProducer(logger, producer); // Disconnects producer and logs shutdown\n * ```\n */\nconst stopProducer = async (\n logger: Logger,\n producer: Producer,\n): Promise<void> => {\n await producer.disconnect();\n logger.log(\"Producer is shutting down...\");\n};\n\n/**\n * Gracefully stops a Kafka consumer by pausing all partitions and then disconnecting\n *\n * @param logger - Logger instance for outputting consumer status\n * @param consumer - KafkaJS Consumer instance to disconnect\n * @param sourceTopic - Topic configuration containing name and partition count\n * @returns Promise that resolves when consumer is disconnected\n * @example\n * ```ts\n * await stopConsumer(logger, consumer, sourceTopic); // Pauses all partitions and disconnects consumer\n * ```\n */\nconst stopConsumer = async (\n logger: Logger,\n consumer: Consumer,\n sourceTopic: TopicConfig,\n): Promise<void> => {\n try {\n // Try to pause the consumer first if the method exists\n logger.log(\"Pausing consumer...\");\n\n // Generate partition numbers array based on the topic's partition count\n const partitionNumbers = Array.from(\n { length: sourceTopic.partitions },\n (_, i) => i,\n );\n\n await consumer.pause([\n {\n topic: sourceTopic.name,\n partitions: partitionNumbers,\n },\n ]);\n\n logger.log(\"Disconnecting consumer...\");\n await consumer.disconnect();\n logger.log(\"Consumer is shutting down...\");\n } catch (error) {\n logger.error(`Error during consumer shutdown: ${error}`);\n // Continue with disconnect even if pause fails\n try {\n await consumer.disconnect();\n logger.log(\"Consumer disconnected after error\");\n } catch (disconnectError) {\n logger.error(`Failed to disconnect consumer: ${disconnectError}`);\n }\n }\n};\n\n/**\n * Processes a single Kafka message through a streaming function and returns transformed message(s)\n *\n * @param logger - Logger instance for outputting message processing status and errors\n * @param streamingFunctionWithConfigList - functions (with their configs) that transforms input message data\n * @param message - Kafka message to be processed\n * @param producer - Kafka producer for sending dead letter\n * @param fieldMutations - Pre-built field mutations for data transformations\n * @returns Promise resolving to array of transformed messages or undefined if processing fails\n *\n * The function will:\n * 1. Check for null/undefined message values\n * 2. Parse the message value as JSON\n * 3. Apply field mutations (e.g., date parsing) using pre-built configuration\n * 4. 
Pass parsed data through the streaming function\n * 5. Convert transformed data back to string format\n * 6. Handle both single and array return values\n * 7. Log any processing errors\n */\nconst handleMessage = async (\n logger: Logger,\n // Note: TransformConfig<any> is intentionally generic here as it handles\n // various data model types that are determined at runtime\n streamingFunctionWithConfigList: [StreamingFunction, TransformConfig<any>][],\n message: KafkaMessage,\n producer: Producer,\n fieldMutations?: FieldMutations,\n): Promise<KafkaMessageWithLineage[] | undefined> => {\n if (message.value === undefined || message.value === null) {\n logger.log(`Received message with no value, skipping...`);\n return undefined;\n }\n\n try {\n // Detect Schema Registry JSON envelope: 0x00 + 4-byte schema ID (big-endian) + JSON bytes\n let payloadBuffer = message.value as Buffer;\n if (\n payloadBuffer &&\n payloadBuffer.length >= 5 &&\n payloadBuffer[0] === 0x00\n ) {\n payloadBuffer = payloadBuffer.subarray(5);\n }\n // Parse JSON then apply field mutations using pre-built configuration\n const parsedData = JSON.parse(payloadBuffer.toString());\n mutateParsedJson(parsedData, fieldMutations);\n const transformedData = await Promise.all(\n streamingFunctionWithConfigList.map(async ([fn, config]) => {\n try {\n return await fn(parsedData);\n } catch (e) {\n // Check if there's a deadLetterQueue configured\n const deadLetterQueue = config.deadLetterQueue;\n\n if (deadLetterQueue) {\n // Create a dead letter record\n const deadLetterRecord = {\n originalRecord: {\n ...parsedData,\n // Include original Kafka message metadata\n __sourcePartition: message.partition,\n __sourceOffset: message.offset,\n __sourceTimestamp: message.timestamp,\n },\n errorMessage: e instanceof Error ? e.message : String(e),\n errorType: e instanceof Error ? e.constructor.name : \"Unknown\",\n failedAt: new Date(),\n source: \"transform\",\n };\n\n cliLog({\n action: \"DeadLetter\",\n message: `Sending message to DLQ ${deadLetterQueue.name}: ${e instanceof Error ? e.message : String(e)}`,\n message_type: \"Error\",\n });\n // Send to the DLQ\n try {\n await producer.send({\n topic: deadLetterQueue.name,\n messages: [{ value: JSON.stringify(deadLetterRecord) }],\n });\n } catch (dlqError) {\n logger.error(`Failed to send to dead letter queue: ${dlqError}`);\n }\n } else {\n // No DLQ configured, just log the error\n cliLog({\n action: \"Function\",\n message: `Error processing message (no DLQ configured): ${e instanceof Error ? e.message : String(e)}`,\n message_type: \"Error\",\n });\n }\n\n // rethrow for the outside error handling\n throw e;\n }\n }),\n );\n\n return transformedData\n .map((userFunctionOutput, i) => {\n const [_, config] = streamingFunctionWithConfigList[i];\n if (userFunctionOutput) {\n if (Array.isArray(userFunctionOutput)) {\n // We Promise.all streamingFunctionWithConfigList above.\n // Promise.all always wraps results in an array, even for single transforms.\n // When a transform returns an array (e.g., [msg1, msg2] to emit multiple messages),\n // we get [[msg1, msg2]]. flat() unwraps one level so each item becomes its own message.\n // Without flat(), the entire array would be JSON.stringify'd as a single message.\n return userFunctionOutput\n .flat()\n .filter((item) => item !== undefined && item !== null)\n .map((item) => ({\n value: JSON.stringify(item),\n originalValue: parsedData,\n originalMessage: message,\n dlq: config.deadLetterQueue ?? 
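The envelope check at the top of `handleMessage` detects the 5-byte Confluent Schema Registry wire format: a magic byte `0x00`, a 4-byte big-endian schema ID, then the JSON payload. A standalone version of that check, with a hypothetical function name:

```ts
// Strips the Confluent Schema Registry envelope when present.
import { Buffer } from "node:buffer";

function stripSchemaRegistryEnvelope(value: Buffer): Buffer {
  if (value.length >= 5 && value[0] === 0x00) {
    // value.readUInt32BE(1) would recover the schema ID if it were needed
    return value.subarray(5);
  }
  return value;
}
```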
undefined,\n }));\n } else {\n return [\n {\n value: JSON.stringify(userFunctionOutput),\n originalValue: parsedData,\n originalMessage: message,\n dlq: config.deadLetterQueue ?? undefined,\n },\n ];\n }\n }\n })\n .flat()\n .filter((item) => item !== undefined && item !== null);\n } catch (e) {\n // TODO: Track failure rate\n logger.error(`Failed to transform data`);\n if (e instanceof Error) {\n logError(logger, e);\n }\n }\n\n return undefined;\n};\n\n/**\n * Handles sending failed messages to their configured Dead Letter Queues\n *\n * @param logger - Logger instance for outputting DLQ status\n * @param producer - Kafka producer for sending to DLQ topics\n * @param messages - Array of failed messages with DLQ configuration\n * @param error - The error that caused the failure\n * @returns true if ALL messages were successfully sent to their DLQs, false otherwise\n */\nconst handleDLQForFailedMessages = async (\n logger: Logger,\n producer: Producer,\n messages: KafkaMessageWithLineage[],\n error: unknown,\n): Promise<boolean> => {\n let messagesHandledByDLQ = 0;\n let messagesWithoutDLQ = 0;\n let dlqErrors = 0;\n\n for (const msg of messages) {\n if (msg.dlq && msg.originalValue) {\n const deadLetterRecord = {\n originalRecord: {\n ...msg.originalValue,\n // Include original Kafka message metadata\n __sourcePartition: msg.originalMessage.partition,\n __sourceOffset: msg.originalMessage.offset,\n __sourceTimestamp: msg.originalMessage.timestamp,\n },\n errorMessage: error instanceof Error ? error.message : String(error),\n errorType: error instanceof Error ? error.constructor.name : \"Unknown\",\n failedAt: new Date(),\n source: \"transform\",\n };\n\n cliLog({\n action: \"DeadLetter\",\n message: `Sending failed message to DLQ ${msg.dlq.name}: ${error instanceof Error ? 
error.message : String(error)}`,\n message_type: \"Error\",\n });\n\n try {\n await producer.send({\n topic: msg.dlq.name,\n messages: [{ value: JSON.stringify(deadLetterRecord) }],\n });\n logger.log(`Sent failed message to DLQ ${msg.dlq.name}`);\n messagesHandledByDLQ++;\n } catch (dlqError) {\n logger.error(`Failed to send to DLQ: ${dlqError}`);\n dlqErrors++;\n }\n } else if (!msg.dlq) {\n messagesWithoutDLQ++;\n logger.warn(`Cannot send to DLQ: no DLQ configured for message`);\n } else {\n messagesWithoutDLQ++;\n logger.warn(`Cannot send to DLQ: original message value not available`);\n }\n }\n\n // Check if ALL messages were successfully handled by DLQ\n const allMessagesHandled =\n messagesHandledByDLQ === messages.length &&\n messagesWithoutDLQ === 0 &&\n dlqErrors === 0;\n\n if (allMessagesHandled) {\n logger.log(\n `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, suppressing original error`,\n );\n } else if (messagesHandledByDLQ > 0) {\n // Log summary of partial DLQ handling\n logger.warn(\n `Partial DLQ success: ${messagesHandledByDLQ}/${messages.length} message(s) sent to DLQ`,\n );\n if (messagesWithoutDLQ > 0) {\n logger.error(\n `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured or missing original value`,\n );\n }\n if (dlqErrors > 0) {\n logger.error(`${dlqErrors} message(s) failed to send to DLQ`);\n }\n }\n\n return allMessagesHandled;\n};\n\n/**\n * Sends processed messages to a target Kafka topic\n *\n * @param logger - Logger instance for outputting send status and errors\n * @param metrics - Metrics object for tracking message counts and bytes sent\n * @param targetTopic - Target topic configuration\n * @param producer - Kafka producer instance for sending messages\n * @param messages - Array of processed messages to send (messages carry their own DLQ config)\n * @returns Promise that resolves when all messages are sent\n *\n * The Confluent Kafka library handles batching internally via message.max.bytes\n * and retries transient failures automatically. This function simply sends all\n * messages and handles permanent failures by routing to DLQ.\n */\nconst sendMessages = async (\n logger: Logger,\n metrics: Metrics,\n targetTopic: TopicConfig,\n producer: Producer,\n messages: KafkaMessageWithLineage[],\n): Promise<void> => {\n if (messages.length === 0) return;\n\n try {\n // Library handles batching and retries internally\n await producer.send({\n topic: targetTopic.name,\n messages: messages,\n });\n\n // Track metrics only after successful send to target topic\n // Messages routed to DLQ should NOT be counted as successful sends\n for (const msg of messages) {\n metrics.bytes += Buffer.byteLength(msg.value, \"utf8\");\n }\n metrics.count_out += messages.length;\n\n logger.log(`Sent ${messages.length} messages to ${targetTopic.name}`);\n } catch (e) {\n // Library already retried - this is a permanent failure\n logger.error(`Failed to send transformed data`);\n if (e instanceof Error) {\n logError(logger, e);\n }\n\n // Handle DLQ for failed messages\n // Only throw if not all messages were successfully routed to DLQ\n const allHandledByDLQ = await handleDLQForFailedMessages(\n logger,\n producer,\n messages,\n e,\n );\n if (!allHandledByDLQ) {\n throw e;\n }\n }\n};\n\n/**\n * Periodically sends metrics about message processing to a metrics logging endpoint.\n * Resets metrics counters after each send. 
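Both DLQ paths above assemble the same payload. Written out as an interface for clarity (field names are taken from the source; the type declaration itself is illustrative):

```ts
// Shape of the dead-letter record sent to a DLQ topic.
interface DeadLetterRecord {
  originalRecord: Record<string, unknown> & {
    __sourcePartition?: number;
    __sourceOffset?: string;
    __sourceTimestamp?: string;
  };
  errorMessage: string;
  errorType: string;
  failedAt: Date;
  source: "transform";
}
```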
Runs every second via setTimeout.\n *\n * @param logger - Logger instance containing the function name prefix\n * @param metrics - Metrics object tracking message counts and bytes processed\n * @example\n * ```ts\n * const metrics = { count_in: 10, count_out: 8, bytes: 1024 };\n * sendMessageMetrics(logger, metrics); // Sends metrics and resets counters\n * ```\n */\nconst sendMessageMetrics = (logger: Logger, metrics: Metrics) => {\n if (metrics.count_in > 0 || metrics.count_out > 0 || metrics.bytes > 0) {\n metricsLog({\n count_in: metrics.count_in,\n count_out: metrics.count_out,\n function_name: logger.logPrefix,\n bytes: metrics.bytes,\n timestamp: new Date(),\n });\n }\n metrics.count_in = 0;\n metrics.bytes = 0;\n metrics.count_out = 0;\n setTimeout(() => sendMessageMetrics(logger, metrics), 1000);\n};\n\n/**\n * Dynamically loads a streaming function from a file path\n *\n * @param args - The streaming function arguments containing the function file path\n * @returns The default export of the streaming function module\n * @throws Will throw and log an error if the function file cannot be loaded\n * @example\n * ```ts\n * const fn = loadStreamingFunction({functionFilePath: './transform.js'});\n * const result = await fn(data);\n * ```\n */\nfunction loadStreamingFunction(functionFilePath: string) {\n let streamingFunctionImport: { default: StreamingFunction };\n try {\n streamingFunctionImport = require(\n functionFilePath.substring(0, functionFilePath.length - 3),\n );\n } catch (e) {\n cliLog({ action: \"Function\", message: `${e}`, message_type: \"Error\" });\n throw e;\n }\n return streamingFunctionImport.default;\n}\n\nasync function loadStreamingFunctionV2(\n sourceTopic: TopicConfig,\n targetTopic?: TopicConfig,\n): Promise<{\n functions: [StreamingFunction, TransformConfig<any> | ConsumerConfig<any>][];\n fieldMutations: FieldMutations | undefined;\n}> {\n const transformFunctions = await getStreamingFunctions();\n const transformFunctionKey = `${topicNameToStreamName(sourceTopic)}_${targetTopic ? 
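`sendMessageMetrics` reschedules itself with `setTimeout` rather than using `setInterval`, which guarantees that a slow flush can never stack overlapping runs. The pattern in isolation, with a hypothetical `flush` callback:

```ts
// Self-rescheduling metrics loop: the next tick is only armed after the
// current flush completes, so runs never overlap.
function metricsTick(flush: () => void, periodMs = 1000): void {
  flush();
  setTimeout(() => metricsTick(flush, periodMs), periodMs);
}
```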
topicNameToStreamName(targetTopic) : \"<no-target>\"}`;\n\n const matchingEntries = Array.from(transformFunctions.entries()).filter(\n ([key]) => key.startsWith(transformFunctionKey),\n );\n\n if (matchingEntries.length === 0) {\n const message = `No functions found for ${transformFunctionKey}`;\n cliLog({\n action: \"Function\",\n message: `${message}`,\n message_type: \"Error\",\n });\n throw new Error(message);\n }\n\n // Extract functions and configs, and get columns from the first entry\n // (all functions for the same source topic will have the same columns)\n const functions = matchingEntries.map(([_, [fn, config]]) => [\n fn,\n config,\n ]) as [StreamingFunction, TransformConfig<any> | ConsumerConfig<any>][];\n const [_key, firstEntry] = matchingEntries[0];\n const sourceColumns = firstEntry[2];\n\n // Pre-build field mutations once for all messages\n const fieldMutations = buildFieldMutationsFromColumns(sourceColumns);\n\n return { functions, fieldMutations };\n}\n\n/**\n * Initializes and starts a Kafka consumer that processes messages using a streaming function\n *\n * @param logger - Logger instance for outputting consumer status and errors\n * @param metrics - Metrics object for tracking message counts and bytes processed\n * @param parallelism - Number of parallel workers processing messages\n * @param args - Configuration arguments for source/target topics and streaming function\n * @param consumer - KafkaJS Consumer instance\n * @param producer - KafkaJS Producer instance for sending processed messages\n * @param streamingFuncId - Unique identifier for this consumer group\n * @param maxMessageSize - Maximum message size in bytes allowed by Kafka broker\n * @returns Promise that resolves when consumer is started\n *\n * The consumer will:\n * 1. Connect to Kafka\n * 2. Subscribe to the source topic\n * 3. Process messages in batches using the streaming function\n * 4. Send processed messages to target topic (if configured)\n * 5. 
Commit offsets after successful processing\n */\nconst startConsumer = async (\n args: StreamingFunctionArgs,\n logger: Logger,\n metrics: Metrics,\n _parallelism: number,\n consumer: Consumer,\n producer: Producer,\n streamingFuncId: string,\n): Promise<void> => {\n // Validate topic configurations\n validateTopicConfig(args.sourceTopic);\n if (args.targetTopic) {\n validateTopicConfig(args.targetTopic);\n }\n\n try {\n logger.log(\"Connecting consumer...\");\n await consumer.connect();\n logger.log(\"Consumer connected successfully\");\n } catch (error) {\n logger.error(\"Failed to connect consumer:\");\n if (error instanceof Error) {\n logError(logger, error);\n }\n throw error;\n }\n\n logger.log(\n `Starting consumer group '${streamingFuncId}' with source topic: ${args.sourceTopic.name} and target topic: ${args.targetTopic?.name || \"none\"}`,\n );\n\n // We preload the function to not have to load it for each message\n // Note: Config types use 'any' as generics because they handle various\n // data model types determined at runtime, not compile time\n let streamingFunctions: [\n StreamingFunction,\n TransformConfig<any> | ConsumerConfig<any>,\n ][];\n let fieldMutations: FieldMutations | undefined;\n\n if (args.isDmv2) {\n const result = await loadStreamingFunctionV2(\n args.sourceTopic,\n args.targetTopic,\n );\n streamingFunctions = result.functions;\n fieldMutations = result.fieldMutations;\n } else {\n streamingFunctions = [[loadStreamingFunction(args.functionFilePath), {}]];\n fieldMutations = undefined;\n }\n\n await consumer.subscribe({\n topics: [args.sourceTopic.name], // Use full topic name for Kafka operations\n });\n\n await consumer.run({\n eachBatchAutoResolve: true,\n // Enable parallel processing of partitions\n partitionsConsumedConcurrently: PARTITIONS_CONSUMED_CONCURRENTLY, // To be adjusted\n eachBatch: async ({ batch, heartbeat, isRunning, isStale }) => {\n if (!isRunning() || isStale()) {\n return;\n }\n\n metrics.count_in += batch.messages.length;\n\n cliLog({\n action: \"Received\",\n message: `${logger.logPrefix} ${batch.messages.length} message(s)`,\n });\n logger.log(`Received ${batch.messages.length} message(s)`);\n\n let index = 0;\n const readableStream = Readable.from(batch.messages);\n\n const processedMessages: (KafkaMessageWithLineage[] | undefined)[] =\n await readableStream\n .map(\n async (message) => {\n index++;\n if (\n (batch.messages.length > DEFAULT_MAX_STREAMING_CONCURRENCY &&\n index % DEFAULT_MAX_STREAMING_CONCURRENCY) ||\n index - 1 === batch.messages.length\n ) {\n await heartbeat();\n }\n return handleMessage(\n logger,\n streamingFunctions,\n message,\n producer,\n fieldMutations,\n );\n },\n {\n concurrency: MAX_STREAMING_CONCURRENCY,\n },\n )\n .toArray();\n\n const filteredMessages = processedMessages\n .flat()\n .filter((msg) => msg !== undefined && msg.value !== undefined);\n\n if (args.targetTopic === undefined || processedMessages.length === 0) {\n return;\n }\n\n await heartbeat();\n\n if (filteredMessages.length > 0) {\n // Messages now carry their own DLQ configuration in the lineage\n await sendMessages(\n logger,\n metrics,\n args.targetTopic,\n producer,\n filteredMessages as KafkaMessageWithLineage[],\n );\n }\n },\n });\n\n logger.log(\"Consumer is running...\");\n};\n\n/**\n * Creates a Logger instance that prefixes all log messages with the source and target topic\n *\n * @param args - The streaming function arguments containing source and target topics\n * @returns A Logger instance with standard log, error and warn 
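The `eachBatch` handler above leans on Node's stream helpers for bounded concurrency: `Readable.from(...).map(fn, { concurrency })` processes items in parallel up to a limit, and `.toArray()` collects the results. A minimal standalone equivalent (these helpers ship with Node 17.4+):

```ts
// Bounded-concurrency mapping over an iterable via Node stream helpers.
import { Readable } from "node:stream";

const doubled = await Readable.from([1, 2, 3, 4])
  .map(async (n: number) => n * 2, { concurrency: 2 })
  .toArray(); // [2, 4, 6, 8]
```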
methods\n * @example\n * ```ts\n * const logger = buildLogger({sourceTopic: 'source', targetTopic: 'target'});\n * logger.log('message'); // Outputs: \"source -> target: message\"\n * ```\n */\nconst buildLogger = (args: StreamingFunctionArgs, workerId: number): Logger => {\n const targetLabel =\n args.targetTopic?.name ? ` -> ${args.targetTopic.name}` : \" (consumer)\";\n const logPrefix = `${args.sourceTopic.name}${targetLabel} (worker ${workerId})`;\n return {\n logPrefix: logPrefix,\n log: (message: string): void => {\n console.log(`${logPrefix}: ${message}`);\n },\n error: (message: string): void => {\n console.error(`${logPrefix}: ${message}`);\n },\n warn: (message: string): void => {\n console.warn(`${logPrefix}: ${message}`);\n },\n };\n};\n\n/**\n * Formats a version string into a topic suffix format by replacing dots with underscores\n * Example: \"1.2.3\" -> \"_1_2_3\"\n */\nexport function formatVersionSuffix(version: string): string {\n return `_${version.replace(/\\./g, \"_\")}`;\n}\n\n/**\n * Transforms a topic name by removing namespace prefix and version suffix\n * to get the base stream name for function mapping\n */\nexport function topicNameToStreamName(config: TopicConfig): string {\n let name = config.name;\n\n // Handle version suffix if present\n if (config.version) {\n const versionSuffix = formatVersionSuffix(config.version);\n if (name.endsWith(versionSuffix)) {\n name = name.slice(0, -versionSuffix.length);\n } else {\n throw new Error(\n `Version suffix ${versionSuffix} not found in topic name ${name}`,\n );\n }\n }\n\n // Handle namespace prefix if present\n if (config.namespace && config.namespace !== \"\") {\n const prefix = `${config.namespace}.`;\n if (name.startsWith(prefix)) {\n name = name.slice(prefix.length);\n } else {\n throw new Error(\n `Namespace prefix ${prefix} not found in topic name ${name}`,\n );\n }\n }\n\n return name;\n}\n\n/**\n * Validates a topic configuration for proper namespace and version formatting\n */\nexport function validateTopicConfig(config: TopicConfig): void {\n if (config.namespace && !config.name.startsWith(`${config.namespace}.`)) {\n throw new Error(\n `Topic name ${config.name} must start with namespace ${config.namespace}`,\n );\n }\n\n if (config.version) {\n const versionSuffix = formatVersionSuffix(config.version);\n if (!config.name.endsWith(versionSuffix)) {\n throw new Error(\n `Topic name ${config.name} must end with version ${config.version}`,\n );\n }\n }\n}\n\n/**\n * Initializes and runs a clustered streaming function system that processes messages from Kafka\n *\n * This function:\n * 1. Creates a cluster of workers to handle Kafka message processing\n * 2. Sets up Kafka producers and consumers for each worker\n * 3. Configures logging and metrics collection\n * 4. 
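A worked example for the name-transform helpers above, using illustrative topic and namespace names:

```ts
// Version suffix formatting and topic-to-stream name resolution.
formatVersionSuffix("1.2.3"); // "_1_2_3"

topicNameToStreamName({
  name: "acme.userEvents_1_2_3",
  namespace: "acme",
  version: "1.2.3",
  partitions: 1,
  retention_ms: 86400000,
  max_message_bytes: 1048576,
}); // "userEvents" — version suffix and namespace prefix both stripped
```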
Handles graceful shutdown on termination\n *\n * The system supports:\n * - Multiple workers processing messages in parallel\n * - Dynamic CPU usage control via maxCpuUsageRatio\n * - SASL authentication for Kafka\n * - Metrics tracking for message counts and bytes processed\n * - Graceful shutdown of Kafka connections\n *\n * @returns Promise that resolves when the cluster is started\n * @throws Will log errors if Kafka connections fail\n *\n * @example\n * ```ts\n * await runStreamingFunctions({\n * sourceTopic: { name: 'source', partitions: 3, retentionPeriod: 86400, maxMessageBytes: 1048576 },\n * targetTopic: { name: 'target', partitions: 3, retentionPeriod: 86400, maxMessageBytes: 1048576 },\n * functionFilePath: './transform.js',\n * broker: 'localhost:9092',\n * maxSubscriberCount: 3,\n * isDmv2: false\n * }); // Starts the streaming function cluster\n * ```\n */\nexport const runStreamingFunctions = async (\n args: StreamingFunctionArgs,\n): Promise<void> => {\n // Validate topic configurations at startup\n validateTopicConfig(args.sourceTopic);\n if (args.targetTopic) {\n validateTopicConfig(args.targetTopic);\n }\n\n // Use base stream names (without namespace/version) for function ID\n // We use flow- instead of function- because that's what the ACLs in boreal are linked with\n // When migrating - make sure the ACLs are updated to use the new prefix.\n const streamingFuncId = `flow-${args.sourceTopic.name}-${args.targetTopic?.name || \"\"}`;\n\n const cluster = new Cluster({\n maxCpuUsageRatio: 0.5,\n maxWorkerCount: args.maxSubscriberCount,\n workerStart: async (worker, parallelism) => {\n const logger = buildLogger(args, worker.id);\n\n const metrics = {\n count_in: 0,\n count_out: 0,\n bytes: 0,\n };\n\n setTimeout(() => sendMessageMetrics(logger, metrics), 1000);\n\n const clientIdPrefix = HOSTNAME ? 
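Note that `TopicConfig` (defined earlier) declares `retention_ms` and `max_message_bytes`, whereas the JSDoc example above writes `retentionPeriod`/`maxMessageBytes`. A call matching the actual interface would look like this (values illustrative; `retention_ms` assumed to be one day in milliseconds):

```ts
// Invocation matching the TopicConfig interface rather than the JSDoc example.
await runStreamingFunctions({
  sourceTopic: { name: "source", partitions: 3, retention_ms: 86400000, max_message_bytes: 1048576 },
  targetTopic: { name: "target", partitions: 3, retention_ms: 86400000, max_message_bytes: 1048576 },
  functionFilePath: "./transform.js",
  broker: "localhost:9092",
  maxSubscriberCount: 3,
  isDmv2: false,
});
```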
`${HOSTNAME}-` : \"\";\n const processId = `${clientIdPrefix}${streamingFuncId}-ts-${worker.id}`;\n\n const kafka = await getKafkaClient(\n {\n clientId: processId,\n broker: args.broker,\n securityProtocol: args.securityProtocol,\n saslUsername: args.saslUsername,\n saslPassword: args.saslPassword,\n saslMechanism: args.saslMechanism,\n },\n logger,\n );\n\n // Note: \"js.consumer.max.batch.size\" is a librdkafka native config not in TS types\n const consumer: Consumer = kafka.consumer({\n kafkaJS: {\n groupId: streamingFuncId,\n sessionTimeout: SESSION_TIMEOUT_CONSUMER,\n heartbeatInterval: HEARTBEAT_INTERVAL_CONSUMER,\n retry: {\n retries: MAX_RETRIES_CONSUMER,\n },\n autoCommit: true,\n autoCommitInterval: AUTO_COMMIT_INTERVAL_MS,\n fromBeginning: true,\n },\n \"js.consumer.max.batch.size\": CONSUMER_MAX_BATCH_SIZE,\n });\n\n // Sync producer message.max.bytes with topic config\n const maxMessageBytes =\n args.targetTopic?.max_message_bytes || 1024 * 1024;\n\n const producer: Producer = kafka.producer(\n createProducerConfig(maxMessageBytes),\n );\n\n try {\n logger.log(\"Starting producer...\");\n await startProducer(logger, producer);\n\n try {\n logger.log(\"Starting consumer...\");\n await startConsumer(\n args,\n logger,\n metrics,\n parallelism,\n consumer,\n producer,\n streamingFuncId,\n );\n } catch (e) {\n logger.error(\"Failed to start kafka consumer: \");\n if (e instanceof Error) {\n logError(logger, e);\n }\n // Re-throw to ensure proper error handling\n throw e;\n }\n } catch (e) {\n logger.error(\"Failed to start kafka producer: \");\n if (e instanceof Error) {\n logError(logger, e);\n }\n // Re-throw to ensure proper error handling\n throw e;\n }\n\n return [logger, producer, consumer] as [Logger, Producer, Consumer];\n },\n workerStop: async ([logger, producer, consumer]) => {\n logger.log(`Received SIGTERM, shutting down gracefully...`);\n\n // First stop the consumer to prevent new messages\n logger.log(\"Stopping consumer first...\");\n await stopConsumer(logger, consumer, args.sourceTopic);\n\n // Wait a bit for in-flight messages to complete processing\n logger.log(\"Waiting for in-flight messages to complete...\");\n await new Promise((resolve) => setTimeout(resolve, 2000));\n\n // Then stop the producer\n logger.log(\"Stopping producer...\");\n await stopProducer(logger, producer);\n\n logger.log(\"Graceful shutdown completed\");\n },\n });\n\n cluster.start();\n};\n","export async function runExportSerializer(targetModel: string) {\n const exports_list = require(targetModel);\n console.log(JSON.stringify(exports_list));\n}\n","import process from \"process\";\n\n/**\n * Gets the source directory from environment variable or defaults to \"app\"\n */\nfunction getSourceDir(): string {\n return process.env.MOOSE_SOURCE_DIR || \"app\";\n}\n\nexport async function runApiTypeSerializer(targetModel: string) {\n const func = require(\n `${process.cwd()}/${getSourceDir()}/apis/${targetModel}.ts`,\n ).default;\n const inputSchema = func[\"moose_input_schema\"] || null;\n const outputSchema = func[\"moose_output_schema\"] || null;\n console.log(\n JSON.stringify({\n inputSchema,\n outputSchema,\n }),\n );\n}\n","import {\n DefaultLogger,\n NativeConnection,\n NativeConnectionOptions,\n Worker,\n bundleWorkflowCode,\n} from \"@temporalio/worker\";\nimport * as path from \"path\";\nimport * as fs from \"fs\";\nimport { Workflow } from \"../dmv2\";\nimport { getWorkflows } from \"../dmv2/internal\";\nimport { createActivityForScript } from \"./activity\";\nimport { 
activities } from \"./activity\";\nimport { initializeLogger } from \"./logger\";\n\ninterface TemporalConfig {\n url: string;\n namespace: string;\n clientCert?: string;\n clientKey?: string;\n apiKey?: string;\n}\n\ninterface ScriptsConfig {\n temporalConfig: TemporalConfig;\n}\n\n// Maintain a global set of activity names we've already registered\nconst ALREADY_REGISTERED = new Set<string>();\n\nfunction collectActivitiesDmv2(\n logger: DefaultLogger,\n workflows: Map<string, Workflow>,\n) {\n logger.info(`<DMV2WF> Collecting tasks from dmv2 workflows`);\n const scriptNames: string[] = [];\n for (const [name, workflow] of workflows.entries()) {\n logger.info(\n `<DMV2WF> Registering dmv2 workflow: ${name} with starting task: ${workflow.config.startingTask.name}`,\n );\n scriptNames.push(`${name}/${workflow.config.startingTask.name}`);\n }\n return scriptNames;\n}\n\n/**\n * This looks similar to the client in apis.\n * Temporal SDK uses similar looking connection options & client,\n * but there are different libraries for a worker like this & a client\n * like in the apis.\n */\nasync function createTemporalConnection(\n logger: DefaultLogger,\n temporalConfig: TemporalConfig,\n): Promise<NativeConnection> {\n logger.info(\n `<workflow> Using temporal_url: ${temporalConfig.url} and namespace: ${temporalConfig.namespace}`,\n );\n\n let connectionOptions: NativeConnectionOptions = {\n address: temporalConfig.url,\n };\n\n if (temporalConfig.clientCert && temporalConfig.clientKey) {\n logger.info(\"Using TLS for secure Temporal\");\n const cert = await fs.readFileSync(temporalConfig.clientCert);\n const key = await fs.readFileSync(temporalConfig.clientKey);\n\n connectionOptions.tls = {\n clientCertPair: {\n crt: cert,\n key: key,\n },\n };\n } else if (temporalConfig.apiKey) {\n logger.info(`Using API key for secure Temporal`);\n // URL with API key uses gRPC regional endpoint\n connectionOptions.address = \"us-west1.gcp.api.temporal.io:7233\";\n connectionOptions.apiKey = temporalConfig.apiKey;\n connectionOptions.tls = {};\n connectionOptions.metadata = {\n \"temporal-namespace\": temporalConfig.namespace,\n };\n }\n\n logger.info(\n `<workflow> Connecting to Temporal at ${connectionOptions.address}`,\n );\n\n const maxRetries = 5;\n const baseDelay = 1000;\n let attempt = 0;\n\n while (true) {\n try {\n const connection = await NativeConnection.connect(connectionOptions);\n logger.info(\"<workflow> Connected to Temporal server\");\n return connection;\n } catch (err) {\n attempt++;\n logger.error(`<workflow> Connection attempt ${attempt} failed: ${err}`);\n\n if (attempt >= maxRetries) {\n logger.error(`Failed to connect after ${attempt} attempts`);\n throw err;\n }\n\n const backoff = baseDelay * Math.pow(2, attempt - 1);\n logger.warn(`<workflow> Retrying connection in ${backoff}ms...`);\n await new Promise((resolve) => setTimeout(resolve, backoff));\n }\n }\n}\n\nasync function registerWorkflows(\n logger: DefaultLogger,\n config: ScriptsConfig,\n): Promise<Worker | null> {\n logger.info(`Registering workflows`);\n\n // Collect all TypeScript activities from registered workflows\n const allScriptPaths: string[] = [];\n const dynamicActivities: any[] = [];\n\n try {\n const workflows = await getWorkflows();\n if (workflows.size > 0) {\n logger.info(`<DMV2WF> Found ${workflows.size} dmv2 workflows`);\n allScriptPaths.push(...collectActivitiesDmv2(logger, workflows));\n\n if (allScriptPaths.length === 0) {\n logger.info(`<DMV2WF> No tasks found in dmv2 workflows`);\n return null;\n 
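The Temporal connection loop below retries with exponential backoff: the delay grows as `baseDelay * 2^(attempt - 1)`, so with `baseDelay = 1000` and `maxRetries = 5` the waits are 1000ms, 2000ms, 4000ms, 8000ms before giving up. The formula in isolation:

```ts
// Exponential backoff schedule used between connection attempts.
function backoffMs(attempt: number, baseDelay = 1000): number {
  return baseDelay * Math.pow(2, attempt - 1);
}
```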
}\n\n logger.info(\n `<DMV2WF> Found ${allScriptPaths.length} tasks in dmv2 workflows`,\n );\n\n for (const activityName of allScriptPaths) {\n if (!ALREADY_REGISTERED.has(activityName)) {\n const activity = await createActivityForScript(activityName);\n dynamicActivities.push(activity);\n ALREADY_REGISTERED.add(activityName);\n logger.info(`<DMV2WF> Registered task ${activityName}`);\n }\n }\n\n if (dynamicActivities.length === 0) {\n logger.info(`<DMV2WF> No dynamic activities found in dmv2 workflows`);\n return null;\n }\n\n logger.info(\n `<DMV2WF> Found ${dynamicActivities.length} dynamic activities in dmv2 workflows`,\n );\n }\n\n if (allScriptPaths.length === 0) {\n logger.info(`No workflows found`);\n return null;\n }\n\n logger.info(`Found ${allScriptPaths.length} workflows`);\n\n if (dynamicActivities.length === 0) {\n logger.info(`No tasks found`);\n return null;\n }\n\n logger.info(`Found ${dynamicActivities.length} task(s)`);\n\n const connection = await createTemporalConnection(\n logger,\n config.temporalConfig,\n );\n\n // Create a custom logger that suppresses webpack output\n const silentLogger = {\n info: () => {}, // Suppress info logs (webpack output)\n debug: () => {}, // Suppress debug logs\n warn: () => {}, // Suppress warnings if desired\n log: () => {}, // Suppress general logs\n trace: () => {}, // Suppress trace logs\n error: (message: string, meta?: any) => {\n // Keep error logs but forward to the main logger\n logger.error(message, meta);\n },\n };\n\n // Pre-bundle workflows with silent logger to suppress webpack output\n // https://github.com/temporalio/sdk-typescript/issues/1740\n const workflowBundle = await bundleWorkflowCode({\n workflowsPath: path.resolve(__dirname, \"scripts/workflow.js\"),\n logger: silentLogger,\n });\n\n const worker = await Worker.create({\n connection,\n namespace: config.temporalConfig.namespace,\n taskQueue: \"typescript-script-queue\",\n workflowBundle,\n activities: {\n ...activities,\n ...Object.fromEntries(\n dynamicActivities.map((activity) => [\n Object.keys(activity)[0],\n Object.values(activity)[0],\n ]),\n ),\n },\n });\n\n return worker;\n } catch (error) {\n logger.error(`Error registering workflows: ${error}`);\n throw error;\n }\n}\n\n/**\n * Start a Temporal worker that handles TypeScript script execution workflows.\n */\nexport async function runScripts(\n config: ScriptsConfig,\n): Promise<Worker | null> {\n const logger = initializeLogger();\n\n // Add process-level uncaught exception handler\n process.on(\"uncaughtException\", (error) => {\n console.error(`[PROCESS] Uncaught Exception: ${error}`);\n process.exit(1);\n });\n\n const worker = await registerWorkflows(logger, config);\n\n if (!worker) {\n logger.warn(\n `No workflows found. 
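Each dynamic activity registered above is a single-key object of the form `{ [scriptName]: fn }`; `Worker.create` receives them flattened into one activities map. The merge in isolation, with illustrative workflow/task names:

```ts
// Flattening single-key activity objects into one activities map.
const dynamicActivities = [
  { "wfA/task1": async () => {} },
  { "wfB/task2": async () => {} },
];
const merged = Object.fromEntries(
  dynamicActivities.map((a) => [Object.keys(a)[0], Object.values(a)[0]]),
); // { "wfA/task1": [AsyncFunction], "wfB/task2": [AsyncFunction] }
```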
To disable workflow infrastructure, set workflows=false in moose.config.toml`,\n );\n process.exit(0);\n }\n\n let isShuttingDown = false;\n\n // Handle shutdown signals\n async function handleSignal(signal: string) {\n console.log(`[SHUTDOWN] Received ${signal}`);\n\n if (isShuttingDown) {\n return;\n }\n\n isShuttingDown = true;\n\n try {\n if (!worker) {\n process.exit(0);\n }\n await Promise.race([\n worker.shutdown(),\n new Promise((_, reject) =>\n setTimeout(() => reject(new Error(\"Shutdown timeout\")), 3000),\n ),\n ]);\n process.exit(0);\n } catch (error) {\n console.log(`[SHUTDOWN] Error: ${error}`);\n process.exit(1);\n }\n }\n\n // Register signal handlers immediately\n [\"SIGTERM\", \"SIGINT\", \"SIGHUP\", \"SIGQUIT\"].forEach((signal) => {\n process.on(signal, () => {\n handleSignal(signal).catch((error) => {\n console.log(`[SHUTDOWN] Error: ${error}`);\n process.exit(1);\n });\n });\n });\n\n logger.info(\"Starting TypeScript worker...\");\n try {\n await worker.run();\n } catch (error) {\n console.log(`[SHUTDOWN] Error: ${error}`);\n process.exit(1);\n }\n\n return worker;\n}\n","import { log as logger, Context } from \"@temporalio/activity\";\nimport { isCancellation } from \"@temporalio/workflow\";\nimport { Task, Workflow } from \"../dmv2\";\nimport { getWorkflows, getTaskForWorkflow } from \"../dmv2/internal\";\nimport { jsonDateReviver } from \"../utilities/json\";\n\nexport interface ScriptExecutionInput {\n scriptPath: string;\n inputData?: any;\n}\n\nexport const activities = {\n async hasDmv2Workflow(name: string): Promise<boolean> {\n try {\n const workflows = await getWorkflows();\n const hasWorkflow = workflows.has(name);\n logger.info(`Found workflow:: ${hasWorkflow}`);\n return hasWorkflow;\n } catch (error) {\n logger.error(`Failed to check if workflow ${name} exists: ${error}`);\n return false;\n }\n },\n\n async getDmv2Workflow(name: string): Promise<Workflow> {\n try {\n logger.info(`Getting workflow ${name}`);\n\n const workflows = await getWorkflows();\n\n if (workflows.has(name)) {\n logger.info(`Workflow ${name} found`);\n return workflows.get(name)!;\n } else {\n const errorData = {\n error: \"Workflow not found\",\n details: `Workflow ${name} not found`,\n stack: undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n } catch (error) {\n const errorData = {\n error: \"Failed to get workflow\",\n details: error instanceof Error ? error.message : String(error),\n stack: error instanceof Error ? error.stack : undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n },\n\n async getTaskForWorkflow(\n workflowName: string,\n taskName: string,\n ): Promise<Task<any, any>> {\n try {\n logger.info(`Getting task ${taskName} from workflow ${workflowName}`);\n const task = await getTaskForWorkflow(workflowName, taskName);\n logger.info(`Task ${taskName} found in workflow ${workflowName}`);\n return task;\n } catch (error) {\n const errorData = {\n error: \"Failed to get task\",\n details: error instanceof Error ? error.message : String(error),\n stack: error instanceof Error ? 
error.stack : undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n },\n\n async executeDmv2Task(\n workflow: Workflow,\n task: Task<any, any>,\n inputData: any,\n ): Promise<any[]> {\n // Get context for heartbeat (required for cancellation detection)\n const context = Context.current();\n const taskState = {};\n\n // Periodic heartbeat is required for cancellation detection\n // https://docs.temporal.io/develop/typescript/cancellation#cancel-an-activity\n // - Temporal activities can only receive cancellation if they send heartbeats\n // - Heartbeats are the communication channel between activity and Temporal server\n // - Server sends cancellation signals back in heartbeat responses\n // - Without heartbeats, context.cancelled will never resolve and cancellation is impossible\n let heartbeatInterval: NodeJS.Timeout | null = null;\n const startPeriodicHeartbeat = () => {\n heartbeatInterval = setInterval(() => {\n context.heartbeat(`Task ${task.name} in progress`);\n }, 5000);\n };\n const stopPeriodicHeartbeat = () => {\n if (heartbeatInterval) {\n clearInterval(heartbeatInterval);\n heartbeatInterval = null;\n }\n };\n\n try {\n logger.info(\n `Task ${task.name} received input: ${JSON.stringify(inputData)}`,\n );\n\n // Send initial heartbeat to enable cancellation detection\n context.heartbeat(`Starting task: ${task.name}`);\n\n // Data between temporal workflow & activities are serialized so we\n // have to get it again to access the user's run function\n const fullTask = await getTaskForWorkflow(workflow.name, task.name);\n\n // Revive any JSON serialized dates in the input data\n const revivedInputData =\n inputData ?\n JSON.parse(JSON.stringify(inputData), jsonDateReviver)\n : inputData;\n\n try {\n startPeriodicHeartbeat();\n\n // Race user code against cancellation detection\n // - context.cancelled Promise rejects when server signals cancellation via heartbeat response\n // - This allows immediate cancellation detection rather than waiting for user code to finish\n // - If cancellation happens first, we catch it below and call onCancel cleanup\n const result = await Promise.race([\n fullTask.config.run({ state: taskState, input: revivedInputData }),\n context.cancelled,\n ]);\n return result;\n } catch (error) {\n if (isCancellation(error)) {\n logger.info(\n `Task ${task.name} cancelled, calling onCancel handler if it exists`,\n );\n if (fullTask.config.onCancel) {\n await fullTask.config.onCancel({\n state: taskState,\n input: revivedInputData,\n });\n }\n return [];\n } else {\n throw error;\n }\n } finally {\n stopPeriodicHeartbeat();\n }\n } catch (error) {\n const errorData = {\n error: \"Task execution failed\",\n details: error instanceof Error ? error.message : String(error),\n stack: error instanceof Error ? 
error.stack : undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n },\n};\n\n// Helper function to create activity for a specific script\nexport function createActivityForScript(scriptName: string) {\n return {\n [scriptName]: activities.executeDmv2Task,\n };\n}\n","import {\n makeTelemetryFilterString,\n DefaultLogger,\n Runtime,\n} from \"@temporalio/worker\";\n\nclass LoggerSingleton {\n private static instance: DefaultLogger | null = null;\n\n private constructor() {}\n\n public static initializeLogger(): DefaultLogger {\n if (!LoggerSingleton.instance) {\n LoggerSingleton.instance = new DefaultLogger(\n \"DEBUG\",\n ({ level, message }) => {\n console.log(`${level} | ${message}`);\n },\n );\n\n Runtime.install({\n logger: LoggerSingleton.instance,\n telemetryOptions: {\n logging: {\n filter: makeTelemetryFilterString({ core: \"INFO\", other: \"INFO\" }),\n forward: {},\n },\n },\n });\n }\n\n return LoggerSingleton.instance;\n }\n\n public static getInstance(): DefaultLogger {\n return LoggerSingleton.instance!;\n }\n}\n\nexport const initializeLogger = LoggerSingleton.initializeLogger;\n"],"mappings":";;;;;;;;;;;;;AAAA,OAAO,UAAU;AACjB,SAAS,oBAAoB;AAC7B,SAAS,eAAe;AAexB,SAAS,SAAS,OAAoC;AACpD,MAAI,CAAC,MAAO,QAAO;AACnB,UAAQ,MAAM,KAAK,EAAE,YAAY,GAAG;AAAA,IAClC,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT;AACE,aAAO;AAAA,EACX;AACF;AAmGO,SAAS,qBAAqB,iBAA0B;AAC7D,SAAO;AAAA,IACL,SAAS;AAAA,MACP,YAAY;AAAA;AAAA,MACZ,MAAM;AAAA,MACN,OAAO;AAAA,QACL,SAAS;AAAA,QACT,cAAc;AAAA,MAChB;AAAA,IACF;AAAA,IACA,aAAa;AAAA;AAAA,IACb,GAAI,mBAAmB,EAAE,qBAAqB,gBAAgB;AAAA,EAChE;AACF;AA5IA,IAIQ,OA0BK,aA2BA,qBA4BA,QA0BA,aACA,mBACA,uBAEA,sBAGA,MA+BP,mBA6CO,UAWP,iBAwBO;AArOb;AAAA;AAAA;AAIA,KAAM,EAAE,UAAU;AA0BX,IAAM,cAAc,CAAC,YAAoB;AAC9C,UAAI,CAAC,SAAS,QAAQ,IAAI,2BAA2B,GAAG;AACtD,gBAAQ,IAAI,OAAO;AAAA,MACrB;AAAA,IACF;AAuBO,IAAM,sBAAsB,CAAC;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,MAAoB;AAClB,YAAM,WACJ,WAAW,OAAO,OAAO,YAAY,MAAM,SAAS,UAAU;AAChE,cAAQ,IAAI,+BAA+B,QAAQ,MAAM,IAAI,IAAI,IAAI,EAAE;AACvE,aAAO,aAAa;AAAA,QAClB,KAAK,GAAG,QAAQ,MAAM,IAAI,IAAI,IAAI;AAAA,QAClC;AAAA,QACA;AAAA,QACA;AAAA,QACA,aAAa;AAAA;AAAA;AAAA,MAGf,CAAC;AAAA,IACH;AAQO,IAAM,SAAoC,CAAC,QAAQ;AACxD,YAAM,MAAM,KAAK,QAAQ;AAAA,QACvB,MAAM,SAAS,QAAQ,IAAI,yBAAyB,MAAM;AAAA,QAC1D,QAAQ;AAAA,QACR,MAAM;AAAA,MACR,CAAC;AAED,UAAI,GAAG,SAAS,CAAC,QAAe;AAC9B,gBAAQ,IAAI,SAAS,IAAI,IAAI,qBAAqB,IAAI,OAAO;AAAA,MAC/D,CAAC;AAED,UAAI,MAAM,KAAK,UAAU,EAAE,cAAc,QAAQ,GAAG,IAAI,CAAC,CAAC;AAC1D,UAAI,IAAI;AAAA,IACV;AAaO,IAAM,cAAc;AACpB,IAAM,oBAAoB;AAC1B,IAAM,wBAAwB;AAE9B,IAAM,uBAAuB;AAG7B,IAAM,OAAO;AA+BpB,IAAM,oBAAoB,CAAC,iBACzB,aACG,MAAM,GAAG,EACT,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC,EACnB,OAAO,CAAC,MAAM,EAAE,SAAS,CAAC;AAyCxB,IAAM,WAAW,CAACA,SAAgB,MAAmB;AAC1D,MAAAA,QAAO,MAAM,EAAE,OAAO;AACtB,YAAM,QAAQ,EAAE;AAChB,UAAI,OAAO;AACT,QAAAA,QAAO,MAAM,KAAK;AAAA,MACpB;AAAA,IACF;AAKA,IAAM,kBAAkB,CACtBA,SACA,SAC4B;AAC5B,YAAM,YAAY,KAAK,gBAAgB,KAAK,cAAc,YAAY,IAAI;AAC1E,cAAQ,WAAW;AAAA,QACjB,KAAK;AAAA,QACL,KAAK;AAAA,QACL,KAAK;AACH,iBAAO;AAAA,YACL;AAAA,YACA,UAAU,KAAK,gBAAgB;AAAA,YAC/B,UAAU,KAAK,gBAAgB;AAAA,UACjC;AAAA,QACF;AACE,UAAAA,QAAO,KAAK,+BAA+B,KAAK,aAAa,EAAE;AAC/D,iBAAO;AAAA,MACX;AAAA,IACF;AAMO,IAAM,iBAAiB,OAC5B,KACAA,YACmB;AACnB,YAAM,UAAU,kBAAkB,IAAI,UAAU,EAAE;AAClD,UAAI,QAAQ,WAAW,GAAG;AACxB,cAAM,IAAI,MAAM,wCAAwC,IAAI,MAAM,GAAG;AAAA,MACvE;AAEA,MAAAA,QAAO,IAAI,uCAAuC,QAAQ,KAAK,IAAI,CAAC,EAAE;AACtE,MAAAA,QAAO,IAAI,sBAAsB,IAAI,oBAAoB,WAAW,EAAE;AACtE,MAAAA,QAAO,IAAI,cAAc,IAAI,QAAQ,EAAE;AAEvC,YA
AM,aAAa,gBAAgBA,SAAQ,GAAG;AAE9C,aAAO,IAAI,MAAM;AAAA,QACf,SAAS;AAAA,UACP,UAAU,IAAI;AAAA,UACd;AAAA,UACA,KAAK,IAAI,qBAAqB;AAAA,UAC9B,GAAI,cAAc,EAAE,MAAM,WAAW;AAAA,UACrC,OAAO;AAAA,YACL,kBAAkB;AAAA,YAClB,cAAc;AAAA,YACd,SAAS;AAAA,UACX;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAAA;AAAA;;;AC3PA,SAAS,gBAAgB;;;ACOzB,OAAOC,cAAa;;;ACCpB,IAAM,UAAU,CACd,UAEA,OAAO,UAAU,YACjB,UAAU,QACV,UAAU,SACV,MAAM,SAAS;AAwBjB,IAAM,WAAW,CAAC,UAChB,OAAO,UAAU,YAAY,UAAU,SAAS,iBAAiB;AAE5D,SAAS,IACd,YACG,QACH;AACA,SAAO,IAAI,IAAI,SAAS,MAAM;AAChC;AAEA,IAAM,gBAAgB,CACpB,UAEA,OAAO,UAAU,YAAY,YAAY,SAAS,aAAa;AAK1D,IAAM,MAAN,MAAU;AAAA,EACN;AAAA,EACA;AAAA,EAET,YACE,YACA,WACA;AACA,QAAI,WAAW,SAAS,MAAM,UAAU,QAAQ;AAC9C,UAAI,WAAW,WAAW,GAAG;AAC3B,cAAM,IAAI,UAAU,4BAA4B;AAAA,MAClD;AAEA,YAAM,IAAI;AAAA,QACR,YAAY,WAAW,MAAM,oBAC3B,WAAW,SAAS,CACtB;AAAA,MACF;AAAA,IACF;AAEA,UAAM,eAAe,UAAU;AAAA,MAC7B,CAAC,KAAa,UACZ,OACC,cAAc,KAAK,IAAI,MAAM,OAAO,SACnC,SAAS,KAAK,KAAK,QAAQ,KAAK,IAAI,IACpC;AAAA,MACJ;AAAA,IACF;AAEA,SAAK,SAAS,IAAI,MAAM,YAAY;AACpC,SAAK,UAAU,IAAI,MAAM,eAAe,CAAC;AAEzC,SAAK,QAAQ,CAAC,IAAI,WAAW,CAAC;AAI9B,QAAI,IAAI,GACN,MAAM;AACR,WAAO,IAAI,UAAU,QAAQ;AAC3B,YAAM,QAAQ,UAAU,GAAG;AAC3B,YAAM,YAAY,WAAW,CAAC;AAG9B,UAAI,cAAc,KAAK,GAAG;AAExB,aAAK,QAAQ,GAAG,KAAK,MAAM,QAAQ,CAAC;AAEpC,YAAI,aAAa;AACjB,eAAO,aAAa,MAAM,OAAO,QAAQ;AACvC,eAAK,OAAO,KAAK,IAAI,MAAM,OAAO,YAAY;AAC9C,eAAK,QAAQ,GAAG,IAAI,MAAM,QAAQ,UAAU;AAAA,QAC9C;AAGA,aAAK,QAAQ,GAAG,KAAK;AAAA,MACvB,WAAW,SAAS,KAAK,GAAG;AAC1B,cAAM,sBAAsB,MAAM,YAAY;AAAA,UAC5C,CAAC,CAAC,GAAG,CAAC,MAAM,MAAM;AAAA,QACpB;AACA,YAAI,wBAAwB,QAAW;AACrC,eAAK,QAAQ,GAAG,KACd,GAAI,oBAAoB,CAAC,EAA0B,YAAY,WAAW,MAAM,IAAI;AAAA,QACxF,OAAO;AACL,eAAK,QAAQ,GAAG,KAAK,KAAK,MAAM,IAAI;AAAA,QACtC;AACA,aAAK,QAAQ,GAAG,KAAK;AAAA,MACvB,WAAW,QAAQ,KAAK,GAAG;AACzB,YAAI,MAAM,OAAO,UAAU;AACzB,eAAK,QAAQ,GAAG,KAAK,KAAK,MAAM,OAAO,QAAQ,QAAQ,MAAM,IAAI;AAAA,QACnE,OAAO;AACL,eAAK,QAAQ,GAAG,KAAK,KAAK,MAAM,IAAI;AAAA,QACtC;AACA,aAAK,QAAQ,GAAG,KAAK;AAAA,MACvB,OAAO;AACL,aAAK,OAAO,KAAK,IAAI;AACrB,aAAK,QAAQ,GAAG,IAAI;AAAA,MACtB;AAAA,IACF;AAAA,EACF;AACF;AAYO,IAAM,UAAU,CAACC,SAA8C;AACpE,QAAM,qBAAqBA,KAAI,OAAO;AAAA,IAAI,CAAC,GAAG,MAC5C,0BAA0B,GAAG,CAAC;AAAA,EAChC;AAEA,QAAM,QAAQA,KAAI,QACf;AAAA,IAAI,CAAC,GAAG,MACP,KAAK,KAAK,GAAG,CAAC,GAAG,iBAAiB,mBAAmB,CAAC,CAAC,CAAC,KAAK;AAAA,EAC/D,EACC,KAAK,EAAE;AAEV,QAAM,eAAeA,KAAI,OAAO;AAAA,IAC9B,CAAC,KAA8B,GAAG,OAAO;AAAA,MACvC,GAAG;AAAA,MACH,CAAC,IAAI,CAAC,EAAE,GAAG,sBAAsB,CAAC;AAAA,IACpC;AAAA,IACA,CAAC;AAAA,EACH;AACA,SAAO,CAAC,OAAO,YAAY;AAC7B;AAMO,IAAM,iBAAiB,CAACA,SAAqB;AAClD,MAAI;AACF,UAAM,cAAc,CAAC,MAAqB;AAExC,UAAI,MAAM,QAAQ,CAAC,GAAG;AACpB,cAAM,CAAC,MAAM,GAAG,IAAI;AACpB,YAAI,SAAS,cAAc;AAEzB,iBAAO,KAAK,OAAO,GAAG,CAAC;AAAA,QACzB;AAEA,eAAO,IAAK,EAAuB,IAAI,CAAC,MAAM,YAAY,CAAU,CAAC,EAAE,KAAK,IAAI,CAAC;AAAA,MACnF;AACA,UAAI,MAAM,QAAQ,MAAM,OAAW,QAAO;AAC1C,UAAI,OAAO,MAAM,SAAU,QAAO,IAAI,EAAE,QAAQ,MAAM,IAAI,CAAC;AAC3D,UAAI,OAAO,MAAM,SAAU,QAAO,OAAO,CAAC;AAC1C,UAAI,OAAO,MAAM,UAAW,QAAO,IAAI,SAAS;AAChD,UAAI,aAAa;AACf,eAAO,IAAI,EAAE,YAAY,EAAE,QAAQ,KAAK,GAAG,EAAE,MAAM,GAAG,EAAE,CAAC;AAC3D,UAAI;AACF,eAAO,KAAK,UAAU,CAAmB;AAAA,MAC3C,QAAQ;AACN,eAAO,OAAO,CAAC;AAAA,MACjB;AAAA,IACF;AAEA,QAAI,MAAMA,KAAI,QAAQ,CAAC,KAAK;AAC5B,aAAS,IAAI,GAAG,IAAIA,KAAI,OAAO,QAAQ,KAAK;AAC1C,YAAM,MAAM,sBAAsBA,KAAI,OAAO,CAAC,CAAQ;AACtD,aAAO,YAAY,GAAY;AAC/B,aAAOA,KAAI,QAAQ,IAAI,CAAC,KAAK;AAAA,IAC/B;AACA,WAAO,IAAI,QAAQ,QAAQ,GAAG,EAAE,KAAK;AAAA,EACvC,SAAS,OAAO;AACd,YAAQ,IAAI,yBAAyB,KAAK,EAAE;AAC5C,WAAO;AAAA,EACT;AACF;AAEO,IAAM,wBAAwB,CAAC,UAAe;AACnD,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,UAAM,CAAC,MAAM,GAAG,IAAI;AACpB,QAAI,SAAS,aAAc,QAAO;AAAA,EACpC;AACA,SAAO;AACT;AACO,SAAS,0BACd,gBACA,OACA;AAGA,SAA
O,KAAK,cAAc,IAAI,oBAAoB,KAAK,CAAC;AAC1D;AAWO,IAAM,sBAAsB,CAAC,UAAiB;AACnD,MAAI,OAAO,UAAU,UAAU;AAE7B,WAAO,OAAO,UAAU,KAAK,IAAI,QAAQ;AAAA,EAC3C;AAGA,MAAI,OAAO,UAAU,UAAW,QAAO;AACvC,MAAI,iBAAiB,KAAM,QAAO;AAClC,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,UAAM,CAAC,MAAM,CAAC,IAAI;AAClB,WAAO;AAAA,EACT;AACA,SAAO;AACT;AACA,SAAS,iBAAiB,OAAmC;AAC3D,SAAO,UAAU,SAAY,KAAK;AACpC;;;AC1PA,SAAS,gBAAgB;AACzB,SAAS,kBAAkB;;;ACY3B,SAAS,cAAAC,mBAAkB;;;ACR3B;;;ACbA;AAAA,EACE,UAAU;AAAA,EACV;AAAA,OAEK;AAEP,SAAS,cAAAC,aAAY,kBAAkB;AACvC,SAAS,mBAAmB;AAC5B,YAAY,QAAQ;AASpB,SAAS,kBAAkB,IAAoB;AAC7C,MAAI,KAAK,KAAM;AACb,WAAO,GAAG,KAAK,MAAM,EAAE,CAAC;AAAA,EAC1B;AACA,QAAM,UAAU,KAAK;AACrB,MAAI,UAAU,IAAI;AAChB,WAAO,GAAG,QAAQ,QAAQ,CAAC,CAAC;AAAA,EAC9B;AACA,QAAM,UAAU,KAAK,MAAM,UAAU,EAAE;AACvC,QAAM,mBAAmB,UAAU;AACnC,SAAO,GAAG,OAAO,gBAAgB,iBAAiB,QAAQ,CAAC,CAAC;AAC9D;AAaO,IAAM,cAAN,MAAkB;AAAA,EACvB;AAAA,EACA;AAAA,EAEA,YAAY,aAA0B,gBAAiC;AACrE,SAAK,QAAQ;AACb,SAAK,WAAW,IAAI,eAAe,cAAc;AAAA,EACnD;AACF;AAEO,IAAM,cAAN,MAAkB;AAAA,EACvB;AAAA,EACA;AAAA,EACA,YAAY,QAA0B,iBAAyB;AAC7D,SAAK,SAAS;AACd,SAAK,kBAAkB;AAAA,EACzB;AAAA,EAEA,MAAM,QACJC,MACgE;AAChE,UAAM,CAAC,OAAO,YAAY,IAAI,QAAQA,IAAG;AAEzC,YAAQ,IAAI,0BAA0B,eAAeA,IAAG,CAAC,EAAE;AAC3D,UAAM,QAAQ,YAAY,IAAI;AAC9B,UAAM,SAAS,MAAM,KAAK,OAAO,MAAM;AAAA,MACrC;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,MACR,UAAU,KAAK,kBAAkB,WAAW;AAAA;AAAA;AAAA,IAG9C,CAAC;AACD,UAAM,YAAY,YAAY,IAAI,IAAI;AACtC,YAAQ;AAAA,MACN,oCAAoC,kBAAkB,SAAS,CAAC;AAAA,IAClE;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAAQA,MAAkC;AAC9C,UAAM,CAAC,OAAO,YAAY,IAAI,QAAQA,IAAG;AAEzC,YAAQ,IAAI,4BAA4B,eAAeA,IAAG,CAAC,EAAE;AAC7D,UAAM,QAAQ,YAAY,IAAI;AAC9B,UAAM,SAAS,MAAM,KAAK,OAAO,QAAQ;AAAA,MACvC;AAAA,MACA;AAAA,MACA,UAAU,KAAK,kBAAkB,WAAW;AAAA,IAC9C,CAAC;AACD,UAAM,YAAY,YAAY,IAAI,IAAI;AACtC,YAAQ;AAAA,MACN,sCAAsC,kBAAkB,SAAS,CAAC;AAAA,IACpE;AACA,WAAO;AAAA,EACT;AACF;AAEO,IAAM,iBAAN,MAAqB;AAAA,EAC1B;AAAA,EAEA,YAAY,gBAAiC;AAC3C,SAAK,SAAS;AAAA,EAChB;AAAA,EAEA,MAAM,QAAQ,MAAc,YAAiB;AAC3C,QAAI;AACF,UAAI,CAAC,KAAK,QAAQ;AAChB,eAAO;AAAA,UACL,QAAQ;AAAA,UACR,MAAM;AAAA,QACR;AAAA,MACF;AAGA,YAAM,SAAS,MAAM,KAAK,kBAAkB,IAAI;AAGhD,YAAM,CAAC,gBAAgB,UAAU,IAAI,KAAK;AAAA,QACxC;AAAA,QACA;AAAA,MACF;AAEA,cAAQ;AAAA,QACN,uCAAuC,IAAI,gBAAgB,KAAK,UAAU,MAAM,CAAC,mBAAmB,KAAK,UAAU,cAAc,CAAC;AAAA,MACpI;AAEA,YAAM,SAAS,MAAM,KAAK,OAAO,SAAS,MAAM,kBAAkB;AAAA,QAChE,MAAM;AAAA,UACJ,EAAE,eAAe,MAAM,gBAAgB,QAAiB;AAAA,UACxD;AAAA,QACF;AAAA,QACA,WAAW;AAAA,QACX;AAAA,QACA,0BAA0B;AAAA,QAC1B,uBAAuB;AAAA,QACvB,OAAO;AAAA,UACL,iBAAiB,OAAO;AAAA,QAC1B;AAAA,QACA,oBAAoB,OAAO;AAAA,MAC7B,CAAC;AAED,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,MAAM,qBAAqB,IAAI,2FAA2F,UAAU,IAAI,OAAO,mBAAmB;AAAA,MACpK;AAAA,IACF,SAAS,OAAO;AACd,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,MAAM,4BAA4B,KAAK;AAAA,MACzC;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,UAAU,YAAoB;AAClC,QAAI;AACF,UAAI,CAAC,KAAK,QAAQ;AAChB,eAAO;AAAA,UACL,QAAQ;AAAA,UACR,MAAM;AAAA,QACR;AAAA,MACF;AAEA,YAAM,SAAS,KAAK,OAAO,SAAS,UAAU,UAAU;AACxD,YAAM,OAAO,UAAU;AAEvB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,MAAM,wBAAwB,UAAU;AAAA,MAC1C;AAAA,IACF,SAAS,OAAO;AACd,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,MAAM,+BAA+B,KAAK;AAAA,MAC5C;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAc,kBACZ,MAC+C;AAC/C,UAAM,YAAY,MAAMC,cAAa;AACrC,UAAM,eAAe,UAAU,IAAI,IAAI;AACvC,QAAI,cAAc;AAChB,aAAO;AAAA,QACL,SAAS,aAAa,OAAO,WAAW;AAAA,QACxC,SAAS,aAAa,OAAO,WAAW;AAAA,MAC1C;AAAA,IACF;AAEA,UAAM,IAAI,MAAM,iCAAiC,IAAI,EAAE;AAAA,EACzD;AAAA,EAEQ,iBAAiB,MAAc,YAAgC;AACrE,QAAI,aAAa;AACjB,QAAI,YAAY;AACd,YAAM,OAAOC,YAAW,QAAQ,EAC7B,OAAO,KAAK,UAAU,UAAU,CAAC,EACjC,OAAO,KAAK,EACZ,MAAM,GAAG,EAAE;AACd,mBAAa,GAAG,IAAI,IAAI,IAAI;AAAA,IAC9B;AACA,WAAO,CAAC,YAAY,UAAU;AAAA,EAChC;AACF;AAQA,eAAsB,kBACpB,aACA,WACA,YACA,WACA,QACqC;AACrC,MA
AI;AACF,YAAQ;AAAA,MACN,6BAA6B,WAAW,mBAAmB,SAAS;AAAA,IACtE;AAEA,QAAI,oBAAuC;AAAA,MACzC,SAAS;AAAA,MACT,gBAAgB;AAAA,IAClB;AAEA,QAAI,cAAc,WAAW;AAE3B,cAAQ,IAAI,+BAA+B;AAC3C,YAAM,OAAO,MAAS,gBAAa,UAAU;AAC7C,YAAM,MAAM,MAAS,gBAAa,SAAS;AAE3C,wBAAkB,MAAM;AAAA,QACtB,gBAAgB,EAAE,KAAK,MAAM,IAAS;AAAA,MACxC;AAAA,IACF,WAAW,QAAQ;AACjB,cAAQ,IAAI,mCAAmC;AAE/C,wBAAkB,UAAU;AAC5B,wBAAkB,SAAS;AAC3B,wBAAkB,MAAM,CAAC;AACzB,wBAAkB,WAAW;AAAA,QAC3B,sBAAsB;AAAA,MACxB;AAAA,IACF;AAEA,YAAQ,IAAI,mCAAmC,kBAAkB,OAAO,EAAE;AAC1E,UAAM,aAAa,MAAM,WAAW,QAAQ,iBAAiB;AAC7D,UAAM,SAAS,IAAI,eAAe,EAAE,YAAY,UAAU,CAAC;AAC3D,YAAQ,IAAI,oCAAoC;AAEhD,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,KAAK,6DAA6D;AAC1E,YAAQ,KAAK,KAAK;AAClB,WAAO;AAAA,EACT;AACF;;;ACrQA;AADA,OAAOC,WAAU;AAGjB,YAAY,UAAU;;;ACHtB,OAAO,aAAa;AACpB,SAAS,4BAA4B;AACrC,SAAS,YAAY;AAGrB,IAAM,8BAA8B;AAIpC,IAAM,kBAAkB;AACxB,IAAM,UAAU;AAChB,IAAM,SAAS;AACf,IAAM,4BAA4B;AAQ3B,IAAM,UAAN,MAAiB;AAAA;AAAA,EAEd,qBAA8B;AAAA;AAAA,EAE9B,qBAA8B;AAAA;AAAA,EAG9B,aAAa,GAAG,QAAQ,YAAY,YAAY,QAAQ,YAAY,QAAQ,GAAG;AAAA;AAAA,EAG/E;AAAA,EACA;AAAA;AAAA,EAGA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYR,YAAY,SAKT;AACD,SAAK,cAAc,QAAQ;AAC3B,SAAK,aAAa,QAAQ;AAC1B,QACE,QAAQ,qBACP,QAAQ,mBAAmB,KAAK,QAAQ,mBAAmB,IAC5D;AACA,YAAM,IAAI,MAAM,0CAA0C;AAAA,IAC5D;AACA,SAAK,mBACH,QAAQ,oBAAoB;AAC9B,SAAK,eAAe,KAAK;AAAA,MACvB,KAAK;AAAA,MACL,QAAQ;AAAA,IACV;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,qBAAqB,eAAuB,gBAAyB;AACnE,UAAM,WAAW,qBAAqB;AACtC,UAAM,aAAa,kBAAkB;AACrC,WAAO,KAAK;AAAA,MACV;AAAA,MACA,KAAK,IAAI,GAAG,KAAK,MAAM,WAAW,aAAa,CAAC;AAAA,IAClD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,QAAQ;AACZ,YAAQ,GAAG,SAAS,KAAK,wBAAwB,OAAO,CAAC;AACzD,YAAQ,GAAG,QAAQ,KAAK,wBAAwB,MAAM,CAAC;AAEvD,QAAI,QAAQ,WAAW;AACrB,YAAM,YAAY,QAAQ;AAE1B,kBAAY,MAAM;AAChB,YAAI;AACF,kBAAQ,KAAK,WAAW,CAAC;AAAA,QAC3B,SAAS,GAAG;AACV,kBAAQ,IAAI,4BAA4B;AACxC,eAAK,wBAAwB,OAAO,EAAE;AAAA,QACxC;AAAA,MACF,GAAG,GAAI;AAEP,YAAM,KAAK,YAAY,KAAK,YAAY;AAAA,IAC1C,OAAO;AACL,UAAI,CAAC,QAAQ,QAAQ;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,WAAK,cAAc,MAAM,KAAK;AAAA,QAC5B,QAAQ;AAAA,QACR,KAAK;AAAA,MACP;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,cAAc,OAAO,eAAuB;AAC1C,YAAQ,KAAK,WAAW,UAAU,aAAa;AAE/C,aAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,cAAQ,KAAK;AAAA,IACf;AAEA,YAAQ,GAAG,UAAU,CAAC,WAAW;AAC/B,cAAQ,KAAK,kBAAkB,OAAO,QAAQ,GAAG,YAAY;AAAA,IAC/D,CAAC;AAED,YAAQ,GAAG,QAAQ,CAAC,QAAQ,MAAM,WAAW;AAC3C,cAAQ;AAAA,QACN,UAAU,OAAO,QAAQ,GAAG,qBAAqB,IAAI,eAAe,MAAM;AAAA,MAC5E;AAEA,UAAI,CAAC,KAAK,oBAAoB;AAC5B,mBAAW,MAAM,QAAQ,KAAK,GAAG,eAAe;AAAA,MAClD;AAEA,UAAI,KAAK,sBAAsB,QAAQ,GAAG;AACxC,aAAK,qBAAqB;AAAA,MAC5B;AAAA,IACF,CAAC;AAED,YAAQ,GAAG,cAAc,CAAC,WAAW;AACnC,cAAQ,KAAK,kBAAkB,OAAO,QAAQ,GAAG,mBAAmB;AAAA,IACtE,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,0BAA0B,CAAC,WAA2B,YAAY;AAChE,QAAI,KAAK,oBAAoB;AAC3B;AAAA,IACF;AAEA,SAAK,qBAAqB;AAC1B,SAAK,qBAAqB;AAE1B,YAAQ;AAAA,MACN,OAAO,MAAM,OAAO,KAAK,UAAU,iCAAgC,oBAAI,KAAK,GAAE,YAAY,CAAC;AAAA,IAC7F;AAEA,QAAI;AACF,UAAI,QAAQ,WAAW;AACrB,cAAM,KAAK,gBAAgB,MAAM;AACjC,gBAAQ,KAAK,GAAG,KAAK,UAAU,+BAA+B;AAC9D,aAAK,CAAC;AAAA,MACR,OAAO;AAEL,YAAI,KAAK,aAAa;AACpB,gBAAM,KAAK,WAAW,KAAK,WAAW;AAAA,QACxC,OAAO;AACL,kBAAQ;AAAA,YACN,GAAG,KAAK,UAAU;AAAA,UACpB;AAAA,QACF;AACA,gBAAQ,KAAK,GAAG,KAAK,UAAU,sBAAsB;AACrD,aAAK,qBAAqB,KAAK,CAAC,IAAI,KAAK,CAAC;AAAA,MAC5C;AAAA,IACF,SAAS,GAAG;AACV,cAAQ,MAAM,GAAG,KAAK,UAAU,sBAAsB,CAAC;AACvD,WAAK,CAAC;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,kBAAkB,CAAC,WAA2B;AAC5C,WAAO,IAAI,QAAc,CAACC,UAAS,WAAW;AAC5C,UAAI,CAAC,QAAQ,WAAW;AACtB,eAAOA,SAA
Q;AAAA,MACjB;AAEA,UAAI,CAAC,QAAQ,SAAS;AACpB,eAAOA,SAAQ;AAAA,MACjB;AAEA,YAAM,YAAY,OAAO,KAAK,QAAQ,OAAO;AAC7C,UAAI,UAAU,UAAU,GAAG;AACzB,eAAOA,SAAQ;AAAA,MACjB;AAEA,UAAI,eAAe;AACnB,UAAI,UAAU;AAEd,YAAM,eAAe,MAAM;AACzB,UAAE;AACF,uBAAe;AAEf,eAAO,OAAO,QAAQ,WAAW,CAAC,CAAC,EAChC,OAAO,CAAC,WAAW,CAAC,CAAC,MAAM,EAC3B,QAAQ,CAAC,WAAW;AACnB,cAAI,UAAU,CAAC,OAAO,OAAO,GAAG;AAC9B,cAAE;AACF,gBAAI,WAAW,GAAG;AAChB,qBAAO,KAAK,MAAM;AAAA,YACpB;AAAA,UACF;AAAA,QACF,CAAC;AAEH,gBAAQ,KAAK,eAAe,gBAAgB;AAC5C,YAAI,gBAAgB,GAAG;AACrB,wBAAc,QAAQ;AACtB,iBAAOA,SAAQ;AAAA,QACjB;AAAA,MACF;AAEA,YAAM,WAAW,YAAY,cAAc,yBAAyB;AAAA,IACtE,CAAC;AAAA,EACH;AACF;;;ADjNA,IAAM,iBAAiB,CAAC,YAA8B;AAAA,EACpD,GAAG;AAAA,EACH,QAAQ,OAAO,SAAS,SAAS;AACnC;AAEA,IAAM,aAAa,CAAC,SAAiBC,UAAiB,GAAG,OAAO,GAAGA,KAAI;AAEvE,IAAM,aAAa,CACjB,KACA,KACA,YACG;AACH,UAAQ;AAAA,IACN,GAAG,IAAI,MAAM,IAAI,IAAI,GAAG,IAAI,IAAI,UAAU,IAAI,KAAK,IAAI,IAAI,OAAO;AAAA,EACpE;AACF;AAEA,IAAM,eAAe,oBAAI,IAAiB;AAgB1C,IAAM,aAAa,OACjB,WACA,kBACA,gBACA,SACA,aACA,QACA,cACG;AACH,QAAM,OAAO,SAAS,MAAMC,SAAQ,IAAI,oBAAI,IAAI;AAChD,SAAO,OAAO,KAA2B,QAA6B;AACpE,UAAM,QAAQ,KAAK,IAAI;AAEvB,QAAI;AACF,YAAM,MAAM,IAAI,IAAI,IAAI,OAAO,IAAI,kBAAkB;AACrD,YAAM,WAAW,IAAI;AAErB,UAAI;AACJ,UAAI,aAAa,WAAW;AAC1B,cAAM,MAAM,IAAI,QAAQ,eAAe,MAAM,GAAG,EAAE,CAAC;AACnD,YAAI,KAAK;AACP,cAAI;AACF,kBAAM,EAAE,QAAQ,IAAI,MAAW,eAAU,KAAK,WAAW;AAAA,cACvD,QAAQ,UAAU;AAAA,cAClB,UAAU,UAAU;AAAA,YACtB,CAAC;AACD,yBAAa;AAAA,UACf,SAAS,OAAO;AACd,oBAAQ,IAAI,yBAAyB;AACrC,gBAAI,aAAa;AACf,kBAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,kBAAI,IAAI,KAAK,UAAU,EAAE,OAAO,eAAe,CAAC,CAAC;AACjD,yBAAW,KAAK,KAAK,KAAK;AAC1B;AAAA,YACF;AAAA,UACF;AAAA,QACF,WAAW,aAAa;AACtB,cAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,cAAI,IAAI,KAAK,UAAU,EAAE,OAAO,eAAe,CAAC,CAAC;AACjD,qBAAW,KAAK,KAAK,KAAK;AAC1B;AAAA,QACF;AAAA,MACF,WAAW,aAAa;AACtB,YAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,YAAI,IAAI,KAAK,UAAU,EAAE,OAAO,eAAe,CAAC,CAAC;AACjD,mBAAW,KAAK,KAAK,KAAK;AAC1B;AAAA,MACF;AAEA,YAAM,WAAW,WAAW,SAAS,QAAQ;AAC7C,YAAM,eAAe,MAAM,KAAK,IAAI,aAAa,QAAQ,CAAC,EAAE;AAAA,QAC1D,CAAC,KAA2C,CAAC,KAAK,KAAK,MAAM;AAC3D,gBAAM,gBAAgB,IAAI,GAAG;AAC7B,cAAI,eAAe;AACjB,gBAAI,MAAM,QAAQ,aAAa,GAAG;AAChC,4BAAc,KAAK,KAAK;AAAA,YAC1B,OAAO;AACL,kBAAI,GAAG,IAAI,CAAC,eAAe,KAAK;AAAA,YAClC;AAAA,UACF,OAAO;AACL,gBAAI,GAAG,IAAI;AAAA,UACb;AACA,iBAAO;AAAA,QACT;AAAA,QACA,CAAC;AAAA,MACH;AAEA,UAAI,iBAAiB,aAAa,IAAI,QAAQ;AAC9C,UAAI,mBAAmB,QAAW;AAChC,YAAI,QAAQ;AACV,cAAI,UAAU,SAAS,QAAQ,cAAc,EAAE;AAC/C,cAAI,UAAyB;AAG7B,2BAAiB,KAAK,IAAI,OAAO;AAEjC,cAAI,CAAC,gBAAgB;AAEnB,sBAAU,IAAI,aAAa,IAAI,SAAS;AAGxC,gBAAI,CAAC,WAAW,QAAQ,SAAS,GAAG,GAAG;AACrC,oBAAM,YAAY,QAAQ,MAAM,GAAG;AACnC,kBAAI,UAAU,UAAU,GAAG;AAEzB,iCAAiB,KAAK,IAAI,OAAO;AACjC,oBAAI,CAAC,gBAAgB;AAEnB,4BAAU,UAAU,CAAC;AACrB,4BAAU,UAAU,MAAM,CAAC,EAAE,KAAK,GAAG;AAAA,gBACvC;AAAA,cACF;AAAA,YACF;AAGA,gBAAI,CAAC,gBAAgB;AACnB,kBAAI,SAAS;AACX,sBAAM,eAAe,GAAG,OAAO,IAAI,OAAO;AAC1C,iCAAiB,KAAK,IAAI,YAAY;AAAA,cACxC,OAAO;AACL,iCAAiB,KAAK,IAAI,OAAO;AAAA,cACnC;AAAA,YACF;AAAA,UACF;AAEA,cAAI,CAAC,gBAAgB;AACnB,kBAAM,gBAAgB,MAAM,KAAK,KAAK,KAAK,CAAC,EAAE;AAAA,cAAI,CAAC,QACjD,IAAI,QAAQ,KAAK,GAAG;AAAA,YACtB;AACA,kBAAM,eACJ,UACE,OAAO,OAAO,iBAAiB,OAAO,+BAA+B,cAAc,KAAK,IAAI,CAAC,KAC7F,OAAO,OAAO,+BAA+B,cAAc,KAAK,IAAI,CAAC;AACzE,kBAAM,IAAI,MAAM,YAAY;AAAA,UAC9B;AAEA,uBAAa,IAAI,UAAU,cAAc;AACzC,kBAAQ,IAAI,0BAA0B,OAAO,EAAE;AAAA,QACjD,OAAO;AACL,2BAAiB,UAAQ,QAAQ;AACjC,uBAAa,IAAI,UAAU,cAAc;AAAA,QAC3C;AAAA,MACF;AAEA,YAAM,cAAc,IAAI,YAAY,kBAAkB,QAAQ;AAC9D,UAAI,SACF,SACE,MAAM,eAAe,cAAc;AAAA,QACjC,QAAQ,IAAI,YAAY,aAAa,cAAc;AAAA,QACnD;AAAA,QACA,KAAK;AAAA,MACP,CAAC,IACD,MAAM,eAAe,QAAQ,cAAc;AAAA,QACzC,QAAQ,IAAI,YAAY,aAAa,cAAc;
AAAA,QACnD;AAAA,QACA,KAAK;AAAA,MACP,CAAC;AAEL,UAAI;AACJ,UAAI;AAGJ,UAAI,OAAO,eAAe,MAAM,EAAE,YAAY,SAAS,aAAa;AAClE,eAAO,KAAK,UAAU,MAAM,OAAO,KAAK,CAAC;AAAA,MAC3C,OAAO;AACL,YAAI,UAAU,UAAU,YAAY,QAAQ;AAC1C,iBAAO,KAAK,UAAU,OAAO,IAAI;AACjC,mBAAS,OAAO;AAAA,QAClB,OAAO;AACL,iBAAO,KAAK,UAAU,MAAM;AAAA,QAC9B;AAAA,MACF;AAEA,UAAI,QAAQ;AACV,YAAI,UAAU,QAAQ,EAAE,gBAAgB,mBAAmB,CAAC;AAC5D,mBAAW,KAAK,KAAK,KAAK;AAAA,MAC5B,OAAO;AACL,YAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,mBAAW,KAAK,KAAK,KAAK;AAAA,MAC5B;AAEA,UAAI,IAAI,IAAI;AAAA,IACd,SAAS,OAAY;AACnB,cAAQ,IAAI,kBAAkB,IAAI,KAAK,KAAK;AAE5C,UAAI,OAAO,eAAe,KAAK,EAAE,YAAY,SAAS,kBAAkB;AACtE,YAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,YAAI,IAAI,KAAK,UAAU,EAAE,OAAO,MAAM,QAAQ,CAAC,CAAC;AAChD,mBAAW,KAAK,KAAK,KAAK;AAAA,MAC5B;AACA,UAAI,iBAAiB,OAAO;AAC1B,YAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,YAAI,IAAI,KAAK,UAAU,EAAE,OAAO,MAAM,QAAQ,CAAC,CAAC;AAChD,mBAAW,KAAK,KAAK,KAAK;AAAA,MAC5B,OAAO;AACL,YAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,YAAI,IAAI;AACR,mBAAW,KAAK,KAAK,KAAK;AAAA,MAC5B;AAAA,IACF;AAAA,EACF;AACF;AAEA,IAAM,mBAAmB,OACvB,WACA,kBACA,gBACA,SACA,aACA,QACA,cACG;AACH,QAAM,oBAAoB,MAAM;AAAA,IAC9B;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,QAAM,UAAU,SAAS,MAAMC,YAAW,IAAI,oBAAI,IAAI;AAEtD,QAAM,gBAAgB,MAAM,KAAK,QAAQ,OAAO,CAAC,EAAE,KAAK,CAAC,GAAG,MAAM;AAChE,UAAM,QAAQ,EAAE,OAAO,aAAa;AACpC,UAAM,QAAQ,EAAE,OAAO,aAAa;AACpC,WAAO,MAAM,SAAS,MAAM;AAAA,EAC9B,CAAC;AAED,SAAO,OAAO,KAA2B,QAA6B;AACpE,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,MAAM,IAAI,IAAI,IAAI,OAAO,IAAI,kBAAkB;AACrD,UAAM,WAAW,IAAI;AAGrB,QAAI,aAAa,2BAA2B;AAC1C,UAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,UAAI;AAAA,QACF,KAAK,UAAU;AAAA,UACb,QAAQ;AAAA,UACR,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,QACpC,CAAC;AAAA,MACH;AACA;AAAA,IACF;AAEA,QAAI;AACJ,QAAI,aAAa,WAAW;AAC1B,YAAM,MAAM,IAAI,QAAQ,eAAe,MAAM,GAAG,EAAE,CAAC;AACnD,UAAI,KAAK;AACP,YAAI;AACF,gBAAM,EAAE,QAAQ,IAAI,MAAW,eAAU,KAAK,WAAW;AAAA,YACvD,QAAQ,UAAU;AAAA,YAClB,UAAU,UAAU;AAAA,UACtB,CAAC;AACD,uBAAa;AAAA,QACf,SAAS,OAAO;AACd,kBAAQ,IAAI,0CAA0C;AAAA,QACxD;AAAA,MACF;AAAA,IACF;AAEA,eAAW,UAAU,eAAe;AAClC,YAAM,YAAY,OAAO,OAAO,aAAa;AAC7C,YAAM,kBACJ,UAAU,SAAS,GAAG,KAAK,cAAc,MACvC,UAAU,MAAM,GAAG,EAAE,IACrB;AAEJ,YAAM,UACJ,aAAa,mBACb,SAAS,WAAW,kBAAkB,GAAG;AAE3C,UAAI,SAAS;AACX,YAAI,OAAO,OAAO,qBAAqB,OAAO;AAC5C,gBAAM,cAAc,IAAI,YAAY,kBAAkB,QAAQ;AAC9D,UAAC,IAAY,QAAQ;AAAA,YACnB,QAAQ,IAAI,YAAY,aAAa,cAAc;AAAA,YACnD;AAAA,YACA,KAAK;AAAA,UACP;AAAA,QACF;AAEA,YAAI,aAAa,IAAI;AACrB,YAAI,oBAAoB,KAAK;AAC3B,gBAAM,mBACJ,SAAS,UAAU,gBAAgB,MAAM,KAAK;AAChD,uBAAa,mBAAmB,IAAI;AAAA,QACtC;AAEA,YAAI;AAIF,gBAAM,cAAc,OAAO;AAAA,YACzB,OAAO,OAAO,OAAO,eAAe,GAAG,CAAC;AAAA,YACxC;AAAA,YACA;AAAA,cACE,KAAK;AAAA,YACP;AAAA,UACF;AACA,gBAAM,OAAO,QAAQ,aAAa,GAAG;AACrC;AAAA,QACF,SAAS,OAAO;AACd,kBAAQ,MAAM,mBAAmB,OAAO,IAAI,KAAK,KAAK;AACtD,cAAI,CAAC,IAAI,aAAa;AACpB,gBAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,gBAAI,IAAI,KAAK,UAAU,EAAE,OAAO,wBAAwB,CAAC,CAAC;AAAA,UAC5D;AACA;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAIA,QAAI,UAAU;AACd,QAAI,SAAS,WAAW,OAAO,GAAG;AAChC,gBAAU,SAAS,UAAU,CAAC;AAAA,IAChC,WAAW,SAAS,WAAW,eAAe,GAAG;AAC/C,gBAAU,SAAS,UAAU,EAAE;AAAA,IACjC;AAGA,QAAI,YAAY,UAAU;AAKxB,YAAM,cAAc,OAAO;AAAA,QACzB,OAAO,OAAO,OAAO,eAAe,GAAG,CAAC;AAAA,QACxC;AAAA,QACA;AAAA,UACE,KAAK,UAAU,IAAI;AAAA,QACrB;AAAA,MACF;AACA,YAAM,kBAAkB,aAAqC,GAAG;AAChE;AAAA,IACF;AAEA,QAAI,UAAU,KAAK,EAAE,gBAAgB,mBAAmB,CAAC;AACzD,QAAI,IAAI,KAAK,UAAU,EAAE,OAAO,YAAY,CAAC,CAAC;AAC9C,eAAW,KAAK,KAAK,KAAK;AAAA,EAC5B;AACF;AAEO,IAAM,UAAU,OAAO,WAAuB;AACnD,QAAM,cAAc,IAAI,QAAQ;AAAA,IAC9B,iBACG,OAAO,eAAe,KAAK,IAAI,OAAO,cAAc;AAAA,IACvD,aAAa,YAAY;AACvB,UAAI;AACJ,UAAI,OAAO,gBAAgB;AACzB,y
BAAiB,MAAM;AAAA,UACrB,OAAO,eAAe;AAAA,UACtB,OAAO,eAAe;AAAA,UACtB,OAAO,eAAe;AAAA,UACtB,OAAO,eAAe;AAAA,UACtB,OAAO,eAAe;AAAA,QACxB;AAAA,MACF;AACA,YAAM,mBAAmB;AAAA,QACvB,eAAe,OAAO,gBAAgB;AAAA,MACxC;AACA,UAAI;AACJ,UAAI,OAAO,WAAW,QAAQ;AAC5B,gBAAQ,IAAI,6BAA6B;AACzC,oBAAY,MAAW,gBAAW,OAAO,UAAU,QAAQ,OAAO;AAAA,MACpE;AAEA,YAAM,SAASC,MAAK;AAAA,QAClB,MAAM;AAAA,UACJ;AAAA,UACA;AAAA,UACA;AAAA,UACA,OAAO;AAAA,UACP,OAAO;AAAA,UACP,OAAO;AAAA,UACP,OAAO;AAAA,QACT;AAAA,MACF;AAEA,YAAM,OAAO,OAAO,cAAc,SAAY,OAAO,YAAY;AACjE,aAAO,OAAO,MAAM,aAAa,MAAM;AACrC,gBAAQ,IAAI,0BAA0B,IAAI,EAAE;AAAA,MAC9C,CAAC;AAED,aAAO;AAAA,IACT;AAAA,IACA,YAAY,OAAO,WAAW;AAC5B,aAAO,IAAI,QAAc,CAACC,aAAY;AACpC,eAAO,MAAM,MAAMA,SAAQ,CAAC;AAAA,MAC9B,CAAC;AAAA,IACH;AAAA,EACF,CAAC;AAED,cAAY,MAAM;AACpB;;;AE1cA,SAAS,gBAAAC,qBAAqC;;;ACC9C;;;ACDA,SAAS,aAAa;;;ACWf,IAAM,yBAAyB;AAKtC,SAAS,eAAe,IAA4C;AAClE,SACE,OAAO,OAAO,YACd,OAAO,QACP,cAAc,MACd,OAAO,GAAG,aAAa;AAE3B;AAKA,SAASC,cAAa,IAA4B;AAChD,SACE,OAAO,OAAO,YACd,OAAO,QACP,aAAa,MACb,MAAM,QAAQ,GAAG,OAAO;AAE5B;AAKA,SAAS,YAAY,IAA+B;AAClD,SACE,OAAO,OAAO,YACd,OAAO,QACP,iBAAiB,MACjB,OAAO,GAAG,gBAAgB;AAE9B;AAMO,SAAS,gBAAgB,KAAa,OAAyB;AACpE,QAAM,gBACJ;AAEF,MAAI,OAAO,UAAU,YAAY,cAAc,KAAK,KAAK,GAAG;AAC1D,WAAO,IAAI,KAAK,KAAK;AAAA,EACvB;AAEA,SAAO;AACT;AASA,SAAS,WAAW,UAAoB,aAAuC;AAG7E,MACE,YAAY;AAAA,IACV,CAAC,CAAC,KAAK,KAAK,MAAM,QAAQ,0BAA0B,UAAU;AAAA,EAChE,GACA;AACA,WAAO;AAAA,EACT;AAEA,MAAI,OAAO,aAAa,UAAU;AAGhC,WAAO,aAAa,cAAc,SAAS,WAAW,WAAW;AAAA,EACnE;AAEA,MAAI,eAAe,QAAQ,GAAG;AAC5B,WAAO,WAAW,SAAS,UAAU,WAAW;AAAA,EAClD;AACA,SAAO;AACT;AAqBA,SAAS,oBAAoB,SAAmC;AAC9D,QAAM,YAA4B,CAAC;AAEnC,aAAW,UAAU,SAAS;AAC5B,UAAM,WAAW,OAAO;AAGxB,QAAI,WAAW,UAAU,OAAO,WAAW,GAAG;AAC5C,gBAAU,KAAK,CAAC,OAAO,MAAM,CAAC,WAAW,CAAC,CAAC;AAC3C;AAAA,IACF;AAGA,QAAI,OAAO,aAAa,YAAY,aAAa,MAAM;AAErD,UAAI,gBAA0B;AAC9B,UAAI,eAAe,QAAQ,GAAG;AAC5B,wBAAgB,SAAS;AAAA,MAC3B;AAGA,UAAIA,cAAa,aAAa,GAAG;AAC/B,cAAM,kBAAkB,oBAAoB,cAAc,OAAO;AACjE,YAAI,gBAAgB,SAAS,GAAG;AAC9B,oBAAU,KAAK,CAAC,OAAO,MAAM,eAAe,CAAC;AAAA,QAC/C;AACA;AAAA,MACF;AAIA,UAAI,YAAY,aAAa,GAAG;AAC9B,cAAM,cAAc,cAAc;AAClC,YAAIA,cAAa,WAAW,GAAG;AAC7B,gBAAM,kBAAkB,oBAAoB,YAAY,OAAO;AAC/D,cAAI,gBAAgB,SAAS,GAAG;AAC9B,sBAAU,KAAK,CAAC,OAAO,MAAM,eAAe,CAAC;AAAA,UAC/C;AACA;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AASA,SAAS,cAAc,OAAY,UAAyB;AAC1D,MAAI,aAAa,aAAa;AAC5B,QAAI,OAAO,UAAU,UAAU;AAC7B,UAAI;AACF,cAAM,OAAO,IAAI,KAAK,KAAK;AAC3B,eAAO,CAAC,MAAM,KAAK,QAAQ,CAAC,IAAI,OAAO;AAAA,MACzC,QAAQ;AACN,eAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,SAAO;AACT;AAQA,SAAS,oBAAoB,KAAU,WAAiC;AACtE,MAAI,CAAC,OAAO,OAAO,QAAQ,UAAU;AACnC;AAAA,EACF;AAEA,aAAW,CAAC,WAAW,QAAQ,KAAK,WAAW;AAC7C,QAAI,EAAE,aAAa,MAAM;AACvB;AAAA,IACF;AAEA,QAAI,MAAM,QAAQ,QAAQ,GAAG;AAE3B,UAAI,SAAS,SAAS,KAAK,OAAO,SAAS,CAAC,MAAM,UAAU;AAE1D,cAAM,aAAa;AACnB,mBAAW,aAAa,YAAY;AAClC,cAAI,SAAS,IAAI,cAAc,IAAI,SAAS,GAAG,SAAS;AAAA,QAC1D;AAAA,MACF,OAAO;AAEL,cAAM,kBAAkB;AACxB,cAAM,aAAa,IAAI,SAAS;AAEhC,YAAI,MAAM,QAAQ,UAAU,GAAG;AAE7B,qBAAW,QAAQ,YAAY;AAC7B,gCAAoB,MAAM,eAAe;AAAA,UAC3C;AAAA,QACF,WAAW,cAAc,OAAO,eAAe,UAAU;AAEvD,8BAAoB,YAAY,eAAe;AAAA,QACjD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAcO,SAAS,+BACd,SAC4B;AAC5B,MAAI,CAAC,WAAW,QAAQ,WAAW,GAAG;AACpC,WAAO;AAAA,EACT;AACA,QAAM,YAAY,oBAAoB,OAAO;AAC7C,SAAO,UAAU,SAAS,IAAI,YAAY;AAC5C;AAiBO,SAAS,iBACd,MACA,gBACM;AACN,MAAI,CAAC,kBAAkB,CAAC,MAAM;AAC5B;AAAA,EACF;AAEA,sBAAoB,MAAM,cAAc;AAC1C;;;ADtKO,IAAM,iBAAiB;AAAA,EAC5B,OAAO;AAAA,EACP,KAAK;AAAA,EACL,WAAW;AAAA,EACX,MAAM;AACR;AAKO,IAAM,qBAAuC;AAAA,EAClD,WAAW,eAAe;AAAA,EAC1B,SAAS;AAAA,EACT,gBAAgB;AAAA,EAChB,MAAM;AACR;;;AVlFA;AAQA,SAAS,eAAuB;AAC9B,SAAOC,SAAQ,IAAI,oBAAoB;AACzC;AAoBA,IAAM,iBAAiB;AAAA,EACrB,QAA
Q,oBAAI,IAA4B;AAAA,EACxC,SAAS,oBAAI,IAAyB;AAAA,EACtC,YAAY,oBAAI,IAA4B;AAAA,EAC5C,MAAM,oBAAI,IAAsB;AAAA,EAChC,cAAc,oBAAI,IAAyB;AAAA,EAC3C,WAAW,oBAAI,IAAsB;AAAA,EACrC,SAAS,oBAAI,IAAoB;AAAA,EACjC,mBAAmB,oBAAI,IAAmC;AAAA,EAC1D,aAAa,oBAAI,IAAkB;AACrC;AAIA,IAAM,yBAAyB,KAAK,KAAK,KAAK;AA4Y9C,SAAS,gBACP,QAC8B;AAC9B,SAAO,YAAY,UAAU,OAAO;AACtC;AAMA,SAAS,oBACP,QAOoD;AACpD,MAAI,EAAE,YAAY,SAAS;AACzB,WAAO;AAAA,EACT;AAEA,QAAM,SAAS,OAAO;AAEtB,SACE,8DACA,gFACA,oFACA,4EACA,kFACA;AAEJ;AAKA,SAAS,mBAAmB,QAA4C;AAEtE,MAAI,EAAE,YAAY,SAAS;AACzB;AAAA,EACF;AAGA,SAAO,OAAO;AAChB;AAKA,SAAS,yBACP,QACA,QAC0B;AAC1B,UAAQ,QAAQ;AAAA,IACd;AACE,aAAO,EAAE,QAAQ,YAAY;AAAA,IAE/B;AACE,aAAO,EAAE,QAAQ,uBAAuB;AAAA,IAE1C,oDAA2C;AACzC,YAAM,kBAAkB;AACxB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,KAAK,gBAAgB;AAAA,QACrB,WAAW,gBAAgB;AAAA,MAC7B;AAAA,IACF;AAAA,IAEA,gDAAyC;AACvC,YAAM,gBAAgB;AACtB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,SAAS,cAAc;AAAA,MACzB;AAAA,IACF;AAAA,IAEA,sDAA4C;AAC1C,YAAM,mBAAmB;AACzB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,MAAM,iBAAiB;AAAA,MACzB;AAAA,IACF;AAAA,IAEA,wEAAqD;AACnD,YAAM,kBAAkB;AACxB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,MAAM,gBAAgB;AAAA,QACtB,KAAK,gBAAgB;AAAA,MACvB;AAAA,IACF;AAAA,IAEA;AACE,aAAO;AAAA,EACX;AACF;AAKA,SAAS,8BACP,QACA,QAC0B;AAE1B,MAAI,CAAC,oBAAoB,MAAM,GAAG;AAChC,WAAO;AAAA,EACT;AAEA,UAAQ,QAAQ;AAAA,IACd,sDAA4C;AAC1C,YAAM,mBAAmB;AACzB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,YAAY,iBAAiB;AAAA,QAC7B,aAAa,iBAAiB;AAAA,MAChC;AAAA,IACF;AAAA,IAEA,wEAAqD;AACnD,YAAM,mBACJ;AACF,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,YAAY,iBAAiB;AAAA,QAC7B,aAAa,iBAAiB;AAAA,QAC9B,KAAK,iBAAiB;AAAA,QACtB,WAAW,iBAAiB;AAAA,MAC9B;AAAA,IACF;AAAA,IAEA,4EAAuD;AACrD,YAAM,mBACJ;AACF,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,YAAY,iBAAiB;AAAA,QAC7B,aAAa,iBAAiB;AAAA,MAChC;AAAA,IACF;AAAA,IAEA,oEAAmD;AACjD,YAAM,mBAAmB;AACzB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,YAAY,iBAAiB;AAAA,QAC7B,aAAa,iBAAiB;AAAA,QAC9B,SAAS,iBAAiB;AAAA,MAC5B;AAAA,IACF;AAAA,IAEA,0EAAsD;AACpD,YAAM,mBAAmB;AACzB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,YAAY,iBAAiB;AAAA,QAC7B,aAAa,iBAAiB;AAAA,QAC9B,MAAM,iBAAiB;AAAA,MACzB;AAAA,IACF;AAAA,IAEA,4FAA+D;AAC7D,YAAM,mBAAmB;AACzB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,YAAY,iBAAiB;AAAA,QAC7B,aAAa,iBAAiB;AAAA,QAC9B,MAAM,iBAAiB;AAAA,QACvB,KAAK,iBAAiB;AAAA,MACxB;AAAA,IACF;AAAA,IAEA;AACE,aAAO;AAAA,EACX;AACF;AAMA,SAAS,2BACP,QAC0B;AAC1B,MAAI,CAAC,gBAAgB,MAAM,GAAG;AAC5B,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,QAAQ,OAAO;AAAA,IACf,QAAQ,OAAO;AAAA,IACf,gBAAgB,OAAO;AAAA,IACvB,oBAAoB,OAAO;AAAA,IAC3B,aAAa,OAAO;AAAA,IACpB,SAAS,OAAO;AAAA,EAClB;AACF;AAKA,SAAS,sBACP,QAC0B;AAC1B,MAAI,EAAE,YAAY,WAAW,OAAO,0BAAiC;AACnE,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,MAAM,OAAO;AAAA,IACb,QAAQ,OAAO;AAAA,IACf,gBAAgB,OAAO;AAAA,IACvB,oBAAoB,OAAO;AAAA,IAC3B,aAAa,OAAO;AAAA,IACpB,mBAAmB,OAAO;AAAA,IAC1B,4BAA4B,OAAO;AAAA,EACrC;AACF;AAKA,SAAS,0BACP,QAC0B;AAC1B,MAAI,EAAE,YAAY,WAAW,OAAO,kCAAqC;AACvE,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,gBAAgB,OAAO;AAAA,IACvB,aAAa,OAAO;AAAA,IACpB,WAAW,OAAO;AAAA,IAClB,SAAS,OAAO;AAAA,IAChB,SAAS,OAAO;AAAA,IAChB,SAAS,OAAO;AAAA,IAChB,SAAS,OAAO;AAAA,IAChB,UAAU,OAAO;AAAA,IACjB,UAAU,OAAO;AAAA,IACjB,WAAW,OAAO;AAAA,IAClB,WAAW,OAAO;AAAA,IAClB,YAAY,OAAO;AAAA,EACrB;AACF;AAKA,SAAS,+BACP,QAC0B;AAC1B,MACE,EAAE,YAAY,WACd,OAAO,4CACP;AACA,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,SAAS,OAAO;AAAA,IAChB,gBAAgB,OAAO;AAAA,IACvB,aAAa,OAAO;AAAA,IACpB,aAAa,OAAO;AAAA,IACpB,YAAY,OAAO;AAAA,EACrB;AACF;AAKA,SAAS,6BACP,QAC0B;AAC1B,MAAI,EAAE,YAAY,WAAW,OAAO,wCAAwC;AAC1E,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,MAAM,OAAO;AAAA,IACb,QAAQ,OAAO;AAAA,IACf,gBAAgB,OAAO;AAAA,IACvB,oBAAoB,OAAO;AAAA,IAC3B,aAAa,OAAO;AAAA,EACtB;AACF;AAKA,SAAS,yBAC
P,QAC0B;AAC1B,MAAI,EAAE,YAAY,WAAW,OAAO,gCAAoC;AACtE,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,YAAY,OAAO;AAAA,IACnB,WAAW,OAAO;AAAA,IAClB,WAAW,OAAO;AAAA,IAClB,QAAQ,OAAO;AAAA,EACjB;AACF;AAKA,SAAS,iCACP,QAC0B;AAC1B,QAAM,SAAS,mBAAmB,MAAM;AAGxC,QAAM,cAAc,yBAAyB,QAAQ,MAAM;AAC3D,MAAI,aAAa;AACf,WAAO;AAAA,EACT;AAGA,QAAM,mBAAmB,8BAA8B,QAAQ,MAAM;AACrE,MAAI,kBAAkB;AACpB,WAAO;AAAA,EACT;AAGA,MAAI,oCAAsC;AACxC,WAAO,2BAA2B,MAAM;AAAA,EAC1C;AAGA,MAAI,0BAAiC;AACnC,WAAO,sBAAsB,MAAM;AAAA,EACrC;AAGA,MAAI,kCAAqC;AACvC,WAAO,0BAA0B,MAAM;AAAA,EACzC;AAGA,MAAI,4CAA0C;AAC5C,WAAO,+BAA+B,MAAM;AAAA,EAC9C;AAGA,MAAI,wCAAwC;AAC1C,WAAO,6BAA6B,MAAM;AAAA,EAC5C;AAGA,MAAI,gCAAoC;AACtC,WAAO,yBAAyB,MAAM;AAAA,EACxC;AAEA,SAAO;AACT;AAEO,IAAM,aAAa,CAAC,aAAoC;AAC7D,QAAM,SAAuC,CAAC;AAC9C,QAAM,SAAwC,CAAC;AAC/C,QAAM,aAA+C,CAAC;AACtD,QAAM,OAAmC,CAAC;AAC1C,QAAM,eAAmD,CAAC;AAC1D,QAAM,YAA6C,CAAC;AACpD,QAAM,UAAyC,CAAC;AAChD,QAAM,oBAA6D,CAAC;AACpE,QAAM,cAAiD,CAAC;AAExD,WAAS,OAAO,QAAQ,CAAC,UAAU;AACjC,UAAM,KACJ,MAAM,OAAO,UACX,GAAG,MAAM,IAAI,IAAI,MAAM,OAAO,OAAO,KACrC,MAAM;AAEV,QAAI,WAAY,MAAc;AAC9B,QAAI,CAAC,YAAY,MAAM,UAAW,MAAc,gBAAgB;AAC9D,iBAAY,MAAc,eAAe;AAAA,IAC3C;AAEA,UAAM,eACJ,iCAAiC,MAAM,MAAM;AAG/C,QAAI,gBAAuD;AAE3D,QAAI,MAAM,OAAO,UAAU;AAEzB,sBAAgB,OAAO,QAAQ,MAAM,OAAO,QAAQ,EAAE;AAAA,QACpD,CAAC,KAAK,CAAC,KAAK,KAAK,MAAM;AACrB,cAAI,UAAU,QAAW;AACvB,gBAAI,GAAG,IAAI,OAAO,KAAK;AAAA,UACzB;AACA,iBAAO;AAAA,QACT;AAAA,QACA,CAAC;AAAA,MACH;AAAA,IACF;AAGA,QAAI,cAAc,WAAW,WAAW;AACtC,UAAI,CAAC,eAAe;AAClB,wBAAgB,CAAC;AAAA,MACnB;AAEA,UAAI,CAAC,cAAc,MAAM;AACvB,sBAAc,OAAO;AAAA,MACvB;AAAA,IACF;AAIA,UAAM,mBACJ,mBAAmB,MAAM,UACzB,MAAM,QAAQ,MAAM,OAAO,aAAa,KACxC,MAAM,OAAO,cAAc,SAAS;AACtC,UAAM,uBACJ,uBAAuB,MAAM,UAC7B,OAAO,MAAM,OAAO,sBAAsB,YAC1C,MAAM,OAAO,kBAAkB,SAAS;AAC1C,QAAI,oBAAoB,sBAAsB;AAC5C,YAAM,IAAI;AAAA,QACR,SAAS,MAAM,IAAI;AAAA,MACrB;AAAA,IACF;AACA,UAAM,UACJ,wBAAwB,uBAAuB,MAAM,SAClD,MAAM,OAAO,qBAAqB,KACnC,mBAAmB,MAAM,SAAU,MAAM,OAAO,iBAAiB,CAAC,IAClE,CAAC;AAEL,WAAO,EAAE,IAAI;AAAA,MACX,MAAM,MAAM;AAAA,MACZ,SAAS,MAAM;AAAA,MACf;AAAA,MACA,aACE,iBAAiB,MAAM,SAAS,MAAM,OAAO,cAAc;AAAA,MAC7D,oBACE,wBAAwB,MAAM,SAC5B,MAAM,OAAO,qBACb;AAAA,MACJ,sBACE,0BAA0B,MAAM,SAC9B,MAAM,OAAO,uBACb;AAAA,MACJ;AAAA,MACA,SAAS,MAAM,OAAO;AAAA,MACtB;AAAA,MACA,WAAW,MAAM,OAAO;AAAA;AAAA,MAExB,eACE,iBAAiB,OAAO,KAAK,aAAa,EAAE,SAAS,IACnD,gBACA;AAAA,MACJ,SACE,MAAM,OAAO,SAAS,IAAI,CAAC,OAAO;AAAA,QAChC,GAAG;AAAA,QACH,aAAa,EAAE,gBAAgB,SAAY,IAAI,EAAE;AAAA,QACjD,WAAW,EAAE,cAAc,SAAY,CAAC,IAAI,EAAE;AAAA,MAChD,EAAE,KAAK,CAAC;AAAA,MACV,KAAK,MAAM,OAAO;AAAA,MAClB,UAAU,MAAM,OAAO;AAAA,MACvB,SAAS,MAAM,OAAO;AAAA,IACxB;AAAA,EACF,CAAC;AAED,WAAS,QAAQ,QAAQ,CAAC,WAAW;AAEnC,QAAI,WAAW,OAAO;AACtB,QAAI,CAAC,YAAY,OAAO,UAAW,OAAe,gBAAgB;AAChE,iBAAY,OAAe,eAAe;AAAA,IAC5C;AACA,UAAM,wBAAkC,CAAC;AACzC,UAAM,YAAwB,CAAC;AAE/B,WAAO,iBAAiB,QAAQ,CAAC,YAAY,oBAAoB;AAC/D,iBAAW,QAAQ,CAAC,CAAC,aAAa,GAAG,MAAM,MAAM;AAC/C,8BAAsB,KAAK;AAAA,UACzB,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS,OAAO;AAAA,UAChB,UAAU,OAAO;AAAA,UACjB,YAAY,OAAO;AAAA,QACrB,CAAC;AAAA,MACH,CAAC;AAAA,IACH,CAAC;AAED,WAAO,WAAW,QAAQ,CAAC,aAAa;AACtC,gBAAU,KAAK;AAAA,QACb,SAAS,SAAS,OAAO;AAAA,QACzB,YAAY,SAAS,OAAO;AAAA,MAC9B,CAAC;AAAA,IACH,CAAC;AAED,WAAO,OAAO,IAAI,IAAI;AAAA,MACpB,MAAM,OAAO;AAAA,MACb,SAAS,OAAO;AAAA,MAChB,aAAa,OAAO,OAAO,aAAa;AAAA,MACxC,oBAAoB,OAAO,OAAO,aAAa,OAAO;AAAA,MACtD,iBAAiB,OAAO,OAAO,mBAAmB;AAAA,MAClD,gBAAgB,OAAO,OAAO,eAAe;AAAA,MAC7C,SAAS,OAAO,OAAO;AAAA,MACvB;AAAA,MACA,mBAAmB,OAAO,6BAA6B;AAAA,MACvD;AAAA,MACA;AAAA,MACA,WAAW,OAAO,OAAO;AAAA,MACzB,cAAc,OAAO,OAAO;AAAA,IAC9B;AAAA,EACF,CAAC;AAED,WAAS,WAAW,QAAQ,CAAC,QAAQ;AAEnC,QAAI,WAAW,IAAI;AACnB,QAAI,CAAC,YAAY,IAAI,UAAW,IAAY,gBAA
gB;AAC1D,iBAAY,IAAY,eAAe;AAAA,IACzC;AACA,eAAW,IAAI,IAAI,IAAI;AAAA,MACrB,MAAM,IAAI;AAAA,MACV,SAAS,IAAI;AAAA,MACb,SAAS,IAAI,OAAO;AAAA,MACpB,MAAM,IAAI,OAAO;AAAA,MACjB,SAAS;AAAA,QACP,MAAM;AAAA,QACN,MAAM,IAAI,OAAO,YAAY;AAAA,MAC/B;AAAA,MACA,iBAAiB,IAAI,OAAO,iBAAiB;AAAA,MAC7C;AAAA,MACA,QAAQ,IAAI;AAAA,MACZ,kBAAkB,IAAI;AAAA,IACxB;AAAA,EACF,CAAC;AAED,WAAS,KAAK,QAAQ,CAAC,KAAK,QAAQ;AAClC,UAAM,UACJ,IAAI,OAAO,UAAU,GAAG,IAAI,IAAI,IAAI,IAAI,OAAO,OAAO,KAAK,IAAI;AACjE,SAAK,OAAO,IAAI;AAAA,MACd,MAAM,IAAI;AAAA,MACV,aAAa,IAAI;AAAA,MACjB,gBAAgB,IAAI;AAAA,MACpB,SAAS,IAAI,OAAO;AAAA,MACpB,MAAM,IAAI,OAAO;AAAA,MACjB,UAAU,IAAI;AAAA,IAChB;AAAA,EACF,CAAC;AAED,WAAS,aAAa,QAAQ,CAAC,gBAAgB;AAC7C,iBAAa,YAAY,IAAI,IAAI;AAAA,MAC/B,MAAM,YAAY;AAAA,MAClB,OAAO,YAAY;AAAA,MACnB,UAAU,YAAY;AAAA,MACtB,YAAY,YAAY;AAAA,MACxB,YAAY,YAAY;AAAA,MACxB,cAAc,YAAY;AAAA,MAE1B,eAAe,YAAY,cAAc,IAAI,CAAC,MAAM;AAClD,YAAI,EAAE,SAAS,aAAa;AAC1B,gBAAM,QAAQ;AACd,gBAAM,KACJ,MAAM,OAAO,UACX,GAAG,MAAM,IAAI,IAAI,MAAM,OAAO,OAAO,KACrC,MAAM;AACV,iBAAO;AAAA,YACL;AAAA,YACA,MAAM;AAAA,UACR;AAAA,QACF,WAAW,EAAE,SAAS,eAAe;AACnC,gBAAM,WAAW;AACjB,iBAAO;AAAA,YACL,IAAI,SAAS;AAAA,YACb,MAAM;AAAA,UACR;AAAA,QACF,OAAO;AACL,gBAAM,IAAI,MAAM,yCAAyC,CAAC,EAAE;AAAA,QAC9D;AAAA,MACF,CAAC;AAAA,MACD,cAAc,YAAY,aAAa,IAAI,CAAC,MAAM;AAChD,YAAI,EAAE,SAAS,aAAa;AAC1B,gBAAM,QAAQ;AACd,gBAAM,KACJ,MAAM,OAAO,UACX,GAAG,MAAM,IAAI,IAAI,MAAM,OAAO,OAAO,KACrC,MAAM;AACV,iBAAO;AAAA,YACL;AAAA,YACA,MAAM;AAAA,UACR;AAAA,QACF,WAAW,EAAE,SAAS,eAAe;AACnC,gBAAM,WAAW;AACjB,iBAAO;AAAA,YACL,IAAI,SAAS;AAAA,YACb,MAAM;AAAA,UACR;AAAA,QACF,OAAO;AACL,gBAAM,IAAI,MAAM,yCAAyC,CAAC,EAAE;AAAA,QAC9D;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF,CAAC;AAED,WAAS,UAAU,QAAQ,CAAC,aAAa;AACvC,cAAU,SAAS,IAAI,IAAI;AAAA,MACzB,MAAM,SAAS;AAAA,MACf,SAAS,SAAS,OAAO;AAAA,MACzB,SAAS,SAAS,OAAO;AAAA,MACzB,UAAU,SAAS,OAAO;AAAA,IAC5B;AAAA,EACF,CAAC;AAED,WAAS,QAAQ,QAAQ,CAAC,WAAW;AACnC,YAAQ,OAAO,IAAI,IAAI;AAAA,MACrB,MAAM,OAAO;AAAA,MACb,WAAW,OAAO,OAAO,aAAa;AAAA,MACtC,UAAU,OAAO,OAAO;AAAA,IAC1B;AAAA,EACF,CAAC;AAGD,WAAS,kBAAkB,QAAQ,CAAC,OAAO;AACzC,sBAAkB,GAAG,IAAI,IAAI;AAAA,MAC3B,MAAM,GAAG;AAAA,MACT,WAAW,GAAG;AAAA,MACd,cAAc,GAAG;AAAA,MACjB,aAAa,GAAG,YAAY;AAAA,MAC5B,gBAAgB,GAAG,YAAY,OAAO;AAAA,MACtC,YAAY,GAAG;AAAA,IACjB;AAAA,EACF,CAAC;AAGD,WAAS,YAAY,QAAQ,CAAC,SAAS;AACrC,gBAAY,KAAK,IAAI,IAAI;AAAA,MACvB,MAAM,KAAK;AAAA,MACX,WAAW,KAAK;AAAA,MAChB,cAAc,KAAK;AAAA,MACnB,YAAY,KAAK;AAAA,IACnB;AAAA,EACF,CAAC;AAED,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAQO,IAAM,mBAAmB,MAC7B,WAAmB;AAGtB,IAAI,iBAAiB,MAAM,QAAW;AACpC,EAAC,WAAmB,iBAAiB;AACvC;AAWO,IAAM,oBAAoB,YAAY;AAC3C,YAAU;AAEV,UAAQ;AAAA,IACN;AAAA,IACA,KAAK,UAAU,WAAW,iBAAiB,CAAC,CAAC;AAAA,IAC7C;AAAA,EACF;AACF;AAEA,IAAM,YAAY,MAAM;AAEtB,QAAM,WAAW,iBAAiB;AAClC,WAAS,OAAO,MAAM;AACtB,WAAS,QAAQ,MAAM;AACvB,WAAS,WAAW,MAAM;AAC1B,WAAS,KAAK,MAAM;AACpB,WAAS,aAAa,MAAM;AAC5B,WAAS,UAAU,MAAM;AACzB,WAAS,QAAQ,MAAM;AACvB,WAAS,kBAAkB,MAAM;AACjC,WAAS,YAAY,MAAM;AAG3B,QAAM,SAAS,GAAGC,SAAQ,IAAI,CAAC,IAAI,aAAa,CAAC;AACjD,SAAO,KAAK,UAAQ,KAAK,EAAE,QAAQ,CAAC,QAAQ;AAC1C,QAAI,IAAI,WAAW,MAAM,GAAG;AAC1B,aAAO,UAAQ,MAAM,GAAG;AAAA,IAC1B;AAAA,EACF,CAAC;AAED,MAAI;AACF,cAAQ,GAAGA,SAAQ,IAAI,CAAC,IAAI,aAAa,CAAC,WAAW;AAAA,EACvD,SAAS,OAAO;AACd,QAAI;AACJ,UAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,QAAI,QAAQ,SAAS,iBAAiB,KAAK,QAAQ,SAAS,WAAW,GAAG;AACxE,aACE;AAAA,IAEJ;AAEA,UAAM,WAAW,GAAG,QAAQ,EAAE,GAAG,OAAO;AACxC,UAAM,QAAQ,iBAAiB,QAAQ,QAAQ;AAC/C,UAAM,IAAI,MAAM,UAAU,EAAE,MAAM,CAAC;AAAA,EACrC;AACF;AAUO,IAAM,wBAAwB,YAAY;AAC/C,YAAU;AAEV,QAAM,WAAW,iBAAiB;AAClC,QAAM,qBAAqB,oBAAI,IAO7B;AAEF,WAAS,QAAQ,QAAQ,CAAC,WAAW;AACnC,
WAAO,iBAAiB,QAAQ,CAAC,YAAY,oBAAoB;AAC/D,iBAAW,QAAQ,CAAC,CAAC,GAAG,WAAW,MAAM,MAAM;AAC7C,cAAM,uBAAuB,GAAG,OAAO,IAAI,IAAI,eAAe,GAAG,OAAO,UAAU,IAAI,OAAO,OAAO,KAAK,EAAE;AAC3G,oBAAY,0BAA0B,oBAAoB,EAAE;AAC5D,2BAAmB,IAAI,sBAAsB;AAAA,UAC3C;AAAA,UACA;AAAA,UACA,OAAO;AAAA,QACT,CAAC;AAAA,MACH,CAAC;AAAA,IACH,CAAC;AAED,WAAO,WAAW,QAAQ,CAAC,aAAa;AACtC,YAAM,sBAAsB,GAAG,OAAO,IAAI,eAAe,SAAS,OAAO,UAAU,IAAI,SAAS,OAAO,OAAO,KAAK,EAAE;AACrH,yBAAmB,IAAI,qBAAqB;AAAA,QAC1C,SAAS;AAAA,QACT,SAAS;AAAA,QACT,OAAO;AAAA,MACT,CAAC;AAAA,IACH,CAAC;AAAA,EACH,CAAC;AAED,SAAO;AACT;AASO,IAAMC,WAAU,YAAY;AACjC,YAAU;AACV,QAAM,eAAe,oBAAI,IAGvB;AAEF,QAAM,WAAW,iBAAiB;AAElC,QAAM,qBAAqB,oBAAI,IAAoB;AACnD,QAAM,2BAA2B,oBAAI,IAGnC;AAEF,WAAS,KAAK,QAAQ,CAAC,KAAK,QAAQ;AAClC,UAAM,UAAU,IAAI,WAAW;AAC/B,iBAAa,IAAI,KAAK,OAAO;AAE7B,QAAI,CAAC,IAAI,OAAO,SAAS;AAEvB,UAAI,CAAC,aAAa,IAAI,IAAI,IAAI,GAAG;AAC/B,qBAAa,IAAI,IAAI,MAAM,OAAO;AAAA,MACpC;AACA,+BAAyB,OAAO,IAAI,IAAI;AACxC,yBAAmB,OAAO,IAAI,IAAI;AAAA,IACpC,WAAW,CAAC,aAAa,IAAI,IAAI,IAAI,GAAG;AAEtC,YAAM,SAAS,mBAAmB,IAAI,IAAI,IAAI,KAAK,KAAK;AACxD,yBAAmB,IAAI,IAAI,MAAM,KAAK;AACtC,UAAI,UAAU,GAAG;AACf,iCAAyB,IAAI,IAAI,MAAM,OAAO;AAAA,MAChD,OAAO;AACL,iCAAyB,OAAO,IAAI,IAAI;AAAA,MAC1C;AAAA,IACF;AAAA,EACF,CAAC;AAGD,2BAAyB,QAAQ,CAAC,SAAS,SAAS;AAClD,QAAI,CAAC,aAAa,IAAI,IAAI,GAAG;AAC3B,mBAAa,IAAI,MAAM,OAAO;AAAA,IAChC;AAAA,EACF,CAAC;AAED,SAAO;AACT;AAgIO,IAAMC,gBAAe,YAAY;AACtC,YAAU;AAEV,QAAM,WAAW,iBAAiB;AAClC,SAAO,SAAS;AAClB;AAEA,SAAS,eACP,MACA,YAC4B;AAC5B,MAAI,KAAK,SAAS,YAAY;AAC5B,WAAO;AAAA,EACT;AAEA,MAAI,KAAK,OAAO,YAAY,QAAQ;AAClC,eAAW,aAAa,KAAK,OAAO,YAAY;AAC9C,YAAM,QAAQ,eAAe,WAAW,UAAU;AAClD,UAAI,OAAO;AACT,eAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAEO,IAAM,qBAAqB,OAChC,cACA,aAC4B;AAC5B,QAAM,YAAY,MAAMA,cAAa;AACrC,QAAM,WAAW,UAAU,IAAI,YAAY;AAC3C,MAAI,CAAC,UAAU;AACb,UAAM,IAAI,MAAM,YAAY,YAAY,YAAY;AAAA,EACtD;AAEA,QAAM,OAAO;AAAA,IACX,SAAS,OAAO;AAAA,IAChB;AAAA,EACF;AACA,MAAI,CAAC,MAAM;AACT,UAAM,IAAI,MAAM,QAAQ,QAAQ,0BAA0B,YAAY,EAAE;AAAA,EAC1E;AAEA,SAAO;AACT;AAEO,IAAMC,cAAa,YAAY;AACpC,YAAU;AACV,SAAO,iBAAiB,EAAE;AAC5B;;;AY98CA;AADA,OAAO,WAAgC;AAGvC,OAAOC,SAAQ;AACf,OAAO,UAAU;AAEjB,IAAM,UAAU,CAAC,KAAa,eAAuB,aAAuB;AAC1E,QAAM,QAAQC,IAAG,YAAY,GAAG;AAEhC,QAAM,QAAQ,CAAC,SAAS;AACtB,QAAIA,IAAG,SAAS,KAAK,KAAK,KAAK,IAAI,CAAC,EAAE,YAAY,GAAG;AACnD,iBAAW,QAAQ,KAAK,KAAK,KAAK,IAAI,GAAG,eAAe,QAAQ;AAAA,IAClE,WAAW,KAAK,SAAS,aAAa,GAAG;AACvC,eAAS,KAAK,KAAK,KAAK,KAAK,IAAI,CAAC;AAAA,IACpC;AAAA,EACF,CAAC;AAED,SAAO;AACT;AAsBA,IAAM,kBAAN,cAA8B,MAAM;AAAA,EAClC,YAAY,SAAiB;AAC3B,UAAM,OAAO;AACb,SAAK,OAAO;AAAA,EACd;AACF;AAGA,IAAMC,kBAAiB,CAAC,YAA8B;AAAA,EACpD,GAAG;AAAA,EACH,QAAQ,OAAO,SAAS,SAAS;AACnC;AAEA,IAAM,eAAe,OAAO,UAA4B,WAAmB;AACzE,aAAW,SAAS,OAAO,OAAO;AAChC,QAAI;AACF,cAAQ,IAAI,8BAA8B,KAAK,EAAE;AACjD,YAAM,SAAS,QAAQ;AAAA,QACrB;AAAA,QACA,qBAAqB;AAAA,UACnB,mBAAmB;AAAA;AAAA,QACrB;AAAA,MACF,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,SAAS,4BAA4B,GAAG;AAAA,QACxC,cAAc;AAAA,MAChB,CAAC;AACD,UAAI,OAAO,KAAK,UAAU,GAAG,EAAE,SAAS,eAAe,GAAG;AACxD,cAAM,IAAI,gBAAgB,IAAI,SAAS,CAAC;AAAA,MAC1C;AAAA,IACF;AAAA,EACF;AACF;AAEA,IAAM,eAAe,OAAO,UAA4B,WAAmB;AACzE,aAAW,SAAS,OAAO,UAAU;AACnC,QAAI;AACF,cAAQ,IAAI,8BAA8B,KAAK,EAAE;AACjD,YAAM,SAAS,QAAQ;AAAA,QACrB;AAAA,QACA,qBAAqB;AAAA,UACnB,mBAAmB;AAAA;AAAA,QACrB;AAAA,MACF,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,SAAS,4BAA4B,GAAG;AAAA,QACxC,cAAc;AAAA,MAChB,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAEA,IAAM,cAAc,OAAO,SAA0B;AACnD,QAAM,aAAa,KAAK,UAAU,KAAK,MAAM;AAC7C,QAAM,aAAa,KAAK,UAAU,KAAK,MAAM;AAC/C;AAEO,IAAM,YAAY,OAAO,WAAyB;AACvD,QAAM,WAAW,oBAAoBA,gBAAe,OAAO,gBAAgB,CAAC;AAC5E,UAAQ,IAAI,WAAW;AAEvB,QAAM,cAAc,QAA
Q,OAAO,WAAW,OAAO,CAAC,CAAC;AACvD,QAAM,kBAAkB,YAAY;AACpC,UAAQ,IAAI,SAAS,eAAe,eAAe;AAEnD,QAAM,QAA0C,MAAM,QAAQ,aAAa,CAAC;AAE5E,QAAM,MAAM,CAAC,KAAY,SAA0B;AACjD,QAAI,OAAO,KAAK,UAAU,GAAG;AAC3B,UAAI,eAAe,iBAAiB;AAClC,cAAM,KAAK,EAAE,GAAG,MAAM,SAAS,KAAK,UAAU,EAAE,CAAC;AAAA,MACnD;AAAA,IACF;AAAA,EACF,CAAC;AAED,aAAWC,SAAQ,aAAa;AAC9B,YAAQ,IAAI,oBAAoBA,KAAI,EAAE;AAEtC,QAAI;AACF,YAAM,SAAS,UAAQA,KAAI,EAAE;AAC7B,YAAM,KAAK;AAAA,QACT;AAAA,QACA;AAAA,QACA,SAAS;AAAA,MACX,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,SAAS,gCAAgCA,KAAI,KAAK,GAAG;AAAA,QACrD,cAAc;AAAA,MAChB,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,CAAC,MAAM,KAAK,GAAG;AACpB,UAAM,IAAI,QAAQ,CAACC,aAAY,WAAWA,UAAS,GAAI,CAAC;AAAA,EAC1D;AACF;;;ACtHA;AAxBA,SAAS,YAAAC,iBAAgB;AACzB,SAAS,WAAAC,gBAAe;AAoBxB,SAAS,UAAAC,eAAc;AACvB,YAAYC,cAAa;AACzB,YAAYC,WAAU;AArBtB,IAAM,EAAE,OAAAC,OAAM,IAAIC;AAuClB,IAAM,WAAmB,aAAI;AAC7B,IAAM,0BAA0B;AAChC,IAAM,mCAAmC;AACzC,IAAM,uBAAuB;AAC7B,IAAM,2BAA2B;AACjC,IAAM,8BAA8B;AACpC,IAAM,oCAAoC;AAE1C,IAAM,0BAA0B;AAoEhC,IAAM,4BACI,aAAI,4BACV,SAAiB,aAAI,2BAA2B,EAAE,IAClD;AAKG,IAAM,aAAyC,CAAC,QAAQ;AAC7D,QAAM,MAAW,cAAQ;AAAA,IACvB,MAAM,SAAiB,aAAI,yBAAyB,QAAQ,EAAE;AAAA,IAC9D,QAAQ;AAAA,IACR,MAAM;AAAA,EACR,CAAC;AAED,MAAI,GAAG,SAAS,CAAC,QAAe;AAC9B,YAAQ;AAAA,MACN,SAAS,IAAI,IAAI;AAAA,MACjB,IAAI;AAAA,IACN;AAAA,EACF,CAAC;AAED,MAAI,MAAM,KAAK,UAAU,EAAE,GAAG,IAAI,CAAC,CAAC;AACpC,MAAI,IAAI;AACV;AAKA,IAAM,gBAAgB,OACpBC,SACA,aACkB;AAClB,MAAI;AACF,IAAAA,QAAO,IAAI,wBAAwB;AACnC,UAAM,SAAS,QAAQ;AACvB,IAAAA,QAAO,IAAI,wBAAwB;AAAA,EACrC,SAAS,OAAO;AACd,IAAAA,QAAO,MAAM,6BAA6B;AAC1C,QAAI,iBAAiB,OAAO;AAC1B,eAASA,SAAQ,KAAK;AAAA,IACxB;AACA,UAAM;AAAA,EACR;AACF;AAaA,IAAM,eAAe,OACnBA,SACA,aACkB;AAClB,QAAM,SAAS,WAAW;AAC1B,EAAAA,QAAO,IAAI,8BAA8B;AAC3C;AAcA,IAAM,eAAe,OACnBA,SACA,UACA,gBACkB;AAClB,MAAI;AAEF,IAAAA,QAAO,IAAI,qBAAqB;AAGhC,UAAM,mBAAmB,MAAM;AAAA,MAC7B,EAAE,QAAQ,YAAY,WAAW;AAAA,MACjC,CAAC,GAAG,MAAM;AAAA,IACZ;AAEA,UAAM,SAAS,MAAM;AAAA,MACnB;AAAA,QACE,OAAO,YAAY;AAAA,QACnB,YAAY;AAAA,MACd;AAAA,IACF,CAAC;AAED,IAAAA,QAAO,IAAI,2BAA2B;AACtC,UAAM,SAAS,WAAW;AAC1B,IAAAA,QAAO,IAAI,8BAA8B;AAAA,EAC3C,SAAS,OAAO;AACd,IAAAA,QAAO,MAAM,mCAAmC,KAAK,EAAE;AAEvD,QAAI;AACF,YAAM,SAAS,WAAW;AAC1B,MAAAA,QAAO,IAAI,mCAAmC;AAAA,IAChD,SAAS,iBAAiB;AACxB,MAAAA,QAAO,MAAM,kCAAkC,eAAe,EAAE;AAAA,IAClE;AAAA,EACF;AACF;AAqBA,IAAM,gBAAgB,OACpBA,SAGA,iCACA,SACA,UACA,mBACmD;AACnD,MAAI,QAAQ,UAAU,UAAa,QAAQ,UAAU,MAAM;AACzD,IAAAA,QAAO,IAAI,6CAA6C;AACxD,WAAO;AAAA,EACT;AAEA,MAAI;AAEF,QAAI,gBAAgB,QAAQ;AAC5B,QACE,iBACA,cAAc,UAAU,KACxB,cAAc,CAAC,MAAM,GACrB;AACA,sBAAgB,cAAc,SAAS,CAAC;AAAA,IAC1C;AAEA,UAAM,aAAa,KAAK,MAAM,cAAc,SAAS,CAAC;AACtD,qBAAiB,YAAY,cAAc;AAC3C,UAAM,kBAAkB,MAAM,QAAQ;AAAA,MACpC,gCAAgC,IAAI,OAAO,CAAC,IAAI,MAAM,MAAM;AAC1D,YAAI;AACF,iBAAO,MAAM,GAAG,UAAU;AAAA,QAC5B,SAAS,GAAG;AAEV,gBAAM,kBAAkB,OAAO;AAE/B,cAAI,iBAAiB;AAEnB,kBAAM,mBAAmB;AAAA,cACvB,gBAAgB;AAAA,gBACd,GAAG;AAAA;AAAA,gBAEH,mBAAmB,QAAQ;AAAA,gBAC3B,gBAAgB,QAAQ;AAAA,gBACxB,mBAAmB,QAAQ;AAAA,cAC7B;AAAA,cACA,cAAc,aAAa,QAAQ,EAAE,UAAU,OAAO,CAAC;AAAA,cACvD,WAAW,aAAa,QAAQ,EAAE,YAAY,OAAO;AAAA,cACrD,UAAU,oBAAI,KAAK;AAAA,cACnB,QAAQ;AAAA,YACV;AAEA,mBAAO;AAAA,cACL,QAAQ;AAAA,cACR,SAAS,0BAA0B,gBAAgB,IAAI,KAAK,aAAa,QAAQ,EAAE,UAAU,OAAO,CAAC,CAAC;AAAA,cACtG,cAAc;AAAA,YAChB,CAAC;AAED,gBAAI;AACF,oBAAM,SAAS,KAAK;AAAA,gBAClB,OAAO,gBAAgB;AAAA,gBACvB,UAAU,CAAC,EAAE,OAAO,KAAK,UAAU,gBAAgB,EAAE,CAAC;AAAA,cACxD,CAAC;AAAA,YACH,SAAS,UAAU;AACjB,cAAAA,QAAO,MAAM,wCAAwC,QAAQ,EAAE;AAAA,YACjE;AAAA,UACF,OAAO;AAEL,mBAAO;AAAA,cACL,QAAQ;AAAA,cACR,SAAS,iDAAiD,aAAa,QAAQ,EAAE,UAAU,OAAO,CAAC,CAAC;AAAA,cACpG,cAAc;AAAA,YAChB,CAAC;AAAA,UACH;AAGA,gBAAM;AAAA,QACR;AAAA,MACF,CAAC;AAAA,IACH;AA
EA,WAAO,gBACJ,IAAI,CAAC,oBAAoB,MAAM;AAC9B,YAAM,CAAC,GAAG,MAAM,IAAI,gCAAgC,CAAC;AACrD,UAAI,oBAAoB;AACtB,YAAI,MAAM,QAAQ,kBAAkB,GAAG;AAMrC,iBAAO,mBACJ,KAAK,EACL,OAAO,CAAC,SAAS,SAAS,UAAa,SAAS,IAAI,EACpD,IAAI,CAAC,UAAU;AAAA,YACd,OAAO,KAAK,UAAU,IAAI;AAAA,YAC1B,eAAe;AAAA,YACf,iBAAiB;AAAA,YACjB,KAAK,OAAO,mBAAmB;AAAA,UACjC,EAAE;AAAA,QACN,OAAO;AACL,iBAAO;AAAA,YACL;AAAA,cACE,OAAO,KAAK,UAAU,kBAAkB;AAAA,cACxC,eAAe;AAAA,cACf,iBAAiB;AAAA,cACjB,KAAK,OAAO,mBAAmB;AAAA,YACjC;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC,EACA,KAAK,EACL,OAAO,CAAC,SAAS,SAAS,UAAa,SAAS,IAAI;AAAA,EACzD,SAAS,GAAG;AAEV,IAAAA,QAAO,MAAM,0BAA0B;AACvC,QAAI,aAAa,OAAO;AACtB,eAASA,SAAQ,CAAC;AAAA,IACpB;AAAA,EACF;AAEA,SAAO;AACT;AAWA,IAAM,6BAA6B,OACjCA,SACA,UACA,UACA,UACqB;AACrB,MAAI,uBAAuB;AAC3B,MAAI,qBAAqB;AACzB,MAAI,YAAY;AAEhB,aAAW,OAAO,UAAU;AAC1B,QAAI,IAAI,OAAO,IAAI,eAAe;AAChC,YAAM,mBAAmB;AAAA,QACvB,gBAAgB;AAAA,UACd,GAAG,IAAI;AAAA;AAAA,UAEP,mBAAmB,IAAI,gBAAgB;AAAA,UACvC,gBAAgB,IAAI,gBAAgB;AAAA,UACpC,mBAAmB,IAAI,gBAAgB;AAAA,QACzC;AAAA,QACA,cAAc,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,QACnE,WAAW,iBAAiB,QAAQ,MAAM,YAAY,OAAO;AAAA,QAC7D,UAAU,oBAAI,KAAK;AAAA,QACnB,QAAQ;AAAA,MACV;AAEA,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,SAAS,iCAAiC,IAAI,IAAI,IAAI,KAAK,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,QACjH,cAAc;AAAA,MAChB,CAAC;AAED,UAAI;AACF,cAAM,SAAS,KAAK;AAAA,UAClB,OAAO,IAAI,IAAI;AAAA,UACf,UAAU,CAAC,EAAE,OAAO,KAAK,UAAU,gBAAgB,EAAE,CAAC;AAAA,QACxD,CAAC;AACD,QAAAA,QAAO,IAAI,8BAA8B,IAAI,IAAI,IAAI,EAAE;AACvD;AAAA,MACF,SAAS,UAAU;AACjB,QAAAA,QAAO,MAAM,0BAA0B,QAAQ,EAAE;AACjD;AAAA,MACF;AAAA,IACF,WAAW,CAAC,IAAI,KAAK;AACnB;AACA,MAAAA,QAAO,KAAK,mDAAmD;AAAA,IACjE,OAAO;AACL;AACA,MAAAA,QAAO,KAAK,0DAA0D;AAAA,IACxE;AAAA,EACF;AAGA,QAAM,qBACJ,yBAAyB,SAAS,UAClC,uBAAuB,KACvB,cAAc;AAEhB,MAAI,oBAAoB;AACtB,IAAAA,QAAO;AAAA,MACL,OAAO,oBAAoB;AAAA,IAC7B;AAAA,EACF,WAAW,uBAAuB,GAAG;AAEnC,IAAAA,QAAO;AAAA,MACL,wBAAwB,oBAAoB,IAAI,SAAS,MAAM;AAAA,IACjE;AACA,QAAI,qBAAqB,GAAG;AAC1B,MAAAA,QAAO;AAAA,QACL,gCAAgC,kBAAkB;AAAA,MACpD;AAAA,IACF;AACA,QAAI,YAAY,GAAG;AACjB,MAAAA,QAAO,MAAM,GAAG,SAAS,mCAAmC;AAAA,IAC9D;AAAA,EACF;AAEA,SAAO;AACT;AAgBA,IAAM,eAAe,OACnBA,SACA,SACA,aACA,UACA,aACkB;AAClB,MAAI,SAAS,WAAW,EAAG;AAE3B,MAAI;AAEF,UAAM,SAAS,KAAK;AAAA,MAClB,OAAO,YAAY;AAAA,MACnB;AAAA,IACF,CAAC;AAID,eAAW,OAAO,UAAU;AAC1B,cAAQ,SAASC,QAAO,WAAW,IAAI,OAAO,MAAM;AAAA,IACtD;AACA,YAAQ,aAAa,SAAS;AAE9B,IAAAD,QAAO,IAAI,QAAQ,SAAS,MAAM,gBAAgB,YAAY,IAAI,EAAE;AAAA,EACtE,SAAS,GAAG;AAEV,IAAAA,QAAO,MAAM,iCAAiC;AAC9C,QAAI,aAAa,OAAO;AACtB,eAASA,SAAQ,CAAC;AAAA,IACpB;AAIA,UAAM,kBAAkB,MAAM;AAAA,MAC5BA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,QAAI,CAAC,iBAAiB;AACpB,YAAM;AAAA,IACR;AAAA,EACF;AACF;AAcA,IAAM,qBAAqB,CAACA,SAAgB,YAAqB;AAC/D,MAAI,QAAQ,WAAW,KAAK,QAAQ,YAAY,KAAK,QAAQ,QAAQ,GAAG;AACtE,eAAW;AAAA,MACT,UAAU,QAAQ;AAAA,MAClB,WAAW,QAAQ;AAAA,MACnB,eAAeA,QAAO;AAAA,MACtB,OAAO,QAAQ;AAAA,MACf,WAAW,oBAAI,KAAK;AAAA,IACtB,CAAC;AAAA,EACH;AACA,UAAQ,WAAW;AACnB,UAAQ,QAAQ;AAChB,UAAQ,YAAY;AACpB,aAAW,MAAM,mBAAmBA,SAAQ,OAAO,GAAG,GAAI;AAC5D;AAcA,SAAS,sBAAsB,kBAA0B;AACvD,MAAI;AACJ,MAAI;AACF,8BAA0B,UACxB,iBAAiB,UAAU,GAAG,iBAAiB,SAAS,CAAC,CAC3D;AAAA,EACF,SAAS,GAAG;AACV,WAAO,EAAE,QAAQ,YAAY,SAAS,GAAG,CAAC,IAAI,cAAc,QAAQ,CAAC;AACrE,UAAM;AAAA,EACR;AACA,SAAO,wBAAwB;AACjC;AAEA,eAAe,wBACb,aACA,aAIC;AACD,QAAM,qBAAqB,MAAM,sBAAsB;AACvD,QAAM,uBAAuB,GAAG,sBAAsB,WAAW,CAAC,IAAI,cAAc,sBAAsB,WAAW,IAAI,aAAa;AAEtI,QAAM,kBAAkB,MAAM,KAAK,mBAAmB,QAAQ,CAAC,EAAE;AAAA,IAC/D,CAAC,CAAC,GAAG,MAAM,IAAI,WAAW,oBAAoB;AAAA,EAChD;AAEA,MAAI,gBAAgB,WAAW,GAAG;AAChC,UAAM,UAAU,0BAA0B,oBAAoB;AAC9D,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,SAAS,GAAG,OAAO;AAAA,MACnB,cAAc;AAAA,IAChB,CAAC;AACD,UAAM,IAAI,MAAM
,OAAO;AAAA,EACzB;AAIA,QAAM,YAAY,gBAAgB,IAAI,CAAC,CAAC,GAAG,CAAC,IAAI,MAAM,CAAC,MAAM;AAAA,IAC3D;AAAA,IACA;AAAA,EACF,CAAC;AACD,QAAM,CAAC,MAAM,UAAU,IAAI,gBAAgB,CAAC;AAC5C,QAAM,gBAAgB,WAAW,CAAC;AAGlC,QAAM,iBAAiB,+BAA+B,aAAa;AAEnE,SAAO,EAAE,WAAW,eAAe;AACrC;AAsBA,IAAM,gBAAgB,OACpB,MACAA,SACA,SACA,cACA,UACA,UACA,oBACkB;AAElB,sBAAoB,KAAK,WAAW;AACpC,MAAI,KAAK,aAAa;AACpB,wBAAoB,KAAK,WAAW;AAAA,EACtC;AAEA,MAAI;AACF,IAAAA,QAAO,IAAI,wBAAwB;AACnC,UAAM,SAAS,QAAQ;AACvB,IAAAA,QAAO,IAAI,iCAAiC;AAAA,EAC9C,SAAS,OAAO;AACd,IAAAA,QAAO,MAAM,6BAA6B;AAC1C,QAAI,iBAAiB,OAAO;AAC1B,eAASA,SAAQ,KAAK;AAAA,IACxB;AACA,UAAM;AAAA,EACR;AAEA,EAAAA,QAAO;AAAA,IACL,4BAA4B,eAAe,wBAAwB,KAAK,YAAY,IAAI,sBAAsB,KAAK,aAAa,QAAQ,MAAM;AAAA,EAChJ;AAKA,MAAI;AAIJ,MAAI;AAEJ,MAAI,KAAK,QAAQ;AACf,UAAM,SAAS,MAAM;AAAA,MACnB,KAAK;AAAA,MACL,KAAK;AAAA,IACP;AACA,yBAAqB,OAAO;AAC5B,qBAAiB,OAAO;AAAA,EAC1B,OAAO;AACL,yBAAqB,CAAC,CAAC,sBAAsB,KAAK,gBAAgB,GAAG,CAAC,CAAC,CAAC;AACxE,qBAAiB;AAAA,EACnB;AAEA,QAAM,SAAS,UAAU;AAAA,IACvB,QAAQ,CAAC,KAAK,YAAY,IAAI;AAAA;AAAA,EAChC,CAAC;AAED,QAAM,SAAS,IAAI;AAAA,IACjB,sBAAsB;AAAA;AAAA,IAEtB,gCAAgC;AAAA;AAAA,IAChC,WAAW,OAAO,EAAE,OAAO,WAAW,WAAW,QAAQ,MAAM;AAC7D,UAAI,CAAC,UAAU,KAAK,QAAQ,GAAG;AAC7B;AAAA,MACF;AAEA,cAAQ,YAAY,MAAM,SAAS;AAEnC,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,SAAS,GAAGA,QAAO,SAAS,IAAI,MAAM,SAAS,MAAM;AAAA,MACvD,CAAC;AACD,MAAAA,QAAO,IAAI,YAAY,MAAM,SAAS,MAAM,aAAa;AAEzD,UAAI,QAAQ;AACZ,YAAM,iBAAiBE,UAAS,KAAK,MAAM,QAAQ;AAEnD,YAAM,oBACJ,MAAM,eACH;AAAA,QACC,OAAO,YAAY;AACjB;AACA,cACG,MAAM,SAAS,SAAS,qCACvB,QAAQ,qCACV,QAAQ,MAAM,MAAM,SAAS,QAC7B;AACA,kBAAM,UAAU;AAAA,UAClB;AACA,iBAAO;AAAA,YACLF;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,QACf;AAAA,MACF,EACC,QAAQ;AAEb,YAAM,mBAAmB,kBACtB,KAAK,EACL,OAAO,CAAC,QAAQ,QAAQ,UAAa,IAAI,UAAU,MAAS;AAE/D,UAAI,KAAK,gBAAgB,UAAa,kBAAkB,WAAW,GAAG;AACpE;AAAA,MACF;AAEA,YAAM,UAAU;AAEhB,UAAI,iBAAiB,SAAS,GAAG;AAE/B,cAAM;AAAA,UACJA;AAAA,UACA;AAAA,UACA,KAAK;AAAA,UACL;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF,CAAC;AAED,EAAAA,QAAO,IAAI,wBAAwB;AACrC;AAaA,IAAM,cAAc,CAAC,MAA6B,aAA6B;AAC7E,QAAM,cACJ,KAAK,aAAa,OAAO,OAAO,KAAK,YAAY,IAAI,KAAK;AAC5D,QAAM,YAAY,GAAG,KAAK,YAAY,IAAI,GAAG,WAAW,YAAY,QAAQ;AAC5E,SAAO;AAAA,IACL;AAAA,IACA,KAAK,CAAC,YAA0B;AAC9B,cAAQ,IAAI,GAAG,SAAS,KAAK,OAAO,EAAE;AAAA,IACxC;AAAA,IACA,OAAO,CAAC,YAA0B;AAChC,cAAQ,MAAM,GAAG,SAAS,KAAK,OAAO,EAAE;AAAA,IAC1C;AAAA,IACA,MAAM,CAAC,YAA0B;AAC/B,cAAQ,KAAK,GAAG,SAAS,KAAK,OAAO,EAAE;AAAA,IACzC;AAAA,EACF;AACF;AAMO,SAAS,oBAAoB,SAAyB;AAC3D,SAAO,IAAI,QAAQ,QAAQ,OAAO,GAAG,CAAC;AACxC;AAMO,SAAS,sBAAsB,QAA6B;AACjE,MAAI,OAAO,OAAO;AAGlB,MAAI,OAAO,SAAS;AAClB,UAAM,gBAAgB,oBAAoB,OAAO,OAAO;AACxD,QAAI,KAAK,SAAS,aAAa,GAAG;AAChC,aAAO,KAAK,MAAM,GAAG,CAAC,cAAc,MAAM;AAAA,IAC5C,OAAO;AACL,YAAM,IAAI;AAAA,QACR,kBAAkB,aAAa,4BAA4B,IAAI;AAAA,MACjE;AAAA,IACF;AAAA,EACF;AAGA,MAAI,OAAO,aAAa,OAAO,cAAc,IAAI;AAC/C,UAAM,SAAS,GAAG,OAAO,SAAS;AAClC,QAAI,KAAK,WAAW,MAAM,GAAG;AAC3B,aAAO,KAAK,MAAM,OAAO,MAAM;AAAA,IACjC,OAAO;AACL,YAAM,IAAI;AAAA,QACR,oBAAoB,MAAM,4BAA4B,IAAI;AAAA,MAC5D;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAKO,SAAS,oBAAoB,QAA2B;AAC7D,MAAI,OAAO,aAAa,CAAC,OAAO,KAAK,WAAW,GAAG,OAAO,SAAS,GAAG,GAAG;AACvE,UAAM,IAAI;AAAA,MACR,cAAc,OAAO,IAAI,8BAA8B,OAAO,SAAS;AAAA,IACzE;AAAA,EACF;AAEA,MAAI,OAAO,SAAS;AAClB,UAAM,gBAAgB,oBAAoB,OAAO,OAAO;AACxD,QAAI,CAAC,OAAO,KAAK,SAAS,aAAa,GAAG;AACxC,YAAM,IAAI;AAAA,QACR,cAAc,OAAO,IAAI,0BAA0B,OAAO,OAAO;AAAA,MACnE;AAAA,IACF;AAAA,EACF;AACF;AAiCO,IAAM,wBAAwB,OACnC,SACkB;AAElB,sBAAoB,KAAK,WAAW;AACpC,MAAI,KAAK,aAAa;AACpB,wBAAoB,KAAK,WAAW;AAAA,EACtC;AAKA,QAAM,kBAAkB,QAAQ,KAAK,YAAY,IAAI,IAAI,KAAK,aAAa,QAAQ,EAAE;AAErF,QAAMG,WAAU,IAAI,
QAAQ;AAAA,IAC1B,kBAAkB;AAAA,IAClB,gBAAgB,KAAK;AAAA,IACrB,aAAa,OAAO,QAAQ,gBAAgB;AAC1C,YAAMH,UAAS,YAAY,MAAM,OAAO,EAAE;AAE1C,YAAM,UAAU;AAAA,QACd,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAEA,iBAAW,MAAM,mBAAmBA,SAAQ,OAAO,GAAG,GAAI;AAE1D,YAAM,iBAAiB,WAAW,GAAG,QAAQ,MAAM;AACnD,YAAM,YAAY,GAAG,cAAc,GAAG,eAAe,OAAO,OAAO,EAAE;AAErE,YAAM,QAAQ,MAAM;AAAA,QAClB;AAAA,UACE,UAAU;AAAA,UACV,QAAQ,KAAK;AAAA,UACb,kBAAkB,KAAK;AAAA,UACvB,cAAc,KAAK;AAAA,UACnB,cAAc,KAAK;AAAA,UACnB,eAAe,KAAK;AAAA,QACtB;AAAA,QACAA;AAAA,MACF;AAGA,YAAM,WAAqB,MAAM,SAAS;AAAA,QACxC,SAAS;AAAA,UACP,SAAS;AAAA,UACT,gBAAgB;AAAA,UAChB,mBAAmB;AAAA,UACnB,OAAO;AAAA,YACL,SAAS;AAAA,UACX;AAAA,UACA,YAAY;AAAA,UACZ,oBAAoB;AAAA,UACpB,eAAe;AAAA,QACjB;AAAA,QACA,8BAA8B;AAAA,MAChC,CAAC;AAGD,YAAM,kBACJ,KAAK,aAAa,qBAAqB,OAAO;AAEhD,YAAM,WAAqB,MAAM;AAAA,QAC/B,qBAAqB,eAAe;AAAA,MACtC;AAEA,UAAI;AACF,QAAAA,QAAO,IAAI,sBAAsB;AACjC,cAAM,cAAcA,SAAQ,QAAQ;AAEpC,YAAI;AACF,UAAAA,QAAO,IAAI,sBAAsB;AACjC,gBAAM;AAAA,YACJ;AAAA,YACAA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,SAAS,GAAG;AACV,UAAAA,QAAO,MAAM,kCAAkC;AAC/C,cAAI,aAAa,OAAO;AACtB,qBAASA,SAAQ,CAAC;AAAA,UACpB;AAEA,gBAAM;AAAA,QACR;AAAA,MACF,SAAS,GAAG;AACV,QAAAA,QAAO,MAAM,kCAAkC;AAC/C,YAAI,aAAa,OAAO;AACtB,mBAASA,SAAQ,CAAC;AAAA,QACpB;AAEA,cAAM;AAAA,MACR;AAEA,aAAO,CAACA,SAAQ,UAAU,QAAQ;AAAA,IACpC;AAAA,IACA,YAAY,OAAO,CAACA,SAAQ,UAAU,QAAQ,MAAM;AAClD,MAAAA,QAAO,IAAI,+CAA+C;AAG1D,MAAAA,QAAO,IAAI,4BAA4B;AACvC,YAAM,aAAaA,SAAQ,UAAU,KAAK,WAAW;AAGrD,MAAAA,QAAO,IAAI,+CAA+C;AAC1D,YAAM,IAAI,QAAQ,CAACI,aAAY,WAAWA,UAAS,GAAI,CAAC;AAGxD,MAAAJ,QAAO,IAAI,sBAAsB;AACjC,YAAM,aAAaA,SAAQ,QAAQ;AAEnC,MAAAA,QAAO,IAAI,6BAA6B;AAAA,IAC1C;AAAA,EACF,CAAC;AAED,EAAAG,SAAQ,MAAM;AAChB;;;AC7+BA,eAAsB,oBAAoB,aAAqB;AAC7D,QAAM,eAAe,UAAQ,WAAW;AACxC,UAAQ,IAAI,KAAK,UAAU,YAAY,CAAC;AAC1C;;;ACHA,OAAOE,cAAa;AAKpB,SAASC,gBAAuB;AAC9B,SAAOC,SAAQ,IAAI,oBAAoB;AACzC;AAEA,eAAsB,qBAAqB,aAAqB;AAC9D,QAAM,OAAO,UACX,GAAGA,SAAQ,IAAI,CAAC,IAAID,cAAa,CAAC,SAAS,WAAW,KACxD,EAAE;AACF,QAAM,cAAc,KAAK,oBAAoB,KAAK;AAClD,QAAM,eAAe,KAAK,qBAAqB,KAAK;AACpD,UAAQ;AAAA,IACN,KAAK,UAAU;AAAA,MACb;AAAA,MACA;AAAA,IACF,CAAC;AAAA,EACH;AACF;;;ACrBA;AAAA,EAEE;AAAA,EAEA;AAAA,EACA;AAAA,OACK;AACP,YAAYE,WAAU;AACtB,YAAYC,SAAQ;;;ACRpB,SAAS,OAAO,QAAQ,eAAe;AACvC,SAAS,sBAAsB;AAUxB,IAAM,aAAa;AAAA,EACxB,MAAM,gBAAgB,MAAgC;AACpD,QAAI;AACF,YAAM,YAAY,MAAMC,cAAa;AACrC,YAAM,cAAc,UAAU,IAAI,IAAI;AACtC,aAAO,KAAK,oBAAoB,WAAW,EAAE;AAC7C,aAAO;AAAA,IACT,SAAS,OAAO;AACd,aAAO,MAAM,+BAA+B,IAAI,YAAY,KAAK,EAAE;AACnE,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,MAAM,gBAAgB,MAAiC;AACrD,QAAI;AACF,aAAO,KAAK,oBAAoB,IAAI,EAAE;AAEtC,YAAM,YAAY,MAAMA,cAAa;AAErC,UAAI,UAAU,IAAI,IAAI,GAAG;AACvB,eAAO,KAAK,YAAY,IAAI,QAAQ;AACpC,eAAO,UAAU,IAAI,IAAI;AAAA,MAC3B,OAAO;AACL,cAAM,YAAY;AAAA,UAChB,OAAO;AAAA,UACP,SAAS,YAAY,IAAI;AAAA,UACzB,OAAO;AAAA,QACT;AACA,cAAM,WAAW,KAAK,UAAU,SAAS;AACzC,eAAO,MAAM,QAAQ;AACrB,cAAM,IAAI,MAAM,QAAQ;AAAA,MAC1B;AAAA,IACF,SAAS,OAAO;AACd,YAAM,YAAY;AAAA,QAChB,OAAO;AAAA,QACP,SAAS,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,QAC9D,OAAO,iBAAiB,QAAQ,MAAM,QAAQ;AAAA,MAChD;AACA,YAAM,WAAW,KAAK,UAAU,SAAS;AACzC,aAAO,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,QAAQ;AAAA,IAC1B;AAAA,EACF;AAAA,EAEA,MAAM,mBACJ,cACA,UACyB;AACzB,QAAI;AACF,aAAO,KAAK,gBAAgB,QAAQ,kBAAkB,YAAY,EAAE;AACpE,YAAM,OAAO,MAAM,mBAAmB,cAAc,QAAQ;AAC5D,aAAO,KAAK,QAAQ,QAAQ,sBAAsB,YAAY,EAAE;AAChE,aAAO;AAAA,IACT,SAAS,OAAO;AACd,YAAM,YAAY;AAAA,QAChB,OAAO;AAAA,QACP,SAAS,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,QAC9D,OAAO,iBAAiB,QAAQ,MAAM,QAAQ;AAAA,MAChD;AACA,YAAM,WAAW,KAAK,UAAU,SAAS;AACzC,aAAO,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,QAAQ;AAAA,IAC1B;AAAA,EACF;AAAA,EAEA,MAAM,gBACJ,UACA,MACA,WACgB;AAEhB,UAAM,UA
AU,QAAQ,QAAQ;AAChC,UAAM,YAAY,CAAC;AAQnB,QAAI,oBAA2C;AAC/C,UAAM,yBAAyB,MAAM;AACnC,0BAAoB,YAAY,MAAM;AACpC,gBAAQ,UAAU,QAAQ,KAAK,IAAI,cAAc;AAAA,MACnD,GAAG,GAAI;AAAA,IACT;AACA,UAAM,wBAAwB,MAAM;AAClC,UAAI,mBAAmB;AACrB,sBAAc,iBAAiB;AAC/B,4BAAoB;AAAA,MACtB;AAAA,IACF;AAEA,QAAI;AACF,aAAO;AAAA,QACL,QAAQ,KAAK,IAAI,oBAAoB,KAAK,UAAU,SAAS,CAAC;AAAA,MAChE;AAGA,cAAQ,UAAU,kBAAkB,KAAK,IAAI,EAAE;AAI/C,YAAM,WAAW,MAAM,mBAAmB,SAAS,MAAM,KAAK,IAAI;AAGlE,YAAM,mBACJ,YACE,KAAK,MAAM,KAAK,UAAU,SAAS,GAAG,eAAe,IACrD;AAEJ,UAAI;AACF,+BAAuB;AAMvB,cAAM,SAAS,MAAM,QAAQ,KAAK;AAAA,UAChC,SAAS,OAAO,IAAI,EAAE,OAAO,WAAW,OAAO,iBAAiB,CAAC;AAAA,UACjE,QAAQ;AAAA,QACV,CAAC;AACD,eAAO;AAAA,MACT,SAAS,OAAO;AACd,YAAI,eAAe,KAAK,GAAG;AACzB,iBAAO;AAAA,YACL,QAAQ,KAAK,IAAI;AAAA,UACnB;AACA,cAAI,SAAS,OAAO,UAAU;AAC5B,kBAAM,SAAS,OAAO,SAAS;AAAA,cAC7B,OAAO;AAAA,cACP,OAAO;AAAA,YACT,CAAC;AAAA,UACH;AACA,iBAAO,CAAC;AAAA,QACV,OAAO;AACL,gBAAM;AAAA,QACR;AAAA,MACF,UAAE;AACA,8BAAsB;AAAA,MACxB;AAAA,IACF,SAAS,OAAO;AACd,YAAM,YAAY;AAAA,QAChB,OAAO;AAAA,QACP,SAAS,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,QAC9D,OAAO,iBAAiB,QAAQ,MAAM,QAAQ;AAAA,MAChD;AACA,YAAM,WAAW,KAAK,UAAU,SAAS;AACzC,aAAO,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,QAAQ;AAAA,IAC1B;AAAA,EACF;AACF;AAGO,SAAS,wBAAwB,YAAoB;AAC1D,SAAO;AAAA,IACL,CAAC,UAAU,GAAG,WAAW;AAAA,EAC3B;AACF;;;AC1KA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAEP,IAAM,kBAAN,MAAM,iBAAgB;AAAA,EACpB,OAAe,WAAiC;AAAA,EAExC,cAAc;AAAA,EAAC;AAAA,EAEvB,OAAc,mBAAkC;AAC9C,QAAI,CAAC,iBAAgB,UAAU;AAC7B,uBAAgB,WAAW,IAAI;AAAA,QAC7B;AAAA,QACA,CAAC,EAAE,OAAO,QAAQ,MAAM;AACtB,kBAAQ,IAAI,GAAG,KAAK,MAAM,OAAO,EAAE;AAAA,QACrC;AAAA,MACF;AAEA,cAAQ,QAAQ;AAAA,QACd,QAAQ,iBAAgB;AAAA,QACxB,kBAAkB;AAAA,UAChB,SAAS;AAAA,YACP,QAAQ,0BAA0B,EAAE,MAAM,QAAQ,OAAO,OAAO,CAAC;AAAA,YACjE,SAAS,CAAC;AAAA,UACZ;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,WAAO,iBAAgB;AAAA,EACzB;AAAA,EAEA,OAAc,cAA6B;AACzC,WAAO,iBAAgB;AAAA,EACzB;AACF;AAEO,IAAM,mBAAmB,gBAAgB;;;AFXhD,IAAM,qBAAqB,oBAAI,IAAY;AAE3C,SAAS,sBACPC,SACA,WACA;AACA,EAAAA,QAAO,KAAK,+CAA+C;AAC3D,QAAM,cAAwB,CAAC;AAC/B,aAAW,CAAC,MAAM,QAAQ,KAAK,UAAU,QAAQ,GAAG;AAClD,IAAAA,QAAO;AAAA,MACL,uCAAuC,IAAI,wBAAwB,SAAS,OAAO,aAAa,IAAI;AAAA,IACtG;AACA,gBAAY,KAAK,GAAG,IAAI,IAAI,SAAS,OAAO,aAAa,IAAI,EAAE;AAAA,EACjE;AACA,SAAO;AACT;AAQA,eAAe,yBACbA,SACA,gBAC2B;AAC3B,EAAAA,QAAO;AAAA,IACL,kCAAkC,eAAe,GAAG,mBAAmB,eAAe,SAAS;AAAA,EACjG;AAEA,MAAI,oBAA6C;AAAA,IAC/C,SAAS,eAAe;AAAA,EAC1B;AAEA,MAAI,eAAe,cAAc,eAAe,WAAW;AACzD,IAAAA,QAAO,KAAK,+BAA+B;AAC3C,UAAM,OAAO,MAAS,iBAAa,eAAe,UAAU;AAC5D,UAAM,MAAM,MAAS,iBAAa,eAAe,SAAS;AAE1D,sBAAkB,MAAM;AAAA,MACtB,gBAAgB;AAAA,QACd,KAAK;AAAA,QACL;AAAA,MACF;AAAA,IACF;AAAA,EACF,WAAW,eAAe,QAAQ;AAChC,IAAAA,QAAO,KAAK,mCAAmC;AAE/C,sBAAkB,UAAU;AAC5B,sBAAkB,SAAS,eAAe;AAC1C,sBAAkB,MAAM,CAAC;AACzB,sBAAkB,WAAW;AAAA,MAC3B,sBAAsB,eAAe;AAAA,IACvC;AAAA,EACF;AAEA,EAAAA,QAAO;AAAA,IACL,wCAAwC,kBAAkB,OAAO;AAAA,EACnE;AAEA,QAAM,aAAa;AACnB,QAAM,YAAY;AAClB,MAAI,UAAU;AAEd,SAAO,MAAM;AACX,QAAI;AACF,YAAM,aAAa,MAAM,iBAAiB,QAAQ,iBAAiB;AACnE,MAAAA,QAAO,KAAK,yCAAyC;AACrD,aAAO;AAAA,IACT,SAAS,KAAK;AACZ;AACA,MAAAA,QAAO,MAAM,iCAAiC,OAAO,YAAY,GAAG,EAAE;AAEtE,UAAI,WAAW,YAAY;AACzB,QAAAA,QAAO,MAAM,2BAA2B,OAAO,WAAW;AAC1D,cAAM;AAAA,MACR;AAEA,YAAM,UAAU,YAAY,KAAK,IAAI,GAAG,UAAU,CAAC;AACnD,MAAAA,QAAO,KAAK,qCAAqC,OAAO,OAAO;AAC/D,YAAM,IAAI,QAAQ,CAACC,aAAY,WAAWA,UAAS,OAAO,CAAC;AAAA,IAC7D;AAAA,EACF;AACF;AAEA,eAAe,kBACbD,SACA,QACwB;AACxB,EAAAA,QAAO,KAAK,uBAAuB;AAGnC,QAAM,iBAA2B,CAAC;AAClC,QAAM,oBAA2B,CAAC;AAElC,MAAI;AACF,UAAM,YAAY,MAAME,cAAa;AACrC,QAAI,UAAU,OAAO,GAAG;AACtB,MAAAF,QAAO,KAAK,kBAAkB,UAAU,IAAI,iBAAiB;AAC7D,qBAAe,KAAK,GAAG,sBAAsBA,SAAQ,SAAS,CAAC;AAE/D,UAAI,eAAe,WAAW,GAAG;AAC/B,QAAAA,QAAO,KAAK,2C
AA2C;AACvD,eAAO;AAAA,MACT;AAEA,MAAAA,QAAO;AAAA,QACL,kBAAkB,eAAe,MAAM;AAAA,MACzC;AAEA,iBAAW,gBAAgB,gBAAgB;AACzC,YAAI,CAAC,mBAAmB,IAAI,YAAY,GAAG;AACzC,gBAAM,WAAW,MAAM,wBAAwB,YAAY;AAC3D,4BAAkB,KAAK,QAAQ;AAC/B,6BAAmB,IAAI,YAAY;AACnC,UAAAA,QAAO,KAAK,4BAA4B,YAAY,EAAE;AAAA,QACxD;AAAA,MACF;AAEA,UAAI,kBAAkB,WAAW,GAAG;AAClC,QAAAA,QAAO,KAAK,wDAAwD;AACpE,eAAO;AAAA,MACT;AAEA,MAAAA,QAAO;AAAA,QACL,kBAAkB,kBAAkB,MAAM;AAAA,MAC5C;AAAA,IACF;AAEA,QAAI,eAAe,WAAW,GAAG;AAC/B,MAAAA,QAAO,KAAK,oBAAoB;AAChC,aAAO;AAAA,IACT;AAEA,IAAAA,QAAO,KAAK,SAAS,eAAe,MAAM,YAAY;AAEtD,QAAI,kBAAkB,WAAW,GAAG;AAClC,MAAAA,QAAO,KAAK,gBAAgB;AAC5B,aAAO;AAAA,IACT;AAEA,IAAAA,QAAO,KAAK,SAAS,kBAAkB,MAAM,UAAU;AAEvD,UAAM,aAAa,MAAM;AAAA,MACvBA;AAAA,MACA,OAAO;AAAA,IACT;AAGA,UAAM,eAAe;AAAA,MACnB,MAAM,MAAM;AAAA,MAAC;AAAA;AAAA,MACb,OAAO,MAAM;AAAA,MAAC;AAAA;AAAA,MACd,MAAM,MAAM;AAAA,MAAC;AAAA;AAAA,MACb,KAAK,MAAM;AAAA,MAAC;AAAA;AAAA,MACZ,OAAO,MAAM;AAAA,MAAC;AAAA;AAAA,MACd,OAAO,CAAC,SAAiB,SAAe;AAEtC,QAAAA,QAAO,MAAM,SAAS,IAAI;AAAA,MAC5B;AAAA,IACF;AAIA,UAAM,iBAAiB,MAAM,mBAAmB;AAAA,MAC9C,eAAoB,cAAQ,WAAW,qBAAqB;AAAA,MAC5D,QAAQ;AAAA,IACV,CAAC;AAED,UAAM,SAAS,MAAM,OAAO,OAAO;AAAA,MACjC;AAAA,MACA,WAAW,OAAO,eAAe;AAAA,MACjC,WAAW;AAAA,MACX;AAAA,MACA,YAAY;AAAA,QACV,GAAG;AAAA,QACH,GAAG,OAAO;AAAA,UACR,kBAAkB,IAAI,CAAC,aAAa;AAAA,YAClC,OAAO,KAAK,QAAQ,EAAE,CAAC;AAAA,YACvB,OAAO,OAAO,QAAQ,EAAE,CAAC;AAAA,UAC3B,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF,CAAC;AAED,WAAO;AAAA,EACT,SAAS,OAAO;AACd,IAAAA,QAAO,MAAM,gCAAgC,KAAK,EAAE;AACpD,UAAM;AAAA,EACR;AACF;AAKA,eAAsB,WACpB,QACwB;AACxB,QAAMA,UAAS,iBAAiB;AAGhC,UAAQ,GAAG,qBAAqB,CAAC,UAAU;AACzC,YAAQ,MAAM,iCAAiC,KAAK,EAAE;AACtD,YAAQ,KAAK,CAAC;AAAA,EAChB,CAAC;AAED,QAAM,SAAS,MAAM,kBAAkBA,SAAQ,MAAM;AAErD,MAAI,CAAC,QAAQ;AACX,IAAAA,QAAO;AAAA,MACL;AAAA,IACF;AACA,YAAQ,KAAK,CAAC;AAAA,EAChB;AAEA,MAAI,iBAAiB;AAGrB,iBAAe,aAAa,QAAgB;AAC1C,YAAQ,IAAI,uBAAuB,MAAM,EAAE;AAE3C,QAAI,gBAAgB;AAClB;AAAA,IACF;AAEA,qBAAiB;AAEjB,QAAI;AACF,UAAI,CAAC,QAAQ;AACX,gBAAQ,KAAK,CAAC;AAAA,MAChB;AACA,YAAM,QAAQ,KAAK;AAAA,QACjB,OAAO,SAAS;AAAA,QAChB,IAAI;AAAA,UAAQ,CAAC,GAAG,WACd,WAAW,MAAM,OAAO,IAAI,MAAM,kBAAkB,CAAC,GAAG,GAAI;AAAA,QAC9D;AAAA,MACF,CAAC;AACD,cAAQ,KAAK,CAAC;AAAA,IAChB,SAAS,OAAO;AACd,cAAQ,IAAI,qBAAqB,KAAK,EAAE;AACxC,cAAQ,KAAK,CAAC;AAAA,IAChB;AAAA,EACF;AAGA,GAAC,WAAW,UAAU,UAAU,SAAS,EAAE,QAAQ,CAAC,WAAW;AAC7D,YAAQ,GAAG,QAAQ,MAAM;AACvB,mBAAa,MAAM,EAAE,MAAM,CAAC,UAAU;AACpC,gBAAQ,IAAI,qBAAqB,KAAK,EAAE;AACxC,gBAAQ,KAAK,CAAC;AAAA,MAChB,CAAC;AAAA,IACH,CAAC;AAAA,EACH,CAAC;AAED,EAAAA,QAAO,KAAK,+BAA+B;AAC3C,MAAI;AACF,UAAM,OAAO,IAAI;AAAA,EACnB,SAAS,OAAO;AACd,YAAQ,IAAI,qBAAqB,KAAK,EAAE;AACxC,YAAQ,KAAK,CAAC;AAAA,EAChB;AAEA,SAAO;AACT;;;AjBlPA,OAAOG,cAAa;AAEpB,SAAS,eAAe;AA1CxB,IACEA,SAAQ,KAAK,CAAC,KAAK,sBACnBA,SAAQ,KAAK,CAAC,KAAK,iCACnBA,SAAQ,KAAK,CAAC,KAAK;AAEnBA,SAAQ,KAAK,CAAC,KAAK,yBACnBA,SAAQ,KAAK,CAAC,KAAK,WACnB;AACA,WAAS;AAAA,IACP,SAAS,CAAC,yBAAyB;AAAA,IACnC,KAAK;AAAA,IACL,gCAAgC;AAAA,IAChC,UAAU;AAAA,IACV,iBAAiB;AAAA,MACf,SAAS;AAAA,QACP;AAAA,UACE,WAAW;AAAA,UACX,kBAAkB;AAAA,QACpB;AAAA,QACA;AAAA,UACE,WAAW;AAAA,QACb;AAAA,MACF;AAAA,MACA,wBAAwB;AAAA,IAC1B;AAAA,EACF,CAAC;AACH,OAAO;AACL,WAAS;AAAA,IACP,KAAK;AAAA,IACL,gCAAgC;AAAA,EAClC,CAAC;AACH;AAgBA,IAAM,UAAU,IAAI,QAAQ;AAE5B,QACG,KAAK,cAAc,EACnB,YAAY,qCAAqC,EACjD,QAAQ,OAAO;AAElB,QACG,QAAQ,iBAAiB,EACzB,YAAY,iBAAiB,EAC7B,OAAO,MAAM;AACZ,oBAAkB;AACpB,CAAC;AAEH,QACG,QAAQ,mBAAmB,EAC3B,YAAY,uBAAuB,EACnC,SAAS,kBAAkB,2BAA2B,EACtD,OAAO,CAAC,gBAAgB;AACvB,sBAAoB,WAAW;AACjC,CAAC;AAEH,QACG,QAAQ,QAAQ,EAChB,YAAY,YAAY,EACxB,SAAS,gBAAgB,6BAA6B,EACtD,SAAS,mBAAmB,0BAA0B,EACtD,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,yBAAyB,qBAAqB,EACvD,SAAS,yBAA
yB,qBAAqB,EACvD,OAAO,wBAAwB,qCAAqC,KAAK,EACzE;AAAA,EACC,CACE,WACA,cACA,gBACA,gBACA,oBACA,oBACA,YACG;AACH,cAAU;AAAA,MACR;AAAA,MACA,kBAAkB;AAAA,QAChB,UAAU;AAAA,QACV,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,UAAU;AAAA,QACV,QAAQ,QAAQ;AAAA,MAClB;AAAA,IACF,CAAC;AAAA,EACH;AACF;AAEF,QACG,QAAQ,kBAAkB,EAC1B,YAAY,sBAAsB,EAClC,SAAS,qBAAqB,uCAAuC,EACrE,SAAS,mBAAmB,0BAA0B,EACtD,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,yBAAyB,qBAAqB,EACvD,SAAS,yBAAyB,qBAAqB,EACvD,OAAO,wBAAwB,qCAAqC,KAAK,EACzE,OAAO,yBAAyB,iCAAiC,EACjE,OAAO,yBAAyB,qBAAqB,EACrD,OAAO,6BAA6B,uBAAuB,EAC3D;AAAA,EACC;AAAA,EACA;AAAA,EACA;AACF,EACC,OAAO,wBAAwB,qBAAqB,EACpD,OAAO,oCAAoC,oBAAoB,EAC/D,OAAO,wBAAwB,4BAA4B,EAC3D,OAAO,uBAAuB,oBAAoB,EAClD,OAAO,mBAAmB,4BAA4B,EACtD,OAAO,aAAa,sCAAsC,KAAK,EAC/D,OAAO,uBAAuB,mCAAmC,QAAQ,EACzE;AAAA,EACC;AAAA,EACA;AAAA,EACA;AACF,EACC;AAAA,EACC,CACE,SACA,cACA,gBACA,gBACA,oBACA,oBACA,YACG;AACH,YAAQ;AAAA,MACN;AAAA,MACA,kBAAkB;AAAA,QAChB,UAAU;AAAA,QACV,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,UAAU;AAAA,QACV,QAAQ,QAAQ;AAAA,MAClB;AAAA,MACA,WAAW;AAAA,QACT,QAAQ,QAAQ;AAAA,QAChB,QAAQ,QAAQ;AAAA,QAChB,UAAU,QAAQ;AAAA,MACpB;AAAA,MACA,gBAAgB;AAAA,QACd,KAAK,QAAQ;AAAA,QACb,WAAW,QAAQ;AAAA,QACnB,YAAY,QAAQ;AAAA,QACpB,WAAW,QAAQ;AAAA,QACnB,QAAQ,QAAQ;AAAA,MAClB;AAAA,MACA,aAAa,QAAQ;AAAA,MACrB,QAAQ,QAAQ;AAAA,MAChB,WAAW,QAAQ;AAAA,MACnB,aAAa,QAAQ;AAAA,IACvB,CAAC;AAAA,EACH;AACF;AAEF,QACG,QAAQ,qBAAqB,EAC7B,YAAY,yBAAyB,EACrC,SAAS,kBAAkB,oCAAoC,EAC/D,SAAS,wBAAwB,2BAA2B,EAC5D;AAAA,EACC;AAAA,EACA;AACF,EACC,SAAS,0BAA0B,+BAA+B,EAClE,OAAO,iCAAiC,oCAAoC,EAC5E,OAAO,8BAA8B,eAAe,EACpD,OAAO,8BAA8B,eAAe,EACpD,OAAO,gCAAgC,gBAAgB,EACvD,OAAO,kCAAkC,mBAAmB,EAC5D,OAAO,aAAa,mCAAmC,KAAK,EAC5D;AAAA,EACC,CAAC,aAAa,kBAAkB,QAAQ,oBAAoB,YAAY;AACtE,UAAM,SAAgC;AAAA,MACpC,aAAa,KAAK,MAAM,WAAW;AAAA,MACnC,aACE,QAAQ,cAAc,KAAK,MAAM,QAAQ,WAAW,IAAI;AAAA,MAC1D;AAAA,MACA;AAAA,MACA,oBAAoB,SAAS,kBAAkB;AAAA,MAC/C,QAAQ,QAAQ;AAAA,MAChB,cAAc,QAAQ;AAAA,MACtB,cAAc,QAAQ;AAAA,MACtB,eAAe,QAAQ;AAAA,MACvB,kBAAkB,QAAQ;AAAA,IAC5B;AACA,0BAAsB,MAAM;AAAA,EAC9B;AACF;AAEF,QACG,QAAQ,6BAA6B,EACrC,YAAY,iCAAiC,EAC7C,SAAS,kBAAkB,2BAA2B,EACtD,OAAO,CAAC,gBAAgB;AACvB,uBAAqB,WAAW;AAClC,CAAC;AAEH,QACG,QAAQ,SAAS,EACjB,YAAY,aAAa,EACzB,OAAO,wBAAwB,qBAAqB,EACpD,OAAO,oCAAoC,oBAAoB,EAC/D,OAAO,wBAAwB,4BAA4B,EAC3D,OAAO,uBAAuB,oBAAoB,EAClD,OAAO,mBAAmB,4BAA4B,EACtD,OAAO,CAAC,YAAY;AACnB,aAAW;AAAA,IACT,gBAAgB;AAAA,MACd,KAAK,QAAQ;AAAA,MACb,WAAW,QAAQ;AAAA,MACnB,YAAY,QAAQ;AAAA,MACpB,WAAW,QAAQ;AAAA,MACnB,QAAQ,QAAQ;AAAA,IAClB;AAAA,EACF,CAAC;AACH,CAAC;AAEH,QAAQ,MAAM;","names":["logger","process","sql","createHash","createHash","sql","getWorkflows","createHash","http","resolve","path","getApis","getWebApps","http","resolve","createClient","isNestedType","process","process","getApis","getWorkflows","getWebApps","fs","fs","toClientConfig","path","resolve","Readable","KafkaJS","Buffer","process","http","Kafka","KafkaJS","logger","Buffer","Readable","cluster","resolve","process","getSourceDir","process","path","fs","getWorkflows","logger","resolve","getWorkflows","process"]}
1
+ {"version":3,"sources":["../src/commons.ts","../src/moose-runner.ts","../src/dmv2/internal.ts","../src/sqlHelpers.ts","../src/dmv2/sdk/olapTable.ts","../src/dmv2/sdk/stream.ts","../src/index.ts","../src/consumption-apis/helpers.ts","../src/consumption-apis/runner.ts","../src/cluster-utils.ts","../src/clients/redisClient.ts","../src/consumption-apis/standalone.ts","../src/utilities/dataParser.ts","../src/utilities/json.ts","../src/blocks/runner.ts","../src/streaming-functions/runner.ts","../src/moduleExportSerializer.ts","../src/consumption-apis/exportTypeSerializer.ts","../src/scripts/runner.ts","../src/scripts/activity.ts","../src/scripts/logger.ts"],"sourcesContent":["import http from \"http\";\nimport { createClient } from \"@clickhouse/client\";\nimport { KafkaJS } from \"@514labs/kafka-javascript\";\nimport { SASLOptions } from \"@514labs/kafka-javascript/types/kafkajs\";\nconst { Kafka } = KafkaJS;\ntype Kafka = KafkaJS.Kafka;\ntype Consumer = KafkaJS.Consumer;\nexport type Producer = KafkaJS.Producer;\n\n/**\n * Utility function for compiler-related logging that can be disabled via environment variable.\n * Set MOOSE_DISABLE_COMPILER_LOGS=true to suppress these logs (useful for testing environments).\n */\n\n/**\n * Returns true if the value is a common truthy string: \"1\", \"true\", \"yes\", \"on\" (case-insensitive).\n */\nfunction isTruthy(value: string | undefined): boolean {\n if (!value) return false;\n switch (value.trim().toLowerCase()) {\n case \"1\":\n case \"true\":\n case \"yes\":\n case \"on\":\n return true;\n default:\n return false;\n }\n}\n\nexport const compilerLog = (message: string) => {\n if (!isTruthy(process.env.MOOSE_DISABLE_COMPILER_LOGS)) {\n console.log(message);\n }\n};\n\nexport const antiCachePath = (path: string) =>\n `${path}?num=${Math.random().toString()}&time=${Date.now()}`;\n\nexport const getFileName = (filePath: string) => {\n const regex = /\\/([^\\/]+)\\.ts/;\n const matches = filePath.match(regex);\n if (matches && matches.length > 1) {\n return matches[1];\n }\n return \"\";\n};\n\ninterface ClientConfig {\n username: string;\n password: string;\n database: string;\n useSSL: string;\n host: string;\n port: string;\n}\n\nexport const getClickhouseClient = ({\n username,\n password,\n database,\n useSSL,\n host,\n port,\n}: ClientConfig) => {\n const protocol =\n useSSL === \"1\" || useSSL.toLowerCase() === \"true\" ? \"https\" : \"http\";\n console.log(`Connecting to Clickhouse at ${protocol}://${host}:${port}`);\n return createClient({\n url: `${protocol}://${host}:${port}`,\n username: username,\n password: password,\n database: database,\n application: \"moose\",\n // Note: wait_end_of_query is configured per operation type, not globally\n // to preserve SELECT query performance while ensuring INSERT/DDL reliability\n });\n};\n\nexport type CliLogData = {\n message_type?: \"Info\" | \"Success\" | \"Error\" | \"Highlight\";\n action: string;\n message: string;\n};\n\nexport const cliLog: (log: CliLogData) => void = (log) => {\n const req = http.request({\n port: parseInt(process.env.MOOSE_MANAGEMENT_PORT ?? 
\"5001\"),\n method: \"POST\",\n path: \"/logs\",\n });\n\n req.on(\"error\", (err: Error) => {\n console.log(`Error ${err.name} sending CLI log.`, err.message);\n });\n\n req.write(JSON.stringify({ message_type: \"Info\", ...log }));\n req.end();\n};\n\n/**\n * Method to change .ts, .cts, and .mts to .js, .cjs, and .mjs\n * This is needed because 'import' does not support .ts, .cts, and .mts\n */\nexport function mapTstoJs(filePath: string): string {\n return filePath\n .replace(/\\.ts$/, \".js\")\n .replace(/\\.cts$/, \".cjs\")\n .replace(/\\.mts$/, \".mjs\");\n}\n\nexport const MAX_RETRIES = 150;\nexport const MAX_RETRY_TIME_MS = 1000;\nexport const RETRY_INITIAL_TIME_MS = 100;\n\nexport const MAX_RETRIES_PRODUCER = 150;\nexport const RETRY_FACTOR_PRODUCER = 0.2;\n// Means all replicas need to acknowledge the message\nexport const ACKs = -1;\n\n/**\n * Creates the base producer configuration for Kafka.\n * Used by both the SDK stream publishing and streaming function workers.\n *\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n * @returns Producer configuration object for the Confluent Kafka client\n */\nexport function createProducerConfig(maxMessageBytes?: number) {\n return {\n kafkaJS: {\n idempotent: false, // Not needed for at-least-once delivery\n acks: ACKs,\n retry: {\n retries: MAX_RETRIES_PRODUCER,\n maxRetryTime: MAX_RETRY_TIME_MS,\n },\n },\n \"linger.ms\": 0, // This is to make sure at least once delivery with immediate feedback on the send\n ...(maxMessageBytes && { \"message.max.bytes\": maxMessageBytes }),\n };\n}\n\n/**\n * Parses a comma-separated broker string into an array of valid broker addresses.\n * Handles whitespace trimming and filters out empty elements.\n *\n * @param brokerString - Comma-separated broker addresses (e.g., \"broker1:9092, broker2:9092, , broker3:9092\")\n * @returns Array of trimmed, non-empty broker addresses\n */\nconst parseBrokerString = (brokerString: string): string[] =>\n brokerString\n .split(\",\")\n .map((b) => b.trim())\n .filter((b) => b.length > 0);\n\nexport type KafkaClientConfig = {\n clientId: string;\n broker: string;\n securityProtocol?: string; // e.g. \"SASL_SSL\" or \"PLAINTEXT\"\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string; // e.g. 
\"scram-sha-256\", \"plain\"\n};\n\n/**\n * Dynamically creates and connects a KafkaJS producer using the provided configuration.\n * Returns a connected producer instance.\n *\n * @param cfg - Kafka client configuration\n * @param logger - Logger instance\n * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)\n */\nexport async function getKafkaProducer(\n cfg: KafkaClientConfig,\n logger: Logger,\n maxMessageBytes?: number,\n): Promise<Producer> {\n const kafka = await getKafkaClient(cfg, logger);\n\n const producer = kafka.producer(createProducerConfig(maxMessageBytes));\n await producer.connect();\n return producer;\n}\n\n/**\n * Interface for logging functionality\n */\nexport interface Logger {\n logPrefix: string;\n log: (message: string) => void;\n error: (message: string) => void;\n warn: (message: string) => void;\n}\n\nexport const logError = (logger: Logger, e: Error): void => {\n logger.error(e.message);\n const stack = e.stack;\n if (stack) {\n logger.error(stack);\n }\n};\n\n/**\n * Builds SASL configuration for Kafka client authentication\n */\nconst buildSaslConfig = (\n logger: Logger,\n args: KafkaClientConfig,\n): SASLOptions | undefined => {\n const mechanism = args.saslMechanism ? args.saslMechanism.toLowerCase() : \"\";\n switch (mechanism) {\n case \"plain\":\n case \"scram-sha-256\":\n case \"scram-sha-512\":\n return {\n mechanism: mechanism,\n username: args.saslUsername || \"\",\n password: args.saslPassword || \"\",\n };\n default:\n logger.warn(`Unsupported SASL mechanism: ${args.saslMechanism}`);\n return undefined;\n }\n};\n\n/**\n * Dynamically creates a KafkaJS client configured with provided settings.\n * Use this to construct producers/consumers with custom options.\n */\nexport const getKafkaClient = async (\n cfg: KafkaClientConfig,\n logger: Logger,\n): Promise<Kafka> => {\n const brokers = parseBrokerString(cfg.broker || \"\");\n if (brokers.length === 0) {\n throw new Error(`No valid broker addresses found in: \"${cfg.broker}\"`);\n }\n\n logger.log(`Creating Kafka client with brokers: ${brokers.join(\", \")}`);\n logger.log(`Security protocol: ${cfg.securityProtocol || \"plaintext\"}`);\n logger.log(`Client ID: ${cfg.clientId}`);\n\n const saslConfig = buildSaslConfig(logger, cfg);\n\n return new Kafka({\n kafkaJS: {\n clientId: cfg.clientId,\n brokers,\n ssl: cfg.securityProtocol === \"SASL_SSL\",\n ...(saslConfig && { sasl: saslConfig }),\n retry: {\n initialRetryTime: RETRY_INITIAL_TIME_MS,\n maxRetryTime: MAX_RETRY_TIME_MS,\n retries: MAX_RETRIES,\n },\n },\n });\n};\n","#!/usr/bin/env node\n\n// This file is use to run the proper runners for moose based on the\n// the arguments passed to the file.\n// It registers ts-node to be able to interpret user code.\n\nimport { register } from \"ts-node\";\n\n// We register ts-node to be able to interpret TS user code.\nif (\n process.argv[2] == \"consumption-apis\" ||\n process.argv[2] == \"consumption-type-serializer\" ||\n process.argv[2] == \"dmv2-serializer\" ||\n // Streaming functions for dmv2 need to load moose internals\n process.argv[2] == \"streaming-functions\" ||\n process.argv[2] == \"scripts\"\n) {\n register({\n require: [\"tsconfig-paths/register\"],\n esm: true,\n experimentalTsImportSpecifiers: true,\n compiler: \"ts-patch/compiler\",\n compilerOptions: {\n plugins: [\n {\n transform: `./node_modules/@514labs/moose-lib/dist/compilerPlugin.js`,\n transformProgram: true,\n },\n {\n transform: \"typia/lib/transform\",\n },\n ],\n 
experimentalDecorators: true,\n },\n });\n} else {\n register({\n esm: true,\n experimentalTsImportSpecifiers: true,\n });\n}\n\nimport { dumpMooseInternal } from \"./dmv2/internal\";\nimport { runBlocks } from \"./blocks/runner\";\nimport { runApis } from \"./consumption-apis/runner\";\nimport { runStreamingFunctions } from \"./streaming-functions/runner\";\nimport { runExportSerializer } from \"./moduleExportSerializer\";\nimport { runApiTypeSerializer } from \"./consumption-apis/exportTypeSerializer\";\nimport { runScripts } from \"./scripts/runner\";\nimport process from \"process\";\n\nimport { Command } from \"commander\";\n\n// Import the StreamingFunctionArgs type\nimport type { StreamingFunctionArgs } from \"./streaming-functions/runner\";\n\nconst program = new Command();\n\nprogram\n .name(\"moose-runner\")\n .description(\"Moose runner for various operations\")\n .version(\"1.0.0\");\n\nprogram\n .command(\"dmv2-serializer\")\n .description(\"Load DMv2 index\")\n .action(() => {\n dumpMooseInternal();\n });\n\nprogram\n .command(\"export-serializer\")\n .description(\"Run export serializer\")\n .argument(\"<target-model>\", \"Target model to serialize\")\n .action((targetModel) => {\n runExportSerializer(targetModel);\n });\n\nprogram\n .command(\"blocks\")\n .description(\"Run blocks\")\n .argument(\"<blocks-dir>\", \"Directory containing blocks\")\n .argument(\"<clickhouse-db>\", \"Clickhouse database name\")\n .argument(\"<clickhouse-host>\", \"Clickhouse host\")\n .argument(\"<clickhouse-port>\", \"Clickhouse port\")\n .argument(\"<clickhouse-username>\", \"Clickhouse username\")\n .argument(\"<clickhouse-password>\", \"Clickhouse password\")\n .option(\"--clickhouse-use-ssl\", \"Use SSL for Clickhouse connection\", false)\n .action(\n (\n blocksDir,\n clickhouseDb,\n clickhouseHost,\n clickhousePort,\n clickhouseUsername,\n clickhousePassword,\n options,\n ) => {\n runBlocks({\n blocksDir,\n clickhouseConfig: {\n database: clickhouseDb,\n host: clickhouseHost,\n port: clickhousePort,\n username: clickhouseUsername,\n password: clickhousePassword,\n useSSL: options.clickhouseUseSsl,\n },\n });\n },\n );\n\nprogram\n .command(\"consumption-apis\")\n .description(\"Run consumption APIs\")\n .argument(\"<consumption-dir>\", \"Directory containing consumption APIs\")\n .argument(\"<clickhouse-db>\", \"Clickhouse database name\")\n .argument(\"<clickhouse-host>\", \"Clickhouse host\")\n .argument(\"<clickhouse-port>\", \"Clickhouse port\")\n .argument(\"<clickhouse-username>\", \"Clickhouse username\")\n .argument(\"<clickhouse-password>\", \"Clickhouse password\")\n .option(\"--clickhouse-use-ssl\", \"Use SSL for Clickhouse connection\", false)\n .option(\"--jwt-secret <secret>\", \"JWT public key for verification\")\n .option(\"--jwt-issuer <issuer>\", \"Expected JWT issuer\")\n .option(\"--jwt-audience <audience>\", \"Expected JWT audience\")\n .option(\n \"--enforce-auth\",\n \"Enforce authentication on all consumption APIs\",\n false,\n )\n .option(\"--temporal-url <url>\", \"Temporal server URL\")\n .option(\"--temporal-namespace <namespace>\", \"Temporal namespace\")\n .option(\"--client-cert <path>\", \"Path to client certificate\")\n .option(\"--client-key <path>\", \"Path to client key\")\n .option(\"--api-key <key>\", \"API key for authentication\")\n .option(\"--is-dmv2\", \"Whether this is a DMv2 consumption\", false)\n .option(\"--proxy-port <port>\", \"Port to run the proxy server on\", parseInt)\n .option(\n \"--worker-count <count>\",\n \"Number of worker 
processes for the consumption API cluster\",\n parseInt,\n )\n .action(\n (\n apisDir,\n clickhouseDb,\n clickhouseHost,\n clickhousePort,\n clickhouseUsername,\n clickhousePassword,\n options,\n ) => {\n runApis({\n apisDir,\n clickhouseConfig: {\n database: clickhouseDb,\n host: clickhouseHost,\n port: clickhousePort,\n username: clickhouseUsername,\n password: clickhousePassword,\n useSSL: options.clickhouseUseSsl,\n },\n jwtConfig: {\n secret: options.jwtSecret,\n issuer: options.jwtIssuer,\n audience: options.jwtAudience,\n },\n temporalConfig: {\n url: options.temporalUrl,\n namespace: options.temporalNamespace,\n clientCert: options.clientCert,\n clientKey: options.clientKey,\n apiKey: options.apiKey,\n },\n enforceAuth: options.enforceAuth,\n isDmv2: options.isDmv2,\n proxyPort: options.proxyPort,\n workerCount: options.workerCount,\n });\n },\n );\n\nprogram\n .command(\"streaming-functions\")\n .description(\"Run streaming functions\")\n .argument(\"<source-topic>\", \"Source topic configuration as JSON\")\n .argument(\"<function-file-path>\", \"Path to the function file\")\n .argument(\n \"<broker>\",\n \"Kafka broker address(es) - comma-separated for multiple brokers (e.g., 'broker1:9092, broker2:9092'). Whitespace around commas is automatically trimmed.\",\n )\n .argument(\"<max-subscriber-count>\", \"Maximum number of subscribers\")\n .option(\"--target-topic <target-topic>\", \"Target topic configuration as JSON\")\n .option(\"--sasl-username <username>\", \"SASL username\")\n .option(\"--sasl-password <password>\", \"SASL password\")\n .option(\"--sasl-mechanism <mechanism>\", \"SASL mechanism\")\n .option(\"--security-protocol <protocol>\", \"Security protocol\")\n .option(\"--is-dmv2\", \"Whether this is a DMv2 function\", false)\n .action(\n (sourceTopic, functionFilePath, broker, maxSubscriberCount, options) => {\n const config: StreamingFunctionArgs = {\n sourceTopic: JSON.parse(sourceTopic),\n targetTopic:\n options.targetTopic ? 
JSON.parse(options.targetTopic) : undefined,\n functionFilePath,\n broker,\n maxSubscriberCount: parseInt(maxSubscriberCount),\n isDmv2: options.isDmv2,\n saslUsername: options.saslUsername,\n saslPassword: options.saslPassword,\n saslMechanism: options.saslMechanism,\n securityProtocol: options.securityProtocol,\n };\n runStreamingFunctions(config);\n },\n );\n\nprogram\n .command(\"consumption-type-serializer\")\n .description(\"Run consumption type serializer\")\n .argument(\"<target-model>\", \"Target model to serialize\")\n .action((targetModel) => {\n runApiTypeSerializer(targetModel);\n });\n\nprogram\n .command(\"scripts\")\n .description(\"Run scripts\")\n .option(\"--temporal-url <url>\", \"Temporal server URL\")\n .option(\"--temporal-namespace <namespace>\", \"Temporal namespace\")\n .option(\"--client-cert <path>\", \"Path to client certificate\")\n .option(\"--client-key <path>\", \"Path to client key\")\n .option(\"--api-key <key>\", \"API key for authentication\")\n .action((options) => {\n runScripts({\n temporalConfig: {\n url: options.temporalUrl,\n namespace: options.temporalNamespace,\n clientCert: options.clientCert,\n clientKey: options.clientKey,\n apiKey: options.apiKey,\n },\n });\n });\n\nprogram.parse();\n","/**\n * @module internal\n * Internal implementation details for the Moose v2 data model (dmv2).\n *\n * This module manages the registration of user-defined dmv2 resources (Tables, Streams, APIs, etc.)\n * and provides functions to serialize these resources into a JSON format (`InfrastructureMap`)\n * expected by the Moose infrastructure management system. It also includes helper functions\n * to retrieve registered handler functions (for streams and APIs) and the base class\n * (`TypedBase`) used by dmv2 resource classes.\n *\n * @internal This module is intended for internal use by the Moose library and compiler plugin.\n * Its API might change without notice.\n */\nimport process from \"process\";\nimport { Api, IngestApi, SqlResource, Task, Workflow } from \"./index\";\nimport { IJsonSchemaCollection } from \"typia/src/schemas/json/IJsonSchemaCollection\";\nimport { Column } from \"../dataModels/dataModelTypes\";\nimport { ClickHouseEngines, ApiUtil } from \"../index\";\nimport {\n OlapTable,\n OlapConfig,\n ReplacingMergeTreeConfig,\n SummingMergeTreeConfig,\n ReplicatedMergeTreeConfig,\n ReplicatedReplacingMergeTreeConfig,\n ReplicatedAggregatingMergeTreeConfig,\n ReplicatedSummingMergeTreeConfig,\n ReplicatedCollapsingMergeTreeConfig,\n ReplicatedVersionedCollapsingMergeTreeConfig,\n S3QueueConfig,\n} from \"./sdk/olapTable\";\nimport {\n ConsumerConfig,\n KafkaSchemaConfig,\n Stream,\n TransformConfig,\n} from \"./sdk/stream\";\nimport { compilerLog } from \"../commons\";\nimport { WebApp } from \"./sdk/webApp\";\n\n/**\n * Gets the source directory from environment variable or defaults to \"app\"\n */\nfunction getSourceDir(): string {\n return process.env.MOOSE_SOURCE_DIR || \"app\";\n}\n\n/**\n * Client-only mode check. 
When true, resource registration is permissive\n * (duplicates overwrite silently instead of throwing).\n * Set via MOOSE_CLIENT_ONLY=true environment variable.\n *\n * This enables Next.js apps to import OlapTable definitions for type-safe\n * queries without the Moose runtime, avoiding \"already exists\" errors on HMR.\n *\n * @returns true if MOOSE_CLIENT_ONLY environment variable is set to \"true\"\n */\nexport const isClientOnlyMode = (): boolean =>\n process.env.MOOSE_CLIENT_ONLY === \"true\";\n\n/**\n * Internal registry holding all defined Moose dmv2 resources.\n * Populated by the constructors of OlapTable, Stream, IngestApi, etc.\n * Accessed via `getMooseInternal()`.\n */\nconst moose_internal = {\n tables: new Map<string, OlapTable<any>>(),\n streams: new Map<string, Stream<any>>(),\n ingestApis: new Map<string, IngestApi<any>>(),\n apis: new Map<string, Api<any>>(),\n sqlResources: new Map<string, SqlResource>(),\n workflows: new Map<string, Workflow>(),\n webApps: new Map<string, WebApp>(),\n};\n/**\n * Default retention period for streams if not specified (7 days in seconds).\n */\nconst defaultRetentionPeriod = 60 * 60 * 24 * 7;\n\n/**\n * Engine-specific configuration types using discriminated union pattern\n */\ninterface MergeTreeEngineConfig {\n engine: \"MergeTree\";\n}\n\ninterface ReplacingMergeTreeEngineConfig {\n engine: \"ReplacingMergeTree\";\n ver?: string;\n isDeleted?: string;\n}\n\ninterface AggregatingMergeTreeEngineConfig {\n engine: \"AggregatingMergeTree\";\n}\n\ninterface SummingMergeTreeEngineConfig {\n engine: \"SummingMergeTree\";\n columns?: string[];\n}\n\ninterface CollapsingMergeTreeEngineConfig {\n engine: \"CollapsingMergeTree\";\n sign: string;\n}\n\ninterface VersionedCollapsingMergeTreeEngineConfig {\n engine: \"VersionedCollapsingMergeTree\";\n sign: string;\n ver: string;\n}\n\ninterface ReplicatedMergeTreeEngineConfig {\n engine: \"ReplicatedMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedReplacingMergeTreeEngineConfig {\n engine: \"ReplicatedReplacingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n ver?: string;\n isDeleted?: string;\n}\n\ninterface ReplicatedAggregatingMergeTreeEngineConfig {\n engine: \"ReplicatedAggregatingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n}\n\ninterface ReplicatedSummingMergeTreeEngineConfig {\n engine: \"ReplicatedSummingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n columns?: string[];\n}\n\ninterface ReplicatedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n}\n\ninterface ReplicatedVersionedCollapsingMergeTreeEngineConfig {\n engine: \"ReplicatedVersionedCollapsingMergeTree\";\n keeperPath?: string;\n replicaName?: string;\n sign: string;\n ver: string;\n}\n\ninterface S3QueueEngineConfig {\n engine: \"S3Queue\";\n s3Path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n headers?: { [key: string]: string };\n}\n\ninterface S3EngineConfig {\n engine: \"S3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n partitionStrategy?: string;\n partitionColumnsInDataFile?: string;\n}\n\ninterface BufferEngineConfig {\n engine: \"Buffer\";\n targetDatabase: string;\n targetTable: string;\n numLayers: number;\n minTime: number;\n maxTime: number;\n minRows: number;\n maxRows: number;\n minBytes: number;\n maxBytes: 
number;\n flushTime?: number;\n flushRows?: number;\n flushBytes?: number;\n}\n\ninterface DistributedEngineConfig {\n engine: \"Distributed\";\n cluster: string;\n targetDatabase: string;\n targetTable: string;\n shardingKey?: string;\n policyName?: string;\n}\n\ninterface IcebergS3EngineConfig {\n engine: \"IcebergS3\";\n path: string;\n format: string;\n awsAccessKeyId?: string;\n awsSecretAccessKey?: string;\n compression?: string;\n}\n\ninterface KafkaEngineConfig {\n engine: \"Kafka\";\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n}\n\n/**\n * Union type for all supported engine configurations\n */\ntype EngineConfig =\n | MergeTreeEngineConfig\n | ReplacingMergeTreeEngineConfig\n | AggregatingMergeTreeEngineConfig\n | SummingMergeTreeEngineConfig\n | CollapsingMergeTreeEngineConfig\n | VersionedCollapsingMergeTreeEngineConfig\n | ReplicatedMergeTreeEngineConfig\n | ReplicatedReplacingMergeTreeEngineConfig\n | ReplicatedAggregatingMergeTreeEngineConfig\n | ReplicatedSummingMergeTreeEngineConfig\n | ReplicatedCollapsingMergeTreeEngineConfig\n | ReplicatedVersionedCollapsingMergeTreeEngineConfig\n | S3QueueEngineConfig\n | S3EngineConfig\n | BufferEngineConfig\n | DistributedEngineConfig\n | IcebergS3EngineConfig\n | KafkaEngineConfig;\n\n/**\n * JSON representation of an OLAP table configuration.\n */\ninterface TableJson {\n /** The name of the table. */\n name: string;\n /** Array defining the table's columns and their types. */\n columns: Column[];\n /** ORDER BY clause: either array of column names or a single ClickHouse expression. */\n orderBy: string[] | string;\n /** The column name used for the PARTITION BY clause. */\n partitionBy?: string;\n /** SAMPLE BY expression for approximate query processing. */\n sampleByExpression?: string;\n /** PRIMARY KEY expression (overrides column-level primary_key flags when specified). */\n primaryKeyExpression?: string;\n /** Engine configuration with type-safe, engine-specific parameters */\n engineConfig?: EngineConfig;\n /** Optional version string for the table configuration. */\n version?: string;\n /** Optional metadata for the table (e.g., description). */\n metadata?: { description?: string };\n /** Lifecycle management setting for the table. */\n lifeCycle?: string;\n /** Optional table-level settings that can be modified with ALTER TABLE MODIFY SETTING. */\n tableSettings?: { [key: string]: string };\n /** Optional table indexes */\n indexes?: {\n name: string;\n expression: string;\n type: string;\n arguments: string[];\n granularity: number;\n }[];\n /** Optional table-level TTL expression (without leading 'TTL'). */\n ttl?: string;\n /** Optional database name for multi-database support. */\n database?: string;\n /** Optional cluster name for ON CLUSTER support. */\n cluster?: string;\n}\n/**\n * Represents a target destination for data flow, typically a stream.\n */\ninterface Target {\n /** The name of the target resource (e.g., stream name). */\n name: string;\n /** The kind of the target resource. */\n kind: \"stream\"; // may add `| \"table\"` in the future\n /** Optional version string of the target resource's configuration. */\n version?: string;\n /** Optional metadata for the target (e.g., description for function processes). */\n metadata?: { description?: string };\n /** Optional source file path where this transform was declared. 
*/\n sourceFile?: string;\n}\n\n/**\n * Represents a consumer attached to a stream.\n */\ninterface Consumer {\n /** Optional version string for the consumer configuration. */\n version?: string;\n /** Optional source file path where this consumer was declared. */\n sourceFile?: string;\n}\n\n/**\n * JSON representation of a Stream/Topic configuration.\n */\ninterface StreamJson {\n /** The name of the stream/topic. */\n name: string;\n /** Array defining the message schema (columns/fields). */\n columns: Column[];\n /** Data retention period in seconds. */\n retentionPeriod: number;\n /** Number of partitions for the stream/topic. */\n partitionCount: number;\n /** Optional name of the OLAP table this stream automatically syncs to. */\n targetTable?: string;\n /** Optional version of the target OLAP table configuration. */\n targetTableVersion?: string;\n /** Optional version string for the stream configuration. */\n version?: string;\n /** List of target streams this stream transforms data into. */\n transformationTargets: Target[];\n /** Flag indicating if a multi-transform function (`_multipleTransformations`) is defined. */\n hasMultiTransform: boolean;\n /** List of consumers attached to this stream. */\n consumers: Consumer[];\n /** Optional description for the stream. */\n metadata?: { description?: string };\n /** Lifecycle management setting for the stream. */\n lifeCycle?: string;\n /** Optional Schema Registry config */\n schemaConfig?: KafkaSchemaConfig;\n}\n/**\n * JSON representation of an Ingest API configuration.\n */\ninterface IngestApiJson {\n /** The name of the Ingest API endpoint. */\n name: string;\n /** Array defining the expected input schema (columns/fields). */\n columns: Column[];\n\n /** The target stream where ingested data is written. */\n writeTo: Target;\n /** The DLQ if the data does not fit the schema. */\n deadLetterQueue?: string;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the ingestion endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n /** JSON schema */\n schema: IJsonSchemaCollection.IV3_1;\n /**\n * Whether this API allows extra fields beyond the defined columns.\n * When true, extra fields in payloads are passed through to streaming functions.\n */\n allowExtraFields?: boolean;\n}\n\n/**\n * JSON representation of an API configuration.\n */\ninterface ApiJson {\n /** The name of the API endpoint. */\n name: string;\n /** Array defining the expected query parameters schema. */\n queryParams: Column[];\n /** JSON schema definition of the API's response body. */\n responseSchema: IJsonSchemaCollection.IV3_1;\n /** Optional version string for the API configuration. */\n version?: string;\n /** Optional custom path for the API endpoint. */\n path?: string;\n /** Optional description for the API. */\n metadata?: { description?: string };\n}\n\n/**\n * Represents the unique signature of an infrastructure component (Table, Topic, etc.).\n * Used for defining dependencies between SQL resources.\n */\ninterface InfrastructureSignatureJson {\n /** A unique identifier for the resource instance (often name + version). */\n id: string;\n /** The kind/type of the infrastructure component. 
*/\n kind:\n | \"Table\"\n | \"Topic\"\n | \"ApiEndpoint\"\n | \"TopicToTableSyncProcess\"\n | \"View\"\n | \"SqlResource\";\n}\n\ninterface WorkflowJson {\n name: string;\n retries?: number;\n timeout?: string;\n schedule?: string;\n}\n\ninterface WebAppJson {\n name: string;\n mountPath: string;\n metadata?: { description?: string };\n}\n\ninterface SqlResourceJson {\n /** The name of the SQL resource. */\n name: string;\n /** Array of SQL DDL statements required to create the resource. */\n setup: readonly string[];\n /** Array of SQL DDL statements required to drop the resource. */\n teardown: readonly string[];\n\n /** List of infrastructure components (by signature) that this resource reads from. */\n pullsDataFrom: InfrastructureSignatureJson[];\n /** List of infrastructure components (by signature) that this resource writes to. */\n pushesDataTo: InfrastructureSignatureJson[];\n /** Optional source file path where this resource is defined. */\n sourceFile?: string;\n /** Optional source line number where this resource is defined. */\n sourceLine?: number;\n /** Optional source column number where this resource is defined. */\n sourceColumn?: number;\n}\n\n/**\n * Type guard: Check if config is S3QueueConfig\n */\nfunction isS3QueueConfig(\n config: OlapConfig<any>,\n): config is S3QueueConfig<any> {\n return \"engine\" in config && config.engine === ClickHouseEngines.S3Queue;\n}\n\n/**\n * Type guard: Check if config has a replicated engine\n * Checks if the engine value is one of the replicated engine types\n */\nfunction hasReplicatedEngine(\n config: OlapConfig<any>,\n): config is\n | ReplicatedMergeTreeConfig<any>\n | ReplicatedReplacingMergeTreeConfig<any>\n | ReplicatedAggregatingMergeTreeConfig<any>\n | ReplicatedSummingMergeTreeConfig<any>\n | ReplicatedCollapsingMergeTreeConfig<any>\n | ReplicatedVersionedCollapsingMergeTreeConfig<any> {\n if (!(\"engine\" in config)) {\n return false;\n }\n\n const engine = config.engine as ClickHouseEngines;\n // Check if engine is one of the replicated engine types\n return (\n engine === ClickHouseEngines.ReplicatedMergeTree ||\n engine === ClickHouseEngines.ReplicatedReplacingMergeTree ||\n engine === ClickHouseEngines.ReplicatedAggregatingMergeTree ||\n engine === ClickHouseEngines.ReplicatedSummingMergeTree ||\n engine === ClickHouseEngines.ReplicatedCollapsingMergeTree ||\n engine === ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree\n );\n}\n\n/**\n * Extract engine value from table config, handling both legacy and new formats\n */\nfunction extractEngineValue(config: OlapConfig<any>): ClickHouseEngines {\n // Legacy config without engine property defaults to MergeTree\n if (!(\"engine\" in config)) {\n return ClickHouseEngines.MergeTree;\n }\n\n // All engines (replicated and non-replicated) have engine as direct value\n return config.engine as ClickHouseEngines;\n}\n\n/**\n * Convert engine config for basic MergeTree engines\n */\nfunction convertBasicEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n switch (engine) {\n case ClickHouseEngines.MergeTree:\n return { engine: \"MergeTree\" };\n\n case ClickHouseEngines.AggregatingMergeTree:\n return { engine: \"AggregatingMergeTree\" };\n\n case ClickHouseEngines.ReplacingMergeTree: {\n const replacingConfig = config as ReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplacingMergeTree\",\n ver: replacingConfig.ver,\n isDeleted: replacingConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.SummingMergeTree: {\n const 
summingConfig = config as SummingMergeTreeConfig<any>;\n return {\n engine: \"SummingMergeTree\",\n columns: summingConfig.columns,\n };\n }\n\n case ClickHouseEngines.CollapsingMergeTree: {\n const collapsingConfig = config as any; // CollapsingMergeTreeConfig<any>\n return {\n engine: \"CollapsingMergeTree\",\n sign: collapsingConfig.sign,\n };\n }\n\n case ClickHouseEngines.VersionedCollapsingMergeTree: {\n const versionedConfig = config as any; // VersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"VersionedCollapsingMergeTree\",\n sign: versionedConfig.sign,\n ver: versionedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert engine config for replicated MergeTree engines\n */\nfunction convertReplicatedEngineConfig(\n engine: ClickHouseEngines,\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n // First check if this is a replicated engine config\n if (!hasReplicatedEngine(config)) {\n return undefined;\n }\n\n switch (engine) {\n case ClickHouseEngines.ReplicatedMergeTree: {\n const replicatedConfig = config as ReplicatedMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedReplacingMergeTree: {\n const replicatedConfig =\n config as ReplicatedReplacingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedReplacingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n ver: replicatedConfig.ver,\n isDeleted: replicatedConfig.isDeleted,\n };\n }\n\n case ClickHouseEngines.ReplicatedAggregatingMergeTree: {\n const replicatedConfig =\n config as ReplicatedAggregatingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedAggregatingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n };\n }\n\n case ClickHouseEngines.ReplicatedSummingMergeTree: {\n const replicatedConfig = config as ReplicatedSummingMergeTreeConfig<any>;\n return {\n engine: \"ReplicatedSummingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n columns: replicatedConfig.columns,\n };\n }\n\n case ClickHouseEngines.ReplicatedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n };\n }\n\n case ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree: {\n const replicatedConfig = config as any; // ReplicatedVersionedCollapsingMergeTreeConfig<any>\n return {\n engine: \"ReplicatedVersionedCollapsingMergeTree\",\n keeperPath: replicatedConfig.keeperPath,\n replicaName: replicatedConfig.replicaName,\n sign: replicatedConfig.sign,\n ver: replicatedConfig.ver,\n };\n }\n\n default:\n return undefined;\n }\n}\n\n/**\n * Convert S3Queue engine config\n * Uses type guard for fully type-safe property access\n */\nfunction convertS3QueueEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!isS3QueueConfig(config)) {\n return undefined;\n }\n\n return {\n engine: \"S3Queue\",\n s3Path: config.s3Path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n headers: config.headers,\n };\n}\n\n/**\n * Convert S3 engine config\n */\nfunction 
convertS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.S3) {\n return undefined;\n }\n\n return {\n engine: \"S3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n partitionStrategy: config.partitionStrategy,\n partitionColumnsInDataFile: config.partitionColumnsInDataFile,\n };\n}\n\n/**\n * Convert Buffer engine config\n */\nfunction convertBufferEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Buffer) {\n return undefined;\n }\n\n return {\n engine: \"Buffer\",\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n numLayers: config.numLayers,\n minTime: config.minTime,\n maxTime: config.maxTime,\n minRows: config.minRows,\n maxRows: config.maxRows,\n minBytes: config.minBytes,\n maxBytes: config.maxBytes,\n flushTime: config.flushTime,\n flushRows: config.flushRows,\n flushBytes: config.flushBytes,\n };\n}\n\n/**\n * Convert Distributed engine config\n */\nfunction convertDistributedEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (\n !(\"engine\" in config) ||\n config.engine !== ClickHouseEngines.Distributed\n ) {\n return undefined;\n }\n\n return {\n engine: \"Distributed\",\n cluster: config.cluster,\n targetDatabase: config.targetDatabase,\n targetTable: config.targetTable,\n shardingKey: config.shardingKey,\n policyName: config.policyName,\n };\n}\n\n/**\n * Convert IcebergS3 engine config\n */\nfunction convertIcebergS3EngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.IcebergS3) {\n return undefined;\n }\n\n return {\n engine: \"IcebergS3\",\n path: config.path,\n format: config.format,\n awsAccessKeyId: config.awsAccessKeyId,\n awsSecretAccessKey: config.awsSecretAccessKey,\n compression: config.compression,\n };\n}\n\n/**\n * Convert Kafka engine configuration\n */\nfunction convertKafkaEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n if (!(\"engine\" in config) || config.engine !== ClickHouseEngines.Kafka) {\n return undefined;\n }\n\n return {\n engine: \"Kafka\",\n brokerList: config.brokerList,\n topicList: config.topicList,\n groupName: config.groupName,\n format: config.format,\n };\n}\n\n/**\n * Convert table configuration to engine config\n */\nfunction convertTableConfigToEngineConfig(\n config: OlapConfig<any>,\n): EngineConfig | undefined {\n const engine = extractEngineValue(config);\n\n // Try basic engines first\n const basicConfig = convertBasicEngineConfig(engine, config);\n if (basicConfig) {\n return basicConfig;\n }\n\n // Try replicated engines\n const replicatedConfig = convertReplicatedEngineConfig(engine, config);\n if (replicatedConfig) {\n return replicatedConfig;\n }\n\n // Handle S3Queue\n if (engine === ClickHouseEngines.S3Queue) {\n return convertS3QueueEngineConfig(config);\n }\n\n // Handle S3\n if (engine === ClickHouseEngines.S3) {\n return convertS3EngineConfig(config);\n }\n\n // Handle Buffer\n if (engine === ClickHouseEngines.Buffer) {\n return convertBufferEngineConfig(config);\n }\n\n // Handle Distributed\n if (engine === ClickHouseEngines.Distributed) {\n return convertDistributedEngineConfig(config);\n }\n\n // Handle IcebergS3\n if (engine === 
ClickHouseEngines.IcebergS3) {\n return convertIcebergS3EngineConfig(config);\n }\n\n // Handle Kafka\n if (engine === ClickHouseEngines.Kafka) {\n return convertKafkaEngineConfig(config);\n }\n\n return undefined;\n}\n\nexport const toInfraMap = (registry: typeof moose_internal) => {\n const tables: { [key: string]: TableJson } = {};\n const topics: { [key: string]: StreamJson } = {};\n const ingestApis: { [key: string]: IngestApiJson } = {};\n const apis: { [key: string]: ApiJson } = {};\n const sqlResources: { [key: string]: SqlResourceJson } = {};\n const workflows: { [key: string]: WorkflowJson } = {};\n const webApps: { [key: string]: WebAppJson } = {};\n\n registry.tables.forEach((table) => {\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n // If the table is part of an IngestPipeline, inherit metadata if not set\n let metadata = (table as any).metadata;\n if (!metadata && table.config && (table as any).pipelineParent) {\n metadata = (table as any).pipelineParent.metadata;\n }\n // Create type-safe engine configuration\n const engineConfig: EngineConfig | undefined =\n convertTableConfigToEngineConfig(table.config);\n\n // Get table settings, applying defaults for S3Queue\n let tableSettings: { [key: string]: string } | undefined = undefined;\n\n if (table.config.settings) {\n // Convert all settings to strings, filtering out undefined values\n tableSettings = Object.entries(table.config.settings).reduce(\n (acc, [key, value]) => {\n if (value !== undefined) {\n acc[key] = String(value);\n }\n return acc;\n },\n {} as { [key: string]: string },\n );\n }\n\n // Apply default settings for S3Queue if not already specified\n if (engineConfig?.engine === \"S3Queue\") {\n if (!tableSettings) {\n tableSettings = {};\n }\n // Set default mode to 'unordered' if not specified\n if (!tableSettings.mode) {\n tableSettings.mode = \"unordered\";\n }\n }\n\n // Determine ORDER BY from config\n // Note: engines like Buffer and Distributed don't support orderBy/partitionBy/sampleBy\n const hasOrderByFields =\n \"orderByFields\" in table.config &&\n Array.isArray(table.config.orderByFields) &&\n table.config.orderByFields.length > 0;\n const hasOrderByExpression =\n \"orderByExpression\" in table.config &&\n typeof table.config.orderByExpression === \"string\" &&\n table.config.orderByExpression.length > 0;\n if (hasOrderByFields && hasOrderByExpression) {\n throw new Error(\n `Table ${table.name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n const orderBy: string[] | string =\n hasOrderByExpression && \"orderByExpression\" in table.config ?\n (table.config.orderByExpression ?? \"\")\n : \"orderByFields\" in table.config ? (table.config.orderByFields ?? [])\n : [];\n\n tables[id] = {\n name: table.name,\n columns: table.columnArray,\n orderBy,\n partitionBy:\n \"partitionBy\" in table.config ? table.config.partitionBy : undefined,\n sampleByExpression:\n \"sampleByExpression\" in table.config ?\n table.config.sampleByExpression\n : undefined,\n primaryKeyExpression:\n \"primaryKeyExpression\" in table.config ?\n table.config.primaryKeyExpression\n : undefined,\n engineConfig,\n version: table.config.version,\n metadata,\n lifeCycle: table.config.lifeCycle,\n // Map 'settings' to 'tableSettings' for internal use\n tableSettings:\n tableSettings && Object.keys(tableSettings).length > 0 ?\n tableSettings\n : undefined,\n indexes:\n table.config.indexes?.map((i) => ({\n ...i,\n granularity: i.granularity === undefined ? 
1 : i.granularity,\n arguments: i.arguments === undefined ? [] : i.arguments,\n })) || [],\n ttl: table.config.ttl,\n database: table.config.database,\n cluster: table.config.cluster,\n };\n });\n\n registry.streams.forEach((stream) => {\n // If the stream is part of an IngestPipeline, inherit metadata if not set\n let metadata = stream.metadata;\n if (!metadata && stream.config && (stream as any).pipelineParent) {\n metadata = (stream as any).pipelineParent.metadata;\n }\n const transformationTargets: Target[] = [];\n const consumers: Consumer[] = [];\n\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([destination, _, config]) => {\n transformationTargets.push({\n kind: \"stream\",\n name: destinationName,\n version: config.version,\n metadata: config.metadata,\n sourceFile: config.sourceFile,\n });\n });\n });\n\n stream._consumers.forEach((consumer) => {\n consumers.push({\n version: consumer.config.version,\n sourceFile: consumer.config.sourceFile,\n });\n });\n\n topics[stream.name] = {\n name: stream.name,\n columns: stream.columnArray,\n targetTable: stream.config.destination?.name,\n targetTableVersion: stream.config.destination?.config.version,\n retentionPeriod: stream.config.retentionPeriod ?? defaultRetentionPeriod,\n partitionCount: stream.config.parallelism ?? 1,\n version: stream.config.version,\n transformationTargets,\n hasMultiTransform: stream._multipleTransformations === undefined,\n consumers,\n metadata,\n lifeCycle: stream.config.lifeCycle,\n schemaConfig: stream.config.schemaConfig,\n };\n });\n\n registry.ingestApis.forEach((api) => {\n // If the ingestApi is part of an IngestPipeline, inherit metadata if not set\n let metadata = api.metadata;\n if (!metadata && api.config && (api as any).pipelineParent) {\n metadata = (api as any).pipelineParent.metadata;\n }\n ingestApis[api.name] = {\n name: api.name,\n columns: api.columnArray,\n version: api.config.version,\n path: api.config.path,\n writeTo: {\n kind: \"stream\",\n name: api.config.destination.name,\n },\n deadLetterQueue: api.config.deadLetterQueue?.name,\n metadata,\n schema: api.schema,\n allowExtraFields: api.allowExtraFields,\n };\n });\n\n registry.apis.forEach((api, key) => {\n const rustKey =\n api.config.version ? 
`${api.name}:${api.config.version}` : api.name;\n apis[rustKey] = {\n name: api.name,\n queryParams: api.columnArray,\n responseSchema: api.responseSchema,\n version: api.config.version,\n path: api.config.path,\n metadata: api.metadata,\n };\n });\n\n registry.sqlResources.forEach((sqlResource) => {\n sqlResources[sqlResource.name] = {\n name: sqlResource.name,\n setup: sqlResource.setup,\n teardown: sqlResource.teardown,\n sourceFile: sqlResource.sourceFile,\n sourceLine: sqlResource.sourceLine,\n sourceColumn: sqlResource.sourceColumn,\n\n pullsDataFrom: sqlResource.pullsDataFrom.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n pushesDataTo: sqlResource.pushesDataTo.map((r) => {\n if (r.kind === \"OlapTable\") {\n const table = r as OlapTable<any>;\n const id =\n table.config.version ?\n `${table.name}_${table.config.version}`\n : table.name;\n return {\n id,\n kind: \"Table\",\n };\n } else if (r.kind === \"SqlResource\") {\n const resource = r as SqlResource;\n return {\n id: resource.name,\n kind: \"SqlResource\",\n };\n } else {\n throw new Error(`Unknown sql resource dependency type: ${r}`);\n }\n }),\n };\n });\n\n registry.workflows.forEach((workflow) => {\n workflows[workflow.name] = {\n name: workflow.name,\n retries: workflow.config.retries,\n timeout: workflow.config.timeout,\n schedule: workflow.config.schedule,\n };\n });\n\n registry.webApps.forEach((webApp) => {\n webApps[webApp.name] = {\n name: webApp.name,\n mountPath: webApp.config.mountPath || \"/\",\n metadata: webApp.config.metadata,\n };\n });\n\n return {\n topics,\n tables,\n ingestApis,\n apis,\n sqlResources,\n workflows,\n webApps,\n };\n};\n\n/**\n * Retrieves the global internal Moose resource registry.\n * Uses `globalThis` to ensure a single registry instance.\n *\n * @returns The internal Moose resource registry.\n */\nexport const getMooseInternal = (): typeof moose_internal =>\n (globalThis as any).moose_internal;\n\n// work around for variable visibility in compiler output\nif (getMooseInternal() === undefined) {\n (globalThis as any).moose_internal = moose_internal;\n}\n\n/**\n * Loads the user's application entry point (`app/index.ts`) to register resources,\n * then generates and prints the infrastructure map as JSON.\n *\n * This function is the main entry point used by the Moose infrastructure system\n * to discover the defined resources.\n * It prints the JSON map surrounded by specific delimiters (`___MOOSE_STUFF___start`\n * and `end___MOOSE_STUFF___`) for easy extraction by the calling process.\n */\nexport const dumpMooseInternal = async () => {\n loadIndex();\n\n console.log(\n \"___MOOSE_STUFF___start\",\n JSON.stringify(toInfraMap(getMooseInternal())),\n \"end___MOOSE_STUFF___\",\n );\n};\n\nconst loadIndex = () => {\n // Clear the registry before loading to support hot reloading\n const registry = getMooseInternal();\n registry.tables.clear();\n registry.streams.clear();\n registry.ingestApis.clear();\n registry.apis.clear();\n registry.sqlResources.clear();\n registry.workflows.clear();\n registry.webApps.clear();\n\n // Clear require cache for app directory to pick up changes\n const appDir 
= `${process.cwd()}/${getSourceDir()}`;\n Object.keys(require.cache).forEach((key) => {\n if (key.startsWith(appDir)) {\n delete require.cache[key];\n }\n });\n\n try {\n require(`${process.cwd()}/${getSourceDir()}/index.ts`);\n } catch (error) {\n let hint: string | undefined;\n const details = error instanceof Error ? error.message : String(error);\n if (details.includes(\"ERR_REQUIRE_ESM\") || details.includes(\"ES Module\")) {\n hint =\n \"The file or its dependencies are ESM-only. Switch to packages that dual-support CJS & ESM, or upgrade to Node 22.12+. \" +\n \"If you must use Node 20, you may try Node 20.19\\n\\n\";\n }\n\n const errorMsg = `${hint ?? \"\"}${details}`;\n const cause = error instanceof Error ? error : undefined;\n throw new Error(errorMsg, { cause });\n }\n};\n\n/**\n * Loads the user's application entry point and extracts all registered stream\n * transformation and consumer functions.\n *\n * @returns A Map where keys are unique identifiers for transformations/consumers\n * (e.g., \"sourceStream_destStream_version\", \"sourceStream_<no-target>_version\")\n * and values are tuples containing: [handler function, config, source stream columns]\n */\nexport const getStreamingFunctions = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n const transformFunctions = new Map<\n string,\n [\n (data: unknown) => unknown,\n TransformConfig<any> | ConsumerConfig<any>,\n Column[],\n ]\n >();\n\n registry.streams.forEach((stream) => {\n stream._transformations.forEach((transforms, destinationName) => {\n transforms.forEach(([_, transform, config]) => {\n const transformFunctionKey = `${stream.name}_${destinationName}${config.version ? `_${config.version}` : \"\"}`;\n compilerLog(`getStreamingFunctions: ${transformFunctionKey}`);\n transformFunctions.set(transformFunctionKey, [\n transform,\n config,\n stream.columnArray,\n ]);\n });\n });\n\n stream._consumers.forEach((consumer) => {\n const consumerFunctionKey = `${stream.name}_<no-target>${consumer.config.version ? `_${consumer.config.version}` : \"\"}`;\n transformFunctions.set(consumerFunctionKey, [\n consumer.consumer,\n consumer.config,\n stream.columnArray,\n ]);\n });\n });\n\n return transformFunctions;\n};\n\n/**\n * Loads the user's application entry point and extracts all registered\n * API handler functions.\n *\n * @returns A Map where keys are the names of the APIs and values\n * are their corresponding handler functions.\n */\nexport const getApis = async () => {\n loadIndex();\n const apiFunctions = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n const registry = getMooseInternal();\n // Single pass: store full keys, track aliasing decisions\n const versionCountByName = new Map<string, number>();\n const nameToSoleVersionHandler = new Map<\n string,\n (params: unknown, utils: ApiUtil) => unknown\n >();\n\n registry.apis.forEach((api, key) => {\n const handler = api.getHandler();\n apiFunctions.set(key, handler);\n\n if (!api.config.version) {\n // Explicit unversioned takes precedence for alias\n if (!apiFunctions.has(api.name)) {\n apiFunctions.set(api.name, handler);\n }\n nameToSoleVersionHandler.delete(api.name);\n versionCountByName.delete(api.name);\n } else if (!apiFunctions.has(api.name)) {\n // Only track versioned for alias if no explicit unversioned present\n const count = (versionCountByName.get(api.name) ?? 
0) + 1;\n versionCountByName.set(api.name, count);\n if (count === 1) {\n nameToSoleVersionHandler.set(api.name, handler);\n } else {\n nameToSoleVersionHandler.delete(api.name);\n }\n }\n });\n\n // Finalize aliases for names that have exactly one versioned API and no unversioned\n nameToSoleVersionHandler.forEach((handler, name) => {\n if (!apiFunctions.has(name)) {\n apiFunctions.set(name, handler);\n }\n });\n\n return apiFunctions;\n};\n\nexport const dlqSchema: IJsonSchemaCollection.IV3_1 = {\n version: \"3.1\",\n components: {\n schemas: {\n DeadLetterModel: {\n type: \"object\",\n properties: {\n originalRecord: {\n $ref: \"#/components/schemas/Recordstringany\",\n },\n errorMessage: {\n type: \"string\",\n },\n errorType: {\n type: \"string\",\n },\n failedAt: {\n type: \"string\",\n format: \"date-time\",\n },\n source: {\n oneOf: [\n {\n const: \"api\",\n },\n {\n const: \"transform\",\n },\n {\n const: \"table\",\n },\n ],\n },\n },\n required: [\n \"originalRecord\",\n \"errorMessage\",\n \"errorType\",\n \"failedAt\",\n \"source\",\n ],\n },\n Recordstringany: {\n type: \"object\",\n properties: {},\n required: [],\n description: \"Construct a type with a set of properties K of type T\",\n additionalProperties: {},\n },\n },\n },\n schemas: [\n {\n $ref: \"#/components/schemas/DeadLetterModel\",\n },\n ],\n};\n\nexport const dlqColumns: Column[] = [\n {\n name: \"originalRecord\",\n data_type: \"Json\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorMessage\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"errorType\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"failedAt\",\n data_type: \"DateTime\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n {\n name: \"source\",\n data_type: \"String\",\n primary_key: false,\n required: true,\n unique: false,\n default: null,\n annotations: [],\n ttl: null,\n codec: null,\n materialized: null,\n comment: null,\n },\n];\n\nexport const getWorkflows = async () => {\n loadIndex();\n\n const registry = getMooseInternal();\n return registry.workflows;\n};\n\nfunction findTaskInTree(\n task: Task<any, any>,\n targetName: string,\n): Task<any, any> | undefined {\n if (task.name === targetName) {\n return task;\n }\n\n if (task.config.onComplete?.length) {\n for (const childTask of task.config.onComplete) {\n const found = findTaskInTree(childTask, targetName);\n if (found) {\n return found;\n }\n }\n }\n\n return undefined;\n}\n\nexport const getTaskForWorkflow = async (\n workflowName: string,\n taskName: string,\n): Promise<Task<any, any>> => {\n const workflows = await getWorkflows();\n const workflow = workflows.get(workflowName);\n if (!workflow) {\n throw new Error(`Workflow ${workflowName} not found`);\n }\n\n const task = findTaskInTree(\n workflow.config.startingTask as Task<any, any>,\n taskName,\n );\n if (!task) {\n throw new Error(`Task ${taskName} not found in workflow ${workflowName}`);\n }\n\n return task;\n};\n\nexport const getWebApps = async () => {\n 
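The alias bookkeeping above boils down to: an explicitly unversioned API always owns the bare name, and a versioned API inherits it only while it is the sole version registered under that name. A distilled sketch of that logic (handler ids are illustrative):

```typescript
// Simplified model of the versioned-alias tracking in getApis.
const aliases = new Map<string, string>();
const versionCount = new Map<string, number>();

function trackVersioned(name: string, handlerId: string) {
  const count = (versionCount.get(name) ?? 0) + 1;
  versionCount.set(name, count);
  if (count === 1) aliases.set(name, handlerId);
  else aliases.delete(name); // two or more versions: bare name is ambiguous
}

trackVersioned("getUsers", "v1-handler");
aliases.get("getUsers"); // "v1-handler" — sole version gets the alias
trackVersioned("getUsers", "v2-handler");
aliases.get("getUsers"); // undefined — alias removed as ambiguous
```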
loadIndex();\n return getMooseInternal().webApps;\n};\n","// source https://github.com/blakeembrey/sql-template-tag/blob/main/src/index.ts\nimport { Column } from \"./dataModels/dataModelTypes\";\nimport { OlapTable } from \"./dmv2\";\n\nimport { AggregationFunction } from \"./dataModels/typeConvert\";\n\n/**\n * Quote a ClickHouse identifier with backticks if not already quoted.\n * Backticks allow special characters (e.g., hyphens) in identifiers.\n */\nexport const quoteIdentifier = (name: string): string => {\n return name.startsWith(\"`\") && name.endsWith(\"`\") ? name : `\\`${name}\\``;\n};\n\nconst isTable = (\n value: RawValue | Column | OlapTable<any>,\n): value is OlapTable<any> =>\n typeof value === \"object\" &&\n value !== null &&\n \"kind\" in value &&\n value.kind === \"OlapTable\";\n\nexport type IdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\nexport type NonIdentifierBrandedString = string & {\n readonly __identifier_brand?: unique symbol;\n};\n\n/**\n * Values supported by SQL engine.\n */\nexport type Value =\n | NonIdentifierBrandedString\n | number\n | boolean\n | Date\n | [string, string];\n\n/**\n * Supported value or SQL instance.\n */\nexport type RawValue = Value | Sql;\n\nconst isColumn = (value: RawValue | Column | OlapTable<any>): value is Column =>\n typeof value === \"object\" && \"name\" in value && \"annotations\" in value;\n\nexport function sql(\n strings: readonly string[],\n ...values: readonly (RawValue | Column | OlapTable<any>)[]\n) {\n return new Sql(strings, values);\n}\n\nconst instanceofSql = (\n value: RawValue | Column | OlapTable<any>,\n): value is Sql =>\n typeof value === \"object\" && \"values\" in value && \"strings\" in value;\n\n/**\n * A SQL instance can be nested within each other to build SQL strings.\n */\nexport class Sql {\n readonly values: Value[];\n readonly strings: string[];\n\n constructor(\n rawStrings: readonly string[],\n rawValues: readonly (RawValue | Column | OlapTable<any>)[],\n ) {\n if (rawStrings.length - 1 !== rawValues.length) {\n if (rawStrings.length === 0) {\n throw new TypeError(\"Expected at least 1 string\");\n }\n\n throw new TypeError(\n `Expected ${rawStrings.length} strings to have ${\n rawStrings.length - 1\n } values`,\n );\n }\n\n const valuesLength = rawValues.reduce<number>(\n (len: number, value: RawValue | Column | OlapTable<any>) =>\n len +\n (instanceofSql(value) ? value.values.length\n : isColumn(value) || isTable(value) ? 0\n : 1),\n 0,\n );\n\n this.values = new Array(valuesLength);\n this.strings = new Array(valuesLength + 1);\n\n this.strings[0] = rawStrings[0];\n\n // Iterate over raw values, strings, and children. The value is always\n // positioned between two strings, e.g. 
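`quoteIdentifier` is idempotent: already-backticked names pass through unchanged, and everything else gets wrapped so identifiers with special characters stay valid. For example:

```typescript
quoteIdentifier("events");   // "`events`"
quoteIdentifier("my-table"); // "`my-table`" — the hyphen is safe once quoted
quoteIdentifier("`events`"); // "`events`" — already quoted, returned as-is
```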
`index + 1`.\n let i = 0,\n pos = 0;\n while (i < rawValues.length) {\n const child = rawValues[i++];\n const rawString = rawStrings[i];\n\n // Check for nested `sql` queries.\n if (instanceofSql(child)) {\n // Append child prefix text to current string.\n this.strings[pos] += child.strings[0];\n\n let childIndex = 0;\n while (childIndex < child.values.length) {\n this.values[pos++] = child.values[childIndex++];\n this.strings[pos] = child.strings[childIndex];\n }\n\n // Append raw string to current string.\n this.strings[pos] += rawString;\n } else if (isColumn(child)) {\n const aggregationFunction = child.annotations.find(\n ([k, _]) => k === \"aggregationFunction\",\n );\n if (aggregationFunction !== undefined) {\n this.strings[pos] +=\n `${(aggregationFunction[1] as AggregationFunction).functionName}Merge(\\`${child.name}\\`)`;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else if (isTable(child)) {\n if (child.config.database) {\n this.strings[pos] += `\\`${child.config.database}\\`.\\`${child.name}\\``;\n } else {\n this.strings[pos] += `\\`${child.name}\\``;\n }\n this.strings[pos] += rawString;\n } else {\n this.values[pos++] = child;\n this.strings[pos] = rawString;\n }\n }\n }\n}\n\nexport const toStaticQuery = (sql: Sql): string => {\n const [query, params] = toQuery(sql);\n if (Object.keys(params).length !== 0) {\n throw new Error(\n \"Dynamic SQL is not allowed in the select statement in view creation.\",\n );\n }\n return query;\n};\n\nexport const toQuery = (sql: Sql): [string, { [pN: string]: any }] => {\n const parameterizedStubs = sql.values.map((v, i) =>\n createClickhouseParameter(i, v),\n );\n\n const query = sql.strings\n .map((s, i) =>\n s != \"\" ? `${s}${emptyIfUndefined(parameterizedStubs[i])}` : \"\",\n )\n .join(\"\");\n\n const query_params = sql.values.reduce(\n (acc: Record<string, unknown>, v, i) => ({\n ...acc,\n [`p${i}`]: getValueFromParameter(v),\n }),\n {},\n );\n return [query, query_params];\n};\n\n/**\n * Build a display-only SQL string with values inlined for logging/debugging.\n * Does not alter execution behavior; use toQuery for actual execution.\n */\nexport const toQueryPreview = (sql: Sql): string => {\n try {\n const formatValue = (v: Value): string => {\n // Unwrap identifiers: [\"Identifier\", name]\n if (Array.isArray(v)) {\n const [type, val] = v as unknown as [string, any];\n if (type === \"Identifier\") {\n // Quote identifiers with backticks like other helpers\n return `\\`${String(val)}\\``;\n }\n // Fallback for unexpected arrays\n return `[${(v as unknown as any[]).map((x) => formatValue(x as Value)).join(\", \")}]`;\n }\n if (v === null || v === undefined) return \"NULL\";\n if (typeof v === \"string\") return `'${v.replace(/'/g, \"''\")}'`;\n if (typeof v === \"number\") return String(v);\n if (typeof v === \"boolean\") return v ? \"true\" : \"false\";\n if (v instanceof Date)\n return `'${v.toISOString().replace(\"T\", \" \").slice(0, 19)}'`;\n try {\n return JSON.stringify(v as unknown as any);\n } catch {\n return String(v);\n }\n };\n\n let out = sql.strings[0] ?? \"\";\n for (let i = 0; i < sql.values.length; i++) {\n const val = getValueFromParameter(sql.values[i] as any);\n out += formatValue(val as Value);\n out += sql.strings[i + 1] ?? 
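Putting the pieces together: dynamic values become `{pN:Type}` placeholders, while columns, tables, and nested `Sql` fragments are spliced directly into the query text. A sketch, assuming `sql` and `toQuery` are imported from this module:

```typescript
const status = "active";
const minAge = 21;
const filter = sql`status = ${status}`; // nested fragment

const query = sql`SELECT * FROM users WHERE age >= ${minAge} AND ${filter}`;
const [text, params] = toQuery(query);
// text   => "SELECT * FROM users WHERE age >= {p0:Int} AND status = {p1:String}"
// params => { p0: 21, p1: "active" }
```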
\"\";\n }\n return out.replace(/\\s+/g, \" \").trim();\n } catch (error) {\n console.log(`toQueryPreview error: ${error}`);\n return \"/* query preview unavailable */\";\n }\n};\n\nexport const getValueFromParameter = (value: any) => {\n if (Array.isArray(value)) {\n const [type, val] = value;\n if (type === \"Identifier\") return val;\n }\n return value;\n};\nexport function createClickhouseParameter(\n parameterIndex: number,\n value: Value,\n) {\n // ClickHouse use {name:type} be a placeholder, so if we only use number string as name e.g: {1:Unit8}\n // it will face issue when converting to the query params => {1: value1}, because the key is value not string type, so here add prefix \"p\" to avoid this issue.\n return `{p${parameterIndex}:${mapToClickHouseType(value)}}`;\n}\n\n/**\n * Convert the JS type (source is JSON format by API query parameter) to the corresponding ClickHouse type for generating named placeholder of parameterized query.\n * Only support to convert number to Int or Float, boolean to Bool, string to String, other types will convert to String.\n * If exist complex type e.g: object, Array, null, undefined, Date, Record.. etc, just convert to string type by ClickHouse function in SQL.\n * ClickHouse support converting string to other types function.\n * Please see Each section of the https://clickhouse.com/docs/en/sql-reference/functions and https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions\n * @param value\n * @returns 'Float', 'Int', 'Bool', 'String'\n */\nexport const mapToClickHouseType = (value: Value) => {\n if (typeof value === \"number\") {\n // infer the float or int according to exist remainder or not\n return Number.isInteger(value) ? \"Int\" : \"Float\";\n }\n // When define column type or query result with parameterized query, The Bool or Boolean type both supported.\n // But the column type of query result only return Bool, so we only support Bool type for safety.\n if (typeof value === \"boolean\") return \"Bool\";\n if (value instanceof Date) return \"DateTime\";\n if (Array.isArray(value)) {\n const [type, _] = value;\n return type;\n }\n return \"String\";\n};\nfunction emptyIfUndefined(value: string | undefined): string {\n return value === undefined ? 
\"\" : value;\n}\n","import { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase, TypiaValidators } from \"../typedBase\";\nimport {\n Column,\n isArrayNestedType,\n isNestedType,\n} from \"../../dataModels/dataModelTypes\";\nimport { ClickHouseEngines } from \"../../blocks/helpers\";\nimport { getMooseInternal, isClientOnlyMode } from \"../internal\";\nimport { Readable } from \"node:stream\";\nimport { createHash } from \"node:crypto\";\nimport type {\n ConfigurationRegistry,\n RuntimeClickHouseConfig,\n} from \"../../config/runtime\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport { IdentifierBrandedString, quoteIdentifier } from \"../../sqlHelpers\";\nimport type { NodeClickHouseClient } from \"@clickhouse/client/dist/client\";\n\nexport interface TableIndex {\n name: string;\n expression: string;\n type: string;\n arguments?: string[];\n granularity?: number;\n}\n\n/**\n * Represents a failed record during insertion with error details\n */\nexport interface FailedRecord<T> {\n /** The original record that failed to insert */\n record: T;\n /** The error message describing why the insertion failed */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n}\n\n/**\n * Result of an insert operation with detailed success/failure information\n */\nexport interface InsertResult<T> {\n /** Number of records successfully inserted */\n successful: number;\n /** Number of records that failed to insert */\n failed: number;\n /** Total number of records processed */\n total: number;\n /** Detailed information about failed records (if record isolation was used) */\n failedRecords?: FailedRecord<T>[];\n}\n\n/**\n * Error handling strategy for insert operations\n */\nexport type ErrorStrategy =\n | \"fail-fast\" // Fail immediately on any error (default)\n | \"discard\" // Discard bad records and continue with good ones\n | \"isolate\"; // Retry individual records to isolate failures\n\n/**\n * Options for insert operations\n */\nexport interface InsertOptions {\n /** Maximum number of bad records to tolerate before failing */\n allowErrors?: number;\n /** Maximum ratio of bad records to tolerate (0.0 to 1.0) before failing */\n allowErrorsRatio?: number;\n /** Error handling strategy */\n strategy?: ErrorStrategy;\n /** Whether to enable dead letter queue for failed records (future feature) */\n deadLetterQueue?: boolean;\n /** Whether to validate data against schema before insertion (default: true) */\n validate?: boolean;\n /** Whether to skip validation for individual records during 'isolate' strategy retries (default: false) */\n skipValidationOnRetry?: boolean;\n}\n\n/**\n * Validation result for a record with detailed error information\n */\nexport interface ValidationError {\n /** The original record that failed validation */\n record: any;\n /** Detailed validation error message */\n error: string;\n /** Optional: The index of this record in the original batch */\n index?: number;\n /** The path to the field that failed validation */\n path?: string;\n}\n\n/**\n * Result of data validation with success/failure breakdown\n */\nexport interface ValidationResult<T> {\n /** Records that passed validation */\n valid: T[];\n /** Records that failed validation with detailed error information */\n invalid: ValidationError[];\n /** Total number of records processed */\n total: number;\n}\n\n/**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING\n * Note: Since ClickHouse 24.7, settings no longer 
require the 's3queue_' prefix\n */\nexport interface S3QueueTableSettings {\n /** Processing mode: \"ordered\" for sequential or \"unordered\" for parallel processing */\n mode?: \"ordered\" | \"unordered\";\n /** What to do with files after processing: 'keep' or 'delete' */\n after_processing?: \"keep\" | \"delete\";\n /** ZooKeeper/Keeper path for coordination between replicas */\n keeper_path?: string;\n /** Number of retry attempts for failed files */\n loading_retries?: string;\n /** Number of threads for parallel processing */\n processing_threads_num?: string;\n /** Enable parallel inserts */\n parallel_inserts?: string;\n /** Enable logging to system.s3queue_log table */\n enable_logging_to_queue_log?: string;\n /** Last processed file path (for ordered mode) */\n last_processed_path?: string;\n /** Maximum number of tracked files in ZooKeeper */\n tracked_files_limit?: string;\n /** TTL for tracked files in seconds */\n tracked_file_ttl_sec?: string;\n /** Minimum polling timeout in milliseconds */\n polling_min_timeout_ms?: string;\n /** Maximum polling timeout in milliseconds */\n polling_max_timeout_ms?: string;\n /** Polling backoff in milliseconds */\n polling_backoff_ms?: string;\n /** Minimum cleanup interval in milliseconds */\n cleanup_interval_min_ms?: string;\n /** Maximum cleanup interval in milliseconds */\n cleanup_interval_max_ms?: string;\n /** Number of buckets for sharding (0 = disabled) */\n buckets?: string;\n /** Batch size for listing objects */\n list_objects_batch_size?: string;\n /** Enable hash ring filtering for distributed processing */\n enable_hash_ring_filtering?: string;\n /** Maximum files to process before committing */\n max_processed_files_before_commit?: string;\n /** Maximum rows to process before committing */\n max_processed_rows_before_commit?: string;\n /** Maximum bytes to process before committing */\n max_processed_bytes_before_commit?: string;\n /** Maximum processing time in seconds before committing */\n max_processing_time_sec_before_commit?: string;\n /** Use persistent processing nodes (available from 25.8) */\n use_persistent_processing_nodes?: string;\n /** TTL for persistent processing nodes in seconds */\n persistent_processing_nodes_ttl_seconds?: string;\n /** Additional settings */\n [key: string]: string | undefined;\n}\n\n/**\n * Base configuration shared by all table engines\n * @template T The data type of the records stored in the table.\n */\n\nexport type BaseOlapConfig<T> = (\n | {\n /**\n * Specifies the fields to use for ordering data within the ClickHouse table.\n * This is crucial for optimizing query performance.\n */\n orderByFields: (keyof T & string)[];\n orderByExpression?: undefined;\n }\n | {\n orderByFields?: undefined;\n /**\n * An arbitrary ClickHouse SQL expression for the order by clause.\n *\n * `orderByExpression: \"(id, name)\"` is equivalent to `orderByFields: [\"id\", \"name\"]`\n * `orderByExpression: \"tuple()\"` means no sorting\n */\n orderByExpression: string;\n }\n // specify either or leave both unspecified\n | { orderByFields?: undefined; orderByExpression?: undefined }\n) & {\n partitionBy?: string;\n /**\n * SAMPLE BY expression for approximate query processing.\n *\n * Examples:\n * ```typescript\n * // Single unsigned integer field\n * sampleByExpression: \"userId\"\n *\n * // Hash function on any field type\n * sampleByExpression: \"cityHash64(id)\"\n *\n * // Multiple fields with hash\n * sampleByExpression: \"cityHash64(userId, timestamp)\"\n * ```\n *\n * Requirements:\n * - 
Expression must evaluate to an unsigned integer (UInt8/16/32/64)\n * - Expression must be present in the ORDER BY clause\n * - If using hash functions, the same expression must appear in orderByExpression\n */\n sampleByExpression?: string;\n /**\n * Optional PRIMARY KEY expression.\n * When specified, this overrides the primary key inferred from Key<T> column annotations.\n *\n * This allows for:\n * - Complex primary keys using functions (e.g., \"cityHash64(id)\")\n * - Different column ordering in primary key vs schema definition\n * - Primary keys that differ from ORDER BY\n *\n * Example: primaryKeyExpression: \"(userId, cityHash64(eventId))\"\n *\n * Note: When this is set, any Key<T> annotations on columns are ignored for PRIMARY KEY generation.\n */\n primaryKeyExpression?: string;\n version?: string;\n lifeCycle?: LifeCycle;\n settings?: { [key: string]: string };\n /**\n * Optional TTL configuration for the table.\n * e.g., \"TTL timestamp + INTERVAL 90 DAY DELETE\"\n *\n * Use the {@link ClickHouseTTL} type to configure column level TTL\n */\n ttl?: string;\n /** Optional secondary/data-skipping indexes */\n indexes?: TableIndex[];\n /**\n * Optional database name for multi-database support.\n * When not specified, uses the global ClickHouse config database.\n */\n database?: string;\n /**\n * Optional cluster name for ON CLUSTER support.\n * Use this to enable replicated tables across ClickHouse clusters.\n * The cluster must be defined in config.toml (dev environment only).\n * Example: cluster: \"prod_cluster\"\n */\n cluster?: string;\n};\n\n/**\n * Configuration for MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type MergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.MergeTree;\n};\n\n/**\n * Configuration for ReplacingMergeTree engine (deduplication)\n * @template T The data type of the records stored in the table.\n */\nexport type ReplacingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.ReplacingMergeTree;\n ver?: keyof T & string; // Optional version column\n isDeleted?: keyof T & string; // Optional is_deleted column\n};\n\n/**\n * Configuration for AggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type AggregatingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.AggregatingMergeTree;\n};\n\n/**\n * Configuration for SummingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type SummingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.SummingMergeTree;\n columns?: string[];\n};\n\n/**\n * Configuration for CollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type CollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.CollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n};\n\n/**\n * Configuration for VersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type VersionedCollapsingMergeTreeConfig<T> = BaseOlapConfig<T> & {\n engine: ClickHouseEngines.VersionedCollapsingMergeTree;\n sign: keyof T & string; // Sign column (1 = state, -1 = cancel)\n ver: keyof T & string; // Version column for ordering state changes\n};\n\ninterface ReplicatedEngineProperties {\n keeperPath?: string;\n replicaName?: string;\n}\n\n/**\n * Configuration for ReplicatedMergeTree engine\n * 
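As a usage sketch (type, table, and column names are illustrative; `OlapTable` and `ClickHouseEngines` are used as in the examples elsewhere in this module), a deduplicating table with the engine-specific `ver`/`isDeleted` columns looks like:

```typescript
interface UserRow {
  id: string;
  name: string;
  updatedAt: Date;
  deleted: number; // 1 = row is a delete marker
}

const users = new OlapTable<UserRow>("users", {
  engine: ClickHouseEngines.ReplacingMergeTree,
  orderByFields: ["id"],
  ver: "updatedAt", // keep the row with the highest version
  isDeleted: "deleted",
});
```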
@template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedMergeTreeConfig<T> = Omit<MergeTreeConfig<T>, \"engine\"> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedMergeTree;\n };\n\n/**\n * Configuration for ReplicatedReplacingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedReplacingMergeTreeConfig<T> = Omit<\n ReplacingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedReplacingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedAggregatingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedAggregatingMergeTreeConfig<T> = Omit<\n AggregatingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedAggregatingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedSummingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedSummingMergeTreeConfig<T> = Omit<\n SummingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedSummingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedCollapsingMergeTreeConfig<T> = Omit<\n CollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedCollapsingMergeTree;\n };\n\n/**\n * Configuration for ReplicatedVersionedCollapsingMergeTree engine\n * @template T The data type of the records stored in the table.\n *\n * Note: keeperPath and replicaName are optional. Omit them for ClickHouse Cloud,\n * which manages replication automatically. For self-hosted with ClickHouse Keeper,\n * provide both parameters or neither (to use server defaults).\n */\nexport type ReplicatedVersionedCollapsingMergeTreeConfig<T> = Omit<\n VersionedCollapsingMergeTreeConfig<T>,\n \"engine\"\n> &\n ReplicatedEngineProperties & {\n engine: ClickHouseEngines.ReplicatedVersionedCollapsingMergeTree;\n };\n\n/**\n * Configuration for S3Queue engine - only non-alterable constructor parameters.\n * S3Queue-specific settings like 'mode', 'keeper_path', etc. 
should be specified\n * in the settings field, not here.\n * @template T The data type of the records stored in the table.\n */\nexport type S3QueueConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"settings\" | \"orderByFields\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.S3Queue;\n /** S3 bucket path with wildcards (e.g., 's3://bucket/data/*.json') */\n s3Path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd') */\n compression?: string;\n /** Custom HTTP headers */\n headers?: { [key: string]: string };\n /**\n * S3Queue-specific table settings that can be modified with ALTER TABLE MODIFY SETTING.\n * These settings control the behavior of the S3Queue engine.\n */\n settings?: S3QueueTableSettings;\n};\n\n/**\n * Configuration for S3 engine\n * Note: S3 engine supports ORDER BY clause, unlike S3Queue, Buffer, and Distributed engines\n * @template T The data type of the records stored in the table.\n */\nexport type S3Config<T> = Omit<BaseOlapConfig<T>, \"sampleByExpression\"> & {\n engine: ClickHouseEngines.S3;\n /** S3 path (e.g., 's3://bucket/path/file.json') */\n path: string;\n /** Data format (e.g., 'JSONEachRow', 'CSV', 'Parquet') */\n format: string;\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key */\n awsSecretAccessKey?: string;\n /** Compression type (e.g., 'gzip', 'zstd', 'auto') */\n compression?: string;\n /** Partition strategy (optional) */\n partitionStrategy?: string;\n /** Partition columns in data file (optional) */\n partitionColumnsInDataFile?: string;\n};\n\n/**\n * Configuration for Buffer engine\n * @template T The data type of the records stored in the table.\n */\nexport type BufferConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Buffer;\n /** Target database name for the destination table */\n targetDatabase: string;\n /** Target table name where data will be flushed */\n targetTable: string;\n /** Number of buffer layers (typically 16) */\n numLayers: number;\n /** Minimum time in seconds before flushing */\n minTime: number;\n /** Maximum time in seconds before flushing */\n maxTime: number;\n /** Minimum number of rows before flushing */\n minRows: number;\n /** Maximum number of rows before flushing */\n maxRows: number;\n /** Minimum bytes before flushing */\n minBytes: number;\n /** Maximum bytes before flushing */\n maxBytes: number;\n /** Optional: Flush time in seconds */\n flushTime?: number;\n /** Optional: Flush number of rows */\n flushRows?: number;\n /** Optional: Flush number of bytes */\n flushBytes?: number;\n};\n\n/**\n * Configuration for Distributed engine\n * @template T The data type of the records stored in the table.\n */\nexport type DistributedConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Distributed;\n /** Cluster name from the ClickHouse configuration */\n cluster: string;\n /** Database name on the cluster */\n targetDatabase: string;\n /** Table name on the cluster */\n targetTable: string;\n /** Optional: Sharding key expression for data distribution */\n shardingKey?: 
string;\n /** Optional: Policy name for data distribution */\n policyName?: string;\n};\n\n/** Kafka table settings. See: https://clickhouse.com/docs/engines/table-engines/integrations/kafka */\nexport interface KafkaTableSettings {\n kafka_security_protocol?: \"PLAINTEXT\" | \"SSL\" | \"SASL_PLAINTEXT\" | \"SASL_SSL\";\n kafka_sasl_mechanism?:\n | \"GSSAPI\"\n | \"PLAIN\"\n | \"SCRAM-SHA-256\"\n | \"SCRAM-SHA-512\"\n | \"OAUTHBEARER\";\n kafka_sasl_username?: string;\n kafka_sasl_password?: string;\n kafka_schema?: string;\n kafka_num_consumers?: string;\n kafka_max_block_size?: string;\n kafka_skip_broken_messages?: string;\n kafka_commit_every_batch?: string;\n kafka_client_id?: string;\n kafka_poll_timeout_ms?: string;\n kafka_poll_max_batch_size?: string;\n kafka_flush_interval_ms?: string;\n kafka_consumer_reschedule_ms?: string;\n kafka_thread_per_consumer?: string;\n kafka_handle_error_mode?: \"default\" | \"stream\";\n kafka_commit_on_select?: string;\n kafka_max_rows_per_message?: string;\n kafka_compression_codec?: string;\n kafka_compression_level?: string;\n}\n\n/** Kafka engine for streaming data from Kafka topics. Additional settings go in `settings`. */\nexport type KafkaConfig<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.Kafka;\n brokerList: string;\n topicList: string;\n groupName: string;\n format: string;\n settings?: KafkaTableSettings;\n};\n\n/**\n * Configuration for IcebergS3 engine - read-only Iceberg table access\n *\n * Provides direct querying of Apache Iceberg tables stored on S3.\n * Data is not copied; queries stream directly from Parquet/ORC files.\n *\n * @template T The data type of the records stored in the table.\n *\n * @example\n * ```typescript\n * const lakeEvents = new OlapTable<Event>(\"lake_events\", {\n * engine: ClickHouseEngines.IcebergS3,\n * path: \"s3://datalake/events/\",\n * format: \"Parquet\",\n * awsAccessKeyId: mooseRuntimeEnv.get(\"AWS_ACCESS_KEY_ID\"),\n * awsSecretAccessKey: mooseRuntimeEnv.get(\"AWS_SECRET_ACCESS_KEY\")\n * });\n * ```\n *\n * @remarks\n * - IcebergS3 engine is read-only\n * - Does not support ORDER BY, PARTITION BY, or SAMPLE BY clauses\n * - Queries always see the latest Iceberg snapshot (with metadata cache)\n */\nexport type IcebergS3Config<T> = Omit<\n BaseOlapConfig<T>,\n \"orderByFields\" | \"orderByExpression\" | \"partitionBy\" | \"sampleByExpression\"\n> & {\n engine: ClickHouseEngines.IcebergS3;\n /** S3 path to Iceberg table root (e.g., 's3://bucket/warehouse/events/') */\n path: string;\n /** Data format - 'Parquet' or 'ORC' */\n format: \"Parquet\" | \"ORC\";\n /** AWS access key ID (optional, omit for NOSIGN/public buckets) */\n awsAccessKeyId?: string;\n /** AWS secret access key (optional) */\n awsSecretAccessKey?: string;\n /** Compression type (optional: 'gzip', 'zstd', 'auto') */\n compression?: string;\n};\n\n/**\n * Legacy configuration (backward compatibility) - defaults to MergeTree engine\n * @template T The data type of the records stored in the table.\n */\nexport type LegacyOlapConfig<T> = BaseOlapConfig<T>;\n\ntype EngineConfig<T> =\n | MergeTreeConfig<T>\n | ReplacingMergeTreeConfig<T>\n | AggregatingMergeTreeConfig<T>\n | SummingMergeTreeConfig<T>\n | CollapsingMergeTreeConfig<T>\n | VersionedCollapsingMergeTreeConfig<T>\n | ReplicatedMergeTreeConfig<T>\n | ReplicatedReplacingMergeTreeConfig<T>\n | ReplicatedAggregatingMergeTreeConfig<T>\n | 
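Following the IcebergS3 example above, a Kafka-engine table might be configured like this (broker, topic, and group names are placeholders):

```typescript
interface ClickEvent {
  userId: string;
  url: string;
  ts: Date;
}

const kafkaClicks = new OlapTable<ClickEvent>("kafka_clicks", {
  engine: ClickHouseEngines.Kafka,
  brokerList: "broker-1:9092",
  topicList: "clicks",
  groupName: "moose_clicks_consumer",
  format: "JSONEachRow",
  settings: { kafka_num_consumers: "2" }, // optional tuning
});
```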
ReplicatedSummingMergeTreeConfig<T>\n | ReplicatedCollapsingMergeTreeConfig<T>\n | ReplicatedVersionedCollapsingMergeTreeConfig<T>\n | S3QueueConfig<T>\n | S3Config<T>\n | BufferConfig<T>\n | DistributedConfig<T>\n | IcebergS3Config<T>\n | KafkaConfig<T>;\n\n/**\n * Union of all engine-specific configurations (new API)\n * @template T The data type of the records stored in the table.\n */\nexport type OlapConfig<T> = EngineConfig<T> | LegacyOlapConfig<T>;\n\n/**\n * Represents an OLAP (Online Analytical Processing) table, typically corresponding to a ClickHouse table.\n * Provides a typed interface for interacting with the table.\n *\n * @template T The data type of the records stored in the table. The structure of T defines the table schema.\n */\nexport class OlapTable<T> extends TypedBase<T, OlapConfig<T>> {\n name: IdentifierBrandedString;\n\n /** @internal */\n public readonly kind = \"OlapTable\";\n\n /** @internal Memoized ClickHouse client for reusing connections across insert calls */\n private _memoizedClient?: any;\n /** @internal Hash of the configuration used to create the memoized client */\n private _configHash?: string;\n /** @internal Cached table name to avoid repeated generation */\n private _cachedTableName?: string;\n\n /**\n * Creates a new OlapTable instance.\n * @param name The name of the table. This name is used for the underlying ClickHouse table.\n * @param config Optional configuration for the OLAP table.\n */\n constructor(name: string, config?: OlapConfig<T>);\n\n /** @internal **/\n constructor(\n name: string,\n config: OlapConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators?: TypiaValidators<T>,\n );\n\n constructor(\n name: string,\n config?: OlapConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: TypiaValidators<T>,\n ) {\n // Handle legacy configuration by defaulting to MergeTree when no engine is specified\n const resolvedConfig =\n config ?\n \"engine\" in config ?\n config\n : { ...config, engine: ClickHouseEngines.MergeTree }\n : { engine: ClickHouseEngines.MergeTree };\n\n // Enforce mutual exclusivity at runtime as well\n const hasFields =\n Array.isArray((resolvedConfig as any).orderByFields) &&\n (resolvedConfig as any).orderByFields.length > 0;\n const hasExpr =\n typeof (resolvedConfig as any).orderByExpression === \"string\" &&\n (resolvedConfig as any).orderByExpression.length > 0;\n if (hasFields && hasExpr) {\n throw new Error(\n `OlapTable ${name}: Provide either orderByFields or orderByExpression, not both.`,\n );\n }\n\n // Validate cluster and explicit replication params are not both specified\n const hasCluster = typeof (resolvedConfig as any).cluster === \"string\";\n const hasKeeperPath =\n typeof (resolvedConfig as any).keeperPath === \"string\";\n const hasReplicaName =\n typeof (resolvedConfig as any).replicaName === \"string\";\n\n if (hasCluster && (hasKeeperPath || hasReplicaName)) {\n throw new Error(\n `OlapTable ${name}: Cannot specify both 'cluster' and explicit replication params ('keeperPath' or 'replicaName'). ` +\n `Use 'cluster' for auto-injected params, or use explicit 'keeperPath' and 'replicaName' without 'cluster'.`,\n );\n }\n\n super(name, resolvedConfig, schema, columns, validators);\n this.name = name;\n\n const tables = getMooseInternal().tables;\n const registryKey =\n this.config.version ? 
`${name}_${this.config.version}` : name;\n // In client-only mode (MOOSE_CLIENT_ONLY=true), allow duplicate registrations\n // to support Next.js HMR which re-executes modules without clearing the registry\n if (!isClientOnlyMode() && tables.has(registryKey)) {\n throw new Error(\n `OlapTable with name ${name} and version ${config?.version ?? \"unversioned\"} already exists`,\n );\n }\n tables.set(registryKey, this);\n }\n\n /**\n * Generates the versioned table name following Moose's naming convention\n * Format: {tableName}_{version_with_dots_replaced_by_underscores}\n */\n private generateTableName(): string {\n // Cache the table name since version rarely changes\n if (this._cachedTableName) {\n return this._cachedTableName;\n }\n\n const tableVersion = this.config.version;\n if (!tableVersion) {\n this._cachedTableName = this.name;\n } else {\n const versionSuffix = tableVersion.replace(/\\./g, \"_\");\n this._cachedTableName = `${this.name}_${versionSuffix}`;\n }\n\n return this._cachedTableName;\n }\n\n /**\n * Creates a fast hash of the ClickHouse configuration.\n * Uses crypto.createHash for better performance than JSON.stringify.\n *\n * @private\n */\n private createConfigHash(clickhouseConfig: any): string {\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const configString = `${clickhouseConfig.host}:${clickhouseConfig.port}:${clickhouseConfig.username}:${clickhouseConfig.password}:${effectiveDatabase}:${clickhouseConfig.useSSL}`;\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized ClickHouse client.\n * The client is cached and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be created.\n *\n * @private\n */\n private async getMemoizedClient(): Promise<{\n client: NodeClickHouseClient;\n config: RuntimeClickHouseConfig;\n }> {\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getClickhouseClient } = await import(\"../../commons\");\n\n const clickhouseConfig = await configRegistry.getClickHouseConfig();\n const currentConfigHash = this.createConfigHash(clickhouseConfig);\n\n // If we have a cached client and the config hasn't changed, reuse it\n if (this._memoizedClient && this._configHash === currentConfigHash) {\n return { client: this._memoizedClient, config: clickhouseConfig };\n }\n\n // Close existing client if config changed\n if (this._memoizedClient && this._configHash !== currentConfigHash) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing old client\n }\n }\n\n // Create new client with standard configuration\n // Use per-table database if specified, otherwise fall back to global config\n const effectiveDatabase = this.config.database ?? clickhouseConfig.database;\n const client = getClickhouseClient({\n username: clickhouseConfig.username,\n password: clickhouseConfig.password,\n database: effectiveDatabase,\n useSSL: clickhouseConfig.useSSL ? 
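The naming convention is mechanical: dots in the version become underscores, and unversioned tables keep their bare name. A quick sketch of the mapping implemented by `generateTableName`:

```typescript
// name: "users", version: "1.2"   -> table "users_1_2"
// name: "users", version: "0.0.1" -> table "users_0_0_1"
// name: "users", no version       -> table "users"
const versionedName = (name: string, version?: string) =>
  version ? `${name}_${version.replace(/\./g, "_")}` : name;
```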
\"true\" : \"false\",\n host: clickhouseConfig.host,\n port: clickhouseConfig.port,\n });\n\n // Cache the new client and config hash\n this._memoizedClient = client;\n this._configHash = currentConfigHash;\n\n return { client, config: clickhouseConfig };\n }\n\n /**\n * Closes the memoized ClickHouse client if it exists.\n * This is useful for cleaning up connections when the table instance is no longer needed.\n * The client will be automatically recreated on the next insert call if needed.\n */\n async closeClient(): Promise<void> {\n if (this._memoizedClient) {\n try {\n await this._memoizedClient.close();\n } catch (error) {\n // Ignore errors when closing\n } finally {\n this._memoizedClient = undefined;\n this._configHash = undefined;\n }\n }\n }\n\n /**\n * Validates a single record using typia's comprehensive type checking.\n * This provides the most accurate validation as it uses the exact TypeScript type information.\n *\n * @param record The record to validate\n * @returns Validation result with detailed error information\n */\n validateRecord(record: unknown): {\n success: boolean;\n data?: T;\n errors?: string[];\n } {\n // Use injected typia validator if available\n if (this.validators?.validate) {\n try {\n const result = this.validators.validate(record);\n return {\n success: result.success,\n data: result.data,\n errors: result.errors?.map((err) =>\n typeof err === \"string\" ? err : JSON.stringify(err),\n ),\n };\n } catch (error) {\n return {\n success: false,\n errors: [error instanceof Error ? error.message : String(error)],\n };\n }\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Type guard function using typia's is() function.\n * Provides compile-time type narrowing for TypeScript.\n *\n * @param record The record to check\n * @returns True if record matches type T, with type narrowing\n */\n isValidRecord(record: unknown): record is T {\n if (this.validators?.is) {\n return this.validators.is(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Assert that a record matches type T, throwing detailed errors if not.\n * Uses typia's assert() function for the most detailed error reporting.\n *\n * @param record The record to assert\n * @returns The validated and typed record\n * @throws Detailed validation error if record doesn't match type T\n */\n assertValidRecord(record: unknown): T {\n if (this.validators?.assert) {\n return this.validators.assert(record);\n }\n\n throw new Error(\"No typia validator found\");\n }\n\n /**\n * Validates an array of records with comprehensive error reporting.\n * Uses the most appropriate validation method available (typia or basic).\n *\n * @param data Array of records to validate\n * @returns Detailed validation results\n */\n async validateRecords(data: unknown[]): Promise<ValidationResult<T>> {\n const valid: T[] = [];\n const invalid: ValidationError[] = [];\n\n // Pre-allocate arrays with estimated sizes to reduce reallocations\n valid.length = 0;\n invalid.length = 0;\n\n // Use for loop instead of forEach for better performance\n const dataLength = data.length;\n for (let i = 0; i < dataLength; i++) {\n const record = data[i];\n\n try {\n // Fast path: use typia's is() function first for type checking\n if (this.isValidRecord(record)) {\n valid.push(this.mapToClickhouseRecord(record));\n } else {\n // Only use expensive validateRecord for detailed errors when needed\n const result = this.validateRecord(record);\n if (result.success) {\n 
valid.push(this.mapToClickhouseRecord(record));\n } else {\n invalid.push({\n record,\n error: result.errors?.join(\", \") || \"Validation failed\",\n index: i,\n path: \"root\",\n });\n }\n }\n } catch (error) {\n invalid.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i,\n path: \"root\",\n });\n }\n }\n\n return {\n valid,\n invalid,\n total: dataLength,\n };\n }\n\n /**\n * Optimized batch retry that minimizes individual insert operations.\n * Groups records into smaller batches to reduce round trips while still isolating failures.\n *\n * @private\n */\n private async retryIndividualRecords(\n client: any,\n tableName: string,\n records: T[],\n ): Promise<{ successful: T[]; failed: FailedRecord<T>[] }> {\n const successful: T[] = [];\n const failed: FailedRecord<T>[] = [];\n\n // Instead of individual inserts, try smaller batches first (batches of 10)\n const RETRY_BATCH_SIZE = 10;\n const totalRecords = records.length;\n\n for (let i = 0; i < totalRecords; i += RETRY_BATCH_SIZE) {\n const batchEnd = Math.min(i + RETRY_BATCH_SIZE, totalRecords);\n const batch = records.slice(i, batchEnd);\n\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: batch,\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n // Add performance settings for retries\n max_insert_block_size: RETRY_BATCH_SIZE,\n max_block_size: RETRY_BATCH_SIZE,\n },\n });\n successful.push(...batch);\n } catch (batchError) {\n // If small batch fails, fall back to individual records\n for (let j = 0; j < batch.length; j++) {\n const record = batch[j];\n try {\n await client.insert({\n table: quoteIdentifier(tableName),\n values: [record],\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n },\n });\n successful.push(record);\n } catch (error) {\n failed.push({\n record,\n error: error instanceof Error ? error.message : String(error),\n index: i + j,\n });\n }\n }\n }\n }\n\n return { successful, failed };\n }\n\n /**\n * Validates input parameters and strategy compatibility\n * @private\n */\n private validateInsertParameters(\n data: T[] | Readable,\n options?: InsertOptions,\n ): { isStream: boolean; strategy: string; shouldValidate: boolean } {\n const isStream = data instanceof Readable;\n const strategy = options?.strategy || \"fail-fast\";\n const shouldValidate = options?.validate !== false;\n\n // Validate strategy compatibility with streams\n if (isStream && strategy === \"isolate\") {\n throw new Error(\n \"The 'isolate' error strategy is not supported with stream input. Use 'fail-fast' or 'discard' instead.\",\n );\n }\n\n // Validate that validation is not attempted on streams\n if (isStream && shouldValidate) {\n console.warn(\n \"Validation is not supported with stream input. 
Validation will be skipped.\",\n );\n }\n\n return { isStream, strategy, shouldValidate };\n }\n\n /**\n * Handles early return cases for empty data\n * @private\n */\n private handleEmptyData(\n data: T[] | Readable,\n isStream: boolean,\n ): InsertResult<T> | null {\n if (isStream && !data) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n if (!isStream && (!data || (data as T[]).length === 0)) {\n return {\n successful: 0,\n failed: 0,\n total: 0,\n };\n }\n\n return null;\n }\n\n /**\n * Performs pre-insertion validation for array data\n * @private\n */\n private async performPreInsertionValidation(\n data: T[],\n shouldValidate: boolean,\n strategy: string,\n options?: InsertOptions,\n ): Promise<{ validatedData: T[]; validationErrors: ValidationError[] }> {\n if (!shouldValidate) {\n return { validatedData: data, validationErrors: [] };\n }\n\n try {\n const validationResult = await this.validateRecords(data as unknown[]);\n const validatedData = validationResult.valid;\n const validationErrors = validationResult.invalid;\n\n if (validationErrors.length > 0) {\n this.handleValidationErrors(validationErrors, strategy, data, options);\n\n // Return appropriate data based on strategy\n switch (strategy) {\n case \"discard\":\n return { validatedData, validationErrors };\n case \"isolate\":\n return { validatedData: data, validationErrors };\n default:\n return { validatedData, validationErrors };\n }\n }\n\n return { validatedData, validationErrors };\n } catch (validationError) {\n if (strategy === \"fail-fast\") {\n throw validationError;\n }\n console.warn(\"Validation error:\", validationError);\n return { validatedData: data, validationErrors: [] };\n }\n }\n\n /**\n * Handles validation errors based on the specified strategy\n * @private\n */\n private handleValidationErrors(\n validationErrors: ValidationError[],\n strategy: string,\n data: T[],\n options?: InsertOptions,\n ): void {\n switch (strategy) {\n case \"fail-fast\":\n const firstError = validationErrors[0];\n throw new Error(\n `Validation failed for record at index ${firstError.index}: ${firstError.error}`,\n );\n\n case \"discard\":\n this.checkValidationThresholds(validationErrors, data.length, options);\n break;\n\n case \"isolate\":\n // For isolate strategy, validation errors will be handled in the final result\n break;\n }\n }\n\n /**\n * Checks if validation errors exceed configured thresholds\n * @private\n */\n private checkValidationThresholds(\n validationErrors: ValidationError[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const validationFailedCount = validationErrors.length;\n const validationFailedRatio = validationFailedCount / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n validationFailedCount > options.allowErrors\n ) {\n throw new Error(\n `Too many validation failures: ${validationFailedCount} > ${options.allowErrors}. Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n validationFailedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Validation failure ratio too high: ${validationFailedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
Errors: ${validationErrors.map((e) => e.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Optimized insert options preparation with better memory management\n * @private\n */\n private prepareInsertOptions(\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n isStream: boolean,\n strategy: string,\n options?: InsertOptions,\n ): any {\n const insertOptions: any = {\n table: quoteIdentifier(tableName),\n format: \"JSONEachRow\",\n clickhouse_settings: {\n date_time_input_format: \"best_effort\",\n wait_end_of_query: 1, // Ensure at least once delivery for INSERT operations\n // Performance optimizations\n max_insert_block_size:\n isStream ? 100000 : Math.min(validatedData.length, 100000),\n max_block_size: 65536,\n // Use async inserts for better performance with large datasets\n async_insert: validatedData.length > 1000 ? 1 : 0,\n wait_for_async_insert: 1, // For at least once delivery\n },\n };\n\n // Handle stream vs array input\n if (isStream) {\n insertOptions.values = data;\n } else {\n insertOptions.values = validatedData;\n }\n\n // For discard strategy, add optimized ClickHouse error tolerance settings\n if (\n strategy === \"discard\" &&\n (options?.allowErrors !== undefined ||\n options?.allowErrorsRatio !== undefined)\n ) {\n if (options.allowErrors !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_num =\n options.allowErrors;\n }\n\n if (options.allowErrorsRatio !== undefined) {\n insertOptions.clickhouse_settings.input_format_allow_errors_ratio =\n options.allowErrorsRatio;\n }\n }\n\n return insertOptions;\n }\n\n /**\n * Creates success result for completed insertions\n * @private\n */\n private createSuccessResult(\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n strategy: string,\n ): InsertResult<T> {\n if (isStream) {\n return {\n successful: -1, // -1 indicates stream mode where count is unknown\n failed: 0,\n total: -1,\n };\n }\n\n const insertedCount = validatedData.length;\n const totalProcessed =\n shouldValidate ? (data as T[]).length : insertedCount;\n\n const result: InsertResult<T> = {\n successful: insertedCount,\n failed: shouldValidate ? validationErrors.length : 0,\n total: totalProcessed,\n };\n\n // Add failed records if there are validation errors and using discard strategy\n if (\n shouldValidate &&\n validationErrors.length > 0 &&\n strategy === \"discard\"\n ) {\n result.failedRecords = validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }));\n }\n\n return result;\n }\n\n /**\n * Handles insertion errors based on the specified strategy\n * @private\n */\n private async handleInsertionError(\n batchError: any,\n strategy: string,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n switch (strategy) {\n case \"fail-fast\":\n throw new Error(\n `Failed to insert data into table ${tableName}: ${batchError}`,\n );\n\n case \"discard\":\n throw new Error(\n `Too many errors during insert into table ${tableName}. 
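In practice, the `discard` strategy maps the caller-facing options onto ClickHouse's native error-tolerance settings, as shown in `prepareInsertOptions` above. A usage sketch, reusing the illustrative `users` table from the ReplacingMergeTree sketch earlier:

```typescript
async function loadUsers(rows: UserRow[]) {
  // Tolerate up to 10% bad rows: good rows land, bad ones are dropped
  // (allowErrorsRatio becomes input_format_allow_errors_ratio in ClickHouse).
  const result = await users.insert(rows, {
    strategy: "discard",
    allowErrorsRatio: 0.1,
  });
  console.log(
    `${result.successful}/${result.total} inserted, ${result.failed} discarded`,
  );
}
```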
Error threshold exceeded: ${batchError}`,\n );\n\n case \"isolate\":\n return await this.handleIsolateStrategy(\n batchError,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n\n default:\n throw new Error(`Unknown error strategy: ${strategy}`);\n }\n }\n\n /**\n * Handles the isolate strategy for insertion errors\n * @private\n */\n private async handleIsolateStrategy(\n batchError: any,\n tableName: string,\n data: T[] | Readable,\n validatedData: T[],\n validationErrors: ValidationError[],\n isStream: boolean,\n shouldValidate: boolean,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n if (isStream) {\n throw new Error(\n `Isolate strategy is not supported with stream input: ${batchError}`,\n );\n }\n\n try {\n const { client } = await this.getMemoizedClient();\n const skipValidationOnRetry = options?.skipValidationOnRetry || false;\n const retryData = skipValidationOnRetry ? (data as T[]) : validatedData;\n\n const { successful, failed } = await this.retryIndividualRecords(\n client,\n tableName,\n retryData,\n );\n\n // Combine validation errors with insertion errors\n const allFailedRecords: FailedRecord<T>[] = [\n // Validation errors (if any and not skipping validation on retry)\n ...(shouldValidate && !skipValidationOnRetry ?\n validationErrors.map((ve) => ({\n record: ve.record as T,\n error: `Validation error: ${ve.error}`,\n index: ve.index,\n }))\n : []),\n // Insertion errors\n ...failed,\n ];\n\n this.checkInsertionThresholds(\n allFailedRecords,\n (data as T[]).length,\n options,\n );\n\n return {\n successful: successful.length,\n failed: allFailedRecords.length,\n total: (data as T[]).length,\n failedRecords: allFailedRecords,\n };\n } catch (isolationError) {\n throw new Error(\n `Failed to insert data into table ${tableName} during record isolation: ${isolationError}`,\n );\n }\n }\n\n /**\n * Checks if insertion errors exceed configured thresholds\n * @private\n */\n private checkInsertionThresholds(\n failedRecords: FailedRecord<T>[],\n totalRecords: number,\n options?: InsertOptions,\n ): void {\n const totalFailed = failedRecords.length;\n const failedRatio = totalFailed / totalRecords;\n\n if (\n options?.allowErrors !== undefined &&\n totalFailed > options.allowErrors\n ) {\n throw new Error(\n `Too many failed records: ${totalFailed} > ${options.allowErrors}. Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n\n if (\n options?.allowErrorsRatio !== undefined &&\n failedRatio > options.allowErrorsRatio\n ) {\n throw new Error(\n `Failed record ratio too high: ${failedRatio.toFixed(3)} > ${options.allowErrorsRatio}. 
Failed records: ${failedRecords.map((f) => f.error).join(\", \")}`,\n );\n }\n }\n\n /**\n * Recursively transforms a record to match ClickHouse's JSONEachRow requirements\n *\n * - For every Array(Nested(...)) field at any depth, each item is wrapped in its own array and recursively processed.\n * - For every Nested struct (not array), it recurses into the struct.\n * - This ensures compatibility with kafka_clickhouse_sync\n *\n * @param record The input record to transform (may be deeply nested)\n * @param columns The schema columns for this level (defaults to this.columnArray at the top level)\n * @returns The transformed record, ready for ClickHouse JSONEachRow insertion\n */\n private mapToClickhouseRecord(\n record: any,\n columns: Column[] = this.columnArray,\n ): any {\n const result = { ...record };\n for (const col of columns) {\n const value = record[col.name];\n const dt = col.data_type;\n\n if (isArrayNestedType(dt)) {\n // For Array(Nested(...)), wrap each item in its own array and recurse\n if (\n Array.isArray(value) &&\n (value.length === 0 || typeof value[0] === \"object\")\n ) {\n result[col.name] = value.map((item) => [\n this.mapToClickhouseRecord(item, dt.elementType.columns),\n ]);\n }\n } else if (isNestedType(dt)) {\n // For Nested struct (not array), recurse into it\n if (value && typeof value === \"object\") {\n result[col.name] = this.mapToClickhouseRecord(value, dt.columns);\n }\n }\n // All other types: leave as is for now\n }\n return result;\n }\n\n /**\n * Inserts data directly into the ClickHouse table with enhanced error handling and validation.\n * This method establishes a direct connection to ClickHouse using the project configuration\n * and inserts the provided data into the versioned table.\n *\n * PERFORMANCE OPTIMIZATIONS:\n * - Memoized client connections with fast config hashing\n * - Single-pass validation with pre-allocated arrays\n * - Batch-optimized retry strategy (batches of 10, then individual)\n * - Optimized ClickHouse settings for large datasets\n * - Reduced memory allocations and object creation\n *\n * Uses advanced typia validation when available for comprehensive type checking,\n * with fallback to basic validation for compatibility.\n *\n * The ClickHouse client is memoized and reused across multiple insert calls for better performance.\n * If the configuration changes, a new client will be automatically created.\n *\n * @param data Array of objects conforming to the table schema, or a Node.js Readable stream\n * @param options Optional configuration for error handling, validation, and insertion behavior\n * @returns Promise resolving to detailed insertion results\n * @throws {ConfigError} When configuration cannot be read or parsed\n * @throws {ClickHouseError} When insertion fails based on the error strategy\n * @throws {ValidationError} When validation fails and strategy is 'fail-fast'\n *\n * @example\n * ```typescript\n * // Create an OlapTable instance (typia validators auto-injected)\n * const userTable = new OlapTable<User>('users');\n *\n * // Insert with comprehensive typia validation\n * const result1 = await userTable.insert([\n * { id: 1, name: 'John', email: 'john@example.com' },\n * { id: 2, name: 'Jane', email: 'jane@example.com' }\n * ]);\n *\n * // Insert data with stream input (validation not available for streams)\n * const dataStream = new Readable({\n * objectMode: true,\n * read() {\n * // Stream implementation\n * }\n * });\n * const result2 = await userTable.insert(dataStream, { strategy: 'fail-fast' });\n *\n * 
// Insert with validation disabled for performance\n * const result3 = await userTable.insert(data, { validate: false });\n *\n * // Insert with error handling strategies\n * const result4 = await userTable.insert(mixedData, {\n * strategy: 'isolate',\n * allowErrorsRatio: 0.1,\n * validate: true // Use typia validation (default)\n * });\n *\n * // Optional: Clean up connection when completely done\n * await userTable.closeClient();\n * ```\n */\n async insert(\n data: T[] | Readable,\n options?: InsertOptions,\n ): Promise<InsertResult<T>> {\n // Validate input parameters and strategy compatibility\n const { isStream, strategy, shouldValidate } =\n this.validateInsertParameters(data, options);\n\n // Handle early return cases for empty data\n const emptyResult = this.handleEmptyData(data, isStream);\n if (emptyResult) {\n return emptyResult;\n }\n\n // Pre-insertion validation for arrays (optimized single-pass)\n let validatedData: T[] = [];\n let validationErrors: ValidationError[] = [];\n\n if (!isStream && shouldValidate) {\n const validationResult = await this.performPreInsertionValidation(\n data as T[],\n shouldValidate,\n strategy,\n options,\n );\n validatedData = validationResult.validatedData;\n validationErrors = validationResult.validationErrors;\n } else {\n // No validation or stream input\n validatedData = isStream ? [] : (data as T[]);\n }\n\n // Get memoized client and generate cached table name\n const { client } = await this.getMemoizedClient();\n const tableName = this.generateTableName();\n\n try {\n // Prepare and execute insertion with optimized settings\n const insertOptions = this.prepareInsertOptions(\n tableName,\n data,\n validatedData,\n isStream,\n strategy,\n options,\n );\n\n await client.insert(insertOptions);\n\n // Return success result\n return this.createSuccessResult(\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n strategy,\n );\n } catch (batchError) {\n // Handle insertion failure based on strategy with optimized retry\n return await this.handleInsertionError(\n batchError,\n strategy,\n tableName,\n data,\n validatedData,\n validationErrors,\n isStream,\n shouldValidate,\n options,\n );\n }\n // Note: We don't close the client here since it's memoized for reuse\n // Use closeClient() method if you need to explicitly close the connection\n }\n\n // Note: Static factory methods (withS3Queue, withReplacingMergeTree, withMergeTree)\n // were removed in ENG-856. 
Use direct configuration instead, e.g.:\n // new OlapTable(name, { engine: ClickHouseEngines.ReplacingMergeTree, orderByFields: [\"id\"], ver: \"updated_at\" })\n}\n","/**\n * @fileoverview Stream SDK for data streaming operations in Moose.\n *\n * This module provides the core streaming functionality including:\n * - Stream creation and configuration\n * - Message transformations between streams\n * - Consumer registration for message processing\n * - Dead letter queue handling for error recovery\n *\n * @module Stream\n */\n\nimport { IJsonSchemaCollection } from \"typia\";\nimport { TypedBase } from \"../typedBase\";\nimport { Column } from \"../../dataModels/dataModelTypes\";\nimport { dlqColumns, dlqSchema, getMooseInternal } from \"../internal\";\nimport { OlapTable } from \"./olapTable\";\nimport { LifeCycle } from \"./lifeCycle\";\nimport type {\n RuntimeKafkaConfig,\n ConfigurationRegistry,\n} from \"../../config/runtime\";\nimport { createHash } from \"node:crypto\";\nimport { Logger, Producer } from \"../../commons\";\nimport { getSourceFileFromStack } from \"../utils/stackTrace\";\n\n/**\n * Represents zero, one, or many values of type T.\n * Used for flexible return types in transformations where a single input\n * can produce no output, one output, or multiple outputs.\n *\n * @template T The type of the value(s)\n * @example\n * ```typescript\n * // Can return a single value\n * const single: ZeroOrMany<string> = \"hello\";\n *\n * // Can return an array\n * const multiple: ZeroOrMany<string> = [\"hello\", \"world\"];\n *\n * // Can return null/undefined to filter out\n * const filtered: ZeroOrMany<string> = null;\n * ```\n */\nexport type ZeroOrMany<T> = T | T[] | undefined | null;\n\n/**\n * Function type for transforming records from one type to another.\n * Supports both synchronous and asynchronous transformations.\n *\n * @template T The input record type\n * @template U The output record type\n * @param record The input record to transform\n * @returns The transformed record(s), or null/undefined to filter out\n *\n * @example\n * ```typescript\n * const transform: SyncOrAsyncTransform<InputType, OutputType> = (record) => {\n * return { ...record, processed: true };\n * };\n * ```\n */\nexport type SyncOrAsyncTransform<T, U> = (\n record: T,\n) => ZeroOrMany<U> | Promise<ZeroOrMany<U>>;\n\n/**\n * Function type for consuming records without producing output.\n * Used for side effects like logging, external API calls, or database writes.\n *\n * @template T The record type to consume\n * @param record The record to process\n * @returns Promise<void> or void\n *\n * @example\n * ```typescript\n * const consumer: Consumer<UserEvent> = async (event) => {\n * await sendToAnalytics(event);\n * };\n * ```\n */\nexport type Consumer<T> = (record: T) => Promise<void> | void;\n\n/**\n * Configuration options for stream transformations.\n *\n * @template T The type of records being transformed\n */\nexport interface TransformConfig<T> {\n /**\n * Optional version identifier for this transformation.\n * Multiple transformations to the same destination can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional metadata for documentation and tracking purposes.\n */\n metadata?: { description?: string };\n\n /**\n * Optional dead letter queue for handling transformation failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is 
provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this transform was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\n/**\n * Configuration options for stream consumers.\n *\n * @template T The type of records being consumed\n */\nexport interface ConsumerConfig<T> {\n /**\n * Optional version identifier for this consumer.\n * Multiple consumers can coexist with different versions.\n */\n version?: string;\n\n /**\n * Optional dead letter queue for handling consumer failures.\n * Failed records will be sent to this queue for manual inspection or reprocessing.\n * Uses {@link Stream.defaultDeadLetterQueue} by default\n * unless a DeadLetterQueue is provided, or it is explicitly disabled with a null value\n */\n deadLetterQueue?: DeadLetterQueue<T> | null;\n\n /**\n * @internal Source file path where this consumer was declared.\n * Automatically captured from stack trace.\n */\n sourceFile?: string;\n}\n\nexport type SchemaRegistryEncoding = \"JSON\" | \"AVRO\" | \"PROTOBUF\";\n\nexport type SchemaRegistryReference =\n | { id: number }\n | { subjectLatest: string }\n | { subject: string; version: number };\n\nexport interface KafkaSchemaConfig {\n kind: SchemaRegistryEncoding;\n reference: SchemaRegistryReference;\n}\n\n/**\n * Represents a message routed to a specific destination stream.\n * Used internally by the multi-transform functionality to specify\n * where transformed messages should be sent.\n *\n * @internal\n */\nclass RoutedMessage {\n /** The destination stream for the message */\n destination: Stream<any>;\n\n /** The message value(s) to send */\n values: ZeroOrMany<any>;\n\n /**\n * Creates a new routed message.\n *\n * @param destination The target stream\n * @param values The message(s) to route\n */\n constructor(destination: Stream<any>, values: ZeroOrMany<any>) {\n this.destination = destination;\n this.values = values;\n }\n}\n\n/**\n * Configuration options for a data stream (e.g., a Redpanda topic).\n * @template T The data type of the messages in the stream.\n */\nexport interface StreamConfig<T> {\n /**\n * Specifies the number of partitions for the stream. Affects parallelism and throughput.\n */\n parallelism?: number;\n /**\n * Specifies the data retention period for the stream in seconds. Messages older than this may be deleted.\n */\n retentionPeriod?: number;\n /**\n * An optional destination OLAP table where messages from this stream should be automatically ingested.\n */\n destination?: OlapTable<T>;\n /**\n * An optional version string for this configuration. Can be used for tracking changes or managing deployments.\n */\n version?: string;\n metadata?: { description?: string };\n lifeCycle?: LifeCycle;\n\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n\n /** Optional Schema Registry configuration for this stream */\n schemaConfig?: KafkaSchemaConfig;\n}\n\n/**\n * Represents a data stream, typically corresponding to a Redpanda topic.\n * Provides a typed interface for producing to and consuming from the stream, and defining transformations.\n *\n * @template T The data type of the messages flowing through the stream. 
The structure of T defines the message schema.\n */\nexport class Stream<T> extends TypedBase<T, StreamConfig<T>> {\n defaultDeadLetterQueue?: DeadLetterQueue<T>;\n /** @internal Memoized KafkaJS producer for reusing connections across sends */\n private _memoizedProducer?: Producer;\n /** @internal Hash of the configuration used to create the memoized Kafka producer */\n private _kafkaConfigHash?: string;\n\n /**\n * Creates a new Stream instance.\n * @param name The name of the stream. This name is used for the underlying Redpanda topic.\n * @param config Optional configuration for the stream.\n */\n constructor(name: string, config?: StreamConfig<T>);\n\n /**\n * @internal\n * Note: `validators` parameter is a positional placeholder (always undefined for Stream).\n * It exists because TypedBase has validators as the 5th param, and we need to pass\n * allowExtraFields as the 6th param. Stream doesn't use validators.\n */\n constructor(\n name: string,\n config: StreamConfig<T>,\n schema: IJsonSchemaCollection.IV3_1,\n columns: Column[],\n validators: undefined,\n allowExtraFields: boolean,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<T>,\n schema?: IJsonSchemaCollection.IV3_1,\n columns?: Column[],\n validators?: undefined,\n allowExtraFields?: boolean,\n ) {\n super(name, config ?? {}, schema, columns, undefined, allowExtraFields);\n const streams = getMooseInternal().streams;\n if (streams.has(name)) {\n throw new Error(`Stream with name ${name} already exists`);\n }\n streams.set(name, this);\n this.defaultDeadLetterQueue = this.config.defaultDeadLetterQueue;\n }\n\n /**\n * Internal map storing transformation configurations.\n * Maps destination stream names to arrays of transformation functions and their configs.\n *\n * @internal\n */\n _transformations = new Map<\n string,\n [Stream<any>, SyncOrAsyncTransform<T, any>, TransformConfig<T>][]\n >();\n\n /**\n * Internal function for multi-stream transformations.\n * Allows a single transformation to route messages to multiple destinations.\n *\n * @internal\n */\n _multipleTransformations?: (record: T) => [RoutedMessage];\n\n /**\n * Internal array storing consumer configurations.\n *\n * @internal\n */\n _consumers = new Array<{\n consumer: Consumer<T>;\n config: ConsumerConfig<T>;\n }>();\n\n /**\n * Builds the full Kafka topic name including optional namespace and version suffix.\n * Version suffix is appended as _x_y_z where dots in version are replaced with underscores.\n */\n private buildFullTopicName(namespace?: string): string {\n const versionSuffix =\n this.config.version ? 
`_${this.config.version.replace(/\\./g, \"_\")}` : \"\";\n const base = `${this.name}${versionSuffix}`;\n return namespace !== undefined && namespace.length > 0 ?\n `${namespace}.${base}`\n : base;\n }\n\n /**\n * Creates a fast hash string from relevant Kafka configuration fields.\n */\n private createConfigHash(kafkaConfig: RuntimeKafkaConfig): string {\n const configString = [\n kafkaConfig.broker,\n kafkaConfig.messageTimeoutMs,\n kafkaConfig.saslUsername,\n kafkaConfig.saslPassword,\n kafkaConfig.saslMechanism,\n kafkaConfig.securityProtocol,\n kafkaConfig.namespace,\n ].join(\":\");\n return createHash(\"sha256\")\n .update(configString)\n .digest(\"hex\")\n .substring(0, 16);\n }\n\n /**\n * Gets or creates a memoized KafkaJS producer using runtime configuration.\n */\n private async getMemoizedProducer(): Promise<{\n producer: Producer;\n kafkaConfig: RuntimeKafkaConfig;\n }> {\n // dynamic import to keep Stream objects browser compatible\n await import(\"../../config/runtime\");\n const configRegistry = (globalThis as any)\n ._mooseConfigRegistry as ConfigurationRegistry;\n const { getKafkaProducer } = await import(\"../../commons\");\n\n const kafkaConfig = await (configRegistry as any).getKafkaConfig();\n const currentHash = this.createConfigHash(kafkaConfig);\n\n if (this._memoizedProducer && this._kafkaConfigHash === currentHash) {\n return { producer: this._memoizedProducer, kafkaConfig };\n }\n\n // Close existing producer if config changed\n if (this._memoizedProducer && this._kafkaConfigHash !== currentHash) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n }\n this._memoizedProducer = undefined;\n }\n\n const clientId = `moose-sdk-stream-${this.name}`;\n const logger: Logger = {\n logPrefix: clientId,\n log: (message: string): void => {\n console.log(`${clientId}: ${message}`);\n },\n error: (message: string): void => {\n console.error(`${clientId}: ${message}`);\n },\n warn: (message: string): void => {\n console.warn(`${clientId}: ${message}`);\n },\n };\n\n const producer = await getKafkaProducer(\n {\n clientId,\n broker: kafkaConfig.broker,\n securityProtocol: kafkaConfig.securityProtocol,\n saslUsername: kafkaConfig.saslUsername,\n saslPassword: kafkaConfig.saslPassword,\n saslMechanism: kafkaConfig.saslMechanism,\n },\n logger,\n );\n\n this._memoizedProducer = producer;\n this._kafkaConfigHash = currentHash;\n\n return { producer, kafkaConfig };\n }\n\n /**\n * Closes the memoized Kafka producer if it exists.\n */\n async closeProducer(): Promise<void> {\n if (this._memoizedProducer) {\n try {\n await this._memoizedProducer.disconnect();\n } catch {\n // ignore\n } finally {\n this._memoizedProducer = undefined;\n this._kafkaConfigHash = undefined;\n }\n }\n }\n\n /**\n * Sends one or more records to this stream's Kafka topic.\n * Values are JSON-serialized as message values.\n */\n async send(values: ZeroOrMany<T>): Promise<void> {\n // Normalize to flat array of records\n const flat: T[] =\n Array.isArray(values) ? values\n : values !== undefined && values !== null ? 
[values as T]\n : [];\n\n if (flat.length === 0) return;\n\n const { producer, kafkaConfig } = await this.getMemoizedProducer();\n const topic = this.buildFullTopicName(kafkaConfig.namespace);\n\n // Use Schema Registry JSON envelope if configured\n const sr = this.config.schemaConfig;\n if (sr && sr.kind === \"JSON\") {\n const schemaRegistryUrl = kafkaConfig.schemaRegistryUrl;\n if (!schemaRegistryUrl) {\n throw new Error(\"Schema Registry URL not configured\");\n }\n\n const {\n default: { SchemaRegistry },\n } = await import(\"@kafkajs/confluent-schema-registry\");\n const registry = new SchemaRegistry({ host: schemaRegistryUrl });\n\n let schemaId: undefined | number = undefined;\n\n if (\"id\" in sr.reference) {\n schemaId = sr.reference.id;\n } else if (\"subjectLatest\" in sr.reference) {\n schemaId = await registry.getLatestSchemaId(sr.reference.subjectLatest);\n } else if (\"subject\" in sr.reference) {\n schemaId = await registry.getRegistryId(\n sr.reference.subject,\n sr.reference.version,\n );\n }\n\n if (schemaId === undefined) {\n throw new Error(\"Malformed schema reference.\");\n }\n\n const encoded = await Promise.all(\n flat.map((v) =>\n registry.encode(schemaId, v as unknown as Record<string, unknown>),\n ),\n );\n await producer.send({\n topic,\n messages: encoded.map((value) => ({ value })),\n });\n return;\n } else if (sr !== undefined) {\n throw new Error(\"Currently only JSON Schema is supported.\");\n }\n\n await producer.send({\n topic,\n messages: flat.map((v) => ({ value: JSON.stringify(v) })),\n });\n }\n\n /**\n * Adds a transformation step that processes messages from this stream and sends the results to a destination stream.\n * Multiple transformations to the same destination stream can be added if they have distinct `version` identifiers in their config.\n *\n * @template U The data type of the messages in the destination stream.\n * @param destination The destination stream for the transformed messages.\n * @param transformation A function that takes a message of type T and returns zero or more messages of type U (or a Promise thereof).\n * Return `null` or `undefined` or an empty array `[]` to filter out a message. Return an array to emit multiple messages.\n * @param config Optional configuration for this specific transformation step, like a version.\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<T, U>,\n config?: TransformConfig<T>,\n ) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const transformConfig: TransformConfig<T> = {\n ...(config ?? 
{}),\n sourceFile,\n };\n if (transformConfig.deadLetterQueue === undefined) {\n transformConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n\n if (this._transformations.has(destination.name)) {\n const existingTransforms = this._transformations.get(destination.name)!;\n const hasVersion = existingTransforms.some(\n ([_, __, cfg]) => cfg.version === transformConfig.version,\n );\n\n if (!hasVersion) {\n existingTransforms.push([destination, transformation, transformConfig]);\n }\n } else {\n this._transformations.set(destination.name, [\n [destination, transformation, transformConfig],\n ]);\n }\n }\n\n /**\n * Adds a consumer function that processes messages from this stream.\n * Multiple consumers can be added if they have distinct `version` identifiers in their config.\n *\n * @param consumer A function that takes a message of type T and performs an action (e.g., side effect, logging). Should return void or Promise<void>.\n * @param config Optional configuration for this specific consumer, like a version.\n */\n addConsumer(consumer: Consumer<T>, config?: ConsumerConfig<T>) {\n // Capture source file from call stack at this exact moment\n const sourceFile = getSourceFileFromStack(new Error().stack);\n\n const consumerConfig: ConsumerConfig<T> = {\n ...(config ?? {}),\n sourceFile,\n };\n if (consumerConfig.deadLetterQueue === undefined) {\n consumerConfig.deadLetterQueue = this.defaultDeadLetterQueue;\n }\n const hasVersion = this._consumers.some(\n (existing) => existing.config.version === consumerConfig.version,\n );\n\n if (!hasVersion) {\n this._consumers.push({ consumer, config: consumerConfig });\n }\n }\n\n /**\n * Helper method for `addMultiTransform` to specify the destination and values for a routed message.\n * @param values The value or values to send to this stream.\n * @returns A `RoutedMessage` object associating the values with this stream.\n *\n * @example\n * ```typescript\n * sourceStream.addMultiTransform((record) => [\n * destinationStream1.routed(transformedRecord1),\n * destinationStream2.routed([record2a, record2b])\n * ]);\n * ```\n */\n routed = (values: ZeroOrMany<T>) => new RoutedMessage(this, values);\n\n /**\n * Adds a single transformation function that can route messages to multiple destination streams.\n * This is an alternative to adding multiple individual `addTransform` calls.\n * Only one multi-transform function can be added per stream.\n *\n * @param transformation A function that takes a message of type T and returns an array of `RoutedMessage` objects,\n * each specifying a destination stream and the message(s) to send to it.\n */\n addMultiTransform(transformation: (record: T) => [RoutedMessage]) {\n this._multipleTransformations = transformation;\n }\n}\n\n/**\n * Base model for dead letter queue entries.\n * Contains the original failed record along with error information.\n */\nexport interface DeadLetterModel {\n /** The original record that failed processing */\n originalRecord: Record<string, any>;\n\n /** Human-readable error message describing the failure */\n errorMessage: string;\n\n /** Classification of the error type (e.g., \"ValidationError\", \"TransformError\") */\n errorType: string;\n\n /** Timestamp when the failure occurred */\n failedAt: Date;\n\n /** The source component where the failure occurred */\n source: \"api\" | \"transform\" | \"table\";\n}\n\n/**\n * Enhanced dead letter model with type recovery functionality.\n * Extends the base model with the ability to recover the original typed record.\n *\n * 
@template T The original record type before failure\n */\nexport interface DeadLetter<T> extends DeadLetterModel {\n /**\n * Recovers the original record as its typed form.\n * Useful for reprocessing failed records with proper type safety.\n *\n * @returns The original record cast to type T\n */\n asTyped: () => T;\n}\n\n/**\n * Internal function to attach type guard functionality to dead letter records.\n *\n * @internal\n * @template T The original record type\n * @param dl The dead letter model to enhance\n * @param typeGuard Function to validate and cast the original record\n */\nfunction attachTypeGuard<T>(\n dl: DeadLetterModel,\n typeGuard: (input: any) => T,\n): asserts dl is DeadLetter<T> {\n (dl as any).asTyped = () => typeGuard(dl.originalRecord);\n}\n\n/**\n * Specialized stream for handling failed records (dead letters).\n * Provides type-safe access to failed records for reprocessing or analysis.\n *\n * @template T The original record type that failed processing\n *\n * @example\n * ```typescript\n * const dlq = new DeadLetterQueue<UserEvent>(\"user-events-dlq\");\n *\n * dlq.addConsumer(async (deadLetter) => {\n * const originalEvent = deadLetter.asTyped();\n * console.log(`Failed event: ${deadLetter.errorMessage}`);\n * // Potentially reprocess or alert\n * });\n * ```\n */\nexport class DeadLetterQueue<T> extends Stream<DeadLetterModel> {\n /**\n * Creates a new DeadLetterQueue instance.\n * @param name The name of the dead letter queue stream\n * @param config Optional configuration for the stream. The metadata property is always present and includes stackTrace.\n */\n constructor(name: string, config?: StreamConfig<DeadLetterModel>);\n\n /** @internal **/\n constructor(\n name: string,\n config: StreamConfig<DeadLetterModel>,\n validate: (originalRecord: any) => T,\n );\n\n constructor(\n name: string,\n config?: StreamConfig<DeadLetterModel>,\n typeGuard?: (originalRecord: any) => T,\n ) {\n if (typeGuard === undefined) {\n throw new Error(\n \"Supply the type param T so that the schema is inserted by the compiler plugin.\",\n );\n }\n\n super(name, config ?? 
{}, dlqSchema, dlqColumns, undefined, false);\n this.typeGuard = typeGuard;\n getMooseInternal().streams.set(name, this);\n }\n\n /**\n * Internal type guard function for validating and casting original records.\n *\n * @internal\n */\n private typeGuard: (originalRecord: any) => T;\n\n /**\n * Adds a transformation step for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @template U The output type for the transformation\n * @param destination The destination stream for transformed messages\n * @param transformation Function to transform dead letter records\n * @param config Optional transformation configuration\n */\n addTransform<U>(\n destination: Stream<U>,\n transformation: SyncOrAsyncTransform<DeadLetter<T>, U>,\n config?: TransformConfig<DeadLetterModel>,\n ) {\n const withValidate: SyncOrAsyncTransform<DeadLetterModel, U> = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addTransform(destination, withValidate, config);\n }\n\n /**\n * Adds a consumer for dead letter records.\n * The consumer function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param consumer Function to process dead letter records\n * @param config Optional consumer configuration\n */\n addConsumer(\n consumer: Consumer<DeadLetter<T>>,\n config?: ConsumerConfig<DeadLetterModel>,\n ) {\n const withValidate: Consumer<DeadLetterModel> = (deadLetter) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return consumer(deadLetter);\n };\n super.addConsumer(withValidate, config);\n }\n\n /**\n * Adds a multi-stream transformation for dead letter records.\n * The transformation function receives a DeadLetter<T> with type recovery capabilities.\n *\n * @param transformation Function to route dead letter records to multiple destinations\n */\n addMultiTransform(\n transformation: (record: DeadLetter<T>) => [RoutedMessage],\n ) {\n const withValidate: (record: DeadLetterModel) => [RoutedMessage] = (\n deadLetter,\n ) => {\n attachTypeGuard<T>(deadLetter, this.typeGuard);\n return transformation(deadLetter);\n };\n super.addMultiTransform(withValidate);\n }\n}\n","export * from \"./browserCompatible\";\n\nexport type DataModelConfig<T> = Partial<{\n ingestion: true;\n storage: {\n enabled?: boolean;\n order_by_fields?: (keyof T)[];\n deduplicate?: boolean;\n name?: string;\n };\n parallelism?: number;\n}>;\n\nexport * from \"./blocks/helpers\";\nexport * from \"./commons\";\nexport * from \"./secrets\";\nexport * from \"./consumption-apis/helpers\";\nexport * from \"./consumption-apis/webAppHelpers\";\nexport * from \"./scripts/task\";\n\nexport { createApi, createConsumptionApi } from \"./consumption-apis/runner\";\n\nexport { MooseCache } from \"./clients/redisClient\";\n\nexport { ApiUtil, ConsumptionUtil } from \"./consumption-apis/helpers\";\n\nexport { getMooseClients } from \"./consumption-apis/standalone\";\nexport { sql } from \"./sqlHelpers\";\n\nexport * from \"./utilities\";\nexport * from \"./connectors/dataSource\";\nexport {\n ClickHouseByteSize,\n ClickHouseInt,\n LowCardinality,\n ClickHouseNamedTuple,\n ClickHousePoint,\n ClickHouseRing,\n ClickHouseLineString,\n ClickHouseMultiLineString,\n ClickHousePolygon,\n ClickHouseMultiPolygon,\n} from \"./dataModels/types\";\n","import { ClickHouseClient, CommandResult, ResultSet } from \"@clickhouse/client\";\nimport {\n Client as TemporalClient,\n Connection,\n ConnectionOptions,\n} from 
\"@temporalio/client\";\nimport { StringValue } from \"@temporalio/common\";\nimport { createHash, randomUUID } from \"node:crypto\";\nimport { performance } from \"perf_hooks\";\nimport * as fs from \"fs\";\nimport { getWorkflows } from \"../dmv2/internal\";\nimport { JWTPayload } from \"jose\";\nimport { Sql, sql, RawValue, toQuery, toQueryPreview } from \"../sqlHelpers\";\n\n/**\n * Format elapsed milliseconds into a human-readable string.\n * Matches Python's format_timespan behavior.\n */\nfunction formatElapsedTime(ms: number): string {\n if (ms < 1000) {\n return `${Math.round(ms)} ms`;\n }\n const seconds = ms / 1000;\n if (seconds < 60) {\n return `${seconds.toFixed(2)} seconds`;\n }\n const minutes = Math.floor(seconds / 60);\n const remainingSeconds = seconds % 60;\n return `${minutes} minutes and ${remainingSeconds.toFixed(2)} seconds`;\n}\n\nexport interface ApiUtil {\n client: MooseClient;\n\n // SQL interpolator\n sql: typeof sql;\n jwt: JWTPayload | undefined;\n}\n\n/** @deprecated Use ApiUtil instead. */\nexport type ConsumptionUtil = ApiUtil;\n\nexport class MooseClient {\n query: QueryClient;\n workflow: WorkflowClient;\n\n constructor(queryClient: QueryClient, temporalClient?: TemporalClient) {\n this.query = queryClient;\n this.workflow = new WorkflowClient(temporalClient);\n }\n}\n\nexport class QueryClient {\n client: ClickHouseClient;\n query_id_prefix: string;\n constructor(client: ClickHouseClient, query_id_prefix: string) {\n this.client = client;\n this.query_id_prefix = query_id_prefix;\n }\n\n async execute<T = any>(\n sql: Sql,\n ): Promise<ResultSet<\"JSONEachRow\"> & { __query_result_t?: T[] }> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Query: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.query({\n query,\n query_params,\n format: \"JSONEachRow\",\n query_id: this.query_id_prefix + randomUUID(),\n // Note: wait_end_of_query deliberately NOT set here as this is used for SELECT queries\n // where response buffering would harm streaming performance and concurrency\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Query completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n\n async command(sql: Sql): Promise<CommandResult> {\n const [query, query_params] = toQuery(sql);\n\n console.log(`[QueryClient] | Command: ${toQueryPreview(sql)}`);\n const start = performance.now();\n const result = await this.client.command({\n query,\n query_params,\n query_id: this.query_id_prefix + randomUUID(),\n });\n const elapsedMs = performance.now() - start;\n console.log(\n `[QueryClient] | Command completed: ${formatElapsedTime(elapsedMs)}`,\n );\n return result;\n }\n}\n\nexport class WorkflowClient {\n client: TemporalClient | undefined;\n\n constructor(temporalClient?: TemporalClient) {\n this.client = temporalClient;\n }\n\n async execute(name: string, input_data: any) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. 
Is the feature flag enabled?`,\n };\n }\n\n // Get workflow configuration\n const config = await this.getWorkflowConfig(name);\n\n // Process input data and generate workflow ID\n const [processedInput, workflowId] = this.processInputData(\n name,\n input_data,\n );\n\n console.log(\n `WorkflowClient - starting workflow: ${name} with config ${JSON.stringify(config)} and input_data ${JSON.stringify(processedInput)}`,\n );\n\n const handle = await this.client.workflow.start(\"ScriptWorkflow\", {\n args: [\n { workflow_name: name, execution_mode: \"start\" as const },\n processedInput,\n ],\n taskQueue: \"typescript-script-queue\",\n workflowId,\n workflowIdConflictPolicy: \"FAIL\",\n workflowIdReusePolicy: \"ALLOW_DUPLICATE\",\n retry: {\n maximumAttempts: config.retries,\n },\n workflowRunTimeout: config.timeout as StringValue,\n });\n\n return {\n status: 200,\n body: `Workflow started: ${name}. View it in the Temporal dashboard: http://localhost:8080/namespaces/default/workflows/${workflowId}/${handle.firstExecutionRunId}/history`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error starting workflow: ${error}`,\n };\n }\n }\n\n async terminate(workflowId: string) {\n try {\n if (!this.client) {\n return {\n status: 404,\n body: `Temporal client not found. Is the feature flag enabled?`,\n };\n }\n\n const handle = this.client.workflow.getHandle(workflowId);\n await handle.terminate();\n\n return {\n status: 200,\n body: `Workflow terminated: ${workflowId}`,\n };\n } catch (error) {\n return {\n status: 400,\n body: `Error terminating workflow: ${error}`,\n };\n }\n }\n\n private async getWorkflowConfig(\n name: string,\n ): Promise<{ retries: number; timeout: string }> {\n const workflows = await getWorkflows();\n const dmv2Workflow = workflows.get(name);\n if (dmv2Workflow) {\n return {\n retries: dmv2Workflow.config.retries || 3,\n timeout: dmv2Workflow.config.timeout || \"1h\",\n };\n }\n\n throw new Error(`Workflow config not found for ${name}`);\n }\n\n private processInputData(name: string, input_data: any): [any, string] {\n let workflowId = name;\n if (input_data) {\n const hash = createHash(\"sha256\")\n .update(JSON.stringify(input_data))\n .digest(\"hex\")\n .slice(0, 16);\n workflowId = `${name}-${hash}`;\n }\n return [input_data, workflowId];\n }\n}\n\n/**\n * This looks similar to the client in runner.ts which is a worker.\n * Temporal SDK uses similar looking connection options & client,\n * but there are different libraries for a worker & client like this one\n * that triggers workflows.\n */\nexport async function getTemporalClient(\n temporalUrl: string,\n namespace: string,\n clientCert: string,\n clientKey: string,\n apiKey: string,\n): Promise<TemporalClient | undefined> {\n try {\n console.info(\n `<api> Using temporal_url: ${temporalUrl} and namespace: ${namespace}`,\n );\n\n let connectionOptions: ConnectionOptions = {\n address: temporalUrl,\n connectTimeout: \"3s\",\n };\n\n if (clientCert && clientKey) {\n // URL with mTLS uses gRPC namespace endpoint which is what temporalUrl already is\n console.log(\"Using TLS for secure Temporal\");\n const cert = await fs.readFileSync(clientCert);\n const key = await fs.readFileSync(clientKey);\n\n connectionOptions.tls = {\n clientCertPair: { crt: cert, key: key },\n };\n } else if (apiKey) {\n console.log(\"Using API key for secure Temporal\");\n // URL with API key uses gRPC regional endpoint\n connectionOptions.address = \"us-west1.gcp.api.temporal.io:7233\";\n connectionOptions.apiKey = apiKey;\n 
connectionOptions.tls = {};\n connectionOptions.metadata = {\n \"temporal-namespace\": namespace,\n };\n }\n\n console.log(`<api> Connecting to Temporal at ${connectionOptions.address}`);\n const connection = await Connection.connect(connectionOptions);\n const client = new TemporalClient({ connection, namespace });\n console.log(\"<api> Connected to Temporal server\");\n\n return client;\n } catch (error) {\n console.warn(`Failed to connect to Temporal. Is the feature flag enabled?`);\n console.warn(error);\n return undefined;\n }\n}\n\nexport const ApiHelpers = {\n column: (value: string) => [\"Identifier\", value] as [string, string],\n table: (value: string) => [\"Identifier\", value] as [string, string],\n};\n\n/** @deprecated Use ApiHelpers instead. */\nexport const ConsumptionHelpers = ApiHelpers;\n\nexport function joinQueries({\n values,\n separator = \",\",\n prefix = \"\",\n suffix = \"\",\n}: {\n values: readonly RawValue[];\n separator?: string;\n prefix?: string;\n suffix?: string;\n}) {\n if (values.length === 0) {\n throw new TypeError(\n \"Expected `join([])` to be called with an array of multiple elements, but got an empty array\",\n );\n }\n\n return new Sql(\n [prefix, ...Array(values.length - 1).fill(separator), suffix],\n values,\n );\n}\n","import http from \"http\";\nimport { getClickhouseClient } from \"../commons\";\nimport { MooseClient, QueryClient, getTemporalClient } from \"./helpers\";\nimport * as jose from \"jose\";\nimport { ClickHouseClient } from \"@clickhouse/client\";\nimport { Cluster } from \"../cluster-utils\";\nimport { ApiUtil } from \"../index\";\nimport { sql } from \"../sqlHelpers\";\nimport { Client as TemporalClient } from \"@temporalio/client\";\nimport { getApis, getWebApps } from \"../dmv2/internal\";\n\ninterface ClickhouseConfig {\n database: string;\n host: string;\n port: string;\n username: string;\n password: string;\n useSSL: boolean;\n}\n\ninterface JwtConfig {\n secret?: string;\n issuer: string;\n audience: string;\n}\n\ninterface TemporalConfig {\n url: string;\n namespace: string;\n clientCert: string;\n clientKey: string;\n apiKey: string;\n}\n\ninterface ApisConfig {\n apisDir: string;\n clickhouseConfig: ClickhouseConfig;\n jwtConfig?: JwtConfig;\n temporalConfig?: TemporalConfig;\n enforceAuth: boolean;\n isDmv2: boolean;\n proxyPort?: number;\n workerCount?: number;\n}\n\n// Convert our config to Clickhouse client config\nconst toClientConfig = (config: ClickhouseConfig) => ({\n ...config,\n useSSL: config.useSSL ? \"true\" : \"false\",\n});\n\nconst createPath = (apisDir: string, path: string) => `${apisDir}${path}.ts`;\n\nconst httpLogger = (\n req: http.IncomingMessage,\n res: http.ServerResponse,\n startMs: number,\n) => {\n console.log(\n `${req.method} ${req.url} ${res.statusCode} ${Date.now() - startMs}ms`,\n );\n};\n\nconst modulesCache = new Map<string, any>();\n\nexport function createApi<T extends object, R = any>(\n _handler: (params: T, utils: ApiUtil) => Promise<R>,\n): (\n rawParams: Record<string, string[] | string>,\n utils: ApiUtil,\n) => Promise<R> {\n throw new Error(\n \"This should be compiled-time replaced by compiler plugins to add parsing.\",\n );\n}\n\n/** @deprecated Use `Api` from \"dmv2/sdk/consumptionApi\" instead. 
*/\nexport const createConsumptionApi = createApi;\n\nconst apiHandler = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apis = isDmv2 ? await getApis() : new Map();\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n try {\n const url = new URL(req.url || \"\", \"http://localhost\");\n const fileName = url.pathname;\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1]; // Bearer <token>\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed\");\n if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n } else if (enforceAuth) {\n res.writeHead(401, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Unauthorized\" }));\n httpLogger(req, res, start);\n return;\n }\n\n const pathName = createPath(apisDir, fileName);\n const paramsObject = Array.from(url.searchParams.entries()).reduce(\n (obj: { [key: string]: string[] | string }, [key, value]) => {\n const existingValue = obj[key];\n if (existingValue) {\n if (Array.isArray(existingValue)) {\n existingValue.push(value);\n } else {\n obj[key] = [existingValue, value];\n }\n } else {\n obj[key] = value;\n }\n return obj;\n },\n {},\n );\n\n let userFuncModule = modulesCache.get(pathName);\n if (userFuncModule === undefined) {\n if (isDmv2) {\n let apiName = fileName.replace(/^\\/+|\\/+$/g, \"\");\n let version: string | null = null;\n\n // First, try to find the API by the full path (for custom paths)\n userFuncModule = apis.get(apiName);\n\n if (!userFuncModule) {\n // Fall back to the old name:version parsing\n version = url.searchParams.get(\"version\");\n\n // Check if version is in the path (e.g., /bar/1)\n if (!version && apiName.includes(\"/\")) {\n const pathParts = apiName.split(\"/\");\n if (pathParts.length >= 2) {\n // Try the full path first (it might be a custom path)\n userFuncModule = apis.get(apiName);\n if (!userFuncModule) {\n // If not found, treat it as name/version\n apiName = pathParts[0];\n version = pathParts.slice(1).join(\"/\");\n }\n }\n }\n\n // Only do versioned lookup if we still haven't found it\n if (!userFuncModule) {\n if (version) {\n const versionedKey = `${apiName}:${version}`;\n userFuncModule = apis.get(versionedKey);\n } else {\n userFuncModule = apis.get(apiName);\n }\n }\n }\n\n if (!userFuncModule) {\n const availableApis = Array.from(apis.keys()).map((key) =>\n key.replace(\":\", \"/\"),\n );\n const errorMessage =\n version ?\n `API ${apiName} with version ${version} not found. Available APIs: ${availableApis.join(\", \")}`\n : `API ${apiName} not found. 
Available APIs: ${availableApis.join(\", \")}`;\n throw new Error(errorMessage);\n }\n\n modulesCache.set(pathName, userFuncModule);\n console.log(`[API] | Executing API: ${apiName}`);\n } else {\n userFuncModule = require(pathName);\n modulesCache.set(pathName, userFuncModule);\n }\n }\n\n const queryClient = new QueryClient(clickhouseClient, fileName);\n let result =\n isDmv2 ?\n await userFuncModule(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n })\n : await userFuncModule.default(paramsObject, {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n });\n\n let body: string;\n let status: number | undefined;\n\n // TODO investigate why these prototypes are different\n if (Object.getPrototypeOf(result).constructor.name === \"ResultSet\") {\n body = JSON.stringify(await result.json());\n } else {\n if (\"body\" in result && \"status\" in result) {\n body = JSON.stringify(result.body);\n status = result.status;\n } else {\n body = JSON.stringify(result);\n }\n }\n\n if (status) {\n res.writeHead(status, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n } else {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n httpLogger(req, res, start);\n }\n\n res.end(body);\n } catch (error: any) {\n console.log(\"error in path \", req.url, error);\n // todo: same workaround as ResultSet\n if (Object.getPrototypeOf(error).constructor.name === \"TypeGuardError\") {\n res.writeHead(400, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n }\n if (error instanceof Error) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: error.message }));\n httpLogger(req, res, start);\n } else {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end();\n httpLogger(req, res, start);\n }\n }\n };\n};\n\nconst createMainRouter = async (\n publicKey: jose.KeyLike | undefined,\n clickhouseClient: ClickHouseClient,\n temporalClient: TemporalClient | undefined,\n apisDir: string,\n enforceAuth: boolean,\n isDmv2: boolean,\n jwtConfig?: JwtConfig,\n) => {\n const apiRequestHandler = await apiHandler(\n publicKey,\n clickhouseClient,\n temporalClient,\n apisDir,\n enforceAuth,\n isDmv2,\n jwtConfig,\n );\n\n const webApps = isDmv2 ? 
await getWebApps() : new Map();\n\n const sortedWebApps = Array.from(webApps.values()).sort((a, b) => {\n const pathA = a.config.mountPath || \"/\";\n const pathB = b.config.mountPath || \"/\";\n return pathB.length - pathA.length;\n });\n\n return async (req: http.IncomingMessage, res: http.ServerResponse) => {\n const start = Date.now();\n\n const url = new URL(req.url || \"\", \"http://localhost\");\n const pathname = url.pathname;\n\n // Health check - checked before all other routes\n if (pathname === \"/_moose_internal/health\") {\n res.writeHead(200, { \"Content-Type\": \"application/json\" });\n res.end(\n JSON.stringify({\n status: \"healthy\",\n timestamp: new Date().toISOString(),\n }),\n );\n return;\n }\n\n let jwtPayload;\n if (publicKey && jwtConfig) {\n const jwt = req.headers.authorization?.split(\" \")[1];\n if (jwt) {\n try {\n const { payload } = await jose.jwtVerify(jwt, publicKey, {\n issuer: jwtConfig.issuer,\n audience: jwtConfig.audience,\n });\n jwtPayload = payload;\n } catch (error) {\n console.log(\"JWT verification failed for WebApp route\");\n }\n }\n }\n\n for (const webApp of sortedWebApps) {\n const mountPath = webApp.config.mountPath || \"/\";\n const normalizedMount =\n mountPath.endsWith(\"/\") && mountPath !== \"/\" ?\n mountPath.slice(0, -1)\n : mountPath;\n\n const matches =\n pathname === normalizedMount ||\n pathname.startsWith(normalizedMount + \"/\");\n\n if (matches) {\n if (webApp.config.injectMooseUtils !== false) {\n const queryClient = new QueryClient(clickhouseClient, pathname);\n (req as any).moose = {\n client: new MooseClient(queryClient, temporalClient),\n sql: sql,\n jwt: jwtPayload,\n };\n }\n\n let proxiedUrl = req.url;\n if (normalizedMount !== \"/\") {\n const pathWithoutMount =\n pathname.substring(normalizedMount.length) || \"/\";\n proxiedUrl = pathWithoutMount + url.search;\n }\n\n try {\n // Create a modified request preserving all properties including headers\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: proxiedUrl,\n },\n );\n await webApp.handler(modifiedReq, res);\n return;\n } catch (error) {\n console.error(`Error in WebApp ${webApp.name}:`, error);\n if (!res.headersSent) {\n res.writeHead(500, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Internal Server Error\" }));\n }\n return;\n }\n }\n }\n\n // If no WebApp matched, check if it's an Api request\n // Strip /api or /consumption prefix for Api routing\n let apiPath = pathname;\n if (pathname.startsWith(\"/api/\")) {\n apiPath = pathname.substring(4); // Remove \"/api\"\n } else if (pathname.startsWith(\"/consumption/\")) {\n apiPath = pathname.substring(13); // Remove \"/consumption\"\n }\n\n // If we stripped a prefix, it's an Api request\n if (apiPath !== pathname) {\n // Create a modified request with the rewritten URL for the apiHandler\n // Preserve all properties including headers by using Object.assign with prototype chain\n // A shallow clone (like { ...req }) generally will not work since headers and other\n // members are not cloned.\n const modifiedReq = Object.assign(\n Object.create(Object.getPrototypeOf(req)),\n req,\n {\n url: apiPath + url.search,\n },\n );\n await apiRequestHandler(modifiedReq as http.IncomingMessage, res);\n return;\n }\n\n res.writeHead(404, { \"Content-Type\": \"application/json\" });\n res.end(JSON.stringify({ error: \"Not 
Found\" }));\n httpLogger(req, res, start);\n };\n};\n\nexport const runApis = async (config: ApisConfig) => {\n const apisCluster = new Cluster({\n maxWorkerCount:\n (config.workerCount ?? 0) > 0 ? config.workerCount : undefined,\n workerStart: async () => {\n let temporalClient: TemporalClient | undefined;\n if (config.temporalConfig) {\n temporalClient = await getTemporalClient(\n config.temporalConfig.url,\n config.temporalConfig.namespace,\n config.temporalConfig.clientCert,\n config.temporalConfig.clientKey,\n config.temporalConfig.apiKey,\n );\n }\n const clickhouseClient = getClickhouseClient(\n toClientConfig(config.clickhouseConfig),\n );\n let publicKey: jose.KeyLike | undefined;\n if (config.jwtConfig?.secret) {\n console.log(\"Importing JWT public key...\");\n publicKey = await jose.importSPKI(config.jwtConfig.secret, \"RS256\");\n }\n\n const server = http.createServer(\n await createMainRouter(\n publicKey,\n clickhouseClient,\n temporalClient,\n config.apisDir,\n config.enforceAuth,\n config.isDmv2,\n config.jwtConfig,\n ),\n );\n // port is now passed via config.proxyPort or defaults to 4001\n const port = config.proxyPort !== undefined ? config.proxyPort : 4001;\n server.listen(port, \"localhost\", () => {\n console.log(`Server running on port ${port}`);\n });\n\n return server;\n },\n workerStop: async (server) => {\n return new Promise<void>((resolve) => {\n server.close(() => resolve());\n });\n },\n });\n\n apisCluster.start();\n};\n","import cluster from \"node:cluster\";\nimport { availableParallelism } from \"node:os\";\nimport { exit } from \"node:process\";\nimport { Worker } from \"node:cluster\";\n\nconst DEFAULT_MAX_CPU_USAGE_RATIO = 0.7;\n// Time to restart the worker when it exits unexpectedly\n// This value is not too high to avoid the worker to be stuck in a bad state\n// but also not too low to avoid restarting the worker too often\nconst RESTART_TIME_MS = 10000;\nconst SIGTERM = \"SIGTERM\";\nconst SIGINT = \"SIGINT\";\nconst SHUTDOWN_WORKERS_INTERVAL = 500;\n\n/**\n * Manages a cluster of worker processes, handling their lifecycle including startup,\n * shutdown, and error handling.\n *\n * @typeParam C - The type of output produced during worker startup\n */\nexport class Cluster<C> {\n // Tracks if shutdown is currently in progress\n private shutdownInProgress: boolean = false;\n // Tracks if workers exited cleanly during shutdown\n private hasCleanWorkerExit: boolean = true;\n\n // String identifying if this is primary or worker process\n private processStr = `${cluster.isPrimary ? 
\"primary\" : \"worker\"} process ${process.pid}`;\n\n // Functions for starting and stopping workers\n private workerStart: (w: Worker, paralelism: number) => Promise<C>;\n private workerStop: (c: C) => Promise<void>;\n\n // Result from starting worker, needed for cleanup\n private startOutput: C | undefined;\n private maxCpuUsageRatio: number;\n private usedCpuCount: number;\n\n /**\n * Creates a new cluster manager instance.\n *\n * @param options - Configuration options for the cluster\n * @param options.workerStart - Async function to execute when starting a worker\n * @param options.workerStop - Async function to execute when stopping a worker\n * @param options.maxCpuUsageRatio - Maximum ratio of CPU cores to utilize (0-1)\n * @param options.maxWorkerCount - Maximum number of workers to spawn\n * @throws {Error} If maxCpuUsageRatio is not between 0 and 1\n */\n constructor(options: {\n workerStart: (w: Worker, paralelism: number) => Promise<C>;\n workerStop: (c: C) => Promise<void>;\n maxCpuUsageRatio?: number;\n maxWorkerCount?: number;\n }) {\n this.workerStart = options.workerStart;\n this.workerStop = options.workerStop;\n if (\n options.maxCpuUsageRatio &&\n (options.maxCpuUsageRatio > 1 || options.maxCpuUsageRatio < 0)\n ) {\n throw new Error(\"maxCpuUsageRatio must be between 0 and 1\");\n }\n this.maxCpuUsageRatio =\n options.maxCpuUsageRatio || DEFAULT_MAX_CPU_USAGE_RATIO;\n this.usedCpuCount = this.computeCPUUsageCount(\n this.maxCpuUsageRatio,\n options.maxWorkerCount,\n );\n }\n\n /**\n * Calculates the number of CPU cores to utilize based on available parallelism and constraints.\n *\n * @param cpuUsageRatio - Ratio of CPU cores to use (0-1)\n * @param maxWorkerCount - Optional maximum number of workers\n * @returns The number of CPU cores to utilize\n */\n computeCPUUsageCount(cpuUsageRatio: number, maxWorkerCount?: number) {\n const cpuCount = availableParallelism();\n const maxWorkers = maxWorkerCount || cpuCount;\n return Math.min(\n maxWorkers,\n Math.max(1, Math.floor(cpuCount * cpuUsageRatio)),\n );\n }\n\n /**\n * Initializes the cluster by spawning worker processes and setting up signal handlers.\n * For the primary process, spawns workers and monitors parent process.\n * For worker processes, executes the worker startup function.\n *\n * @throws {Error} If worker is undefined in worker process\n */\n async start() {\n process.on(SIGTERM, this.gracefulClusterShutdown(SIGTERM));\n process.on(SIGINT, this.gracefulClusterShutdown(SIGINT));\n\n if (cluster.isPrimary) {\n const parentPid = process.ppid;\n\n setInterval(() => {\n try {\n process.kill(parentPid, 0);\n } catch (e) {\n console.log(\"Parent process has exited.\");\n this.gracefulClusterShutdown(SIGTERM)();\n }\n }, 1000);\n\n await this.bootWorkers(this.usedCpuCount);\n } else {\n if (!cluster.worker) {\n throw new Error(\n \"Worker is not defined, it should be defined in worker process\",\n );\n }\n\n this.startOutput = await this.workerStart(\n cluster.worker,\n this.usedCpuCount,\n );\n }\n }\n\n /**\n * Spawns worker processes and configures their lifecycle event handlers.\n * Handles worker online, exit and disconnect events.\n * Automatically restarts failed workers during normal operation.\n *\n * @param numWorkers - Number of worker processes to spawn\n */\n bootWorkers = async (numWorkers: number) => {\n console.info(`Setting ${numWorkers} workers...`);\n\n for (let i = 0; i < numWorkers; i++) {\n cluster.fork();\n }\n\n cluster.on(\"online\", (worker) => {\n console.info(`worker process 
${worker.process.pid} is online`);\n });\n\n cluster.on(\"exit\", (worker, code, signal) => {\n console.info(\n `worker ${worker.process.pid} exited with code ${code} and signal ${signal}`,\n );\n\n if (!this.shutdownInProgress) {\n setTimeout(() => cluster.fork(), RESTART_TIME_MS);\n }\n\n if (this.shutdownInProgress && code != 0) {\n this.hasCleanWorkerExit = false;\n }\n });\n\n cluster.on(\"disconnect\", (worker) => {\n console.info(`worker process ${worker.process.pid} has disconnected`);\n });\n };\n\n /**\n * Creates a handler function for graceful shutdown on receipt of a signal.\n * Ensures only one shutdown can occur at a time.\n * Handles shutdown differently for primary and worker processes.\n *\n * @param signal - The signal triggering the shutdown (e.g. SIGTERM)\n * @returns An async function that performs the shutdown\n */\n gracefulClusterShutdown = (signal: NodeJS.Signals) => async () => {\n if (this.shutdownInProgress) {\n return;\n }\n\n this.shutdownInProgress = true;\n this.hasCleanWorkerExit = true;\n\n console.info(\n `Got ${signal} on ${this.processStr}. Graceful shutdown start at ${new Date().toISOString()}`,\n );\n\n try {\n if (cluster.isPrimary) {\n await this.shutdownWorkers(signal);\n console.info(`${this.processStr} - worker shutdown successful`);\n exit(0);\n } else {\n // Only attempt to stop if the worker has finished starting\n if (this.startOutput) {\n await this.workerStop(this.startOutput);\n } else {\n console.info(\n `${this.processStr} - shutdown before worker fully started`,\n );\n }\n console.info(`${this.processStr} shutdown successful`);\n this.hasCleanWorkerExit ? exit(0) : exit(1);\n }\n } catch (e) {\n console.error(`${this.processStr} - shutdown failed`, e);\n exit(1);\n }\n };\n\n /**\n * Gracefully terminates all worker processes.\n * Monitors workers until they all exit or timeout occurs.\n * Only relevant for the primary process.\n *\n * @param signal - The signal to send to worker processes\n * @returns A promise that resolves when all workers have terminated\n */\n shutdownWorkers = (signal: NodeJS.Signals) => {\n return new Promise<void>((resolve, reject) => {\n if (!cluster.isPrimary) {\n return resolve();\n }\n\n if (!cluster.workers) {\n return resolve();\n }\n\n const workerIds = Object.keys(cluster.workers);\n if (workerIds.length == 0) {\n return resolve();\n }\n\n let workersAlive = 0;\n let funcRun = 0;\n\n const cleanWorkers = () => {\n ++funcRun;\n workersAlive = 0;\n\n Object.values(cluster.workers || {})\n .filter((worker) => !!worker)\n .forEach((worker) => {\n if (worker && !worker.isDead()) {\n ++workersAlive;\n if (funcRun == 1) {\n worker.kill(signal);\n }\n }\n });\n\n console.info(workersAlive + \" workers alive\");\n if (workersAlive == 0) {\n clearInterval(interval);\n return resolve();\n }\n };\n\n const interval = setInterval(cleanWorkers, SHUTDOWN_WORKERS_INTERVAL);\n });\n };\n}\n","import { createClient, RedisClientType } from \"redis\";\n\n// Module-level singleton instance and initialization promise\nlet instance: MooseCache | null = null;\nlet initPromise: Promise<MooseCache> | null = null;\n\ntype SupportedTypes = string | object;\n\nexport class MooseCache {\n private client: RedisClientType;\n private isConnected: boolean = false;\n private readonly keyPrefix: string;\n private disconnectTimer: NodeJS.Timeout | null = null;\n private readonly idleTimeout: number;\n private connectPromise: Promise<void> | null = null;\n\n private constructor() {\n const redisUrl =\n process.env.MOOSE_REDIS_CONFIG__URL || 
\"redis://127.0.0.1:6379\";\n const prefix = process.env.MOOSE_REDIS_CONFIG__KEY_PREFIX || \"MS\";\n // 30 seconds of inactivity before disconnecting\n this.idleTimeout =\n parseInt(process.env.MOOSE_REDIS_CONFIG__IDLE_TIMEOUT || \"30\", 10) * 1000;\n this.keyPrefix = `${prefix}::moosecache::`;\n\n this.client = createClient({\n url: redisUrl,\n });\n\n process.on(\"SIGTERM\", this.gracefulShutdown);\n process.on(\"SIGINT\", this.gracefulShutdown);\n\n this.client.on(\"error\", async (err: Error) => {\n console.error(\"TS Redis client error:\", err);\n await this.disconnect();\n });\n\n this.client.on(\"connect\", () => {\n this.isConnected = true;\n console.log(\"TS Redis client connected\");\n });\n\n this.client.on(\"end\", () => {\n this.isConnected = false;\n console.log(\"TS Redis client disconnected\");\n this.clearDisconnectTimer();\n });\n }\n\n private clearDisconnectTimer(): void {\n if (this.disconnectTimer) {\n clearTimeout(this.disconnectTimer);\n this.disconnectTimer = null;\n }\n }\n\n private resetDisconnectTimer(): void {\n this.clearDisconnectTimer();\n this.disconnectTimer = setTimeout(async () => {\n if (this.isConnected) {\n console.log(\"TS Redis client disconnecting due to inactivity\");\n await this.disconnect();\n }\n }, this.idleTimeout);\n }\n\n private async ensureConnected(): Promise<void> {\n if (!this.isConnected) {\n await this.connect();\n }\n this.resetDisconnectTimer();\n }\n\n private async connect(): Promise<void> {\n // If already connected, return immediately\n if (this.isConnected) {\n return;\n }\n\n // If connection is in progress, wait for it\n // This prevents race conditions when multiple callers try to reconnect\n // simultaneously after a disconnection\n if (this.connectPromise) {\n return this.connectPromise;\n }\n\n // Start connection\n this.connectPromise = (async () => {\n try {\n await this.client.connect();\n this.resetDisconnectTimer();\n } catch (error) {\n // Reset the promise on error so retries can work\n this.connectPromise = null;\n throw error;\n }\n })();\n\n return this.connectPromise;\n }\n\n private async gracefulShutdown(): Promise<void> {\n if (this.isConnected) {\n await this.disconnect();\n }\n process.exit(0);\n }\n\n private getPrefixedKey(key: string): string {\n return `${this.keyPrefix}${key}`;\n }\n\n /**\n * Gets the singleton instance of MooseCache. 
Creates a new instance if one doesn't exist.\n * The client will automatically connect to Redis and handle reconnection if needed.\n *\n * @returns Promise<MooseCache> The singleton instance of MooseCache\n * @example\n * const cache = await MooseCache.get();\n */\n public static async get(): Promise<MooseCache> {\n // If we already have an instance, return it immediately\n if (instance) {\n return instance;\n }\n\n // If initialization is already in progress, wait for it\n // This prevents race conditions where multiple concurrent calls to get()\n // would each create their own instance and connection\n //\n // A simple singleton pattern (just checking if instance exists) isn't enough\n // because multiple async calls can check \"if (!instance)\" simultaneously,\n // find it's null, and each try to create their own instance before any\n // of them finish setting the instance variable\n if (initPromise) {\n return initPromise;\n }\n\n // Start initialization\n // We store the promise immediately so that any concurrent calls\n // will wait for this same initialization instead of starting their own\n initPromise = (async () => {\n try {\n const newInstance = new MooseCache();\n await newInstance.connect();\n instance = newInstance;\n return newInstance;\n } catch (error) {\n // Reset the promise on error so retries can work\n initPromise = null;\n throw error;\n }\n })();\n\n return initPromise;\n }\n\n /**\n * Sets a value in the cache. Objects are automatically JSON stringified.\n *\n * @param key - The key to store the value under\n * @param value - The value to store. Can be a string or any object (will be JSON stringified)\n * @param ttlSeconds - Optional time-to-live in seconds. If not provided, defaults to 1 hour (3600 seconds).\n * Must be a non-negative number. If 0, the key will expire immediately.\n * @example\n * // Store a string\n * await cache.set(\"foo\", \"bar\");\n *\n * // Store an object with custom TTL\n * await cache.set(\"foo:config\", { baz: 123, qux: true }, 60); // expires in 1 minute\n *\n * // This is essentially a get-set, which returns the previous value if it exists.\n * // You can create logic to only do work for the first time.\n * const value = await cache.set(\"testSessionId\", \"true\");\n * if (value) {\n * // Cache was set before, return\n * } else {\n * // Cache was set for first time, do work\n * }\n */\n public async set(\n key: string,\n value: string | object,\n ttlSeconds?: number,\n ): Promise<string | null> {\n try {\n // Validate TTL\n if (ttlSeconds !== undefined && ttlSeconds < 0) {\n throw new Error(\"ttlSeconds must be a non-negative number\");\n }\n\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const stringValue =\n typeof value === \"object\" ? JSON.stringify(value) : value;\n\n // Use provided TTL or default to 1 hour\n const ttl = ttlSeconds ?? 3600;\n return await this.client.set(prefixedKey, stringValue, {\n EX: ttl,\n GET: true,\n });\n } catch (error) {\n console.error(`Error setting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Retrieves a value from the cache. Attempts to parse the value as JSON if possible.\n *\n * @param key - The key to retrieve\n * @returns Promise<T | null> The value, parsed as type T if it was JSON, or as string if not. 
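// Editorial sketch, not part of the package source: the initialization pattern
// used by MooseCache.get() above, reduced to a generic helper. Memoizing the
// in-flight promise (not just the finished instance) is what stops concurrent
// callers from each creating their own instance and connection:
function lazySingleton<T>(create: () => Promise<T>): () => Promise<T> {
  let instance: T | null = null;
  let inFlight: Promise<T> | null = null;
  return async () => {
    if (instance) return instance;
    if (inFlight) return inFlight;
    inFlight = create()
      .then((created) => (instance = created))
      .catch((err) => {
        inFlight = null; // reset on failure so later callers can retry
        throw err;
      });
    return inFlight;
  };
}
// Usage: const getCache = lazySingleton(() => MooseCache.get());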
Returns null if key doesn't exist\n * @example\n * // Get a string\n * const value = await cache.get(\"foo\");\n *\n * // Get and parse an object with type safety\n * interface Config { baz: number; qux: boolean; }\n * const config = await cache.get<Config>(\"foo:config\");\n */\n public async get<T extends SupportedTypes = string>(\n key: string,\n ): Promise<T | null> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n const value = await this.client.get(prefixedKey);\n\n if (value === null) return null;\n\n // Note: We can't check if T is string at runtime because TypeScript types are erased.\n // Instead, we try to parse as JSON and return the original string if that fails.\n try {\n const parsed = JSON.parse(value);\n // Only return parsed value if it's an object\n if (typeof parsed === \"object\" && parsed !== null) {\n return parsed as T;\n }\n // If parsed value isn't an object, return as string\n return value as T;\n } catch {\n // If JSON parse fails, return as string\n return value as T;\n }\n } catch (error) {\n console.error(`Error getting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @example\n * await cache.delete(\"foo\");\n */\n public async delete(key: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(key);\n await this.client.del(prefixedKey);\n } catch (error) {\n console.error(`Error deleting cache key ${key}:`, error);\n throw error;\n }\n }\n\n /**\n * Deletes all keys that start with the given prefix.\n *\n * @param keyPrefix - The prefix of keys to delete\n * @example\n * // Delete all keys starting with \"foo\"\n * await cache.clearKeys(\"foo\");\n */\n public async clearKeys(keyPrefix: string): Promise<void> {\n try {\n await this.ensureConnected();\n const prefixedKey = this.getPrefixedKey(keyPrefix);\n const keys = await this.client.keys(`${prefixedKey}*`);\n if (keys.length > 0) {\n await this.client.del(keys);\n }\n } catch (error) {\n console.error(\n `Error clearing cache keys with prefix ${keyPrefix}:`,\n error,\n );\n throw error;\n }\n }\n\n /**\n * Deletes all keys in the cache\n *\n * @example\n * await cache.clear();\n */\n public async clear(): Promise<void> {\n try {\n await this.ensureConnected();\n const keys = await this.client.keys(`${this.keyPrefix}*`);\n if (keys.length > 0) {\n await this.client.del(keys);\n }\n } catch (error) {\n console.error(\"Error clearing cache:\", error);\n throw error;\n }\n }\n\n /**\n * Manually disconnects the Redis client. The client will automatically reconnect\n * when the next operation is performed.\n *\n * @example\n * await cache.disconnect();\n */\n public async disconnect(): Promise<void> {\n this.clearDisconnectTimer();\n this.connectPromise = null;\n if (this.isConnected) {\n await this.client.quit();\n }\n }\n}\n","import { MooseClient, QueryClient } from \"./helpers\";\nimport { getClickhouseClient } from \"../commons\";\nimport type { RuntimeClickHouseConfig } from \"../config/runtime\";\n\nexport async function getMooseClients(\n config?: Partial<RuntimeClickHouseConfig>,\n): Promise<{ client: MooseClient }> {\n await import(\"../config/runtime\");\n const configRegistry = (globalThis as any)._mooseConfigRegistry;\n\n if (!configRegistry) {\n throw new Error(\n \"Configuration registry not initialized. 
Ensure the Moose framework is properly set up.\",\n );\n }\n\n const clickhouseConfig =\n await configRegistry.getStandaloneClickhouseConfig(config);\n\n const clickhouseClient = getClickhouseClient({\n username: clickhouseConfig.username,\n password: clickhouseConfig.password,\n database: clickhouseConfig.database,\n useSSL: clickhouseConfig.useSSL ? \"true\" : \"false\",\n host: clickhouseConfig.host,\n port: clickhouseConfig.port,\n });\n\n const queryClient = new QueryClient(clickhouseClient, \"standalone\");\n const mooseClient = new MooseClient(queryClient);\n\n return { client: mooseClient };\n}\n","import { parse } from \"csv-parse\";\nimport { jsonDateReviver } from \"./json\";\n\n/**\n * Configuration for CSV parsing options\n */\nexport interface CSVParsingConfig {\n /** CSV delimiter character */\n delimiter: string;\n /** Whether to treat first row as headers */\n columns?: boolean;\n /** Whether to skip empty lines */\n skipEmptyLines?: boolean;\n /** Whether to trim whitespace from values */\n trim?: boolean;\n}\n\n/**\n * Configuration for JSON parsing options\n */\nexport interface JSONParsingConfig {\n /** Custom reviver function for JSON.parse */\n reviver?: (key: string, value: any) => any;\n}\n\n/**\n * Parses CSV content into an array of objects\n *\n * @param content - The CSV content as a string\n * @param config - CSV parsing configuration\n * @returns Promise resolving to an array of parsed objects\n */\nexport function parseCSV<T = Record<string, any>>(\n content: string,\n config: CSVParsingConfig,\n): Promise<T[]> {\n return new Promise((resolve, reject) => {\n const results: T[] = [];\n\n parse(content, {\n delimiter: config.delimiter,\n columns: config.columns ?? true,\n skip_empty_lines: config.skipEmptyLines ?? true,\n trim: config.trim ?? true,\n })\n .on(\"data\", (row) => {\n results.push(row as T);\n })\n .on(\"end\", () => {\n resolve(results);\n })\n .on(\"error\", (error) => {\n reject(error);\n });\n });\n}\n\n/**\n * Parses JSON content into an array of objects\n *\n * @param content - The JSON content as a string\n * @param config - JSON parsing configuration\n * @returns Array of parsed objects\n */\nexport function parseJSON<T = any>(\n content: string,\n config: JSONParsingConfig = {},\n): T[] {\n try {\n const parsed = JSON.parse(content, config.reviver);\n\n // Handle both array and single object cases\n if (Array.isArray(parsed)) {\n return parsed as T[];\n } else {\n return [parsed as T];\n }\n } catch (error) {\n throw new Error(\n `Failed to parse JSON: ${error instanceof Error ? 
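// Editorial sketch, not part of the package source: calling the standalone
// getMooseClients() helper above. The override fields mirror those read from
// the resolved ClickHouse config; whether every field may be overridden is an
// assumption here, as is the local connection info.
async function standaloneExample() {
  const { client } = await getMooseClients({
    host: "localhost",
    port: "8123",
    database: "local",
  });
  return client; // MooseClient wrapping a QueryClient
}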
error.message : \"Unknown error\"}`,\n );\n }\n}\n\n/**\n * Parses JSON content with automatic date revival\n *\n * @param content - The JSON content as a string\n * @returns Array of parsed objects with Date objects for ISO 8601 strings\n */\nexport function parseJSONWithDates<T = any>(content: string): T[] {\n return parseJSON<T>(content, { reviver: jsonDateReviver });\n}\n\n/**\n * Type guard to check if a value is a valid CSV delimiter\n */\nexport function isValidCSVDelimiter(delimiter: string): boolean {\n return delimiter.length === 1 && !/\\s/.test(delimiter);\n}\n\n/**\n * Common CSV delimiters\n */\nexport const CSV_DELIMITERS = {\n COMMA: \",\",\n TAB: \"\\t\",\n SEMICOLON: \";\",\n PIPE: \"|\",\n} as const;\n\n/**\n * Default CSV parsing configuration\n */\nexport const DEFAULT_CSV_CONFIG: CSVParsingConfig = {\n delimiter: CSV_DELIMITERS.COMMA,\n columns: true,\n skipEmptyLines: true,\n trim: true,\n};\n\n/**\n * Default JSON parsing configuration with date revival\n */\nexport const DEFAULT_JSON_CONFIG: JSONParsingConfig = {\n reviver: jsonDateReviver,\n};\n","import type {\n Column,\n DataType,\n Nested,\n ArrayType,\n} from \"../dataModels/dataModelTypes\";\n\n/**\n * Annotation key used to mark DateTime fields that should remain as strings\n * rather than being parsed into Date objects at runtime.\n */\nexport const STRING_DATE_ANNOTATION = \"stringDate\";\n\n/**\n * Type guard to check if a DataType is a nullable wrapper\n */\nfunction isNullableType(dt: DataType): dt is { nullable: DataType } {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"nullable\" in dt &&\n typeof dt.nullable !== \"undefined\"\n );\n}\n\n/**\n * Type guard to check if a DataType is a Nested type\n */\nfunction isNestedType(dt: DataType): dt is Nested {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"columns\" in dt &&\n Array.isArray(dt.columns)\n );\n}\n\n/**\n * Type guard to check if a DataType is an ArrayType\n */\nfunction isArrayType(dt: DataType): dt is ArrayType {\n return (\n typeof dt === \"object\" &&\n dt !== null &&\n \"elementType\" in dt &&\n typeof dt.elementType !== \"undefined\"\n );\n}\n\n/**\n * Revives ISO 8601 date strings into Date objects during JSON parsing\n * This is useful for automatically converting date strings to Date objects\n */\nexport function jsonDateReviver(key: string, value: unknown): unknown {\n const iso8601Format =\n /^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)$/;\n\n if (typeof value === \"string\" && iso8601Format.test(value)) {\n return new Date(value);\n }\n\n return value;\n}\n\n/**\n * Checks if a DataType represents a datetime column (not just date)\n * AND if the column should be parsed from string to Date at runtime\n *\n * Note: Date and Date16 are date-only types and should remain as strings.\n * Only DateTime types are candidates for parsing to JavaScript Date objects.\n */\nfunction isDateType(dataType: DataType, annotations: [string, any][]): boolean {\n // Check if this is marked as a string-based date (from typia.tags.Format)\n // If so, it should remain as a string, not be parsed to Date\n if (\n annotations.some(\n ([key, value]) => key === STRING_DATE_ANNOTATION && value === true,\n )\n ) {\n return false;\n }\n\n if (typeof dataType === \"string\") {\n 
// Only DateTime types should be parsed to Date objects\n // Date and Date16 are date-only and should stay as strings\n return dataType === \"DateTime\" || dataType.startsWith(\"DateTime(\");\n }\n // Handle nullable wrapper\n if (isNullableType(dataType)) {\n return isDateType(dataType.nullable, annotations);\n }\n return false;\n}\n\n/**\n * Type of mutation to apply to a field during parsing\n */\nexport type Mutation = \"parseDate\"; // | \"parseBigInt\" - to be added later\n\n/**\n * Recursive tuple array structure representing field mutation operations\n * Each entry is [fieldName, mutation]:\n * - mutation is Mutation[] for leaf fields that need operations applied\n * - mutation is FieldMutations for nested objects/arrays (auto-applies to array elements)\n */\nexport type FieldMutations = [string, Mutation[] | FieldMutations][];\n\n/**\n * Recursively builds field mutations from column definitions\n *\n * @param columns - Array of Column definitions\n * @returns Tuple array of field mutations\n */\nfunction buildFieldMutations(columns: Column[]): FieldMutations {\n const mutations: FieldMutations = [];\n\n for (const column of columns) {\n const dataType = column.data_type;\n\n // Check if this is a date field that should be converted\n if (isDateType(dataType, column.annotations)) {\n mutations.push([column.name, [\"parseDate\"]]);\n continue;\n }\n\n // Handle nested structures\n if (typeof dataType === \"object\" && dataType !== null) {\n // Handle nullable wrapper\n let unwrappedType: DataType = dataType;\n if (isNullableType(dataType)) {\n unwrappedType = dataType.nullable;\n }\n\n // Handle nested objects\n if (isNestedType(unwrappedType)) {\n const nestedMutations = buildFieldMutations(unwrappedType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n\n // Handle arrays with nested columns\n // The mutations will be auto-applied to each array element at runtime\n if (isArrayType(unwrappedType)) {\n const elementType = unwrappedType.elementType;\n if (isNestedType(elementType)) {\n const nestedMutations = buildFieldMutations(elementType.columns);\n if (nestedMutations.length > 0) {\n mutations.push([column.name, nestedMutations]);\n }\n continue;\n }\n }\n }\n }\n\n return mutations;\n}\n\n/**\n * Applies a mutation operation to a field value\n *\n * @param value - The value to handle\n * @param mutation - The mutation operation to apply\n * @returns The handled value\n */\nfunction applyMutation(value: any, mutation: Mutation): any {\n if (mutation === \"parseDate\") {\n if (typeof value === \"string\") {\n try {\n const date = new Date(value);\n return !isNaN(date.getTime()) ? 
date : value;\n } catch {\n return value;\n }\n }\n }\n return value;\n}\n\n/**\n * Recursively mutates an object by applying field mutations\n *\n * @param obj - The object to mutate\n * @param mutations - The field mutations to apply\n */\nfunction applyFieldMutations(obj: any, mutations: FieldMutations): void {\n if (!obj || typeof obj !== \"object\") {\n return;\n }\n\n for (const [fieldName, mutation] of mutations) {\n if (!(fieldName in obj)) {\n continue;\n }\n\n if (Array.isArray(mutation)) {\n // Check if it's Mutation[] (leaf) or FieldMutations (nested)\n if (mutation.length > 0 && typeof mutation[0] === \"string\") {\n // It's Mutation[] - apply operations to this field\n const operations = mutation as Mutation[];\n for (const operation of operations) {\n obj[fieldName] = applyMutation(obj[fieldName], operation);\n }\n } else {\n // It's FieldMutations - recurse into nested structure\n const nestedMutations = mutation as FieldMutations;\n const fieldValue = obj[fieldName];\n\n if (Array.isArray(fieldValue)) {\n // Auto-apply to each array element\n for (const item of fieldValue) {\n applyFieldMutations(item, nestedMutations);\n }\n } else if (fieldValue && typeof fieldValue === \"object\") {\n // Apply to nested object\n applyFieldMutations(fieldValue, nestedMutations);\n }\n }\n }\n }\n}\n\n/**\n * Pre-builds field mutations from column schema for efficient reuse\n *\n * @param columns - Column definitions from the Stream schema\n * @returns Field mutations tuple array, or undefined if no columns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * // Reuse fieldMutations for every message\n * ```\n */\nexport function buildFieldMutationsFromColumns(\n columns: Column[] | undefined,\n): FieldMutations | undefined {\n if (!columns || columns.length === 0) {\n return undefined;\n }\n const mutations = buildFieldMutations(columns);\n return mutations.length > 0 ? 
mutations : undefined;\n}\n\n/**\n * Applies field mutations to parsed data\n * Mutates the object in place for performance\n *\n * @param data - The parsed JSON object to mutate\n * @param fieldMutations - Pre-built field mutations from buildFieldMutationsFromColumns\n *\n * @example\n * ```typescript\n * const fieldMutations = buildFieldMutationsFromColumns(stream.columnArray);\n * const data = JSON.parse(jsonString);\n * mutateParsedJson(data, fieldMutations);\n * // data now has transformations applied per the field mutations\n * ```\n */\nexport function mutateParsedJson(\n data: any,\n fieldMutations: FieldMutations | undefined,\n): void {\n if (!fieldMutations || !data) {\n return;\n }\n\n applyFieldMutations(data, fieldMutations);\n}\n","import { ClickHouseClient } from \"@clickhouse/client\";\nimport fastq, { queueAsPromised } from \"fastq\";\nimport { cliLog, getClickhouseClient } from \"../commons\";\nimport { Blocks } from \"./helpers\";\nimport fs from \"node:fs\";\nimport path from \"node:path\";\n\nconst walkDir = (dir: string, fileExtension: string, fileList: string[]) => {\n const files = fs.readdirSync(dir);\n\n files.forEach((file) => {\n if (fs.statSync(path.join(dir, file)).isDirectory()) {\n fileList = walkDir(path.join(dir, file), fileExtension, fileList);\n } else if (file.endsWith(fileExtension)) {\n fileList.push(path.join(dir, file));\n }\n });\n\n return fileList;\n};\n\ninterface BlocksQueueTask {\n chClient: ClickHouseClient;\n blocks: Blocks;\n retries: number;\n}\n\ninterface ClickhouseConfig {\n database: string;\n host: string;\n port: string;\n username: string;\n password: string;\n useSSL: boolean;\n}\n\ninterface BlocksConfig {\n blocksDir: string;\n clickhouseConfig: ClickhouseConfig;\n}\n\nclass DependencyError extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"DependencyError\";\n }\n}\n\n// Convert our config to Clickhouse client config\nconst toClientConfig = (config: ClickhouseConfig) => ({\n ...config,\n useSSL: config.useSSL ? 
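// Editorial sketch, not part of the package source: what the mutation builder
// above produces for a small schema. Only the Column fields the builder reads
// (name, data_type, annotations) are shown, so the cast is a shortcut rather
// than a faithful Column literal.
const exampleColumns = [
  { name: "createdAt", data_type: "DateTime", annotations: [] },
  { name: "label", data_type: "String", annotations: [] },
] as unknown as Column[];

const exampleMutations = buildFieldMutationsFromColumns(exampleColumns);
// exampleMutations -> [["createdAt", ["parseDate"]]]

const parsed = JSON.parse('{"createdAt":"2024-01-01T00:00:00Z","label":"x"}');
mutateParsedJson(parsed, exampleMutations);
// parsed.createdAt is now a Date; parsed.label is untouched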
\"true\" : \"false\",\n});\n\nconst createBlocks = async (chClient: ClickHouseClient, blocks: Blocks) => {\n for (const query of blocks.setup) {\n try {\n console.log(`Creating block using query ${query}`);\n await chClient.command({\n query,\n clickhouse_settings: {\n wait_end_of_query: 1, // Ensure at least once delivery and DDL acknowledgment\n },\n });\n } catch (err) {\n cliLog({\n action: \"Blocks\",\n message: `Failed to create blocks: ${err}`,\n message_type: \"Error\",\n });\n if (err && JSON.stringify(err).includes(`UNKNOWN_TABLE`)) {\n throw new DependencyError(err.toString());\n }\n }\n }\n};\n\nconst deleteBlocks = async (chClient: ClickHouseClient, blocks: Blocks) => {\n for (const query of blocks.teardown) {\n try {\n console.log(`Deleting block using query ${query}`);\n await chClient.command({\n query,\n clickhouse_settings: {\n wait_end_of_query: 1, // Ensure at least once delivery and DDL acknowledgment\n },\n });\n } catch (err) {\n cliLog({\n action: \"Blocks\",\n message: `Failed to delete blocks: ${err}`,\n message_type: \"Error\",\n });\n }\n }\n};\n\nconst asyncWorker = async (task: BlocksQueueTask) => {\n await deleteBlocks(task.chClient, task.blocks);\n await createBlocks(task.chClient, task.blocks);\n};\n\nexport const runBlocks = async (config: BlocksConfig) => {\n const chClient = getClickhouseClient(toClientConfig(config.clickhouseConfig));\n console.log(`Connected`);\n\n const blocksFiles = walkDir(config.blocksDir, \".ts\", []);\n const numOfBlockFiles = blocksFiles.length;\n console.log(`Found ${numOfBlockFiles} blocks files`);\n\n const queue: queueAsPromised<BlocksQueueTask> = fastq.promise(asyncWorker, 1);\n\n queue.error((err: Error, task: BlocksQueueTask) => {\n if (err && task.retries > 0) {\n if (err instanceof DependencyError) {\n queue.push({ ...task, retries: task.retries - 1 });\n }\n }\n });\n\n for (const path of blocksFiles) {\n console.log(`Adding to queue: ${path}`);\n\n try {\n const blocks = require(path).default as Blocks;\n queue.push({\n chClient,\n blocks,\n retries: numOfBlockFiles,\n });\n } catch (err) {\n cliLog({\n action: \"Blocks\",\n message: `Failed to import blocks from ${path}: ${err}`,\n message_type: \"Error\",\n });\n }\n }\n\n while (!queue.idle()) {\n await new Promise((resolve) => setTimeout(resolve, 1000));\n }\n};\n","import { Readable } from \"node:stream\";\nimport { KafkaJS } from \"@514labs/kafka-javascript\";\nconst { Kafka } = KafkaJS;\n\ntype Consumer = KafkaJS.Consumer;\ntype Producer = KafkaJS.Producer;\n\ntype KafkaMessage = {\n value: Buffer | string | null;\n key?: Buffer | string | null;\n partition?: number;\n offset?: string;\n timestamp?: string;\n headers?: Record<string, Buffer | string | undefined>;\n};\n\ntype SASLOptions = {\n mechanism: \"plain\" | \"scram-sha-256\" | \"scram-sha-512\";\n username: string;\n password: string;\n};\nimport { Buffer } from \"node:buffer\";\nimport * as process from \"node:process\";\nimport * as http from \"node:http\";\nimport {\n cliLog,\n getKafkaClient,\n createProducerConfig,\n Logger,\n logError,\n} from \"../commons\";\nimport { Cluster } from \"../cluster-utils\";\nimport { getStreamingFunctions } from \"../dmv2/internal\";\nimport type { ConsumerConfig, TransformConfig, DeadLetterQueue } from \"../dmv2\";\nimport {\n buildFieldMutationsFromColumns,\n mutateParsedJson,\n type FieldMutations,\n} from \"../utilities/json\";\nimport type { Column } from \"../dataModels/dataModelTypes\";\n\nconst HOSTNAME = process.env.HOSTNAME;\nconst AUTO_COMMIT_INTERVAL_MS 
= 5000;\nconst PARTITIONS_CONSUMED_CONCURRENTLY = 3;\nconst MAX_RETRIES_CONSUMER = 150;\nconst SESSION_TIMEOUT_CONSUMER = 30000;\nconst HEARTBEAT_INTERVAL_CONSUMER = 3000;\nconst DEFAULT_MAX_STREAMING_CONCURRENCY = 100;\n// Max messages per eachBatch call - Confluent client defaults to 32, increase for throughput\nconst CONSUMER_MAX_BATCH_SIZE = 1000;\n\n/**\n * Data structure for metrics logging containing counts and metadata\n */\ntype MetricsData = {\n count_in: number;\n count_out: number;\n bytes: number;\n function_name: string;\n timestamp: Date;\n};\n\n/**\n * Interface for tracking message processing metrics\n */\ninterface Metrics {\n count_in: number;\n count_out: number;\n bytes: number;\n}\n\n/**\n * Type definition for streaming transformation function\n */\ntype StreamingFunction = (data: unknown) => unknown | Promise<unknown>;\n\n/**\n * Simplified Kafka message type containing only value\n */\ntype KafkaMessageWithLineage = {\n value: string;\n originalValue: object;\n originalMessage: KafkaMessage;\n dlq?: DeadLetterQueue<any>;\n};\n\n/**\n * Configuration interface for Kafka topics including namespace and version support\n */\nexport interface TopicConfig {\n name: string; // Full topic name including namespace if present\n partitions: number;\n retention_ms: number;\n max_message_bytes: number;\n namespace?: string;\n version?: string;\n}\n\n/**\n * Configuration interface for streaming function arguments\n */\nexport interface StreamingFunctionArgs {\n sourceTopic: TopicConfig;\n targetTopic?: TopicConfig;\n functionFilePath: string;\n broker: string; // Comma-separated list of Kafka broker addresses (e.g., \"broker1:9092, broker2:9092\"). Whitespace around commas is automatically trimmed.\n maxSubscriberCount: number;\n isDmv2: boolean;\n saslUsername?: string;\n saslPassword?: string;\n saslMechanism?: string;\n securityProtocol?: string;\n}\n\n/**\n * Maximum number of concurrent streaming operations, configurable via environment\n */\nconst MAX_STREAMING_CONCURRENCY =\n process.env.MAX_STREAMING_CONCURRENCY ?\n parseInt(process.env.MAX_STREAMING_CONCURRENCY, 10)\n : DEFAULT_MAX_STREAMING_CONCURRENCY;\n\n/**\n * Logs metrics data to HTTP endpoint\n */\nexport const metricsLog: (log: MetricsData) => void = (log) => {\n const req = http.request({\n port: parseInt(process.env.MOOSE_MANAGEMENT_PORT ?? 
\"5001\", 10),\n method: \"POST\",\n path: \"/metrics-logs\",\n });\n\n req.on(\"error\", (err: Error) => {\n console.log(\n `Error ${err.name} sending metrics to management port.`,\n err.message,\n );\n });\n\n req.write(JSON.stringify({ ...log }));\n req.end();\n};\n\n/**\n * Initializes and connects Kafka producer\n */\nconst startProducer = async (\n logger: Logger,\n producer: Producer,\n): Promise<void> => {\n try {\n logger.log(\"Connecting producer...\");\n await producer.connect();\n logger.log(\"Producer is running...\");\n } catch (error) {\n logger.error(\"Failed to connect producer:\");\n if (error instanceof Error) {\n logError(logger, error);\n }\n throw error;\n }\n};\n\n/**\n * Disconnects a Kafka producer and logs the shutdown\n *\n * @param logger - Logger instance for outputting producer status\n * @param producer - KafkaJS Producer instance to disconnect\n * @returns Promise that resolves when producer is disconnected\n * @example\n * ```ts\n * await stopProducer(logger, producer); // Disconnects producer and logs shutdown\n * ```\n */\nconst stopProducer = async (\n logger: Logger,\n producer: Producer,\n): Promise<void> => {\n await producer.disconnect();\n logger.log(\"Producer is shutting down...\");\n};\n\n/**\n * Gracefully stops a Kafka consumer by pausing all partitions and then disconnecting\n *\n * @param logger - Logger instance for outputting consumer status\n * @param consumer - KafkaJS Consumer instance to disconnect\n * @param sourceTopic - Topic configuration containing name and partition count\n * @returns Promise that resolves when consumer is disconnected\n * @example\n * ```ts\n * await stopConsumer(logger, consumer, sourceTopic); // Pauses all partitions and disconnects consumer\n * ```\n */\nconst stopConsumer = async (\n logger: Logger,\n consumer: Consumer,\n sourceTopic: TopicConfig,\n): Promise<void> => {\n try {\n // Try to pause the consumer first if the method exists\n logger.log(\"Pausing consumer...\");\n\n // Generate partition numbers array based on the topic's partition count\n const partitionNumbers = Array.from(\n { length: sourceTopic.partitions },\n (_, i) => i,\n );\n\n await consumer.pause([\n {\n topic: sourceTopic.name,\n partitions: partitionNumbers,\n },\n ]);\n\n logger.log(\"Disconnecting consumer...\");\n await consumer.disconnect();\n logger.log(\"Consumer is shutting down...\");\n } catch (error) {\n logger.error(`Error during consumer shutdown: ${error}`);\n // Continue with disconnect even if pause fails\n try {\n await consumer.disconnect();\n logger.log(\"Consumer disconnected after error\");\n } catch (disconnectError) {\n logger.error(`Failed to disconnect consumer: ${disconnectError}`);\n }\n }\n};\n\n/**\n * Processes a single Kafka message through a streaming function and returns transformed message(s)\n *\n * @param logger - Logger instance for outputting message processing status and errors\n * @param streamingFunctionWithConfigList - functions (with their configs) that transforms input message data\n * @param message - Kafka message to be processed\n * @param producer - Kafka producer for sending dead letter\n * @param fieldMutations - Pre-built field mutations for data transformations\n * @returns Promise resolving to array of transformed messages or undefined if processing fails\n *\n * The function will:\n * 1. Check for null/undefined message values\n * 2. Parse the message value as JSON\n * 3. Apply field mutations (e.g., date parsing) using pre-built configuration\n * 4. 
Pass parsed data through the streaming function\n * 5. Convert transformed data back to string format\n * 6. Handle both single and array return values\n * 7. Log any processing errors\n */\nconst handleMessage = async (\n logger: Logger,\n // Note: TransformConfig<any> is intentionally generic here as it handles\n // various data model types that are determined at runtime\n streamingFunctionWithConfigList: [StreamingFunction, TransformConfig<any>][],\n message: KafkaMessage,\n producer: Producer,\n fieldMutations?: FieldMutations,\n): Promise<KafkaMessageWithLineage[] | undefined> => {\n if (message.value === undefined || message.value === null) {\n logger.log(`Received message with no value, skipping...`);\n return undefined;\n }\n\n try {\n // Detect Schema Registry JSON envelope: 0x00 + 4-byte schema ID (big-endian) + JSON bytes\n let payloadBuffer = message.value as Buffer;\n if (\n payloadBuffer &&\n payloadBuffer.length >= 5 &&\n payloadBuffer[0] === 0x00\n ) {\n payloadBuffer = payloadBuffer.subarray(5);\n }\n // Parse JSON then apply field mutations using pre-built configuration\n const parsedData = JSON.parse(payloadBuffer.toString());\n mutateParsedJson(parsedData, fieldMutations);\n const transformedData = await Promise.all(\n streamingFunctionWithConfigList.map(async ([fn, config]) => {\n try {\n return await fn(parsedData);\n } catch (e) {\n // Check if there's a deadLetterQueue configured\n const deadLetterQueue = config.deadLetterQueue;\n\n if (deadLetterQueue) {\n // Create a dead letter record\n const deadLetterRecord = {\n originalRecord: {\n ...parsedData,\n // Include original Kafka message metadata\n __sourcePartition: message.partition,\n __sourceOffset: message.offset,\n __sourceTimestamp: message.timestamp,\n },\n errorMessage: e instanceof Error ? e.message : String(e),\n errorType: e instanceof Error ? e.constructor.name : \"Unknown\",\n failedAt: new Date(),\n source: \"transform\",\n };\n\n cliLog({\n action: \"DeadLetter\",\n message: `Sending message to DLQ ${deadLetterQueue.name}: ${e instanceof Error ? e.message : String(e)}`,\n message_type: \"Error\",\n });\n // Send to the DLQ\n try {\n await producer.send({\n topic: deadLetterQueue.name,\n messages: [{ value: JSON.stringify(deadLetterRecord) }],\n });\n } catch (dlqError) {\n logger.error(`Failed to send to dead letter queue: ${dlqError}`);\n }\n } else {\n // No DLQ configured, just log the error\n cliLog({\n action: \"Function\",\n message: `Error processing message (no DLQ configured): ${e instanceof Error ? e.message : String(e)}`,\n message_type: \"Error\",\n });\n }\n\n // rethrow for the outside error handling\n throw e;\n }\n }),\n );\n\n return transformedData\n .map((userFunctionOutput, i) => {\n const [_, config] = streamingFunctionWithConfigList[i];\n if (userFunctionOutput) {\n if (Array.isArray(userFunctionOutput)) {\n // We Promise.all streamingFunctionWithConfigList above.\n // Promise.all always wraps results in an array, even for single transforms.\n // When a transform returns an array (e.g., [msg1, msg2] to emit multiple messages),\n // we get [[msg1, msg2]]. flat() unwraps one level so each item becomes its own message.\n // Without flat(), the entire array would be JSON.stringify'd as a single message.\n return userFunctionOutput\n .flat()\n .filter((item) => item !== undefined && item !== null)\n .map((item) => ({\n value: JSON.stringify(item),\n originalValue: parsedData,\n originalMessage: message,\n dlq: config.deadLetterQueue ?? 
undefined,\n }));\n } else {\n return [\n {\n value: JSON.stringify(userFunctionOutput),\n originalValue: parsedData,\n originalMessage: message,\n dlq: config.deadLetterQueue ?? undefined,\n },\n ];\n }\n }\n })\n .flat()\n .filter((item) => item !== undefined && item !== null);\n } catch (e) {\n // TODO: Track failure rate\n logger.error(`Failed to transform data`);\n if (e instanceof Error) {\n logError(logger, e);\n }\n }\n\n return undefined;\n};\n\n/**\n * Handles sending failed messages to their configured Dead Letter Queues\n *\n * @param logger - Logger instance for outputting DLQ status\n * @param producer - Kafka producer for sending to DLQ topics\n * @param messages - Array of failed messages with DLQ configuration\n * @param error - The error that caused the failure\n * @returns true if ALL messages were successfully sent to their DLQs, false otherwise\n */\nconst handleDLQForFailedMessages = async (\n logger: Logger,\n producer: Producer,\n messages: KafkaMessageWithLineage[],\n error: unknown,\n): Promise<boolean> => {\n let messagesHandledByDLQ = 0;\n let messagesWithoutDLQ = 0;\n let dlqErrors = 0;\n\n for (const msg of messages) {\n if (msg.dlq && msg.originalValue) {\n const deadLetterRecord = {\n originalRecord: {\n ...msg.originalValue,\n // Include original Kafka message metadata\n __sourcePartition: msg.originalMessage.partition,\n __sourceOffset: msg.originalMessage.offset,\n __sourceTimestamp: msg.originalMessage.timestamp,\n },\n errorMessage: error instanceof Error ? error.message : String(error),\n errorType: error instanceof Error ? error.constructor.name : \"Unknown\",\n failedAt: new Date(),\n source: \"transform\",\n };\n\n cliLog({\n action: \"DeadLetter\",\n message: `Sending failed message to DLQ ${msg.dlq.name}: ${error instanceof Error ? 
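// Editorial sketch, not part of the package source: the Schema Registry
// framing that handleMessage() above detects. Confluent's JSON envelope is a
// 0x00 magic byte, a 4-byte big-endian schema ID, then the JSON payload:
import { Buffer } from "node:buffer";

function stripSchemaRegistryEnvelope(value: Buffer): Buffer {
  if (value.length >= 5 && value[0] === 0x00) {
    return value.subarray(5); // drop magic byte + schema ID
  }
  return value; // plain JSON, pass through unchanged
}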
error.message : String(error)}`,\n message_type: \"Error\",\n });\n\n try {\n await producer.send({\n topic: msg.dlq.name,\n messages: [{ value: JSON.stringify(deadLetterRecord) }],\n });\n logger.log(`Sent failed message to DLQ ${msg.dlq.name}`);\n messagesHandledByDLQ++;\n } catch (dlqError) {\n logger.error(`Failed to send to DLQ: ${dlqError}`);\n dlqErrors++;\n }\n } else if (!msg.dlq) {\n messagesWithoutDLQ++;\n logger.warn(`Cannot send to DLQ: no DLQ configured for message`);\n } else {\n messagesWithoutDLQ++;\n logger.warn(`Cannot send to DLQ: original message value not available`);\n }\n }\n\n // Check if ALL messages were successfully handled by DLQ\n const allMessagesHandled =\n messagesHandledByDLQ === messages.length &&\n messagesWithoutDLQ === 0 &&\n dlqErrors === 0;\n\n if (allMessagesHandled) {\n logger.log(\n `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, suppressing original error`,\n );\n } else if (messagesHandledByDLQ > 0) {\n // Log summary of partial DLQ handling\n logger.warn(\n `Partial DLQ success: ${messagesHandledByDLQ}/${messages.length} message(s) sent to DLQ`,\n );\n if (messagesWithoutDLQ > 0) {\n logger.error(\n `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured or missing original value`,\n );\n }\n if (dlqErrors > 0) {\n logger.error(`${dlqErrors} message(s) failed to send to DLQ`);\n }\n }\n\n return allMessagesHandled;\n};\n\n/**\n * Sends processed messages to a target Kafka topic\n *\n * @param logger - Logger instance for outputting send status and errors\n * @param metrics - Metrics object for tracking message counts and bytes sent\n * @param targetTopic - Target topic configuration\n * @param producer - Kafka producer instance for sending messages\n * @param messages - Array of processed messages to send (messages carry their own DLQ config)\n * @returns Promise that resolves when all messages are sent\n *\n * The Confluent Kafka library handles batching internally via message.max.bytes\n * and retries transient failures automatically. This function simply sends all\n * messages and handles permanent failures by routing to DLQ.\n */\nconst sendMessages = async (\n logger: Logger,\n metrics: Metrics,\n targetTopic: TopicConfig,\n producer: Producer,\n messages: KafkaMessageWithLineage[],\n): Promise<void> => {\n if (messages.length === 0) return;\n\n try {\n // Library handles batching and retries internally\n await producer.send({\n topic: targetTopic.name,\n messages: messages,\n });\n\n // Track metrics only after successful send to target topic\n // Messages routed to DLQ should NOT be counted as successful sends\n for (const msg of messages) {\n metrics.bytes += Buffer.byteLength(msg.value, \"utf8\");\n }\n metrics.count_out += messages.length;\n\n logger.log(`Sent ${messages.length} messages to ${targetTopic.name}`);\n } catch (e) {\n // Library already retried - this is a permanent failure\n logger.error(`Failed to send transformed data`);\n if (e instanceof Error) {\n logError(logger, e);\n }\n\n // Handle DLQ for failed messages\n // Only throw if not all messages were successfully routed to DLQ\n const allHandledByDLQ = await handleDLQForFailedMessages(\n logger,\n producer,\n messages,\n e,\n );\n if (!allHandledByDLQ) {\n throw e;\n }\n }\n};\n\n/**\n * Periodically sends metrics about message processing to a metrics logging endpoint.\n * Resets metrics counters after each send. 
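// Editorial sketch, not part of the package source: the dead-letter record
// assembled in both DLQ paths above, written out as a type for reference.
// Field names are taken from the code; the type name itself is invented.
type DeadLetterRecordSketch = {
  originalRecord: Record<string, unknown> & {
    __sourcePartition?: number; // Kafka metadata attached on failure
    __sourceOffset?: string;
    __sourceTimestamp?: string;
  };
  errorMessage: string;
  errorType: string; // constructor name of the thrown error, or "Unknown"
  failedAt: Date;
  source: "transform";
};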
Runs every second via setTimeout.\n *\n * @param logger - Logger instance containing the function name prefix\n * @param metrics - Metrics object tracking message counts and bytes processed\n * @example\n * ```ts\n * const metrics = { count_in: 10, count_out: 8, bytes: 1024 };\n * sendMessageMetrics(logger, metrics); // Sends metrics and resets counters\n * ```\n */\nconst sendMessageMetrics = (logger: Logger, metrics: Metrics) => {\n if (metrics.count_in > 0 || metrics.count_out > 0 || metrics.bytes > 0) {\n metricsLog({\n count_in: metrics.count_in,\n count_out: metrics.count_out,\n function_name: logger.logPrefix,\n bytes: metrics.bytes,\n timestamp: new Date(),\n });\n }\n metrics.count_in = 0;\n metrics.bytes = 0;\n metrics.count_out = 0;\n setTimeout(() => sendMessageMetrics(logger, metrics), 1000);\n};\n\n/**\n * Dynamically loads a streaming function from a file path\n *\n * @param args - The streaming function arguments containing the function file path\n * @returns The default export of the streaming function module\n * @throws Will throw and log an error if the function file cannot be loaded\n * @example\n * ```ts\n * const fn = loadStreamingFunction({functionFilePath: './transform.js'});\n * const result = await fn(data);\n * ```\n */\nfunction loadStreamingFunction(functionFilePath: string) {\n let streamingFunctionImport: { default: StreamingFunction };\n try {\n streamingFunctionImport = require(\n functionFilePath.substring(0, functionFilePath.length - 3),\n );\n } catch (e) {\n cliLog({ action: \"Function\", message: `${e}`, message_type: \"Error\" });\n throw e;\n }\n return streamingFunctionImport.default;\n}\n\nasync function loadStreamingFunctionV2(\n sourceTopic: TopicConfig,\n targetTopic?: TopicConfig,\n): Promise<{\n functions: [StreamingFunction, TransformConfig<any> | ConsumerConfig<any>][];\n fieldMutations: FieldMutations | undefined;\n}> {\n const transformFunctions = await getStreamingFunctions();\n const transformFunctionKey = `${topicNameToStreamName(sourceTopic)}_${targetTopic ? 
topicNameToStreamName(targetTopic) : \"<no-target>\"}`;\n\n const matchingEntries = Array.from(transformFunctions.entries()).filter(\n ([key]) => key.startsWith(transformFunctionKey),\n );\n\n if (matchingEntries.length === 0) {\n const message = `No functions found for ${transformFunctionKey}`;\n cliLog({\n action: \"Function\",\n message: `${message}`,\n message_type: \"Error\",\n });\n throw new Error(message);\n }\n\n // Extract functions and configs, and get columns from the first entry\n // (all functions for the same source topic will have the same columns)\n const functions = matchingEntries.map(([_, [fn, config]]) => [\n fn,\n config,\n ]) as [StreamingFunction, TransformConfig<any> | ConsumerConfig<any>][];\n const [_key, firstEntry] = matchingEntries[0];\n const sourceColumns = firstEntry[2];\n\n // Pre-build field mutations once for all messages\n const fieldMutations = buildFieldMutationsFromColumns(sourceColumns);\n\n return { functions, fieldMutations };\n}\n\n/**\n * Initializes and starts a Kafka consumer that processes messages using a streaming function\n *\n * @param logger - Logger instance for outputting consumer status and errors\n * @param metrics - Metrics object for tracking message counts and bytes processed\n * @param parallelism - Number of parallel workers processing messages\n * @param args - Configuration arguments for source/target topics and streaming function\n * @param consumer - KafkaJS Consumer instance\n * @param producer - KafkaJS Producer instance for sending processed messages\n * @param streamingFuncId - Unique identifier for this consumer group\n * @param maxMessageSize - Maximum message size in bytes allowed by Kafka broker\n * @returns Promise that resolves when consumer is started\n *\n * The consumer will:\n * 1. Connect to Kafka\n * 2. Subscribe to the source topic\n * 3. Process messages in batches using the streaming function\n * 4. Send processed messages to target topic (if configured)\n * 5. 
Commit offsets after successful processing\n */\nconst startConsumer = async (\n args: StreamingFunctionArgs,\n logger: Logger,\n metrics: Metrics,\n _parallelism: number,\n consumer: Consumer,\n producer: Producer,\n streamingFuncId: string,\n): Promise<void> => {\n // Validate topic configurations\n validateTopicConfig(args.sourceTopic);\n if (args.targetTopic) {\n validateTopicConfig(args.targetTopic);\n }\n\n try {\n logger.log(\"Connecting consumer...\");\n await consumer.connect();\n logger.log(\"Consumer connected successfully\");\n } catch (error) {\n logger.error(\"Failed to connect consumer:\");\n if (error instanceof Error) {\n logError(logger, error);\n }\n throw error;\n }\n\n logger.log(\n `Starting consumer group '${streamingFuncId}' with source topic: ${args.sourceTopic.name} and target topic: ${args.targetTopic?.name || \"none\"}`,\n );\n\n // We preload the function to not have to load it for each message\n // Note: Config types use 'any' as generics because they handle various\n // data model types determined at runtime, not compile time\n let streamingFunctions: [\n StreamingFunction,\n TransformConfig<any> | ConsumerConfig<any>,\n ][];\n let fieldMutations: FieldMutations | undefined;\n\n if (args.isDmv2) {\n const result = await loadStreamingFunctionV2(\n args.sourceTopic,\n args.targetTopic,\n );\n streamingFunctions = result.functions;\n fieldMutations = result.fieldMutations;\n } else {\n streamingFunctions = [[loadStreamingFunction(args.functionFilePath), {}]];\n fieldMutations = undefined;\n }\n\n await consumer.subscribe({\n topics: [args.sourceTopic.name], // Use full topic name for Kafka operations\n });\n\n await consumer.run({\n eachBatchAutoResolve: true,\n // Enable parallel processing of partitions\n partitionsConsumedConcurrently: PARTITIONS_CONSUMED_CONCURRENTLY, // To be adjusted\n eachBatch: async ({ batch, heartbeat, isRunning, isStale }) => {\n if (!isRunning() || isStale()) {\n return;\n }\n\n metrics.count_in += batch.messages.length;\n\n cliLog({\n action: \"Received\",\n message: `${logger.logPrefix} ${batch.messages.length} message(s)`,\n });\n logger.log(`Received ${batch.messages.length} message(s)`);\n\n let index = 0;\n const readableStream = Readable.from(batch.messages);\n\n const processedMessages: (KafkaMessageWithLineage[] | undefined)[] =\n await readableStream\n .map(\n async (message) => {\n index++;\n if (\n (batch.messages.length > DEFAULT_MAX_STREAMING_CONCURRENCY &&\n index % DEFAULT_MAX_STREAMING_CONCURRENCY) ||\n index - 1 === batch.messages.length\n ) {\n await heartbeat();\n }\n return handleMessage(\n logger,\n streamingFunctions,\n message,\n producer,\n fieldMutations,\n );\n },\n {\n concurrency: MAX_STREAMING_CONCURRENCY,\n },\n )\n .toArray();\n\n const filteredMessages = processedMessages\n .flat()\n .filter((msg) => msg !== undefined && msg.value !== undefined);\n\n if (args.targetTopic === undefined || processedMessages.length === 0) {\n return;\n }\n\n await heartbeat();\n\n if (filteredMessages.length > 0) {\n // Messages now carry their own DLQ configuration in the lineage\n await sendMessages(\n logger,\n metrics,\n args.targetTopic,\n producer,\n filteredMessages as KafkaMessageWithLineage[],\n );\n }\n },\n });\n\n logger.log(\"Consumer is running...\");\n};\n\n/**\n * Creates a Logger instance that prefixes all log messages with the source and target topic\n *\n * @param args - The streaming function arguments containing source and target topics\n * @returns A Logger instance with standard log, error and warn 
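// Editorial note, not part of the package source: the heartbeat guard in
// eachBatch above fires when `index % DEFAULT_MAX_STREAMING_CONCURRENCY` is
// truthy, i.e. on every index that is NOT a multiple of the window, and
// `index - 1 === batch.messages.length` can never hold because `index` tops
// out at the batch length. A sketch of the presumably intended "every N
// messages, plus the last one" check:
//
//   if (
//     index % DEFAULT_MAX_STREAMING_CONCURRENCY === 0 ||
//     index === batch.messages.length
//   ) {
//     await heartbeat();
//   }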
methods\n * @example\n * ```ts\n * const logger = buildLogger({sourceTopic: 'source', targetTopic: 'target'});\n * logger.log('message'); // Outputs: \"source -> target: message\"\n * ```\n */\nconst buildLogger = (args: StreamingFunctionArgs, workerId: number): Logger => {\n const targetLabel =\n args.targetTopic?.name ? ` -> ${args.targetTopic.name}` : \" (consumer)\";\n const logPrefix = `${args.sourceTopic.name}${targetLabel} (worker ${workerId})`;\n return {\n logPrefix: logPrefix,\n log: (message: string): void => {\n console.log(`${logPrefix}: ${message}`);\n },\n error: (message: string): void => {\n console.error(`${logPrefix}: ${message}`);\n },\n warn: (message: string): void => {\n console.warn(`${logPrefix}: ${message}`);\n },\n };\n};\n\n/**\n * Formats a version string into a topic suffix format by replacing dots with underscores\n * Example: \"1.2.3\" -> \"_1_2_3\"\n */\nexport function formatVersionSuffix(version: string): string {\n return `_${version.replace(/\\./g, \"_\")}`;\n}\n\n/**\n * Transforms a topic name by removing namespace prefix and version suffix\n * to get the base stream name for function mapping\n */\nexport function topicNameToStreamName(config: TopicConfig): string {\n let name = config.name;\n\n // Handle version suffix if present\n if (config.version) {\n const versionSuffix = formatVersionSuffix(config.version);\n if (name.endsWith(versionSuffix)) {\n name = name.slice(0, -versionSuffix.length);\n } else {\n throw new Error(\n `Version suffix ${versionSuffix} not found in topic name ${name}`,\n );\n }\n }\n\n // Handle namespace prefix if present\n if (config.namespace && config.namespace !== \"\") {\n const prefix = `${config.namespace}.`;\n if (name.startsWith(prefix)) {\n name = name.slice(prefix.length);\n } else {\n throw new Error(\n `Namespace prefix ${prefix} not found in topic name ${name}`,\n );\n }\n }\n\n return name;\n}\n\n/**\n * Validates a topic configuration for proper namespace and version formatting\n */\nexport function validateTopicConfig(config: TopicConfig): void {\n if (config.namespace && !config.name.startsWith(`${config.namespace}.`)) {\n throw new Error(\n `Topic name ${config.name} must start with namespace ${config.namespace}`,\n );\n }\n\n if (config.version) {\n const versionSuffix = formatVersionSuffix(config.version);\n if (!config.name.endsWith(versionSuffix)) {\n throw new Error(\n `Topic name ${config.name} must end with version ${config.version}`,\n );\n }\n }\n}\n\n/**\n * Initializes and runs a clustered streaming function system that processes messages from Kafka\n *\n * This function:\n * 1. Creates a cluster of workers to handle Kafka message processing\n * 2. Sets up Kafka producers and consumers for each worker\n * 3. Configures logging and metrics collection\n * 4. 
Handles graceful shutdown on termination\n *\n * The system supports:\n * - Multiple workers processing messages in parallel\n * - Dynamic CPU usage control via maxCpuUsageRatio\n * - SASL authentication for Kafka\n * - Metrics tracking for message counts and bytes processed\n * - Graceful shutdown of Kafka connections\n *\n * @returns Promise that resolves when the cluster is started\n * @throws Will log errors if Kafka connections fail\n *\n * @example\n * ```ts\n * await runStreamingFunctions({\n * sourceTopic: { name: 'source', partitions: 3, retentionPeriod: 86400, maxMessageBytes: 1048576 },\n * targetTopic: { name: 'target', partitions: 3, retentionPeriod: 86400, maxMessageBytes: 1048576 },\n * functionFilePath: './transform.js',\n * broker: 'localhost:9092',\n * maxSubscriberCount: 3,\n * isDmv2: false\n * }); // Starts the streaming function cluster\n * ```\n */\nexport const runStreamingFunctions = async (\n args: StreamingFunctionArgs,\n): Promise<void> => {\n // Validate topic configurations at startup\n validateTopicConfig(args.sourceTopic);\n if (args.targetTopic) {\n validateTopicConfig(args.targetTopic);\n }\n\n // Use base stream names (without namespace/version) for function ID\n // We use flow- instead of function- because that's what the ACLs in boreal are linked with\n // When migrating - make sure the ACLs are updated to use the new prefix.\n const streamingFuncId = `flow-${args.sourceTopic.name}-${args.targetTopic?.name || \"\"}`;\n\n const cluster = new Cluster({\n maxCpuUsageRatio: 0.5,\n maxWorkerCount: args.maxSubscriberCount,\n workerStart: async (worker, parallelism) => {\n const logger = buildLogger(args, worker.id);\n\n const metrics = {\n count_in: 0,\n count_out: 0,\n bytes: 0,\n };\n\n setTimeout(() => sendMessageMetrics(logger, metrics), 1000);\n\n const clientIdPrefix = HOSTNAME ? 
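// Editorial sketch, not part of the package source: the naming round-trip
// implemented by formatVersionSuffix(), topicNameToStreamName() and
// validateTopicConfig() above, on an invented topic.
const exampleTopic: TopicConfig = {
  name: "analytics.pageViews_1_2_3",
  partitions: 3,
  retention_ms: 86_400_000,
  max_message_bytes: 1_048_576,
  namespace: "analytics",
  version: "1.2.3",
};

validateTopicConfig(exampleTopic); // passes: prefix and suffix both match
topicNameToStreamName(exampleTopic); // -> "pageViews"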
`${HOSTNAME}-` : \"\";\n const processId = `${clientIdPrefix}${streamingFuncId}-ts-${worker.id}`;\n\n const kafka = await getKafkaClient(\n {\n clientId: processId,\n broker: args.broker,\n securityProtocol: args.securityProtocol,\n saslUsername: args.saslUsername,\n saslPassword: args.saslPassword,\n saslMechanism: args.saslMechanism,\n },\n logger,\n );\n\n // Note: \"js.consumer.max.batch.size\" is a librdkafka native config not in TS types\n const consumer: Consumer = kafka.consumer({\n kafkaJS: {\n groupId: streamingFuncId,\n sessionTimeout: SESSION_TIMEOUT_CONSUMER,\n heartbeatInterval: HEARTBEAT_INTERVAL_CONSUMER,\n retry: {\n retries: MAX_RETRIES_CONSUMER,\n },\n autoCommit: true,\n autoCommitInterval: AUTO_COMMIT_INTERVAL_MS,\n fromBeginning: true,\n },\n \"js.consumer.max.batch.size\": CONSUMER_MAX_BATCH_SIZE,\n });\n\n // Sync producer message.max.bytes with topic config\n const maxMessageBytes =\n args.targetTopic?.max_message_bytes || 1024 * 1024;\n\n const producer: Producer = kafka.producer(\n createProducerConfig(maxMessageBytes),\n );\n\n try {\n logger.log(\"Starting producer...\");\n await startProducer(logger, producer);\n\n try {\n logger.log(\"Starting consumer...\");\n await startConsumer(\n args,\n logger,\n metrics,\n parallelism,\n consumer,\n producer,\n streamingFuncId,\n );\n } catch (e) {\n logger.error(\"Failed to start kafka consumer: \");\n if (e instanceof Error) {\n logError(logger, e);\n }\n // Re-throw to ensure proper error handling\n throw e;\n }\n } catch (e) {\n logger.error(\"Failed to start kafka producer: \");\n if (e instanceof Error) {\n logError(logger, e);\n }\n // Re-throw to ensure proper error handling\n throw e;\n }\n\n return [logger, producer, consumer] as [Logger, Producer, Consumer];\n },\n workerStop: async ([logger, producer, consumer]) => {\n logger.log(`Received SIGTERM, shutting down gracefully...`);\n\n // First stop the consumer to prevent new messages\n logger.log(\"Stopping consumer first...\");\n await stopConsumer(logger, consumer, args.sourceTopic);\n\n // Wait a bit for in-flight messages to complete processing\n logger.log(\"Waiting for in-flight messages to complete...\");\n await new Promise((resolve) => setTimeout(resolve, 2000));\n\n // Then stop the producer\n logger.log(\"Stopping producer...\");\n await stopProducer(logger, producer);\n\n logger.log(\"Graceful shutdown completed\");\n },\n });\n\n cluster.start();\n};\n","export async function runExportSerializer(targetModel: string) {\n const exports_list = require(targetModel);\n console.log(JSON.stringify(exports_list));\n}\n","import process from \"process\";\n\n/**\n * Gets the source directory from environment variable or defaults to \"app\"\n */\nfunction getSourceDir(): string {\n return process.env.MOOSE_SOURCE_DIR || \"app\";\n}\n\nexport async function runApiTypeSerializer(targetModel: string) {\n const func = require(\n `${process.cwd()}/${getSourceDir()}/apis/${targetModel}.ts`,\n ).default;\n const inputSchema = func[\"moose_input_schema\"] || null;\n const outputSchema = func[\"moose_output_schema\"] || null;\n console.log(\n JSON.stringify({\n inputSchema,\n outputSchema,\n }),\n );\n}\n","import {\n DefaultLogger,\n NativeConnection,\n NativeConnectionOptions,\n Worker,\n bundleWorkflowCode,\n} from \"@temporalio/worker\";\nimport * as path from \"path\";\nimport * as fs from \"fs\";\nimport { Workflow } from \"../dmv2\";\nimport { getWorkflows } from \"../dmv2/internal\";\nimport { createActivityForScript } from \"./activity\";\nimport { 
activities } from \"./activity\";\nimport { initializeLogger } from \"./logger\";\n\ninterface TemporalConfig {\n url: string;\n namespace: string;\n clientCert?: string;\n clientKey?: string;\n apiKey?: string;\n}\n\ninterface ScriptsConfig {\n temporalConfig: TemporalConfig;\n}\n\n// Maintain a global set of activity names we've already registered\nconst ALREADY_REGISTERED = new Set<string>();\n\nfunction collectActivitiesDmv2(\n logger: DefaultLogger,\n workflows: Map<string, Workflow>,\n) {\n logger.info(`<DMV2WF> Collecting tasks from dmv2 workflows`);\n const scriptNames: string[] = [];\n for (const [name, workflow] of workflows.entries()) {\n logger.info(\n `<DMV2WF> Registering dmv2 workflow: ${name} with starting task: ${workflow.config.startingTask.name}`,\n );\n scriptNames.push(`${name}/${workflow.config.startingTask.name}`);\n }\n return scriptNames;\n}\n\n/**\n * This looks similar to the client in apis.\n * Temporal SDK uses similar looking connection options & client,\n * but there are different libraries for a worker like this & a client\n * like in the apis.\n */\nasync function createTemporalConnection(\n logger: DefaultLogger,\n temporalConfig: TemporalConfig,\n): Promise<NativeConnection> {\n logger.info(\n `<workflow> Using temporal_url: ${temporalConfig.url} and namespace: ${temporalConfig.namespace}`,\n );\n\n let connectionOptions: NativeConnectionOptions = {\n address: temporalConfig.url,\n };\n\n if (temporalConfig.clientCert && temporalConfig.clientKey) {\n logger.info(\"Using TLS for secure Temporal\");\n const cert = await fs.readFileSync(temporalConfig.clientCert);\n const key = await fs.readFileSync(temporalConfig.clientKey);\n\n connectionOptions.tls = {\n clientCertPair: {\n crt: cert,\n key: key,\n },\n };\n } else if (temporalConfig.apiKey) {\n logger.info(`Using API key for secure Temporal`);\n // URL with API key uses gRPC regional endpoint\n connectionOptions.address = \"us-west1.gcp.api.temporal.io:7233\";\n connectionOptions.apiKey = temporalConfig.apiKey;\n connectionOptions.tls = {};\n connectionOptions.metadata = {\n \"temporal-namespace\": temporalConfig.namespace,\n };\n }\n\n logger.info(\n `<workflow> Connecting to Temporal at ${connectionOptions.address}`,\n );\n\n const maxRetries = 5;\n const baseDelay = 1000;\n let attempt = 0;\n\n while (true) {\n try {\n const connection = await NativeConnection.connect(connectionOptions);\n logger.info(\"<workflow> Connected to Temporal server\");\n return connection;\n } catch (err) {\n attempt++;\n logger.error(`<workflow> Connection attempt ${attempt} failed: ${err}`);\n\n if (attempt >= maxRetries) {\n logger.error(`Failed to connect after ${attempt} attempts`);\n throw err;\n }\n\n const backoff = baseDelay * Math.pow(2, attempt - 1);\n logger.warn(`<workflow> Retrying connection in ${backoff}ms...`);\n await new Promise((resolve) => setTimeout(resolve, backoff));\n }\n }\n}\n\nasync function registerWorkflows(\n logger: DefaultLogger,\n config: ScriptsConfig,\n): Promise<Worker | null> {\n logger.info(`Registering workflows`);\n\n // Collect all TypeScript activities from registered workflows\n const allScriptPaths: string[] = [];\n const dynamicActivities: any[] = [];\n\n try {\n const workflows = await getWorkflows();\n if (workflows.size > 0) {\n logger.info(`<DMV2WF> Found ${workflows.size} dmv2 workflows`);\n allScriptPaths.push(...collectActivitiesDmv2(logger, workflows));\n\n if (allScriptPaths.length === 0) {\n logger.info(`<DMV2WF> No tasks found in dmv2 workflows`);\n return null;\n 
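// Editorial sketch, not part of the package source: the retry loop inside
// createTemporalConnection() above, generalized. The delay doubles per
// attempt (baseDelay * 2^(attempt - 1)), matching the code's backoff math.
async function retryWithBackoff<T>(
  fn: () => Promise<T>,
  maxRetries = 5,
  baseDelayMs = 1000,
): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (err) {
      if (attempt >= maxRetries) throw err;
      const backoff = baseDelayMs * Math.pow(2, attempt - 1);
      await new Promise((resolve) => setTimeout(resolve, backoff));
    }
  }
}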
}\n\n logger.info(\n `<DMV2WF> Found ${allScriptPaths.length} tasks in dmv2 workflows`,\n );\n\n for (const activityName of allScriptPaths) {\n if (!ALREADY_REGISTERED.has(activityName)) {\n const activity = await createActivityForScript(activityName);\n dynamicActivities.push(activity);\n ALREADY_REGISTERED.add(activityName);\n logger.info(`<DMV2WF> Registered task ${activityName}`);\n }\n }\n\n if (dynamicActivities.length === 0) {\n logger.info(`<DMV2WF> No dynamic activities found in dmv2 workflows`);\n return null;\n }\n\n logger.info(\n `<DMV2WF> Found ${dynamicActivities.length} dynamic activities in dmv2 workflows`,\n );\n }\n\n if (allScriptPaths.length === 0) {\n logger.info(`No workflows found`);\n return null;\n }\n\n logger.info(`Found ${allScriptPaths.length} workflows`);\n\n if (dynamicActivities.length === 0) {\n logger.info(`No tasks found`);\n return null;\n }\n\n logger.info(`Found ${dynamicActivities.length} task(s)`);\n\n const connection = await createTemporalConnection(\n logger,\n config.temporalConfig,\n );\n\n // Create a custom logger that suppresses webpack output\n const silentLogger = {\n info: () => {}, // Suppress info logs (webpack output)\n debug: () => {}, // Suppress debug logs\n warn: () => {}, // Suppress warnings if desired\n log: () => {}, // Suppress general logs\n trace: () => {}, // Suppress trace logs\n error: (message: string, meta?: any) => {\n // Keep error logs but forward to the main logger\n logger.error(message, meta);\n },\n };\n\n // Pre-bundle workflows with silent logger to suppress webpack output\n // https://github.com/temporalio/sdk-typescript/issues/1740\n const workflowBundle = await bundleWorkflowCode({\n workflowsPath: path.resolve(__dirname, \"scripts/workflow.js\"),\n logger: silentLogger,\n });\n\n const worker = await Worker.create({\n connection,\n namespace: config.temporalConfig.namespace,\n taskQueue: \"typescript-script-queue\",\n workflowBundle,\n activities: {\n ...activities,\n ...Object.fromEntries(\n dynamicActivities.map((activity) => [\n Object.keys(activity)[0],\n Object.values(activity)[0],\n ]),\n ),\n },\n });\n\n return worker;\n } catch (error) {\n logger.error(`Error registering workflows: ${error}`);\n throw error;\n }\n}\n\n/**\n * Start a Temporal worker that handles TypeScript script execution workflows.\n */\nexport async function runScripts(\n config: ScriptsConfig,\n): Promise<Worker | null> {\n const logger = initializeLogger();\n\n // Add process-level uncaught exception handler\n process.on(\"uncaughtException\", (error) => {\n console.error(`[PROCESS] Uncaught Exception: ${error}`);\n process.exit(1);\n });\n\n const worker = await registerWorkflows(logger, config);\n\n if (!worker) {\n logger.warn(\n `No workflows found. 
To disable workflow infrastructure, set workflows=false in moose.config.toml`,\n );\n process.exit(0);\n }\n\n let isShuttingDown = false;\n\n // Handle shutdown signals\n async function handleSignal(signal: string) {\n console.log(`[SHUTDOWN] Received ${signal}`);\n\n if (isShuttingDown) {\n return;\n }\n\n isShuttingDown = true;\n\n try {\n if (!worker) {\n process.exit(0);\n }\n await Promise.race([\n worker.shutdown(),\n new Promise((_, reject) =>\n setTimeout(() => reject(new Error(\"Shutdown timeout\")), 3000),\n ),\n ]);\n process.exit(0);\n } catch (error) {\n console.log(`[SHUTDOWN] Error: ${error}`);\n process.exit(1);\n }\n }\n\n // Register signal handlers immediately\n [\"SIGTERM\", \"SIGINT\", \"SIGHUP\", \"SIGQUIT\"].forEach((signal) => {\n process.on(signal, () => {\n handleSignal(signal).catch((error) => {\n console.log(`[SHUTDOWN] Error: ${error}`);\n process.exit(1);\n });\n });\n });\n\n logger.info(\"Starting TypeScript worker...\");\n try {\n await worker.run();\n } catch (error) {\n console.log(`[SHUTDOWN] Error: ${error}`);\n process.exit(1);\n }\n\n return worker;\n}\n","import { log as logger, Context } from \"@temporalio/activity\";\nimport { isCancellation } from \"@temporalio/workflow\";\nimport { Task, Workflow } from \"../dmv2\";\nimport { getWorkflows, getTaskForWorkflow } from \"../dmv2/internal\";\nimport { jsonDateReviver } from \"../utilities/json\";\n\nexport interface ScriptExecutionInput {\n scriptPath: string;\n inputData?: any;\n}\n\nexport const activities = {\n async hasDmv2Workflow(name: string): Promise<boolean> {\n try {\n const workflows = await getWorkflows();\n const hasWorkflow = workflows.has(name);\n logger.info(`Workflow ${name} exists: ${hasWorkflow}`);\n return hasWorkflow;\n } catch (error) {\n logger.error(`Failed to check if workflow ${name} exists: ${error}`);\n return false;\n }\n },\n\n async getDmv2Workflow(name: string): Promise<Workflow> {\n try {\n logger.info(`Getting workflow ${name}`);\n\n const workflows = await getWorkflows();\n\n if (workflows.has(name)) {\n logger.info(`Workflow ${name} found`);\n return workflows.get(name)!;\n } else {\n const errorData = {\n error: \"Workflow not found\",\n details: `Workflow ${name} not found`,\n stack: undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n } catch (error) {\n const errorData = {\n error: \"Failed to get workflow\",\n details: error instanceof Error ? error.message : String(error),\n stack: error instanceof Error ? error.stack : undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n },\n\n async getTaskForWorkflow(\n workflowName: string,\n taskName: string,\n ): Promise<Task<any, any>> {\n try {\n logger.info(`Getting task ${taskName} from workflow ${workflowName}`);\n const task = await getTaskForWorkflow(workflowName, taskName);\n logger.info(`Task ${taskName} found in workflow ${workflowName}`);\n return task;\n } catch (error) {\n const errorData = {\n error: \"Failed to get task\",\n details: error instanceof Error ? error.message : String(error),\n stack: error instanceof Error ? 
error.stack : undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n },\n\n async executeDmv2Task(\n workflow: Workflow,\n task: Task<any, any>,\n inputData: any,\n ): Promise<any[]> {\n // Get context for heartbeat (required for cancellation detection)\n const context = Context.current();\n const taskState = {};\n\n // Periodic heartbeat is required for cancellation detection\n // https://docs.temporal.io/develop/typescript/cancellation#cancel-an-activity\n // - Temporal activities can only receive cancellation if they send heartbeats\n // - Heartbeats are the communication channel between activity and Temporal server\n // - Server sends cancellation signals back in heartbeat responses\n // - Without heartbeats, context.cancelled will never resolve and cancellation is impossible\n let heartbeatInterval: NodeJS.Timeout | null = null;\n const startPeriodicHeartbeat = () => {\n heartbeatInterval = setInterval(() => {\n context.heartbeat(`Task ${task.name} in progress`);\n }, 5000);\n };\n const stopPeriodicHeartbeat = () => {\n if (heartbeatInterval) {\n clearInterval(heartbeatInterval);\n heartbeatInterval = null;\n }\n };\n\n try {\n logger.info(\n `Task ${task.name} received input: ${JSON.stringify(inputData)}`,\n );\n\n // Send initial heartbeat to enable cancellation detection\n context.heartbeat(`Starting task: ${task.name}`);\n\n // Data between temporal workflow & activities are serialized so we\n // have to get it again to access the user's run function\n const fullTask = await getTaskForWorkflow(workflow.name, task.name);\n\n // Revive any JSON serialized dates in the input data\n const revivedInputData =\n inputData ?\n JSON.parse(JSON.stringify(inputData), jsonDateReviver)\n : inputData;\n\n try {\n startPeriodicHeartbeat();\n\n // Race user code against cancellation detection\n // - context.cancelled Promise rejects when server signals cancellation via heartbeat response\n // - This allows immediate cancellation detection rather than waiting for user code to finish\n // - If cancellation happens first, we catch it below and call onCancel cleanup\n const result = await Promise.race([\n fullTask.config.run({ state: taskState, input: revivedInputData }),\n context.cancelled,\n ]);\n return result;\n } catch (error) {\n if (isCancellation(error)) {\n logger.info(\n `Task ${task.name} cancelled, calling onCancel handler if it exists`,\n );\n if (fullTask.config.onCancel) {\n await fullTask.config.onCancel({\n state: taskState,\n input: revivedInputData,\n });\n }\n return [];\n } else {\n throw error;\n }\n } finally {\n stopPeriodicHeartbeat();\n }\n } catch (error) {\n const errorData = {\n error: \"Task execution failed\",\n details: error instanceof Error ? error.message : String(error),\n stack: error instanceof Error ? 
error.stack : undefined,\n };\n const errorMsg = JSON.stringify(errorData);\n logger.error(errorMsg);\n throw new Error(errorMsg);\n }\n },\n};\n\n// Helper function to create activity for a specific script\nexport function createActivityForScript(scriptName: string) {\n return {\n [scriptName]: activities.executeDmv2Task,\n };\n}\n","import {\n makeTelemetryFilterString,\n DefaultLogger,\n Runtime,\n} from \"@temporalio/worker\";\n\nclass LoggerSingleton {\n private static instance: DefaultLogger | null = null;\n\n private constructor() {}\n\n public static initializeLogger(): DefaultLogger {\n if (!LoggerSingleton.instance) {\n LoggerSingleton.instance = new DefaultLogger(\n \"DEBUG\",\n ({ level, message }) => {\n console.log(`${level} | ${message}`);\n },\n );\n\n Runtime.install({\n logger: LoggerSingleton.instance,\n telemetryOptions: {\n logging: {\n filter: makeTelemetryFilterString({ core: \"INFO\", other: \"INFO\" }),\n forward: {},\n },\n },\n });\n }\n\n return LoggerSingleton.instance;\n }\n\n public static getInstance(): DefaultLogger {\n return LoggerSingleton.instance!;\n }\n}\n\nexport const initializeLogger = LoggerSingleton.initializeLogger;\n"],"mappings":"
AAO,IAAI,EAAE,OAAO,WAAW,OAAO,iBAAiB,CAAC;AAAA,UACjE,QAAQ;AAAA,QACV,CAAC;AACD,eAAO;AAAA,MACT,SAAS,OAAO;AACd,YAAI,eAAe,KAAK,GAAG;AACzB,iBAAO;AAAA,YACL,QAAQ,KAAK,IAAI;AAAA,UACnB;AACA,cAAI,SAAS,OAAO,UAAU;AAC5B,kBAAM,SAAS,OAAO,SAAS;AAAA,cAC7B,OAAO;AAAA,cACP,OAAO;AAAA,YACT,CAAC;AAAA,UACH;AACA,iBAAO,CAAC;AAAA,QACV,OAAO;AACL,gBAAM;AAAA,QACR;AAAA,MACF,UAAE;AACA,8BAAsB;AAAA,MACxB;AAAA,IACF,SAAS,OAAO;AACd,YAAM,YAAY;AAAA,QAChB,OAAO;AAAA,QACP,SAAS,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,QAC9D,OAAO,iBAAiB,QAAQ,MAAM,QAAQ;AAAA,MAChD;AACA,YAAM,WAAW,KAAK,UAAU,SAAS;AACzC,aAAO,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,QAAQ;AAAA,IAC1B;AAAA,EACF;AACF;AAGO,SAAS,wBAAwB,YAAoB;AAC1D,SAAO;AAAA,IACL,CAAC,UAAU,GAAG,WAAW;AAAA,EAC3B;AACF;;;AC1KA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAEP,IAAM,kBAAN,MAAM,iBAAgB;AAAA,EACpB,OAAe,WAAiC;AAAA,EAExC,cAAc;AAAA,EAAC;AAAA,EAEvB,OAAc,mBAAkC;AAC9C,QAAI,CAAC,iBAAgB,UAAU;AAC7B,uBAAgB,WAAW,IAAI;AAAA,QAC7B;AAAA,QACA,CAAC,EAAE,OAAO,QAAQ,MAAM;AACtB,kBAAQ,IAAI,GAAG,KAAK,MAAM,OAAO,EAAE;AAAA,QACrC;AAAA,MACF;AAEA,cAAQ,QAAQ;AAAA,QACd,QAAQ,iBAAgB;AAAA,QACxB,kBAAkB;AAAA,UAChB,SAAS;AAAA,YACP,QAAQ,0BAA0B,EAAE,MAAM,QAAQ,OAAO,OAAO,CAAC;AAAA,YACjE,SAAS,CAAC;AAAA,UACZ;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,WAAO,iBAAgB;AAAA,EACzB;AAAA,EAEA,OAAc,cAA6B;AACzC,WAAO,iBAAgB;AAAA,EACzB;AACF;AAEO,IAAM,mBAAmB,gBAAgB;;;AFXhD,IAAM,qBAAqB,oBAAI,IAAY;AAE3C,SAAS,sBACPC,SACA,WACA;AACA,EAAAA,QAAO,KAAK,+CAA+C;AAC3D,QAAM,cAAwB,CAAC;AAC/B,aAAW,CAAC,MAAM,QAAQ,KAAK,UAAU,QAAQ,GAAG;AAClD,IAAAA,QAAO;AAAA,MACL,uCAAuC,IAAI,wBAAwB,SAAS,OAAO,aAAa,IAAI;AAAA,IACtG;AACA,gBAAY,KAAK,GAAG,IAAI,IAAI,SAAS,OAAO,aAAa,IAAI,EAAE;AAAA,EACjE;AACA,SAAO;AACT;AAQA,eAAe,yBACbA,SACA,gBAC2B;AAC3B,EAAAA,QAAO;AAAA,IACL,kCAAkC,eAAe,GAAG,mBAAmB,eAAe,SAAS;AAAA,EACjG;AAEA,MAAI,oBAA6C;AAAA,IAC/C,SAAS,eAAe;AAAA,EAC1B;AAEA,MAAI,eAAe,cAAc,eAAe,WAAW;AACzD,IAAAA,QAAO,KAAK,+BAA+B;AAC3C,UAAM,OAAO,MAAS,iBAAa,eAAe,UAAU;AAC5D,UAAM,MAAM,MAAS,iBAAa,eAAe,SAAS;AAE1D,sBAAkB,MAAM;AAAA,MACtB,gBAAgB;AAAA,QACd,KAAK;AAAA,QACL;AAAA,MACF;AAAA,IACF;AAAA,EACF,WAAW,eAAe,QAAQ;AAChC,IAAAA,QAAO,KAAK,mCAAmC;AAE/C,sBAAkB,UAAU;AAC5B,sBAAkB,SAAS,eAAe;AAC1C,sBAAkB,MAAM,CAAC;AACzB,sBAAkB,WAAW;AAAA,MAC3B,sBAAsB,eAAe;AAAA,IACvC;AAAA,EACF;AAEA,EAAAA,QAAO;AAAA,IACL,wCAAwC,kBAAkB,OAAO;AAAA,EACnE;AAEA,QAAM,aAAa;AACnB,QAAM,YAAY;AAClB,MAAI,UAAU;AAEd,SAAO,MAAM;AACX,QAAI;AACF,YAAM,aAAa,MAAM,iBAAiB,QAAQ,iBAAiB;AACnE,MAAAA,QAAO,KAAK,yCAAyC;AACrD,aAAO;AAAA,IACT,SAAS,KAAK;AACZ;AACA,MAAAA,QAAO,MAAM,iCAAiC,OAAO,YAAY,GAAG,EAAE;AAEtE,UAAI,WAAW,YAAY;AACzB,QAAAA,QAAO,MAAM,2BAA2B,OAAO,WAAW;AAC1D,cAAM;AAAA,MACR;AAEA,YAAM,UAAU,YAAY,KAAK,IAAI,GAAG,UAAU,CAAC;AACnD,MAAAA,QAAO,KAAK,qCAAqC,OAAO,OAAO;AAC/D,YAAM,IAAI,QAAQ,CAACC,aAAY,WAAWA,UAAS,OAAO,CAAC;AAAA,IAC7D;AAAA,EACF;AACF;AAEA,eAAe,kBACbD,SACA,QACwB;AACxB,EAAAA,QAAO,KAAK,uBAAuB;AAGnC,QAAM,iBAA2B,CAAC;AAClC,QAAM,oBAA2B,CAAC;AAElC,MAAI;AACF,UAAM,YAAY,MAAME,cAAa;AACrC,QAAI,UAAU,OAAO,GAAG;AACtB,MAAAF,QAAO,KAAK,kBAAkB,UAAU,IAAI,iBAAiB;AAC7D,qBAAe,KAAK,GAAG,sBAAsBA,SAAQ,SAAS,CAAC;AAE/D,UAAI,eAAe,WAAW,GAAG;AAC/B,QAAAA,QAAO,KAAK,2CAA2C;AACvD,eAAO;AAAA,MACT;AAEA,MAAAA,QAAO;AAAA,QACL,kBAAkB,eAAe,MAAM;AAAA,MACzC;AAEA,iBAAW,gBAAgB,gBAAgB;AACzC,YAAI,CAAC,mBAAmB,IAAI,YAAY,GAAG;AACzC,gBAAM,WAAW,MAAM,wBAAwB,YAAY;AAC3D,4BAAkB,KAAK,QAAQ;AAC/B,6BAAmB,IAAI,YAAY;AACnC,UAAAA,QAAO,KAAK,4BAA4B,YAAY,EAAE;AAAA,QACxD;AAAA,MACF;AAEA,UAAI,kBAAkB,WAAW,GAAG;AAClC,QAAAA,QAAO,KAAK,wDAAwD;AACpE,eAAO;AAAA,MACT;AAEA,MAAAA,QAAO;AAAA,QACL,kBAAkB,kBAAkB,MAAM;AAAA,MAC5C;AAAA,IACF;AAEA,QAAI,eAAe,WAAW,GAAG;AAC/B,MAAAA,QAAO,KAAK,oBAAoB;AAChC,aAAO;AAAA,IACT;AAEA,IAAAA,QAAO,KAAK,SAAS,eAAe,MAAM,YAAY;AAEtD,QAAI,kBAAkB,
WAAW,GAAG;AAClC,MAAAA,QAAO,KAAK,gBAAgB;AAC5B,aAAO;AAAA,IACT;AAEA,IAAAA,QAAO,KAAK,SAAS,kBAAkB,MAAM,UAAU;AAEvD,UAAM,aAAa,MAAM;AAAA,MACvBA;AAAA,MACA,OAAO;AAAA,IACT;AAGA,UAAM,eAAe;AAAA,MACnB,MAAM,MAAM;AAAA,MAAC;AAAA;AAAA,MACb,OAAO,MAAM;AAAA,MAAC;AAAA;AAAA,MACd,MAAM,MAAM;AAAA,MAAC;AAAA;AAAA,MACb,KAAK,MAAM;AAAA,MAAC;AAAA;AAAA,MACZ,OAAO,MAAM;AAAA,MAAC;AAAA;AAAA,MACd,OAAO,CAAC,SAAiB,SAAe;AAEtC,QAAAA,QAAO,MAAM,SAAS,IAAI;AAAA,MAC5B;AAAA,IACF;AAIA,UAAM,iBAAiB,MAAM,mBAAmB;AAAA,MAC9C,eAAoB,cAAQ,WAAW,qBAAqB;AAAA,MAC5D,QAAQ;AAAA,IACV,CAAC;AAED,UAAM,SAAS,MAAM,OAAO,OAAO;AAAA,MACjC;AAAA,MACA,WAAW,OAAO,eAAe;AAAA,MACjC,WAAW;AAAA,MACX;AAAA,MACA,YAAY;AAAA,QACV,GAAG;AAAA,QACH,GAAG,OAAO;AAAA,UACR,kBAAkB,IAAI,CAAC,aAAa;AAAA,YAClC,OAAO,KAAK,QAAQ,EAAE,CAAC;AAAA,YACvB,OAAO,OAAO,QAAQ,EAAE,CAAC;AAAA,UAC3B,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF,CAAC;AAED,WAAO;AAAA,EACT,SAAS,OAAO;AACd,IAAAA,QAAO,MAAM,gCAAgC,KAAK,EAAE;AACpD,UAAM;AAAA,EACR;AACF;AAKA,eAAsB,WACpB,QACwB;AACxB,QAAMA,UAAS,iBAAiB;AAGhC,UAAQ,GAAG,qBAAqB,CAAC,UAAU;AACzC,YAAQ,MAAM,iCAAiC,KAAK,EAAE;AACtD,YAAQ,KAAK,CAAC;AAAA,EAChB,CAAC;AAED,QAAM,SAAS,MAAM,kBAAkBA,SAAQ,MAAM;AAErD,MAAI,CAAC,QAAQ;AACX,IAAAA,QAAO;AAAA,MACL;AAAA,IACF;AACA,YAAQ,KAAK,CAAC;AAAA,EAChB;AAEA,MAAI,iBAAiB;AAGrB,iBAAe,aAAa,QAAgB;AAC1C,YAAQ,IAAI,uBAAuB,MAAM,EAAE;AAE3C,QAAI,gBAAgB;AAClB;AAAA,IACF;AAEA,qBAAiB;AAEjB,QAAI;AACF,UAAI,CAAC,QAAQ;AACX,gBAAQ,KAAK,CAAC;AAAA,MAChB;AACA,YAAM,QAAQ,KAAK;AAAA,QACjB,OAAO,SAAS;AAAA,QAChB,IAAI;AAAA,UAAQ,CAAC,GAAG,WACd,WAAW,MAAM,OAAO,IAAI,MAAM,kBAAkB,CAAC,GAAG,GAAI;AAAA,QAC9D;AAAA,MACF,CAAC;AACD,cAAQ,KAAK,CAAC;AAAA,IAChB,SAAS,OAAO;AACd,cAAQ,IAAI,qBAAqB,KAAK,EAAE;AACxC,cAAQ,KAAK,CAAC;AAAA,IAChB;AAAA,EACF;AAGA,GAAC,WAAW,UAAU,UAAU,SAAS,EAAE,QAAQ,CAAC,WAAW;AAC7D,YAAQ,GAAG,QAAQ,MAAM;AACvB,mBAAa,MAAM,EAAE,MAAM,CAAC,UAAU;AACpC,gBAAQ,IAAI,qBAAqB,KAAK,EAAE;AACxC,gBAAQ,KAAK,CAAC;AAAA,MAChB,CAAC;AAAA,IACH,CAAC;AAAA,EACH,CAAC;AAED,EAAAA,QAAO,KAAK,+BAA+B;AAC3C,MAAI;AACF,UAAM,OAAO,IAAI;AAAA,EACnB,SAAS,OAAO;AACd,YAAQ,IAAI,qBAAqB,KAAK,EAAE;AACxC,YAAQ,KAAK,CAAC;AAAA,EAChB;AAEA,SAAO;AACT;;;AjBlPA,OAAOG,cAAa;AAEpB,SAAS,eAAe;AA1CxB,IACEA,SAAQ,KAAK,CAAC,KAAK,sBACnBA,SAAQ,KAAK,CAAC,KAAK,iCACnBA,SAAQ,KAAK,CAAC,KAAK;AAEnBA,SAAQ,KAAK,CAAC,KAAK,yBACnBA,SAAQ,KAAK,CAAC,KAAK,WACnB;AACA,WAAS;AAAA,IACP,SAAS,CAAC,yBAAyB;AAAA,IACnC,KAAK;AAAA,IACL,gCAAgC;AAAA,IAChC,UAAU;AAAA,IACV,iBAAiB;AAAA,MACf,SAAS;AAAA,QACP;AAAA,UACE,WAAW;AAAA,UACX,kBAAkB;AAAA,QACpB;AAAA,QACA;AAAA,UACE,WAAW;AAAA,QACb;AAAA,MACF;AAAA,MACA,wBAAwB;AAAA,IAC1B;AAAA,EACF,CAAC;AACH,OAAO;AACL,WAAS;AAAA,IACP,KAAK;AAAA,IACL,gCAAgC;AAAA,EAClC,CAAC;AACH;AAgBA,IAAM,UAAU,IAAI,QAAQ;AAE5B,QACG,KAAK,cAAc,EACnB,YAAY,qCAAqC,EACjD,QAAQ,OAAO;AAElB,QACG,QAAQ,iBAAiB,EACzB,YAAY,iBAAiB,EAC7B,OAAO,MAAM;AACZ,oBAAkB;AACpB,CAAC;AAEH,QACG,QAAQ,mBAAmB,EAC3B,YAAY,uBAAuB,EACnC,SAAS,kBAAkB,2BAA2B,EACtD,OAAO,CAAC,gBAAgB;AACvB,sBAAoB,WAAW;AACjC,CAAC;AAEH,QACG,QAAQ,QAAQ,EAChB,YAAY,YAAY,EACxB,SAAS,gBAAgB,6BAA6B,EACtD,SAAS,mBAAmB,0BAA0B,EACtD,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,yBAAyB,qBAAqB,EACvD,SAAS,yBAAyB,qBAAqB,EACvD,OAAO,wBAAwB,qCAAqC,KAAK,EACzE;AAAA,EACC,CACE,WACA,cACA,gBACA,gBACA,oBACA,oBACA,YACG;AACH,cAAU;AAAA,MACR;AAAA,MACA,kBAAkB;AAAA,QAChB,UAAU;AAAA,QACV,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,UAAU;AAAA,QACV,QAAQ,QAAQ;AAAA,MAClB;AAAA,IACF,CAAC;AAAA,EACH;AACF;AAEF,QACG,QAAQ,kBAAkB,EAC1B,YAAY,sBAAsB,EAClC,SAAS,qBAAqB,uCAAuC,EACrE,SAAS,mBAAmB,0BAA0B,EACtD,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,qBAAqB,iBAAiB,EAC/C,SAAS,yBAAyB,qBAAqB,EACvD,SAAS,yBAAyB,qBAAqB,EACvD,OAAO,wBAAwB,qCAAqC,KAAK,EACzE,OAAO,yBAAyB,iCAAiC,EACjE,OAAO,yBAAyB,qBAAqB,EACrD,OAAO,6BAA6
B,uBAAuB,EAC3D;AAAA,EACC;AAAA,EACA;AAAA,EACA;AACF,EACC,OAAO,wBAAwB,qBAAqB,EACpD,OAAO,oCAAoC,oBAAoB,EAC/D,OAAO,wBAAwB,4BAA4B,EAC3D,OAAO,uBAAuB,oBAAoB,EAClD,OAAO,mBAAmB,4BAA4B,EACtD,OAAO,aAAa,sCAAsC,KAAK,EAC/D,OAAO,uBAAuB,mCAAmC,QAAQ,EACzE;AAAA,EACC;AAAA,EACA;AAAA,EACA;AACF,EACC;AAAA,EACC,CACE,SACA,cACA,gBACA,gBACA,oBACA,oBACA,YACG;AACH,YAAQ;AAAA,MACN;AAAA,MACA,kBAAkB;AAAA,QAChB,UAAU;AAAA,QACV,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,UAAU;AAAA,QACV,QAAQ,QAAQ;AAAA,MAClB;AAAA,MACA,WAAW;AAAA,QACT,QAAQ,QAAQ;AAAA,QAChB,QAAQ,QAAQ;AAAA,QAChB,UAAU,QAAQ;AAAA,MACpB;AAAA,MACA,gBAAgB;AAAA,QACd,KAAK,QAAQ;AAAA,QACb,WAAW,QAAQ;AAAA,QACnB,YAAY,QAAQ;AAAA,QACpB,WAAW,QAAQ;AAAA,QACnB,QAAQ,QAAQ;AAAA,MAClB;AAAA,MACA,aAAa,QAAQ;AAAA,MACrB,QAAQ,QAAQ;AAAA,MAChB,WAAW,QAAQ;AAAA,MACnB,aAAa,QAAQ;AAAA,IACvB,CAAC;AAAA,EACH;AACF;AAEF,QACG,QAAQ,qBAAqB,EAC7B,YAAY,yBAAyB,EACrC,SAAS,kBAAkB,oCAAoC,EAC/D,SAAS,wBAAwB,2BAA2B,EAC5D;AAAA,EACC;AAAA,EACA;AACF,EACC,SAAS,0BAA0B,+BAA+B,EAClE,OAAO,iCAAiC,oCAAoC,EAC5E,OAAO,8BAA8B,eAAe,EACpD,OAAO,8BAA8B,eAAe,EACpD,OAAO,gCAAgC,gBAAgB,EACvD,OAAO,kCAAkC,mBAAmB,EAC5D,OAAO,aAAa,mCAAmC,KAAK,EAC5D;AAAA,EACC,CAAC,aAAa,kBAAkB,QAAQ,oBAAoB,YAAY;AACtE,UAAM,SAAgC;AAAA,MACpC,aAAa,KAAK,MAAM,WAAW;AAAA,MACnC,aACE,QAAQ,cAAc,KAAK,MAAM,QAAQ,WAAW,IAAI;AAAA,MAC1D;AAAA,MACA;AAAA,MACA,oBAAoB,SAAS,kBAAkB;AAAA,MAC/C,QAAQ,QAAQ;AAAA,MAChB,cAAc,QAAQ;AAAA,MACtB,cAAc,QAAQ;AAAA,MACtB,eAAe,QAAQ;AAAA,MACvB,kBAAkB,QAAQ;AAAA,IAC5B;AACA,0BAAsB,MAAM;AAAA,EAC9B;AACF;AAEF,QACG,QAAQ,6BAA6B,EACrC,YAAY,iCAAiC,EAC7C,SAAS,kBAAkB,2BAA2B,EACtD,OAAO,CAAC,gBAAgB;AACvB,uBAAqB,WAAW;AAClC,CAAC;AAEH,QACG,QAAQ,SAAS,EACjB,YAAY,aAAa,EACzB,OAAO,wBAAwB,qBAAqB,EACpD,OAAO,oCAAoC,oBAAoB,EAC/D,OAAO,wBAAwB,4BAA4B,EAC3D,OAAO,uBAAuB,oBAAoB,EAClD,OAAO,mBAAmB,4BAA4B,EACtD,OAAO,CAAC,YAAY;AACnB,aAAW;AAAA,IACT,gBAAgB;AAAA,MACd,KAAK,QAAQ;AAAA,MACb,WAAW,QAAQ;AAAA,MACnB,YAAY,QAAQ;AAAA,MACpB,WAAW,QAAQ;AAAA,MACnB,QAAQ,QAAQ;AAAA,IAClB;AAAA,EACF,CAAC;AACH,CAAC;AAEH,QAAQ,MAAM;","names":["logger","process","sql","createHash","createHash","sql","getWorkflows","createHash","http","resolve","path","getApis","getWebApps","http","resolve","createClient","isNestedType","process","process","getApis","getWorkflows","getWebApps","fs","fs","toClientConfig","path","resolve","Readable","KafkaJS","Buffer","process","http","Kafka","KafkaJS","logger","Buffer","Readable","cluster","resolve","process","getSourceDir","process","path","fs","getWorkflows","logger","resolve","getWorkflows","process"]}