@514labs/moose-lib 0.6.238-ci-5-g7f7049d1 → 0.6.238-ci-1-g65c37d1e

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/{browserCompatible-CKgEUuZc.d.ts → browserCompatible-C7Tw6oiI.d.ts} +2 -26
  2. package/dist/{browserCompatible-DCc9Zd_X.d.mts → browserCompatible-Doxlb78a.d.mts} +2 -26
  3. package/dist/browserCompatible.d.mts +2 -2
  4. package/dist/browserCompatible.d.ts +2 -2
  5. package/dist/browserCompatible.js +5 -10
  6. package/dist/browserCompatible.js.map +1 -1
  7. package/dist/browserCompatible.mjs +5 -10
  8. package/dist/browserCompatible.mjs.map +1 -1
  9. package/dist/compilerPlugin.js +1 -25
  10. package/dist/compilerPlugin.js.map +1 -1
  11. package/dist/compilerPlugin.mjs +1 -25
  12. package/dist/compilerPlugin.mjs.map +1 -1
  13. package/dist/dataModels/toDataModels.js +1 -25
  14. package/dist/dataModels/toDataModels.js.map +1 -1
  15. package/dist/dataModels/toDataModels.mjs +1 -25
  16. package/dist/dataModels/toDataModels.mjs.map +1 -1
  17. package/dist/dmv2/index.d.mts +1 -1
  18. package/dist/dmv2/index.d.ts +1 -1
  19. package/dist/dmv2/index.js +5 -10
  20. package/dist/dmv2/index.js.map +1 -1
  21. package/dist/dmv2/index.mjs +5 -10
  22. package/dist/dmv2/index.mjs.map +1 -1
  23. package/dist/{index-CYFF3a0J.d.mts → index-DPREeoku.d.mts} +101 -2
  24. package/dist/{index-CYFF3a0J.d.ts → index-DPREeoku.d.ts} +101 -2
  25. package/dist/index.d.mts +3 -3
  26. package/dist/index.d.ts +3 -3
  27. package/dist/index.js +6 -10
  28. package/dist/index.js.map +1 -1
  29. package/dist/index.mjs +6 -10
  30. package/dist/index.mjs.map +1 -1
  31. package/dist/moose-runner.js +15 -0
  32. package/dist/moose-runner.js.map +1 -1
  33. package/dist/moose-runner.mjs +15 -0
  34. package/dist/moose-runner.mjs.map +1 -1
  35. package/package.json +1 -1
package/dist/{index-CYFF3a0J.d.mts → index-DPREeoku.d.mts} CHANGED
@@ -104,7 +104,6 @@ interface Column {
  unique: false;
  primary_key: boolean;
  default: string | null;
- materialized: string | null;
  ttl: string | null;
  codec: string | null;
  annotations: [string, any][];
@@ -190,6 +189,7 @@ declare enum ClickHouseEngines {
  Buffer = "Buffer",
  Distributed = "Distributed",
  IcebergS3 = "IcebergS3",
+ Kafka = "Kafka",
  ReplicatedMergeTree = "ReplicatedMergeTree",
  ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
  ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -637,6 +637,105 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
  /** Optional: Policy name for data distribution */
  policyName?: string;
  };
+ /**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * These settings control the behavior of the Kafka consumer and can be modified
+ * after table creation without recreating the table.
+ *
+ * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
+ */
+ /**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
+ */
+ interface KafkaTableSettings {
+ /** Security protocol for Kafka connection */
+ kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
+ /** SASL mechanism for authentication */
+ kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
+ /** SASL username */
+ kafka_sasl_username?: string;
+ /** SASL password */
+ kafka_sasl_password?: string;
+ /** Schema definition (required for formats like Avro, Cap'n Proto) */
+ kafka_schema?: string;
+ /** Number of parallel consumers (default: 1, max: number of topic partitions) */
+ kafka_num_consumers?: string;
+ /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
+ kafka_max_block_size?: string;
+ /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
+ kafka_skip_broken_messages?: string;
+ /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
+ kafka_commit_every_batch?: string;
+ /** Client identifier passed to Kafka broker */
+ kafka_client_id?: string;
+ /** Timeout for polling Kafka in milliseconds. Default: 0 */
+ kafka_poll_timeout_ms?: string;
+ /** Maximum batch size for poll. Default: 0 */
+ kafka_poll_max_batch_size?: string;
+ /** Interval between flushes in milliseconds. Default: 0 */
+ kafka_flush_interval_ms?: string;
+ /** Consumer reschedule interval in milliseconds. Default: 0 */
+ kafka_consumer_reschedule_ms?: string;
+ /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
+ kafka_thread_per_consumer?: string;
+ /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
+ kafka_handle_error_mode?: "default" | "stream";
+ /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
+ kafka_commit_on_select?: string;
+ /** Maximum rows per message for row-based formats. Default: 1 */
+ kafka_max_rows_per_message?: string;
+ /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
+ kafka_compression_codec?: string;
+ /** Compression level. Default: -1 (use codec default) */
+ kafka_compression_level?: string;
+ }
+ /**
+ * Configuration for Kafka engine - streaming data ingestion from Kafka topics
+ *
+ * The Kafka engine creates an internal consumer that reads messages from topics.
+ * Typically used with materialized views to persist data into MergeTree tables.
+ *
+ * @template T The data type of the records stored in the table.
+ *
+ * @example
+ * ```typescript
+ * import { mooseRuntimeEnv } from "@514labs/moose-lib";
+ *
+ * const eventStream = new OlapTable<Event>("event_stream", {
+ *   engine: ClickHouseEngines.Kafka,
+ *   brokerList: "kafka-1:9092,kafka-2:9092",
+ *   topicList: "events",
+ *   groupName: "moose_consumer",
+ *   format: "JSONEachRow",
+ *   settings: {
+ *     kafka_num_consumers: "3",
+ *     kafka_sasl_mechanism: "SCRAM-SHA-256",
+ *     kafka_security_protocol: "SASL_PLAINTEXT",
+ *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
+ *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
+ *   },
+ * });
+ * ```
+ */
+ type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
+ engine: ClickHouseEngines.Kafka;
+ /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
+ brokerList: string;
+ /** Kafka topics to consume from (comma-separated) */
+ topicList: string;
+ /** Consumer group identifier */
+ groupName: string;
+ /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
+ format: string;
+ /**
+ * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
+ * All other Kafka parameters must be specified here.
+ */
+ settings?: KafkaTableSettings;
+ };
  /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
@@ -679,7 +778,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
  type LegacyOlapConfig<T> = BaseOlapConfig<T>;
- type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
+ type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T> | KafkaConfig<T>;
  /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.
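The JSDoc above notes that Kafka-engine tables are typically paired with materialized views that persist consumed messages into MergeTree tables. Below is a minimal sketch of that pattern. Only `OlapTable`, `ClickHouseEngines`, and the `KafkaConfig` fields are confirmed by this diff; the materialized-view half (the `MaterializedView` option names and the `sql`-tag table interpolation) is an assumption based on typical moose-lib usage, not something this diff establishes.

```typescript
import {
  ClickHouseEngines,
  Key,
  MaterializedView,
  OlapTable,
  sql,
} from "@514labs/moose-lib";

interface Event {
  id: Key<string>;
  name: string;
  createdAt: Date;
}

// Kafka-engine table: ClickHouse runs an internal consumer on the "events"
// topic. brokerList/topicList/groupName/format come from the KafkaConfig<T>
// type added in this version.
const eventStream = new OlapTable<Event>("event_stream", {
  engine: ClickHouseEngines.Kafka,
  brokerList: "kafka-1:9092",
  topicList: "events",
  groupName: "moose_consumer",
  format: "JSONEachRow",
});

// Hypothetical materialized view persisting consumed rows into a MergeTree
// table; the option names here are assumptions, not part of this diff.
const persistedEvents = new MaterializedView<Event>({
  selectStatement: sql`SELECT * FROM ${eventStream}`,
  selectTables: [eventStream],
  tableName: "events_persisted",
  materializedViewName: "events_persisted_mv",
  orderByFields: ["id"],
});
```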
package/dist/{index-CYFF3a0J.d.ts → index-DPREeoku.d.ts} CHANGED
@@ -104,7 +104,6 @@ interface Column {
  unique: false;
  primary_key: boolean;
  default: string | null;
- materialized: string | null;
  ttl: string | null;
  codec: string | null;
  annotations: [string, any][];
@@ -190,6 +189,7 @@ declare enum ClickHouseEngines {
  Buffer = "Buffer",
  Distributed = "Distributed",
  IcebergS3 = "IcebergS3",
+ Kafka = "Kafka",
  ReplicatedMergeTree = "ReplicatedMergeTree",
  ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
  ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -637,6 +637,105 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
  /** Optional: Policy name for data distribution */
  policyName?: string;
  };
+ /**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * These settings control the behavior of the Kafka consumer and can be modified
+ * after table creation without recreating the table.
+ *
+ * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
+ */
+ /**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
+ */
+ interface KafkaTableSettings {
+ /** Security protocol for Kafka connection */
+ kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
+ /** SASL mechanism for authentication */
+ kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
+ /** SASL username */
+ kafka_sasl_username?: string;
+ /** SASL password */
+ kafka_sasl_password?: string;
+ /** Schema definition (required for formats like Avro, Cap'n Proto) */
+ kafka_schema?: string;
+ /** Number of parallel consumers (default: 1, max: number of topic partitions) */
+ kafka_num_consumers?: string;
+ /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
+ kafka_max_block_size?: string;
+ /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
+ kafka_skip_broken_messages?: string;
+ /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
+ kafka_commit_every_batch?: string;
+ /** Client identifier passed to Kafka broker */
+ kafka_client_id?: string;
+ /** Timeout for polling Kafka in milliseconds. Default: 0 */
+ kafka_poll_timeout_ms?: string;
+ /** Maximum batch size for poll. Default: 0 */
+ kafka_poll_max_batch_size?: string;
+ /** Interval between flushes in milliseconds. Default: 0 */
+ kafka_flush_interval_ms?: string;
+ /** Consumer reschedule interval in milliseconds. Default: 0 */
+ kafka_consumer_reschedule_ms?: string;
+ /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
+ kafka_thread_per_consumer?: string;
+ /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
+ kafka_handle_error_mode?: "default" | "stream";
+ /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
+ kafka_commit_on_select?: string;
+ /** Maximum rows per message for row-based formats. Default: 1 */
+ kafka_max_rows_per_message?: string;
+ /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
+ kafka_compression_codec?: string;
+ /** Compression level. Default: -1 (use codec default) */
+ kafka_compression_level?: string;
+ }
+ /**
+ * Configuration for Kafka engine - streaming data ingestion from Kafka topics
+ *
+ * The Kafka engine creates an internal consumer that reads messages from topics.
+ * Typically used with materialized views to persist data into MergeTree tables.
+ *
+ * @template T The data type of the records stored in the table.
+ *
+ * @example
+ * ```typescript
+ * import { mooseRuntimeEnv } from "@514labs/moose-lib";
+ *
+ * const eventStream = new OlapTable<Event>("event_stream", {
+ *   engine: ClickHouseEngines.Kafka,
+ *   brokerList: "kafka-1:9092,kafka-2:9092",
+ *   topicList: "events",
+ *   groupName: "moose_consumer",
+ *   format: "JSONEachRow",
+ *   settings: {
+ *     kafka_num_consumers: "3",
+ *     kafka_sasl_mechanism: "SCRAM-SHA-256",
+ *     kafka_security_protocol: "SASL_PLAINTEXT",
+ *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
+ *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
+ *   },
+ * });
+ * ```
+ */
+ type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
+ engine: ClickHouseEngines.Kafka;
+ /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
+ brokerList: string;
+ /** Kafka topics to consume from (comma-separated) */
+ topicList: string;
+ /** Consumer group identifier */
+ groupName: string;
+ /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
+ format: string;
+ /**
+ * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
+ * All other Kafka parameters must be specified here.
+ */
+ settings?: KafkaTableSettings;
+ };
  /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
@@ -679,7 +778,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
  type LegacyOlapConfig<T> = BaseOlapConfig<T>;
- type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
+ type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T> | KafkaConfig<T>;
  /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.
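`KafkaTableSettings` documents its properties as alterable via `ALTER TABLE MODIFY SETTING`, i.e. tunable after table creation without dropping and recreating the table. A minimal sketch of such an adjustment issued directly through `@clickhouse/client` (already a dependency, per the imports in the hunks below); the connection URL and table name are placeholders:

```typescript
import { createClient } from "@clickhouse/client";

// Placeholder connection details; adjust for your deployment.
const client = createClient({ url: "http://localhost:8123" });

// kafka_num_consumers is one of the alterable Kafka engine settings, so it
// can be raised (up to the topic's partition count) on a live table.
await client.command({
  query: "ALTER TABLE event_stream MODIFY SETTING kafka_num_consumers = 3",
});

await client.close();
```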
package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
- export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-DCc9Zd_X.mjs';
- import { K as ApiUtil, a4 as MooseClient } from './index-CYFF3a0J.mjs';
- export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CYFF3a0J.mjs';
+ export { C as ClickHouseByteSize, p as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, q as DateTime64, s as DateTime64String, r as DateTimeString, B as Decimal, F as FixedString, t as Float32, u as Float64, v as Int16, w as Int32, x as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, y as UInt16, z as UInt32, A as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-Doxlb78a.mjs';
+ import { K as ApiUtil, a4 as MooseClient } from './index-DPREeoku.mjs';
+ export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-DPREeoku.mjs';
  import * as _clickhouse_client from '@clickhouse/client';
  import { KafkaJS } from '@confluentinc/kafka-javascript';
  import http from 'http';
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
- export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-CKgEUuZc.js';
- import { K as ApiUtil, a4 as MooseClient } from './index-CYFF3a0J.js';
- export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CYFF3a0J.js';
+ export { C as ClickHouseByteSize, p as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, q as DateTime64, s as DateTime64String, r as DateTimeString, B as Decimal, F as FixedString, t as Float32, u as Float64, v as Int16, w as Int32, x as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, y as UInt16, z as UInt32, A as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-C7Tw6oiI.js';
+ import { K as ApiUtil, a4 as MooseClient } from './index-DPREeoku.js';
+ export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-DPREeoku.js';
  import * as _clickhouse_client from '@clickhouse/client';
  import { KafkaJS } from '@confluentinc/kafka-javascript';
  import http from 'http';
package/dist/index.js CHANGED
@@ -701,6 +701,7 @@ var ClickHouseEngines = /* @__PURE__ */ ((ClickHouseEngines2) => {
  ClickHouseEngines2["Buffer"] = "Buffer";
  ClickHouseEngines2["Distributed"] = "Distributed";
  ClickHouseEngines2["IcebergS3"] = "IcebergS3";
+ ClickHouseEngines2["Kafka"] = "Kafka";
  ClickHouseEngines2["ReplicatedMergeTree"] = "ReplicatedMergeTree";
  ClickHouseEngines2["ReplicatedReplacingMergeTree"] = "ReplicatedReplacingMergeTree";
  ClickHouseEngines2["ReplicatedAggregatingMergeTree"] = "ReplicatedAggregatingMergeTree";
@@ -835,8 +836,7 @@ var dlqColumns = [
  default: null,
  annotations: [],
  ttl: null,
- codec: null,
- materialized: null
+ codec: null
  },
  {
  name: "errorMessage",
@@ -847,8 +847,7 @@ var dlqColumns = [
  default: null,
  annotations: [],
  ttl: null,
- codec: null,
- materialized: null
+ codec: null
  },
  {
  name: "errorType",
@@ -859,8 +858,7 @@ var dlqColumns = [
  default: null,
  annotations: [],
  ttl: null,
- codec: null,
- materialized: null
+ codec: null
  },
  {
  name: "failedAt",
@@ -871,8 +869,7 @@ var dlqColumns = [
  default: null,
  annotations: [],
  ttl: null,
- codec: null,
- materialized: null
+ codec: null
  },
  {
  name: "source",
@@ -883,8 +880,7 @@ var dlqColumns = [
  default: null,
  annotations: [],
  ttl: null,
- codec: null,
- materialized: null
+ codec: null
  }
  ];
  var getWorkflows = async () => {
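These hunks are the runtime counterpart of the `materialized: string | null` removal from the `Column` interface above: each dead-letter-queue column literal drops the property, and the `ClickHouseMaterialized` re-export disappears from the type entry points. A purely illustrative sketch of one entry after the change follows; fields the hunks do not show (such as the column's data type) are deliberately omitted, so this is not the complete `Column` shape.

```typescript
// One dlqColumns entry as of 0.6.238-ci-1-g65c37d1e; only fields visible in
// the hunks above are included here.
const errorMessageColumn = {
  name: "errorMessage",
  default: null,
  annotations: [] as [string, any][],
  ttl: null,
  codec: null,
  // materialized: null  <- removed in this version, along with the
  // ClickHouseMaterialized export and Column.materialized
};
```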