@514labs/moose-lib 0.6.247 → 0.6.248-ci-1-g620909c2
This diff reflects the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and shows the changes between the two versions as they appear in their public registry.
- package/dist/{browserCompatible-CwNwv0e0.d.mts → browserCompatible-CYDIQRRZ.d.mts} +26 -2
- package/dist/{browserCompatible-2eoeppzc.d.ts → browserCompatible-C_H_-QHB.d.ts} +26 -2
- package/dist/browserCompatible.d.mts +2 -2
- package/dist/browserCompatible.d.ts +2 -2
- package/dist/browserCompatible.js +10 -5
- package/dist/browserCompatible.js.map +1 -1
- package/dist/browserCompatible.mjs +10 -5
- package/dist/browserCompatible.mjs.map +1 -1
- package/dist/compilerPlugin.js +25 -1
- package/dist/compilerPlugin.js.map +1 -1
- package/dist/compilerPlugin.mjs +25 -1
- package/dist/compilerPlugin.mjs.map +1 -1
- package/dist/dataModels/toDataModels.js +25 -1
- package/dist/dataModels/toDataModels.js.map +1 -1
- package/dist/dataModels/toDataModels.mjs +25 -1
- package/dist/dataModels/toDataModels.mjs.map +1 -1
- package/dist/dmv2/index.d.mts +1 -1
- package/dist/dmv2/index.d.ts +1 -1
- package/dist/dmv2/index.js +10 -5
- package/dist/dmv2/index.js.map +1 -1
- package/dist/dmv2/index.mjs +10 -5
- package/dist/dmv2/index.mjs.map +1 -1
- package/dist/{index-bZEbqrMe.d.mts → index-xC52kbse.d.mts} +102 -1
- package/dist/{index-bZEbqrMe.d.ts → index-xC52kbse.d.ts} +102 -1
- package/dist/index.d.mts +3 -3
- package/dist/index.d.ts +3 -3
- package/dist/index.js +11 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +11 -5
- package/dist/index.mjs.map +1 -1
- package/dist/moose-runner.js +15 -0
- package/dist/moose-runner.js.map +1 -1
- package/dist/moose-runner.mjs +15 -0
- package/dist/moose-runner.mjs.map +1 -1
- package/package.json +1 -1
package/dist/{index-bZEbqrMe.d.mts → index-xC52kbse.d.mts}
RENAMED

@@ -104,6 +104,7 @@ interface Column {
     unique: false;
     primary_key: boolean;
     default: string | null;
+    materialized: string | null;
     ttl: string | null;
     codec: string | null;
     annotations: [string, any][];
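The new `materialized` field rides along on every Column record, mirroring ClickHouse's MATERIALIZED column expressions. A minimal sketch of a populated record — `name`, `data_type`, and `required` are assumptions drawn from the dlqColumns literals later in this diff, and the expression/codec values are purely illustrative:

```typescript
// Hypothetical Column record; only the fields visible in this diff are
// grounded, the rest are assumed for illustration.
const createdAtColumn = {
  name: "createdAt",
  data_type: "DateTime",        // assumed field
  required: true,                // assumed field
  unique: false,
  primary_key: false,
  default: null,
  materialized: "now()",        // NEW: ClickHouse MATERIALIZED expression, or null
  ttl: null,
  codec: "ZSTD(3)",             // illustrative codec
  annotations: [] as [string, any][],
};
```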
@@ -189,6 +190,7 @@ declare enum ClickHouseEngines {
     Buffer = "Buffer",
     Distributed = "Distributed",
     IcebergS3 = "IcebergS3",
+    Kafka = "Kafka",
     ReplicatedMergeTree = "ReplicatedMergeTree",
     ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
     ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -636,6 +638,105 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
     /** Optional: Policy name for data distribution */
     policyName?: string;
 };
+/**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * These settings control the behavior of the Kafka consumer and can be modified
+ * after table creation without recreating the table.
+ *
+ * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
+ */
+/**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
+ */
+interface KafkaTableSettings {
+    /** Security protocol for Kafka connection */
+    kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
+    /** SASL mechanism for authentication */
+    kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
+    /** SASL username */
+    kafka_sasl_username?: string;
+    /** SASL password */
+    kafka_sasl_password?: string;
+    /** Schema definition (required for formats like Avro, Cap'n Proto) */
+    kafka_schema?: string;
+    /** Number of parallel consumers (default: 1, max: number of topic partitions) */
+    kafka_num_consumers?: string;
+    /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
+    kafka_max_block_size?: string;
+    /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
+    kafka_skip_broken_messages?: string;
+    /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
+    kafka_commit_every_batch?: string;
+    /** Client identifier passed to Kafka broker */
+    kafka_client_id?: string;
+    /** Timeout for polling Kafka in milliseconds. Default: 0 */
+    kafka_poll_timeout_ms?: string;
+    /** Maximum batch size for poll. Default: 0 */
+    kafka_poll_max_batch_size?: string;
+    /** Interval between flushes in milliseconds. Default: 0 */
+    kafka_flush_interval_ms?: string;
+    /** Consumer reschedule interval in milliseconds. Default: 0 */
+    kafka_consumer_reschedule_ms?: string;
+    /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
+    kafka_thread_per_consumer?: string;
+    /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
+    kafka_handle_error_mode?: "default" | "stream";
+    /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
+    kafka_commit_on_select?: string;
+    /** Maximum rows per message for row-based formats. Default: 1 */
+    kafka_max_rows_per_message?: string;
+    /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
+    kafka_compression_codec?: string;
+    /** Compression level. Default: -1 (use codec default) */
+    kafka_compression_level?: string;
+}
+/**
+ * Configuration for Kafka engine - streaming data ingestion from Kafka topics
+ *
+ * The Kafka engine creates an internal consumer that reads messages from topics.
+ * Typically used with materialized views to persist data into MergeTree tables.
+ *
+ * @template T The data type of the records stored in the table.
+ *
+ * @example
+ * ```typescript
+ * import { mooseRuntimeEnv } from "@514labs/moose-lib";
+ *
+ * const eventStream = new OlapTable<Event>("event_stream", {
+ *   engine: ClickHouseEngines.Kafka,
+ *   brokerList: "kafka-1:9092,kafka-2:9092",
+ *   topicList: "events",
+ *   groupName: "moose_consumer",
+ *   format: "JSONEachRow",
+ *   settings: {
+ *     kafka_num_consumers: "3",
+ *     kafka_sasl_mechanism: "SCRAM-SHA-256",
+ *     kafka_security_protocol: "SASL_PLAINTEXT",
+ *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
+ *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
+ *   },
+ * });
+ * ```
+ */
+type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
+    engine: ClickHouseEngines.Kafka;
+    /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
+    brokerList: string;
+    /** Kafka topics to consume from (comma-separated) */
+    topicList: string;
+    /** Consumer group identifier */
+    groupName: string;
+    /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
+    format: string;
+    /**
+     * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
+     * All other Kafka parameters must be specified here.
+     */
+    settings?: KafkaTableSettings;
+};
 /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
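The doc comment above says these settings are the ALTER-able subset, so they can be changed without recreating the table. A hedged sketch of doing that at runtime with @clickhouse/client (already a dependency of this package, per the imports in index.d.mts below); the URL and table name "event_stream" are hypothetical:

```typescript
import { createClient } from "@clickhouse/client";

// Point at your ClickHouse instance; credentials omitted for brevity.
const client = createClient({ url: "http://localhost:8123" });

// Modify an alterable Kafka setting in place, e.g. scale consumer parallelism.
await client.command({
  query: "ALTER TABLE event_stream MODIFY SETTING kafka_num_consumers = 3",
});

await client.close();
```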
@@ -678,7 +779,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
 type LegacyOlapConfig<T> = BaseOlapConfig<T>;
-type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
+type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T> | KafkaConfig<T>;
 /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.
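Because KafkaConfig joins a discriminated union keyed on `engine`, code that sees these declarations can narrow to the Kafka-only fields. A sketch — EngineConfig is internal to this declaration file, so this assumes package-internal code, and the record type is hypothetical:

```typescript
// Hypothetical: assumes compilation against these internal declarations.
declare const config: EngineConfig<{ id: string }>;

if (config.engine === ClickHouseEngines.Kafka) {
  // Narrowed to KafkaConfig<{ id: string }>: Kafka-only fields are typed here.
  console.log(config.brokerList, config.topicList, config.groupName);
  console.log(config.settings?.kafka_num_consumers);
}
```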
package/dist/{index-bZEbqrMe.d.ts → index-xC52kbse.d.ts}
RENAMED

Identical changes to the .d.mts rename above (same four hunks: `materialized` on Column, the Kafka enum member, the KafkaTableSettings/KafkaConfig block, and the widened EngineConfig union).
package/dist/index.d.mts
CHANGED

@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize,
-import { K as ApiUtil, a4 as MooseClient } from './index-
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-CYDIQRRZ.mjs';
+import { K as ApiUtil, a4 as MooseClient } from './index-xC52kbse.mjs';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-xC52kbse.mjs';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
package/dist/index.d.ts
CHANGED

@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize,
-import { K as ApiUtil, a4 as MooseClient } from './index-
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-C_H_-QHB.js';
+import { K as ApiUtil, a4 as MooseClient } from './index-xC52kbse.js';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-xC52kbse.js';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
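The chunk-hash churn here (bZEbqrMe → xC52kbse, CwNwv0e0 → CYDIQRRZ) is internal to the bundle; consumers keep importing from the package root. A sketch using a few names visible in the new export list — the field typings follow Moose's data-model conventions but are illustrative:

```typescript
import { OlapTable } from "@514labs/moose-lib";
import type { DateTime, Key, UInt32 } from "@514labs/moose-lib";

// Hypothetical data model built from the re-exported ClickHouse type helpers.
interface PageView {
  id: Key<string>;
  viewedAt: DateTime;
  durationMs: UInt32;
}

const pageViews = new OlapTable<PageView>("page_views");
```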
package/dist/index.js
CHANGED

@@ -716,6 +716,7 @@ var ClickHouseEngines = /* @__PURE__ */ ((ClickHouseEngines2) => {
   ClickHouseEngines2["Buffer"] = "Buffer";
   ClickHouseEngines2["Distributed"] = "Distributed";
   ClickHouseEngines2["IcebergS3"] = "IcebergS3";
+  ClickHouseEngines2["Kafka"] = "Kafka";
   ClickHouseEngines2["ReplicatedMergeTree"] = "ReplicatedMergeTree";
   ClickHouseEngines2["ReplicatedReplacingMergeTree"] = "ReplicatedReplacingMergeTree";
   ClickHouseEngines2["ReplicatedAggregatingMergeTree"] = "ReplicatedAggregatingMergeTree";
@@ -850,7 +851,8 @@ var dlqColumns = [
     default: null,
     annotations: [],
     ttl: null,
-    codec: null
+    codec: null,
+    materialized: null
   },
   {
     name: "errorMessage",
@@ -861,7 +863,8 @@ var dlqColumns = [
     default: null,
     annotations: [],
     ttl: null,
-    codec: null
+    codec: null,
+    materialized: null
   },
   {
     name: "errorType",
@@ -872,7 +875,8 @@ var dlqColumns = [
     default: null,
     annotations: [],
     ttl: null,
-    codec: null
+    codec: null,
+    materialized: null
   },
   {
     name: "failedAt",
@@ -883,7 +887,8 @@ var dlqColumns = [
     default: null,
     annotations: [],
     ttl: null,
-    codec: null
+    codec: null,
+    materialized: null
   },
   {
     name: "source",
@@ -894,7 +899,8 @@ var dlqColumns = [
     default: null,
     annotations: [],
     ttl: null,
-    codec: null
+    codec: null,
+    materialized: null
   }
 ];
 var getWorkflows = async () => {
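All five dlqColumns edits are the same mechanical change: every column literal gains `materialized: null`. A sketch of the shared-defaults shape these literals now follow (the helper itself is hypothetical — the package spells each literal out):

```typescript
// Common tail shared by every dead-letter-queue column after this release.
const columnDefaults = {
  default: null,
  annotations: [] as [string, any][],
  ttl: null,
  codec: null,
  materialized: null, // new in 0.6.248
};

// Illustrative reconstruction of one dlq column using the shared defaults.
const errorMessageColumn = { name: "errorMessage", ...columnDefaults };
```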