@514labs/moose-lib 0.6.249-ci-2-g7652377c → 0.6.249-ci-3-gc31139ef
- package/dist/{browserCompatible-CYDIQRRZ.d.mts → browserCompatible-BTN82akc.d.mts} +1 -1
- package/dist/{browserCompatible-C_H_-QHB.d.ts → browserCompatible-CoxWbneN.d.ts} +1 -1
- package/dist/browserCompatible.d.mts +2 -2
- package/dist/browserCompatible.d.ts +2 -2
- package/dist/browserCompatible.js.map +1 -1
- package/dist/browserCompatible.mjs.map +1 -1
- package/dist/dmv2/index.d.mts +1 -1
- package/dist/dmv2/index.d.ts +1 -1
- package/dist/dmv2/index.js.map +1 -1
- package/dist/dmv2/index.mjs.map +1 -1
- package/dist/{index-xC52kbse.d.mts → index-CNlTyF6R.d.mts} +1 -101
- package/dist/{index-xC52kbse.d.ts → index-CNlTyF6R.d.ts} +1 -101
- package/dist/index.d.mts +3 -3
- package/dist/index.d.ts +3 -3
- package/dist/index.js +0 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +0 -1
- package/dist/index.mjs.map +1 -1
- package/dist/moose-runner.js +0 -15
- package/dist/moose-runner.js.map +1 -1
- package/dist/moose-runner.mjs +0 -15
- package/dist/moose-runner.mjs.map +1 -1
- package/package.json +1 -1
package/dist/{index-xC52kbse.d.mts → index-CNlTyF6R.d.mts}
package/dist/{index-xC52kbse.d.ts → index-CNlTyF6R.d.ts}
CHANGED (the same diff applies to both declaration files; shown once below)

@@ -190,7 +190,6 @@ declare enum ClickHouseEngines {
     Buffer = "Buffer",
     Distributed = "Distributed",
     IcebergS3 = "IcebergS3",
-    Kafka = "Kafka",
     ReplicatedMergeTree = "ReplicatedMergeTree",
     ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
     ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -638,105 +637,6 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
     /** Optional: Policy name for data distribution */
     policyName?: string;
 };
-/**
- * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
- *
- * These settings control the behavior of the Kafka consumer and can be modified
- * after table creation without recreating the table.
- *
- * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
- */
-/**
- * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
- *
- * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
- */
-interface KafkaTableSettings {
-    /** Security protocol for Kafka connection */
-    kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
-    /** SASL mechanism for authentication */
-    kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
-    /** SASL username */
-    kafka_sasl_username?: string;
-    /** SASL password */
-    kafka_sasl_password?: string;
-    /** Schema definition (required for formats like Avro, Cap'n Proto) */
-    kafka_schema?: string;
-    /** Number of parallel consumers (default: 1, max: number of topic partitions) */
-    kafka_num_consumers?: string;
-    /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
-    kafka_max_block_size?: string;
-    /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
-    kafka_skip_broken_messages?: string;
-    /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
-    kafka_commit_every_batch?: string;
-    /** Client identifier passed to Kafka broker */
-    kafka_client_id?: string;
-    /** Timeout for polling Kafka in milliseconds. Default: 0 */
-    kafka_poll_timeout_ms?: string;
-    /** Maximum batch size for poll. Default: 0 */
-    kafka_poll_max_batch_size?: string;
-    /** Interval between flushes in milliseconds. Default: 0 */
-    kafka_flush_interval_ms?: string;
-    /** Consumer reschedule interval in milliseconds. Default: 0 */
-    kafka_consumer_reschedule_ms?: string;
-    /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
-    kafka_thread_per_consumer?: string;
-    /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
-    kafka_handle_error_mode?: "default" | "stream";
-    /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
-    kafka_commit_on_select?: string;
-    /** Maximum rows per message for row-based formats. Default: 1 */
-    kafka_max_rows_per_message?: string;
-    /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
-    kafka_compression_codec?: string;
-    /** Compression level. Default: -1 (use codec default) */
-    kafka_compression_level?: string;
-}
-/**
- * Configuration for Kafka engine - streaming data ingestion from Kafka topics
- *
- * The Kafka engine creates an internal consumer that reads messages from topics.
- * Typically used with materialized views to persist data into MergeTree tables.
- *
- * @template T The data type of the records stored in the table.
- *
- * @example
- * ```typescript
- * import { mooseRuntimeEnv } from "@514labs/moose-lib";
- *
- * const eventStream = new OlapTable<Event>("event_stream", {
- *   engine: ClickHouseEngines.Kafka,
- *   brokerList: "kafka-1:9092,kafka-2:9092",
- *   topicList: "events",
- *   groupName: "moose_consumer",
- *   format: "JSONEachRow",
- *   settings: {
- *     kafka_num_consumers: "3",
- *     kafka_sasl_mechanism: "SCRAM-SHA-256",
- *     kafka_security_protocol: "SASL_PLAINTEXT",
- *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
- *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
- *   },
- * });
- * ```
- */
-type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
-    engine: ClickHouseEngines.Kafka;
-    /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
-    brokerList: string;
-    /** Kafka topics to consume from (comma-separated) */
-    topicList: string;
-    /** Consumer group identifier */
-    groupName: string;
-    /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
-    format: string;
-    /**
-     * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
-     * All other Kafka parameters must be specified here.
-     */
-    settings?: KafkaTableSettings;
-};
 /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
@@ -779,7 +679,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
 type LegacyOlapConfig<T> = BaseOlapConfig<T>;
-type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T
+type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
 /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.
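The headline change is that the Kafka table engine is dropped from the public type surface: the `Kafka` enum member, `KafkaTableSettings`, and `KafkaConfig<T>` are all removed. As a rough illustration of what stops type-checking, here is a table definition adapted from the removed JSDoc `@example`; the `Event` record type is a placeholder of our own, since the original example left it undefined.

```typescript
import { ClickHouseEngines, OlapTable, mooseRuntimeEnv } from "@514labs/moose-lib";

// Placeholder record type for illustration; not part of the package.
interface Event {
  id: string;
  message: string;
}

// This compiled against 0.6.249-ci-2-g7652377c; against 0.6.249-ci-3-gc31139ef
// it fails because ClickHouseEngines.Kafka and KafkaConfig<T> no longer exist.
const eventStream = new OlapTable<Event>("event_stream", {
  engine: ClickHouseEngines.Kafka,
  brokerList: "kafka-1:9092,kafka-2:9092",
  topicList: "events",
  groupName: "moose_consumer",
  format: "JSONEachRow",
  settings: {
    kafka_num_consumers: "3",
    kafka_sasl_mechanism: "SCRAM-SHA-256",
    kafka_security_protocol: "SASL_PLAINTEXT",
    kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
    kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
  },
});
```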
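Separately, the third hunk in the declaration diff fixes a defect in the previously published files: the old `EngineConfig<T>` line is cut off mid-token (`IcebergS3Config<T`), while the new line closes the union with `IcebergS3Config<T>;`. A minimal sketch of code exercising the repaired union, assuming the enum's `MergeTree` member and the `orderByFields` option from `BaseOlapConfig` (both referenced elsewhere in these declarations but not shown in full in this diff):

```typescript
import { ClickHouseEngines, OlapTable } from "@514labs/moose-lib";

interface PageView {
  url: string;
  viewedAt: Date;
}

// With EngineConfig<T> closed properly, engine-specific configs such as
// MergeTreeConfig<T> resolve against the union as expected.
const pageViews = new OlapTable<PageView>("page_views", {
  engine: ClickHouseEngines.MergeTree,
  orderByFields: ["viewedAt"],
});
```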
package/dist/index.d.mts
CHANGED

@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-
-import { K as ApiUtil, a4 as MooseClient } from './index-
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-BTN82akc.mjs';
+import { K as ApiUtil, a4 as MooseClient } from './index-CNlTyF6R.mjs';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CNlTyF6R.mjs';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
package/dist/index.d.ts
CHANGED

@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-
-import { K as ApiUtil, a4 as MooseClient } from './index-
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-CoxWbneN.js';
+import { K as ApiUtil, a4 as MooseClient } from './index-CNlTyF6R.js';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CNlTyF6R.js';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
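The remaining declaration churn is mechanical: the hashed chunk names change (`browserCompatible-CYDIQRRZ` → `browserCompatible-BTN82akc`, `index-xC52kbse` → `index-CNlTyF6R`), which is what bundlers typically do whenever a chunk's contents change. Consumers should be unaffected as long as they import from the package entry rather than the hashed internals, along these lines:

```typescript
// Stable entry point; the hashed chunk files it re-exports are internal
// bundler output and can be renamed in any release.
import { OlapTable, MaterializedView, sql } from "@514labs/moose-lib";
```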
package/dist/index.js
CHANGED

@@ -716,7 +716,6 @@ var ClickHouseEngines = /* @__PURE__ */ ((ClickHouseEngines2) => {
   ClickHouseEngines2["Buffer"] = "Buffer";
   ClickHouseEngines2["Distributed"] = "Distributed";
   ClickHouseEngines2["IcebergS3"] = "IcebergS3";
-  ClickHouseEngines2["Kafka"] = "Kafka";
   ClickHouseEngines2["ReplicatedMergeTree"] = "ReplicatedMergeTree";
   ClickHouseEngines2["ReplicatedReplacingMergeTree"] = "ReplicatedReplacingMergeTree";
   ClickHouseEngines2["ReplicatedAggregatingMergeTree"] = "ReplicatedAggregatingMergeTree";