@514labs/moose-lib 0.6.250 → 0.6.252-ci-4-gb8a461bd
- package/dist/{browserCompatible-BTN82akc.d.mts → browserCompatible-CYDIQRRZ.d.mts} +1 -1
- package/dist/{browserCompatible-CoxWbneN.d.ts → browserCompatible-C_H_-QHB.d.ts} +1 -1
- package/dist/browserCompatible.d.mts +2 -2
- package/dist/browserCompatible.d.ts +2 -2
- package/dist/browserCompatible.js +6 -2
- package/dist/browserCompatible.js.map +1 -1
- package/dist/browserCompatible.mjs +6 -2
- package/dist/browserCompatible.mjs.map +1 -1
- package/dist/dmv2/index.d.mts +1 -1
- package/dist/dmv2/index.d.ts +1 -1
- package/dist/dmv2/index.js +6 -2
- package/dist/dmv2/index.js.map +1 -1
- package/dist/dmv2/index.mjs +6 -2
- package/dist/dmv2/index.mjs.map +1 -1
- package/dist/{index-CNlTyF6R.d.mts → index-xC52kbse.d.mts} +101 -1
- package/dist/{index-CNlTyF6R.d.ts → index-xC52kbse.d.ts} +101 -1
- package/dist/index.d.mts +3 -3
- package/dist/index.d.ts +3 -3
- package/dist/index.js +4 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +4 -2
- package/dist/index.mjs.map +1 -1
- package/dist/moose-runner.js +15 -0
- package/dist/moose-runner.js.map +1 -1
- package/dist/moose-runner.mjs +15 -0
- package/dist/moose-runner.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index-xC52kbse.d.mts
CHANGED

@@ -190,6 +190,7 @@ declare enum ClickHouseEngines {
     Buffer = "Buffer",
     Distributed = "Distributed",
     IcebergS3 = "IcebergS3",
+    Kafka = "Kafka",
     ReplicatedMergeTree = "ReplicatedMergeTree",
     ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
     ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -637,6 +638,105 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
     /** Optional: Policy name for data distribution */
     policyName?: string;
 };
+/**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * These settings control the behavior of the Kafka consumer and can be modified
+ * after table creation without recreating the table.
+ *
+ * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
+ */
+/**
+ * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
+ *
+ * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
+ */
+interface KafkaTableSettings {
+    /** Security protocol for Kafka connection */
+    kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
+    /** SASL mechanism for authentication */
+    kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
+    /** SASL username */
+    kafka_sasl_username?: string;
+    /** SASL password */
+    kafka_sasl_password?: string;
+    /** Schema definition (required for formats like Avro, Cap'n Proto) */
+    kafka_schema?: string;
+    /** Number of parallel consumers (default: 1, max: number of topic partitions) */
+    kafka_num_consumers?: string;
+    /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
+    kafka_max_block_size?: string;
+    /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
+    kafka_skip_broken_messages?: string;
+    /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
+    kafka_commit_every_batch?: string;
+    /** Client identifier passed to Kafka broker */
+    kafka_client_id?: string;
+    /** Timeout for polling Kafka in milliseconds. Default: 0 */
+    kafka_poll_timeout_ms?: string;
+    /** Maximum batch size for poll. Default: 0 */
+    kafka_poll_max_batch_size?: string;
+    /** Interval between flushes in milliseconds. Default: 0 */
+    kafka_flush_interval_ms?: string;
+    /** Consumer reschedule interval in milliseconds. Default: 0 */
+    kafka_consumer_reschedule_ms?: string;
+    /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
+    kafka_thread_per_consumer?: string;
+    /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
+    kafka_handle_error_mode?: "default" | "stream";
+    /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
+    kafka_commit_on_select?: string;
+    /** Maximum rows per message for row-based formats. Default: 1 */
+    kafka_max_rows_per_message?: string;
+    /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
+    kafka_compression_codec?: string;
+    /** Compression level. Default: -1 (use codec default) */
+    kafka_compression_level?: string;
+}
+/**
+ * Configuration for Kafka engine - streaming data ingestion from Kafka topics
+ *
+ * The Kafka engine creates an internal consumer that reads messages from topics.
+ * Typically used with materialized views to persist data into MergeTree tables.
+ *
+ * @template T The data type of the records stored in the table.
+ *
+ * @example
+ * ```typescript
+ * import { mooseRuntimeEnv } from "@514labs/moose-lib";
+ *
+ * const eventStream = new OlapTable<Event>("event_stream", {
+ *   engine: ClickHouseEngines.Kafka,
+ *   brokerList: "kafka-1:9092,kafka-2:9092",
+ *   topicList: "events",
+ *   groupName: "moose_consumer",
+ *   format: "JSONEachRow",
+ *   settings: {
+ *     kafka_num_consumers: "3",
+ *     kafka_sasl_mechanism: "SCRAM-SHA-256",
+ *     kafka_security_protocol: "SASL_PLAINTEXT",
+ *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
+ *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
+ *   },
+ * });
+ * ```
+ */
+type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
+    engine: ClickHouseEngines.Kafka;
+    /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
+    brokerList: string;
+    /** Kafka topics to consume from (comma-separated) */
+    topicList: string;
+    /** Consumer group identifier */
+    groupName: string;
+    /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
+    format: string;
+    /**
+     * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
+     * All other Kafka parameters must be specified here.
+     */
+    settings?: KafkaTableSettings;
+};
 /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
@@ -679,7 +779,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
 type LegacyOlapConfig<T> = BaseOlapConfig<T>;
-type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
+type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T> | KafkaConfig<T>;
 /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.
package/dist/index-xC52kbse.d.ts
CHANGED

(identical hunks to package/dist/index-xC52kbse.d.mts above: +101 -1)
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-BTN82akc.mjs';
-import { K as ApiUtil, a4 as MooseClient } from './index-CNlTyF6R.mjs';
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CNlTyF6R.mjs';
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-CYDIQRRZ.mjs';
+import { K as ApiUtil, a4 as MooseClient } from './index-xC52kbse.mjs';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-xC52kbse.mjs';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-CoxWbneN.js';
-import { K as ApiUtil, a4 as MooseClient } from './index-CNlTyF6R.js';
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CNlTyF6R.js';
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-C_H_-QHB.js';
+import { K as ApiUtil, a4 as MooseClient } from './index-xC52kbse.js';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-xC52kbse.js';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
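Both entry-point typings change only in their hashed chunk specifiers (index-CNlTyF6R → index-xC52kbse, plus the browserCompatible chunk renames listed at the top). The hashes are internal build artifacts; code importing from the package root is unaffected, e.g.:

```typescript
// The package entry points (dist/index.*) re-export the renamed chunks,
// so no consumer-side change is needed for this release.
import { OlapTable, ClickHouseEngines, MaterializedView } from "@514labs/moose-lib";
```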
package/dist/index.js
CHANGED
@@ -716,6 +716,7 @@ var ClickHouseEngines = /* @__PURE__ */ ((ClickHouseEngines2) => {
   ClickHouseEngines2["Buffer"] = "Buffer";
   ClickHouseEngines2["Distributed"] = "Distributed";
   ClickHouseEngines2["IcebergS3"] = "IcebergS3";
+  ClickHouseEngines2["Kafka"] = "Kafka";
   ClickHouseEngines2["ReplicatedMergeTree"] = "ReplicatedMergeTree";
   ClickHouseEngines2["ReplicatedReplacingMergeTree"] = "ReplicatedReplacingMergeTree";
   ClickHouseEngines2["ReplicatedAggregatingMergeTree"] = "ReplicatedAggregatingMergeTree";
@@ -741,6 +742,7 @@ init_commons();
 function getSourceDir() {
   return import_process.default.env.MOOSE_SOURCE_DIR || "app";
 }
+var isClientOnlyMode = () => import_process.default.env.MOOSE_CLIENT_ONLY === "true";
 var moose_internal = {
   tables: /* @__PURE__ */ new Map(),
   streams: /* @__PURE__ */ new Map(),
@@ -942,7 +944,7 @@ var OlapTable = class extends TypedBase {
     this.name = name;
     const tables = getMooseInternal().tables;
     const registryKey = this.config.version ? `${name}_${this.config.version}` : name;
-    if (tables.has(registryKey)) {
+    if (!isClientOnlyMode() && tables.has(registryKey)) {
       throw new Error(
         `OlapTable with name ${name} and version ${config?.version ?? "unversioned"} already exists`
       );
@@ -2424,7 +2426,7 @@ var SqlResource = class {
    */
   constructor(name, setup, teardown, options) {
     const sqlResources = getMooseInternal().sqlResources;
-    if (sqlResources.has(name)) {
+    if (!isClientOnlyMode() && sqlResources.has(name)) {
       throw new Error(`SqlResource with name ${name} already exists`);
     }
     sqlResources.set(name, this);
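Beyond the Kafka enum value, the runtime change in this bundle is the new isClientOnlyMode gate. A sketch of its behavioral effect (the surrounding script is hypothetical; the MOOSE_CLIENT_ONLY variable and the gated duplicate checks come from the hunks above):

```typescript
import { OlapTable } from "@514labs/moose-lib";

// Opt into client-only mode before any resources are constructed; the gate
// reads the env variable lazily, at construction time.
process.env.MOOSE_CLIENT_ONLY = "true";

interface Row {
  id: string;
}

// Previously the second construction threw
// "OlapTable with name events and version unversioned already exists".
// With `!isClientOnlyMode() && tables.has(registryKey)`, client-only
// processes (ones that re-evaluate model files without registering
// infrastructure) skip the duplicate-registration check and the later
// registration presumably overwrites the earlier Map entry.
const first = new OlapTable<Row>("events");
const second = new OlapTable<Row>("events"); // no longer throws in client-only mode

// SqlResource constructors receive the same gate:
//   if (!isClientOnlyMode() && sqlResources.has(name)) { throw ... }
```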