@514labs/moose-lib 0.6.252-ci-4-gb8a461bd → 0.6.252-ci-2-g41538689
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{browserCompatible-CYDIQRRZ.d.mts → browserCompatible-BTN82akc.d.mts} +1 -1
- package/dist/{browserCompatible-C_H_-QHB.d.ts → browserCompatible-CoxWbneN.d.ts} +1 -1
- package/dist/browserCompatible.d.mts +2 -2
- package/dist/browserCompatible.d.ts +2 -2
- package/dist/browserCompatible.js +14 -6
- package/dist/browserCompatible.js.map +1 -1
- package/dist/browserCompatible.mjs +14 -6
- package/dist/browserCompatible.mjs.map +1 -1
- package/dist/compilerPlugin.js.map +1 -1
- package/dist/compilerPlugin.mjs.map +1 -1
- package/dist/dmv2/index.d.mts +1 -1
- package/dist/dmv2/index.d.ts +1 -1
- package/dist/dmv2/index.js +14 -6
- package/dist/dmv2/index.js.map +1 -1
- package/dist/dmv2/index.mjs +14 -6
- package/dist/dmv2/index.mjs.map +1 -1
- package/dist/{index-xC52kbse.d.mts → index-CNlTyF6R.d.mts} +1 -101
- package/dist/{index-xC52kbse.d.ts → index-CNlTyF6R.d.ts} +1 -101
- package/dist/index.d.mts +28 -5
- package/dist/index.d.ts +28 -5
- package/dist/index.js +16 -7
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +15 -7
- package/dist/index.mjs.map +1 -1
- package/dist/moose-runner.js +100 -217
- package/dist/moose-runner.js.map +1 -1
- package/dist/moose-runner.mjs +100 -217
- package/dist/moose-runner.mjs.map +1 -1
- package/package.json +1 -1
package/dist/{index-xC52kbse.d.mts → index-CNlTyF6R.d.mts}
CHANGED

@@ -190,7 +190,6 @@ declare enum ClickHouseEngines {
     Buffer = "Buffer",
     Distributed = "Distributed",
     IcebergS3 = "IcebergS3",
-    Kafka = "Kafka",
     ReplicatedMergeTree = "ReplicatedMergeTree",
     ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
     ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -638,105 +637,6 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
     /** Optional: Policy name for data distribution */
     policyName?: string;
 };
-/**
- * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
- *
- * These settings control the behavior of the Kafka consumer and can be modified
- * after table creation without recreating the table.
- *
- * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
- */
-/**
- * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
- *
- * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
- */
-interface KafkaTableSettings {
-    /** Security protocol for Kafka connection */
-    kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
-    /** SASL mechanism for authentication */
-    kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
-    /** SASL username */
-    kafka_sasl_username?: string;
-    /** SASL password */
-    kafka_sasl_password?: string;
-    /** Schema definition (required for formats like Avro, Cap'n Proto) */
-    kafka_schema?: string;
-    /** Number of parallel consumers (default: 1, max: number of topic partitions) */
-    kafka_num_consumers?: string;
-    /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
-    kafka_max_block_size?: string;
-    /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
-    kafka_skip_broken_messages?: string;
-    /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
-    kafka_commit_every_batch?: string;
-    /** Client identifier passed to Kafka broker */
-    kafka_client_id?: string;
-    /** Timeout for polling Kafka in milliseconds. Default: 0 */
-    kafka_poll_timeout_ms?: string;
-    /** Maximum batch size for poll. Default: 0 */
-    kafka_poll_max_batch_size?: string;
-    /** Interval between flushes in milliseconds. Default: 0 */
-    kafka_flush_interval_ms?: string;
-    /** Consumer reschedule interval in milliseconds. Default: 0 */
-    kafka_consumer_reschedule_ms?: string;
-    /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
-    kafka_thread_per_consumer?: string;
-    /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
-    kafka_handle_error_mode?: "default" | "stream";
-    /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
-    kafka_commit_on_select?: string;
-    /** Maximum rows per message for row-based formats. Default: 1 */
-    kafka_max_rows_per_message?: string;
-    /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
-    kafka_compression_codec?: string;
-    /** Compression level. Default: -1 (use codec default) */
-    kafka_compression_level?: string;
-}
-/**
- * Configuration for Kafka engine - streaming data ingestion from Kafka topics
- *
- * The Kafka engine creates an internal consumer that reads messages from topics.
- * Typically used with materialized views to persist data into MergeTree tables.
- *
- * @template T The data type of the records stored in the table.
- *
- * @example
- * ```typescript
- * import { mooseRuntimeEnv } from "@514labs/moose-lib";
- *
- * const eventStream = new OlapTable<Event>("event_stream", {
- *   engine: ClickHouseEngines.Kafka,
- *   brokerList: "kafka-1:9092,kafka-2:9092",
- *   topicList: "events",
- *   groupName: "moose_consumer",
- *   format: "JSONEachRow",
- *   settings: {
- *     kafka_num_consumers: "3",
- *     kafka_sasl_mechanism: "SCRAM-SHA-256",
- *     kafka_security_protocol: "SASL_PLAINTEXT",
- *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
- *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
- *   },
- * });
- * ```
- */
-type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
-    engine: ClickHouseEngines.Kafka;
-    /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
-    brokerList: string;
-    /** Kafka topics to consume from (comma-separated) */
-    topicList: string;
-    /** Consumer group identifier */
-    groupName: string;
-    /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
-    format: string;
-    /**
-     * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
-     * All other Kafka parameters must be specified here.
-     */
-    settings?: KafkaTableSettings;
-};
 /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
@@ -779,7 +679,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
 type LegacyOlapConfig<T> = BaseOlapConfig<T>;
-type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T
+type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
 /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.

package/dist/{index-xC52kbse.d.ts → index-CNlTyF6R.d.ts}
CHANGED

@@ -190,7 +190,6 @@ declare enum ClickHouseEngines {
     Buffer = "Buffer",
     Distributed = "Distributed",
     IcebergS3 = "IcebergS3",
-    Kafka = "Kafka",
     ReplicatedMergeTree = "ReplicatedMergeTree",
     ReplicatedReplacingMergeTree = "ReplicatedReplacingMergeTree",
     ReplicatedAggregatingMergeTree = "ReplicatedAggregatingMergeTree",
@@ -638,105 +637,6 @@ type DistributedConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByEx
     /** Optional: Policy name for data distribution */
     policyName?: string;
 };
-/**
- * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
- *
- * These settings control the behavior of the Kafka consumer and can be modified
- * after table creation without recreating the table.
- *
- * Reference: https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka
- */
-/**
- * Kafka engine table settings (alterable properties via ALTER TABLE MODIFY SETTING)
- *
- * Reference: https://clickhouse.com/docs/engines/table-engines/integrations/kafka
- */
-interface KafkaTableSettings {
-    /** Security protocol for Kafka connection */
-    kafka_security_protocol?: "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL";
-    /** SASL mechanism for authentication */
-    kafka_sasl_mechanism?: "GSSAPI" | "PLAIN" | "SCRAM-SHA-256" | "SCRAM-SHA-512" | "OAUTHBEARER";
-    /** SASL username */
-    kafka_sasl_username?: string;
-    /** SASL password */
-    kafka_sasl_password?: string;
-    /** Schema definition (required for formats like Avro, Cap'n Proto) */
-    kafka_schema?: string;
-    /** Number of parallel consumers (default: 1, max: number of topic partitions) */
-    kafka_num_consumers?: string;
-    /** Maximum batch size (in messages) for poll. Default: max_insert_block_size */
-    kafka_max_block_size?: string;
-    /** Number of broken messages to skip per block before throwing an exception. Default: 0 */
-    kafka_skip_broken_messages?: string;
-    /** Commit offsets after each inserted batch: "0" (manual) or "1" (auto-commit per batch). Default: "0" */
-    kafka_commit_every_batch?: string;
-    /** Client identifier passed to Kafka broker */
-    kafka_client_id?: string;
-    /** Timeout for polling Kafka in milliseconds. Default: 0 */
-    kafka_poll_timeout_ms?: string;
-    /** Maximum batch size for poll. Default: 0 */
-    kafka_poll_max_batch_size?: string;
-    /** Interval between flushes in milliseconds. Default: 0 */
-    kafka_flush_interval_ms?: string;
-    /** Consumer reschedule interval in milliseconds. Default: 0 */
-    kafka_consumer_reschedule_ms?: string;
-    /** Use dedicated thread per consumer: "0" (shared) or "1" (dedicated). Default: "0" */
-    kafka_thread_per_consumer?: string;
-    /** Error handling mode: 'default' (stop on error) or 'stream' (write errors to separate stream). Default: 'default' */
-    kafka_handle_error_mode?: "default" | "stream";
-    /** Commit on SELECT queries: "0" (false) or "1" (true). Default: "0" */
-    kafka_commit_on_select?: string;
-    /** Maximum rows per message for row-based formats. Default: 1 */
-    kafka_max_rows_per_message?: string;
-    /** Compression codec for producing messages (e.g., 'gzip', 'snappy', 'lz4', 'zstd') */
-    kafka_compression_codec?: string;
-    /** Compression level. Default: -1 (use codec default) */
-    kafka_compression_level?: string;
-}
-/**
- * Configuration for Kafka engine - streaming data ingestion from Kafka topics
- *
- * The Kafka engine creates an internal consumer that reads messages from topics.
- * Typically used with materialized views to persist data into MergeTree tables.
- *
- * @template T The data type of the records stored in the table.
- *
- * @example
- * ```typescript
- * import { mooseRuntimeEnv } from "@514labs/moose-lib";
- *
- * const eventStream = new OlapTable<Event>("event_stream", {
- *   engine: ClickHouseEngines.Kafka,
- *   brokerList: "kafka-1:9092,kafka-2:9092",
- *   topicList: "events",
- *   groupName: "moose_consumer",
- *   format: "JSONEachRow",
- *   settings: {
- *     kafka_num_consumers: "3",
- *     kafka_sasl_mechanism: "SCRAM-SHA-256",
- *     kafka_security_protocol: "SASL_PLAINTEXT",
- *     kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
- *     kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
- *   },
- * });
- * ```
- */
-type KafkaConfig<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpression" | "partitionBy" | "sampleByExpression"> & {
-    engine: ClickHouseEngines.Kafka;
-    /** Kafka broker addresses (comma-separated, e.g., 'kafka-1:9092,kafka-2:9092') */
-    brokerList: string;
-    /** Kafka topics to consume from (comma-separated) */
-    topicList: string;
-    /** Consumer group identifier */
-    groupName: string;
-    /** Message format (e.g., 'JSONEachRow', 'CSV', 'Avro') */
-    format: string;
-    /**
-     * Kafka settings (kafka_schema, kafka_num_consumers, security, tuning params)
-     * All other Kafka parameters must be specified here.
-     */
-    settings?: KafkaTableSettings;
-};
 /**
  * Configuration for IcebergS3 engine - read-only Iceberg table access
  *
@@ -779,7 +679,7 @@ type IcebergS3Config<T> = Omit<BaseOlapConfig<T>, "orderByFields" | "orderByExpr
  * @template T The data type of the records stored in the table.
  */
 type LegacyOlapConfig<T> = BaseOlapConfig<T>;
-type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T
+type EngineConfig<T> = MergeTreeConfig<T> | ReplacingMergeTreeConfig<T> | AggregatingMergeTreeConfig<T> | SummingMergeTreeConfig<T> | ReplicatedMergeTreeConfig<T> | ReplicatedReplacingMergeTreeConfig<T> | ReplicatedAggregatingMergeTreeConfig<T> | ReplicatedSummingMergeTreeConfig<T> | S3QueueConfig<T> | S3Config<T> | BufferConfig<T> | DistributedConfig<T> | IcebergS3Config<T>;
 /**
  * Union of all engine-specific configurations (new API)
  * @template T The data type of the records stored in the table.

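In practice, this removal means the ClickHouse Kafka table engine can no longer be configured through the published types: ClickHouseEngines.Kafka, KafkaTableSettings, and KafkaConfig<T> are deleted from both declaration files, and EngineConfig<T> no longer includes a Kafka variant. The sketch below, adapted from the JSDoc example deleted above, shows the kind of table definition that only type-checks against the previous release; the Event model is a hypothetical placeholder.

// Compiles only against the previous version; ClickHouseEngines.Kafka and the
// Kafka-specific config fields are removed in this release.
import { OlapTable, ClickHouseEngines, Key, mooseRuntimeEnv } from "@514labs/moose-lib";

// Hypothetical data model for illustration.
interface Event {
  id: Key<string>;
  name: string;
}

const eventStream = new OlapTable<Event>("event_stream", {
  engine: ClickHouseEngines.Kafka,
  brokerList: "kafka-1:9092,kafka-2:9092",
  topicList: "events",
  groupName: "moose_consumer",
  format: "JSONEachRow",
  settings: {
    kafka_num_consumers: "3",
    kafka_sasl_mechanism: "SCRAM-SHA-256",
    kafka_security_protocol: "SASL_PLAINTEXT",
    kafka_sasl_username: mooseRuntimeEnv.get("KAFKA_USERNAME"),
    kafka_sasl_password: mooseRuntimeEnv.get("KAFKA_PASSWORD"),
  },
});
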
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,6 @@
-export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-
-import { K as ApiUtil, a4 as MooseClient } from './index-
-export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-
+export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-BTN82akc.mjs';
+import { K as ApiUtil, a4 as MooseClient } from './index-CNlTyF6R.mjs';
+export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CNlTyF6R.mjs';
 import * as _clickhouse_client from '@clickhouse/client';
 import { KafkaJS } from '@confluentinc/kafka-javascript';
 import http from 'http';
@@ -44,6 +44,25 @@ declare const RETRY_INITIAL_TIME_MS = 100;
 declare const MAX_RETRIES_PRODUCER = 150;
 declare const RETRY_FACTOR_PRODUCER = 0.2;
 declare const ACKs = -1;
+/**
+ * Creates the base producer configuration for Kafka.
+ * Used by both the SDK stream publishing and streaming function workers.
+ *
+ * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)
+ * @returns Producer configuration object for the Confluent Kafka client
+ */
+declare function createProducerConfig(maxMessageBytes?: number): {
+    "message.max.bytes"?: number | undefined;
+    kafkaJS: {
+        idempotent: boolean;
+        acks: number;
+        retry: {
+            retries: number;
+            maxRetryTime: number;
+        };
+    };
+    "linger.ms": number;
+};
 type KafkaClientConfig = {
     clientId: string;
     broker: string;
@@ -55,8 +74,12 @@ type KafkaClientConfig = {
 /**
  * Dynamically creates and connects a KafkaJS producer using the provided configuration.
  * Returns a connected producer instance.
+ *
+ * @param cfg - Kafka client configuration
+ * @param logger - Logger instance
+ * @param maxMessageBytes - Optional max message size in bytes (synced with topic config)
  */
-declare function getKafkaProducer(cfg: KafkaClientConfig, logger: Logger): Promise<Producer>;
+declare function getKafkaProducer(cfg: KafkaClientConfig, logger: Logger, maxMessageBytes?: number): Promise<Producer>;
 /**
  * Interface for logging functionality
  */
@@ -531,4 +554,4 @@ type DataModelConfig<T> = Partial<{
     parallelism?: number;
 }>;
 
-export { ACKs, ApiUtil, type CSVParsingConfig, CSV_DELIMITERS, type CliLogData, DEFAULT_CSV_CONFIG, DEFAULT_JSON_CONFIG, type DataModelConfig, DataSource, type DataSourceConfig, type ExpressRequestWithMoose, type ExtractionResult, type JSONParsingConfig, type KafkaClientConfig, type Logger, MAX_RETRIES, MAX_RETRIES_PRODUCER, MAX_RETRY_TIME_MS, MOOSE_RUNTIME_ENV_PREFIX, MooseCache, MooseClient, type Producer, RETRY_FACTOR_PRODUCER, RETRY_INITIAL_TIME_MS, type StripDateIntersection, type TaskConfig, type TaskDefinition, type TaskFunction, antiCachePath, cliLog, compilerLog, createApi, createConsumptionApi, expressMiddleware, getClickhouseClient, getFileName, getKafkaClient, getKafkaProducer, getMooseClients, getMooseUtils, isValidCSVDelimiter, logError, mapTstoJs, mooseEnvSecrets, mooseRuntimeEnv, parseCSV, parseJSON, parseJSONWithDates };
+export { ACKs, ApiUtil, type CSVParsingConfig, CSV_DELIMITERS, type CliLogData, DEFAULT_CSV_CONFIG, DEFAULT_JSON_CONFIG, type DataModelConfig, DataSource, type DataSourceConfig, type ExpressRequestWithMoose, type ExtractionResult, type JSONParsingConfig, type KafkaClientConfig, type Logger, MAX_RETRIES, MAX_RETRIES_PRODUCER, MAX_RETRY_TIME_MS, MOOSE_RUNTIME_ENV_PREFIX, MooseCache, MooseClient, type Producer, RETRY_FACTOR_PRODUCER, RETRY_INITIAL_TIME_MS, type StripDateIntersection, type TaskConfig, type TaskDefinition, type TaskFunction, antiCachePath, cliLog, compilerLog, createApi, createConsumptionApi, createProducerConfig, expressMiddleware, getClickhouseClient, getFileName, getKafkaClient, getKafkaProducer, getMooseClients, getMooseUtils, isValidCSVDelimiter, logError, mapTstoJs, mooseEnvSecrets, mooseRuntimeEnv, parseCSV, parseJSON, parseJSONWithDates };

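The new declarations surface createProducerConfig and extend getKafkaProducer with an optional maxMessageBytes parameter. Below is a minimal sketch of how the new surface might be used; cfg and logger are assumed to be constructed elsewhere in the application, and the 1 MiB cap is an arbitrary illustration rather than a recommended value.

import { createProducerConfig, getKafkaProducer } from "@514labs/moose-lib";
import type { KafkaClientConfig, Logger } from "@514labs/moose-lib";

// Assumed to be provided by the surrounding application.
declare const cfg: KafkaClientConfig;
declare const logger: Logger;

async function buildProducer() {
  const oneMiB = 1024 * 1024;

  // Without a cap, "message.max.bytes" is simply omitted from the config.
  const baseConfig = createProducerConfig();

  // With a cap, the same value is passed through to the Kafka client settings.
  const cappedConfig = createProducerConfig(oneMiB);

  // The new optional third argument forwards the cap to the underlying producer;
  // the returned producer is already connected.
  const producer = await getKafkaProducer(cfg, logger, oneMiB);
  return { baseConfig, cappedConfig, producer };
}
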
package/dist/index.d.ts
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-
|
|
2
|
-
import { K as ApiUtil, a4 as MooseClient } from './index-
|
|
3
|
-
export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-
|
|
1
|
+
export { C as ClickHouseByteSize, q as ClickHouseCodec, j as ClickHouseDecimal, n as ClickHouseDefault, k as ClickHouseFixedStringSize, l as ClickHouseFloat, a as ClickHouseInt, m as ClickHouseJson, e as ClickHouseLineString, p as ClickHouseMaterialized, f as ClickHouseMultiLineString, h as ClickHouseMultiPolygon, b as ClickHouseNamedTuple, c as ClickHousePoint, g as ClickHousePolygon, i as ClickHousePrecision, d as ClickHouseRing, o as ClickHouseTTL, D as DateTime, r as DateTime64, t as DateTime64String, s as DateTimeString, E as Decimal, F as FixedString, u as Float32, v as Float64, w as Int16, x as Int32, y as Int64, I as Int8, J as JWT, K as Key, L as LowCardinality, z as UInt16, A as UInt32, B as UInt64, U as UInt8, W as WithDefault } from './browserCompatible-CoxWbneN.js';
|
|
2
|
+
import { K as ApiUtil, a4 as MooseClient } from './index-CNlTyF6R.js';
|
|
3
|
+
export { A as Aggregated, h as Api, i as ApiConfig, ad as ApiHelpers, a5 as Blocks, a6 as ClickHouseEngines, C as ConsumptionApi, ae as ConsumptionHelpers, N as ConsumptionUtil, e as DeadLetter, D as DeadLetterModel, f as DeadLetterQueue, l as ETLPipeline, m as ETLPipelineConfig, E as EgressConfig, F as FrameworkApp, Q as IdentifierBrandedString, I as IngestApi, g as IngestConfig, j as IngestPipeline, L as LifeCycle, M as MaterializedView, R as NonIdentifierBrandedString, a as OlapConfig, O as OlapTable, aa as QueryClient, X as RawValue, b as S3QueueTableSettings, S as SimpleAggregated, Z as Sql, k as SqlResource, c as Stream, d as StreamConfig, T as Task, U as Value, V as View, n as WebApp, o as WebAppConfig, p as WebAppHandler, W as Workflow, ab as WorkflowClient, a2 as createClickhouseParameter, a8 as createMaterializedView, a7 as dropView, x as getApi, w as getApis, v as getIngestApi, u as getIngestApis, z as getSqlResource, y as getSqlResources, t as getStream, s as getStreams, r as getTable, q as getTables, ac as getTemporalClient, a1 as getValueFromParameter, J as getWebApp, H as getWebApps, G as getWorkflow, B as getWorkflows, af as joinQueries, a3 as mapToClickHouseType, a9 as populateTable, P as quoteIdentifier, Y as sql, $ as toQuery, a0 as toQueryPreview, _ as toStaticQuery } from './index-CNlTyF6R.js';
|
|
4
4
|
import * as _clickhouse_client from '@clickhouse/client';
|
|
5
5
|
import { KafkaJS } from '@confluentinc/kafka-javascript';
|
|
6
6
|
import http from 'http';
|
|
@@ -44,6 +44,25 @@ declare const RETRY_INITIAL_TIME_MS = 100;
|
|
|
44
44
|
declare const MAX_RETRIES_PRODUCER = 150;
|
|
45
45
|
declare const RETRY_FACTOR_PRODUCER = 0.2;
|
|
46
46
|
declare const ACKs = -1;
|
|
47
|
+
/**
|
|
48
|
+
* Creates the base producer configuration for Kafka.
|
|
49
|
+
* Used by both the SDK stream publishing and streaming function workers.
|
|
50
|
+
*
|
|
51
|
+
* @param maxMessageBytes - Optional max message size in bytes (synced with topic config)
|
|
52
|
+
* @returns Producer configuration object for the Confluent Kafka client
|
|
53
|
+
*/
|
|
54
|
+
declare function createProducerConfig(maxMessageBytes?: number): {
|
|
55
|
+
"message.max.bytes"?: number | undefined;
|
|
56
|
+
kafkaJS: {
|
|
57
|
+
idempotent: boolean;
|
|
58
|
+
acks: number;
|
|
59
|
+
retry: {
|
|
60
|
+
retries: number;
|
|
61
|
+
maxRetryTime: number;
|
|
62
|
+
};
|
|
63
|
+
};
|
|
64
|
+
"linger.ms": number;
|
|
65
|
+
};
|
|
47
66
|
type KafkaClientConfig = {
|
|
48
67
|
clientId: string;
|
|
49
68
|
broker: string;
|
|
@@ -55,8 +74,12 @@ type KafkaClientConfig = {
|
|
|
55
74
|
/**
|
|
56
75
|
* Dynamically creates and connects a KafkaJS producer using the provided configuration.
|
|
57
76
|
* Returns a connected producer instance.
|
|
77
|
+
*
|
|
78
|
+
* @param cfg - Kafka client configuration
|
|
79
|
+
* @param logger - Logger instance
|
|
80
|
+
* @param maxMessageBytes - Optional max message size in bytes (synced with topic config)
|
|
58
81
|
*/
|
|
59
|
-
declare function getKafkaProducer(cfg: KafkaClientConfig, logger: Logger): Promise<Producer>;
|
|
82
|
+
declare function getKafkaProducer(cfg: KafkaClientConfig, logger: Logger, maxMessageBytes?: number): Promise<Producer>;
|
|
60
83
|
/**
|
|
61
84
|
* Interface for logging functionality
|
|
62
85
|
*/
|
|
@@ -531,4 +554,4 @@ type DataModelConfig<T> = Partial<{
|
|
|
531
554
|
parallelism?: number;
|
|
532
555
|
}>;
|
|
533
556
|
|
|
534
|
-
export { ACKs, ApiUtil, type CSVParsingConfig, CSV_DELIMITERS, type CliLogData, DEFAULT_CSV_CONFIG, DEFAULT_JSON_CONFIG, type DataModelConfig, DataSource, type DataSourceConfig, type ExpressRequestWithMoose, type ExtractionResult, type JSONParsingConfig, type KafkaClientConfig, type Logger, MAX_RETRIES, MAX_RETRIES_PRODUCER, MAX_RETRY_TIME_MS, MOOSE_RUNTIME_ENV_PREFIX, MooseCache, MooseClient, type Producer, RETRY_FACTOR_PRODUCER, RETRY_INITIAL_TIME_MS, type StripDateIntersection, type TaskConfig, type TaskDefinition, type TaskFunction, antiCachePath, cliLog, compilerLog, createApi, createConsumptionApi, expressMiddleware, getClickhouseClient, getFileName, getKafkaClient, getKafkaProducer, getMooseClients, getMooseUtils, isValidCSVDelimiter, logError, mapTstoJs, mooseEnvSecrets, mooseRuntimeEnv, parseCSV, parseJSON, parseJSONWithDates };
|
|
557
|
+
export { ACKs, ApiUtil, type CSVParsingConfig, CSV_DELIMITERS, type CliLogData, DEFAULT_CSV_CONFIG, DEFAULT_JSON_CONFIG, type DataModelConfig, DataSource, type DataSourceConfig, type ExpressRequestWithMoose, type ExtractionResult, type JSONParsingConfig, type KafkaClientConfig, type Logger, MAX_RETRIES, MAX_RETRIES_PRODUCER, MAX_RETRY_TIME_MS, MOOSE_RUNTIME_ENV_PREFIX, MooseCache, MooseClient, type Producer, RETRY_FACTOR_PRODUCER, RETRY_INITIAL_TIME_MS, type StripDateIntersection, type TaskConfig, type TaskDefinition, type TaskFunction, antiCachePath, cliLog, compilerLog, createApi, createConsumptionApi, createProducerConfig, expressMiddleware, getClickhouseClient, getFileName, getKafkaClient, getKafkaProducer, getMooseClients, getMooseUtils, isValidCSVDelimiter, logError, mapTstoJs, mooseEnvSecrets, mooseRuntimeEnv, parseCSV, parseJSON, parseJSONWithDates };
|
package/dist/index.js
CHANGED
@@ -42,6 +42,7 @@ __export(commons_exports, {
   antiCachePath: () => antiCachePath,
   cliLog: () => cliLog,
   compilerLog: () => compilerLog,
+  createProducerConfig: () => createProducerConfig,
   getClickhouseClient: () => getClickhouseClient,
   getFileName: () => getFileName,
   getKafkaClient: () => getKafkaClient,
@@ -64,18 +65,25 @@ function isTruthy(value) {
 function mapTstoJs(filePath) {
   return filePath.replace(/\.ts$/, ".js").replace(/\.cts$/, ".cjs").replace(/\.mts$/, ".mjs");
 }
-
-
-  const producer = kafka.producer({
+function createProducerConfig(maxMessageBytes) {
+  return {
     kafkaJS: {
-      idempotent:
+      idempotent: false,
+      // Not needed for at-least-once delivery
       acks: ACKs,
       retry: {
         retries: MAX_RETRIES_PRODUCER,
         maxRetryTime: MAX_RETRY_TIME_MS
       }
-    }
-
+    },
+    "linger.ms": 0,
+    // Send immediately - batching happens at application level
+    ...maxMessageBytes && { "message.max.bytes": maxMessageBytes }
+  };
+}
+async function getKafkaProducer(cfg, logger, maxMessageBytes) {
+  const kafka = await getKafkaClient(cfg, logger);
+  const producer = kafka.producer(createProducerConfig(maxMessageBytes));
   await producer.connect();
   return producer;
 }
@@ -417,6 +425,7 @@ __export(index_exports, {
   createClickhouseParameter: () => createClickhouseParameter,
   createConsumptionApi: () => createConsumptionApi,
   createMaterializedView: () => createMaterializedView,
+  createProducerConfig: () => createProducerConfig,
   dropView: () => dropView,
   expressMiddleware: () => expressMiddleware,
   getApi: () => getApi,
@@ -716,7 +725,6 @@ var ClickHouseEngines = /* @__PURE__ */ ((ClickHouseEngines2) => {
   ClickHouseEngines2["Buffer"] = "Buffer";
   ClickHouseEngines2["Distributed"] = "Distributed";
   ClickHouseEngines2["IcebergS3"] = "IcebergS3";
-  ClickHouseEngines2["Kafka"] = "Kafka";
   ClickHouseEngines2["ReplicatedMergeTree"] = "ReplicatedMergeTree";
   ClickHouseEngines2["ReplicatedReplacingMergeTree"] = "ReplicatedReplacingMergeTree";
   ClickHouseEngines2["ReplicatedAggregatingMergeTree"] = "ReplicatedAggregatingMergeTree";
@@ -3399,6 +3407,7 @@ var DataSource = class {
   createClickhouseParameter,
   createConsumptionApi,
   createMaterializedView,
+  createProducerConfig,
   dropView,
   expressMiddleware,
   getApi,

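The emitted createProducerConfig builds the optional "message.max.bytes" entry with a conditional object spread: spreading a falsy value (undefined or 0) into an object literal is a no-op, so the key is only present when a cap is supplied. A small standalone sketch of the same idiom follows; withOptionalCap is a hypothetical name used only for illustration.

function withOptionalCap(maxMessageBytes?: number) {
  return {
    "linger.ms": 0,
    // Spreading `undefined` (or 0) adds nothing; spreading the object adds the key.
    ...(maxMessageBytes && { "message.max.bytes": maxMessageBytes }),
  };
}

withOptionalCap();          // { "linger.ms": 0 }
withOptionalCap(1_048_576); // { "linger.ms": 0, "message.max.bytes": 1048576 }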