@514labs/moose-lib 0.6.252-ci-4-gb8a461bd → 0.6.252-ci-2-g41538689

This diff compares the publicly available contents of the two package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
@@ -26,6 +26,22 @@ function isTruthy(value) {
     return false;
   }
 }
+function createProducerConfig(maxMessageBytes) {
+  return {
+    kafkaJS: {
+      idempotent: false,
+      // Not needed for at-least-once delivery
+      acks: ACKs,
+      retry: {
+        retries: MAX_RETRIES_PRODUCER,
+        maxRetryTime: MAX_RETRY_TIME_MS
+      }
+    },
+    "linger.ms": 0,
+    // Send immediately - batching happens at application level
+    ...maxMessageBytes && { "message.max.bytes": maxMessageBytes }
+  };
+}
 var Kafka, compilerLog, getClickhouseClient, cliLog, MAX_RETRIES, MAX_RETRY_TIME_MS, RETRY_INITIAL_TIME_MS, MAX_RETRIES_PRODUCER, ACKs, parseBrokerString, logError, buildSaslConfig, getKafkaClient;
 var init_commons = __esm({
   "src/commons.ts"() {
@@ -1309,18 +1325,6 @@ function convertIcebergS3EngineConfig(config) {
     compression: config.compression
   };
 }
-function convertKafkaEngineConfig(config) {
-  if (!("engine" in config) || config.engine !== "Kafka" /* Kafka */) {
-    return void 0;
-  }
-  return {
-    engine: "Kafka",
-    brokerList: config.brokerList,
-    topicList: config.topicList,
-    groupName: config.groupName,
-    format: config.format
-  };
-}
 function convertTableConfigToEngineConfig(config) {
   const engine = extractEngineValue(config);
   const basicConfig = convertBasicEngineConfig(engine, config);
@@ -1346,9 +1350,6 @@ function convertTableConfigToEngineConfig(config) {
   if (engine === "IcebergS3" /* IcebergS3 */) {
     return convertIcebergS3EngineConfig(config);
   }
-  if (engine === "Kafka" /* Kafka */) {
-    return convertKafkaEngineConfig(config);
-  }
   return void 0;
 }
 var toInfraMap = (registry) => {
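
Note: these two hunks drop the Kafka table engine from the table-config conversion path; convertTableConfigToEngineConfig now falls through and returns undefined for Kafka-engine tables. For reference, the removed converter produced a record of this shape, reconstructed here as a TypeScript interface (the field types and comments are assumptions, since the bundle carries no annotations):

    // Output shape of the deleted convertKafkaEngineConfig. Only the field
    // names come from the diff above; the string types are assumed.
    interface KafkaEngineConfig {
      engine: "Kafka";
      brokerList: string; // e.g. "host1:9092,host2:9092"
      topicList: string;  // topics the ClickHouse Kafka engine reads from
      groupName: string;  // consumer group name
      format: string;     // e.g. "JSONEachRow"
    }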
@@ -1818,156 +1819,6 @@ var MAX_RETRIES_CONSUMER = 150;
 var SESSION_TIMEOUT_CONSUMER = 3e4;
 var HEARTBEAT_INTERVAL_CONSUMER = 3e3;
 var DEFAULT_MAX_STREAMING_CONCURRENCY = 100;
-var KAFKAJS_BYTE_MESSAGE_OVERHEAD = 500;
-var isMessageTooLargeError = (error) => {
-  if (KafkaJS2.isKafkaJSError && error instanceof Error && KafkaJS2.isKafkaJSError(error)) {
-    return error.type === "ERR_MSG_SIZE_TOO_LARGE" || error.code === 10 || error.cause !== void 0 && isMessageTooLargeError(error.cause);
-  }
-  if (error && typeof error === "object") {
-    const err = error;
-    return err.type === "ERR_MSG_SIZE_TOO_LARGE" || err.code === 10 || err.cause !== void 0 && isMessageTooLargeError(err.cause);
-  }
-  return false;
-};
-var splitBatch = (messages, maxChunkSize) => {
-  if (messages.length <= 1) {
-    return [messages];
-  }
-  const chunks = [];
-  let currentChunk = [];
-  let currentSize = 0;
-  for (const message of messages) {
-    const messageSize = Buffer2.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
-    if (currentSize + messageSize > maxChunkSize && currentChunk.length > 0) {
-      chunks.push(currentChunk);
-      currentChunk = [message];
-      currentSize = messageSize;
-    } else {
-      currentChunk.push(message);
-      currentSize += messageSize;
-    }
-  }
-  if (currentChunk.length > 0) {
-    chunks.push(currentChunk);
-  }
-  return chunks;
-};
-var sendChunkWithRetry = async (logger2, targetTopic, producer, messages, currentMaxSize, maxRetries = 3) => {
-  const currentMessages = messages;
-  let attempts = 0;
-  while (attempts < maxRetries) {
-    try {
-      await producer.send({
-        topic: targetTopic.name,
-        messages: currentMessages
-      });
-      logger2.log(
-        `Successfully sent ${currentMessages.length} messages to ${targetTopic.name}`
-      );
-      return;
-    } catch (error) {
-      if (isMessageTooLargeError(error) && currentMessages.length > 1) {
-        logger2.warn(
-          `Got MESSAGE_TOO_LARGE error, splitting batch of ${currentMessages.length} messages and retrying (${maxRetries - attempts} attempts left)`
-        );
-        const newMaxSize = Math.floor(currentMaxSize / 2);
-        const splitChunks = splitBatch(currentMessages, newMaxSize);
-        for (const chunk of splitChunks) {
-          await sendChunkWithRetry(
-            logger2,
-            targetTopic,
-            producer,
-            chunk,
-            newMaxSize,
-            // this error does not count as one failed attempt
-            maxRetries - attempts
-          );
-        }
-        return;
-      } else {
-        attempts++;
-        if (attempts >= maxRetries) {
-          let messagesHandledByDLQ = 0;
-          let messagesWithoutDLQ = 0;
-          const dlqErrors = [];
-          for (const failedMessage of currentMessages) {
-            const dlqTopic = failedMessage.dlq;
-            if (dlqTopic && failedMessage.originalValue) {
-              const dlqTopicName = dlqTopic.name;
-              const deadLetterRecord = {
-                originalRecord: {
-                  ...failedMessage.originalValue,
-                  // Include original Kafka message metadata
-                  __sourcePartition: failedMessage.originalMessage.partition,
-                  __sourceOffset: failedMessage.originalMessage.offset,
-                  __sourceTimestamp: failedMessage.originalMessage.timestamp
-                },
-                errorMessage: error instanceof Error ? error.message : String(error),
-                errorType: error instanceof Error ? error.constructor.name : "Unknown",
-                failedAt: /* @__PURE__ */ new Date(),
-                source: "transform"
-              };
-              cliLog({
-                action: "DeadLetter",
-                message: `Sending failed message to DLQ ${dlqTopicName}: ${error instanceof Error ? error.message : String(error)}`,
-                message_type: "Error"
-              });
-              try {
-                await producer.send({
-                  topic: dlqTopicName,
-                  messages: [{ value: JSON.stringify(deadLetterRecord) }]
-                });
-                logger2.log(`Sent failed message to DLQ ${dlqTopicName}`);
-                messagesHandledByDLQ++;
-              } catch (dlqError) {
-                const errorMsg = `Failed to send message to DLQ: ${dlqError}`;
-                logger2.error(errorMsg);
-                dlqErrors.push(errorMsg);
-              }
-            } else if (!dlqTopic) {
-              messagesWithoutDLQ++;
-              logger2.warn(
-                `Cannot send to DLQ: no DLQ configured for message (batch has mixed DLQ configurations)`
-              );
-            } else {
-              messagesWithoutDLQ++;
-              logger2.warn(
-                `Cannot send to DLQ: original message value not available`
-              );
-            }
-          }
-          const allMessagesHandled = messagesHandledByDLQ === currentMessages.length && messagesWithoutDLQ === 0 && dlqErrors.length === 0;
-          if (allMessagesHandled) {
-            logger2.log(
-              `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, not throwing original error`
-            );
-            return;
-          }
-          if (messagesWithoutDLQ > 0) {
-            logger2.error(
-              `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured`
-            );
-          }
-          if (dlqErrors.length > 0) {
-            logger2.error(
-              `Some messages failed to send to DLQ: ${dlqErrors.join(", ")}`
-            );
-          }
-          if (messagesHandledByDLQ > 0) {
-            logger2.warn(
-              `Partial DLQ success: ${messagesHandledByDLQ}/${currentMessages.length} message(s) sent to DLQ, but throwing due to incomplete batch handling`
-            );
-          }
-          throw error;
-        }
-        logger2.warn(
-          `Send ${currentMessages.length} messages failed (attempt ${attempts}/${maxRetries}), retrying: ${error}`
-        );
-        await new Promise((resolve2) => setTimeout(resolve2, 100 * attempts));
-      }
-    }
-  }
-};
 var MAX_STREAMING_CONCURRENCY = process3.env.MAX_STREAMING_CONCURRENCY ? parseInt(process3.env.MAX_STREAMING_CONCURRENCY, 10) : DEFAULT_MAX_STREAMING_CONCURRENCY;
 var metricsLog = (log) => {
   const req = http3.request({
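
Note: this hunk deletes the whole application-level oversize-handling layer: the fixed 500-byte per-message overhead estimate, the greedy size-based splitBatch, and the recursive sendChunkWithRetry that halved the chunk size and resent on MESSAGE_TOO_LARGE errors. With createProducerConfig above, the size cap is instead handed to the Kafka client via "message.max.bytes". For reference, the size estimate the deleted code batched against, condensed into a runnable TypeScript sketch (the constant is copied from the removed code; the 500 bytes were presumably an allowance for keys, headers, and protocol framing):

    // Per-message size estimate used by the deleted splitBatch: UTF-8 payload
    // length plus a fixed 500-byte allowance. Chunks were closed greedily as
    // soon as the next message would push the running total past the cap.
    const KAFKAJS_BYTE_MESSAGE_OVERHEAD = 500;

    function estimateMessageSize(value: string): number {
      return Buffer.byteLength(value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
    }

    console.log(estimateMessageSize('{"id":1}')); // 8 + 500 = 508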
@@ -2113,57 +1964,95 @@ var handleMessage = async (logger2, streamingFunctionWithConfigList, message, pr
   }
   return void 0;
 };
-var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
-  try {
-    let chunk = [];
-    let chunkSize = 0;
-    const maxMessageSize = targetTopic.max_message_bytes || 1024 * 1024;
-    for (const message of messages) {
-      const messageSize = Buffer2.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
-      if (chunkSize + messageSize > maxMessageSize) {
-        logger2.log(
-          `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
-        );
-        await sendChunkWithRetry(
-          logger2,
-          targetTopic,
-          producer,
-          chunk,
-          maxMessageSize
-        );
-        logger2.log(
-          `Sent ${chunk.length} transformed records to ${targetTopic.name}`
-        );
-        chunk = [message];
-        chunkSize = messageSize;
-      } else {
-        chunk.push(message);
-        metrics.bytes += Buffer2.byteLength(message.value, "utf8");
-        chunkSize += messageSize;
+var handleDLQForFailedMessages = async (logger2, producer, messages, error) => {
+  let messagesHandledByDLQ = 0;
+  let messagesWithoutDLQ = 0;
+  let dlqErrors = 0;
+  for (const msg of messages) {
+    if (msg.dlq && msg.originalValue) {
+      const deadLetterRecord = {
+        originalRecord: {
+          ...msg.originalValue,
+          // Include original Kafka message metadata
+          __sourcePartition: msg.originalMessage.partition,
+          __sourceOffset: msg.originalMessage.offset,
+          __sourceTimestamp: msg.originalMessage.timestamp
+        },
+        errorMessage: error instanceof Error ? error.message : String(error),
+        errorType: error instanceof Error ? error.constructor.name : "Unknown",
+        failedAt: /* @__PURE__ */ new Date(),
+        source: "transform"
+      };
+      cliLog({
+        action: "DeadLetter",
+        message: `Sending failed message to DLQ ${msg.dlq.name}: ${error instanceof Error ? error.message : String(error)}`,
+        message_type: "Error"
+      });
+      try {
+        await producer.send({
+          topic: msg.dlq.name,
+          messages: [{ value: JSON.stringify(deadLetterRecord) }]
+        });
+        logger2.log(`Sent failed message to DLQ ${msg.dlq.name}`);
+        messagesHandledByDLQ++;
+      } catch (dlqError) {
+        logger2.error(`Failed to send to DLQ: ${dlqError}`);
+        dlqErrors++;
       }
+    } else if (!msg.dlq) {
+      messagesWithoutDLQ++;
+      logger2.warn(`Cannot send to DLQ: no DLQ configured for message`);
+    } else {
+      messagesWithoutDLQ++;
+      logger2.warn(`Cannot send to DLQ: original message value not available`);
     }
-    metrics.count_out += chunk.length;
-    if (chunk.length > 0) {
-      logger2.log(
-        `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
-      );
-      await sendChunkWithRetry(
-        logger2,
-        targetTopic,
-        producer,
-        chunk,
-        maxMessageSize
-      );
-      logger2.log(
-        `Sent final ${chunk.length} transformed data to ${targetTopic.name}`
+  }
+  const allMessagesHandled = messagesHandledByDLQ === messages.length && messagesWithoutDLQ === 0 && dlqErrors === 0;
+  if (allMessagesHandled) {
+    logger2.log(
+      `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, suppressing original error`
+    );
+  } else if (messagesHandledByDLQ > 0) {
+    logger2.warn(
+      `Partial DLQ success: ${messagesHandledByDLQ}/${messages.length} message(s) sent to DLQ`
+    );
+    if (messagesWithoutDLQ > 0) {
+      logger2.error(
+        `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured or missing original value`
       );
     }
+    if (dlqErrors > 0) {
+      logger2.error(`${dlqErrors} message(s) failed to send to DLQ`);
+    }
+  }
+  return allMessagesHandled;
+};
+var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
+  if (messages.length === 0) return;
+  for (const msg of messages) {
+    metrics.bytes += Buffer2.byteLength(msg.value, "utf8");
+  }
+  metrics.count_out += messages.length;
+  try {
+    await producer.send({
+      topic: targetTopic.name,
+      messages
+    });
+    logger2.log(`Sent ${messages.length} messages to ${targetTopic.name}`);
   } catch (e) {
     logger2.error(`Failed to send transformed data`);
     if (e instanceof Error) {
       logError(logger2, e);
     }
-    throw e;
+    const allHandledByDLQ = await handleDLQForFailedMessages(
+      logger2,
+      producer,
+      messages,
+      e
+    );
+    if (!allHandledByDLQ) {
+      throw e;
+    }
   }
 };
 var sendMessageMetrics = (logger2, metrics) => {
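
Note: the DLQ fallback that previously lived inside sendChunkWithRetry survives as the standalone handleDLQForFailedMessages, while sendMessages shrinks to a single producer.send per batch. Send retries are now delegated to the producer's own retry configuration, and the original error is suppressed only when every failed message was written to a dead-letter queue. The dead-letter envelope itself is unchanged; reconstructed as a TypeScript interface (the types are assumptions, since the bundle is untyped):

    // Dead-letter envelope written by handleDLQForFailedMessages.
    interface DeadLetterRecord {
      originalRecord: Record<string, unknown> & {
        __sourcePartition: number; // source-topic metadata kept for replay
        __sourceOffset: string;
        __sourceTimestamp: string;
      };
      errorMessage: string;
      errorType: string; // the error's constructor name, or "Unknown"
      failedAt: Date;
      source: "transform";
    }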
@@ -2405,16 +2294,10 @@ var runStreamingFunctions = async (args) => {
       fromBeginning: true
     }
   });
-  const producer = kafka.producer({
-    kafkaJS: {
-      idempotent: true,
-      acks: ACKs,
-      retry: {
-        retries: MAX_RETRIES_PRODUCER,
-        maxRetryTime: MAX_RETRY_TIME_MS
-      }
-    }
-  });
+  const maxMessageBytes = args.targetTopic?.max_message_bytes || 1024 * 1024;
+  const producer = kafka.producer(
+    createProducerConfig(maxMessageBytes)
+  );
   try {
     logger2.log("Starting producer...");
     await startProducer(logger2, producer);
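
Note: the streaming-function producer is now built from createProducerConfig, deriving its message size cap from the target topic and falling back to 1024 * 1024 bytes when the topic declares no limit (1 MiB roughly matches Kafka's default broker-side message.max.bytes). A small sketch of the fallback (the function name is illustrative, not from the package):

    // Resolve the producer's message size cap from an optional topic limit.
    // The diff uses `||` rather than `??`, so an explicit 0 would also fall
    // back to the 1 MiB default.
    const DEFAULT_MAX_MESSAGE_BYTES = 1024 * 1024; // 1 MiB

    function resolveMaxMessageBytes(topicMax?: number): number {
      return topicMax || DEFAULT_MAX_MESSAGE_BYTES;
    }

    console.log(resolveMaxMessageBytes());        // 1048576
    console.log(resolveMaxMessageBytes(4194304)); // 4194304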