@514labs/moose-lib 0.6.256-ci-1-g6ca86038 → 0.6.256-ci-4-g0ca62054

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -26,6 +26,22 @@ function isTruthy(value) {
     return false;
   }
 }
+function createProducerConfig(maxMessageBytes) {
+  return {
+    kafkaJS: {
+      idempotent: false,
+      // Not needed for at-least-once delivery
+      acks: ACKs,
+      retry: {
+        retries: MAX_RETRIES_PRODUCER,
+        maxRetryTime: MAX_RETRY_TIME_MS
+      }
+    },
+    "linger.ms": 0,
+    // This is to make sure at least once delivery with immediate feedback on the send
+    ...maxMessageBytes && { "message.max.bytes": maxMessageBytes }
+  };
+}
 var Kafka, compilerLog, getClickhouseClient, cliLog, MAX_RETRIES, MAX_RETRY_TIME_MS, RETRY_INITIAL_TIME_MS, MAX_RETRIES_PRODUCER, ACKs, parseBrokerString, logError, buildSaslConfig, getKafkaClient;
 var init_commons = __esm({
   "src/commons.ts"() {
@@ -1803,156 +1819,7 @@ var MAX_RETRIES_CONSUMER = 150;
 var SESSION_TIMEOUT_CONSUMER = 3e4;
 var HEARTBEAT_INTERVAL_CONSUMER = 3e3;
 var DEFAULT_MAX_STREAMING_CONCURRENCY = 100;
-var KAFKAJS_BYTE_MESSAGE_OVERHEAD = 500;
-var isMessageTooLargeError = (error) => {
-  if (KafkaJS2.isKafkaJSError && error instanceof Error && KafkaJS2.isKafkaJSError(error)) {
-    return error.type === "ERR_MSG_SIZE_TOO_LARGE" || error.code === 10 || error.cause !== void 0 && isMessageTooLargeError(error.cause);
-  }
-  if (error && typeof error === "object") {
-    const err = error;
-    return err.type === "ERR_MSG_SIZE_TOO_LARGE" || err.code === 10 || err.cause !== void 0 && isMessageTooLargeError(err.cause);
-  }
-  return false;
-};
-var splitBatch = (messages, maxChunkSize) => {
-  if (messages.length <= 1) {
-    return [messages];
-  }
-  const chunks = [];
-  let currentChunk = [];
-  let currentSize = 0;
-  for (const message of messages) {
-    const messageSize = Buffer2.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
-    if (currentSize + messageSize > maxChunkSize && currentChunk.length > 0) {
-      chunks.push(currentChunk);
-      currentChunk = [message];
-      currentSize = messageSize;
-    } else {
-      currentChunk.push(message);
-      currentSize += messageSize;
-    }
-  }
-  if (currentChunk.length > 0) {
-    chunks.push(currentChunk);
-  }
-  return chunks;
-};
-var sendChunkWithRetry = async (logger2, targetTopic, producer, messages, currentMaxSize, maxRetries = 3) => {
-  const currentMessages = messages;
-  let attempts = 0;
-  while (attempts < maxRetries) {
-    try {
-      await producer.send({
-        topic: targetTopic.name,
-        messages: currentMessages
-      });
-      logger2.log(
-        `Successfully sent ${currentMessages.length} messages to ${targetTopic.name}`
-      );
-      return;
-    } catch (error) {
-      if (isMessageTooLargeError(error) && currentMessages.length > 1) {
-        logger2.warn(
-          `Got MESSAGE_TOO_LARGE error, splitting batch of ${currentMessages.length} messages and retrying (${maxRetries - attempts} attempts left)`
-        );
-        const newMaxSize = Math.floor(currentMaxSize / 2);
-        const splitChunks = splitBatch(currentMessages, newMaxSize);
-        for (const chunk of splitChunks) {
-          await sendChunkWithRetry(
-            logger2,
-            targetTopic,
-            producer,
-            chunk,
-            newMaxSize,
-            // this error does not count as one failed attempt
-            maxRetries - attempts
-          );
-        }
-        return;
-      } else {
-        attempts++;
-        if (attempts >= maxRetries) {
-          let messagesHandledByDLQ = 0;
-          let messagesWithoutDLQ = 0;
-          const dlqErrors = [];
-          for (const failedMessage of currentMessages) {
-            const dlqTopic = failedMessage.dlq;
-            if (dlqTopic && failedMessage.originalValue) {
-              const dlqTopicName = dlqTopic.name;
-              const deadLetterRecord = {
-                originalRecord: {
-                  ...failedMessage.originalValue,
-                  // Include original Kafka message metadata
-                  __sourcePartition: failedMessage.originalMessage.partition,
-                  __sourceOffset: failedMessage.originalMessage.offset,
-                  __sourceTimestamp: failedMessage.originalMessage.timestamp
-                },
-                errorMessage: error instanceof Error ? error.message : String(error),
-                errorType: error instanceof Error ? error.constructor.name : "Unknown",
-                failedAt: /* @__PURE__ */ new Date(),
-                source: "transform"
-              };
-              cliLog({
-                action: "DeadLetter",
-                message: `Sending failed message to DLQ ${dlqTopicName}: ${error instanceof Error ? error.message : String(error)}`,
-                message_type: "Error"
-              });
-              try {
-                await producer.send({
-                  topic: dlqTopicName,
-                  messages: [{ value: JSON.stringify(deadLetterRecord) }]
-                });
-                logger2.log(`Sent failed message to DLQ ${dlqTopicName}`);
-                messagesHandledByDLQ++;
-              } catch (dlqError) {
-                const errorMsg = `Failed to send message to DLQ: ${dlqError}`;
-                logger2.error(errorMsg);
-                dlqErrors.push(errorMsg);
-              }
-            } else if (!dlqTopic) {
-              messagesWithoutDLQ++;
-              logger2.warn(
-                `Cannot send to DLQ: no DLQ configured for message (batch has mixed DLQ configurations)`
-              );
-            } else {
-              messagesWithoutDLQ++;
-              logger2.warn(
-                `Cannot send to DLQ: original message value not available`
-              );
-            }
-          }
-          const allMessagesHandled = messagesHandledByDLQ === currentMessages.length && messagesWithoutDLQ === 0 && dlqErrors.length === 0;
-          if (allMessagesHandled) {
-            logger2.log(
-              `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, not throwing original error`
-            );
-            return;
-          }
-          if (messagesWithoutDLQ > 0) {
-            logger2.error(
-              `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured`
-            );
-          }
-          if (dlqErrors.length > 0) {
-            logger2.error(
-              `Some messages failed to send to DLQ: ${dlqErrors.join(", ")}`
-            );
-          }
-          if (messagesHandledByDLQ > 0) {
-            logger2.warn(
-              `Partial DLQ success: ${messagesHandledByDLQ}/${currentMessages.length} message(s) sent to DLQ, but throwing due to incomplete batch handling`
-            );
-          }
-          throw error;
-        }
-        logger2.warn(
-          `Send ${currentMessages.length} messages failed (attempt ${attempts}/${maxRetries}), retrying: ${error}`
-        );
-        await new Promise((resolve2) => setTimeout(resolve2, 100 * attempts));
-      }
-    }
-  }
-};
+var CONSUMER_MAX_BATCH_SIZE = 1e3;
 var MAX_STREAMING_CONCURRENCY = process3.env.MAX_STREAMING_CONCURRENCY ? parseInt(process3.env.MAX_STREAMING_CONCURRENCY, 10) : DEFAULT_MAX_STREAMING_CONCURRENCY;
 var metricsLog = (log) => {
   const req = http3.request({
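
The block removed above was the client-side answer to MESSAGE_TOO_LARGE errors: detect the error via isMessageTooLargeError, halve the byte budget, re-chunk greedily with splitBatch (each message padded by a 500-byte overhead estimate), and recurse through sendChunkWithRetry. The replacement bounds batches at the source instead: the consumer now hands over at most CONSUMER_MAX_BATCH_SIZE (1000) messages per batch, and the producer's "message.max.bytes" is raised to the target topic's limit (both visible in the last hunk), so oversized sends should be rare and fall through to the dead-letter path rather than being re-split. A back-of-envelope check of that assumption, with hypothetical record sizes:

    // Assumed workload numbers, not taken from the diff.
    const CONSUMER_MAX_BATCH_SIZE = 1e3;   // messages per consumer batch
    const maxMessageBytes = 1024 * 1024;   // producer limit (1 MiB fallback)
    const avgRecordBytes = 512;            // hypothetical average record size

    const worstCaseBatchBytes = CONSUMER_MAX_BATCH_SIZE * avgRecordBytes;
    console.log(worstCaseBatchBytes <= maxMessageBytes); // true: 512000 <= 1048576
    // Records averaging much more than ~1 KiB could still trip the limit;
    // such batches now fail into the DLQ handling below instead of being split.
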
@@ -2098,57 +1965,95 @@ var handleMessage = async (logger2, streamingFunctionWithConfigList, message, pr
   }
   return void 0;
 };
-var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
-  try {
-    let chunk = [];
-    let chunkSize = 0;
-    const maxMessageSize = targetTopic.max_message_bytes || 1024 * 1024;
-    for (const message of messages) {
-      const messageSize = Buffer2.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
-      if (chunkSize + messageSize > maxMessageSize) {
-        logger2.log(
-          `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
-        );
-        await sendChunkWithRetry(
-          logger2,
-          targetTopic,
-          producer,
-          chunk,
-          maxMessageSize
-        );
-        logger2.log(
-          `Sent ${chunk.length} transformed records to ${targetTopic.name}`
-        );
-        chunk = [message];
-        chunkSize = messageSize;
-      } else {
-        chunk.push(message);
-        metrics.bytes += Buffer2.byteLength(message.value, "utf8");
-        chunkSize += messageSize;
+var handleDLQForFailedMessages = async (logger2, producer, messages, error) => {
+  let messagesHandledByDLQ = 0;
+  let messagesWithoutDLQ = 0;
+  let dlqErrors = 0;
+  for (const msg of messages) {
+    if (msg.dlq && msg.originalValue) {
+      const deadLetterRecord = {
+        originalRecord: {
+          ...msg.originalValue,
+          // Include original Kafka message metadata
+          __sourcePartition: msg.originalMessage.partition,
+          __sourceOffset: msg.originalMessage.offset,
+          __sourceTimestamp: msg.originalMessage.timestamp
+        },
+        errorMessage: error instanceof Error ? error.message : String(error),
+        errorType: error instanceof Error ? error.constructor.name : "Unknown",
+        failedAt: /* @__PURE__ */ new Date(),
+        source: "transform"
+      };
+      cliLog({
+        action: "DeadLetter",
+        message: `Sending failed message to DLQ ${msg.dlq.name}: ${error instanceof Error ? error.message : String(error)}`,
+        message_type: "Error"
+      });
+      try {
+        await producer.send({
+          topic: msg.dlq.name,
+          messages: [{ value: JSON.stringify(deadLetterRecord) }]
+        });
+        logger2.log(`Sent failed message to DLQ ${msg.dlq.name}`);
+        messagesHandledByDLQ++;
+      } catch (dlqError) {
+        logger2.error(`Failed to send to DLQ: ${dlqError}`);
+        dlqErrors++;
       }
+    } else if (!msg.dlq) {
+      messagesWithoutDLQ++;
+      logger2.warn(`Cannot send to DLQ: no DLQ configured for message`);
+    } else {
+      messagesWithoutDLQ++;
+      logger2.warn(`Cannot send to DLQ: original message value not available`);
     }
-    metrics.count_out += chunk.length;
-    if (chunk.length > 0) {
-      logger2.log(
-        `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
-      );
-      await sendChunkWithRetry(
-        logger2,
-        targetTopic,
-        producer,
-        chunk,
-        maxMessageSize
-      );
-      logger2.log(
-        `Sent final ${chunk.length} transformed data to ${targetTopic.name}`
+  }
+  const allMessagesHandled = messagesHandledByDLQ === messages.length && messagesWithoutDLQ === 0 && dlqErrors === 0;
+  if (allMessagesHandled) {
+    logger2.log(
+      `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, suppressing original error`
+    );
+  } else if (messagesHandledByDLQ > 0) {
+    logger2.warn(
+      `Partial DLQ success: ${messagesHandledByDLQ}/${messages.length} message(s) sent to DLQ`
+    );
+    if (messagesWithoutDLQ > 0) {
+      logger2.error(
+        `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured or missing original value`
       );
     }
+    if (dlqErrors > 0) {
+      logger2.error(`${dlqErrors} message(s) failed to send to DLQ`);
+    }
+  }
+  return allMessagesHandled;
+};
+var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
+  if (messages.length === 0) return;
+  try {
+    await producer.send({
+      topic: targetTopic.name,
+      messages
+    });
+    for (const msg of messages) {
+      metrics.bytes += Buffer2.byteLength(msg.value, "utf8");
+    }
+    metrics.count_out += messages.length;
+    logger2.log(`Sent ${messages.length} messages to ${targetTopic.name}`);
   } catch (e) {
     logger2.error(`Failed to send transformed data`);
     if (e instanceof Error) {
       logError(logger2, e);
     }
-    throw e;
+    const allHandledByDLQ = await handleDLQForFailedMessages(
+      logger2,
+      producer,
+      messages,
+      e
+    );
+    if (!allHandledByDLQ) {
+      throw e;
+    }
   }
 };
 var sendMessageMetrics = (logger2, metrics) => {
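
Two behavioral points in the rewritten send path are easy to miss. First, sendMessages now sends the whole batch in a single producer.send call, with no size-based chunking. Second, the original send error is suppressed only when handleDLQForFailedMessages returns true, i.e. when every failed message had a DLQ configured, still carried its originalValue, and the DLQ write itself succeeded; any partial outcome rethrows so the batch is not silently dropped. For reference, the dead-letter payload has this shape (a sketch inferred from deadLetterRecord above; field names are verbatim, the types are assumptions based on how the values are produced):

    interface DeadLetterRecord {
      originalRecord: Record<string, unknown> & {
        __sourcePartition: number; // partition of the consumed message
        __sourceOffset: string;    // offset as reported by the consumer
        __sourceTimestamp: string; // timestamp of the original message
      };
      errorMessage: string;        // error.message, or String(error)
      errorType: string;           // error constructor name, or "Unknown"
      failedAt: Date;              // serialized by JSON.stringify on send
      source: "transform";
    }
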
@@ -2388,18 +2293,13 @@ var runStreamingFunctions = async (args) => {
       autoCommit: true,
       autoCommitInterval: AUTO_COMMIT_INTERVAL_MS,
       fromBeginning: true
-    }
-  });
-  const producer = kafka.producer({
-    kafkaJS: {
-      idempotent: true,
-      acks: ACKs,
-      retry: {
-        retries: MAX_RETRIES_PRODUCER,
-        maxRetryTime: MAX_RETRY_TIME_MS
-      }
-    }
+    },
+    "js.consumer.max.batch.size": CONSUMER_MAX_BATCH_SIZE
   });
+  const maxMessageBytes = args.targetTopic?.max_message_bytes || 1024 * 1024;
+  const producer = kafka.producer(
+    createProducerConfig(maxMessageBytes)
+  );
   try {
     logger2.log("Starting producer...");
     await startProducer(logger2, producer);
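
Note the delivery-guarantee change hiding in this last hunk: the old inline producer config set idempotent: true, while createProducerConfig (first hunk) sets idempotent: false, so broker-level retries can now introduce duplicates; together with acks and retries this is the at-least-once behavior the new comments describe. A condensed sketch of the new wiring (kafka, CONSUMER_MAX_BATCH_SIZE and createProducerConfig are the names from this diff; groupId and the topic object are hypothetical stand-ins):

    const consumer = kafka.consumer({
      kafkaJS: {
        groupId: "streaming-function", // hypothetical
        autoCommit: true,
        fromBeginning: true
      },
      // cap eachBatch deliveries so single producer.send calls stay bounded
      "js.consumer.max.batch.size": CONSUMER_MAX_BATCH_SIZE
    });

    const targetTopic = { name: "out", max_message_bytes: 2 * 1024 * 1024 }; // hypothetical
    const producer = kafka.producer(
      createProducerConfig(targetTopic.max_message_bytes || 1024 * 1024)
    );
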