@514labs/moose-lib 0.6.252-ci-2-g41538689 → 0.6.252-ci-3-g37e54b29

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -39,22 +39,6 @@ function isTruthy(value) {
  return false;
  }
  }
- function createProducerConfig(maxMessageBytes) {
- return {
- kafkaJS: {
- idempotent: false,
- // Not needed for at-least-once delivery
- acks: ACKs,
- retry: {
- retries: MAX_RETRIES_PRODUCER,
- maxRetryTime: MAX_RETRY_TIME_MS
- }
- },
- "linger.ms": 0,
- // Send immediately - batching happens at application level
- ...maxMessageBytes && { "message.max.bytes": maxMessageBytes }
- };
- }
  var import_http, import_client, import_kafka_javascript, Kafka, compilerLog, getClickhouseClient, cliLog, MAX_RETRIES, MAX_RETRY_TIME_MS, RETRY_INITIAL_TIME_MS, MAX_RETRIES_PRODUCER, ACKs, parseBrokerString, logError, buildSaslConfig, getKafkaClient;
  var init_commons = __esm({
  "src/commons.ts"() {
@@ -1832,6 +1816,156 @@ var MAX_RETRIES_CONSUMER = 150;
  var SESSION_TIMEOUT_CONSUMER = 3e4;
  var HEARTBEAT_INTERVAL_CONSUMER = 3e3;
  var DEFAULT_MAX_STREAMING_CONCURRENCY = 100;
+ var KAFKAJS_BYTE_MESSAGE_OVERHEAD = 500;
+ var isMessageTooLargeError = (error) => {
+ if (import_kafka_javascript2.KafkaJS.isKafkaJSError && error instanceof Error && import_kafka_javascript2.KafkaJS.isKafkaJSError(error)) {
+ return error.type === "ERR_MSG_SIZE_TOO_LARGE" || error.code === 10 || error.cause !== void 0 && isMessageTooLargeError(error.cause);
+ }
+ if (error && typeof error === "object") {
+ const err = error;
+ return err.type === "ERR_MSG_SIZE_TOO_LARGE" || err.code === 10 || err.cause !== void 0 && isMessageTooLargeError(err.cause);
+ }
+ return false;
+ };
+ var splitBatch = (messages, maxChunkSize) => {
+ if (messages.length <= 1) {
+ return [messages];
+ }
+ const chunks = [];
+ let currentChunk = [];
+ let currentSize = 0;
+ for (const message of messages) {
+ const messageSize = import_node_buffer.Buffer.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
+ if (currentSize + messageSize > maxChunkSize && currentChunk.length > 0) {
+ chunks.push(currentChunk);
+ currentChunk = [message];
+ currentSize = messageSize;
+ } else {
+ currentChunk.push(message);
+ currentSize += messageSize;
+ }
+ }
+ if (currentChunk.length > 0) {
+ chunks.push(currentChunk);
+ }
+ return chunks;
+ };
+ var sendChunkWithRetry = async (logger2, targetTopic, producer, messages, currentMaxSize, maxRetries = 3) => {
+ const currentMessages = messages;
+ let attempts = 0;
+ while (attempts < maxRetries) {
+ try {
+ await producer.send({
+ topic: targetTopic.name,
+ messages: currentMessages
+ });
+ logger2.log(
+ `Successfully sent ${currentMessages.length} messages to ${targetTopic.name}`
+ );
+ return;
+ } catch (error) {
+ if (isMessageTooLargeError(error) && currentMessages.length > 1) {
+ logger2.warn(
+ `Got MESSAGE_TOO_LARGE error, splitting batch of ${currentMessages.length} messages and retrying (${maxRetries - attempts} attempts left)`
+ );
+ const newMaxSize = Math.floor(currentMaxSize / 2);
+ const splitChunks = splitBatch(currentMessages, newMaxSize);
+ for (const chunk of splitChunks) {
+ await sendChunkWithRetry(
+ logger2,
+ targetTopic,
+ producer,
+ chunk,
+ newMaxSize,
+ // this error does not count as one failed attempt
+ maxRetries - attempts
+ );
+ }
+ return;
+ } else {
+ attempts++;
+ if (attempts >= maxRetries) {
+ let messagesHandledByDLQ = 0;
+ let messagesWithoutDLQ = 0;
+ const dlqErrors = [];
+ for (const failedMessage of currentMessages) {
+ const dlqTopic = failedMessage.dlq;
+ if (dlqTopic && failedMessage.originalValue) {
+ const dlqTopicName = dlqTopic.name;
+ const deadLetterRecord = {
+ originalRecord: {
+ ...failedMessage.originalValue,
+ // Include original Kafka message metadata
+ __sourcePartition: failedMessage.originalMessage.partition,
+ __sourceOffset: failedMessage.originalMessage.offset,
+ __sourceTimestamp: failedMessage.originalMessage.timestamp
+ },
+ errorMessage: error instanceof Error ? error.message : String(error),
+ errorType: error instanceof Error ? error.constructor.name : "Unknown",
+ failedAt: /* @__PURE__ */ new Date(),
+ source: "transform"
+ };
+ cliLog({
+ action: "DeadLetter",
+ message: `Sending failed message to DLQ ${dlqTopicName}: ${error instanceof Error ? error.message : String(error)}`,
+ message_type: "Error"
+ });
+ try {
+ await producer.send({
+ topic: dlqTopicName,
+ messages: [{ value: JSON.stringify(deadLetterRecord) }]
+ });
+ logger2.log(`Sent failed message to DLQ ${dlqTopicName}`);
+ messagesHandledByDLQ++;
+ } catch (dlqError) {
+ const errorMsg = `Failed to send message to DLQ: ${dlqError}`;
+ logger2.error(errorMsg);
+ dlqErrors.push(errorMsg);
+ }
+ } else if (!dlqTopic) {
+ messagesWithoutDLQ++;
+ logger2.warn(
+ `Cannot send to DLQ: no DLQ configured for message (batch has mixed DLQ configurations)`
+ );
+ } else {
+ messagesWithoutDLQ++;
+ logger2.warn(
+ `Cannot send to DLQ: original message value not available`
+ );
+ }
+ }
+ const allMessagesHandled = messagesHandledByDLQ === currentMessages.length && messagesWithoutDLQ === 0 && dlqErrors.length === 0;
+ if (allMessagesHandled) {
+ logger2.log(
+ `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, not throwing original error`
+ );
+ return;
+ }
+ if (messagesWithoutDLQ > 0) {
+ logger2.error(
+ `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured`
+ );
+ }
+ if (dlqErrors.length > 0) {
+ logger2.error(
+ `Some messages failed to send to DLQ: ${dlqErrors.join(", ")}`
+ );
+ }
+ if (messagesHandledByDLQ > 0) {
+ logger2.warn(
+ `Partial DLQ success: ${messagesHandledByDLQ}/${currentMessages.length} message(s) sent to DLQ, but throwing due to incomplete batch handling`
+ );
+ }
+ throw error;
+ }
+ logger2.warn(
+ `Send ${currentMessages.length} messages failed (attempt ${attempts}/${maxRetries}), retrying: ${error}`
+ );
+ await new Promise((resolve2) => setTimeout(resolve2, 100 * attempts));
+ }
+ }
+ }
+ };
  var MAX_STREAMING_CONCURRENCY = process3.env.MAX_STREAMING_CONCURRENCY ? parseInt(process3.env.MAX_STREAMING_CONCURRENCY, 10) : DEFAULT_MAX_STREAMING_CONCURRENCY;
  var metricsLog = (log) => {
  const req = http3.request({
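
The block above adds application-level, size-based batch splitting to the streaming producer. The following is a minimal TypeScript sketch of the same greedy algorithm, assuming an illustrative OutboundMessage shape and the helper name splitBySize (neither exists in the package) and reusing the 500-byte per-message overhead that the new KAFKAJS_BYTE_MESSAGE_OVERHEAD constant assumes:

import { Buffer } from "node:buffer";

// Illustrative message shape; the real objects in the bundle also carry
// dlq, originalValue, and originalMessage fields.
interface OutboundMessage {
  value: string;
}

// Per-message protocol overhead assumed by the bundle (KAFKAJS_BYTE_MESSAGE_OVERHEAD).
const MESSAGE_OVERHEAD_BYTES = 500;

// Greedy size-based splitter mirroring the added splitBatch helper: messages
// accumulate into the current chunk until the estimated size would exceed the
// limit, at which point a new chunk is started.
function splitBySize(messages: OutboundMessage[], maxChunkBytes: number): OutboundMessage[][] {
  if (messages.length <= 1) return [messages];
  const chunks: OutboundMessage[][] = [];
  let current: OutboundMessage[] = [];
  let currentBytes = 0;
  for (const message of messages) {
    const size = Buffer.byteLength(message.value, "utf8") + MESSAGE_OVERHEAD_BYTES;
    if (currentBytes + size > maxChunkBytes && current.length > 0) {
      chunks.push(current);
      current = [message];
      currentBytes = size;
    } else {
      current.push(message);
      currentBytes += size;
    }
  }
  if (current.length > 0) chunks.push(current);
  return chunks;
}

// Three messages estimated at 1000 bytes each against a 2048-byte limit
// split into chunks of two and one.
const sample = Array.from({ length: 3 }, () => ({ value: "x".repeat(500) }));
console.log(splitBySize(sample, 2048).map((chunk) => chunk.length)); // [2, 1]

Note that the split never breaks an individual message apart, so a single message whose estimated size already exceeds the limit still becomes its own chunk and is left for the broker to accept or reject.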
@@ -1977,95 +2111,57 @@ var handleMessage = async (logger2, streamingFunctionWithConfigList, message, pr
  }
  return void 0;
  };
- var handleDLQForFailedMessages = async (logger2, producer, messages, error) => {
- let messagesHandledByDLQ = 0;
- let messagesWithoutDLQ = 0;
- let dlqErrors = 0;
- for (const msg of messages) {
- if (msg.dlq && msg.originalValue) {
- const deadLetterRecord = {
- originalRecord: {
- ...msg.originalValue,
- // Include original Kafka message metadata
- __sourcePartition: msg.originalMessage.partition,
- __sourceOffset: msg.originalMessage.offset,
- __sourceTimestamp: msg.originalMessage.timestamp
- },
- errorMessage: error instanceof Error ? error.message : String(error),
- errorType: error instanceof Error ? error.constructor.name : "Unknown",
- failedAt: /* @__PURE__ */ new Date(),
- source: "transform"
- };
- cliLog({
- action: "DeadLetter",
- message: `Sending failed message to DLQ ${msg.dlq.name}: ${error instanceof Error ? error.message : String(error)}`,
- message_type: "Error"
- });
- try {
- await producer.send({
- topic: msg.dlq.name,
- messages: [{ value: JSON.stringify(deadLetterRecord) }]
- });
- logger2.log(`Sent failed message to DLQ ${msg.dlq.name}`);
- messagesHandledByDLQ++;
- } catch (dlqError) {
- logger2.error(`Failed to send to DLQ: ${dlqError}`);
- dlqErrors++;
+ var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
+ try {
+ let chunk = [];
+ let chunkSize = 0;
+ const maxMessageSize = targetTopic.max_message_bytes || 1024 * 1024;
+ for (const message of messages) {
+ const messageSize = import_node_buffer.Buffer.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
+ if (chunkSize + messageSize > maxMessageSize) {
+ logger2.log(
+ `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
+ );
+ await sendChunkWithRetry(
+ logger2,
+ targetTopic,
+ producer,
+ chunk,
+ maxMessageSize
+ );
+ logger2.log(
+ `Sent ${chunk.length} transformed records to ${targetTopic.name}`
+ );
+ chunk = [message];
+ chunkSize = messageSize;
+ } else {
+ chunk.push(message);
+ metrics.bytes += import_node_buffer.Buffer.byteLength(message.value, "utf8");
+ chunkSize += messageSize;
  }
- } else if (!msg.dlq) {
- messagesWithoutDLQ++;
- logger2.warn(`Cannot send to DLQ: no DLQ configured for message`);
- } else {
- messagesWithoutDLQ++;
- logger2.warn(`Cannot send to DLQ: original message value not available`);
  }
- }
- const allMessagesHandled = messagesHandledByDLQ === messages.length && messagesWithoutDLQ === 0 && dlqErrors === 0;
- if (allMessagesHandled) {
- logger2.log(
- `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, suppressing original error`
- );
- } else if (messagesHandledByDLQ > 0) {
- logger2.warn(
- `Partial DLQ success: ${messagesHandledByDLQ}/${messages.length} message(s) sent to DLQ`
- );
- if (messagesWithoutDLQ > 0) {
- logger2.error(
- `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured or missing original value`
+ metrics.count_out += chunk.length;
+ if (chunk.length > 0) {
+ logger2.log(
+ `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
+ );
+ await sendChunkWithRetry(
+ logger2,
+ targetTopic,
+ producer,
+ chunk,
+ maxMessageSize
+ );
+ logger2.log(
+ `Sent final ${chunk.length} transformed data to ${targetTopic.name}`
  );
  }
- if (dlqErrors > 0) {
- logger2.error(`${dlqErrors} message(s) failed to send to DLQ`);
- }
- }
- return allMessagesHandled;
- };
- var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
- if (messages.length === 0) return;
- for (const msg of messages) {
- metrics.bytes += import_node_buffer.Buffer.byteLength(msg.value, "utf8");
- }
- metrics.count_out += messages.length;
- try {
- await producer.send({
- topic: targetTopic.name,
- messages
- });
- logger2.log(`Sent ${messages.length} messages to ${targetTopic.name}`);
  } catch (e) {
  logger2.error(`Failed to send transformed data`);
  if (e instanceof Error) {
  logError(logger2, e);
  }
- const allHandledByDLQ = await handleDLQForFailedMessages(
- logger2,
- producer,
- messages,
- e
- );
- if (!allHandledByDLQ) {
- throw e;
- }
+ throw e;
  }
  };
  var sendMessageMetrics = (logger2, metrics) => {
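
The rewritten sendMessages above no longer pushes the whole transformed batch through one producer.send call. It accumulates messages until the estimated chunk size would exceed the target topic's max_message_bytes (defaulting to 1 MiB) and hands each chunk to sendChunkWithRetry, which re-splits with a halved size budget on MESSAGE_TOO_LARGE, retries other errors with a linear backoff, and falls back to per-message DLQ routing once the attempts are exhausted. Below is a minimal sketch of that retry-and-split control flow, with send, isTooLarge, and split as hypothetical stand-ins for producer.send, isMessageTooLargeError, and splitBatch:

// Generic sketch of the retry-and-split loop; the per-message DLQ fallback
// that the real code performs before the final throw is elided here.
async function sendWithSplitRetry<T>(
  batch: T[],
  maxBytes: number,
  send: (batch: T[]) => Promise<void>,
  isTooLarge: (err: unknown) => boolean,
  split: (batch: T[], maxBytes: number) => T[][],
  maxRetries = 3,
): Promise<void> {
  let attempts = 0;
  while (attempts < maxRetries) {
    try {
      await send(batch);
      return;
    } catch (err) {
      if (isTooLarge(err) && batch.length > 1) {
        // Broker rejected the batch as too large: halve the size budget,
        // re-split, and recurse. This path does not consume an attempt.
        const halved = Math.floor(maxBytes / 2);
        for (const chunk of split(batch, halved)) {
          await sendWithSplitRetry(chunk, halved, send, isTooLarge, split, maxRetries - attempts);
        }
        return;
      }
      attempts++;
      if (attempts >= maxRetries) throw err; // the real code routes to the DLQ first
      await new Promise((resolve) => setTimeout(resolve, 100 * attempts)); // linear backoff
    }
  }
}

Because a too-large rejection recurses with maxRetries - attempts instead of incrementing the attempt counter, repeated splitting does not eat into the retry budget reserved for transient failures.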
@@ -2307,10 +2403,16 @@ var runStreamingFunctions = async (args) => {
  fromBeginning: true
  }
  });
- const maxMessageBytes = args.targetTopic?.max_message_bytes || 1024 * 1024;
- const producer = kafka.producer(
- createProducerConfig(maxMessageBytes)
- );
+ const producer = kafka.producer({
+ kafkaJS: {
+ idempotent: true,
+ acks: ACKs,
+ retry: {
+ retries: MAX_RETRIES_PRODUCER,
+ maxRetryTime: MAX_RETRY_TIME_MS
+ }
+ }
+ });
  try {
  logger2.log("Starting producer...");
  await startProducer(logger2, producer);
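
The producer configuration that replaces createProducerConfig enables idempotence (previously explicitly disabled), and drops both the linger.ms setting and the per-topic message.max.bytes override, leaving message sizing entirely to the application-level chunking added above. The following is a standalone reproduction of the new configuration, assuming the KafkaJS compatibility layer of @confluentinc/kafka-javascript; the broker list and the constant values are illustrative placeholders, since the actual ACKs, MAX_RETRIES_PRODUCER, and MAX_RETRY_TIME_MS values are defined elsewhere in src/commons.ts and are not shown in this diff:

import { KafkaJS } from "@confluentinc/kafka-javascript";

// Placeholder values; the real constants live in src/commons.ts.
const ACKs = -1;
const MAX_RETRIES_PRODUCER = 5;
const MAX_RETRY_TIME_MS = 30000;

// Illustrative client setup using the KafkaJS-compatible constructor.
const kafka = new KafkaJS.Kafka({ kafkaJS: { brokers: ["localhost:9092"] } });

// Mirrors the inline config added in this hunk: idempotent is now true and
// no "message.max.bytes" is set, so oversized batches are split in
// application code rather than raised at the broker limit.
const producer = kafka.producer({
  kafkaJS: {
    idempotent: true,
    acks: ACKs,
    retry: {
      retries: MAX_RETRIES_PRODUCER,
      maxRetryTime: MAX_RETRY_TIME_MS,
    },
  },
});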