@514labs/moose-lib 0.6.252-ci-3-gf88b2b20 → 0.6.252-ci-1-g40bb1da4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browserCompatible.js +6 -14
- package/dist/browserCompatible.js.map +1 -1
- package/dist/browserCompatible.mjs +6 -14
- package/dist/browserCompatible.mjs.map +1 -1
- package/dist/compilerPlugin.js.map +1 -1
- package/dist/compilerPlugin.mjs.map +1 -1
- package/dist/dmv2/index.js +6 -14
- package/dist/dmv2/index.js.map +1 -1
- package/dist/dmv2/index.mjs +6 -14
- package/dist/dmv2/index.mjs.map +1 -1
- package/dist/index.d.mts +2 -25
- package/dist/index.d.ts +2 -25
- package/dist/index.js +6 -16
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +6 -15
- package/dist/index.mjs.map +1 -1
- package/dist/moose-runner.js +202 -100
- package/dist/moose-runner.js.map +1 -1
- package/dist/moose-runner.mjs +202 -100
- package/dist/moose-runner.mjs.map +1 -1
- package/package.json +1 -1
package/dist/moose-runner.mjs
CHANGED
@@ -26,22 +26,6 @@ function isTruthy(value) {
     return false;
   }
 }
-function createProducerConfig(maxMessageBytes) {
-  return {
-    kafkaJS: {
-      idempotent: false,
-      // Not needed for at-least-once delivery
-      acks: ACKs,
-      retry: {
-        retries: MAX_RETRIES_PRODUCER,
-        maxRetryTime: MAX_RETRY_TIME_MS
-      }
-    },
-    "linger.ms": 0,
-    // Send immediately - batching happens at application level
-    ...maxMessageBytes && { "message.max.bytes": maxMessageBytes }
-  };
-}
 var Kafka, compilerLog, getClickhouseClient, cliLog, MAX_RETRIES, MAX_RETRY_TIME_MS, RETRY_INITIAL_TIME_MS, MAX_RETRIES_PRODUCER, ACKs, parseBrokerString, logError, buildSaslConfig, getKafkaClient;
 var init_commons = __esm({
   "src/commons.ts"() {
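The removed createProducerConfig helper disabled idempotence, forced "linger.ms": 0, and attached "message.max.bytes" only when a cap was supplied, using a conditional object spread. A minimal TypeScript sketch of that spread idiom; the function name is illustrative, not part of the package's API:

    function producerOverrides(maxMessageBytes?: number): Record<string, unknown> {
      return {
        "linger.ms": 0,
        // `expr && { key: value }` evaluates to a falsy value when expr is
        // 0/undefined, and spreading a falsy value into an object literal
        // adds no properties.
        ...(maxMessageBytes && { "message.max.bytes": maxMessageBytes })
      };
    }

    producerOverrides();        // { "linger.ms": 0 }
    producerOverrides(1048576); // { "linger.ms": 0, "message.max.bytes": 1048576 }

With the helper gone, oversize handling moves from producer configuration to send time: the batch-splitting code added below chunks payloads before they leave the process, and the producer is constructed inline in the last hunk with idempotent: true.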
@@ -1819,6 +1803,156 @@ var MAX_RETRIES_CONSUMER = 150;
 var SESSION_TIMEOUT_CONSUMER = 3e4;
 var HEARTBEAT_INTERVAL_CONSUMER = 3e3;
 var DEFAULT_MAX_STREAMING_CONCURRENCY = 100;
+var KAFKAJS_BYTE_MESSAGE_OVERHEAD = 500;
+var isMessageTooLargeError = (error) => {
+  if (KafkaJS2.isKafkaJSError && error instanceof Error && KafkaJS2.isKafkaJSError(error)) {
+    return error.type === "ERR_MSG_SIZE_TOO_LARGE" || error.code === 10 || error.cause !== void 0 && isMessageTooLargeError(error.cause);
+  }
+  if (error && typeof error === "object") {
+    const err = error;
+    return err.type === "ERR_MSG_SIZE_TOO_LARGE" || err.code === 10 || err.cause !== void 0 && isMessageTooLargeError(err.cause);
+  }
+  return false;
+};
+var splitBatch = (messages, maxChunkSize) => {
+  if (messages.length <= 1) {
+    return [messages];
+  }
+  const chunks = [];
+  let currentChunk = [];
+  let currentSize = 0;
+  for (const message of messages) {
+    const messageSize = Buffer2.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
+    if (currentSize + messageSize > maxChunkSize && currentChunk.length > 0) {
+      chunks.push(currentChunk);
+      currentChunk = [message];
+      currentSize = messageSize;
+    } else {
+      currentChunk.push(message);
+      currentSize += messageSize;
+    }
+  }
+  if (currentChunk.length > 0) {
+    chunks.push(currentChunk);
+  }
+  return chunks;
+};
+var sendChunkWithRetry = async (logger2, targetTopic, producer, messages, currentMaxSize, maxRetries = 3) => {
+  const currentMessages = messages;
+  let attempts = 0;
+  while (attempts < maxRetries) {
+    try {
+      await producer.send({
+        topic: targetTopic.name,
+        messages: currentMessages
+      });
+      logger2.log(
+        `Successfully sent ${currentMessages.length} messages to ${targetTopic.name}`
+      );
+      return;
+    } catch (error) {
+      if (isMessageTooLargeError(error) && currentMessages.length > 1) {
+        logger2.warn(
+          `Got MESSAGE_TOO_LARGE error, splitting batch of ${currentMessages.length} messages and retrying (${maxRetries - attempts} attempts left)`
+        );
+        const newMaxSize = Math.floor(currentMaxSize / 2);
+        const splitChunks = splitBatch(currentMessages, newMaxSize);
+        for (const chunk of splitChunks) {
+          await sendChunkWithRetry(
+            logger2,
+            targetTopic,
+            producer,
+            chunk,
+            newMaxSize,
+            // this error does not count as one failed attempt
+            maxRetries - attempts
+          );
+        }
+        return;
+      } else {
+        attempts++;
+        if (attempts >= maxRetries) {
+          let messagesHandledByDLQ = 0;
+          let messagesWithoutDLQ = 0;
+          const dlqErrors = [];
+          for (const failedMessage of currentMessages) {
+            const dlqTopic = failedMessage.dlq;
+            if (dlqTopic && failedMessage.originalValue) {
+              const dlqTopicName = dlqTopic.name;
+              const deadLetterRecord = {
+                originalRecord: {
+                  ...failedMessage.originalValue,
+                  // Include original Kafka message metadata
+                  __sourcePartition: failedMessage.originalMessage.partition,
+                  __sourceOffset: failedMessage.originalMessage.offset,
+                  __sourceTimestamp: failedMessage.originalMessage.timestamp
+                },
+                errorMessage: error instanceof Error ? error.message : String(error),
+                errorType: error instanceof Error ? error.constructor.name : "Unknown",
+                failedAt: /* @__PURE__ */ new Date(),
+                source: "transform"
+              };
+              cliLog({
+                action: "DeadLetter",
+                message: `Sending failed message to DLQ ${dlqTopicName}: ${error instanceof Error ? error.message : String(error)}`,
+                message_type: "Error"
+              });
+              try {
+                await producer.send({
+                  topic: dlqTopicName,
+                  messages: [{ value: JSON.stringify(deadLetterRecord) }]
+                });
+                logger2.log(`Sent failed message to DLQ ${dlqTopicName}`);
+                messagesHandledByDLQ++;
+              } catch (dlqError) {
+                const errorMsg = `Failed to send message to DLQ: ${dlqError}`;
+                logger2.error(errorMsg);
+                dlqErrors.push(errorMsg);
+              }
+            } else if (!dlqTopic) {
+              messagesWithoutDLQ++;
+              logger2.warn(
+                `Cannot send to DLQ: no DLQ configured for message (batch has mixed DLQ configurations)`
+              );
+            } else {
+              messagesWithoutDLQ++;
+              logger2.warn(
+                `Cannot send to DLQ: original message value not available`
+              );
+            }
+          }
+          const allMessagesHandled = messagesHandledByDLQ === currentMessages.length && messagesWithoutDLQ === 0 && dlqErrors.length === 0;
+          if (allMessagesHandled) {
+            logger2.log(
+              `All ${messagesHandledByDLQ} failed message(s) sent to DLQ, not throwing original error`
+            );
+            return;
+          }
+          if (messagesWithoutDLQ > 0) {
+            logger2.error(
+              `Cannot handle batch failure: ${messagesWithoutDLQ} message(s) have no DLQ configured`
+            );
+          }
+          if (dlqErrors.length > 0) {
+            logger2.error(
+              `Some messages failed to send to DLQ: ${dlqErrors.join(", ")}`
+            );
+          }
+          if (messagesHandledByDLQ > 0) {
+            logger2.warn(
+              `Partial DLQ success: ${messagesHandledByDLQ}/${currentMessages.length} message(s) sent to DLQ, but throwing due to incomplete batch handling`
+            );
+          }
+          throw error;
+        }
+        logger2.warn(
+          `Send ${currentMessages.length} messages failed (attempt ${attempts}/${maxRetries}), retrying: ${error}`
+        );
+        await new Promise((resolve2) => setTimeout(resolve2, 100 * attempts));
+      }
+    }
+  }
+};
 var MAX_STREAMING_CONCURRENCY = process3.env.MAX_STREAMING_CONCURRENCY ? parseInt(process3.env.MAX_STREAMING_CONCURRENCY, 10) : DEFAULT_MAX_STREAMING_CONCURRENCY;
 var metricsLog = (log) => {
   const req = http3.request({
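The new send path estimates each record's on-wire footprint as its UTF-8 byte length plus the flat 500-byte KAFKAJS_BYTE_MESSAGE_OVERHEAD allowance for framing and headers, and splitBatch greedily packs records into chunks under a byte budget. A standalone TypeScript sketch of that packing with a worked example; the OutMessage type and the demo values are illustrative, not from the package:

    const KAFKAJS_BYTE_MESSAGE_OVERHEAD = 500;

    interface OutMessage {
      value: string;
    }

    // Greedy, order-preserving split: start a new chunk whenever adding the
    // next message would push the running byte estimate past maxChunkSize.
    function splitBatch(messages: OutMessage[], maxChunkSize: number): OutMessage[][] {
      if (messages.length <= 1) return [messages];
      const chunks: OutMessage[][] = [];
      let currentChunk: OutMessage[] = [];
      let currentSize = 0;
      for (const message of messages) {
        const messageSize = Buffer.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
        if (currentSize + messageSize > maxChunkSize && currentChunk.length > 0) {
          chunks.push(currentChunk);
          currentChunk = [message];
          currentSize = messageSize;
        } else {
          currentChunk.push(message);
          currentSize += messageSize;
        }
      }
      if (currentChunk.length > 0) chunks.push(currentChunk);
      return chunks;
    }

    // Three 100-byte payloads each count as 100 + 500 = 600 bytes, so a
    // 1500-byte budget fits two per chunk and the lengths come out as [2, 1].
    const demo = Array.from({ length: 3 }, () => ({ value: "x".repeat(100) }));
    console.log(splitBatch(demo, 1500).map((chunk) => chunk.length)); // [2, 1]

When the broker still rejects a chunk with ERR_MSG_SIZE_TOO_LARGE (code 10), sendChunkWithRetry halves the budget, re-splits, and recurses without counting that rejection as a failed attempt; only after maxRetries genuine failures does it fall back to the per-message dead-letter path.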
@@ -1964,95 +2098,57 @@ var handleMessage = async (logger2, streamingFunctionWithConfigList, message, pr
   }
   return void 0;
 };
-var
-[… 5 lines truncated in the rendered diff …]
-const
-[… 17 lines truncated in the rendered diff …]
-}
-[… 2 lines truncated in the rendered diff …]
-          messages: [{ value: JSON.stringify(deadLetterRecord) }]
-        });
-        logger2.log(`Sent failed message to DLQ ${msg.dlq.name}`);
-        messagesHandledByDLQ++;
-      } catch (dlqError) {
-        logger2.error(`Failed to send to DLQ: ${dlqError}`);
-        dlqErrors++;
+var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
+  try {
+    let chunk = [];
+    let chunkSize = 0;
+    const maxMessageSize = targetTopic.max_message_bytes || 1024 * 1024;
+    for (const message of messages) {
+      const messageSize = Buffer2.byteLength(message.value, "utf8") + KAFKAJS_BYTE_MESSAGE_OVERHEAD;
+      if (chunkSize + messageSize > maxMessageSize) {
+        logger2.log(
+          `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
+        );
+        await sendChunkWithRetry(
+          logger2,
+          targetTopic,
+          producer,
+          chunk,
+          maxMessageSize
+        );
+        logger2.log(
+          `Sent ${chunk.length} transformed records to ${targetTopic.name}`
+        );
+        chunk = [message];
+        chunkSize = messageSize;
+      } else {
+        chunk.push(message);
+        metrics.bytes += Buffer2.byteLength(message.value, "utf8");
+        chunkSize += messageSize;
       }
-      } else if (!msg.dlq) {
-        messagesWithoutDLQ++;
-        logger2.warn(`Cannot send to DLQ: no DLQ configured for message`);
-      } else {
-        messagesWithoutDLQ++;
-        logger2.warn(`Cannot send to DLQ: original message value not available`);
     }
-[… 13 lines truncated in the rendered diff …]
+    metrics.count_out += chunk.length;
+    if (chunk.length > 0) {
+      logger2.log(
+        `Sending ${chunkSize} bytes of a transformed record batch to ${targetTopic.name}`
+      );
+      await sendChunkWithRetry(
+        logger2,
+        targetTopic,
+        producer,
+        chunk,
+        maxMessageSize
+      );
+      logger2.log(
+        `Sent final ${chunk.length} transformed data to ${targetTopic.name}`
       );
     }
-    if (dlqErrors > 0) {
-      logger2.error(`${dlqErrors} message(s) failed to send to DLQ`);
-    }
-  }
-  return allMessagesHandled;
-};
-var sendMessages = async (logger2, metrics, targetTopic, producer, messages) => {
-  if (messages.length === 0) return;
-  try {
-    await producer.send({
-      topic: targetTopic.name,
-      messages
-    });
-    for (const msg of messages) {
-      metrics.bytes += Buffer2.byteLength(msg.value, "utf8");
-    }
-    metrics.count_out += messages.length;
-    logger2.log(`Sent ${messages.length} messages to ${targetTopic.name}`);
   } catch (e) {
     logger2.error(`Failed to send transformed data`);
     if (e instanceof Error) {
       logError(logger2, e);
     }
-[… 1 line truncated in the rendered diff …]
-      logger2,
-      producer,
-      messages,
-      e
-    );
-    if (!allHandledByDLQ) {
-      throw e;
-    }
+    throw e;
   }
 };
 var sendMessageMetrics = (logger2, metrics) => {
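The code removed here, a standalone DLQ helper plus a sendMessages that pushed the whole batch through a single producer.send, is superseded by the size-aware loop above and the consolidated failure handling in sendChunkWithRetry; outgoing messages now carry their DLQ topic (dlq), the pre-transform value (originalValue), and source Kafka metadata (originalMessage) with them. The dead-letter payload serialized to the DLQ topic, written out as a hypothetical TypeScript type reconstructed from the object literal in sendChunkWithRetry (the type is not exported by the package, and the __source* field types are assumptions based on typical KafkaJS message shapes):

    // Hypothetical type, not part of the package's public API.
    interface DeadLetterRecord {
      originalRecord: Record<string, unknown> & {
        // Metadata of the source Kafka message, merged into the original record
        __sourcePartition: number; // assumption: partitions are numbers
        __sourceOffset: string;    // assumption: offsets are delivered as strings
        __sourceTimestamp: string; // assumption: broker timestamps as strings
      };
      errorMessage: string; // error.message, or String(error) for non-Error throws
      errorType: string;    // error.constructor.name, or "Unknown"
      failedAt: Date;       // becomes an ISO-8601 string under JSON.stringify
      source: "transform";
    }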
@@ -2294,10 +2390,16 @@ var runStreamingFunctions = async (args) => {
       fromBeginning: true
     }
   });
-  const
-[… 3 lines truncated in the rendered diff …]
+  const producer = kafka.producer({
+    kafkaJS: {
+      idempotent: true,
+      acks: ACKs,
+      retry: {
+        retries: MAX_RETRIES_PRODUCER,
+        maxRetryTime: MAX_RETRY_TIME_MS
+      }
+    }
+  });
   try {
     logger2.log("Starting producer...");
     await startProducer(logger2, producer);