@drarzter/kafka-client 0.7.1 → 0.7.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +141 -8
- package/dist/{chunk-AMEGMOZH.mjs → chunk-XP7LLRGQ.mjs} +1026 -849
- package/dist/chunk-XP7LLRGQ.mjs.map +1 -0
- package/dist/core.d.mts +167 -56
- package/dist/core.d.ts +167 -56
- package/dist/core.js +1025 -848
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/index.d.mts +37 -4
- package/dist/index.d.ts +37 -4
- package/dist/index.js +1031 -848
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +7 -1
- package/dist/index.mjs.map +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/dist/{types-BEIGjmV6.d.mts → types-4qWrf2aJ.d.mts} +136 -4
- package/dist/{types-BEIGjmV6.d.ts → types-4qWrf2aJ.d.ts} +136 -4
- package/package.json +1 -1
- package/dist/chunk-AMEGMOZH.mjs.map +0 -1
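
The bulk of the change is a refactor of `dist/core.js`: the old monolithic `src/client/kafka.client` bundle is split into `producer/`, `consumer/`, `admin/`, and `infra/` modules (circuit breaker, metrics, in-flight tracking), and several user-facing options appear in the bundled code: a per-send `compression` passthrough, a `partitionAssigner` choice for consumers (`"roundrobin"`, `"range"`, or the cooperative-sticky default), a per-consumer `onTtlExpired` override, and admin helpers such as `resetOffsets`, `seekToOffset`, `seekToTimestamp`, and `getConsumerLag` that require the group to be stopped. A minimal usage sketch follows; it assumes these options are exposed on `KafkaClient` as the bundled code suggests, and the `"gzip"` compression value and handler signatures are illustrative rather than confirmed by this diff:

```ts
import { KafkaClient } from "@drarzter/kafka-client";

const client = new KafkaClient("orders", "orders-group", ["localhost:9092"]);
await client.connectProducer();

// 0.7.3 forwards a compression option into the producer payload.
// "gzip" is a hypothetical value; check the package's .d.ts for the exact type.
await client.sendMessage("orders.created", { id: 42 }, { compression: "gzip" });

// Consumers can now pick a partition assigner; cooperative-sticky is the default.
// The per-consumer onTtlExpired handler takes precedence over the client-level one.
await client.startConsumer(
  ["orders.created"],
  async (envelope) => {
    /* handle message */
  },
  {
    partitionAssigner: "roundrobin",
    onTtlExpired: ({ topic, ageMs }) =>
      console.warn(`message on ${topic} expired after ${ageMs}ms`),
  }
);

// Offset surgery requires a stopped group; these methods throw while it is running.
await client.stopConsumer("orders-group");
await client.seekToTimestamp("orders-group", [
  { topic: "orders.created", partition: 0, timestamp: Date.now() - 3_600_000 },
]);
```
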
package/dist/core.js
CHANGED
@@ -40,7 +40,7 @@ __export(core_exports, {
 module.exports = __toCommonJS(core_exports);
 
 // src/client/kafka.client/index.ts
-var …
+var import_kafka_javascript2 = require("@confluentinc/kafka-javascript");
 
 // src/client/message/envelope.ts
 var import_node_async_hooks = require("async_hooks");
@@ -136,7 +136,7 @@ var KafkaRetryExhaustedError = class extends KafkaProcessingError {
 }
 };
 
-// src/client/kafka.client/producer …
+// src/client/kafka.client/producer/ops.ts
 function resolveTopicName(topicOrDescriptor) {
 if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
 if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
@@ -181,7 +181,7 @@ async function validateMessage(topicOrDesc, message, deps, ctx) {
 }
 return message;
 }
-async function buildSendPayload(topicOrDesc, messages, deps) {
+async function buildSendPayload(topicOrDesc, messages, deps, compression) {
 const topic2 = resolveTopicName(topicOrDesc);
 const builtMessages = await Promise.all(
 messages.map(async (m) => {
@@ -211,11 +211,12 @@ async function buildSendPayload(topicOrDesc, messages, deps) {
 };
 })
 );
-return { topic: topic2, messages: builtMessages };
+return { topic: topic2, messages: builtMessages, ...compression && { compression } };
 }
 
-// src/client/kafka.client/consumer …
-function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
+// src/client/kafka.client/consumer/ops.ts
+var import_kafka_javascript = require("@confluentinc/kafka-javascript");
+function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps, partitionAssigner) {
 const { consumers, consumerCreationOptions, kafka, onRebalance, logger } = deps;
 if (consumers.has(groupId)) {
 const prev = consumerCreationOptions.get(groupId);
@@ -227,8 +228,11 @@ function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
 return consumers.get(groupId);
 }
 consumerCreationOptions.set(groupId, { fromBeginning, autoCommit });
+const assigners = [
+partitionAssigner === "roundrobin" ? import_kafka_javascript.KafkaJS.PartitionAssigners.roundRobin : partitionAssigner === "range" ? import_kafka_javascript.KafkaJS.PartitionAssigners.range : import_kafka_javascript.KafkaJS.PartitionAssigners.cooperativeSticky
+];
 const config = {
-kafkaJS: { groupId, fromBeginning, autoCommit }
+kafkaJS: { groupId, fromBeginning, autoCommit, partitionAssigners: assigners }
 };
 if (onRebalance) {
 const cb = onRebalance;
@@ -273,7 +277,7 @@ function buildSchemaMap(topics, schemaRegistry, optionSchemas, logger) {
 return schemaMap;
 }
 
-// src/client/consumer/pipeline.ts
+// src/client/kafka.client/consumer/pipeline.ts
 function toError(error) {
 return error instanceof Error ? error : new Error(String(error));
 }
@@ -623,7 +627,7 @@ async function executeWithRetry(fn, ctx, deps) {
 }
 }
 
-// src/client/kafka.client/ …
+// src/client/kafka.client/consumer/handler.ts
 async function applyDeduplication(envelope, raw, dedup, dlq, deps) {
 const clockRaw = envelope.headers[HEADER_LAMPORT_CLOCK];
 if (clockRaw === void 0) return false;
@@ -773,7 +777,8 @@ async function handleEachMessage(payload, opts, deps) {
 });
 deps.onDlq?.(envelope, "ttl-expired");
 } else {
-
+const ttlHandler = opts.onTtlExpired ?? deps.onTtlExpired;
+await ttlHandler?.({
 topic: topic2,
 ageMs,
 messageTtlMs: opts.messageTtlMs,
@@ -900,7 +905,8 @@ async function handleEachBatch(payload, opts, deps) {
 });
 deps.onDlq?.(envelope, "ttl-expired");
 } else {
-
+const ttlHandler = opts.onTtlExpired ?? deps.onTtlExpired;
+await ttlHandler?.({
 topic: batch.topic,
 ageMs,
 messageTtlMs: opts.messageTtlMs,
@@ -942,10 +948,11 @@ async function handleEachBatch(payload, opts, deps) {
 );
 }
 
-// src/client/consumer/subscribe-retry.ts
+// src/client/kafka.client/consumer/subscribe-retry.ts
 async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
 const maxAttempts = retryOpts?.retries ?? 5;
 const backoffMs = retryOpts?.backoffMs ?? 5e3;
+const displayTopics = topics.map((t) => t instanceof RegExp ? t.toString() : t).join(", ");
 for (let attempt = 1; attempt <= maxAttempts; attempt++) {
 try {
 await consumer.subscribe({ topics });
@@ -955,14 +962,14 @@ async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
 const msg = toError(error).message;
 const delay = Math.floor(Math.random() * backoffMs);
 logger.warn(
-`Failed to subscribe to [${ …
+`Failed to subscribe to [${displayTopics}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${delay}ms...`
 );
 await sleep(delay);
 }
 }
 }
 
-// src/client/kafka.client/retry-topic.ts
+// src/client/kafka.client/consumer/retry-topic.ts
 async function waitForPartitionAssignment(consumer, topics, logger, timeoutMs = 1e4) {
 const topicSet = new Set(topics);
 const deadline = Date.now() + timeoutMs;
@@ -1216,9 +1223,7 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM…
 return levelGroupIds;
 }
 
-// src/client/kafka.client/ …
-var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript.KafkaJS;
-var _activeTransactionalIds = /* @__PURE__ */ new Set();
+// src/client/kafka.client/consumer/queue.ts
 var AsyncQueue = class {
 constructor(highWaterMark = Infinity, onFull = () => {
 }, onDrained = () => {
@@ -1267,216 +1272,806 @@ var AsyncQueue = class {
 return new Promise((resolve, reject) => this.waiting.push({ resolve, reject }));
 }
 };
-
-
-
-
-
-/** Maps transactionalId → Producer for each active retry level consumer. */
-retryTxProducers = /* @__PURE__ */ new Map();
-consumers = /* @__PURE__ */ new Map();
-admin;
-logger;
-autoCreateTopicsEnabled;
-strictSchemasEnabled;
-numPartitions;
-ensuredTopics = /* @__PURE__ */ new Set();
-/** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
-ensureTopicPromises = /* @__PURE__ */ new Map();
-defaultGroupId;
-schemaRegistry = /* @__PURE__ */ new Map();
-runningConsumers = /* @__PURE__ */ new Map();
-consumerCreationOptions = /* @__PURE__ */ new Map();
-/** Maps each main consumer groupId to its companion retry level groupIds. */
-companionGroupIds = /* @__PURE__ */ new Map();
-instrumentation;
-onMessageLost;
-onTtlExpired;
-onRebalance;
-/** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
-txId;
-/** Per-topic event counters, lazily created on first event. Aggregated by `getMetrics()`. */
-_topicMetrics = /* @__PURE__ */ new Map();
-/** Monotonically increasing Lamport clock stamped on every outgoing message. */
-_lamportClock = 0;
-/** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
-dedupStates = /* @__PURE__ */ new Map();
-/** Circuit breaker state per `"${gid}:${topic}:${partition}"` key. */
-circuitStates = /* @__PURE__ */ new Map();
-/** Circuit breaker config per groupId, set at startConsumer/startBatchConsumer time. */
-circuitConfigs = /* @__PURE__ */ new Map();
-isAdminConnected = false;
-inFlightTotal = 0;
-drainResolvers = [];
-clientId;
-constructor(clientId, groupId, brokers, options) {
-this.clientId = clientId;
-this.defaultGroupId = groupId;
-this.logger = options?.logger ?? {
-log: (msg) => console.log(`[KafkaClient:${clientId}] ${msg}`),
-warn: (msg, ...args) => console.warn(`[KafkaClient:${clientId}] ${msg}`, ...args),
-error: (msg, ...args) => console.error(`[KafkaClient:${clientId}] ${msg}`, ...args),
-debug: (msg, ...args) => console.debug(`[KafkaClient:${clientId}] ${msg}`, ...args)
-};
-this.autoCreateTopicsEnabled = options?.autoCreateTopics ?? false;
-this.strictSchemasEnabled = options?.strictSchemas ?? true;
-this.numPartitions = options?.numPartitions ?? 1;
-this.instrumentation = options?.instrumentation ?? [];
-this.onMessageLost = options?.onMessageLost;
-this.onTtlExpired = options?.onTtlExpired;
-this.onRebalance = options?.onRebalance;
-this.txId = options?.transactionalId ?? `${clientId}-tx`;
-this.kafka = new KafkaClass({
-kafkaJS: {
-clientId: this.clientId,
-brokers,
-logLevel: KafkaLogLevel.ERROR
-}
-});
-this.producer = this.kafka.producer({
-kafkaJS: {
-acks: -1
-}
-});
-this.admin = this.kafka.admin();
+
+// src/client/kafka.client/infra/circuit-breaker.ts
+var CircuitBreakerManager = class {
+constructor(deps) {
+this.deps = deps;
 }
-
-
-
-
-key: options.key,
-headers: options.headers,
-correlationId: options.correlationId,
-schemaVersion: options.schemaVersion,
-eventId: options.eventId
-}
-]);
-await this.producer.send(payload);
-this.notifyAfterSend(payload.topic, payload.messages.length);
+states = /* @__PURE__ */ new Map();
+configs = /* @__PURE__ */ new Map();
+setConfig(gid, options) {
+this.configs.set(gid, options);
 }
-
-
-
-
+/**
+* Returns a snapshot of the circuit breaker state for a given topic-partition.
+* Returns `undefined` when no state exists for the key.
+*/
+getState(topic2, partition, gid) {
+const state = this.states.get(`${gid}:${topic2}:${partition}`);
+if (!state) return void 0;
+return {
+status: state.status,
+failures: state.window.filter((v) => !v).length,
+windowSize: state.window.length
+};
 }
-/**
-
-
-
-
-
+/**
+* Record a failure for the given envelope and group.
+* Drives the CLOSED → OPEN and HALF-OPEN → OPEN transitions.
+*/
+onFailure(envelope, gid) {
+const cfg = this.configs.get(gid);
+if (!cfg) return;
+const threshold = cfg.threshold ?? 5;
+const recoveryMs = cfg.recoveryMs ?? 3e4;
+const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
+let state = this.states.get(stateKey);
+if (!state) {
+state = { status: "closed", window: [], successes: 0 };
+this.states.set(stateKey, state);
+}
+if (state.status === "open") return;
+const openCircuit = () => {
+state.status = "open";
+state.window = [];
+state.successes = 0;
+clearTimeout(state.timer);
+for (const inst of this.deps.instrumentation)
+inst.onCircuitOpen?.(envelope.topic, envelope.partition);
+this.deps.pauseConsumer(gid, [{ topic: envelope.topic, partitions: [envelope.partition] }]);
+state.timer = setTimeout(() => {
+state.status = "half-open";
+state.successes = 0;
+this.deps.logger.log(
+`[CircuitBreaker] HALF-OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
 );
-
-
-
-
-
-
-
-
-
-
-
-
-return p;
-})();
-this.txProducerInitPromise = initPromise.catch((err) => {
-this.txProducerInitPromise = void 0;
-throw err;
-});
+for (const inst of this.deps.instrumentation)
+inst.onCircuitHalfOpen?.(envelope.topic, envelope.partition);
+this.deps.resumeConsumer(gid, [{ topic: envelope.topic, partitions: [envelope.partition] }]);
+}, recoveryMs);
+};
+if (state.status === "half-open") {
+clearTimeout(state.timer);
+this.deps.logger.warn(
+`[CircuitBreaker] OPEN (half-open failure) \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
+);
+openCircuit();
+return;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
+state.window = [...state.window, false];
+if (state.window.length > windowSize) {
+state.window = state.window.slice(state.window.length - windowSize);
+}
+const failures = state.window.filter((v) => !v).length;
+if (failures >= threshold) {
+this.deps.logger.warn(
+`[CircuitBreaker] OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition} (${failures}/${state.window.length} failures, threshold=${threshold})`
+);
+openCircuit();
+}
+}
+/**
+* Record a success for the given envelope and group.
+* Drives the HALF-OPEN → CLOSED transition and updates the success window.
+*/
+onSuccess(envelope, gid) {
+const cfg = this.configs.get(gid);
+if (!cfg) return;
+const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
+const state = this.states.get(stateKey);
+if (!state) return;
+const halfOpenSuccesses = cfg.halfOpenSuccesses ?? 1;
+if (state.status === "half-open") {
+state.successes++;
+if (state.successes >= halfOpenSuccesses) {
+clearTimeout(state.timer);
+state.timer = void 0;
+state.status = "closed";
+state.window = [];
+state.successes = 0;
+this.deps.logger.log(
+`[CircuitBreaker] CLOSED \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
 );
+for (const inst of this.deps.instrumentation)
+inst.onCircuitClose?.(envelope.topic, envelope.partition);
+}
+} else if (state.status === "closed") {
+const threshold = cfg.threshold ?? 5;
+const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
+state.window = [...state.window, true];
+if (state.window.length > windowSize) {
+state.window = state.window.slice(state.window.length - windowSize);
 }
-throw error;
 }
 }
-// ── Producer lifecycle ───────────────────────────────────────────
 /**
-*
-*
+* Remove all circuit state and config for the given group.
+* Called when a consumer is stopped via `stopConsumer(groupId)`.
 */
-
-
-
+removeGroup(gid) {
+for (const key of [...this.states.keys()]) {
+if (key.startsWith(`${gid}:`)) {
+clearTimeout(this.states.get(key).timer);
+this.states.delete(key);
+}
+}
+this.configs.delete(gid);
+}
+/** Clear all circuit state and config. Called on `disconnect()`. */
+clear() {
+for (const state of this.states.values()) clearTimeout(state.timer);
+this.states.clear();
+this.configs.clear();
+}
+};
+
+// src/client/kafka.client/admin/ops.ts
+var AdminOps = class {
+constructor(deps) {
+this.deps = deps;
+}
+isConnected = false;
+/** Underlying admin client — used by index.ts for topic validation. */
+get admin() {
+return this.deps.admin;
+}
+/** Whether the admin client is currently connected. */
+get connected() {
+return this.isConnected;
 }
 /**
-*
+* Connect the admin client if not already connected.
+* The flag is only set to `true` after a successful connect — if `admin.connect()`
+* throws the flag remains `false` so the next call will retry the connection.
 */
-async …
-
-
+async ensureConnected() {
+if (this.isConnected) return;
+try {
+await this.deps.admin.connect();
+this.isConnected = true;
+} catch (err) {
+this.isConnected = false;
+throw err;
+}
 }
-
-
+/** Disconnect admin if connected. Resets the connected flag. */
+async disconnect() {
+if (!this.isConnected) return;
+await this.deps.admin.disconnect();
+this.isConnected = false;
+}
+async resetOffsets(groupId, topic2, position) {
+const gid = groupId ?? this.deps.defaultGroupId;
+if (this.deps.runningConsumers.has(gid)) {
 throw new Error(
-
+`resetOffsets: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before resetting offsets.`
 );
 }
-
-const …
-
-
-
-
-
-
-
+await this.ensureConnected();
+const partitionOffsets = await this.deps.admin.fetchTopicOffsets(topic2);
+const partitions = partitionOffsets.map(({ partition, low, high }) => ({
+partition,
+offset: position === "earliest" ? low : high
+}));
+await this.deps.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
+this.deps.logger.log(
+`Offsets reset to ${position} for group "${gid}" on topic "${topic2}"`
 );
-
-
-
-
-
+}
+/**
+* Seek specific topic-partition pairs to explicit offsets for a stopped consumer group.
+* Throws if the group is still running — call `stopConsumer(groupId)` first.
+* Assignments are grouped by topic and committed via `admin.setOffsets`.
+*/
+async seekToOffset(groupId, assignments) {
+const gid = groupId ?? this.deps.defaultGroupId;
+if (this.deps.runningConsumers.has(gid)) {
+throw new Error(
+`seekToOffset: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
+);
 }
-await …
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+await this.ensureConnected();
+const byTopic = /* @__PURE__ */ new Map();
+for (const { topic: topic2, partition, offset } of assignments) {
+const list = byTopic.get(topic2) ?? [];
+list.push({ partition, offset });
+byTopic.set(topic2, list);
+}
+for (const [topic2, partitions] of byTopic) {
+await this.deps.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
+this.deps.logger.log(
+`Offsets set for group "${gid}" on "${topic2}": ${JSON.stringify(partitions)}`
+);
+}
+}
+/**
+* Seek specific topic-partition pairs to the offset nearest to a given timestamp
+* (in milliseconds) for a stopped consumer group.
+* Throws if the group is still running — call `stopConsumer(groupId)` first.
+* Assignments are grouped by topic and committed via `admin.setOffsets`.
+* If no offset exists at the requested timestamp (e.g. empty partition or
+* future timestamp), the partition falls back to `-1` (end of topic — new messages only).
+*/
+async seekToTimestamp(groupId, assignments) {
+const gid = groupId ?? this.deps.defaultGroupId;
+if (this.deps.runningConsumers.has(gid)) {
+throw new Error(
+`seekToTimestamp: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
+);
+}
+await this.ensureConnected();
+const byTopic = /* @__PURE__ */ new Map();
+for (const { topic: topic2, partition, timestamp } of assignments) {
+const list = byTopic.get(topic2) ?? [];
+list.push({ partition, timestamp });
+byTopic.set(topic2, list);
+}
+for (const [topic2, parts] of byTopic) {
+const offsets = await Promise.all(
+parts.map(async ({ partition, timestamp }) => {
+const results = await this.deps.admin.fetchTopicOffsetsByTime(
+topic2,
+timestamp
+);
+const found = results.find(
+(r) => r.partition === partition
+);
+return { partition, offset: found?.offset ?? "-1" };
+})
+);
+await this.deps.admin.setOffsets({ groupId: gid, topic: topic2, partitions: offsets });
+this.deps.logger.log(
+`Offsets set by timestamp for group "${gid}" on "${topic2}": ${JSON.stringify(offsets)}`
+);
+}
+}
+/**
+* Query consumer group lag per partition.
+* Lag = broker high-watermark − last committed offset.
+* A committed offset of -1 (nothing committed yet) counts as full lag.
+*
+* Returns an empty array when the consumer group has never committed any
+* offsets (freshly created group, `autoCommit: false` with no manual commits,
+* or group not yet assigned). This is a Kafka protocol limitation:
+* `fetchOffsets` only returns data for topic-partitions that have at least one
+* committed offset. Use `checkStatus()` to verify broker connectivity in that case.
+*/
+async getConsumerLag(groupId) {
+const gid = groupId ?? this.deps.defaultGroupId;
+await this.ensureConnected();
+const committedByTopic = await this.deps.admin.fetchOffsets({ groupId: gid });
+const brokerOffsetsAll = await Promise.all(
+committedByTopic.map(({ topic: topic2 }) => this.deps.admin.fetchTopicOffsets(topic2))
+);
+const result = [];
+for (let i = 0; i < committedByTopic.length; i++) {
+const { topic: topic2, partitions } = committedByTopic[i];
+const brokerOffsets = brokerOffsetsAll[i];
+for (const { partition, offset } of partitions) {
+const broker = brokerOffsets.find((o) => o.partition === partition);
+if (!broker) continue;
+const committed = parseInt(offset, 10);
+const high = parseInt(broker.high, 10);
+const lag = committed === -1 ? high : Math.max(0, high - committed);
+result.push({ topic: topic2, partition, lag });
+}
+}
+return result;
+}
+/** Check broker connectivity. Never throws — returns a discriminated union. */
+async checkStatus() {
+try {
+await this.ensureConnected();
+const topics = await this.deps.admin.listTopics();
+return { status: "up", clientId: this.deps.clientId, topics };
+} catch (error) {
+return {
+status: "down",
+clientId: this.deps.clientId,
+error: error instanceof Error ? error.message : String(error)
+};
+}
+}
+/**
+* List all consumer groups known to the broker.
+* Useful for monitoring which groups are active and their current state.
+*/
+async listConsumerGroups() {
+await this.ensureConnected();
+const result = await this.deps.admin.listGroups();
+return result.groups.map((g) => ({
+groupId: g.groupId,
+state: g.state ?? "Unknown"
+}));
+}
+/**
+* Describe topics — returns partition layout, leader, replicas, and ISR.
+* @param topics Topic names to describe. Omit to describe all topics.
+*/
+async describeTopics(topics) {
+await this.ensureConnected();
+const result = await this.deps.admin.fetchTopicMetadata(
+topics ? { topics } : void 0
+);
+return result.topics.map((t) => ({
+name: t.name,
+partitions: t.partitions.map((p) => ({
+partition: p.partitionId ?? p.partition,
+leader: p.leader,
+replicas: p.replicas.map(
+(r) => typeof r === "number" ? r : r.nodeId
+),
+isr: p.isr.map(
+(r) => typeof r === "number" ? r : r.nodeId
+)
+}))
+}));
+}
+/**
+* Delete records from a topic up to (but not including) the given offsets.
+* All messages with offsets **before** the given offset are deleted.
+*/
+async deleteRecords(topic2, partitions) {
+await this.ensureConnected();
+await this.deps.admin.deleteTopicRecords({ topic: topic2, partitions });
+}
+/**
+* When `retryTopics: true` and `autoCreateTopics: false`, verify that every
+* `<topic>.retry.<level>` topic already exists. Throws a clear error at startup
+* rather than silently discovering missing topics on the first handler failure.
+*/
+async validateRetryTopicsExist(topicNames, maxRetries) {
+await this.ensureConnected();
+const existing = new Set(await this.deps.admin.listTopics());
+const missing = [];
+for (const t of topicNames) {
+for (let level = 1; level <= maxRetries; level++) {
+const retryTopic = `${t}.retry.${level}`;
+if (!existing.has(retryTopic)) missing.push(retryTopic);
+}
+}
+if (missing.length > 0) {
+throw new Error(
+`retryTopics: true but the following retry topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
+);
+}
+}
+/**
+* When `autoCreateTopics` is disabled, verify that `<topic>.dlq` exists for every
+* consumed topic. Throws a clear error at startup rather than silently discovering
+* missing DLQ topics on the first handler failure.
+*/
+async validateDlqTopicsExist(topicNames) {
+await this.ensureConnected();
+const existing = new Set(await this.deps.admin.listTopics());
+const missing = topicNames.filter((t) => !existing.has(`${t}.dlq`)).map((t) => `${t}.dlq`);
+if (missing.length > 0) {
+throw new Error(
+`dlq: true but the following DLQ topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
+);
+}
+}
+/**
+* When `deduplication.strategy: 'topic'` and `autoCreateTopics: false`, verify
+* that every `<topic>.duplicates` destination topic already exists. Throws a
+* clear error at startup rather than silently dropping duplicates on first hit.
+*/
+async validateDuplicatesTopicsExist(topicNames, customDestination) {
+await this.ensureConnected();
+const existing = new Set(await this.deps.admin.listTopics());
+const toCheck = customDestination ? [customDestination] : topicNames.map((t) => `${t}.duplicates`);
+const missing = toCheck.filter((t) => !existing.has(t));
+if (missing.length > 0) {
+throw new Error(
+`deduplication.strategy: 'topic' but the following duplicate-routing topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
+);
+}
+}
+};
+
+// src/client/kafka.client/consumer/dlq-replay.ts
+async function replayDlqTopic(topic2, options = {}, deps) {
+const dlqTopic = `${topic2}.dlq`;
+const partitionOffsets = await deps.fetchTopicOffsets(dlqTopic);
+const activePartitions = partitionOffsets.filter((p) => parseInt(p.high, 10) > 0);
+if (activePartitions.length === 0) {
+deps.logger.log(`replayDlq: "${dlqTopic}" is empty \u2014 nothing to replay`);
+return { replayed: 0, skipped: 0 };
+}
+const highWatermarks = new Map(
+activePartitions.map(({ partition, high }) => [partition, parseInt(high, 10)])
+);
+const processedOffsets = /* @__PURE__ */ new Map();
+let replayed = 0;
+let skipped = 0;
+const tempGroupId = `${dlqTopic}-replay-${Date.now()}`;
+await new Promise((resolve, reject) => {
+const consumer = deps.createConsumer(tempGroupId);
+const cleanup = () => deps.cleanupConsumer(consumer, tempGroupId);
+consumer.connect().then(() => subscribeWithRetry(consumer, [dlqTopic], deps.logger)).then(
+() => consumer.run({
+eachMessage: async ({ partition, message }) => {
+if (!message.value) return;
+const offset = parseInt(message.offset, 10);
+processedOffsets.set(partition, offset);
+const headers = decodeHeaders(message.headers);
+const targetTopic = options.targetTopic ?? headers["x-dlq-original-topic"];
+const originalHeaders = Object.fromEntries(
+Object.entries(headers).filter(([k]) => !deps.dlqHeaderKeys.has(k))
+);
+const value = message.value.toString();
+const shouldProcess = !options.filter || options.filter(headers, value);
+if (!targetTopic || !shouldProcess) {
+skipped++;
+} else if (options.dryRun) {
+deps.logger.log(`[DLQ replay dry-run] Would replay to "${targetTopic}"`);
+replayed++;
+} else {
+await deps.send(targetTopic, [{ value, headers: originalHeaders }]);
+replayed++;
+}
+const allDone = Array.from(highWatermarks.entries()).every(
+([p, hwm]) => (processedOffsets.get(p) ?? -1) >= hwm - 1
+);
+if (allDone) {
+cleanup();
+resolve();
+}
+}
+})
+).catch((err) => {
+cleanup();
+reject(err);
+});
+});
+deps.logger.log(`replayDlq: replayed ${replayed}, skipped ${skipped} from "${dlqTopic}"`);
+return { replayed, skipped };
+}
+
+// src/client/kafka.client/infra/metrics-manager.ts
+var MetricsManager = class {
+constructor(deps) {
+this.deps = deps;
+}
+topicMetrics = /* @__PURE__ */ new Map();
+metricsFor(topic2) {
+let m = this.topicMetrics.get(topic2);
+if (!m) {
+m = { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
+this.topicMetrics.set(topic2, m);
+}
+return m;
+}
+/** Fire `afterSend` instrumentation hooks for each message in a batch. */
+notifyAfterSend(topic2, count) {
+for (let i = 0; i < count; i++)
+for (const inst of this.deps.instrumentation) inst.afterSend?.(topic2);
+}
+notifyRetry(envelope, attempt, maxRetries) {
+this.metricsFor(envelope.topic).retryCount++;
+for (const inst of this.deps.instrumentation) inst.onRetry?.(envelope, attempt, maxRetries);
+}
+notifyDlq(envelope, reason, gid) {
+this.metricsFor(envelope.topic).dlqCount++;
+for (const inst of this.deps.instrumentation) inst.onDlq?.(envelope, reason);
+if (gid) this.deps.onCircuitFailure(envelope, gid);
+}
+notifyDuplicate(envelope, strategy) {
+this.metricsFor(envelope.topic).dedupCount++;
+for (const inst of this.deps.instrumentation) inst.onDuplicate?.(envelope, strategy);
+}
+notifyMessage(envelope, gid) {
+this.metricsFor(envelope.topic).processedCount++;
+for (const inst of this.deps.instrumentation) inst.onMessage?.(envelope);
+if (gid) this.deps.onCircuitSuccess(envelope, gid);
+}
+getMetrics(topic2) {
+if (topic2 !== void 0) {
+const m = this.topicMetrics.get(topic2);
+return m ? { ...m } : { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
+}
+const agg = { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
+for (const m of this.topicMetrics.values()) {
+agg.processedCount += m.processedCount;
+agg.retryCount += m.retryCount;
+agg.dlqCount += m.dlqCount;
+agg.dedupCount += m.dedupCount;
+}
+return agg;
+}
+resetMetrics(topic2) {
+if (topic2 !== void 0) {
+this.topicMetrics.delete(topic2);
+return;
+}
+this.topicMetrics.clear();
+}
+};
+
+// src/client/kafka.client/infra/inflight-tracker.ts
+var InFlightTracker = class {
+constructor(warn) {
+this.warn = warn;
+}
+inFlightTotal = 0;
+drainResolvers = [];
+track(fn) {
+this.inFlightTotal++;
+return fn().finally(() => {
+this.inFlightTotal--;
+if (this.inFlightTotal === 0) this.drainResolvers.splice(0).forEach((r) => r());
+});
+}
+waitForDrain(timeoutMs) {
+if (this.inFlightTotal === 0) return Promise.resolve();
+return new Promise((resolve) => {
+let handle;
+const onDrain = () => {
+clearTimeout(handle);
+resolve();
+};
+this.drainResolvers.push(onDrain);
+handle = setTimeout(() => {
+const idx = this.drainResolvers.indexOf(onDrain);
+if (idx !== -1) this.drainResolvers.splice(idx, 1);
+this.warn(
+`Drain timed out after ${timeoutMs}ms \u2014 ${this.inFlightTotal} handler(s) still in flight`
+);
+resolve();
+}, timeoutMs);
+});
+}
+};
+
+// src/client/kafka.client/index.ts
+var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript2.KafkaJS;
+var _activeTransactionalIds = /* @__PURE__ */ new Set();
+var KafkaClient = class _KafkaClient {
+kafka;
+producer;
+txProducer;
+txProducerInitPromise;
+/** Maps transactionalId → Producer for each active retry level consumer. */
+retryTxProducers = /* @__PURE__ */ new Map();
+consumers = /* @__PURE__ */ new Map();
+logger;
+autoCreateTopicsEnabled;
+strictSchemasEnabled;
+numPartitions;
+ensuredTopics = /* @__PURE__ */ new Set();
+/** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
+ensureTopicPromises = /* @__PURE__ */ new Map();
+defaultGroupId;
+schemaRegistry = /* @__PURE__ */ new Map();
+runningConsumers = /* @__PURE__ */ new Map();
+consumerCreationOptions = /* @__PURE__ */ new Map();
+/** Maps each main consumer groupId to its companion retry level groupIds. */
+companionGroupIds = /* @__PURE__ */ new Map();
+instrumentation;
+onMessageLost;
+onTtlExpired;
+onRebalance;
+/** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
+txId;
+/** Monotonically increasing Lamport clock stamped on every outgoing message. */
+_lamportClock = 0;
+/** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
+dedupStates = /* @__PURE__ */ new Map();
+circuitBreaker;
+adminOps;
+metrics;
+inFlight;
+clientId;
+_producerOpsDeps;
+_consumerOpsDeps;
+_retryTopicDeps;
+/** DLQ header keys added by the pipeline — stripped before re-publishing. */
+static DLQ_HEADER_KEYS = /* @__PURE__ */ new Set([
+"x-dlq-original-topic",
+"x-dlq-failed-at",
+"x-dlq-error-message",
+"x-dlq-error-stack",
+"x-dlq-attempt-count"
+]);
+constructor(clientId, groupId, brokers, options) {
+this.clientId = clientId;
+this.defaultGroupId = groupId;
+this.logger = options?.logger ?? {
+log: (msg) => console.log(`[KafkaClient:${clientId}] ${msg}`),
+warn: (msg, ...args) => console.warn(`[KafkaClient:${clientId}] ${msg}`, ...args),
+error: (msg, ...args) => console.error(`[KafkaClient:${clientId}] ${msg}`, ...args),
+debug: (msg, ...args) => console.debug(`[KafkaClient:${clientId}] ${msg}`, ...args)
+};
+this.autoCreateTopicsEnabled = options?.autoCreateTopics ?? false;
+this.strictSchemasEnabled = options?.strictSchemas ?? true;
+this.numPartitions = options?.numPartitions ?? 1;
+this.instrumentation = options?.instrumentation ?? [];
+this.onMessageLost = options?.onMessageLost;
+this.onTtlExpired = options?.onTtlExpired;
+this.onRebalance = options?.onRebalance;
+this.txId = options?.transactionalId ?? `${clientId}-tx`;
+this.kafka = new KafkaClass({
+kafkaJS: {
+clientId: this.clientId,
+brokers,
+logLevel: KafkaLogLevel.ERROR
+}
+});
+this.producer = this.kafka.producer({ kafkaJS: { acks: -1 } });
+this.adminOps = new AdminOps({
+admin: this.kafka.admin(),
+logger: this.logger,
+runningConsumers: this.runningConsumers,
+defaultGroupId: this.defaultGroupId,
+clientId: this.clientId
+});
+this.circuitBreaker = new CircuitBreakerManager({
+pauseConsumer: (gid, assignments) => this.pauseConsumer(gid, assignments),
+resumeConsumer: (gid, assignments) => this.resumeConsumer(gid, assignments),
+logger: this.logger,
+instrumentation: this.instrumentation
+});
+this.metrics = new MetricsManager({
+instrumentation: this.instrumentation,
+onCircuitFailure: (envelope, gid) => this.circuitBreaker.onFailure(envelope, gid),
+onCircuitSuccess: (envelope, gid) => this.circuitBreaker.onSuccess(envelope, gid)
+});
+this.inFlight = new InFlightTracker((msg) => this.logger.warn(msg));
+this._producerOpsDeps = {
+schemaRegistry: this.schemaRegistry,
+strictSchemasEnabled: this.strictSchemasEnabled,
+instrumentation: this.instrumentation,
+logger: this.logger,
+nextLamportClock: () => ++this._lamportClock
+};
+this._consumerOpsDeps = {
+consumers: this.consumers,
+consumerCreationOptions: this.consumerCreationOptions,
+kafka: this.kafka,
+onRebalance: this.onRebalance,
+logger: this.logger
+};
+this._retryTopicDeps = this.buildRetryTopicDeps();
+}
+async sendMessage(topicOrDesc, message, options = {}) {
+const payload = await this.preparePayload(
+topicOrDesc,
+[
+{
+value: message,
+key: options.key,
+headers: options.headers,
+correlationId: options.correlationId,
+schemaVersion: options.schemaVersion,
+eventId: options.eventId
+}
+],
+options.compression
+);
+await this.producer.send(payload);
+this.metrics.notifyAfterSend(payload.topic, payload.messages.length);
+}
+/**
+* Send a null-value (tombstone) message. Used with log-compacted topics to signal
+* that a key's record should be removed during the next compaction cycle.
+*
+* Tombstones skip envelope headers, schema validation, and Lamport clock stamping.
+* Both `beforeSend` and `afterSend` instrumentation hooks are still called so tracing works correctly.
+*
+* @param topic Topic name.
+* @param key Partition key identifying the record to tombstone.
+* @param headers Optional custom Kafka headers.
+*/
+async sendTombstone(topic2, key, headers) {
+const hdrs = { ...headers };
+for (const inst of this.instrumentation) inst.beforeSend?.(topic2, hdrs);
+await this.ensureTopic(topic2);
+await this.producer.send({ topic: topic2, messages: [{ value: null, key, headers: hdrs }] });
+for (const inst of this.instrumentation) inst.afterSend?.(topic2);
+}
+async sendBatch(topicOrDesc, messages, options) {
+const payload = await this.preparePayload(topicOrDesc, messages, options?.compression);
+await this.producer.send(payload);
+this.metrics.notifyAfterSend(payload.topic, payload.messages.length);
+}
+/** Execute multiple sends atomically. Commits on success, aborts on error. */
+async transaction(fn) {
+if (!this.txProducerInitPromise) {
+if (_activeTransactionalIds.has(this.txId)) {
+this.logger.warn(
+`transactionalId "${this.txId}" is already in use by another KafkaClient in this process. Kafka will fence one of the producers. Set a unique \`transactionalId\` (or distinct \`clientId\`) per instance.`
+);
+}
+const initPromise = (async () => {
+const p = this.kafka.producer({
+kafkaJS: { acks: -1, idempotent: true, transactionalId: this.txId, maxInFlightRequests: 1 }
+});
+await p.connect();
+_activeTransactionalIds.add(this.txId);
+return p;
+})();
+this.txProducerInitPromise = initPromise.catch((err) => {
+this.txProducerInitPromise = void 0;
+throw err;
+});
+}
+this.txProducer = await this.txProducerInitPromise;
+const tx = await this.txProducer.transaction();
+try {
+const ctx = {
+send: async (topicOrDesc, message, options = {}) => {
+const payload = await this.preparePayload(topicOrDesc, [
+{
+value: message,
+key: options.key,
+headers: options.headers,
+correlationId: options.correlationId,
+schemaVersion: options.schemaVersion,
+eventId: options.eventId
+}
+]);
+await tx.send(payload);
+this.metrics.notifyAfterSend(payload.topic, payload.messages.length);
+},
+sendBatch: async (topicOrDesc, messages) => {
+const payload = await this.preparePayload(topicOrDesc, messages);
+await tx.send(payload);
+this.metrics.notifyAfterSend(payload.topic, payload.messages.length);
+}
+};
+await fn(ctx);
+await tx.commit();
+} catch (error) {
+try {
+await tx.abort();
+} catch (abortError) {
+this.logger.error("Failed to abort transaction:", toError(abortError).message);
+}
+throw error;
+}
+}
+// ── Producer lifecycle ───────────────────────────────────────────
+/**
+* Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
+* @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
+*/
+async connectProducer() {
+await this.producer.connect();
+this.logger.log("Producer connected");
+}
+/**
+* @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
+*/
+async disconnectProducer() {
+await this.producer.disconnect();
+this.logger.log("Producer disconnected");
+}
+async startConsumer(topics, handleMessage, options = {}) {
+this.validateTopicConsumerOpts(topics, options);
+const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
+const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", setupOptions);
+if (options.circuitBreaker) this.circuitBreaker.setConfig(gid, options.circuitBreaker);
+const deps = this.messageDepsFor(gid);
+const eosMainContext = await this.makeEosMainContext(gid, consumer, options);
+await consumer.run({
+eachMessage: (payload) => this.inFlight.track(
+() => handleEachMessage(
+payload,
+{
+schemaMap,
+handleMessage,
+interceptors,
+dlq,
+retry,
+retryTopics: options.retryTopics,
+timeoutMs: options.handlerTimeoutMs,
+wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
+deduplication: this.resolveDeduplicationContext(gid, options.deduplication),
+messageTtlMs: options.messageTtlMs,
+onTtlExpired: options.onTtlExpired,
+eosMainContext
 },
 deps
 )
@@ -1484,54 +2079,24 @@ var KafkaClient = class _KafkaClient {
 });
 this.runningConsumers.set(gid, "eachMessage");
 if (options.retryTopics && retry) {
-
-await this.validateRetryTopicsExist(topicNames, retry.maxRetries);
-}
-const companions = await startRetryTopicConsumers(
-topicNames,
-gid,
-handleMessage,
-retry,
-dlq,
-interceptors,
-schemaMap,
-this.retryTopicDeps,
-options.retryTopicAssignmentTimeoutMs
-);
-this.companionGroupIds.set(gid, companions);
+await this.launchRetryChain(gid, topicNames, handleMessage, retry, dlq, interceptors, schemaMap, options.retryTopicAssignmentTimeoutMs);
 }
 return { groupId: gid, stop: () => this.stopConsumer(gid) };
 }
 async startBatchConsumer(topics, handleBatch, options = {}) {
-
-
-"retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
-);
-}
-if (options.retryTopics) {
-} else if (options.autoCommit !== false) {
+this.validateTopicConsumerOpts(topics, options);
+if (!options.retryTopics && options.autoCommit !== false) {
 this.logger.debug?.(
 `startBatchConsumer: autoCommit is enabled (default true). If your handler calls resolveOffset() or commitOffsetsIfNecessary(), set autoCommit: false to avoid offset conflicts.`
 );
 }
 const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
 const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", setupOptions);
-if (options.circuitBreaker)
-this.circuitConfigs.set(gid, options.circuitBreaker);
+if (options.circuitBreaker) this.circuitBreaker.setConfig(gid, options.circuitBreaker);
 const deps = this.messageDepsFor(gid);
-const …
-const deduplication = this.resolveDeduplicationContext(
-gid,
-options.deduplication
-);
-let eosMainContext;
-if (options.retryTopics && retry) {
-const mainTxId = `${gid}-main-tx`;
-const txProducer = await this.createRetryTxProducer(mainTxId);
-eosMainContext = { txProducer, consumer };
-}
+const eosMainContext = await this.makeEosMainContext(gid, consumer, options);
 await consumer.run({
-eachBatch: (payload) => this. …
+eachBatch: (payload) => this.inFlight.track(
 () => handleEachBatch(
 payload,
 {
@@ -1541,10 +2106,11 @@ var KafkaClient = class _KafkaClient {
 dlq,
 retry,
 retryTopics: options.retryTopics,
-timeoutMs,
+timeoutMs: options.handlerTimeoutMs,
 wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
-deduplication,
+deduplication: this.resolveDeduplicationContext(gid, options.deduplication),
 messageTtlMs: options.messageTtlMs,
+onTtlExpired: options.onTtlExpired,
 eosMainContext
 },
 deps
@@ -1553,9 +2119,6 @@ var KafkaClient = class _KafkaClient {
 });
 this.runningConsumers.set(gid, "eachBatch");
 if (options.retryTopics && retry) {
-if (!this.autoCreateTopicsEnabled) {
-await this.validateRetryTopicsExist(topicNames, retry.maxRetries);
-}
 const handleMessageForRetry = (env) => handleBatch([env], {
 partition: env.partition,
 highWatermark: null,
@@ -1566,18 +2129,7 @@ var KafkaClient = class _KafkaClient {
 commitOffsetsIfNecessary: async () => {
 }
 });
-
-topicNames,
-gid,
-handleMessageForRetry,
-retry,
-dlq,
-interceptors,
-schemaMap,
-this.retryTopicDeps,
-options.retryTopicAssignmentTimeoutMs
-);
-this.companionGroupIds.set(gid, companions);
+await this.launchRetryChain(gid, topicNames, handleMessageForRetry, retry, dlq, interceptors, schemaMap, options.retryTopicAssignmentTimeoutMs);
 }
 return { groupId: gid, stop: () => this.stopConsumer(gid) };
 }
@@ -1591,6 +2143,11 @@ var KafkaClient = class _KafkaClient {
|
|
|
1591
2143
|
* }
|
|
1592
2144
|
*/
|
|
1593
2145
|
consume(topic2, options) {
|
|
2146
|
+
if (options?.retryTopics) {
|
|
2147
|
+
throw new Error(
|
|
2148
|
+
"consume() does not support retryTopics (EOS retry chains). Use startConsumer() with retryTopics: true for guaranteed retry delivery."
|
|
2149
|
+
);
|
|
2150
|
+
}
|
|
1594
2151
|
const gid = options?.groupId ?? this.defaultGroupId;
|
|
1595
2152
|
const queue = new AsyncQueue(
|
|
1596
2153
|
options?.queueHighWaterMark,
|
|
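The new guard draws a clear line between the two consumption styles: the pull-style `consume()` cannot offer the transactional delivery that retry-topic chains rely on, so `retryTopics` is only accepted by `startConsumer()`. A sketch of the supported path; the option names and the `<topic>.retry.<level>` / `<topic>.dlq` naming come from the diff, and the handler signature is an assumption:

```ts
// consume() now throws if retryTopics is passed:
// client.consume("orders", { retryTopics: true }); // -> Error

// Supported: startConsumer with an EOS retry chain.
await client.startConsumer(
  ["orders"],
  async (envelope) => {
    // a throwing handler routes the message through orders.retry.1 ... orders.retry.N
  },
  {
    retry: { maxRetries: 3 }, // required: retryTopics without retry throws at setup
    retryTopics: true,
    dlq: true, // exhausted retries land in orders.dlq
  },
);
```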
@@ -1619,42 +2176,32 @@ var KafkaClient = class _KafkaClient {
|
|
|
1619
2176
|
};
|
|
1620
2177
|
}
|
|
1621
2178
|
// ── Consumer lifecycle ───────────────────────────────────────────
|
|
2179
|
+
/**
|
|
2180
|
+
* Stop all consumers or a specific group.
|
|
2181
|
+
*
|
|
2182
|
+
* If `groupId` is unspecified, all active consumers are stopped.
|
|
2183
|
+
* If `groupId` is specified, only the consumer with that group ID is stopped.
|
|
2184
|
+
*
|
|
2185
|
+
* @throws {Error} if the consumer fails to disconnect.
|
|
2186
|
+
*/
|
|
1622
2187
|
async stopConsumer(groupId) {
|
|
1623
2188
|
if (groupId !== void 0) {
|
|
1624
2189
|
const consumer = this.consumers.get(groupId);
|
|
1625
2190
|
if (!consumer) {
|
|
1626
|
-
this.logger.warn(
|
|
1627
|
-
`stopConsumer: no active consumer for group "${groupId}"`
|
|
1628
|
-
);
|
|
2191
|
+
this.logger.warn(`stopConsumer: no active consumer for group "${groupId}"`);
|
|
1629
2192
|
return;
|
|
1630
2193
|
}
|
|
1631
|
-
await consumer.disconnect().catch(
|
|
1632
|
-
(e) => this.logger.warn(
|
|
1633
|
-
`Error disconnecting consumer "${groupId}":`,
|
|
1634
|
-
toError(e).message
|
|
1635
|
-
)
|
|
1636
|
-
);
|
|
2194
|
+
await consumer.disconnect().catch((e) => this.logger.warn(`Error disconnecting consumer "${groupId}":`, toError(e).message));
|
|
1637
2195
|
this.consumers.delete(groupId);
|
|
1638
2196
|
this.runningConsumers.delete(groupId);
|
|
1639
2197
|
this.consumerCreationOptions.delete(groupId);
|
|
1640
2198
|
this.dedupStates.delete(groupId);
|
|
1641
|
-
for (const key of this.circuitStates.keys()) {
|
|
1642
|
-
if (key.startsWith(`${groupId}:`)) {
|
|
1643
|
-
clearTimeout(this.circuitStates.get(key).timer);
|
|
1644
|
-
this.circuitStates.delete(key);
|
|
1645
|
-
}
|
|
1646
|
-
}
|
|
1647
|
-
this.circuitConfigs.delete(groupId);
|
|
2199
|
+
this.circuitBreaker.removeGroup(groupId);
|
|
1648
2200
|
this.logger.log(`Consumer disconnected: group "${groupId}"`);
|
|
1649
2201
|
const mainTxId = `${groupId}-main-tx`;
|
|
1650
2202
|
const mainTxProducer = this.retryTxProducers.get(mainTxId);
|
|
1651
2203
|
if (mainTxProducer) {
|
|
1652
|
-
await mainTxProducer.disconnect().catch(
|
|
1653
|
-
(e) => this.logger.warn(
|
|
1654
|
-
`Error disconnecting main tx producer "${mainTxId}":`,
|
|
1655
|
-
toError(e).message
|
|
1656
|
-
)
|
|
1657
|
-
);
|
|
2204
|
+
await mainTxProducer.disconnect().catch((e) => this.logger.warn(`Error disconnecting main tx producer "${mainTxId}":`, toError(e).message));
|
|
1658
2205
|
_activeTransactionalIds.delete(mainTxId);
|
|
1659
2206
|
this.retryTxProducers.delete(mainTxId);
|
|
1660
2207
|
}
|
|
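As the following hunks show, the single-group path of `stopConsumer` now also tears down the group's companion retry consumers and transactional producers, so one call cleans up the whole chain. A usage sketch with an illustrative group ID:

```ts
// Stop one group: disconnects its consumer, companion retry consumers,
// and the group's transactional producers, and clears its circuit state.
await client.stopConsumer("orders-batch");

// No argument: stop every active consumer the client owns.
await client.stopConsumer();
```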
@@ -1662,12 +2209,7 @@ var KafkaClient = class _KafkaClient {
|
|
|
1662
2209
|
for (const cGroupId of companions) {
|
|
1663
2210
|
const cConsumer = this.consumers.get(cGroupId);
|
|
1664
2211
|
if (cConsumer) {
|
|
1665
|
-
await cConsumer.disconnect().catch(
|
|
1666
|
-
(e) => this.logger.warn(
|
|
1667
|
-
`Error disconnecting retry consumer "${cGroupId}":`,
|
|
1668
|
-
toError(e).message
|
|
1669
|
-
)
|
|
1670
|
-
);
|
|
2212
|
+
await cConsumer.disconnect().catch((e) => this.logger.warn(`Error disconnecting retry consumer "${cGroupId}":`, toError(e).message));
|
|
1671
2213
|
this.consumers.delete(cGroupId);
|
|
1672
2214
|
this.runningConsumers.delete(cGroupId);
|
|
1673
2215
|
this.consumerCreationOptions.delete(cGroupId);
|
|
@@ -1676,12 +2218,7 @@ var KafkaClient = class _KafkaClient {
|
|
|
1676
2218
|
const txId = `${cGroupId}-tx`;
|
|
1677
2219
|
const txProducer = this.retryTxProducers.get(txId);
|
|
1678
2220
|
if (txProducer) {
|
|
1679
|
-
await txProducer.disconnect().catch(
|
|
1680
|
-
(e) => this.logger.warn(
|
|
1681
|
-
`Error disconnecting retry tx producer "${txId}":`,
|
|
1682
|
-
toError(e).message
|
|
1683
|
-
)
|
|
1684
|
-
);
|
|
2221
|
+
await txProducer.disconnect().catch((e) => this.logger.warn(`Error disconnecting retry tx producer "${txId}":`, toError(e).message));
|
|
1685
2222
|
_activeTransactionalIds.delete(txId);
|
|
1686
2223
|
this.retryTxProducers.delete(txId);
|
|
1687
2224
|
}
|
|
@@ -1689,14 +2226,10 @@ var KafkaClient = class _KafkaClient {
|
|
|
1689
2226
|
this.companionGroupIds.delete(groupId);
|
|
1690
2227
|
} else {
|
|
1691
2228
|
const tasks = [
|
|
1692
|
-
...Array.from(this.consumers.values()).map(
|
|
1693
|
-
(c) => c.disconnect().catch(() => {
|
|
1694
|
-
})
|
|
1695
|
-
),
|
|
1696
|
-
...Array.from(this.retryTxProducers.values()).map(
|
|
1697
|
-
(p) => p.disconnect().catch(() => {
|
|
1698
|
-
})
|
|
1699
|
-
)
|
|
2229
|
+
...Array.from(this.consumers.values()).map((c) => c.disconnect().catch(() => {
|
|
2230
|
+
})),
|
|
2231
|
+
...Array.from(this.retryTxProducers.values()).map((p) => p.disconnect().catch(() => {
|
|
2232
|
+
}))
|
|
1700
2233
|
];
|
|
1701
2234
|
await Promise.allSettled(tasks);
|
|
1702
2235
|
this.consumers.clear();
|
|
@@ -1705,13 +2238,16 @@ var KafkaClient = class _KafkaClient {
|
|
|
1705
2238
|
this.companionGroupIds.clear();
|
|
1706
2239
|
this.retryTxProducers.clear();
|
|
1707
2240
|
this.dedupStates.clear();
|
|
1708
|
-
for (const state of this.circuitStates.values())
|
|
1709
|
-
clearTimeout(state.timer);
|
|
1710
|
-
this.circuitStates.clear();
|
|
1711
|
-
this.circuitConfigs.clear();
|
|
2241
|
+
this.circuitBreaker.clear();
|
|
1712
2242
|
this.logger.log("All consumers disconnected");
|
|
1713
2243
|
}
|
|
1714
2244
|
}
|
|
2245
|
+
/**
|
|
2246
|
+
* Temporarily stop delivering messages from specific partitions without disconnecting the consumer.
|
|
2247
|
+
*
|
|
2248
|
+
* @param groupId Consumer group to pause. Defaults to the client's default groupId.
|
|
2249
|
+
* @param assignments Topic-partition pairs to pause.
|
|
2250
|
+
*/
|
|
1715
2251
|
pauseConsumer(groupId, assignments) {
|
|
1716
2252
|
const gid = groupId ?? this.defaultGroupId;
|
|
1717
2253
|
const consumer = this.consumers.get(gid);
|
|
@@ -1720,11 +2256,15 @@ var KafkaClient = class _KafkaClient {
|
|
|
1720
2256
|
return;
|
|
1721
2257
|
}
|
|
1722
2258
|
consumer.pause(
|
|
1723
|
-
assignments.flatMap(
|
|
1724
|
-
({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] }))
|
|
1725
|
-
)
|
|
2259
|
+
assignments.flatMap(({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] })))
|
|
1726
2260
|
);
|
|
1727
2261
|
}
|
|
2262
|
+
/**
|
|
2263
|
+
* Resume message delivery for previously paused topic-partitions.
|
|
2264
|
+
*
|
|
2265
|
+
* @param {string|undefined} groupId Consumer group to resume. Defaults to the client's default groupId.
|
|
2266
|
+
* @param {Array<{ topic: string; partitions: number[] }>} assignments Topic-partition pairs to resume.
|
|
2267
|
+
*/
|
|
1728
2268
|
resumeConsumer(groupId, assignments) {
|
|
1729
2269
|
const gid = groupId ?? this.defaultGroupId;
|
|
1730
2270
|
const consumer = this.consumers.get(gid);
|
|
@@ -1733,9 +2273,7 @@ var KafkaClient = class _KafkaClient {
|
|
|
1733
2273
|
return;
|
|
1734
2274
|
}
|
|
1735
2275
|
consumer.resume(
|
|
1736
|
-
assignments.flatMap(
|
|
1737
|
-
({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] }))
|
|
1738
|
-
)
|
|
2276
|
+
assignments.flatMap(({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] })))
|
|
1739
2277
|
);
|
|
1740
2278
|
}
|
|
1741
2279
|
/** Pause all assigned partitions of a topic for a consumer group (used for queue backpressure). */
|
|
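Both calls take the assignment shape documented in the new JSDoc, `Array<{ topic: string; partitions: number[] }>`, and return early when the group has no active consumer. A sketch:

```ts
// Pause two partitions without disconnecting the consumer.
client.pauseConsumer("orders-batch", [{ topic: "orders", partitions: [0, 1] }]);

// Resume them later; omitting groupId targets the client's default group.
client.resumeConsumer(undefined, [{ topic: "orders", partitions: [0, 1] }]);
```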
@@ -1756,115 +2294,39 @@ var KafkaClient = class _KafkaClient {
|
|
|
1756
2294
|
if (partitions.length > 0)
|
|
1757
2295
|
consumer.resume(partitions.map((p) => ({ topic: topic2, partitions: [p] })));
|
|
1758
2296
|
}
|
|
1759
|
-
/**
|
|
1760
|
-
|
|
1761
|
-
|
|
1762
|
-
|
|
1763
|
-
|
|
1764
|
-
|
|
1765
|
-
|
|
1766
|
-
|
|
2297
|
+
/**
|
|
2298
|
+
* Re-publish messages from a dead letter queue back to the original topic.
|
|
2299
|
+
*
|
|
2300
|
+
* Messages are consumed from `<topic>.dlq` and re-published to `<topic>`.
|
|
2301
|
+
* The original topic is determined by the `x-dlq-original-topic` header.
|
|
2302
|
+
* The `x-dlq-*` headers are stripped before re-publishing.
|
|
2303
|
+
*
|
|
2304
|
+
* @param topic - The topic to replay from `<topic>.dlq`
|
|
2305
|
+
* @param options - Options for replay
|
|
2306
|
+
* @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
|
|
2307
|
+
*/
|
|
1767
2308
|
async replayDlq(topic2, options = {}) {
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1771
|
-
|
|
1772
|
-
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
|
|
1776
|
-
|
|
1777
|
-
}
|
|
1778
|
-
const highWatermarks = new Map(
|
|
1779
|
-
activePartitions.map(({ partition, high }) => [
|
|
1780
|
-
partition,
|
|
1781
|
-
parseInt(high, 10)
|
|
1782
|
-
])
|
|
1783
|
-
);
|
|
1784
|
-
const processedOffsets = /* @__PURE__ */ new Map();
|
|
1785
|
-
let replayed = 0;
|
|
1786
|
-
let skipped = 0;
|
|
1787
|
-
const tempGroupId = `${dlqTopic}-replay-${Date.now()}`;
|
|
1788
|
-
await new Promise((resolve, reject) => {
|
|
1789
|
-
const consumer = getOrCreateConsumer(
|
|
1790
|
-
tempGroupId,
|
|
1791
|
-
true,
|
|
1792
|
-
true,
|
|
1793
|
-
this.consumerOpsDeps
|
|
1794
|
-
);
|
|
1795
|
-
const cleanup = () => {
|
|
2309
|
+
await this.adminOps.ensureConnected();
|
|
2310
|
+
return replayDlqTopic(topic2, options, {
|
|
2311
|
+
logger: this.logger,
|
|
2312
|
+
fetchTopicOffsets: (t) => this.adminOps.admin.fetchTopicOffsets(t),
|
|
2313
|
+
send: async (t, messages) => {
|
|
2314
|
+
await this.producer.send({ topic: t, messages });
|
|
2315
|
+
},
|
|
2316
|
+
createConsumer: (gid) => getOrCreateConsumer(gid, true, true, this._consumerOpsDeps),
|
|
2317
|
+
cleanupConsumer: (consumer, gid) => {
|
|
1796
2318
|
consumer.disconnect().catch(() => {
|
|
1797
2319
|
}).finally(() => {
|
|
1798
|
-
this.consumers.delete(tempGroupId);
|
|
1799
|
-
this.runningConsumers.delete(tempGroupId);
|
|
1800
|
-
this.consumerCreationOptions.delete(tempGroupId);
|
|
2320
|
+
this.consumers.delete(gid);
|
|
2321
|
+
this.runningConsumers.delete(gid);
|
|
2322
|
+
this.consumerCreationOptions.delete(gid);
|
|
1801
2323
|
});
|
|
1802
|
-
}
|
|
1803
|
-
|
|
1804
|
-
() => consumer.run({
|
|
1805
|
-
eachMessage: async ({ partition, message }) => {
|
|
1806
|
-
if (!message.value) return;
|
|
1807
|
-
const offset = parseInt(message.offset, 10);
|
|
1808
|
-
processedOffsets.set(partition, offset);
|
|
1809
|
-
const headers = decodeHeaders(message.headers);
|
|
1810
|
-
const targetTopic = options.targetTopic ?? headers["x-dlq-original-topic"];
|
|
1811
|
-
const originalHeaders = Object.fromEntries(
|
|
1812
|
-
Object.entries(headers).filter(
|
|
1813
|
-
([k]) => !_KafkaClient.DLQ_HEADER_KEYS.has(k)
|
|
1814
|
-
)
|
|
1815
|
-
);
|
|
1816
|
-
const value = message.value.toString();
|
|
1817
|
-
const shouldProcess = !options.filter || options.filter(headers, value);
|
|
1818
|
-
if (!targetTopic || !shouldProcess) {
|
|
1819
|
-
skipped++;
|
|
1820
|
-
} else if (options.dryRun) {
|
|
1821
|
-
this.logger.log(
|
|
1822
|
-
`[DLQ replay dry-run] Would replay to "${targetTopic}"`
|
|
1823
|
-
);
|
|
1824
|
-
replayed++;
|
|
1825
|
-
} else {
|
|
1826
|
-
await this.producer.send({
|
|
1827
|
-
topic: targetTopic,
|
|
1828
|
-
messages: [{ value, headers: originalHeaders }]
|
|
1829
|
-
});
|
|
1830
|
-
replayed++;
|
|
1831
|
-
}
|
|
1832
|
-
const allDone = Array.from(highWatermarks.entries()).every(
|
|
1833
|
-
([p, hwm]) => (processedOffsets.get(p) ?? -1) >= hwm - 1
|
|
1834
|
-
);
|
|
1835
|
-
if (allDone) {
|
|
1836
|
-
cleanup();
|
|
1837
|
-
resolve();
|
|
1838
|
-
}
|
|
1839
|
-
}
|
|
1840
|
-
})
|
|
1841
|
-
).catch((err) => {
|
|
1842
|
-
cleanup();
|
|
1843
|
-
reject(err);
|
|
1844
|
-
});
|
|
2324
|
+
},
|
|
2325
|
+
dlqHeaderKeys: _KafkaClient.DLQ_HEADER_KEYS
|
|
1845
2326
|
});
|
|
1846
|
-
this.logger.log(
|
|
1847
|
-
`replayDlq: replayed ${replayed}, skipped ${skipped} from "${dlqTopic}"`
|
|
1848
|
-
);
|
|
1849
|
-
return { replayed, skipped };
|
|
1850
2327
|
}
|
|
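`replayDlq` keeps its options even though the loop moved into `replayDlqTopic`: the removed inline version shows `dryRun`, `targetTopic`, and a `filter(headers, value)` predicate over the decoded headers and the stringified message value. A sketch:

```ts
// Dry run: count what would be re-published from orders.dlq, produce nothing.
const preview = await client.replayDlq("orders", { dryRun: true });
console.log(preview); // { replayed: number, skipped: number }

// Selective replay: the predicate sees decoded headers and the value as a
// string (shapes taken from the removed inline implementation).
await client.replayDlq("orders", {
  filter: (headers, value) => !value.includes("poison"),
  // targetTopic, when set, overrides the x-dlq-original-topic header
});
```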
1851
2328
|
async resetOffsets(groupId, topic2, position) {
|
|
1852
|
-
const gid = groupId ?? this.defaultGroupId;
|
|
1853
|
-
if (this.runningConsumers.has(gid)) {
|
|
1854
|
-
throw new Error(
|
|
1855
|
-
`resetOffsets: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before resetting offsets.`
|
|
1856
|
-
);
|
|
1857
|
-
}
|
|
1858
|
-
await this.ensureAdminConnected();
|
|
1859
|
-
const partitionOffsets = await this.admin.fetchTopicOffsets(topic2);
|
|
1860
|
-
const partitions = partitionOffsets.map(({ partition, low, high }) => ({
|
|
1861
|
-
partition,
|
|
1862
|
-
offset: position === "earliest" ? low : high
|
|
1863
|
-
}));
|
|
1864
|
-
await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
|
|
1865
|
-
this.logger.log(
|
|
1866
|
-
`Offsets reset to ${position} for group "${gid}" on topic "${topic2}"`
|
|
1867
|
-
);
|
|
2329
|
+
return this.adminOps.resetOffsets(groupId, topic2, position);
|
|
1868
2330
|
}
|
|
1869
2331
|
/**
|
|
1870
2332
|
* Seek specific topic-partition pairs to explicit offsets for a stopped consumer group.
|
|
@@ -1872,68 +2334,32 @@ var KafkaClient = class _KafkaClient {
|
|
|
1872
2334
|
* Assignments are grouped by topic and committed via `admin.setOffsets`.
|
|
1873
2335
|
*/
|
|
1874
2336
|
async seekToOffset(groupId, assignments) {
|
|
1875
|
-
const gid = groupId ?? this.defaultGroupId;
|
|
1876
|
-
if (this.runningConsumers.has(gid)) {
|
|
1877
|
-
throw new Error(
|
|
1878
|
-
`seekToOffset: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
|
|
1879
|
-
);
|
|
1880
|
-
}
|
|
1881
|
-
await this.ensureAdminConnected();
|
|
1882
|
-
const byTopic = /* @__PURE__ */ new Map();
|
|
1883
|
-
for (const { topic: topic2, partition, offset } of assignments) {
|
|
1884
|
-
const list = byTopic.get(topic2) ?? [];
|
|
1885
|
-
list.push({ partition, offset });
|
|
1886
|
-
byTopic.set(topic2, list);
|
|
1887
|
-
}
|
|
1888
|
-
for (const [topic2, partitions] of byTopic) {
|
|
1889
|
-
await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
|
|
1890
|
-
this.logger.log(
|
|
1891
|
-
`Offsets set for group "${gid}" on "${topic2}": ${JSON.stringify(partitions)}`
|
|
1892
|
-
);
|
|
1893
|
-
}
|
|
2337
|
+
return this.adminOps.seekToOffset(groupId, assignments);
|
|
1894
2338
|
}
|
|
2339
|
+
/**
|
|
2340
|
+
* Seek specific topic-partition pairs to the offset nearest to a given timestamp
|
|
2341
|
+
* (in milliseconds) for a stopped consumer group.
|
|
2342
|
+
* Throws if the group is still running — call `stopConsumer(groupId)` first.
|
|
2343
|
+
* Assignments are grouped by topic and committed via `admin.setOffsets`.
|
|
2344
|
+
* If no offset exists at the requested timestamp (e.g. empty partition or
|
|
2345
|
+
* future timestamp), the partition falls back to `-1` (end of topic — new messages only).
|
|
2346
|
+
*/
|
|
1895
2347
|
async seekToTimestamp(groupId, assignments) {
|
|
1896
|
-
const gid = groupId ?? this.defaultGroupId;
|
|
1897
|
-
if (this.runningConsumers.has(gid)) {
|
|
1898
|
-
throw new Error(
|
|
1899
|
-
`seekToTimestamp: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
|
|
1900
|
-
);
|
|
1901
|
-
}
|
|
1902
|
-
await this.ensureAdminConnected();
|
|
1903
|
-
const byTopic = /* @__PURE__ */ new Map();
|
|
1904
|
-
for (const { topic: topic2, partition, timestamp } of assignments) {
|
|
1905
|
-
const list = byTopic.get(topic2) ?? [];
|
|
1906
|
-
list.push({ partition, timestamp });
|
|
1907
|
-
byTopic.set(topic2, list);
|
|
1908
|
-
}
|
|
1909
|
-
for (const [topic2, parts] of byTopic) {
|
|
1910
|
-
const offsets = await Promise.all(
|
|
1911
|
-
parts.map(async ({ partition, timestamp }) => {
|
|
1912
|
-
const results = await this.admin.fetchTopicOffsetsByTime(
|
|
1913
|
-
topic2,
|
|
1914
|
-
timestamp
|
|
1915
|
-
);
|
|
1916
|
-
const found = results.find(
|
|
1917
|
-
(r) => r.partition === partition
|
|
1918
|
-
);
|
|
1919
|
-
return { partition, offset: found?.offset ?? "-1" };
|
|
1920
|
-
})
|
|
1921
|
-
);
|
|
1922
|
-
await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions: offsets });
|
|
1923
|
-
this.logger.log(
|
|
1924
|
-
`Offsets set by timestamp for group "${gid}" on "${topic2}": ${JSON.stringify(offsets)}`
|
|
1925
|
-
);
|
|
1926
|
-
}
|
|
2348
|
+
return this.adminOps.seekToTimestamp(groupId, assignments);
|
|
1927
2349
|
}
|
|
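Both seek helpers now delegate to `adminOps` but keep their contracts: the group must be stopped first (both threw on a running group in the removed bodies), offsets are per-partition strings, and timestamps are in milliseconds. A sketch:

```ts
await client.stopConsumer("orders-batch");

// Pin explicit offsets per topic-partition (string offsets, as in the old body):
await client.seekToOffset("orders-batch", [
  { topic: "orders", partition: 0, offset: "1200" },
]);

// Or seek to the offset nearest a timestamp; partitions with no match
// fall back to "-1", i.e. end of topic, new messages only.
await client.seekToTimestamp("orders-batch", [
  { topic: "orders", partition: 0, timestamp: Date.now() - 3_600_000 },
]);
```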
2350
|
+
/**
|
|
2351
|
+
* Returns the current circuit breaker state for a specific topic partition.
|
|
2352
|
+
* Returns `undefined` when no circuit state exists — either `circuitBreaker` is not
|
|
2353
|
+
* configured for the group, or the circuit has never been tripped.
|
|
2354
|
+
*
|
|
2355
|
+
* @param topic Topic name.
|
|
2356
|
+
* @param partition Partition index.
|
|
2357
|
+
* @param groupId Consumer group. Defaults to the client's default groupId.
|
|
2358
|
+
*
|
|
2359
|
+
* @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
|
|
2360
|
+
*/
|
|
1928
2361
|
getCircuitState(topic2, partition, groupId) {
|
|
1929
|
-
const gid = groupId ?? this.defaultGroupId;
|
|
1930
|
-
const state = this.circuitStates.get(`${gid}:${topic2}:${partition}`);
|
|
1931
|
-
if (!state) return void 0;
|
|
1932
|
-
return {
|
|
1933
|
-
status: state.status,
|
|
1934
|
-
failures: state.window.filter((v) => !v).length,
|
|
1935
|
-
windowSize: state.window.length
|
|
1936
|
-
};
|
|
2362
|
+
return this.circuitBreaker.getState(topic2, partition, groupId ?? this.defaultGroupId);
|
|
1937
2363
|
}
|
|
1938
2364
|
/**
|
|
1939
2365
|
* Query consumer group lag per partition.
|
|
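The snapshot shape survives the extraction into `this.circuitBreaker`: the removed inline code shows the same `status`, `failures`, and `windowSize` fields and the `"closed"` / `"open"` / `"half-open"` statuses. A polling sketch:

```ts
const state = client.getCircuitState("orders", 0, "orders-batch");
if (state === undefined) {
  // no circuitBreaker configured for the group, or this partition never tripped
} else {
  // status: "closed" | "open" | "half-open" (names from the removed inline breaker)
  console.log(`${state.status}: ${state.failures}/${state.windowSize} recent failures`);
}
```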
@@ -1947,73 +2373,60 @@ var KafkaClient = class _KafkaClient {
|
|
|
1947
2373
|
* committed offset. Use `checkStatus()` to verify broker connectivity in that case.
|
|
1948
2374
|
*/
|
|
1949
2375
|
async getConsumerLag(groupId) {
|
|
1950
|
-
const gid = groupId ?? this.defaultGroupId;
|
|
1951
|
-
await this.ensureAdminConnected();
|
|
1952
|
-
const committedByTopic = await this.admin.fetchOffsets({ groupId: gid });
|
|
1953
|
-
const brokerOffsetsAll = await Promise.all(
|
|
1954
|
-
committedByTopic.map(({ topic: topic2 }) => this.admin.fetchTopicOffsets(topic2))
|
|
1955
|
-
);
|
|
1956
|
-
const result = [];
|
|
1957
|
-
for (let i = 0; i < committedByTopic.length; i++) {
|
|
1958
|
-
const { topic: topic2, partitions } = committedByTopic[i];
|
|
1959
|
-
const brokerOffsets = brokerOffsetsAll[i];
|
|
1960
|
-
for (const { partition, offset } of partitions) {
|
|
1961
|
-
const broker = brokerOffsets.find((o) => o.partition === partition);
|
|
1962
|
-
if (!broker) continue;
|
|
1963
|
-
const committed = parseInt(offset, 10);
|
|
1964
|
-
const high = parseInt(broker.high, 10);
|
|
1965
|
-
const lag = committed === -1 ? high : Math.max(0, high - committed);
|
|
1966
|
-
result.push({ topic: topic2, partition, lag });
|
|
1967
|
-
}
|
|
1968
|
-
}
|
|
1969
|
-
return result;
|
|
2376
|
+
return this.adminOps.getConsumerLag(groupId);
|
|
1970
2377
|
}
|
|
1971
2378
|
/** Check broker connectivity. Never throws — returns a discriminated union. */
|
|
1972
2379
|
async checkStatus() {
|
|
1973
|
-
|
|
1974
|
-
|
|
1975
|
-
|
|
1976
|
-
|
|
1977
|
-
|
|
1978
|
-
|
|
1979
|
-
|
|
1980
|
-
|
|
1981
|
-
error: error instanceof Error ? error.message : String(error)
|
|
1982
|
-
};
|
|
1983
|
-
}
|
|
2380
|
+
return this.adminOps.checkStatus();
|
|
2381
|
+
}
|
|
2382
|
+
/**
|
|
2383
|
+
* List all consumer groups known to the broker.
|
|
2384
|
+
* Useful for monitoring which groups are active and their current state.
|
|
2385
|
+
*/
|
|
2386
|
+
async listConsumerGroups() {
|
|
2387
|
+
return this.adminOps.listConsumerGroups();
|
|
1984
2388
|
}
|
|
2389
|
+
/**
|
|
2390
|
+
* Describe topics — returns partition layout, leader, replicas, and ISR.
|
|
2391
|
+
* @param topics Topic names to describe. Omit to describe all topics.
|
|
2392
|
+
*/
|
|
2393
|
+
async describeTopics(topics) {
|
|
2394
|
+
return this.adminOps.describeTopics(topics);
|
|
2395
|
+
}
|
|
2396
|
+
/**
|
|
2397
|
+
* Delete records from a topic up to (but not including) the given offsets.
|
|
2398
|
+
* All messages with offsets **before** the given offset are deleted.
|
|
2399
|
+
*/
|
|
2400
|
+
async deleteRecords(topic2, partitions) {
|
|
2401
|
+
return this.adminOps.deleteRecords(topic2, partitions);
|
|
2402
|
+
}
|
|
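The three new admin passthroughs are thin delegations to `adminOps`; their result shapes follow the underlying admin client rather than anything shown in this diff, so the sketch below leans only on the documented semantics, and the `deleteRecords` partition shape is an assumption:

```ts
// Monitoring: which groups exist, and how a topic is laid out.
const groups = await client.listConsumerGroups();
const layout = await client.describeTopics(["orders"]); // omit the argument for all topics

// Truncation: everything strictly before the given offset is deleted.
await client.deleteRecords("orders", [{ partition: 0, offset: "1000" }]); // shape assumed
```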
2403
|
+
/** Return the client ID provided during `KafkaClient` construction. */
|
|
1985
2404
|
getClientId() {
|
|
1986
2405
|
return this.clientId;
|
|
1987
2406
|
}
|
|
2407
|
+
/**
|
|
2408
|
+
* Return a snapshot of internal event counters accumulated since client creation
|
|
2409
|
+
* (or since the last `resetMetrics()` call).
|
|
2410
|
+
*
|
|
2411
|
+
* @param topic Topic name to scope the snapshot to. When omitted, counters are
|
|
2412
|
+
* aggregated across all topics. If the topic has no recorded events yet, returns
|
|
2413
|
+
* a zero-valued snapshot.
|
|
2414
|
+
* @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
|
|
2415
|
+
*/
|
|
1988
2416
|
getMetrics(topic2) {
|
|
1989
|
-
if (topic2 !== void 0) {
|
|
1990
|
-
const m = this._topicMetrics.get(topic2);
|
|
1991
|
-
return m ? { ...m } : { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
|
|
1992
|
-
}
|
|
1993
|
-
const agg = {
|
|
1994
|
-
processedCount: 0,
|
|
1995
|
-
retryCount: 0,
|
|
1996
|
-
dlqCount: 0,
|
|
1997
|
-
dedupCount: 0
|
|
1998
|
-
};
|
|
1999
|
-
for (const m of this._topicMetrics.values()) {
|
|
2000
|
-
agg.processedCount += m.processedCount;
|
|
2001
|
-
agg.retryCount += m.retryCount;
|
|
2002
|
-
agg.dlqCount += m.dlqCount;
|
|
2003
|
-
agg.dedupCount += m.dedupCount;
|
|
2004
|
-
}
|
|
2005
|
-
return agg;
|
|
2417
|
+
return this.metrics.getMetrics(topic2);
|
|
2006
2418
|
}
|
|
2419
|
+
/**
|
|
2420
|
+
* Reset internal event counters to zero.
|
|
2421
|
+
*
|
|
2422
|
+
* @param topic Topic name to reset. When omitted, all topics are reset.
|
|
2423
|
+
*/
|
|
2007
2424
|
resetMetrics(topic2) {
|
|
2008
|
-
if (topic2 !== void 0) {
|
|
2009
|
-
this._topicMetrics.delete(topic2);
|
|
2010
|
-
return;
|
|
2011
|
-
}
|
|
2012
|
-
this._topicMetrics.clear();
|
|
2425
|
+
this.metrics.resetMetrics(topic2);
|
|
2013
2426
|
}
|
|
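Counters now live on `this.metrics`, but the read side is unchanged: a per-topic or aggregated `KafkaMetrics` snapshot, and a matching reset. A sketch:

```ts
// Per-topic snapshot; zero-valued if the topic has no recorded events yet.
const m = client.getMetrics("orders");
console.log(m.processedCount, m.retryCount, m.dlqCount, m.dedupCount);

// Aggregate across all topics, then start a fresh measurement window.
const all = client.getMetrics();
client.resetMetrics(); // or client.resetMetrics("orders") for a single topic
```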
2014
2427
|
/** Gracefully disconnect producer, all consumers, and admin. */
|
|
2015
2428
|
async disconnect(drainTimeoutMs = 3e4) {
|
|
2016
|
-
await this.waitForDrain(drainTimeoutMs);
|
|
2429
|
+
await this.inFlight.waitForDrain(drainTimeoutMs);
|
|
2017
2430
|
const tasks = [this.producer.disconnect()];
|
|
2018
2431
|
if (this.txProducer) {
|
|
2019
2432
|
tasks.push(this.txProducer.disconnect());
|
|
@@ -2021,28 +2434,17 @@ var KafkaClient = class _KafkaClient {
|
|
|
2021
2434
|
this.txProducer = void 0;
|
|
2022
2435
|
this.txProducerInitPromise = void 0;
|
|
2023
2436
|
}
|
|
2024
|
-
for (const txId of this.retryTxProducers.keys()) {
|
|
2025
|
-
_activeTransactionalIds.delete(txId);
|
|
2026
|
-
}
|
|
2027
|
-
for (const p of this.retryTxProducers.values()) {
|
|
2028
|
-
tasks.push(p.disconnect());
|
|
2029
|
-
}
|
|
2437
|
+
for (const txId of this.retryTxProducers.keys()) _activeTransactionalIds.delete(txId);
|
|
2438
|
+
for (const p of this.retryTxProducers.values()) tasks.push(p.disconnect());
|
|
2030
2439
|
this.retryTxProducers.clear();
|
|
2031
|
-
for (const consumer of this.consumers.values()) {
|
|
2032
|
-
tasks.push(consumer.disconnect());
|
|
2033
|
-
}
|
|
2034
|
-
if (this.isAdminConnected) {
|
|
2035
|
-
tasks.push(this.admin.disconnect());
|
|
2036
|
-
this.isAdminConnected = false;
|
|
2037
|
-
}
|
|
2440
|
+
for (const consumer of this.consumers.values()) tasks.push(consumer.disconnect());
|
|
2441
|
+
tasks.push(this.adminOps.disconnect());
|
|
2038
2442
|
await Promise.allSettled(tasks);
|
|
2039
2443
|
this.consumers.clear();
|
|
2040
2444
|
this.runningConsumers.clear();
|
|
2041
2445
|
this.consumerCreationOptions.clear();
|
|
2042
2446
|
this.companionGroupIds.clear();
|
|
2043
|
-
for (const state of this.circuitStates.values()) clearTimeout(state.timer);
|
|
2044
|
-
this.circuitStates.clear();
|
|
2045
|
-
this.circuitConfigs.clear();
|
|
2447
|
+
this.circuitBreaker.clear();
|
|
2046
2448
|
this.logger.log("All connections closed");
|
|
2047
2449
|
}
|
|
2048
2450
|
// ── Graceful shutdown ────────────────────────────────────────────
|
|
@@ -2061,183 +2463,20 @@ var KafkaClient = class _KafkaClient {
|
|
|
2061
2463
|
*/
|
|
2062
2464
|
enableGracefulShutdown(signals = ["SIGTERM", "SIGINT"], drainTimeoutMs = 3e4) {
|
|
2063
2465
|
const handler = () => {
|
|
2064
|
-
this.logger.log(
|
|
2065
|
-
"Shutdown signal received \u2014 draining in-flight handlers..."
|
|
2066
|
-
);
|
|
2466
|
+
this.logger.log("Shutdown signal received \u2014 draining in-flight handlers...");
|
|
2067
2467
|
this.disconnect(drainTimeoutMs).catch(
|
|
2068
|
-
(err) => this.logger.error(
|
|
2069
|
-
"Error during graceful shutdown:",
|
|
2070
|
-
toError(err).message
|
|
2071
|
-
)
|
|
2468
|
+
(err) => this.logger.error("Error during graceful shutdown:", toError(err).message)
|
|
2072
2469
|
);
|
|
2073
2470
|
};
|
|
2074
|
-
for (const signal of signals) {
|
|
2075
|
-
process.once(signal, handler);
|
|
2076
|
-
}
|
|
2077
|
-
}
|
|
2078
|
-
trackInFlight(fn) {
|
|
2079
|
-
this.inFlightTotal++;
|
|
2080
|
-
return fn().finally(() => {
|
|
2081
|
-
this.inFlightTotal--;
|
|
2082
|
-
if (this.inFlightTotal === 0) {
|
|
2083
|
-
this.drainResolvers.splice(0).forEach((r) => r());
|
|
2084
|
-
}
|
|
2085
|
-
});
|
|
2086
|
-
}
|
|
2087
|
-
waitForDrain(timeoutMs) {
|
|
2088
|
-
if (this.inFlightTotal === 0) return Promise.resolve();
|
|
2089
|
-
return new Promise((resolve) => {
|
|
2090
|
-
let handle;
|
|
2091
|
-
const onDrain = () => {
|
|
2092
|
-
clearTimeout(handle);
|
|
2093
|
-
resolve();
|
|
2094
|
-
};
|
|
2095
|
-
this.drainResolvers.push(onDrain);
|
|
2096
|
-
handle = setTimeout(() => {
|
|
2097
|
-
const idx = this.drainResolvers.indexOf(onDrain);
|
|
2098
|
-
if (idx !== -1) this.drainResolvers.splice(idx, 1);
|
|
2099
|
-
this.logger.warn(
|
|
2100
|
-
`Drain timed out after ${timeoutMs}ms \u2014 ${this.inFlightTotal} handler(s) still in flight`
|
|
2101
|
-
);
|
|
2102
|
-
resolve();
|
|
2103
|
-
}, timeoutMs);
|
|
2104
|
-
});
|
|
2471
|
+
for (const signal of signals) process.once(signal, handler);
|
|
2105
2472
|
}
|
|
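With in-flight tracking moved to `this.inFlight`, `enableGracefulShutdown` reduces to registering one-shot signal handlers that drain before disconnecting. A sketch using the defaults visible in the signature above:

```ts
// Defaults: ["SIGTERM", "SIGINT"] signals and a 30s (3e4 ms) drain budget.
client.enableGracefulShutdown();

// Or narrower: one signal, shorter drain window.
client.enableGracefulShutdown(["SIGTERM"], 10_000);
```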
2106
2473
|
// ── Private helpers ──────────────────────────────────────────────
|
|
2107
|
-
async preparePayload(topicOrDesc, messages) {
|
|
2474
|
+
async preparePayload(topicOrDesc, messages, compression) {
|
|
2108
2475
|
registerSchema(topicOrDesc, this.schemaRegistry, this.logger);
|
|
2109
|
-
const payload = await buildSendPayload(
|
|
2110
|
-
topicOrDesc,
|
|
2111
|
-
messages,
|
|
2112
|
-
this.producerOpsDeps
|
|
2113
|
-
);
|
|
2476
|
+
const payload = await buildSendPayload(topicOrDesc, messages, this._producerOpsDeps, compression);
|
|
2114
2477
|
await this.ensureTopic(payload.topic);
|
|
2115
2478
|
return payload;
|
|
2116
2479
|
}
|
|
2117
|
-
// afterSend is called once per message — symmetric with beforeSend in buildSendPayload.
|
|
2118
|
-
notifyAfterSend(topic2, count) {
|
|
2119
|
-
for (let i = 0; i < count; i++) {
|
|
2120
|
-
for (const inst of this.instrumentation) {
|
|
2121
|
-
inst.afterSend?.(topic2);
|
|
2122
|
-
}
|
|
2123
|
-
}
|
|
2124
|
-
}
|
|
2125
|
-
metricsFor(topic2) {
|
|
2126
|
-
let m = this._topicMetrics.get(topic2);
|
|
2127
|
-
if (!m) {
|
|
2128
|
-
m = { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
|
|
2129
|
-
this._topicMetrics.set(topic2, m);
|
|
2130
|
-
}
|
|
2131
|
-
return m;
|
|
2132
|
-
}
|
|
2133
|
-
notifyRetry(envelope, attempt, maxRetries) {
|
|
2134
|
-
this.metricsFor(envelope.topic).retryCount++;
|
|
2135
|
-
for (const inst of this.instrumentation) {
|
|
2136
|
-
inst.onRetry?.(envelope, attempt, maxRetries);
|
|
2137
|
-
}
|
|
2138
|
-
}
|
|
2139
|
-
notifyDlq(envelope, reason, gid) {
|
|
2140
|
-
this.metricsFor(envelope.topic).dlqCount++;
|
|
2141
|
-
for (const inst of this.instrumentation) {
|
|
2142
|
-
inst.onDlq?.(envelope, reason);
|
|
2143
|
-
}
|
|
2144
|
-
if (!gid) return;
|
|
2145
|
-
const cfg = this.circuitConfigs.get(gid);
|
|
2146
|
-
if (!cfg) return;
|
|
2147
|
-
const threshold = cfg.threshold ?? 5;
|
|
2148
|
-
const recoveryMs = cfg.recoveryMs ?? 3e4;
|
|
2149
|
-
const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
|
|
2150
|
-
let state = this.circuitStates.get(stateKey);
|
|
2151
|
-
if (!state) {
|
|
2152
|
-
state = { status: "closed", window: [], successes: 0 };
|
|
2153
|
-
this.circuitStates.set(stateKey, state);
|
|
2154
|
-
}
|
|
2155
|
-
if (state.status === "open") return;
|
|
2156
|
-
const openCircuit = () => {
|
|
2157
|
-
state.status = "open";
|
|
2158
|
-
state.window = [];
|
|
2159
|
-
state.successes = 0;
|
|
2160
|
-
clearTimeout(state.timer);
|
|
2161
|
-
for (const inst of this.instrumentation)
|
|
2162
|
-
inst.onCircuitOpen?.(envelope.topic, envelope.partition);
|
|
2163
|
-
this.pauseConsumer(gid, [
|
|
2164
|
-
{ topic: envelope.topic, partitions: [envelope.partition] }
|
|
2165
|
-
]);
|
|
2166
|
-
state.timer = setTimeout(() => {
|
|
2167
|
-
state.status = "half-open";
|
|
2168
|
-
state.successes = 0;
|
|
2169
|
-
this.logger.log(
|
|
2170
|
-
`[CircuitBreaker] HALF-OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
|
|
2171
|
-
);
|
|
2172
|
-
for (const inst of this.instrumentation)
|
|
2173
|
-
inst.onCircuitHalfOpen?.(envelope.topic, envelope.partition);
|
|
2174
|
-
this.resumeConsumer(gid, [
|
|
2175
|
-
{ topic: envelope.topic, partitions: [envelope.partition] }
|
|
2176
|
-
]);
|
|
2177
|
-
}, recoveryMs);
|
|
2178
|
-
};
|
|
2179
|
-
if (state.status === "half-open") {
|
|
2180
|
-
clearTimeout(state.timer);
|
|
2181
|
-
this.logger.warn(
|
|
2182
|
-
`[CircuitBreaker] OPEN (half-open failure) \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
|
|
2183
|
-
);
|
|
2184
|
-
openCircuit();
|
|
2185
|
-
return;
|
|
2186
|
-
}
|
|
2187
|
-
const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
|
|
2188
|
-
state.window = [...state.window, false];
|
|
2189
|
-
if (state.window.length > windowSize) {
|
|
2190
|
-
state.window = state.window.slice(state.window.length - windowSize);
|
|
2191
|
-
}
|
|
2192
|
-
const failures = state.window.filter((v) => !v).length;
|
|
2193
|
-
if (failures >= threshold) {
|
|
2194
|
-
this.logger.warn(
|
|
2195
|
-
`[CircuitBreaker] OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition} (${failures}/${state.window.length} failures, threshold=${threshold})`
|
|
2196
|
-
);
|
|
2197
|
-
openCircuit();
|
|
2198
|
-
}
|
|
2199
|
-
}
|
|
2200
|
-
notifyDuplicate(envelope, strategy) {
|
|
2201
|
-
this.metricsFor(envelope.topic).dedupCount++;
|
|
2202
|
-
for (const inst of this.instrumentation) {
|
|
2203
|
-
inst.onDuplicate?.(envelope, strategy);
|
|
2204
|
-
}
|
|
2205
|
-
}
|
|
2206
|
-
notifyMessage(envelope, gid) {
|
|
2207
|
-
this.metricsFor(envelope.topic).processedCount++;
|
|
2208
|
-
for (const inst of this.instrumentation) {
|
|
2209
|
-
inst.onMessage?.(envelope);
|
|
2210
|
-
}
|
|
2211
|
-
if (!gid) return;
|
|
2212
|
-
const cfg = this.circuitConfigs.get(gid);
|
|
2213
|
-
if (!cfg) return;
|
|
2214
|
-
const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
|
|
2215
|
-
const state = this.circuitStates.get(stateKey);
|
|
2216
|
-
if (!state) return;
|
|
2217
|
-
const halfOpenSuccesses = cfg.halfOpenSuccesses ?? 1;
|
|
2218
|
-
if (state.status === "half-open") {
|
|
2219
|
-
state.successes++;
|
|
2220
|
-
if (state.successes >= halfOpenSuccesses) {
|
|
2221
|
-
clearTimeout(state.timer);
|
|
2222
|
-
state.timer = void 0;
|
|
2223
|
-
state.status = "closed";
|
|
2224
|
-
state.window = [];
|
|
2225
|
-
state.successes = 0;
|
|
2226
|
-
this.logger.log(
|
|
2227
|
-
`[CircuitBreaker] CLOSED \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
|
|
2228
|
-
);
|
|
2229
|
-
for (const inst of this.instrumentation)
|
|
2230
|
-
inst.onCircuitClose?.(envelope.topic, envelope.partition);
|
|
2231
|
-
}
|
|
2232
|
-
} else if (state.status === "closed") {
|
|
2233
|
-
const threshold = cfg.threshold ?? 5;
|
|
2234
|
-
const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
|
|
2235
|
-
state.window = [...state.window, true];
|
|
2236
|
-
if (state.window.length > windowSize) {
|
|
2237
|
-
state.window = state.window.slice(state.window.length - windowSize);
|
|
2238
|
-
}
|
|
2239
|
-
}
|
|
2240
|
-
}
|
|
2241
2480
|
/**
|
|
2242
2481
|
* Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
|
|
2243
2482
|
* The handler itself is not cancelled — the warning is diagnostic only.
|
|
@@ -2248,79 +2487,10 @@ var KafkaClient = class _KafkaClient {
|
|
|
2248
2487
|
if (timer !== void 0) clearTimeout(timer);
|
|
2249
2488
|
});
|
|
2250
2489
|
timer = setTimeout(() => {
|
|
2251
|
-
this.logger.warn(
|
|
2252
|
-
`Handler for topic "${topic2}" has not resolved after ${timeoutMs}ms \u2014 possible stuck handler`
|
|
2253
|
-
);
|
|
2490
|
+
this.logger.warn(`Handler for topic "${topic2}" has not resolved after ${timeoutMs}ms \u2014 possible stuck handler`);
|
|
2254
2491
|
}, timeoutMs);
|
|
2255
2492
|
return promise;
|
|
2256
2493
|
}
|
|
2257
|
-
/**
|
|
2258
|
-
* When `retryTopics: true` and `autoCreateTopics: false`, verify that every
|
|
2259
|
-
* `<topic>.retry.<level>` topic already exists. Throws a clear error at startup
|
|
2260
|
-
* rather than silently discovering missing topics on the first handler failure.
|
|
2261
|
-
*/
|
|
2262
|
-
async validateRetryTopicsExist(topicNames, maxRetries) {
|
|
2263
|
-
await this.ensureAdminConnected();
|
|
2264
|
-
const existing = new Set(await this.admin.listTopics());
|
|
2265
|
-
const missing = [];
|
|
2266
|
-
for (const t of topicNames) {
|
|
2267
|
-
for (let level = 1; level <= maxRetries; level++) {
|
|
2268
|
-
const retryTopic = `${t}.retry.${level}`;
|
|
2269
|
-
if (!existing.has(retryTopic)) missing.push(retryTopic);
|
|
2270
|
-
}
|
|
2271
|
-
}
|
|
2272
|
-
if (missing.length > 0) {
|
|
2273
|
-
throw new Error(
|
|
2274
|
-
`retryTopics: true but the following retry topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
|
|
2275
|
-
);
|
|
2276
|
-
}
|
|
2277
|
-
}
|
|
2278
|
-
/**
|
|
2279
|
-
* When `autoCreateTopics` is disabled, verify that `<topic>.dlq` exists for every
|
|
2280
|
-
* consumed topic. Throws a clear error at startup rather than silently discovering
|
|
2281
|
-
* missing DLQ topics on the first handler failure.
|
|
2282
|
-
*/
|
|
2283
|
-
async validateDlqTopicsExist(topicNames) {
|
|
2284
|
-
await this.ensureAdminConnected();
|
|
2285
|
-
const existing = new Set(await this.admin.listTopics());
|
|
2286
|
-
const missing = topicNames.filter((t) => !existing.has(`${t}.dlq`)).map((t) => `${t}.dlq`);
|
|
2287
|
-
if (missing.length > 0) {
|
|
2288
|
-
throw new Error(
|
|
2289
|
-
`dlq: true but the following DLQ topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
|
|
2290
|
-
);
|
|
2291
|
-
}
|
|
2292
|
-
}
|
|
2293
|
-
/**
|
|
2294
|
-
* When `deduplication.strategy: 'topic'` and `autoCreateTopics: false`, verify
|
|
2295
|
-
* that every `<topic>.duplicates` destination topic already exists. Throws a
|
|
2296
|
-
* clear error at startup rather than silently dropping duplicates on first hit.
|
|
2297
|
-
*/
|
|
2298
|
-
async validateDuplicatesTopicsExist(topicNames, customDestination) {
|
|
2299
|
-
await this.ensureAdminConnected();
|
|
2300
|
-
const existing = new Set(await this.admin.listTopics());
|
|
2301
|
-
const toCheck = customDestination ? [customDestination] : topicNames.map((t) => `${t}.duplicates`);
|
|
2302
|
-
const missing = toCheck.filter((t) => !existing.has(t));
|
|
2303
|
-
if (missing.length > 0) {
|
|
2304
|
-
throw new Error(
|
|
2305
|
-
`deduplication.strategy: 'topic' but the following duplicate-routing topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
|
|
2306
|
-
);
|
|
2307
|
-
}
|
|
2308
|
-
}
|
|
2309
|
-
/**
|
|
2310
|
-
* Connect the admin client if not already connected.
|
|
2311
|
-
* The flag is only set to `true` after a successful connect — if `admin.connect()`
|
|
2312
|
-
* throws the flag remains `false` so the next call will retry the connection.
|
|
2313
|
-
*/
|
|
2314
|
-
async ensureAdminConnected() {
|
|
2315
|
-
if (this.isAdminConnected) return;
|
|
2316
|
-
try {
|
|
2317
|
-
await this.admin.connect();
|
|
2318
|
-
this.isAdminConnected = true;
|
|
2319
|
-
} catch (err) {
|
|
2320
|
-
this.isAdminConnected = false;
|
|
2321
|
-
throw err;
|
|
2322
|
-
}
|
|
2323
|
-
}
|
|
2324
2494
|
/**
|
|
2325
2495
|
* Create and connect a transactional producer for EOS retry routing.
|
|
2326
2496
|
* Each retry level consumer gets its own producer with a unique `transactionalId`
|
|
@@ -2333,25 +2503,25 @@ var KafkaClient = class _KafkaClient {
|
|
|
2333
2503
|
);
|
|
2334
2504
|
}
|
|
2335
2505
|
const p = this.kafka.producer({
|
|
2336
|
-
kafkaJS: {
|
|
2337
|
-
acks: -1,
|
|
2338
|
-
idempotent: true,
|
|
2339
|
-
transactionalId,
|
|
2340
|
-
maxInFlightRequests: 1
|
|
2341
|
-
}
|
|
2506
|
+
kafkaJS: { acks: -1, idempotent: true, transactionalId, maxInFlightRequests: 1 }
|
|
2342
2507
|
});
|
|
2343
2508
|
await p.connect();
|
|
2344
2509
|
_activeTransactionalIds.add(transactionalId);
|
|
2345
2510
|
this.retryTxProducers.set(transactionalId, p);
|
|
2346
2511
|
return p;
|
|
2347
2512
|
}
|
|
2513
|
+
/**
|
|
2514
|
+
* Ensure that a topic exists by creating it if it doesn't already exist.
|
|
2515
|
+
* If `autoCreateTopics` is disabled, returns immediately.
|
|
2516
|
+
* Concurrent calls for the same topic are deduplicated.
|
|
2517
|
+
*/
|
|
2348
2518
|
async ensureTopic(topic2) {
|
|
2349
2519
|
if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
|
|
2350
2520
|
let p = this.ensureTopicPromises.get(topic2);
|
|
2351
2521
|
if (!p) {
|
|
2352
2522
|
p = (async () => {
|
|
2353
|
-
await this.
|
|
2354
|
-
await this.admin.createTopics({
|
|
2523
|
+
await this.adminOps.ensureConnected();
|
|
2524
|
+
await this.adminOps.admin.createTopics({
|
|
2355
2525
|
topics: [{ topic: topic2, numPartitions: this.numPartitions }]
|
|
2356
2526
|
});
|
|
2357
2527
|
this.ensuredTopics.add(topic2);
|
|
@@ -2370,6 +2540,9 @@ var KafkaClient = class _KafkaClient {
|
|
|
2370
2540
|
interceptors = [],
|
|
2371
2541
|
schemas: optionSchemas
|
|
2372
2542
|
} = options;
|
|
2543
|
+
const stringTopics = topics.filter((t) => !(t instanceof RegExp));
|
|
2544
|
+
const regexTopics = topics.filter((t) => t instanceof RegExp);
|
|
2545
|
+
const hasRegex = regexTopics.length > 0;
|
|
2373
2546
|
const gid = optGroupId || this.defaultGroupId;
|
|
2374
2547
|
const existingMode = this.runningConsumers.get(gid);
|
|
2375
2548
|
const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
|
|
@@ -2388,75 +2561,78 @@ var KafkaClient = class _KafkaClient {
|
|
|
2388
2561
|
gid,
|
|
2389
2562
|
fromBeginning,
|
|
2390
2563
|
options.autoCommit ?? true,
|
|
2391
|
-
this.consumerOpsDeps
|
|
2392
|
-
);
|
|
2393
|
-
const schemaMap = buildSchemaMap(
|
|
2394
|
-
topics,
|
|
2395
|
-
this.schemaRegistry,
|
|
2396
|
-
optionSchemas,
|
|
2397
|
-
this.logger
|
|
2564
|
+
this._consumerOpsDeps,
|
|
2565
|
+
options.partitionAssigner
|
|
2398
2566
|
);
|
|
2399
|
-
const topicNames = topics.map((t) => resolveTopicName(t));
|
|
2400
|
-
for (const t of topicNames) {
|
|
2401
|
-
await this.ensureTopic(t);
|
|
2402
|
-
}
|
|
2567
|
+
const schemaMap = buildSchemaMap(stringTopics, this.schemaRegistry, optionSchemas, this.logger);
|
|
2568
|
+
const topicNames = stringTopics.map((t) => resolveTopicName(t));
|
|
2569
|
+
const subscribeTopics = [...topicNames, ...regexTopics];
|
|
2570
|
+
for (const t of topicNames) await this.ensureTopic(t);
|
|
2403
2571
|
if (dlq) {
|
|
2404
|
-
for (const t of topicNames) {
|
|
2405
|
-
await this.ensureTopic(`${t}.dlq`);
|
|
2406
|
-
}
|
|
2407
|
-
if (!this.autoCreateTopicsEnabled) {
|
|
2408
|
-
await this.validateDlqTopicsExist(topicNames);
|
|
2572
|
+
for (const t of topicNames) await this.ensureTopic(`${t}.dlq`);
|
|
2573
|
+
if (!this.autoCreateTopicsEnabled && topicNames.length > 0) {
|
|
2574
|
+
await this.adminOps.validateDlqTopicsExist(topicNames);
|
|
2409
2575
|
}
|
|
2410
2576
|
}
|
|
2411
2577
|
if (options.deduplication?.strategy === "topic") {
|
|
2412
2578
|
const dest = options.deduplication.duplicatesTopic;
|
|
2413
2579
|
if (this.autoCreateTopicsEnabled) {
|
|
2414
|
-
for (const t of topicNames) {
|
|
2415
|
-
await this.ensureTopic(dest ?? `${t}.duplicates`);
|
|
2416
|
-
}
|
|
2417
|
-
} else {
|
|
2418
|
-
await this.validateDuplicatesTopicsExist(topicNames, dest);
|
|
2580
|
+
for (const t of topicNames) await this.ensureTopic(dest ?? `${t}.duplicates`);
|
|
2581
|
+
} else if (topicNames.length > 0) {
|
|
2582
|
+
await this.adminOps.validateDuplicatesTopicsExist(topicNames, dest);
|
|
2419
2583
|
}
|
|
2420
2584
|
}
|
|
2421
2585
|
await consumer.connect();
|
|
2422
|
-
await subscribeWithRetry(
|
|
2423
|
-
consumer,
|
|
2424
|
-
topicNames,
|
|
2425
|
-
this.logger,
|
|
2426
|
-
options.subscribeRetry
|
|
2427
|
-
);
|
|
2428
|
-
this.logger.log(
|
|
2429
|
-
`${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
|
|
2430
|
-
);
|
|
2431
|
-
return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
|
|
2586
|
+
await subscribeWithRetry(consumer, subscribeTopics, this.logger, options.subscribeRetry);
|
|
2587
|
+
const displayTopics = subscribeTopics.map((t) => t instanceof RegExp ? t.toString() : t).join(", ");
|
|
2588
|
+
this.logger.log(`${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${displayTopics}`);
|
|
2589
|
+
return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry, hasRegex };
|
|
2432
2590
|
}
|
|
2433
2591
|
/** Create or retrieve the deduplication context for a consumer group. */
|
|
2434
2592
|
resolveDeduplicationContext(groupId, options) {
|
|
2435
2593
|
if (!options) return void 0;
|
|
2436
|
-
if (!this.dedupStates.has(groupId)) {
|
|
2437
|
-
this.dedupStates.set(groupId, /* @__PURE__ */ new Map());
|
|
2438
|
-
}
|
|
2594
|
+
if (!this.dedupStates.has(groupId)) this.dedupStates.set(groupId, /* @__PURE__ */ new Map());
|
|
2439
2595
|
return { options, state: this.dedupStates.get(groupId) };
|
|
2440
2596
|
}
|
|
2441
|
-
// ──
|
|
2442
|
-
|
|
2443
|
-
|
|
2444
|
-
|
|
2445
|
-
|
|
2446
|
-
|
|
2447
|
-
|
|
2448
|
-
|
|
2449
|
-
|
|
2597
|
+
// ── Shared consumer setup helpers ────────────────────────────────
|
|
2598
|
+
/** Guard checks shared by startConsumer and startBatchConsumer. */
|
|
2599
|
+
validateTopicConsumerOpts(topics, options) {
|
|
2600
|
+
if (options.retryTopics && !options.retry) {
|
|
2601
|
+
throw new Error(
|
|
2602
|
+
"retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
|
|
2603
|
+
);
|
|
2604
|
+
}
|
|
2605
|
+
if (options.retryTopics && topics.some((t) => t instanceof RegExp)) {
|
|
2606
|
+
throw new Error(
|
|
2607
|
+
"retryTopics is incompatible with regex topic patterns \u2014 retry topics require a fixed topic name to build the retry chain."
|
|
2608
|
+
);
|
|
2609
|
+
}
|
|
2450
2610
|
}
|
|
2451
|
-
|
|
2452
|
-
|
|
2453
|
-
|
|
2454
|
-
|
|
2455
|
-
|
|
2456
|
-
|
|
2457
|
-
|
|
2458
|
-
|
|
2611
|
+
/** Create EOS transactional producer context for atomic main → retry.1 routing. */
|
|
2612
|
+
async makeEosMainContext(gid, consumer, options) {
|
|
2613
|
+
if (!options.retryTopics || !options.retry) return void 0;
|
|
2614
|
+
const txProducer = await this.createRetryTxProducer(`${gid}-main-tx`);
|
|
2615
|
+
return { txProducer, consumer };
|
|
2616
|
+
}
|
|
2617
|
+
/** Start companion retry-level consumers and register them under the main groupId. */
|
|
2618
|
+
async launchRetryChain(gid, topicNames, handleMessage, retry, dlq, interceptors, schemaMap, assignmentTimeoutMs) {
|
|
2619
|
+
if (!this.autoCreateTopicsEnabled) {
|
|
2620
|
+
await this.adminOps.validateRetryTopicsExist(topicNames, retry.maxRetries);
|
|
2621
|
+
}
|
|
2622
|
+
const companions = await startRetryTopicConsumers(
|
|
2623
|
+
topicNames,
|
|
2624
|
+
gid,
|
|
2625
|
+
handleMessage,
|
|
2626
|
+
retry,
|
|
2627
|
+
dlq,
|
|
2628
|
+
interceptors,
|
|
2629
|
+
schemaMap,
|
|
2630
|
+
this._retryTopicDeps,
|
|
2631
|
+
assignmentTimeoutMs
|
|
2632
|
+
);
|
|
2633
|
+
this.companionGroupIds.set(gid, companions);
|
|
2459
2634
|
}
|
|
2635
|
+
// ── Deps object builders ─────────────────────────────────────────
|
|
2460
2636
|
/** Build MessageHandlerDeps with circuit breaker callbacks bound to the given groupId. */
|
|
2461
2637
|
messageDepsFor(gid) {
|
|
2462
2638
|
return {
|
|
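`setupConsumer` now splits literal topic names from RegExp patterns: schema maps and topic auto-creation apply only to the literals, and `validateTopicConsumerOpts` rejects the one combination that cannot work, regex subscriptions with `retryTopics`, since the chain needs a fixed name to derive `<topic>.retry.<level>`. A sketch with the handler position assumed:

```ts
// Regex subscription: topics matching the pattern are consumed.
await client.startConsumer(
  [/^orders\..+/],
  async (envelope) => { /* ... */ },
  { groupId: "orders-all" },
);

// Throws per the new guard: retryTopics is incompatible with regex patterns.
// await client.startConsumer([/^orders\..+/], handler, {
//   retry: { maxRetries: 3 },
//   retryTopics: true,
// });
```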
@@ -2465,23 +2641,24 @@ var KafkaClient = class _KafkaClient {
|
|
|
2465
2641
|
instrumentation: this.instrumentation,
|
|
2466
2642
|
onMessageLost: this.onMessageLost,
|
|
2467
2643
|
onTtlExpired: this.onTtlExpired,
|
|
2468
|
-
onRetry: this.notifyRetry.bind(this),
|
|
2469
|
-
onDlq: (envelope, reason) => this.notifyDlq(envelope, reason, gid),
|
|
2470
|
-
onDuplicate: this.notifyDuplicate.bind(this),
|
|
2471
|
-
onMessage: (envelope) => this.notifyMessage(envelope, gid)
|
|
2644
|
+
onRetry: this.metrics.notifyRetry.bind(this.metrics),
|
|
2645
|
+
onDlq: (envelope, reason) => this.metrics.notifyDlq(envelope, reason, gid),
|
|
2646
|
+
onDuplicate: this.metrics.notifyDuplicate.bind(this.metrics),
|
|
2647
|
+
onMessage: (envelope) => this.metrics.notifyMessage(envelope, gid)
|
|
2472
2648
|
};
|
|
2473
2649
|
}
|
|
2474
|
-
get retryTopicDeps() {
|
|
2650
|
+
/** Build the deps object passed to retry topic consumers. */
|
|
2651
|
+
buildRetryTopicDeps() {
|
|
2475
2652
|
return {
|
|
2476
2653
|
logger: this.logger,
|
|
2477
2654
|
producer: this.producer,
|
|
2478
2655
|
instrumentation: this.instrumentation,
|
|
2479
2656
|
onMessageLost: this.onMessageLost,
|
|
2480
|
-
onRetry: this.notifyRetry.bind(this),
|
|
2481
|
-
onDlq: this.notifyDlq.bind(this),
|
|
2482
|
-
onMessage: this.notifyMessage.bind(this),
|
|
2657
|
+
onRetry: this.metrics.notifyRetry.bind(this.metrics),
|
|
2658
|
+
onDlq: this.metrics.notifyDlq.bind(this.metrics),
|
|
2659
|
+
onMessage: this.metrics.notifyMessage.bind(this.metrics),
|
|
2483
2660
|
ensureTopic: (t) => this.ensureTopic(t),
|
|
2484
|
-
getOrCreateConsumer: (gid, fb, ac) => getOrCreateConsumer(gid, fb, ac, this.consumerOpsDeps),
|
|
2661
|
+
getOrCreateConsumer: (gid, fb, ac) => getOrCreateConsumer(gid, fb, ac, this._consumerOpsDeps),
|
|
2485
2662
|
runningConsumers: this.runningConsumers,
|
|
2486
2663
|
createRetryTxProducer: (txId) => this.createRetryTxProducer(txId)
|
|
2487
2664
|
};
|