@drarzter/kafka-client 0.6.7 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +216 -5
- package/dist/{chunk-ISYOEX4W.mjs → chunk-MJ342P4R.mjs} +585 -49
- package/dist/chunk-MJ342P4R.mjs.map +1 -0
- package/dist/core.d.mts +49 -8
- package/dist/core.d.ts +49 -8
- package/dist/core.js +584 -48
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +584 -48
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/dist/testing.js +16 -0
- package/dist/testing.js.map +1 -1
- package/dist/testing.mjs +16 -0
- package/dist/testing.mjs.map +1 -1
- package/dist/{types-CqjRm-Cd.d.mts → types-DqQ7IXZr.d.mts} +161 -5
- package/dist/{types-CqjRm-Cd.d.ts → types-DqQ7IXZr.d.ts} +161 -5
- package/package.json +1 -1
- package/dist/chunk-ISYOEX4W.mjs.map +0 -1
|
@@ -336,9 +336,16 @@ var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
|
|
|
336
336
|
var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
|
|
337
337
|
function buildRetryTopicPayload(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders) {
|
|
338
338
|
const retryTopic = `${originalTopic}.retry.${attempt}`;
|
|
339
|
-
const STRIP = /* @__PURE__ */ new Set([
|
|
339
|
+
const STRIP = /* @__PURE__ */ new Set([
|
|
340
|
+
RETRY_HEADER_ATTEMPT,
|
|
341
|
+
RETRY_HEADER_AFTER,
|
|
342
|
+
RETRY_HEADER_MAX_RETRIES,
|
|
343
|
+
RETRY_HEADER_ORIGINAL_TOPIC
|
|
344
|
+
]);
|
|
340
345
|
function buildHeaders(hdr) {
|
|
341
|
-
const userHeaders = Object.fromEntries(
|
|
346
|
+
const userHeaders = Object.fromEntries(
|
|
347
|
+
Object.entries(hdr).filter(([k]) => !STRIP.has(k))
|
|
348
|
+
);
|
|
342
349
|
return {
|
|
343
350
|
...userHeaders,
|
|
344
351
|
[RETRY_HEADER_ATTEMPT]: String(attempt),
|
|
@@ -498,6 +505,17 @@ async function executeWithRetry(fn, ctx, deps) {
|
|
|
498
505
|
deps.instrumentation
|
|
499
506
|
);
|
|
500
507
|
if (!error) {
|
|
508
|
+
if (deps.eosCommitOnSuccess) {
|
|
509
|
+
try {
|
|
510
|
+
await deps.eosCommitOnSuccess();
|
|
511
|
+
} catch (commitErr) {
|
|
512
|
+
deps.logger.error(
|
|
513
|
+
`EOS offset commit failed after successful handler \u2014 message will be redelivered:`,
|
|
514
|
+
toError(commitErr).stack
|
|
515
|
+
);
|
|
516
|
+
return;
|
|
517
|
+
}
|
|
518
|
+
}
|
|
501
519
|
for (const env of envelopes) deps.onMessage?.(env);
|
|
502
520
|
return;
|
|
503
521
|
}
|
|
@@ -516,16 +534,28 @@ async function executeWithRetry(fn, ctx, deps) {
|
|
|
516
534
|
if (retryTopics && retry) {
|
|
517
535
|
const cap = Math.min(backoffMs, maxBackoffMs);
|
|
518
536
|
const delay = Math.floor(Math.random() * cap);
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
537
|
+
if (deps.eosRouteToRetry) {
|
|
538
|
+
try {
|
|
539
|
+
await deps.eosRouteToRetry(rawMessages, envelopes, delay);
|
|
540
|
+
deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
|
|
541
|
+
} catch (txErr) {
|
|
542
|
+
deps.logger.error(
|
|
543
|
+
`EOS routing to retry topic failed \u2014 message will be redelivered:`,
|
|
544
|
+
toError(txErr).stack
|
|
545
|
+
);
|
|
546
|
+
}
|
|
547
|
+
} else {
|
|
548
|
+
await sendToRetryTopic(
|
|
549
|
+
topic2,
|
|
550
|
+
rawMessages,
|
|
551
|
+
1,
|
|
552
|
+
retry.maxRetries,
|
|
553
|
+
delay,
|
|
554
|
+
isBatch ? envelopes.map((e) => e.headers) : envelopes[0]?.headers ?? {},
|
|
555
|
+
deps
|
|
556
|
+
);
|
|
557
|
+
deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
|
|
558
|
+
}
|
|
529
559
|
} else if (isLastAttempt) {
|
|
530
560
|
if (dlq) {
|
|
531
561
|
for (let i = 0; i < rawMessages.length; i++) {
|
|
@@ -625,6 +655,43 @@ async function handleEachMessage(payload, opts, deps) {
|
|
|
625
655
|
timeoutMs,
|
|
626
656
|
wrapWithTimeout
|
|
627
657
|
} = opts;
|
|
658
|
+
const eos = opts.eosMainContext;
|
|
659
|
+
const nextOffsetStr = (parseInt(message.offset, 10) + 1).toString();
|
|
660
|
+
const commitOffset = eos ? async () => {
|
|
661
|
+
await eos.consumer.commitOffsets([
|
|
662
|
+
{ topic: topic2, partition, offset: nextOffsetStr }
|
|
663
|
+
]);
|
|
664
|
+
} : void 0;
|
|
665
|
+
const eosRouteToRetry = eos && retry ? async (rawMsgs, envelopes, delay) => {
|
|
666
|
+
const { topic: rtTopic, messages: rtMsgs } = buildRetryTopicPayload(
|
|
667
|
+
topic2,
|
|
668
|
+
rawMsgs,
|
|
669
|
+
1,
|
|
670
|
+
retry.maxRetries,
|
|
671
|
+
delay,
|
|
672
|
+
envelopes[0]?.headers ?? {}
|
|
673
|
+
);
|
|
674
|
+
const tx = await eos.txProducer.transaction();
|
|
675
|
+
try {
|
|
676
|
+
await tx.send({ topic: rtTopic, messages: rtMsgs });
|
|
677
|
+
await tx.sendOffsets({
|
|
678
|
+
consumer: eos.consumer,
|
|
679
|
+
topics: [
|
|
680
|
+
{
|
|
681
|
+
topic: topic2,
|
|
682
|
+
partitions: [{ partition, offset: nextOffsetStr }]
|
|
683
|
+
}
|
|
684
|
+
]
|
|
685
|
+
});
|
|
686
|
+
await tx.commit();
|
|
687
|
+
} catch (txErr) {
|
|
688
|
+
try {
|
|
689
|
+
await tx.abort();
|
|
690
|
+
} catch {
|
|
691
|
+
}
|
|
692
|
+
throw txErr;
|
|
693
|
+
}
|
|
694
|
+
} : void 0;
|
|
628
695
|
const envelope = await parseSingleMessage(
|
|
629
696
|
message,
|
|
630
697
|
topic2,
|
|
@@ -634,7 +701,10 @@ async function handleEachMessage(payload, opts, deps) {
|
|
|
634
701
|
dlq,
|
|
635
702
|
deps
|
|
636
703
|
);
|
|
637
|
-
if (envelope === null)
|
|
704
|
+
if (envelope === null) {
|
|
705
|
+
await commitOffset?.();
|
|
706
|
+
return;
|
|
707
|
+
}
|
|
638
708
|
if (opts.deduplication) {
|
|
639
709
|
const isDuplicate = await applyDeduplication(
|
|
640
710
|
envelope,
|
|
@@ -643,7 +713,35 @@ async function handleEachMessage(payload, opts, deps) {
|
|
|
643
713
|
dlq,
|
|
644
714
|
deps
|
|
645
715
|
);
|
|
646
|
-
if (isDuplicate)
|
|
716
|
+
if (isDuplicate) {
|
|
717
|
+
await commitOffset?.();
|
|
718
|
+
return;
|
|
719
|
+
}
|
|
720
|
+
}
|
|
721
|
+
if (opts.messageTtlMs !== void 0) {
|
|
722
|
+
const ageMs = Date.now() - new Date(envelope.timestamp).getTime();
|
|
723
|
+
if (ageMs > opts.messageTtlMs) {
|
|
724
|
+
deps.logger.warn(
|
|
725
|
+
`[KafkaClient] TTL expired on ${topic2}: age ${ageMs}ms > ${opts.messageTtlMs}ms`
|
|
726
|
+
);
|
|
727
|
+
if (dlq) {
|
|
728
|
+
await sendToDlq(topic2, message.value.toString(), deps, {
|
|
729
|
+
error: new Error(`Message TTL expired: age ${ageMs}ms`),
|
|
730
|
+
attempt: 0,
|
|
731
|
+
originalHeaders: envelope.headers
|
|
732
|
+
});
|
|
733
|
+
deps.onDlq?.(envelope, "ttl-expired");
|
|
734
|
+
} else {
|
|
735
|
+
await deps.onMessageLost?.({
|
|
736
|
+
topic: topic2,
|
|
737
|
+
error: new Error(`TTL expired: ${ageMs}ms`),
|
|
738
|
+
attempt: 0,
|
|
739
|
+
headers: envelope.headers
|
|
740
|
+
});
|
|
741
|
+
}
|
|
742
|
+
await commitOffset?.();
|
|
743
|
+
return;
|
|
744
|
+
}
|
|
647
745
|
}
|
|
648
746
|
await executeWithRetry(
|
|
649
747
|
() => {
|
|
@@ -664,7 +762,7 @@ async function handleEachMessage(payload, opts, deps) {
|
|
|
664
762
|
retry,
|
|
665
763
|
retryTopics
|
|
666
764
|
},
|
|
667
|
-
deps
|
|
765
|
+
{ ...deps, eosRouteToRetry, eosCommitOnSuccess: commitOffset }
|
|
668
766
|
);
|
|
669
767
|
}
|
|
670
768
|
async function handleEachBatch(payload, opts, deps) {
|
|
@@ -679,6 +777,50 @@ async function handleEachBatch(payload, opts, deps) {
|
|
|
679
777
|
timeoutMs,
|
|
680
778
|
wrapWithTimeout
|
|
681
779
|
} = opts;
|
|
780
|
+
const eos = opts.eosMainContext;
|
|
781
|
+
const lastRawOffset = batch.messages.length > 0 ? batch.messages[batch.messages.length - 1].offset : void 0;
|
|
782
|
+
const batchNextOffsetStr = lastRawOffset ? (parseInt(lastRawOffset, 10) + 1).toString() : void 0;
|
|
783
|
+
const commitBatchOffset = eos && batchNextOffsetStr ? async () => {
|
|
784
|
+
await eos.consumer.commitOffsets([
|
|
785
|
+
{
|
|
786
|
+
topic: batch.topic,
|
|
787
|
+
partition: batch.partition,
|
|
788
|
+
offset: batchNextOffsetStr
|
|
789
|
+
}
|
|
790
|
+
]);
|
|
791
|
+
} : void 0;
|
|
792
|
+
const eosRouteToRetry = eos && retry && batchNextOffsetStr ? async (rawMsgs, envelopes2, delay) => {
|
|
793
|
+
const { topic: rtTopic, messages: rtMsgs } = buildRetryTopicPayload(
|
|
794
|
+
batch.topic,
|
|
795
|
+
rawMsgs,
|
|
796
|
+
1,
|
|
797
|
+
retry.maxRetries,
|
|
798
|
+
delay,
|
|
799
|
+
envelopes2.map((e) => e.headers)
|
|
800
|
+
);
|
|
801
|
+
const tx = await eos.txProducer.transaction();
|
|
802
|
+
try {
|
|
803
|
+
await tx.send({ topic: rtTopic, messages: rtMsgs });
|
|
804
|
+
await tx.sendOffsets({
|
|
805
|
+
consumer: eos.consumer,
|
|
806
|
+
topics: [
|
|
807
|
+
{
|
|
808
|
+
topic: batch.topic,
|
|
809
|
+
partitions: [
|
|
810
|
+
{ partition: batch.partition, offset: batchNextOffsetStr }
|
|
811
|
+
]
|
|
812
|
+
}
|
|
813
|
+
]
|
|
814
|
+
});
|
|
815
|
+
await tx.commit();
|
|
816
|
+
} catch (txErr) {
|
|
817
|
+
try {
|
|
818
|
+
await tx.abort();
|
|
819
|
+
} catch {
|
|
820
|
+
}
|
|
821
|
+
throw txErr;
|
|
822
|
+
}
|
|
823
|
+
} : void 0;
|
|
682
824
|
const envelopes = [];
|
|
683
825
|
const rawMessages = [];
|
|
684
826
|
for (const message of batch.messages) {
|
|
@@ -703,10 +845,37 @@ async function handleEachBatch(payload, opts, deps) {
|
|
|
703
845
|
);
|
|
704
846
|
if (isDuplicate) continue;
|
|
705
847
|
}
|
|
848
|
+
if (opts.messageTtlMs !== void 0) {
|
|
849
|
+
const ageMs = Date.now() - new Date(envelope.timestamp).getTime();
|
|
850
|
+
if (ageMs > opts.messageTtlMs) {
|
|
851
|
+
deps.logger.warn(
|
|
852
|
+
`[KafkaClient] TTL expired on ${batch.topic}: age ${ageMs}ms > ${opts.messageTtlMs}ms`
|
|
853
|
+
);
|
|
854
|
+
if (dlq) {
|
|
855
|
+
await sendToDlq(batch.topic, message.value.toString(), deps, {
|
|
856
|
+
error: new Error(`Message TTL expired: age ${ageMs}ms`),
|
|
857
|
+
attempt: 0,
|
|
858
|
+
originalHeaders: envelope.headers
|
|
859
|
+
});
|
|
860
|
+
deps.onDlq?.(envelope, "ttl-expired");
|
|
861
|
+
} else {
|
|
862
|
+
await deps.onMessageLost?.({
|
|
863
|
+
topic: batch.topic,
|
|
864
|
+
error: new Error(`TTL expired: ${ageMs}ms`),
|
|
865
|
+
attempt: 0,
|
|
866
|
+
headers: envelope.headers
|
|
867
|
+
});
|
|
868
|
+
}
|
|
869
|
+
continue;
|
|
870
|
+
}
|
|
871
|
+
}
|
|
706
872
|
envelopes.push(envelope);
|
|
707
873
|
rawMessages.push(message.value.toString());
|
|
708
874
|
}
|
|
709
|
-
if (envelopes.length === 0)
|
|
875
|
+
if (envelopes.length === 0) {
|
|
876
|
+
await commitBatchOffset?.();
|
|
877
|
+
return;
|
|
878
|
+
}
|
|
710
879
|
const meta = {
|
|
711
880
|
partition: batch.partition,
|
|
712
881
|
highWatermark: batch.highWatermark,
|
|
@@ -728,7 +897,7 @@ async function handleEachBatch(payload, opts, deps) {
|
|
|
728
897
|
isBatch: true,
|
|
729
898
|
retryTopics
|
|
730
899
|
},
|
|
731
|
-
deps
|
|
900
|
+
{ ...deps, eosRouteToRetry, eosCommitOnSuccess: commitBatchOffset }
|
|
732
901
|
);
|
|
733
902
|
}
|
|
734
903
|
|
|
@@ -1009,7 +1178,32 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
|
|
|
1009
1178
|
// src/client/kafka.client/index.ts
|
|
1010
1179
|
var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS;
|
|
1011
1180
|
var _activeTransactionalIds = /* @__PURE__ */ new Set();
|
|
1012
|
-
var
|
|
1181
|
+
var AsyncQueue = class {
|
|
1182
|
+
items = [];
|
|
1183
|
+
waiting = [];
|
|
1184
|
+
closed = false;
|
|
1185
|
+
push(item) {
|
|
1186
|
+
if (this.waiting.length > 0) {
|
|
1187
|
+
this.waiting.shift()({ value: item, done: false });
|
|
1188
|
+
} else {
|
|
1189
|
+
this.items.push(item);
|
|
1190
|
+
}
|
|
1191
|
+
}
|
|
1192
|
+
close() {
|
|
1193
|
+
this.closed = true;
|
|
1194
|
+
for (const r of this.waiting.splice(0)) {
|
|
1195
|
+
r({ value: void 0, done: true });
|
|
1196
|
+
}
|
|
1197
|
+
}
|
|
1198
|
+
next() {
|
|
1199
|
+
if (this.items.length > 0)
|
|
1200
|
+
return Promise.resolve({ value: this.items.shift(), done: false });
|
|
1201
|
+
if (this.closed)
|
|
1202
|
+
return Promise.resolve({ value: void 0, done: true });
|
|
1203
|
+
return new Promise((r) => this.waiting.push(r));
|
|
1204
|
+
}
|
|
1205
|
+
};
|
|
1206
|
+
var KafkaClient = class _KafkaClient {
|
|
1013
1207
|
kafka;
|
|
1014
1208
|
producer;
|
|
1015
1209
|
txProducer;
|
|
@@ -1036,17 +1230,16 @@ var KafkaClient = class {
|
|
|
1036
1230
|
onRebalance;
|
|
1037
1231
|
/** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
|
|
1038
1232
|
txId;
|
|
1039
|
-
/**
|
|
1040
|
-
|
|
1041
|
-
processedCount: 0,
|
|
1042
|
-
retryCount: 0,
|
|
1043
|
-
dlqCount: 0,
|
|
1044
|
-
dedupCount: 0
|
|
1045
|
-
};
|
|
1233
|
+
/** Per-topic event counters, lazily created on first event. Aggregated by `getMetrics()`. */
|
|
1234
|
+
_topicMetrics = /* @__PURE__ */ new Map();
|
|
1046
1235
|
/** Monotonically increasing Lamport clock stamped on every outgoing message. */
|
|
1047
1236
|
_lamportClock = 0;
|
|
1048
1237
|
/** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
|
|
1049
1238
|
dedupStates = /* @__PURE__ */ new Map();
|
|
1239
|
+
/** Circuit breaker state per `"${gid}:${topic}:${partition}"` key. */
|
|
1240
|
+
circuitStates = /* @__PURE__ */ new Map();
|
|
1241
|
+
/** Circuit breaker config per groupId, set at startConsumer/startBatchConsumer time. */
|
|
1242
|
+
circuitConfigs = /* @__PURE__ */ new Map();
|
|
1050
1243
|
isAdminConnected = false;
|
|
1051
1244
|
inFlightTotal = 0;
|
|
1052
1245
|
drainResolvers = [];
|
|
@@ -1186,13 +1379,22 @@ var KafkaClient = class {
|
|
|
1186
1379
|
"retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
|
|
1187
1380
|
);
|
|
1188
1381
|
}
|
|
1189
|
-
const
|
|
1190
|
-
const
|
|
1382
|
+
const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
|
|
1383
|
+
const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", setupOptions);
|
|
1384
|
+
if (options.circuitBreaker)
|
|
1385
|
+
this.circuitConfigs.set(gid, options.circuitBreaker);
|
|
1386
|
+
const deps = this.messageDepsFor(gid);
|
|
1191
1387
|
const timeoutMs = options.handlerTimeoutMs;
|
|
1192
1388
|
const deduplication = this.resolveDeduplicationContext(
|
|
1193
1389
|
gid,
|
|
1194
1390
|
options.deduplication
|
|
1195
1391
|
);
|
|
1392
|
+
let eosMainContext;
|
|
1393
|
+
if (options.retryTopics && retry) {
|
|
1394
|
+
const mainTxId = `${gid}-main-tx`;
|
|
1395
|
+
const txProducer = await this.createRetryTxProducer(mainTxId);
|
|
1396
|
+
eosMainContext = { txProducer, consumer };
|
|
1397
|
+
}
|
|
1196
1398
|
await consumer.run({
|
|
1197
1399
|
eachMessage: (payload) => this.trackInFlight(
|
|
1198
1400
|
() => handleEachMessage(
|
|
@@ -1206,7 +1408,9 @@ var KafkaClient = class {
|
|
|
1206
1408
|
retryTopics: options.retryTopics,
|
|
1207
1409
|
timeoutMs,
|
|
1208
1410
|
wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
|
|
1209
|
-
deduplication
|
|
1411
|
+
deduplication,
|
|
1412
|
+
messageTtlMs: options.messageTtlMs,
|
|
1413
|
+
eosMainContext
|
|
1210
1414
|
},
|
|
1211
1415
|
deps
|
|
1212
1416
|
)
|
|
@@ -1238,18 +1442,28 @@ var KafkaClient = class {
|
|
|
1238
1442
|
"retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
|
|
1239
1443
|
);
|
|
1240
1444
|
}
|
|
1241
|
-
if (options.
|
|
1445
|
+
if (options.retryTopics) {
|
|
1446
|
+
} else if (options.autoCommit !== false) {
|
|
1242
1447
|
this.logger.debug?.(
|
|
1243
1448
|
`startBatchConsumer: autoCommit is enabled (default true). If your handler calls resolveOffset() or commitOffsetsIfNecessary(), set autoCommit: false to avoid offset conflicts.`
|
|
1244
1449
|
);
|
|
1245
1450
|
}
|
|
1246
|
-
const
|
|
1247
|
-
const
|
|
1451
|
+
const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
|
|
1452
|
+
const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", setupOptions);
|
|
1453
|
+
if (options.circuitBreaker)
|
|
1454
|
+
this.circuitConfigs.set(gid, options.circuitBreaker);
|
|
1455
|
+
const deps = this.messageDepsFor(gid);
|
|
1248
1456
|
const timeoutMs = options.handlerTimeoutMs;
|
|
1249
1457
|
const deduplication = this.resolveDeduplicationContext(
|
|
1250
1458
|
gid,
|
|
1251
1459
|
options.deduplication
|
|
1252
1460
|
);
|
|
1461
|
+
let eosMainContext;
|
|
1462
|
+
if (options.retryTopics && retry) {
|
|
1463
|
+
const mainTxId = `${gid}-main-tx`;
|
|
1464
|
+
const txProducer = await this.createRetryTxProducer(mainTxId);
|
|
1465
|
+
eosMainContext = { txProducer, consumer };
|
|
1466
|
+
}
|
|
1253
1467
|
await consumer.run({
|
|
1254
1468
|
eachBatch: (payload) => this.trackInFlight(
|
|
1255
1469
|
() => handleEachBatch(
|
|
@@ -1263,7 +1477,9 @@ var KafkaClient = class {
|
|
|
1263
1477
|
retryTopics: options.retryTopics,
|
|
1264
1478
|
timeoutMs,
|
|
1265
1479
|
wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
|
|
1266
|
-
deduplication
|
|
1480
|
+
deduplication,
|
|
1481
|
+
messageTtlMs: options.messageTtlMs,
|
|
1482
|
+
eosMainContext
|
|
1267
1483
|
},
|
|
1268
1484
|
deps
|
|
1269
1485
|
)
|
|
@@ -1299,6 +1515,37 @@ var KafkaClient = class {
|
|
|
1299
1515
|
}
|
|
1300
1516
|
return { groupId: gid, stop: () => this.stopConsumer(gid) };
|
|
1301
1517
|
}
|
|
1518
|
+
/**
|
|
1519
|
+
* Consume messages from a topic as an AsyncIterableIterator.
|
|
1520
|
+
* Use with `for await` — breaking out of the loop automatically stops the consumer.
|
|
1521
|
+
*
|
|
1522
|
+
* @example
|
|
1523
|
+
* for await (const envelope of kafka.consume('my.topic')) {
|
|
1524
|
+
* console.log(envelope.data);
|
|
1525
|
+
* }
|
|
1526
|
+
*/
|
|
1527
|
+
consume(topic2, options) {
|
|
1528
|
+
const queue = new AsyncQueue();
|
|
1529
|
+
const handlePromise = this.startConsumer(
|
|
1530
|
+
[topic2],
|
|
1531
|
+
async (envelope) => {
|
|
1532
|
+
queue.push(envelope);
|
|
1533
|
+
},
|
|
1534
|
+
options
|
|
1535
|
+
);
|
|
1536
|
+
return {
|
|
1537
|
+
[Symbol.asyncIterator]() {
|
|
1538
|
+
return this;
|
|
1539
|
+
},
|
|
1540
|
+
next: () => queue.next(),
|
|
1541
|
+
return: async () => {
|
|
1542
|
+
queue.close();
|
|
1543
|
+
const handle = await handlePromise;
|
|
1544
|
+
await handle.stop();
|
|
1545
|
+
return { value: void 0, done: true };
|
|
1546
|
+
}
|
|
1547
|
+
};
|
|
1548
|
+
}
|
|
1302
1549
|
// ── Consumer lifecycle ───────────────────────────────────────────
|
|
1303
1550
|
async stopConsumer(groupId) {
|
|
1304
1551
|
if (groupId !== void 0) {
|
|
@@ -1319,7 +1566,26 @@ var KafkaClient = class {
|
|
|
1319
1566
|
this.runningConsumers.delete(groupId);
|
|
1320
1567
|
this.consumerCreationOptions.delete(groupId);
|
|
1321
1568
|
this.dedupStates.delete(groupId);
|
|
1569
|
+
for (const key of [...this.circuitStates.keys()]) {
|
|
1570
|
+
if (key.startsWith(`${groupId}:`)) {
|
|
1571
|
+
clearTimeout(this.circuitStates.get(key).timer);
|
|
1572
|
+
this.circuitStates.delete(key);
|
|
1573
|
+
}
|
|
1574
|
+
}
|
|
1575
|
+
this.circuitConfigs.delete(groupId);
|
|
1322
1576
|
this.logger.log(`Consumer disconnected: group "${groupId}"`);
|
|
1577
|
+
const mainTxId = `${groupId}-main-tx`;
|
|
1578
|
+
const mainTxProducer = this.retryTxProducers.get(mainTxId);
|
|
1579
|
+
if (mainTxProducer) {
|
|
1580
|
+
await mainTxProducer.disconnect().catch(
|
|
1581
|
+
(e) => this.logger.warn(
|
|
1582
|
+
`Error disconnecting main tx producer "${mainTxId}":`,
|
|
1583
|
+
toError(e).message
|
|
1584
|
+
)
|
|
1585
|
+
);
|
|
1586
|
+
_activeTransactionalIds.delete(mainTxId);
|
|
1587
|
+
this.retryTxProducers.delete(mainTxId);
|
|
1588
|
+
}
|
|
1323
1589
|
const companions = this.companionGroupIds.get(groupId) ?? [];
|
|
1324
1590
|
for (const cGroupId of companions) {
|
|
1325
1591
|
const cConsumer = this.consumers.get(cGroupId);
|
|
@@ -1367,9 +1633,175 @@ var KafkaClient = class {
|
|
|
1367
1633
|
this.companionGroupIds.clear();
|
|
1368
1634
|
this.retryTxProducers.clear();
|
|
1369
1635
|
this.dedupStates.clear();
|
|
1636
|
+
for (const state of this.circuitStates.values())
|
|
1637
|
+
clearTimeout(state.timer);
|
|
1638
|
+
this.circuitStates.clear();
|
|
1639
|
+
this.circuitConfigs.clear();
|
|
1370
1640
|
this.logger.log("All consumers disconnected");
|
|
1371
1641
|
}
|
|
1372
1642
|
}
|
|
1643
|
+
pauseConsumer(groupId, assignments) {
|
|
1644
|
+
const gid = groupId ?? this.defaultGroupId;
|
|
1645
|
+
const consumer = this.consumers.get(gid);
|
|
1646
|
+
if (!consumer) {
|
|
1647
|
+
this.logger.warn(`pauseConsumer: no active consumer for group "${gid}"`);
|
|
1648
|
+
return;
|
|
1649
|
+
}
|
|
1650
|
+
consumer.pause(
|
|
1651
|
+
assignments.flatMap(
|
|
1652
|
+
({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] }))
|
|
1653
|
+
)
|
|
1654
|
+
);
|
|
1655
|
+
}
|
|
1656
|
+
resumeConsumer(groupId, assignments) {
|
|
1657
|
+
const gid = groupId ?? this.defaultGroupId;
|
|
1658
|
+
const consumer = this.consumers.get(gid);
|
|
1659
|
+
if (!consumer) {
|
|
1660
|
+
this.logger.warn(`resumeConsumer: no active consumer for group "${gid}"`);
|
|
1661
|
+
return;
|
|
1662
|
+
}
|
|
1663
|
+
consumer.resume(
|
|
1664
|
+
assignments.flatMap(
|
|
1665
|
+
({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] }))
|
|
1666
|
+
)
|
|
1667
|
+
);
|
|
1668
|
+
}
|
|
1669
|
+
/** DLQ header keys added by `sendToDlq` — stripped before re-publishing. */
|
|
1670
|
+
static DLQ_HEADER_KEYS = /* @__PURE__ */ new Set([
|
|
1671
|
+
"x-dlq-original-topic",
|
|
1672
|
+
"x-dlq-failed-at",
|
|
1673
|
+
"x-dlq-error-message",
|
|
1674
|
+
"x-dlq-error-stack",
|
|
1675
|
+
"x-dlq-attempt-count"
|
|
1676
|
+
]);
|
|
1677
|
+
async replayDlq(topic2, options = {}) {
|
|
1678
|
+
const dlqTopic = `${topic2}.dlq`;
|
|
1679
|
+
await this.ensureAdminConnected();
|
|
1680
|
+
const partitionOffsets = await this.admin.fetchTopicOffsets(dlqTopic);
|
|
1681
|
+
const activePartitions = partitionOffsets.filter(
|
|
1682
|
+
(p) => parseInt(p.high, 10) > 0
|
|
1683
|
+
);
|
|
1684
|
+
if (activePartitions.length === 0) {
|
|
1685
|
+
this.logger.log(`replayDlq: "${dlqTopic}" is empty \u2014 nothing to replay`);
|
|
1686
|
+
return { replayed: 0, skipped: 0 };
|
|
1687
|
+
}
|
|
1688
|
+
const highWatermarks = new Map(
|
|
1689
|
+
activePartitions.map(({ partition, high }) => [
|
|
1690
|
+
partition,
|
|
1691
|
+
parseInt(high, 10)
|
|
1692
|
+
])
|
|
1693
|
+
);
|
|
1694
|
+
const processedOffsets = /* @__PURE__ */ new Map();
|
|
1695
|
+
let replayed = 0;
|
|
1696
|
+
let skipped = 0;
|
|
1697
|
+
const tempGroupId = `${dlqTopic}-replay-${Date.now()}`;
|
|
1698
|
+
await new Promise((resolve, reject) => {
|
|
1699
|
+
const consumer = getOrCreateConsumer(
|
|
1700
|
+
tempGroupId,
|
|
1701
|
+
true,
|
|
1702
|
+
true,
|
|
1703
|
+
this.consumerOpsDeps
|
|
1704
|
+
);
|
|
1705
|
+
const cleanup = () => {
|
|
1706
|
+
consumer.disconnect().catch(() => {
|
|
1707
|
+
}).finally(() => {
|
|
1708
|
+
this.consumers.delete(tempGroupId);
|
|
1709
|
+
this.runningConsumers.delete(tempGroupId);
|
|
1710
|
+
this.consumerCreationOptions.delete(tempGroupId);
|
|
1711
|
+
});
|
|
1712
|
+
};
|
|
1713
|
+
consumer.connect().then(() => subscribeWithRetry(consumer, [dlqTopic], this.logger)).then(
|
|
1714
|
+
() => consumer.run({
|
|
1715
|
+
eachMessage: async ({ partition, message }) => {
|
|
1716
|
+
if (!message.value) return;
|
|
1717
|
+
const offset = parseInt(message.offset, 10);
|
|
1718
|
+
processedOffsets.set(partition, offset);
|
|
1719
|
+
const headers = decodeHeaders(message.headers);
|
|
1720
|
+
const targetTopic = options.targetTopic ?? headers["x-dlq-original-topic"];
|
|
1721
|
+
const originalHeaders = Object.fromEntries(
|
|
1722
|
+
Object.entries(headers).filter(
|
|
1723
|
+
([k]) => !_KafkaClient.DLQ_HEADER_KEYS.has(k)
|
|
1724
|
+
)
|
|
1725
|
+
);
|
|
1726
|
+
const value = message.value.toString();
|
|
1727
|
+
const shouldProcess = !options.filter || options.filter(headers, value);
|
|
1728
|
+
if (!targetTopic || !shouldProcess) {
|
|
1729
|
+
skipped++;
|
|
1730
|
+
} else if (options.dryRun) {
|
|
1731
|
+
this.logger.log(
|
|
1732
|
+
`[DLQ replay dry-run] Would replay to "${targetTopic}"`
|
|
1733
|
+
);
|
|
1734
|
+
replayed++;
|
|
1735
|
+
} else {
|
|
1736
|
+
await this.producer.send({
|
|
1737
|
+
topic: targetTopic,
|
|
1738
|
+
messages: [{ value, headers: originalHeaders }]
|
|
1739
|
+
});
|
|
1740
|
+
replayed++;
|
|
1741
|
+
}
|
|
1742
|
+
const allDone = Array.from(highWatermarks.entries()).every(
|
|
1743
|
+
([p, hwm]) => (processedOffsets.get(p) ?? -1) >= hwm - 1
|
|
1744
|
+
);
|
|
1745
|
+
if (allDone) {
|
|
1746
|
+
cleanup();
|
|
1747
|
+
resolve();
|
|
1748
|
+
}
|
|
1749
|
+
}
|
|
1750
|
+
})
|
|
1751
|
+
).catch((err) => {
|
|
1752
|
+
cleanup();
|
|
1753
|
+
reject(err);
|
|
1754
|
+
});
|
|
1755
|
+
});
|
|
1756
|
+
this.logger.log(
|
|
1757
|
+
`replayDlq: replayed ${replayed}, skipped ${skipped} from "${dlqTopic}"`
|
|
1758
|
+
);
|
|
1759
|
+
return { replayed, skipped };
|
|
1760
|
+
}
|
|
1761
|
+
async resetOffsets(groupId, topic2, position) {
|
|
1762
|
+
const gid = groupId ?? this.defaultGroupId;
|
|
1763
|
+
if (this.runningConsumers.has(gid)) {
|
|
1764
|
+
throw new Error(
|
|
1765
|
+
`resetOffsets: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before resetting offsets.`
|
|
1766
|
+
);
|
|
1767
|
+
}
|
|
1768
|
+
await this.ensureAdminConnected();
|
|
1769
|
+
const partitionOffsets = await this.admin.fetchTopicOffsets(topic2);
|
|
1770
|
+
const partitions = partitionOffsets.map(({ partition, low, high }) => ({
|
|
1771
|
+
partition,
|
|
1772
|
+
offset: position === "earliest" ? low : high
|
|
1773
|
+
}));
|
|
1774
|
+
await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
|
|
1775
|
+
this.logger.log(
|
|
1776
|
+
`Offsets reset to ${position} for group "${gid}" on topic "${topic2}"`
|
|
1777
|
+
);
|
|
1778
|
+
}
|
|
1779
|
+
/**
|
|
1780
|
+
* Seek specific topic-partition pairs to explicit offsets for a stopped consumer group.
|
|
1781
|
+
* Throws if the group is still running — call `stopConsumer(groupId)` first.
|
|
1782
|
+
* Assignments are grouped by topic and committed via `admin.setOffsets`.
|
|
1783
|
+
*/
|
|
1784
|
+
async seekToOffset(groupId, assignments) {
|
|
1785
|
+
const gid = groupId ?? this.defaultGroupId;
|
|
1786
|
+
if (this.runningConsumers.has(gid)) {
|
|
1787
|
+
throw new Error(
|
|
1788
|
+
`seekToOffset: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
|
|
1789
|
+
);
|
|
1790
|
+
}
|
|
1791
|
+
await this.ensureAdminConnected();
|
|
1792
|
+
const byTopic = /* @__PURE__ */ new Map();
|
|
1793
|
+
for (const { topic: topic2, partition, offset } of assignments) {
|
|
1794
|
+
const list = byTopic.get(topic2) ?? [];
|
|
1795
|
+
list.push({ partition, offset });
|
|
1796
|
+
byTopic.set(topic2, list);
|
|
1797
|
+
}
|
|
1798
|
+
for (const [topic2, partitions] of byTopic) {
|
|
1799
|
+
await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
|
|
1800
|
+
this.logger.log(
|
|
1801
|
+
`Offsets set for group "${gid}" on "${topic2}": ${JSON.stringify(partitions)}`
|
|
1802
|
+
);
|
|
1803
|
+
}
|
|
1804
|
+
}
|
|
1373
1805
|
/**
|
|
1374
1806
|
* Query consumer group lag per partition.
|
|
1375
1807
|
* Lag = broker high-watermark − last committed offset.
|
|
@@ -1420,14 +1852,31 @@ var KafkaClient = class {
|
|
|
1420
1852
|
getClientId() {
|
|
1421
1853
|
return this.clientId;
|
|
1422
1854
|
}
|
|
1423
|
-
getMetrics() {
|
|
1424
|
-
|
|
1855
|
+
getMetrics(topic2) {
|
|
1856
|
+
if (topic2 !== void 0) {
|
|
1857
|
+
const m = this._topicMetrics.get(topic2);
|
|
1858
|
+
return m ? { ...m } : { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
|
|
1859
|
+
}
|
|
1860
|
+
const agg = {
|
|
1861
|
+
processedCount: 0,
|
|
1862
|
+
retryCount: 0,
|
|
1863
|
+
dlqCount: 0,
|
|
1864
|
+
dedupCount: 0
|
|
1865
|
+
};
|
|
1866
|
+
for (const m of this._topicMetrics.values()) {
|
|
1867
|
+
agg.processedCount += m.processedCount;
|
|
1868
|
+
agg.retryCount += m.retryCount;
|
|
1869
|
+
agg.dlqCount += m.dlqCount;
|
|
1870
|
+
agg.dedupCount += m.dedupCount;
|
|
1871
|
+
}
|
|
1872
|
+
return agg;
|
|
1425
1873
|
}
|
|
1426
|
-
resetMetrics() {
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1874
|
+
resetMetrics(topic2) {
|
|
1875
|
+
if (topic2 !== void 0) {
|
|
1876
|
+
this._topicMetrics.delete(topic2);
|
|
1877
|
+
return;
|
|
1878
|
+
}
|
|
1879
|
+
this._topicMetrics.clear();
|
|
1431
1880
|
}
|
|
1432
1881
|
/** Gracefully disconnect producer, all consumers, and admin. */
|
|
1433
1882
|
async disconnect(drainTimeoutMs = 3e4) {
|
|
@@ -1458,6 +1907,9 @@ var KafkaClient = class {
|
|
|
1458
1907
|
this.runningConsumers.clear();
|
|
1459
1908
|
this.consumerCreationOptions.clear();
|
|
1460
1909
|
this.companionGroupIds.clear();
|
|
1910
|
+
for (const state of this.circuitStates.values()) clearTimeout(state.timer);
|
|
1911
|
+
this.circuitStates.clear();
|
|
1912
|
+
this.circuitConfigs.clear();
|
|
1461
1913
|
this.logger.log("All connections closed");
|
|
1462
1914
|
}
|
|
1463
1915
|
// ── Graceful shutdown ────────────────────────────────────────────
|
|
@@ -1537,29 +1989,112 @@ var KafkaClient = class {
|
|
|
1537
1989
|
}
|
|
1538
1990
|
}
|
|
1539
1991
|
}
|
|
1992
|
+
metricsFor(topic2) {
|
|
1993
|
+
let m = this._topicMetrics.get(topic2);
|
|
1994
|
+
if (!m) {
|
|
1995
|
+
m = { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
|
|
1996
|
+
this._topicMetrics.set(topic2, m);
|
|
1997
|
+
}
|
|
1998
|
+
return m;
|
|
1999
|
+
}
|
|
1540
2000
|
notifyRetry(envelope, attempt, maxRetries) {
|
|
1541
|
-
this.
|
|
2001
|
+
this.metricsFor(envelope.topic).retryCount++;
|
|
1542
2002
|
for (const inst of this.instrumentation) {
|
|
1543
2003
|
inst.onRetry?.(envelope, attempt, maxRetries);
|
|
1544
2004
|
}
|
|
1545
2005
|
}
|
|
1546
|
-
// Record a dead-lettered message: bump per-topic DLQ metrics, notify
// instrumentation hooks, and feed the failure into the circuit breaker
// tracked per (group, topic, partition).
// `gid` (consumer group id) is optional; when absent, or when the group has
// no entry in `circuitConfigs`, only metrics/instrumentation are updated.
notifyDlq(envelope, reason, gid) {
  this.metricsFor(envelope.topic).dlqCount++;
  for (const inst of this.instrumentation) {
    inst.onDlq?.(envelope, reason);
  }
  // Circuit breaking is opt-in per consumer group.
  if (!gid) return;
  const cfg = this.circuitConfigs.get(gid);
  if (!cfg) return;
  const threshold = cfg.threshold ?? 5;
  const recoveryMs = cfg.recoveryMs ?? 3e4; // default recovery delay: 30 000 ms
  // Breaker state is keyed by group + topic + partition.
  const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
  let state = this.circuitStates.get(stateKey);
  if (!state) {
    // `window` is a sliding list of booleans (true = success, false = failure).
    state = { status: "closed", window: [], successes: 0 };
    this.circuitStates.set(stateKey, state);
  }
  // Already open: the partition is paused (or about to be); nothing to record.
  if (state.status === "open") return;
  const openCircuit = () => {
    state.status = "open";
    // Pause only the affected topic-partition for this group.
    this.pauseConsumer(gid, [
      { topic: envelope.topic, partitions: [envelope.partition] }
    ]);
    // After the recovery delay, transition to half-open and resume so live
    // traffic can probe whether the downstream issue has cleared.
    // NOTE(review): the timer is not unref'd — presumably acceptable for a
    // long-lived consumer process; confirm it cannot delay shutdown.
    state.timer = setTimeout(() => {
      state.status = "half-open";
      state.successes = 0;
      this.logger.log(
        `[CircuitBreaker] HALF-OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
      );
      this.resumeConsumer(gid, [
        { topic: envelope.topic, partitions: [envelope.partition] }
      ]);
    }, recoveryMs);
  };
  // Any failure while half-open immediately re-opens the circuit.
  if (state.status === "half-open") {
    // Clear the (already fired) open->half-open timer before arming a new one.
    clearTimeout(state.timer);
    this.logger.warn(
      `[CircuitBreaker] OPEN (half-open failure) \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
    );
    openCircuit();
    return;
  }
  // Closed: append the failure to the sliding window, trimming to windowSize.
  const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
  state.window = [...state.window, false];
  if (state.window.length > windowSize) {
    state.window = state.window.slice(state.window.length - windowSize);
  }
  // Open the circuit once failures within the window reach the threshold.
  const failures = state.window.filter((v) => !v).length;
  if (failures >= threshold) {
    this.logger.warn(
      `[CircuitBreaker] OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition} (${failures}/${state.window.length} failures, threshold=${threshold})`
    );
    openCircuit();
  }
}
|
|
1552
2060
|
notifyDuplicate(envelope, strategy) {
|
|
1553
|
-
this.
|
|
2061
|
+
this.metricsFor(envelope.topic).dedupCount++;
|
|
1554
2062
|
for (const inst of this.instrumentation) {
|
|
1555
2063
|
inst.onDuplicate?.(envelope, strategy);
|
|
1556
2064
|
}
|
|
1557
2065
|
}
|
|
1558
|
-
// Record a successfully processed message: bump per-topic metrics, notify
// instrumentation hooks, and feed the success into the circuit breaker for
// the (group, topic, partition) it was consumed on.
// `gid` is optional; without it (or without a circuit config / existing
// breaker state) only metrics/instrumentation are updated.
notifyMessage(envelope, gid) {
  this.metricsFor(envelope.topic).processedCount++;
  for (const inst of this.instrumentation) {
    inst.onMessage?.(envelope);
  }
  if (!gid) return;
  const cfg = this.circuitConfigs.get(gid);
  if (!cfg) return;
  // Same composite key used by notifyDlq when recording failures.
  const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
  const state = this.circuitStates.get(stateKey);
  // No state means the breaker never tripped for this partition — nothing to do.
  if (!state) return;
  const halfOpenSuccesses = cfg.halfOpenSuccesses ?? 1;
  if (state.status === "half-open") {
    state.successes++;
    // Enough consecutive probe successes: close the circuit and reset state.
    if (state.successes >= halfOpenSuccesses) {
      clearTimeout(state.timer);
      state.timer = void 0;
      state.status = "closed";
      state.window = []; // discard old failure history on close
      state.successes = 0;
      this.logger.log(
        `[CircuitBreaker] CLOSED \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
      );
    }
  } else if (state.status === "closed") {
    // Closed: append the success (true) to the sliding window so later
    // failures are evaluated against recent traffic, trimming to windowSize.
    const threshold = cfg.threshold ?? 5;
    const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
    state.window = [...state.window, true];
    if (state.window.length > windowSize) {
      state.window = state.window.slice(state.window.length - windowSize);
    }
  }
}
|
|
1564
2099
|
/**
|
|
1565
2100
|
* Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
|
|
@@ -1780,16 +2315,17 @@ var KafkaClient = class {
|
|
|
1780
2315
|
logger: this.logger
|
|
1781
2316
|
};
|
|
1782
2317
|
}
|
|
1783
|
-
|
|
2318
|
+
/** Build MessageHandlerDeps with circuit breaker callbacks bound to the given groupId. */
|
|
2319
|
+
messageDepsFor(gid) {
|
|
1784
2320
|
return {
|
|
1785
2321
|
logger: this.logger,
|
|
1786
2322
|
producer: this.producer,
|
|
1787
2323
|
instrumentation: this.instrumentation,
|
|
1788
2324
|
onMessageLost: this.onMessageLost,
|
|
1789
2325
|
onRetry: this.notifyRetry.bind(this),
|
|
1790
|
-
onDlq: this.notifyDlq
|
|
2326
|
+
onDlq: (envelope, reason) => this.notifyDlq(envelope, reason, gid),
|
|
1791
2327
|
onDuplicate: this.notifyDuplicate.bind(this),
|
|
1792
|
-
onMessage: this.notifyMessage
|
|
2328
|
+
onMessage: (envelope) => this.notifyMessage(envelope, gid)
|
|
1793
2329
|
};
|
|
1794
2330
|
}
|
|
1795
2331
|
get retryTopicDeps() {
|
|
@@ -1843,4 +2379,4 @@ export {
|
|
|
1843
2379
|
KafkaClient,
|
|
1844
2380
|
topic
|
|
1845
2381
|
};
|
|
1846
|
-
//# sourceMappingURL=chunk-
|
|
2382
|
+
//# sourceMappingURL=chunk-MJ342P4R.mjs.map
|