@drarzter/kafka-client 0.6.3 → 0.6.6
- package/README.md +84 -2
- package/dist/{chunk-RGRKN4E5.mjs → chunk-KCUKXR6B.mjs} +244 -39
- package/dist/chunk-KCUKXR6B.mjs.map +1 -0
- package/dist/core.d.mts +37 -3
- package/dist/core.d.ts +37 -3
- package/dist/core.js +244 -38
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +3 -1
- package/dist/index.d.mts +2 -3
- package/dist/index.d.ts +2 -3
- package/dist/index.js +246 -57
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +5 -20
- package/dist/index.mjs.map +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/dist/{types-zFbQH_Cy.d.mts → types-CTwLrJVU.d.mts} +44 -1
- package/dist/{types-zFbQH_Cy.d.ts → types-CTwLrJVU.d.ts} +44 -1
- package/package.json +1 -1
- package/dist/chunk-RGRKN4E5.mjs.map +0 -1
package/dist/core.js
CHANGED
@@ -22,6 +22,7 @@ var core_exports = {};
 __export(core_exports, {
   HEADER_CORRELATION_ID: () => HEADER_CORRELATION_ID,
   HEADER_EVENT_ID: () => HEADER_EVENT_ID,
+  HEADER_LAMPORT_CLOCK: () => HEADER_LAMPORT_CLOCK,
   HEADER_SCHEMA_VERSION: () => HEADER_SCHEMA_VERSION,
   HEADER_TIMESTAMP: () => HEADER_TIMESTAMP,
   HEADER_TRACEPARENT: () => HEADER_TRACEPARENT,
@@ -49,6 +50,7 @@ var HEADER_CORRELATION_ID = "x-correlation-id";
 var HEADER_TIMESTAMP = "x-timestamp";
 var HEADER_SCHEMA_VERSION = "x-schema-version";
 var HEADER_TRACEPARENT = "traceparent";
+var HEADER_LAMPORT_CLOCK = "x-lamport-clock";
 var envelopeStorage = new import_node_async_hooks.AsyncLocalStorage();
 function getEnvelopeContext() {
   return envelopeStorage.getStore();
@@ -189,6 +191,9 @@ async function buildSendPayload(topicOrDesc, messages, deps) {
       eventId: m.eventId,
       headers: m.headers
     });
+    if (deps.nextLamportClock) {
+      envelopeHeaders[HEADER_LAMPORT_CLOCK] = String(deps.nextLamportClock());
+    }
     for (const inst of deps.instrumentation) {
       inst.beforeSend?.(topic2, envelopeHeaders);
     }
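
The hunk above is the producer half of the new deduplication feature: when the `nextLamportClock` dependency is present, every outgoing message is stamped with an `x-lamport-clock` header. A minimal sketch of the stamping idea (the constant matches the diff; the counter wiring around it is simplified, not the package's exact internals):

```ts
// Sketch of producer-side Lamport stamping, mirroring the hunk above.
// HEADER_LAMPORT_CLOCK matches the constant added in this release.
const HEADER_LAMPORT_CLOCK = "x-lamport-clock";

let clock = 0;
const nextLamportClock = (): number => ++clock; // one counter per client instance

function stampHeaders(headers: Record<string, string>): Record<string, string> {
  // Kafka headers travel as strings, hence String(...) as in the diff.
  return { ...headers, [HEADER_LAMPORT_CLOCK]: String(nextLamportClock()) };
}

console.log(stampHeaders({})); // { "x-lamport-clock": "1" }
console.log(stampHeaders({})); // { "x-lamport-clock": "2" }
```
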
@@ -326,6 +331,9 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
       -1,
       ""
     );
+    for (const inst of deps.instrumentation ?? []) {
+      inst.onConsumeError?.(errorEnvelope, validationError);
+    }
     for (const interceptor of interceptors) {
       await interceptor.onError?.(errorEnvelope, validationError);
     }
@@ -420,6 +428,29 @@ async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries,
     });
   }
 }
+function buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, meta) {
+  const headers = {
+    ...meta?.originalHeaders ?? {},
+    "x-duplicate-original-topic": sourceTopic,
+    "x-duplicate-detected-at": (/* @__PURE__ */ new Date()).toISOString(),
+    "x-duplicate-reason": "lamport-clock-duplicate",
+    "x-duplicate-incoming-clock": String(meta?.incomingClock ?? 0),
+    "x-duplicate-last-processed-clock": String(meta?.lastProcessedClock ?? 0)
+  };
+  return { topic: destinationTopic, messages: [{ value: rawMessage, headers }] };
+}
+async function sendToDuplicatesTopic(sourceTopic, rawMessage, destinationTopic, deps, meta) {
+  const payload = buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, meta);
+  try {
+    await deps.producer.send(payload);
+    deps.logger.warn(`Duplicate message forwarded to ${destinationTopic}`);
+  } catch (error) {
+    deps.logger.error(
+      `Failed to forward duplicate to ${destinationTopic}:`,
+      toError(error).stack
+    );
+  }
+}
 async function broadcastToInterceptors(envelopes, interceptors, cb) {
   for (const env of envelopes) {
     for (const interceptor of interceptors) {
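
`sendToDuplicatesTopic` is deliberately best-effort: a failed forward is logged, never thrown, so duplicate routing cannot break the consume loop. Downstream, the `x-duplicate-*` headers carry everything needed for an audit trail. A hypothetical reader of those headers (the names are exactly those written by `buildDuplicateTopicPayload`; the type is a simplification):

```ts
// Hypothetical audit helper for a `<topic>.duplicates` destination topic.
// Header names come from buildDuplicateTopicPayload above.
type DuplicateHeaders = Record<string, string | undefined>;

function describeDuplicate(headers: DuplicateHeaders): string {
  return [
    `source topic:   ${headers["x-duplicate-original-topic"]}`,
    `detected at:    ${headers["x-duplicate-detected-at"]}`,
    `reason:         ${headers["x-duplicate-reason"]}`,
    `incoming clock: ${headers["x-duplicate-incoming-clock"]}`,
    `last processed: ${headers["x-duplicate-last-processed-clock"]}`,
  ].join("\n");
}
```
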
@@ -528,13 +559,12 @@ async function executeWithRetry(fn, ctx, deps) {
       );
     } else if (isLastAttempt) {
       if (dlq) {
-
-
-
-
-
-
-        await sendToDlq(topic2, raw, deps, dlqMeta);
+        for (let i = 0; i < rawMessages.length; i++) {
+          await sendToDlq(topic2, rawMessages[i], deps, {
+            error,
+            attempt,
+            originalHeaders: envelopes[i]?.headers
+          });
         }
       } else {
         await deps.onMessageLost?.({
@@ -546,12 +576,50 @@ async function executeWithRetry(fn, ctx, deps) {
       }
     } else {
       const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
-      await sleep(Math.random() * cap);
+      await sleep(Math.floor(Math.random() * cap));
     }
   }
 }
 
 // src/client/kafka.client/message-handler.ts
+async function applyDeduplication(envelope, raw, dedup, dlq, deps) {
+  const clockRaw = envelope.headers[HEADER_LAMPORT_CLOCK];
+  if (clockRaw === void 0) return false;
+  const incomingClock = Number(clockRaw);
+  if (Number.isNaN(incomingClock)) return false;
+  const stateKey = `${envelope.topic}:${envelope.partition}`;
+  const lastProcessedClock = dedup.state.get(stateKey) ?? -1;
+  if (incomingClock <= lastProcessedClock) {
+    const meta = {
+      incomingClock,
+      lastProcessedClock,
+      originalHeaders: envelope.headers
+    };
+    const strategy = dedup.options.strategy ?? "drop";
+    deps.logger.warn(
+      `Duplicate message on ${envelope.topic}[${envelope.partition}]: clock=${incomingClock} <= last=${lastProcessedClock} \u2014 strategy=${strategy}`
+    );
+    if (strategy === "dlq" && dlq) {
+      const augmentedHeaders = {
+        ...envelope.headers,
+        "x-dlq-reason": "lamport-clock-duplicate",
+        "x-dlq-duplicate-incoming-clock": String(incomingClock),
+        "x-dlq-duplicate-last-processed-clock": String(lastProcessedClock)
+      };
+      await sendToDlq(envelope.topic, raw, deps, {
+        error: new Error("Lamport Clock duplicate detected"),
+        attempt: 0,
+        originalHeaders: augmentedHeaders
+      });
+    } else if (strategy === "topic") {
+      const destination = dedup.options.duplicatesTopic ?? `${envelope.topic}.duplicates`;
+      await sendToDuplicatesTopic(envelope.topic, raw, destination, deps, meta);
+    }
+    return true;
+  }
+  dedup.state.set(stateKey, incomingClock);
+  return false;
+}
 async function parseSingleMessage(message, topic2, partition, schemaMap, interceptors, dlq, deps) {
   if (!message.value) {
     deps.logger.warn(`Received empty message from topic ${topic2}`);
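
`applyDeduplication` is the heart of the feature: per `topic:partition` key it keeps the highest Lamport clock seen, treats anything at or below it as a duplicate, and passes unstamped or malformed clocks through untouched. The comparison in isolation (state shape as in the diff, everything else trimmed):

```ts
// Standalone model of the duplicate check in applyDeduplication above.
// state maps "topic:partition" -> last processed Lamport clock.
const state = new Map<string, number>();

function isDuplicate(topic: string, partition: number, clockHeader?: string): boolean {
  if (clockHeader === undefined) return false; // unstamped messages always pass
  const incoming = Number(clockHeader);
  if (Number.isNaN(incoming)) return false;    // malformed clocks always pass
  const key = `${topic}:${partition}`;
  const last = state.get(key) ?? -1;
  if (incoming <= last) return true;           // at or below the high-water mark
  state.set(key, incoming);                    // new high-water mark: process it
  return false;
}

console.log(isDuplicate("orders", 0, "5")); // false, processed; last = 5
console.log(isDuplicate("orders", 0, "5")); // true, redelivery of clock 5
console.log(isDuplicate("orders", 1, "5")); // false, each partition has its own state
```
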
@@ -595,6 +663,16 @@ async function handleEachMessage(payload, opts, deps) {
     deps
   );
   if (envelope === null) return;
+  if (opts.deduplication) {
+    const isDuplicate = await applyDeduplication(
+      envelope,
+      message.value.toString(),
+      opts.deduplication,
+      dlq,
+      deps
+    );
+    if (isDuplicate) return;
+  }
   await executeWithRetry(
     () => {
       const fn = () => runWithEnvelopeContext(
@@ -642,6 +720,17 @@ async function handleEachBatch(payload, opts, deps) {
       deps
     );
     if (envelope === null) continue;
+    if (opts.deduplication) {
+      const raw = message.value.toString();
+      const isDuplicate = await applyDeduplication(
+        envelope,
+        raw,
+        opts.deduplication,
+        dlq,
+        deps
+      );
+      if (isDuplicate) continue;
+    }
     envelopes.push(envelope);
     rawMessages.push(message.value.toString());
   }
@@ -920,7 +1009,9 @@ var KafkaClient = class {
   kafka;
   producer;
   txProducer;
-
+  txProducerInitPromise;
+  /** Maps transactionalId → Producer for each active retry level consumer. */
+  retryTxProducers = /* @__PURE__ */ new Map();
   consumers = /* @__PURE__ */ new Map();
   admin;
   logger;
@@ -928,6 +1019,8 @@ var KafkaClient = class {
   strictSchemasEnabled;
   numPartitions;
   ensuredTopics = /* @__PURE__ */ new Set();
+  /** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
+  ensureTopicPromises = /* @__PURE__ */ new Map();
   defaultGroupId;
   schemaRegistry = /* @__PURE__ */ new Map();
   runningConsumers = /* @__PURE__ */ new Map();
@@ -937,6 +1030,10 @@ var KafkaClient = class {
   instrumentation;
   onMessageLost;
   onRebalance;
+  /** Monotonically increasing Lamport clock stamped on every outgoing message. */
+  _lamportClock = 0;
+  /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
+  dedupStates = /* @__PURE__ */ new Map();
   isAdminConnected = false;
   inFlightTotal = 0;
   drainResolvers = [];
@@ -991,18 +1088,25 @@ var KafkaClient = class {
   }
   /** Execute multiple sends atomically. Commits on success, aborts on error. */
   async transaction(fn) {
-    if (!this.
-    const
-
-
-
-
-
-
+    if (!this.txProducerInitPromise) {
+      const initPromise = (async () => {
+        const p = this.kafka.producer({
+          kafkaJS: {
+            acks: -1,
+            idempotent: true,
+            transactionalId: `${this.clientId}-tx`,
+            maxInFlightRequests: 1
+          }
+        });
+        await p.connect();
+        return p;
+      })();
+      this.txProducerInitPromise = initPromise.catch((err) => {
+        this.txProducerInitPromise = void 0;
+        throw err;
       });
-    await p.connect();
-    this.txProducer = p;
     }
+    this.txProducer = await this.txProducerInitPromise;
     const tx = await this.txProducer.transaction();
     try {
       const ctx = {
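
The rewritten `transaction()` swaps a check-then-connect sequence for a memoized init promise: concurrent first calls now share a single `producer()` + `connect()`, and a failed init clears the memo so the next call can retry instead of awaiting a rejected promise forever. The pattern in isolation (a sketch; `connect` stands in for the real producer setup):

```ts
// Promise-memoized lazy init with reset-on-failure, as txProducerInitPromise
// is handled above. connect() stands in for kafka.producer(...) + connect().
let initPromise: Promise<string> | undefined;

async function connect(): Promise<string> {
  return "tx-producer"; // placeholder for the real connection work
}

function getTxProducer(): Promise<string> {
  if (!initPromise) {
    initPromise = connect().catch((err) => {
      initPromise = undefined; // drop the memo so the next caller retries
      throw err;
    });
  }
  return initPromise; // concurrent callers await the same in-flight promise
}

async function demo(): Promise<void> {
  // Both calls resolve from one connect(), even when issued concurrently.
  const [a, b] = await Promise.all([getTxProducer(), getTxProducer()]);
  console.log(a === b); // true
}
void demo();
```
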
@@ -1041,11 +1145,17 @@ var KafkaClient = class {
     }
   }
   // ── Producer lifecycle ───────────────────────────────────────────
-  /**
+  /**
+   * Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
+   * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
+   */
   async connectProducer() {
     await this.producer.connect();
     this.logger.log("Producer connected");
   }
+  /**
+   * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
+   */
   async disconnectProducer() {
     await this.producer.disconnect();
     this.logger.log("Producer disconnected");
@@ -1059,6 +1169,7 @@ var KafkaClient = class {
     const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
     const deps = this.messageDeps;
     const timeoutMs = options.handlerTimeoutMs;
+    const deduplication = this.resolveDeduplicationContext(gid, options.deduplication);
     await consumer.run({
       eachMessage: (payload) => this.trackInFlight(
         () => handleEachMessage(
@@ -1071,7 +1182,8 @@ var KafkaClient = class {
             retry,
             retryTopics: options.retryTopics,
             timeoutMs,
-            wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
+            wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
+            deduplication
           },
           deps
         )
@@ -1111,6 +1223,7 @@ var KafkaClient = class {
     const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
     const deps = this.messageDeps;
     const timeoutMs = options.handlerTimeoutMs;
+    const deduplication = this.resolveDeduplicationContext(gid, options.deduplication);
     await consumer.run({
       eachBatch: (payload) => this.trackInFlight(
         () => handleEachBatch(
@@ -1123,7 +1236,8 @@ var KafkaClient = class {
             retry,
             retryTopics: options.retryTopics,
             timeoutMs,
-            wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
+            wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
+            deduplication
           },
           deps
         )
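
With `resolveDeduplicationContext` threaded into both `startConsumer` and `startBatchConsumer`, deduplication is opt-in per consumer group via `options.deduplication`. A hypothetical call, assuming a `startConsumer(topics, options)` shape; the option names (`strategy`, `duplicatesTopic`) are taken from the code above, but the surrounding public signature is not visible in this diff:

```ts
// Hypothetical usage; the client type below is an assumption for illustration.
declare const client: {
  startConsumer(topics: string[], options: Record<string, unknown>): Promise<void>;
};

await client.startConsumer(["orders"], {
  groupId: "orders-service",
  deduplication: {
    // "drop" (default): log and skip the duplicate.
    // "dlq": forward to the DLQ with x-dlq-duplicate-* headers.
    // "topic": forward to `<topic>.duplicates`, or to duplicatesTopic if set.
    strategy: "topic",
    duplicatesTopic: "orders.duplicates",
  },
});
```
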
@@ -1169,35 +1283,63 @@ var KafkaClient = class {
       );
       return;
     }
-    await consumer.disconnect().catch(
-
+    await consumer.disconnect().catch(
+      (e) => this.logger.warn(
+        `Error disconnecting consumer "${groupId}":`,
+        toError(e).message
+      )
+    );
     this.consumers.delete(groupId);
     this.runningConsumers.delete(groupId);
     this.consumerCreationOptions.delete(groupId);
+    this.dedupStates.delete(groupId);
     this.logger.log(`Consumer disconnected: group "${groupId}"`);
     const companions = this.companionGroupIds.get(groupId) ?? [];
     for (const cGroupId of companions) {
       const cConsumer = this.consumers.get(cGroupId);
       if (cConsumer) {
-        await cConsumer.disconnect().catch(
-
+        await cConsumer.disconnect().catch(
+          (e) => this.logger.warn(
+            `Error disconnecting retry consumer "${cGroupId}":`,
+            toError(e).message
+          )
+        );
         this.consumers.delete(cGroupId);
         this.runningConsumers.delete(cGroupId);
         this.consumerCreationOptions.delete(cGroupId);
         this.logger.log(`Retry consumer disconnected: group "${cGroupId}"`);
       }
+      const txId = `${cGroupId}-tx`;
+      const txProducer = this.retryTxProducers.get(txId);
+      if (txProducer) {
+        await txProducer.disconnect().catch(
+          (e) => this.logger.warn(
+            `Error disconnecting retry tx producer "${txId}":`,
+            toError(e).message
+          )
+        );
+        this.retryTxProducers.delete(txId);
+      }
     }
     this.companionGroupIds.delete(groupId);
   } else {
-    const tasks =
-    (
-
-
+    const tasks = [
+      ...Array.from(this.consumers.values()).map(
+        (c) => c.disconnect().catch(() => {
+        })
+      ),
+      ...Array.from(this.retryTxProducers.values()).map(
+        (p) => p.disconnect().catch(() => {
+        })
+      )
+    ];
     await Promise.allSettled(tasks);
     this.consumers.clear();
     this.runningConsumers.clear();
     this.consumerCreationOptions.clear();
     this.companionGroupIds.clear();
+    this.retryTxProducers.clear();
+    this.dedupStates.clear();
     this.logger.log("All consumers disconnected");
   }
 }
@@ -1205,6 +1347,12 @@ var KafkaClient = class {
    * Query consumer group lag per partition.
    * Lag = broker high-watermark − last committed offset.
    * A committed offset of -1 (nothing committed yet) counts as full lag.
+   *
+   * Returns an empty array when the consumer group has never committed any
+   * offsets (freshly created group, `autoCommit: false` with no manual commits,
+   * or group not yet assigned). This is a Kafka protocol limitation:
+   * `fetchOffsets` only returns data for topic-partitions that have at least one
+   * committed offset. Use `checkStatus()` to verify broker connectivity in that case.
    */
   async getConsumerLag(groupId) {
     const gid = groupId ?? this.defaultGroupId;
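
The expanded doc comment pins down a subtlety worth illustrating: lag can only be computed for partitions that have a committed offset, because the offset-fetch API returns nothing for uncommitted ones. A sketch of the documented formula using kafkajs-style admin calls (`fetchTopicOffsets` / `fetchOffsets`); the package's internal implementation may differ:

```ts
// Sketch of lag = high-watermark - committed offset, per the doc comment above.
// kafkajs-style admin API assumed; not the package's exact internals.
interface PartitionLag { partition: number; lag: number; }

async function lagFor(admin: any, groupId: string, topic: string): Promise<PartitionLag[]> {
  const watermarks = await admin.fetchTopicOffsets(topic); // [{ partition, high, ... }]
  const [committed] = await admin.fetchOffsets({ groupId, topics: [topic] });
  return watermarks.map((w: { partition: number; high: string }) => {
    const entry = committed?.partitions.find(
      (p: { partition: number }) => p.partition === w.partition
    );
    const offset = entry ? Number(entry.offset) : -1;
    // A committed offset of -1 (nothing committed yet) counts as full lag.
    const lag = offset === -1 ? Number(w.high) : Number(w.high) - offset;
    return { partition: w.partition, lag };
  });
}
```
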
@@ -1252,8 +1400,9 @@ var KafkaClient = class {
     if (this.txProducer) {
       tasks.push(this.txProducer.disconnect());
       this.txProducer = void 0;
+      this.txProducerInitPromise = void 0;
     }
-    for (const p of this.retryTxProducers) {
+    for (const p of this.retryTxProducers.values()) {
       tasks.push(p.disconnect());
     }
     this.retryTxProducers.clear();
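
The one-token change from `this.retryTxProducers` to `this.retryTxProducers.values()` matters because iterating a `Map` directly yields `[key, value]` entries, not values, so calling `.disconnect()` on an entry would fail. A quick demonstration:

```ts
const producers = new Map<string, { disconnect(): Promise<void> }>([
  ["retry-1-tx", { disconnect: async () => console.log("disconnected") }],
]);

for (const entry of producers) {
  console.log(Array.isArray(entry)); // true: ["retry-1-tx", { ... }], not the producer
}
for (const p of producers.values()) {
  void p.disconnect(); // iterates the producers themselves
}
```
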
@@ -1272,6 +1421,14 @@ var KafkaClient = class {
     this.logger.log("All connections closed");
   }
   // ── Graceful shutdown ────────────────────────────────────────────
+  /**
+   * NestJS lifecycle hook — called automatically when the host module is torn down.
+   * Drains in-flight handlers and disconnects all producers, consumers, and admin.
+   * `KafkaModule` relies on this method; no separate destroy provider is needed.
+   */
+  async onModuleDestroy() {
+    await this.disconnect();
+  }
   /**
    * Register SIGTERM / SIGINT handlers that drain in-flight messages before
    * disconnecting. Call this once after constructing the client in non-NestJS apps.
@@ -1392,6 +1549,22 @@ var KafkaClient = class {
       );
     }
   }
+  /**
+   * When `deduplication.strategy: 'topic'` and `autoCreateTopics: false`, verify
+   * that every `<topic>.duplicates` destination topic already exists. Throws a
+   * clear error at startup rather than silently dropping duplicates on first hit.
+   */
+  async validateDuplicatesTopicsExist(topicNames, customDestination) {
+    await this.ensureAdminConnected();
+    const existing = new Set(await this.admin.listTopics());
+    const toCheck = customDestination ? [customDestination] : topicNames.map((t) => `${t}.duplicates`);
+    const missing = toCheck.filter((t) => !existing.has(t));
+    if (missing.length > 0) {
+      throw new Error(
+        `deduplication.strategy: 'topic' but the following duplicate-routing topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
+      );
+    }
+  }
   /**
    * Connect the admin client if not already connected.
    * The flag is only set to `true` after a successful connect — if `admin.connect()`
@@ -1422,16 +1595,23 @@ var KafkaClient = class {
       }
     });
     await p.connect();
-    this.retryTxProducers.
+    this.retryTxProducers.set(transactionalId, p);
     return p;
   }
   async ensureTopic(topic2) {
     if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
-
-
-
-
-
+    let p = this.ensureTopicPromises.get(topic2);
+    if (!p) {
+      p = (async () => {
+        await this.ensureAdminConnected();
+        await this.admin.createTopics({
+          topics: [{ topic: topic2, numPartitions: this.numPartitions }]
+        });
+        this.ensuredTopics.add(topic2);
+      })().finally(() => this.ensureTopicPromises.delete(topic2));
+      this.ensureTopicPromises.set(topic2, p);
+    }
+    await p;
   }
   /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
   async setupConsumer(topics, mode, options) {
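
`ensureTopic` now uses in-flight deduplication rather than plain memoization: the pending promise is always evicted in `finally()`, while the durable "already created" marker lives in `ensuredTopics` and is only set on success. Concurrent callers therefore share one `createTopics` call, but a failure does not poison later attempts. The pattern in isolation:

```ts
// In-flight request deduplication, as ensureTopic now does above.
const done = new Set<string>();
const inFlight = new Map<string, Promise<void>>();

async function ensureOnce(key: string, create: () => Promise<void>): Promise<void> {
  if (done.has(key)) return;          // durable success marker
  let p = inFlight.get(key);
  if (!p) {
    p = (async () => {
      await create();
      done.add(key);                  // only remember success
    })().finally(() => inFlight.delete(key)); // always evict the pending entry
    inFlight.set(key, p);
  }
  await p;                            // concurrent callers share the same call
}
```
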
@@ -1451,6 +1631,12 @@ var KafkaClient = class {
         `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
       );
     }
+    if (existingMode === mode) {
+      const callerName = mode === "eachMessage" ? "startConsumer" : "startBatchConsumer";
+      throw new Error(
+        `${callerName}("${gid}") called twice \u2014 this group is already consuming. Call stopConsumer("${gid}") first or pass a different groupId.`
+      );
+    }
     const consumer = getOrCreateConsumer(
       gid,
       fromBeginning,
@@ -1475,6 +1661,16 @@ var KafkaClient = class {
         await this.validateDlqTopicsExist(topicNames);
       }
     }
+    if (options.deduplication?.strategy === "topic") {
+      const dest = options.deduplication.duplicatesTopic;
+      if (this.autoCreateTopicsEnabled) {
+        for (const t of topicNames) {
+          await this.ensureTopic(dest ?? `${t}.duplicates`);
+        }
+      } else {
+        await this.validateDuplicatesTopicsExist(topicNames, dest);
+      }
+    }
     await consumer.connect();
     await subscribeWithRetry(
       consumer,
@@ -1487,13 +1683,22 @@ var KafkaClient = class {
     );
     return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
   }
+  /** Create or retrieve the deduplication context for a consumer group. */
+  resolveDeduplicationContext(groupId, options) {
+    if (!options) return void 0;
+    if (!this.dedupStates.has(groupId)) {
+      this.dedupStates.set(groupId, /* @__PURE__ */ new Map());
+    }
+    return { options, state: this.dedupStates.get(groupId) };
+  }
   // ── Deps object getters ──────────────────────────────────────────
   get producerOpsDeps() {
     return {
       schemaRegistry: this.schemaRegistry,
       strictSchemasEnabled: this.strictSchemasEnabled,
       instrumentation: this.instrumentation,
-      logger: this.logger
+      logger: this.logger,
+      nextLamportClock: () => ++this._lamportClock
     };
   }
   get consumerOpsDeps() {
@@ -1546,6 +1751,7 @@ function topic(name) {
 0 && (module.exports = {
   HEADER_CORRELATION_ID,
   HEADER_EVENT_ID,
+  HEADER_LAMPORT_CLOCK,
   HEADER_SCHEMA_VERSION,
   HEADER_TIMESTAMP,
   HEADER_TRACEPARENT,