@drarzter/kafka-client 0.6.4 → 0.6.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,6 +9,7 @@ var HEADER_CORRELATION_ID = "x-correlation-id";
  var HEADER_TIMESTAMP = "x-timestamp";
  var HEADER_SCHEMA_VERSION = "x-schema-version";
  var HEADER_TRACEPARENT = "traceparent";
+ var HEADER_LAMPORT_CLOCK = "x-lamport-clock";
  var envelopeStorage = new AsyncLocalStorage();
  function getEnvelopeContext() {
  return envelopeStorage.getStore();
@@ -149,6 +150,9 @@ async function buildSendPayload(topicOrDesc, messages, deps) {
  eventId: m.eventId,
  headers: m.headers
  });
+ if (deps.nextLamportClock) {
+ envelopeHeaders[HEADER_LAMPORT_CLOCK] = String(deps.nextLamportClock());
+ }
  for (const inst of deps.instrumentation) {
  inst.beforeSend?.(topic2, envelopeHeaders);
  }
@@ -286,6 +290,9 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
  -1,
  ""
  );
+ for (const inst of deps.instrumentation ?? []) {
+ inst.onConsumeError?.(errorEnvelope, validationError);
+ }
  for (const interceptor of interceptors) {
  await interceptor.onError?.(errorEnvelope, validationError);
  }
@@ -311,7 +318,10 @@ async function sendToDlq(topic2, rawMessage, deps, meta) {
  deps.logger.warn(`Message sent to DLQ: ${payload.topic}`);
  } catch (error) {
  const err = toError(error);
- deps.logger.error(`Failed to send message to DLQ ${payload.topic}:`, err.stack);
+ deps.logger.error(
+ `Failed to send message to DLQ ${payload.topic}:`,
+ err.stack
+ );
  await deps.onMessageLost?.({
  topic: topic2,
  error: err,
@@ -326,14 +336,9 @@ var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
  var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
  function buildRetryTopicPayload(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders) {
  const retryTopic = `${originalTopic}.retry.${attempt}`;
+ const STRIP = /* @__PURE__ */ new Set([RETRY_HEADER_ATTEMPT, RETRY_HEADER_AFTER, RETRY_HEADER_MAX_RETRIES, RETRY_HEADER_ORIGINAL_TOPIC]);
  function buildHeaders(hdr) {
- const {
- [RETRY_HEADER_ATTEMPT]: _a,
- [RETRY_HEADER_AFTER]: _b,
- [RETRY_HEADER_MAX_RETRIES]: _c,
- [RETRY_HEADER_ORIGINAL_TOPIC]: _d,
- ...userHeaders
- } = hdr;
+ const userHeaders = Object.fromEntries(Object.entries(hdr).filter(([k]) => !STRIP.has(k)));
  return {
  ...userHeaders,
  [RETRY_HEADER_ATTEMPT]: String(attempt),
@@ -380,6 +385,37 @@ async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries,
  });
  }
  }
+ function buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, meta) {
+ const headers = {
+ ...meta?.originalHeaders ?? {},
+ "x-duplicate-original-topic": sourceTopic,
+ "x-duplicate-detected-at": (/* @__PURE__ */ new Date()).toISOString(),
+ "x-duplicate-reason": "lamport-clock-duplicate",
+ "x-duplicate-incoming-clock": String(meta?.incomingClock ?? 0),
+ "x-duplicate-last-processed-clock": String(meta?.lastProcessedClock ?? 0)
+ };
+ return {
+ topic: destinationTopic,
+ messages: [{ value: rawMessage, headers }]
+ };
+ }
+ async function sendToDuplicatesTopic(sourceTopic, rawMessage, destinationTopic, deps, meta) {
+ const payload = buildDuplicateTopicPayload(
+ sourceTopic,
+ rawMessage,
+ destinationTopic,
+ meta
+ );
+ try {
+ await deps.producer.send(payload);
+ deps.logger.warn(`Duplicate message forwarded to ${destinationTopic}`);
+ } catch (error) {
+ deps.logger.error(
+ `Failed to forward duplicate to ${destinationTopic}:`,
+ toError(error).stack
+ );
+ }
+ }
  async function broadcastToInterceptors(envelopes, interceptors, cb) {
  for (const env of envelopes) {
  for (const interceptor of interceptors) {
@@ -461,7 +497,10 @@ async function executeWithRetry(fn, ctx, deps) {
  interceptors,
  deps.instrumentation
  );
- if (!error) return;
+ if (!error) {
+ for (const env of envelopes) deps.onMessage?.(env);
+ return;
+ }
  const isLastAttempt = attempt === maxAttempts;
  const reportedError = isLastAttempt && maxAttempts > 1 ? new KafkaRetryExhaustedError(
  topic2,
@@ -486,15 +525,16 @@ async function executeWithRetry(fn, ctx, deps) {
  isBatch ? envelopes.map((e) => e.headers) : envelopes[0]?.headers ?? {},
  deps
  );
+ deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
  } else if (isLastAttempt) {
  if (dlq) {
- const dlqMeta = {
- error,
- attempt,
- originalHeaders: envelopes[0]?.headers
- };
- for (const raw of rawMessages) {
- await sendToDlq(topic2, raw, deps, dlqMeta);
+ for (let i = 0; i < rawMessages.length; i++) {
+ await sendToDlq(topic2, rawMessages[i], deps, {
+ error,
+ attempt,
+ originalHeaders: envelopes[i]?.headers
+ });
+ deps.onDlq?.(envelopes[i] ?? envelopes[0], "handler-error");
  }
  } else {
  await deps.onMessageLost?.({
@@ -506,12 +546,52 @@ async function executeWithRetry(fn, ctx, deps) {
  }
  } else {
  const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
+ deps.onRetry?.(envelopes[0], attempt, maxAttempts - 1);
  await sleep(Math.floor(Math.random() * cap));
  }
  }
  }

  // src/client/kafka.client/message-handler.ts
+ async function applyDeduplication(envelope, raw, dedup, dlq, deps) {
+ const clockRaw = envelope.headers[HEADER_LAMPORT_CLOCK];
+ if (clockRaw === void 0) return false;
+ const incomingClock = Number(clockRaw);
+ if (Number.isNaN(incomingClock)) return false;
+ const stateKey = `${envelope.topic}:${envelope.partition}`;
+ const lastProcessedClock = dedup.state.get(stateKey) ?? -1;
+ if (incomingClock <= lastProcessedClock) {
+ const meta = {
+ incomingClock,
+ lastProcessedClock,
+ originalHeaders: envelope.headers
+ };
+ const strategy = dedup.options.strategy ?? "drop";
+ deps.logger.warn(
+ `Duplicate message on ${envelope.topic}[${envelope.partition}]: clock=${incomingClock} <= last=${lastProcessedClock} \u2014 strategy=${strategy}`
+ );
+ deps.onDuplicate?.(envelope, strategy);
+ if (strategy === "dlq" && dlq) {
+ const augmentedHeaders = {
+ ...envelope.headers,
+ "x-dlq-reason": "lamport-clock-duplicate",
+ "x-dlq-duplicate-incoming-clock": String(incomingClock),
+ "x-dlq-duplicate-last-processed-clock": String(lastProcessedClock)
+ };
+ await sendToDlq(envelope.topic, raw, deps, {
+ error: new Error("Lamport Clock duplicate detected"),
+ attempt: 0,
+ originalHeaders: augmentedHeaders
+ });
+ } else if (strategy === "topic") {
+ const destination = dedup.options.duplicatesTopic ?? `${envelope.topic}.duplicates`;
+ await sendToDuplicatesTopic(envelope.topic, raw, destination, deps, meta);
+ }
+ return true;
+ }
+ dedup.state.set(stateKey, incomingClock);
+ return false;
+ }
  async function parseSingleMessage(message, topic2, partition, schemaMap, interceptors, dlq, deps) {
  if (!message.value) {
  deps.logger.warn(`Received empty message from topic ${topic2}`);
@@ -555,6 +635,16 @@ async function handleEachMessage(payload, opts, deps) {
  deps
  );
  if (envelope === null) return;
+ if (opts.deduplication) {
+ const isDuplicate = await applyDeduplication(
+ envelope,
+ message.value.toString(),
+ opts.deduplication,
+ dlq,
+ deps
+ );
+ if (isDuplicate) return;
+ }
  await executeWithRetry(
  () => {
  const fn = () => runWithEnvelopeContext(
@@ -602,6 +692,17 @@ async function handleEachBatch(payload, opts, deps) {
  deps
  );
  if (envelope === null) continue;
+ if (opts.deduplication) {
+ const raw = message.value.toString();
+ const isDuplicate = await applyDeduplication(
+ envelope,
+ raw,
+ opts.deduplication,
+ dlq,
+ deps
+ );
+ if (isDuplicate) continue;
+ }
  envelopes.push(envelope);
  rawMessages.push(message.value.toString());
  }
@@ -673,6 +774,9 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  producer,
  instrumentation,
  onMessageLost,
+ onRetry,
+ onDlq,
+ onMessage,
  ensureTopic,
  getOrCreateConsumer: getOrCreateConsumer2,
  runningConsumers,
@@ -754,6 +858,7 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  instrumentation
  );
  if (!error) {
+ onMessage?.(envelope);
  await consumer.commitOffsets([nextOffset]);
  return;
  }
@@ -786,12 +891,23 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  await tx.send({ topic: rtTopic, messages: rtMsgs });
  await tx.sendOffsets({
  consumer,
- topics: [{ topic: nextOffset.topic, partitions: [{ partition: nextOffset.partition, offset: nextOffset.offset }] }]
+ topics: [
+ {
+ topic: nextOffset.topic,
+ partitions: [
+ {
+ partition: nextOffset.partition,
+ offset: nextOffset.offset
+ }
+ ]
+ }
+ ]
  });
  await tx.commit();
  logger.warn(
  `Message routed to ${rtTopic} (EOS, level ${nextLevel}/${currentMaxRetries})`
  );
+ onRetry?.(envelope, nextLevel, currentMaxRetries);
  } catch (txErr) {
  try {
  await tx.abort();
@@ -819,10 +935,21 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  await tx.send({ topic: dTopic, messages: dMsgs });
  await tx.sendOffsets({
  consumer,
- topics: [{ topic: nextOffset.topic, partitions: [{ partition: nextOffset.partition, offset: nextOffset.offset }] }]
+ topics: [
+ {
+ topic: nextOffset.topic,
+ partitions: [
+ {
+ partition: nextOffset.partition,
+ offset: nextOffset.offset
+ }
+ ]
+ }
+ ]
  });
  await tx.commit();
  logger.warn(`Message sent to DLQ: ${dTopic} (EOS)`);
+ onDlq?.(envelope, "handler-error");
  } catch (txErr) {
  try {
  await tx.abort();
@@ -846,7 +973,12 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  }
  });
  runningConsumers.set(levelGroupId, "eachMessage");
- await waitForPartitionAssignment(consumer, levelTopics, logger, assignmentTimeoutMs);
+ await waitForPartitionAssignment(
+ consumer,
+ levelTopics,
+ logger,
+ assignmentTimeoutMs
+ );
  logger.log(
  `Retry level ${level}/${retry.maxRetries} consumer started for: ${originalTopics.join(", ")} (group: ${levelGroupId})`
  );
@@ -876,10 +1008,12 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM

  // src/client/kafka.client/index.ts
  var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS;
+ var _activeTransactionalIds = /* @__PURE__ */ new Set();
  var KafkaClient = class {
  kafka;
  producer;
  txProducer;
+ txProducerInitPromise;
  /** Maps transactionalId → Producer for each active retry level consumer. */
  retryTxProducers = /* @__PURE__ */ new Map();
  consumers = /* @__PURE__ */ new Map();
@@ -889,6 +1023,8 @@ var KafkaClient = class {
  strictSchemasEnabled;
  numPartitions;
  ensuredTopics = /* @__PURE__ */ new Set();
+ /** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
+ ensureTopicPromises = /* @__PURE__ */ new Map();
  defaultGroupId;
  schemaRegistry = /* @__PURE__ */ new Map();
  runningConsumers = /* @__PURE__ */ new Map();
@@ -898,6 +1034,19 @@ var KafkaClient = class {
  instrumentation;
  onMessageLost;
  onRebalance;
+ /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
+ txId;
+ /** Internal event counters exposed via `getMetrics()`. */
+ _metrics = {
+ processedCount: 0,
+ retryCount: 0,
+ dlqCount: 0,
+ dedupCount: 0
+ };
+ /** Monotonically increasing Lamport clock stamped on every outgoing message. */
+ _lamportClock = 0;
+ /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
+ dedupStates = /* @__PURE__ */ new Map();
  isAdminConnected = false;
  inFlightTotal = 0;
  drainResolvers = [];
@@ -917,6 +1066,7 @@ var KafkaClient = class {
  this.instrumentation = options?.instrumentation ?? [];
  this.onMessageLost = options?.onMessageLost;
  this.onRebalance = options?.onRebalance;
+ this.txId = options?.transactionalId ?? `${clientId}-tx`;
  this.kafka = new KafkaClass({
  kafkaJS: {
  clientId: this.clientId,
@@ -952,18 +1102,31 @@ var KafkaClient = class {
  }
  /** Execute multiple sends atomically. Commits on success, aborts on error. */
  async transaction(fn) {
- if (!this.txProducer) {
- const p = this.kafka.producer({
- kafkaJS: {
- acks: -1,
- idempotent: true,
- transactionalId: `${this.clientId}-tx`,
- maxInFlightRequests: 1
- }
+ if (!this.txProducerInitPromise) {
+ if (_activeTransactionalIds.has(this.txId)) {
+ this.logger.warn(
+ `transactionalId "${this.txId}" is already in use by another KafkaClient in this process. Kafka will fence one of the producers. Set a unique \`transactionalId\` (or distinct \`clientId\`) per instance.`
+ );
+ }
+ const initPromise = (async () => {
+ const p = this.kafka.producer({
+ kafkaJS: {
+ acks: -1,
+ idempotent: true,
+ transactionalId: this.txId,
+ maxInFlightRequests: 1
+ }
+ });
+ await p.connect();
+ _activeTransactionalIds.add(this.txId);
+ return p;
+ })();
+ this.txProducerInitPromise = initPromise.catch((err) => {
+ this.txProducerInitPromise = void 0;
+ throw err;
  });
- await p.connect();
- this.txProducer = p;
  }
+ this.txProducer = await this.txProducerInitPromise;
  const tx = await this.txProducer.transaction();
  try {
  const ctx = {
@@ -1002,11 +1165,17 @@ var KafkaClient = class {
  }
  }
  // ── Producer lifecycle ───────────────────────────────────────────
- /** Connect the idempotent producer. Called automatically by `KafkaModule.register()`. */
+ /**
+ * Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
+ * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
+ */
  async connectProducer() {
  await this.producer.connect();
  this.logger.log("Producer connected");
  }
+ /**
+ * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
+ */
  async disconnectProducer() {
  await this.producer.disconnect();
  this.logger.log("Producer disconnected");
@@ -1020,6 +1189,10 @@ var KafkaClient = class {
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
  const deps = this.messageDeps;
  const timeoutMs = options.handlerTimeoutMs;
+ const deduplication = this.resolveDeduplicationContext(
+ gid,
+ options.deduplication
+ );
  await consumer.run({
  eachMessage: (payload) => this.trackInFlight(
  () => handleEachMessage(
@@ -1032,7 +1205,8 @@ var KafkaClient = class {
  retry,
  retryTopics: options.retryTopics,
  timeoutMs,
- wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
+ wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
+ deduplication
  },
  deps
  )
@@ -1072,6 +1246,10 @@ var KafkaClient = class {
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
  const deps = this.messageDeps;
  const timeoutMs = options.handlerTimeoutMs;
+ const deduplication = this.resolveDeduplicationContext(
+ gid,
+ options.deduplication
+ );
  await consumer.run({
  eachBatch: (payload) => this.trackInFlight(
  () => handleEachBatch(
@@ -1084,7 +1262,8 @@ var KafkaClient = class {
  retry,
  retryTopics: options.retryTopics,
  timeoutMs,
- wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
+ wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
+ deduplication
  },
  deps
  )
@@ -1097,7 +1276,7 @@ var KafkaClient = class {
  }
  const handleMessageForRetry = (env) => handleBatch([env], {
  partition: env.partition,
- highWatermark: env.offset,
+ highWatermark: null,
  heartbeat: async () => {
  },
  resolveOffset: () => {
@@ -1130,18 +1309,27 @@ var KafkaClient = class {
  );
  return;
  }
- await consumer.disconnect().catch(() => {
- });
+ await consumer.disconnect().catch(
+ (e) => this.logger.warn(
+ `Error disconnecting consumer "${groupId}":`,
+ toError(e).message
+ )
+ );
  this.consumers.delete(groupId);
  this.runningConsumers.delete(groupId);
  this.consumerCreationOptions.delete(groupId);
+ this.dedupStates.delete(groupId);
  this.logger.log(`Consumer disconnected: group "${groupId}"`);
  const companions = this.companionGroupIds.get(groupId) ?? [];
  for (const cGroupId of companions) {
  const cConsumer = this.consumers.get(cGroupId);
  if (cConsumer) {
- await cConsumer.disconnect().catch(() => {
- });
+ await cConsumer.disconnect().catch(
+ (e) => this.logger.warn(
+ `Error disconnecting retry consumer "${cGroupId}":`,
+ toError(e).message
+ )
+ );
  this.consumers.delete(cGroupId);
  this.runningConsumers.delete(cGroupId);
  this.consumerCreationOptions.delete(cGroupId);
@@ -1150,8 +1338,13 @@ var KafkaClient = class {
  const txId = `${cGroupId}-tx`;
  const txProducer = this.retryTxProducers.get(txId);
  if (txProducer) {
- await txProducer.disconnect().catch(() => {
- });
+ await txProducer.disconnect().catch(
+ (e) => this.logger.warn(
+ `Error disconnecting retry tx producer "${txId}":`,
+ toError(e).message
+ )
+ );
+ _activeTransactionalIds.delete(txId);
  this.retryTxProducers.delete(txId);
  }
  }
@@ -1173,6 +1366,7 @@ var KafkaClient = class {
  this.consumerCreationOptions.clear();
  this.companionGroupIds.clear();
  this.retryTxProducers.clear();
+ this.dedupStates.clear();
  this.logger.log("All consumers disconnected");
  }
  }
@@ -1226,13 +1420,27 @@ var KafkaClient = class {
  getClientId() {
  return this.clientId;
  }
+ getMetrics() {
+ return { ...this._metrics };
+ }
+ resetMetrics() {
+ this._metrics.processedCount = 0;
+ this._metrics.retryCount = 0;
+ this._metrics.dlqCount = 0;
+ this._metrics.dedupCount = 0;
+ }
  /** Gracefully disconnect producer, all consumers, and admin. */
  async disconnect(drainTimeoutMs = 3e4) {
  await this.waitForDrain(drainTimeoutMs);
  const tasks = [this.producer.disconnect()];
  if (this.txProducer) {
  tasks.push(this.txProducer.disconnect());
+ _activeTransactionalIds.delete(this.txId);
  this.txProducer = void 0;
+ this.txProducerInitPromise = void 0;
+ }
+ for (const txId of this.retryTxProducers.keys()) {
+ _activeTransactionalIds.delete(txId);
  }
  for (const p of this.retryTxProducers.values()) {
  tasks.push(p.disconnect());
@@ -1329,6 +1537,30 @@ var KafkaClient = class {
  }
  }
  }
+ notifyRetry(envelope, attempt, maxRetries) {
+ this._metrics.retryCount++;
+ for (const inst of this.instrumentation) {
+ inst.onRetry?.(envelope, attempt, maxRetries);
+ }
+ }
+ notifyDlq(envelope, reason) {
+ this._metrics.dlqCount++;
+ for (const inst of this.instrumentation) {
+ inst.onDlq?.(envelope, reason);
+ }
+ }
+ notifyDuplicate(envelope, strategy) {
+ this._metrics.dedupCount++;
+ for (const inst of this.instrumentation) {
+ inst.onDuplicate?.(envelope, strategy);
+ }
+ }
+ notifyMessage(envelope) {
+ this._metrics.processedCount++;
+ for (const inst of this.instrumentation) {
+ inst.onMessage?.(envelope);
+ }
+ }
  /**
  * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
  * The handler itself is not cancelled — the warning is diagnostic only.
@@ -1381,6 +1613,22 @@ var KafkaClient = class {
  );
  }
  }
+ /**
+ * When `deduplication.strategy: 'topic'` and `autoCreateTopics: false`, verify
+ * that every `<topic>.duplicates` destination topic already exists. Throws a
+ * clear error at startup rather than silently dropping duplicates on first hit.
+ */
+ async validateDuplicatesTopicsExist(topicNames, customDestination) {
+ await this.ensureAdminConnected();
+ const existing = new Set(await this.admin.listTopics());
+ const toCheck = customDestination ? [customDestination] : topicNames.map((t) => `${t}.duplicates`);
+ const missing = toCheck.filter((t) => !existing.has(t));
+ if (missing.length > 0) {
+ throw new Error(
+ `deduplication.strategy: 'topic' but the following duplicate-routing topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
+ );
+ }
+ }
  /**
  * Connect the admin client if not already connected.
  * The flag is only set to `true` after a successful connect — if `admin.connect()`
@@ -1402,6 +1650,11 @@ var KafkaClient = class {
  * so Kafka can fence stale producers on restart without affecting other levels.
  */
  async createRetryTxProducer(transactionalId) {
+ if (_activeTransactionalIds.has(transactionalId)) {
+ this.logger.warn(
+ `transactionalId "${transactionalId}" is already in use by another KafkaClient in this process. Kafka will fence one of the producers. Set a unique \`transactionalId\` (or distinct \`clientId\`) per instance.`
+ );
+ }
  const p = this.kafka.producer({
  kafkaJS: {
  acks: -1,
@@ -1411,16 +1664,24 @@ var KafkaClient = class {
  }
  });
  await p.connect();
+ _activeTransactionalIds.add(transactionalId);
  this.retryTxProducers.set(transactionalId, p);
  return p;
  }
  async ensureTopic(topic2) {
  if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
- await this.ensureAdminConnected();
- await this.admin.createTopics({
- topics: [{ topic: topic2, numPartitions: this.numPartitions }]
- });
- this.ensuredTopics.add(topic2);
+ let p = this.ensureTopicPromises.get(topic2);
+ if (!p) {
+ p = (async () => {
+ await this.ensureAdminConnected();
+ await this.admin.createTopics({
+ topics: [{ topic: topic2, numPartitions: this.numPartitions }]
+ });
+ this.ensuredTopics.add(topic2);
+ })().finally(() => this.ensureTopicPromises.delete(topic2));
+ this.ensureTopicPromises.set(topic2, p);
+ }
+ await p;
  }
  /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
  async setupConsumer(topics, mode, options) {
@@ -1470,6 +1731,16 @@ var KafkaClient = class {
  await this.validateDlqTopicsExist(topicNames);
  }
  }
+ if (options.deduplication?.strategy === "topic") {
+ const dest = options.deduplication.duplicatesTopic;
+ if (this.autoCreateTopicsEnabled) {
+ for (const t of topicNames) {
+ await this.ensureTopic(dest ?? `${t}.duplicates`);
+ }
+ } else {
+ await this.validateDuplicatesTopicsExist(topicNames, dest);
+ }
+ }
  await consumer.connect();
  await subscribeWithRetry(
  consumer,
@@ -1482,13 +1753,22 @@ var KafkaClient = class {
  );
  return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
  }
+ /** Create or retrieve the deduplication context for a consumer group. */
+ resolveDeduplicationContext(groupId, options) {
+ if (!options) return void 0;
+ if (!this.dedupStates.has(groupId)) {
+ this.dedupStates.set(groupId, /* @__PURE__ */ new Map());
+ }
+ return { options, state: this.dedupStates.get(groupId) };
+ }
  // ── Deps object getters ──────────────────────────────────────────
  get producerOpsDeps() {
  return {
  schemaRegistry: this.schemaRegistry,
  strictSchemasEnabled: this.strictSchemasEnabled,
  instrumentation: this.instrumentation,
- logger: this.logger
+ logger: this.logger,
+ nextLamportClock: () => ++this._lamportClock
  };
  }
  get consumerOpsDeps() {
@@ -1505,7 +1785,11 @@ var KafkaClient = class {
  logger: this.logger,
  producer: this.producer,
  instrumentation: this.instrumentation,
- onMessageLost: this.onMessageLost
+ onMessageLost: this.onMessageLost,
+ onRetry: this.notifyRetry.bind(this),
+ onDlq: this.notifyDlq.bind(this),
+ onDuplicate: this.notifyDuplicate.bind(this),
+ onMessage: this.notifyMessage.bind(this)
  };
  }
  get retryTopicDeps() {
@@ -1514,6 +1798,9 @@ var KafkaClient = class {
  producer: this.producer,
  instrumentation: this.instrumentation,
  onMessageLost: this.onMessageLost,
+ onRetry: this.notifyRetry.bind(this),
+ onDlq: this.notifyDlq.bind(this),
+ onMessage: this.notifyMessage.bind(this),
  ensureTopic: (t) => this.ensureTopic(t),
  getOrCreateConsumer: (gid, fb, ac) => getOrCreateConsumer(gid, fb, ac, this.consumerOpsDeps),
  runningConsumers: this.runningConsumers,
@@ -1544,6 +1831,7 @@ export {
  HEADER_TIMESTAMP,
  HEADER_SCHEMA_VERSION,
  HEADER_TRACEPARENT,
+ HEADER_LAMPORT_CLOCK,
  getEnvelopeContext,
  runWithEnvelopeContext,
  buildEnvelopeHeaders,
@@ -1555,4 +1843,4 @@ export {
  KafkaClient,
  topic
  };
- //# sourceMappingURL=chunk-7IX4AKQX.mjs.map
+ //# sourceMappingURL=chunk-ISYOEX4W.mjs.map
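
For orientation, below is a speculative usage sketch of what this diff adds: a per-message Lamport-clock header (`x-lamport-clock`), a consumer-level `deduplication` option with `"drop"`, `"dlq"`, and `"topic"` strategies, a configurable `transactionalId`, instrumentation hooks (`onMessage`, `onRetry`, `onDlq`, `onDuplicate`), and `getMetrics()`. The option and hook names are taken from the bundled code above, but the consumer-registration method name (`consumeEach`) and the `brokers`/`groupId` option shapes are assumptions for illustration only, not documented API.

import { KafkaClient } from "@drarzter/kafka-client";

// Hypothetical wiring; only the names visible in the diff above are grounded.
const client = new KafkaClient({
  clientId: "orders-service",
  brokers: ["localhost:9092"],             // assumed standard option
  // New in this range: explicit transactional id, so two clients in one process
  // no longer collide on `${clientId}-tx` and fence each other.
  transactionalId: "orders-service-tx-1",
  instrumentation: [
    {
      onMessage: (env) => console.debug("processed", env.topic),
      onRetry: (env, attempt, max) => console.warn(`retry ${attempt}/${max} for ${env.topic}`),
      onDlq: (env, reason) => console.error("sent to DLQ", env.topic, reason),
      onDuplicate: (env, strategy) => console.info("duplicate handled via", strategy),
    },
  ],
});

// Consumers can opt into Lamport-clock deduplication: the producer stamps every
// outgoing message with `x-lamport-clock`, and the consumer tracks the last
// processed clock per `topic:partition`, applying the chosen strategy on a hit.
await client.consumeEach(
  ["orders.created"],
  async (envelope) => {
    // ...business logic...
  },
  {
    groupId: "orders-consumer",                      // assumed option name
    deduplication: {
      strategy: "topic",                             // "drop" (default) | "dlq" | "topic"
      duplicatesTopic: "orders.created.duplicates",  // optional; defaults to `<topic>.duplicates`
    },
  }
);

// Counters maintained by the new notifyRetry/notifyDlq/notifyDuplicate/notifyMessage hooks.
console.log(client.getMetrics()); // { processedCount, retryCount, dlqCount, dedupCount }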