@drarzter/kafka-client 0.6.4 → 0.6.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -30,6 +30,7 @@ Type-safe Kafka client for Node.js. Framework-agnostic core with a first-class N
30
30
  - [Instrumentation](#instrumentation)
31
31
  - [Options reference](#options-reference)
32
32
  - [Error classes](#error-classes)
33
+ - [Deduplication (Lamport Clock)](#deduplication-lamport-clock)
33
34
  - [Retry topic chain](#retry-topic-chain)
34
35
  - [stopConsumer](#stopconsumer)
35
36
  - [Graceful shutdown](#graceful-shutdown)
@@ -65,6 +66,7 @@ Safe by default. Configurable when you need it. Escape hatches for when you know
65
66
  - **Topic descriptors** — `topic()` DX sugar lets you define topics as standalone typed objects instead of string keys
66
67
  - **Framework-agnostic** — use standalone or with NestJS (`register()` / `registerAsync()`, DI, lifecycle hooks)
67
68
  - **Idempotent producer** — `acks: -1`, `idempotent: true` by default
69
+ - **Lamport Clock deduplication** — every outgoing message is stamped with a monotonically increasing `x-lamport-clock` header; the consumer tracks the last processed value per `topic:partition` and silently drops (or routes to DLQ / a dedicated topic) any message whose clock is not strictly greater than the last seen value
68
70
  - **Retry + DLQ** — exponential backoff with full jitter; dead letter queue with error metadata headers (original topic, error message, stack, attempt count)
69
71
  - **Batch sending** — send multiple messages in a single request
70
72
  - **Batch consuming** — `startBatchConsumer()` for high-throughput `eachBatch` processing
@@ -772,6 +774,8 @@ Options for `sendMessage()` — the third argument:
772
774
  | `interceptors` | `[]` | Array of before/after/onError hooks |
773
775
  | `retryTopicAssignmentTimeoutMs` | `10000` | Timeout (ms) to wait for each retry level consumer to receive partition assignments after connecting; increase for slow brokers |
774
776
  | `handlerTimeoutMs` | — | Log a warning if the handler hasn't resolved within this window (ms) — does not cancel the handler |
777
+ | `deduplication.strategy` | `'drop'` | What to do with duplicate messages: `'drop'` silently discards, `'dlq'` forwards to `{topic}.dlq` (requires `dlq: true`), `'topic'` forwards to `{topic}.duplicates` |
778
+ | `deduplication.duplicatesTopic` | `{topic}.duplicates` | Custom destination for `strategy: 'topic'` |
775
779
  | `batch` | `false` | (decorator only) Use `startBatchConsumer` instead of `startConsumer` |
776
780
  | `subscribeRetry.retries` | `5` | Max attempts for `consumer.subscribe()` when topic doesn't exist yet |
777
781
  | `subscribeRetry.backoffMs` | `5000` | Delay between subscribe retry attempts (ms) |
@@ -880,6 +884,81 @@ const interceptor: ConsumerInterceptor<MyTopics> = {
880
884
  };
881
885
  ```
882
886
 
887
+ ## Deduplication (Lamport Clock)
888
+
889
+ Every outgoing message produced by this library is stamped with a monotonically increasing logical clock — the `x-lamport-clock` header. The counter lives in the `KafkaClient` instance and increments by one per message (including individual messages inside `sendBatch` and `transaction`).
890
+
891
+ On the consumer side, enable deduplication by passing `deduplication` to `startConsumer` or `startBatchConsumer`. The library checks the incoming clock against the last processed value for that `topic:partition` combination and skips any message whose clock is not strictly greater.
892
+
893
+ ```typescript
894
+ await kafka.startConsumer(['orders.created'], handler, {
895
+ deduplication: {}, // 'drop' strategy — silently discard duplicates
896
+ });
897
+ ```
898
+
899
+ ### How duplicates happen
900
+
901
+ The most common scenario: a producer service restarts. Its in-memory clock resets to `0`. The consumer already processed messages with clocks `1…N`. All new messages from the restarted producer (clocks `1`, `2`, `3`, …) have clocks ≤ `N` and are treated as duplicates.
902
+
903
+ ```text
904
+ Producer A (running): sends clock 1, 2, 3, 4, 5 → consumer processes all 5
905
+ Producer A (restarts): sends clock 1, 2, 3 → consumer sees 1 ≤ 5 — duplicate!
906
+ ```
907
+
908
+ ### Strategies
909
+
910
+ | Strategy | Behaviour |
911
+ | -------- | --------- |
912
+ | `'drop'` *(default)* | Log a warning and discard the message — the handler is never invoked |
913
+ | `'dlq'` | Forward to `{topic}.dlq` with reason metadata headers (`x-dlq-reason`, `x-dlq-duplicate-incoming-clock`, `x-dlq-duplicate-last-processed-clock`). Requires `dlq: true` |
914
+ | `'topic'` | Forward to `{topic}.duplicates` (or `duplicatesTopic` if set) with reason metadata headers (`x-duplicate-reason`, `x-duplicate-incoming-clock`, `x-duplicate-last-processed-clock`, `x-duplicate-detected-at`) |
915
+
916
+ ```typescript
917
+ // Strategy: drop (default)
918
+ await kafka.startConsumer(['orders'], handler, {
919
+ deduplication: {},
920
+ });
921
+
922
+ // Strategy: DLQ — inspect duplicates from {topic}.dlq
923
+ await kafka.startConsumer(['orders'], handler, {
924
+ dlq: true,
925
+ deduplication: { strategy: 'dlq' },
926
+ });
927
+
928
+ // Strategy: dedicated topic — consume from {topic}.duplicates
929
+ await kafka.startConsumer(['orders'], handler, {
930
+ deduplication: { strategy: 'topic' },
931
+ });
932
+
933
+ // Strategy: custom topic name
934
+ await kafka.startConsumer(['orders'], handler, {
935
+ deduplication: {
936
+ strategy: 'topic',
937
+ duplicatesTopic: 'ops.orders.duplicates',
938
+ },
939
+ });
940
+ ```
941
+
942
+ ### Startup validation
943
+
944
+ When `autoCreateTopics: false` and `strategy: 'topic'`, `startConsumer` / `startBatchConsumer` validates that the destination topic (`{topic}.duplicates` or `duplicatesTopic`) exists before starting the consumer. A clear error is thrown at startup listing every missing topic, rather than silently failing on the first duplicate.
945
+
946
+ With `autoCreateTopics: true` the check is skipped — the topic is created automatically instead.
947
+
948
+ ### Backwards compatibility
949
+
950
+ Messages without an `x-lamport-clock` header pass through unchanged. Producers not using this library are unaffected.
951
+
952
+ ### Limitations
953
+
954
+ Deduplication state is **in-memory and per-consumer-instance**. Understand what that means:
955
+
956
+ - **Consumer restart** — state is lost on restart. After a restart, the first message seen on each `topic:partition` is accepted regardless of its clock value (it re-establishes the baseline), so duplicates that span a restart window are not caught.
957
+ - **Multiple consumer instances** (same group, different machines) — each instance tracks its own partition subset. Partitions are reassigned on rebalance, so a rebalance can reset the state for moved partitions.
958
+ - **Cross-session duplicates** — this guards against duplicates from a **producer that restarted within the same consumer session**. For durable, cross-restart deduplication, persist the clock state externally (Redis, database) and implement idempotent handlers.
959
+
960
+ Use this feature as a lightweight first line of defence — not as a substitute for idempotent business logic.
961
+
883
962
  ## Retry topic chain
884
963
 
885
964
  > **tl;dr — recommended production setup:**
@@ -9,6 +9,7 @@ var HEADER_CORRELATION_ID = "x-correlation-id";
9
9
  var HEADER_TIMESTAMP = "x-timestamp";
10
10
  var HEADER_SCHEMA_VERSION = "x-schema-version";
11
11
  var HEADER_TRACEPARENT = "traceparent";
12
+ var HEADER_LAMPORT_CLOCK = "x-lamport-clock";
12
13
  var envelopeStorage = new AsyncLocalStorage();
13
14
  function getEnvelopeContext() {
14
15
  return envelopeStorage.getStore();
@@ -149,6 +150,9 @@ async function buildSendPayload(topicOrDesc, messages, deps) {
149
150
  eventId: m.eventId,
150
151
  headers: m.headers
151
152
  });
153
+ if (deps.nextLamportClock) {
154
+ envelopeHeaders[HEADER_LAMPORT_CLOCK] = String(deps.nextLamportClock());
155
+ }
152
156
  for (const inst of deps.instrumentation) {
153
157
  inst.beforeSend?.(topic2, envelopeHeaders);
154
158
  }
@@ -286,6 +290,9 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
286
290
  -1,
287
291
  ""
288
292
  );
293
+ for (const inst of deps.instrumentation ?? []) {
294
+ inst.onConsumeError?.(errorEnvelope, validationError);
295
+ }
289
296
  for (const interceptor of interceptors) {
290
297
  await interceptor.onError?.(errorEnvelope, validationError);
291
298
  }
@@ -380,6 +387,29 @@ async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries,
380
387
  });
381
388
  }
382
389
  }
390
+ function buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, meta) {
391
+ const headers = {
392
+ ...meta?.originalHeaders ?? {},
393
+ "x-duplicate-original-topic": sourceTopic,
394
+ "x-duplicate-detected-at": (/* @__PURE__ */ new Date()).toISOString(),
395
+ "x-duplicate-reason": "lamport-clock-duplicate",
396
+ "x-duplicate-incoming-clock": String(meta?.incomingClock ?? 0),
397
+ "x-duplicate-last-processed-clock": String(meta?.lastProcessedClock ?? 0)
398
+ };
399
+ return { topic: destinationTopic, messages: [{ value: rawMessage, headers }] };
400
+ }
401
+ async function sendToDuplicatesTopic(sourceTopic, rawMessage, destinationTopic, deps, meta) {
402
+ const payload = buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, meta);
403
+ try {
404
+ await deps.producer.send(payload);
405
+ deps.logger.warn(`Duplicate message forwarded to ${destinationTopic}`);
406
+ } catch (error) {
407
+ deps.logger.error(
408
+ `Failed to forward duplicate to ${destinationTopic}:`,
409
+ toError(error).stack
410
+ );
411
+ }
412
+ }
383
413
  async function broadcastToInterceptors(envelopes, interceptors, cb) {
384
414
  for (const env of envelopes) {
385
415
  for (const interceptor of interceptors) {
@@ -488,13 +518,12 @@ async function executeWithRetry(fn, ctx, deps) {
488
518
  );
489
519
  } else if (isLastAttempt) {
490
520
  if (dlq) {
491
- const dlqMeta = {
492
- error,
493
- attempt,
494
- originalHeaders: envelopes[0]?.headers
495
- };
496
- for (const raw of rawMessages) {
497
- await sendToDlq(topic2, raw, deps, dlqMeta);
521
+ for (let i = 0; i < rawMessages.length; i++) {
522
+ await sendToDlq(topic2, rawMessages[i], deps, {
523
+ error,
524
+ attempt,
525
+ originalHeaders: envelopes[i]?.headers
526
+ });
498
527
  }
499
528
  } else {
500
529
  await deps.onMessageLost?.({
@@ -512,6 +541,44 @@ async function executeWithRetry(fn, ctx, deps) {
512
541
  }
513
542
 
514
543
  // src/client/kafka.client/message-handler.ts
544
+ async function applyDeduplication(envelope, raw, dedup, dlq, deps) {
545
+ const clockRaw = envelope.headers[HEADER_LAMPORT_CLOCK];
546
+ if (clockRaw === void 0) return false;
547
+ const incomingClock = Number(clockRaw);
548
+ if (Number.isNaN(incomingClock)) return false;
549
+ const stateKey = `${envelope.topic}:${envelope.partition}`;
550
+ const lastProcessedClock = dedup.state.get(stateKey) ?? -1;
551
+ if (incomingClock <= lastProcessedClock) {
552
+ const meta = {
553
+ incomingClock,
554
+ lastProcessedClock,
555
+ originalHeaders: envelope.headers
556
+ };
557
+ const strategy = dedup.options.strategy ?? "drop";
558
+ deps.logger.warn(
559
+ `Duplicate message on ${envelope.topic}[${envelope.partition}]: clock=${incomingClock} <= last=${lastProcessedClock} \u2014 strategy=${strategy}`
560
+ );
561
+ if (strategy === "dlq" && dlq) {
562
+ const augmentedHeaders = {
563
+ ...envelope.headers,
564
+ "x-dlq-reason": "lamport-clock-duplicate",
565
+ "x-dlq-duplicate-incoming-clock": String(incomingClock),
566
+ "x-dlq-duplicate-last-processed-clock": String(lastProcessedClock)
567
+ };
568
+ await sendToDlq(envelope.topic, raw, deps, {
569
+ error: new Error("Lamport Clock duplicate detected"),
570
+ attempt: 0,
571
+ originalHeaders: augmentedHeaders
572
+ });
573
+ } else if (strategy === "topic") {
574
+ const destination = dedup.options.duplicatesTopic ?? `${envelope.topic}.duplicates`;
575
+ await sendToDuplicatesTopic(envelope.topic, raw, destination, deps, meta);
576
+ }
577
+ return true;
578
+ }
579
+ dedup.state.set(stateKey, incomingClock);
580
+ return false;
581
+ }
515
582
  async function parseSingleMessage(message, topic2, partition, schemaMap, interceptors, dlq, deps) {
516
583
  if (!message.value) {
517
584
  deps.logger.warn(`Received empty message from topic ${topic2}`);
@@ -555,6 +622,16 @@ async function handleEachMessage(payload, opts, deps) {
555
622
  deps
556
623
  );
557
624
  if (envelope === null) return;
625
+ if (opts.deduplication) {
626
+ const isDuplicate = await applyDeduplication(
627
+ envelope,
628
+ message.value.toString(),
629
+ opts.deduplication,
630
+ dlq,
631
+ deps
632
+ );
633
+ if (isDuplicate) return;
634
+ }
558
635
  await executeWithRetry(
559
636
  () => {
560
637
  const fn = () => runWithEnvelopeContext(
@@ -602,6 +679,17 @@ async function handleEachBatch(payload, opts, deps) {
602
679
  deps
603
680
  );
604
681
  if (envelope === null) continue;
682
+ if (opts.deduplication) {
683
+ const raw = message.value.toString();
684
+ const isDuplicate = await applyDeduplication(
685
+ envelope,
686
+ raw,
687
+ opts.deduplication,
688
+ dlq,
689
+ deps
690
+ );
691
+ if (isDuplicate) continue;
692
+ }
605
693
  envelopes.push(envelope);
606
694
  rawMessages.push(message.value.toString());
607
695
  }
@@ -880,6 +968,7 @@ var KafkaClient = class {
880
968
  kafka;
881
969
  producer;
882
970
  txProducer;
971
+ txProducerInitPromise;
883
972
  /** Maps transactionalId → Producer for each active retry level consumer. */
884
973
  retryTxProducers = /* @__PURE__ */ new Map();
885
974
  consumers = /* @__PURE__ */ new Map();
@@ -889,6 +978,8 @@ var KafkaClient = class {
889
978
  strictSchemasEnabled;
890
979
  numPartitions;
891
980
  ensuredTopics = /* @__PURE__ */ new Set();
981
+ /** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
982
+ ensureTopicPromises = /* @__PURE__ */ new Map();
892
983
  defaultGroupId;
893
984
  schemaRegistry = /* @__PURE__ */ new Map();
894
985
  runningConsumers = /* @__PURE__ */ new Map();
@@ -898,6 +989,10 @@ var KafkaClient = class {
898
989
  instrumentation;
899
990
  onMessageLost;
900
991
  onRebalance;
992
+ /** Monotonically increasing Lamport clock stamped on every outgoing message. */
993
+ _lamportClock = 0;
994
+ /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
995
+ dedupStates = /* @__PURE__ */ new Map();
901
996
  isAdminConnected = false;
902
997
  inFlightTotal = 0;
903
998
  drainResolvers = [];
@@ -952,18 +1047,25 @@ var KafkaClient = class {
952
1047
  }
953
1048
  /** Execute multiple sends atomically. Commits on success, aborts on error. */
954
1049
  async transaction(fn) {
955
- if (!this.txProducer) {
956
- const p = this.kafka.producer({
957
- kafkaJS: {
958
- acks: -1,
959
- idempotent: true,
960
- transactionalId: `${this.clientId}-tx`,
961
- maxInFlightRequests: 1
962
- }
1050
+ if (!this.txProducerInitPromise) {
1051
+ const initPromise = (async () => {
1052
+ const p = this.kafka.producer({
1053
+ kafkaJS: {
1054
+ acks: -1,
1055
+ idempotent: true,
1056
+ transactionalId: `${this.clientId}-tx`,
1057
+ maxInFlightRequests: 1
1058
+ }
1059
+ });
1060
+ await p.connect();
1061
+ return p;
1062
+ })();
1063
+ this.txProducerInitPromise = initPromise.catch((err) => {
1064
+ this.txProducerInitPromise = void 0;
1065
+ throw err;
963
1066
  });
964
- await p.connect();
965
- this.txProducer = p;
966
1067
  }
1068
+ this.txProducer = await this.txProducerInitPromise;
967
1069
  const tx = await this.txProducer.transaction();
968
1070
  try {
969
1071
  const ctx = {
@@ -1002,11 +1104,17 @@ var KafkaClient = class {
1002
1104
  }
1003
1105
  }
1004
1106
  // ── Producer lifecycle ───────────────────────────────────────────
1005
- /** Connect the idempotent producer. Called automatically by `KafkaModule.register()`. */
1107
+ /**
1108
+ * Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
1109
+ * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
1110
+ */
1006
1111
  async connectProducer() {
1007
1112
  await this.producer.connect();
1008
1113
  this.logger.log("Producer connected");
1009
1114
  }
1115
+ /**
1116
+ * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
1117
+ */
1010
1118
  async disconnectProducer() {
1011
1119
  await this.producer.disconnect();
1012
1120
  this.logger.log("Producer disconnected");
@@ -1020,6 +1128,7 @@ var KafkaClient = class {
1020
1128
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
1021
1129
  const deps = this.messageDeps;
1022
1130
  const timeoutMs = options.handlerTimeoutMs;
1131
+ const deduplication = this.resolveDeduplicationContext(gid, options.deduplication);
1023
1132
  await consumer.run({
1024
1133
  eachMessage: (payload) => this.trackInFlight(
1025
1134
  () => handleEachMessage(
@@ -1032,7 +1141,8 @@ var KafkaClient = class {
1032
1141
  retry,
1033
1142
  retryTopics: options.retryTopics,
1034
1143
  timeoutMs,
1035
- wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
1144
+ wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
1145
+ deduplication
1036
1146
  },
1037
1147
  deps
1038
1148
  )
@@ -1072,6 +1182,7 @@ var KafkaClient = class {
1072
1182
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
1073
1183
  const deps = this.messageDeps;
1074
1184
  const timeoutMs = options.handlerTimeoutMs;
1185
+ const deduplication = this.resolveDeduplicationContext(gid, options.deduplication);
1075
1186
  await consumer.run({
1076
1187
  eachBatch: (payload) => this.trackInFlight(
1077
1188
  () => handleEachBatch(
@@ -1084,7 +1195,8 @@ var KafkaClient = class {
1084
1195
  retry,
1085
1196
  retryTopics: options.retryTopics,
1086
1197
  timeoutMs,
1087
- wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
1198
+ wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
1199
+ deduplication
1088
1200
  },
1089
1201
  deps
1090
1202
  )
@@ -1130,18 +1242,27 @@ var KafkaClient = class {
1130
1242
  );
1131
1243
  return;
1132
1244
  }
1133
- await consumer.disconnect().catch(() => {
1134
- });
1245
+ await consumer.disconnect().catch(
1246
+ (e) => this.logger.warn(
1247
+ `Error disconnecting consumer "${groupId}":`,
1248
+ toError(e).message
1249
+ )
1250
+ );
1135
1251
  this.consumers.delete(groupId);
1136
1252
  this.runningConsumers.delete(groupId);
1137
1253
  this.consumerCreationOptions.delete(groupId);
1254
+ this.dedupStates.delete(groupId);
1138
1255
  this.logger.log(`Consumer disconnected: group "${groupId}"`);
1139
1256
  const companions = this.companionGroupIds.get(groupId) ?? [];
1140
1257
  for (const cGroupId of companions) {
1141
1258
  const cConsumer = this.consumers.get(cGroupId);
1142
1259
  if (cConsumer) {
1143
- await cConsumer.disconnect().catch(() => {
1144
- });
1260
+ await cConsumer.disconnect().catch(
1261
+ (e) => this.logger.warn(
1262
+ `Error disconnecting retry consumer "${cGroupId}":`,
1263
+ toError(e).message
1264
+ )
1265
+ );
1145
1266
  this.consumers.delete(cGroupId);
1146
1267
  this.runningConsumers.delete(cGroupId);
1147
1268
  this.consumerCreationOptions.delete(cGroupId);
@@ -1150,8 +1271,12 @@ var KafkaClient = class {
1150
1271
  const txId = `${cGroupId}-tx`;
1151
1272
  const txProducer = this.retryTxProducers.get(txId);
1152
1273
  if (txProducer) {
1153
- await txProducer.disconnect().catch(() => {
1154
- });
1274
+ await txProducer.disconnect().catch(
1275
+ (e) => this.logger.warn(
1276
+ `Error disconnecting retry tx producer "${txId}":`,
1277
+ toError(e).message
1278
+ )
1279
+ );
1155
1280
  this.retryTxProducers.delete(txId);
1156
1281
  }
1157
1282
  }
@@ -1173,6 +1298,7 @@ var KafkaClient = class {
1173
1298
  this.consumerCreationOptions.clear();
1174
1299
  this.companionGroupIds.clear();
1175
1300
  this.retryTxProducers.clear();
1301
+ this.dedupStates.clear();
1176
1302
  this.logger.log("All consumers disconnected");
1177
1303
  }
1178
1304
  }
@@ -1233,6 +1359,7 @@ var KafkaClient = class {
1233
1359
  if (this.txProducer) {
1234
1360
  tasks.push(this.txProducer.disconnect());
1235
1361
  this.txProducer = void 0;
1362
+ this.txProducerInitPromise = void 0;
1236
1363
  }
1237
1364
  for (const p of this.retryTxProducers.values()) {
1238
1365
  tasks.push(p.disconnect());
@@ -1381,6 +1508,22 @@ var KafkaClient = class {
1381
1508
  );
1382
1509
  }
1383
1510
  }
1511
+ /**
1512
+ * When `deduplication.strategy: 'topic'` and `autoCreateTopics: false`, verify
1513
+ * that every `<topic>.duplicates` destination topic already exists. Throws a
1514
+ * clear error at startup rather than silently dropping duplicates on first hit.
1515
+ */
1516
+ async validateDuplicatesTopicsExist(topicNames, customDestination) {
1517
+ await this.ensureAdminConnected();
1518
+ const existing = new Set(await this.admin.listTopics());
1519
+ const toCheck = customDestination ? [customDestination] : topicNames.map((t) => `${t}.duplicates`);
1520
+ const missing = toCheck.filter((t) => !existing.has(t));
1521
+ if (missing.length > 0) {
1522
+ throw new Error(
1523
+ `deduplication.strategy: 'topic' but the following duplicate-routing topics do not exist: ${missing.join(", ")}. Create them manually or set autoCreateTopics: true.`
1524
+ );
1525
+ }
1526
+ }
1384
1527
  /**
1385
1528
  * Connect the admin client if not already connected.
1386
1529
  * The flag is only set to `true` after a successful connect — if `admin.connect()`
@@ -1416,11 +1559,18 @@ var KafkaClient = class {
1416
1559
  }
1417
1560
  async ensureTopic(topic2) {
1418
1561
  if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
1419
- await this.ensureAdminConnected();
1420
- await this.admin.createTopics({
1421
- topics: [{ topic: topic2, numPartitions: this.numPartitions }]
1422
- });
1423
- this.ensuredTopics.add(topic2);
1562
+ let p = this.ensureTopicPromises.get(topic2);
1563
+ if (!p) {
1564
+ p = (async () => {
1565
+ await this.ensureAdminConnected();
1566
+ await this.admin.createTopics({
1567
+ topics: [{ topic: topic2, numPartitions: this.numPartitions }]
1568
+ });
1569
+ this.ensuredTopics.add(topic2);
1570
+ })().finally(() => this.ensureTopicPromises.delete(topic2));
1571
+ this.ensureTopicPromises.set(topic2, p);
1572
+ }
1573
+ await p;
1424
1574
  }
1425
1575
  /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
1426
1576
  async setupConsumer(topics, mode, options) {
@@ -1470,6 +1620,16 @@ var KafkaClient = class {
1470
1620
  await this.validateDlqTopicsExist(topicNames);
1471
1621
  }
1472
1622
  }
1623
+ if (options.deduplication?.strategy === "topic") {
1624
+ const dest = options.deduplication.duplicatesTopic;
1625
+ if (this.autoCreateTopicsEnabled) {
1626
+ for (const t of topicNames) {
1627
+ await this.ensureTopic(dest ?? `${t}.duplicates`);
1628
+ }
1629
+ } else {
1630
+ await this.validateDuplicatesTopicsExist(topicNames, dest);
1631
+ }
1632
+ }
1473
1633
  await consumer.connect();
1474
1634
  await subscribeWithRetry(
1475
1635
  consumer,
@@ -1482,13 +1642,22 @@ var KafkaClient = class {
1482
1642
  );
1483
1643
  return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
1484
1644
  }
1645
+ /** Create or retrieve the deduplication context for a consumer group. */
1646
+ resolveDeduplicationContext(groupId, options) {
1647
+ if (!options) return void 0;
1648
+ if (!this.dedupStates.has(groupId)) {
1649
+ this.dedupStates.set(groupId, /* @__PURE__ */ new Map());
1650
+ }
1651
+ return { options, state: this.dedupStates.get(groupId) };
1652
+ }
1485
1653
  // ── Deps object getters ──────────────────────────────────────────
1486
1654
  get producerOpsDeps() {
1487
1655
  return {
1488
1656
  schemaRegistry: this.schemaRegistry,
1489
1657
  strictSchemasEnabled: this.strictSchemasEnabled,
1490
1658
  instrumentation: this.instrumentation,
1491
- logger: this.logger
1659
+ logger: this.logger,
1660
+ nextLamportClock: () => ++this._lamportClock
1492
1661
  };
1493
1662
  }
1494
1663
  get consumerOpsDeps() {
@@ -1544,6 +1713,7 @@ export {
1544
1713
  HEADER_TIMESTAMP,
1545
1714
  HEADER_SCHEMA_VERSION,
1546
1715
  HEADER_TRACEPARENT,
1716
+ HEADER_LAMPORT_CLOCK,
1547
1717
  getEnvelopeContext,
1548
1718
  runWithEnvelopeContext,
1549
1719
  buildEnvelopeHeaders,
@@ -1555,4 +1725,4 @@ export {
1555
1725
  KafkaClient,
1556
1726
  topic
1557
1727
  };
1558
- //# sourceMappingURL=chunk-7IX4AKQX.mjs.map
1728
+ //# sourceMappingURL=chunk-KCUKXR6B.mjs.map