@drarzter/kafka-client 0.6.7 → 0.6.9

package/dist/core.mjs CHANGED
@@ -15,7 +15,7 @@ import {
  getEnvelopeContext,
  runWithEnvelopeContext,
  topic
- } from "./chunk-ISYOEX4W.mjs";
+ } from "./chunk-4526Y4PV.mjs";
  import "./chunk-EQQGB2QZ.mjs";
  export {
  HEADER_CORRELATION_ID,
package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
  export { KafkaClient, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.mjs';
- import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-CqjRm-Cd.mjs';
- export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, i as DlqReason, E as EnvelopeHeaderOptions, j as EventEnvelope, H as HEADER_CORRELATION_ID, k as HEADER_EVENT_ID, l as HEADER_LAMPORT_CLOCK, m as HEADER_SCHEMA_VERSION, n as HEADER_TIMESTAMP, o as HEADER_TRACEPARENT, p as InferSchema, q as KafkaLogger, r as KafkaMetrics, M as MessageHeaders, s as MessageLostContext, R as RetryOptions, t as SchemaParseContext, u as SendOptions, v as SubscribeRetryOptions, w as TTopicMessageMap, x as TopicsFrom, y as TransactionContext, z as buildEnvelopeHeaders, A as decodeHeaders, F as extractEnvelope, J as getEnvelopeContext, L as runWithEnvelopeContext, N as topic } from './types-CqjRm-Cd.mjs';
+ import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-736Gj0J3.mjs';
+ export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, i as DlqReason, j as DlqReplayOptions, E as EnvelopeHeaderOptions, k as EventEnvelope, H as HEADER_CORRELATION_ID, l as HEADER_EVENT_ID, m as HEADER_LAMPORT_CLOCK, n as HEADER_SCHEMA_VERSION, o as HEADER_TIMESTAMP, p as HEADER_TRACEPARENT, q as InferSchema, r as KafkaLogger, s as KafkaMetrics, M as MessageHeaders, t as MessageLostContext, R as RetryOptions, u as SchemaParseContext, v as SendOptions, w as SubscribeRetryOptions, x as TTopicMessageMap, y as TopicsFrom, z as TransactionContext, A as buildEnvelopeHeaders, F as decodeHeaders, J as extractEnvelope, L as getEnvelopeContext, N as runWithEnvelopeContext, O as topic } from './types-736Gj0J3.mjs';
  import { DynamicModule, OnModuleInit } from '@nestjs/common';
  import { DiscoveryService, ModuleRef } from '@nestjs/core';
 
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
  export { KafkaClient, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.js';
- import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-CqjRm-Cd.js';
- export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, i as DlqReason, E as EnvelopeHeaderOptions, j as EventEnvelope, H as HEADER_CORRELATION_ID, k as HEADER_EVENT_ID, l as HEADER_LAMPORT_CLOCK, m as HEADER_SCHEMA_VERSION, n as HEADER_TIMESTAMP, o as HEADER_TRACEPARENT, p as InferSchema, q as KafkaLogger, r as KafkaMetrics, M as MessageHeaders, s as MessageLostContext, R as RetryOptions, t as SchemaParseContext, u as SendOptions, v as SubscribeRetryOptions, w as TTopicMessageMap, x as TopicsFrom, y as TransactionContext, z as buildEnvelopeHeaders, A as decodeHeaders, F as extractEnvelope, J as getEnvelopeContext, L as runWithEnvelopeContext, N as topic } from './types-CqjRm-Cd.js';
+ import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-736Gj0J3.js';
+ export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, i as DlqReason, j as DlqReplayOptions, E as EnvelopeHeaderOptions, k as EventEnvelope, H as HEADER_CORRELATION_ID, l as HEADER_EVENT_ID, m as HEADER_LAMPORT_CLOCK, n as HEADER_SCHEMA_VERSION, o as HEADER_TIMESTAMP, p as HEADER_TRACEPARENT, q as InferSchema, r as KafkaLogger, s as KafkaMetrics, M as MessageHeaders, t as MessageLostContext, R as RetryOptions, u as SchemaParseContext, v as SendOptions, w as SubscribeRetryOptions, x as TTopicMessageMap, y as TopicsFrom, z as TransactionContext, A as buildEnvelopeHeaders, F as decodeHeaders, J as extractEnvelope, L as getEnvelopeContext, N as runWithEnvelopeContext, O as topic } from './types-736Gj0J3.js';
  import { DynamicModule, OnModuleInit } from '@nestjs/common';
  import { DiscoveryService, ModuleRef } from '@nestjs/core';
 
package/dist/index.js CHANGED
@@ -556,6 +556,17 @@ async function executeWithRetry(fn, ctx, deps) {
  deps.instrumentation
  );
  if (!error) {
+ if (deps.eosCommitOnSuccess) {
+ try {
+ await deps.eosCommitOnSuccess();
+ } catch (commitErr) {
+ deps.logger.error(
+ `EOS offset commit failed after successful handler \u2014 message will be redelivered:`,
+ toError(commitErr).stack
+ );
+ return;
+ }
+ }
  for (const env of envelopes) deps.onMessage?.(env);
  return;
  }
@@ -574,16 +585,28 @@
  if (retryTopics && retry) {
  const cap = Math.min(backoffMs, maxBackoffMs);
  const delay = Math.floor(Math.random() * cap);
- await sendToRetryTopic(
- topic2,
- rawMessages,
- 1,
- retry.maxRetries,
- delay,
- isBatch ? envelopes.map((e) => e.headers) : envelopes[0]?.headers ?? {},
- deps
- );
- deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
+ if (deps.eosRouteToRetry) {
+ try {
+ await deps.eosRouteToRetry(rawMessages, envelopes, delay);
+ deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
+ } catch (txErr) {
+ deps.logger.error(
+ `EOS routing to retry topic failed \u2014 message will be redelivered:`,
+ toError(txErr).stack
+ );
+ }
+ } else {
+ await sendToRetryTopic(
+ topic2,
+ rawMessages,
+ 1,
+ retry.maxRetries,
+ delay,
+ isBatch ? envelopes.map((e) => e.headers) : envelopes[0]?.headers ?? {},
+ deps
+ );
+ deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
+ }
  } else if (isLastAttempt) {
  if (dlq) {
  for (let i = 0; i < rawMessages.length; i++) {
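Note: the two hunks above thread two optional hooks into executeWithRetry. `eosCommitOnSuccess` commits the consumed offset only after the handler has succeeded, and `eosRouteToRetry` publishes the failed messages to the retry topic and commits the source offset in one transaction. If either hook throws, the offset stays uncommitted and the broker redelivers the message. A minimal sketch of the assumed hook shapes (the exact internal types are not part of this diff):

```ts
// Sketch only — field names follow the dist code above; types are assumed.
interface EosHooks {
  /** Commit the consumed offset once the handler finished without error. */
  eosCommitOnSuccess?: () => Promise<void>;
  /** Publish failed messages to the retry topic and commit the source offset
   *  inside a single Kafka transaction. */
  eosRouteToRetry?: (
    rawMessages: string[],
    envelopes: Array<{ headers: Record<string, string> }>,
    delayMs: number
  ) => Promise<void>;
}
// Failure semantics implied above: a throwing hook leaves the offset
// uncommitted, so the worst case degrades from exactly-once to at-least-once.
```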
@@ -683,6 +706,43 @@ async function handleEachMessage(payload, opts, deps) {
  timeoutMs,
  wrapWithTimeout
  } = opts;
+ const eos = opts.eosMainContext;
+ const nextOffsetStr = (parseInt(message.offset, 10) + 1).toString();
+ const commitOffset = eos ? async () => {
+ await eos.consumer.commitOffsets([
+ { topic: topic2, partition, offset: nextOffsetStr }
+ ]);
+ } : void 0;
+ const eosRouteToRetry = eos && retry ? async (rawMsgs, envelopes, delay) => {
+ const { topic: rtTopic, messages: rtMsgs } = buildRetryTopicPayload(
+ topic2,
+ rawMsgs,
+ 1,
+ retry.maxRetries,
+ delay,
+ envelopes[0]?.headers ?? {}
+ );
+ const tx = await eos.txProducer.transaction();
+ try {
+ await tx.send({ topic: rtTopic, messages: rtMsgs });
+ await tx.sendOffsets({
+ consumer: eos.consumer,
+ topics: [
+ {
+ topic: topic2,
+ partitions: [{ partition, offset: nextOffsetStr }]
+ }
+ ]
+ });
+ await tx.commit();
+ } catch (txErr) {
+ try {
+ await tx.abort();
+ } catch {
+ }
+ throw txErr;
+ }
+ } : void 0;
  const envelope = await parseSingleMessage(
  message,
  topic2,
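Note on the offset arithmetic used above: a Kafka offset commit stores the offset of the next record to read, not the record just processed, hence the `+ 1`. A tiny sketch of the same calculation:

```ts
// Why "+ 1": committing offset N means "resume at N", so after processing
// record 41 the consumer commits 42.
const nextOffset = (lastProcessedOffset: string): string =>
  (parseInt(lastProcessedOffset, 10) + 1).toString();

nextOffset("41"); // "42"
// For a batch, the same rule is applied to the last message in the batch,
// which is what batchNextOffsetStr computes in handleEachBatch below.
```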
@@ -692,7 +752,10 @@ async function handleEachMessage(payload, opts, deps) {
  dlq,
  deps
  );
- if (envelope === null) return;
+ if (envelope === null) {
+ await commitOffset?.();
+ return;
+ }
  if (opts.deduplication) {
  const isDuplicate = await applyDeduplication(
  envelope,
@@ -701,7 +764,10 @@ async function handleEachMessage(payload, opts, deps) {
  dlq,
  deps
  );
- if (isDuplicate) return;
+ if (isDuplicate) {
+ await commitOffset?.();
+ return;
+ }
  }
  await executeWithRetry(
  () => {
@@ -722,7 +788,7 @@ async function handleEachMessage(payload, opts, deps) {
  retry,
  retryTopics
  },
- deps
+ { ...deps, eosRouteToRetry, eosCommitOnSuccess: commitOffset }
  );
  }
  async function handleEachBatch(payload, opts, deps) {
@@ -737,6 +803,50 @@ async function handleEachBatch(payload, opts, deps) {
  timeoutMs,
  wrapWithTimeout
  } = opts;
+ const eos = opts.eosMainContext;
+ const lastRawOffset = batch.messages.length > 0 ? batch.messages[batch.messages.length - 1].offset : void 0;
+ const batchNextOffsetStr = lastRawOffset ? (parseInt(lastRawOffset, 10) + 1).toString() : void 0;
+ const commitBatchOffset = eos && batchNextOffsetStr ? async () => {
+ await eos.consumer.commitOffsets([
+ {
+ topic: batch.topic,
+ partition: batch.partition,
+ offset: batchNextOffsetStr
+ }
+ ]);
+ } : void 0;
+ const eosRouteToRetry = eos && retry && batchNextOffsetStr ? async (rawMsgs, envelopes2, delay) => {
+ const { topic: rtTopic, messages: rtMsgs } = buildRetryTopicPayload(
+ batch.topic,
+ rawMsgs,
+ 1,
+ retry.maxRetries,
+ delay,
+ envelopes2.map((e) => e.headers)
+ );
+ const tx = await eos.txProducer.transaction();
+ try {
+ await tx.send({ topic: rtTopic, messages: rtMsgs });
+ await tx.sendOffsets({
+ consumer: eos.consumer,
+ topics: [
+ {
+ topic: batch.topic,
+ partitions: [
+ { partition: batch.partition, offset: batchNextOffsetStr }
+ ]
+ }
+ ]
+ });
+ await tx.commit();
+ } catch (txErr) {
+ try {
+ await tx.abort();
+ } catch {
+ }
+ throw txErr;
+ }
+ } : void 0;
  const envelopes = [];
  const rawMessages = [];
  for (const message of batch.messages) {
@@ -764,7 +874,10 @@ async function handleEachBatch(payload, opts, deps) {
  envelopes.push(envelope);
  rawMessages.push(message.value.toString());
  }
- if (envelopes.length === 0) return;
+ if (envelopes.length === 0) {
+ await commitBatchOffset?.();
+ return;
+ }
  const meta = {
  partition: batch.partition,
  highWatermark: batch.highWatermark,
@@ -786,7 +899,7 @@ async function handleEachBatch(payload, opts, deps) {
  isBatch: true,
  retryTopics
  },
- deps
+ { ...deps, eosRouteToRetry, eosCommitOnSuccess: commitBatchOffset }
  );
  }
 
@@ -1067,7 +1180,7 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
  // src/client/kafka.client/index.ts
  var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript.KafkaJS;
  var _activeTransactionalIds = /* @__PURE__ */ new Set();
- var KafkaClient = class {
+ var KafkaClient = class _KafkaClient {
  kafka;
  producer;
  txProducer;
@@ -1094,13 +1207,8 @@ var KafkaClient = class {
  onRebalance;
  /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
  txId;
- /** Internal event counters exposed via `getMetrics()`. */
- _metrics = {
- processedCount: 0,
- retryCount: 0,
- dlqCount: 0,
- dedupCount: 0
- };
+ /** Per-topic event counters, lazily created on first event. Aggregated by `getMetrics()`. */
+ _topicMetrics = /* @__PURE__ */ new Map();
  /** Monotonically increasing Lamport clock stamped on every outgoing message. */
  _lamportClock = 0;
  /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
@@ -1244,13 +1352,20 @@ var KafkaClient = class {
  "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
  );
  }
- const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
+ const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
+ const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", setupOptions);
  const deps = this.messageDeps;
  const timeoutMs = options.handlerTimeoutMs;
  const deduplication = this.resolveDeduplicationContext(
  gid,
  options.deduplication
  );
+ let eosMainContext;
+ if (options.retryTopics && retry) {
+ const mainTxId = `${gid}-main-tx`;
+ const txProducer = await this.createRetryTxProducer(mainTxId);
+ eosMainContext = { txProducer, consumer };
+ }
  await consumer.run({
  eachMessage: (payload) => this.trackInFlight(
  () => handleEachMessage(
@@ -1264,7 +1379,8 @@ var KafkaClient = class {
  retryTopics: options.retryTopics,
  timeoutMs,
  wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
- deduplication
+ deduplication,
+ eosMainContext
  },
  deps
  )
@@ -1296,18 +1412,26 @@ var KafkaClient = class {
  "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
  );
  }
- if (options.autoCommit !== false) {
+ if (options.retryTopics) {
+ } else if (options.autoCommit !== false) {
  this.logger.debug?.(
  `startBatchConsumer: autoCommit is enabled (default true). If your handler calls resolveOffset() or commitOffsetsIfNecessary(), set autoCommit: false to avoid offset conflicts.`
  );
  }
- const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
+ const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
+ const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", setupOptions);
  const deps = this.messageDeps;
  const timeoutMs = options.handlerTimeoutMs;
  const deduplication = this.resolveDeduplicationContext(
  gid,
  options.deduplication
  );
+ let eosMainContext;
+ if (options.retryTopics && retry) {
+ const mainTxId = `${gid}-main-tx`;
+ const txProducer = await this.createRetryTxProducer(mainTxId);
+ eosMainContext = { txProducer, consumer };
+ }
  await consumer.run({
  eachBatch: (payload) => this.trackInFlight(
  () => handleEachBatch(
@@ -1321,7 +1445,8 @@ var KafkaClient = class {
  retryTopics: options.retryTopics,
  timeoutMs,
  wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
- deduplication
+ deduplication,
+ eosMainContext
  },
  deps
  )
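Note: when `retryTopics` is configured together with `retry`, both the eachMessage and eachBatch paths above force `autoCommit: false` and create a per-group transactional producer whose id is `${gid}-main-tx`. A hedged sketch of the options that drive this path, inferred only from the fields this diff reads (the exact consumer method signature is not visible in the diff):

```ts
// Options sketch (assumed shape). Values are placeholders; how the handler is
// supplied depends on the package's consumer API, which this diff does not show.
const consumerOptions = {
  groupId: "orders-worker",     // becomes `gid`; the tx producer id is `${gid}-main-tx`
  retry: { maxRetries: 3 },     // required before retryTopics is honored
  retryTopics: true,            // the dist code only checks truthiness here
  handlerTimeoutMs: 30_000,
  // autoCommit is overridden to false internally when retryTopics is set
};
```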
@@ -1378,6 +1503,18 @@ var KafkaClient = class {
  this.consumerCreationOptions.delete(groupId);
  this.dedupStates.delete(groupId);
  this.logger.log(`Consumer disconnected: group "${groupId}"`);
+ const mainTxId = `${groupId}-main-tx`;
+ const mainTxProducer = this.retryTxProducers.get(mainTxId);
+ if (mainTxProducer) {
+ await mainTxProducer.disconnect().catch(
+ (e) => this.logger.warn(
+ `Error disconnecting main tx producer "${mainTxId}":`,
+ toError(e).message
+ )
+ );
+ _activeTransactionalIds.delete(mainTxId);
+ this.retryTxProducers.delete(mainTxId);
+ }
  const companions = this.companionGroupIds.get(groupId) ?? [];
  for (const cGroupId of companions) {
  const cConsumer = this.consumers.get(cGroupId);
@@ -1428,6 +1565,144 @@ var KafkaClient = class {
  this.logger.log("All consumers disconnected");
  }
  }
+ pauseConsumer(groupId, assignments) {
+ const gid = groupId ?? this.defaultGroupId;
+ const consumer = this.consumers.get(gid);
+ if (!consumer) {
+ this.logger.warn(`pauseConsumer: no active consumer for group "${gid}"`);
+ return;
+ }
+ consumer.pause(
+ assignments.flatMap(
+ ({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] }))
+ )
+ );
+ }
+ resumeConsumer(groupId, assignments) {
+ const gid = groupId ?? this.defaultGroupId;
+ const consumer = this.consumers.get(gid);
+ if (!consumer) {
+ this.logger.warn(`resumeConsumer: no active consumer for group "${gid}"`);
+ return;
+ }
+ consumer.resume(
+ assignments.flatMap(
+ ({ topic: topic2, partitions }) => partitions.map((p) => ({ topic: topic2, partitions: [p] }))
+ )
+ );
+ }
+ /** DLQ header keys added by `sendToDlq` — stripped before re-publishing. */
+ static DLQ_HEADER_KEYS = /* @__PURE__ */ new Set([
+ "x-dlq-original-topic",
+ "x-dlq-failed-at",
+ "x-dlq-error-message",
+ "x-dlq-error-stack",
+ "x-dlq-attempt-count"
+ ]);
+ async replayDlq(topic2, options = {}) {
+ const dlqTopic = `${topic2}.dlq`;
+ await this.ensureAdminConnected();
+ const partitionOffsets = await this.admin.fetchTopicOffsets(dlqTopic);
+ const activePartitions = partitionOffsets.filter(
+ (p) => parseInt(p.high, 10) > 0
+ );
+ if (activePartitions.length === 0) {
+ this.logger.log(`replayDlq: "${dlqTopic}" is empty \u2014 nothing to replay`);
+ return { replayed: 0, skipped: 0 };
+ }
+ const highWatermarks = new Map(
+ activePartitions.map(({ partition, high }) => [
+ partition,
+ parseInt(high, 10)
+ ])
+ );
+ const processedOffsets = /* @__PURE__ */ new Map();
+ let replayed = 0;
+ let skipped = 0;
+ const tempGroupId = `${dlqTopic}-replay-${Date.now()}`;
+ await new Promise((resolve, reject) => {
+ const consumer = getOrCreateConsumer(
+ tempGroupId,
+ true,
+ true,
+ this.consumerOpsDeps
+ );
+ const cleanup = () => {
+ consumer.disconnect().catch(() => {
+ }).finally(() => {
+ this.consumers.delete(tempGroupId);
+ this.runningConsumers.delete(tempGroupId);
+ this.consumerCreationOptions.delete(tempGroupId);
+ });
+ };
+ consumer.connect().then(
+ () => subscribeWithRetry(consumer, [dlqTopic], this.logger)
+ ).then(
+ () => consumer.run({
+ eachMessage: async ({ partition, message }) => {
+ if (!message.value) return;
+ const offset = parseInt(message.offset, 10);
+ processedOffsets.set(partition, offset);
+ const headers = decodeHeaders(message.headers);
+ const targetTopic = options.targetTopic ?? headers["x-dlq-original-topic"];
+ const originalHeaders = Object.fromEntries(
+ Object.entries(headers).filter(
+ ([k]) => !_KafkaClient.DLQ_HEADER_KEYS.has(k)
+ )
+ );
+ const value = message.value.toString();
+ const shouldProcess = !options.filter || options.filter(headers, value);
+ if (!targetTopic || !shouldProcess) {
+ skipped++;
+ } else if (options.dryRun) {
+ this.logger.log(
+ `[DLQ replay dry-run] Would replay to "${targetTopic}"`
+ );
+ replayed++;
+ } else {
+ await this.producer.send({
+ topic: targetTopic,
+ messages: [{ value, headers: originalHeaders }]
+ });
+ replayed++;
+ }
+ const allDone = Array.from(highWatermarks.entries()).every(
+ ([p, hwm]) => (processedOffsets.get(p) ?? -1) >= hwm - 1
+ );
+ if (allDone) {
+ cleanup();
+ resolve();
+ }
+ }
+ })
+ ).catch((err) => {
+ cleanup();
+ reject(err);
+ });
+ });
+ this.logger.log(
+ `replayDlq: replayed ${replayed}, skipped ${skipped} from "${dlqTopic}"`
+ );
+ return { replayed, skipped };
+ }
+ async resetOffsets(groupId, topic2, position) {
+ const gid = groupId ?? this.defaultGroupId;
+ if (this.runningConsumers.has(gid)) {
+ throw new Error(
+ `resetOffsets: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before resetting offsets.`
+ );
+ }
+ await this.ensureAdminConnected();
+ const partitionOffsets = await this.admin.fetchTopicOffsets(topic2);
+ const partitions = partitionOffsets.map(({ partition, low, high }) => ({
+ partition,
+ offset: position === "earliest" ? low : high
+ }));
+ await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
+ this.logger.log(
+ `Offsets reset to ${position} for group "${gid}" on topic "${topic2}"`
+ );
+ }
  /**
  * Query consumer group lag per partition.
  * Lag = broker high-watermark − last committed offset.
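Note: this hunk adds four new operational methods: pauseConsumer, resumeConsumer, replayDlq (whose options correspond to the newly exported DlqReplayOptions type), and resetOffsets. A hedged usage sketch; topic and group names are placeholders:

```ts
// Dry-run a replay of "orders.dlq": DLQ bookkeeping headers are stripped and
// each message targets options.targetTopic or its x-dlq-original-topic header.
const { replayed, skipped } = await client.replayDlq("orders", {
  dryRun: true,
  // replay only messages that actually carry a recorded failure timestamp
  filter: (headers, value) => headers["x-dlq-failed-at"] !== undefined,
  // targetTopic: "orders", // optional override of x-dlq-original-topic
});

// Rewind a stopped consumer group to the beginning of a topic
// (resetOffsets throws if the group is still running).
await client.stopConsumer("orders-worker");
await client.resetOffsets("orders-worker", "orders", "earliest");

// Temporarily pause and later resume specific partitions of a running consumer.
client.pauseConsumer("orders-worker", [{ topic: "orders", partitions: [0, 1] }]);
client.resumeConsumer("orders-worker", [{ topic: "orders", partitions: [0, 1] }]);
```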
@@ -1478,14 +1753,31 @@ var KafkaClient = class {
  getClientId() {
  return this.clientId;
  }
- getMetrics() {
- return { ...this._metrics };
+ getMetrics(topic2) {
+ if (topic2 !== void 0) {
+ const m = this._topicMetrics.get(topic2);
+ return m ? { ...m } : { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
+ }
+ const agg = {
+ processedCount: 0,
+ retryCount: 0,
+ dlqCount: 0,
+ dedupCount: 0
+ };
+ for (const m of this._topicMetrics.values()) {
+ agg.processedCount += m.processedCount;
+ agg.retryCount += m.retryCount;
+ agg.dlqCount += m.dlqCount;
+ agg.dedupCount += m.dedupCount;
+ }
+ return agg;
  }
- resetMetrics() {
- this._metrics.processedCount = 0;
- this._metrics.retryCount = 0;
- this._metrics.dlqCount = 0;
- this._metrics.dedupCount = 0;
+ resetMetrics(topic2) {
+ if (topic2 !== void 0) {
+ this._topicMetrics.delete(topic2);
+ return;
+ }
+ this._topicMetrics.clear();
  }
  /** Gracefully disconnect producer, all consumers, and admin. */
  async disconnect(drainTimeoutMs = 3e4) {
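Note: getMetrics and resetMetrics now accept an optional topic argument; called without one they aggregate (or clear) across every topic seen so far, preserving the previous behavior. Usage sketch ("orders" is a placeholder topic name):

```ts
const all = client.getMetrics();            // aggregated across all topics
const orders = client.getMetrics("orders"); // per-topic counters, zeros if unseen
// orders: { processedCount, retryCount, dlqCount, dedupCount }

client.resetMetrics("orders");              // drop counters for a single topic
client.resetMetrics();                      // clear everything
```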
@@ -1595,26 +1887,34 @@ var KafkaClient = class {
  }
  }
  }
+ metricsFor(topic2) {
+ let m = this._topicMetrics.get(topic2);
+ if (!m) {
+ m = { processedCount: 0, retryCount: 0, dlqCount: 0, dedupCount: 0 };
+ this._topicMetrics.set(topic2, m);
+ }
+ return m;
+ }
  notifyRetry(envelope, attempt, maxRetries) {
- this._metrics.retryCount++;
+ this.metricsFor(envelope.topic).retryCount++;
  for (const inst of this.instrumentation) {
  inst.onRetry?.(envelope, attempt, maxRetries);
  }
  }
  notifyDlq(envelope, reason) {
- this._metrics.dlqCount++;
+ this.metricsFor(envelope.topic).dlqCount++;
  for (const inst of this.instrumentation) {
  inst.onDlq?.(envelope, reason);
  }
  }
  notifyDuplicate(envelope, strategy) {
- this._metrics.dedupCount++;
+ this.metricsFor(envelope.topic).dedupCount++;
  for (const inst of this.instrumentation) {
  inst.onDuplicate?.(envelope, strategy);
  }
  }
  notifyMessage(envelope) {
- this._metrics.processedCount++;
+ this.metricsFor(envelope.topic).processedCount++;
  for (const inst of this.instrumentation) {
  inst.onMessage?.(envelope);
  }