@drarzter/kafka-client 0.6.6 → 0.6.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/core.mjs CHANGED
@@ -15,7 +15,7 @@ import {
  getEnvelopeContext,
  runWithEnvelopeContext,
  topic
- } from "./chunk-KCUKXR6B.mjs";
+ } from "./chunk-ISYOEX4W.mjs";
  import "./chunk-EQQGB2QZ.mjs";
  export {
  HEADER_CORRELATION_ID,
package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
  export { KafkaClient, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.mjs';
- import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-CTwLrJVU.mjs';
- export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, E as EnvelopeHeaderOptions, i as EventEnvelope, H as HEADER_CORRELATION_ID, j as HEADER_EVENT_ID, k as HEADER_LAMPORT_CLOCK, l as HEADER_SCHEMA_VERSION, m as HEADER_TIMESTAMP, n as HEADER_TRACEPARENT, o as InferSchema, p as KafkaLogger, M as MessageHeaders, q as MessageLostContext, R as RetryOptions, r as SchemaParseContext, s as SendOptions, t as SubscribeRetryOptions, u as TTopicMessageMap, v as TopicsFrom, w as TransactionContext, x as buildEnvelopeHeaders, y as decodeHeaders, z as extractEnvelope, A as getEnvelopeContext, F as runWithEnvelopeContext, J as topic } from './types-CTwLrJVU.mjs';
+ import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-CqjRm-Cd.mjs';
+ export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, i as DlqReason, E as EnvelopeHeaderOptions, j as EventEnvelope, H as HEADER_CORRELATION_ID, k as HEADER_EVENT_ID, l as HEADER_LAMPORT_CLOCK, m as HEADER_SCHEMA_VERSION, n as HEADER_TIMESTAMP, o as HEADER_TRACEPARENT, p as InferSchema, q as KafkaLogger, r as KafkaMetrics, M as MessageHeaders, s as MessageLostContext, R as RetryOptions, t as SchemaParseContext, u as SendOptions, v as SubscribeRetryOptions, w as TTopicMessageMap, x as TopicsFrom, y as TransactionContext, z as buildEnvelopeHeaders, A as decodeHeaders, F as extractEnvelope, J as getEnvelopeContext, L as runWithEnvelopeContext, N as topic } from './types-CqjRm-Cd.mjs';
  import { DynamicModule, OnModuleInit } from '@nestjs/common';
  import { DiscoveryService, ModuleRef } from '@nestjs/core';
 
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
  export { KafkaClient, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.js';
- import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-CTwLrJVU.js';
- export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, E as EnvelopeHeaderOptions, i as EventEnvelope, H as HEADER_CORRELATION_ID, j as HEADER_EVENT_ID, k as HEADER_LAMPORT_CLOCK, l as HEADER_SCHEMA_VERSION, m as HEADER_TIMESTAMP, n as HEADER_TRACEPARENT, o as InferSchema, p as KafkaLogger, M as MessageHeaders, q as MessageLostContext, R as RetryOptions, r as SchemaParseContext, s as SendOptions, t as SubscribeRetryOptions, u as TTopicMessageMap, v as TopicsFrom, w as TransactionContext, x as buildEnvelopeHeaders, y as decodeHeaders, z as extractEnvelope, A as getEnvelopeContext, F as runWithEnvelopeContext, J as topic } from './types-CTwLrJVU.js';
+ import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-CqjRm-Cd.js';
+ export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as ConsumerHandle, h as ConsumerInterceptor, D as DeduplicationOptions, i as DlqReason, E as EnvelopeHeaderOptions, j as EventEnvelope, H as HEADER_CORRELATION_ID, k as HEADER_EVENT_ID, l as HEADER_LAMPORT_CLOCK, m as HEADER_SCHEMA_VERSION, n as HEADER_TIMESTAMP, o as HEADER_TRACEPARENT, p as InferSchema, q as KafkaLogger, r as KafkaMetrics, M as MessageHeaders, s as MessageLostContext, R as RetryOptions, t as SchemaParseContext, u as SendOptions, v as SubscribeRetryOptions, w as TTopicMessageMap, x as TopicsFrom, y as TransactionContext, z as buildEnvelopeHeaders, A as decodeHeaders, F as extractEnvelope, J as getEnvelopeContext, L as runWithEnvelopeContext, N as topic } from './types-CqjRm-Cd.js';
  import { DynamicModule, OnModuleInit } from '@nestjs/common';
  import { DiscoveryService, ModuleRef } from '@nestjs/core';
 
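Beyond the type-chunk hash rename (`types-CTwLrJVU` → `types-CqjRm-Cd`), the export map gains two public types: `DlqReason` and `KafkaMetrics`. A minimal sketch of consuming them (the type names come from the export list above; the `KafkaMetrics` field names are inferred from the `_metrics` counters added to `KafkaClient` later in this diff, so treat the shapes as assumptions):

```ts
import type { DlqReason, KafkaMetrics } from "@drarzter/kafka-client";

// Assumed shapes: KafkaMetrics mirrors the _metrics counters introduced
// below; DlqReason is the reason string passed to onDlq (e.g. "handler-error").
function report(metrics: KafkaMetrics, lastDlqReason?: DlqReason): void {
  console.log(
    `processed=${metrics.processedCount} retries=${metrics.retryCount}`,
    `dlq=${metrics.dlqCount} dedup=${metrics.dedupCount}`,
    lastDlqReason ?? ""
  );
}
```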
package/dist/index.js CHANGED
@@ -376,7 +376,10 @@ async function sendToDlq(topic2, rawMessage, deps, meta) {
  deps.logger.warn(`Message sent to DLQ: ${payload.topic}`);
  } catch (error) {
  const err = toError(error);
- deps.logger.error(`Failed to send message to DLQ ${payload.topic}:`, err.stack);
+ deps.logger.error(
+ `Failed to send message to DLQ ${payload.topic}:`,
+ err.stack
+ );
  await deps.onMessageLost?.({
  topic: topic2,
  error: err,
@@ -391,14 +394,9 @@ var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
  var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
  function buildRetryTopicPayload(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders) {
  const retryTopic = `${originalTopic}.retry.${attempt}`;
+ const STRIP = /* @__PURE__ */ new Set([RETRY_HEADER_ATTEMPT, RETRY_HEADER_AFTER, RETRY_HEADER_MAX_RETRIES, RETRY_HEADER_ORIGINAL_TOPIC]);
  function buildHeaders(hdr) {
- const {
- [RETRY_HEADER_ATTEMPT]: _a,
- [RETRY_HEADER_AFTER]: _b,
- [RETRY_HEADER_MAX_RETRIES]: _c,
- [RETRY_HEADER_ORIGINAL_TOPIC]: _d,
- ...userHeaders
- } = hdr;
+ const userHeaders = Object.fromEntries(Object.entries(hdr).filter(([k]) => !STRIP.has(k)));
  return {
  ...userHeaders,
  [RETRY_HEADER_ATTEMPT]: String(attempt),
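This rewrite replaces rest destructuring with computed keys by a `Set`-based filter when stripping the library's own retry headers before re-publishing. A standalone sketch of the same technique, with illustrative header names:

```ts
// Illustrative reproduction of the Set-based header filter above.
const STRIP = new Set(["x-retry-attempt", "x-retry-after"]);

function stripRetryHeaders(headers: Record<string, string>): Record<string, string> {
  // Keep every entry whose key is not one of the reserved retry headers.
  return Object.fromEntries(
    Object.entries(headers).filter(([key]) => !STRIP.has(key))
  );
}

// stripRetryHeaders({ "x-retry-attempt": "2", traceparent: "00-..." })
// → { traceparent: "00-..." }
```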
@@ -454,10 +452,18 @@ function buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, m
  "x-duplicate-incoming-clock": String(meta?.incomingClock ?? 0),
  "x-duplicate-last-processed-clock": String(meta?.lastProcessedClock ?? 0)
  };
- return { topic: destinationTopic, messages: [{ value: rawMessage, headers }] };
+ return {
+ topic: destinationTopic,
+ messages: [{ value: rawMessage, headers }]
+ };
  }
  async function sendToDuplicatesTopic(sourceTopic, rawMessage, destinationTopic, deps, meta) {
- const payload = buildDuplicateTopicPayload(sourceTopic, rawMessage, destinationTopic, meta);
+ const payload = buildDuplicateTopicPayload(
+ sourceTopic,
+ rawMessage,
+ destinationTopic,
+ meta
+ );
  try {
  await deps.producer.send(payload);
  deps.logger.warn(`Duplicate message forwarded to ${destinationTopic}`);
@@ -549,7 +555,10 @@ async function executeWithRetry(fn, ctx, deps) {
  interceptors,
  deps.instrumentation
  );
- if (!error) return;
+ if (!error) {
+ for (const env of envelopes) deps.onMessage?.(env);
+ return;
+ }
  const isLastAttempt = attempt === maxAttempts;
  const reportedError = isLastAttempt && maxAttempts > 1 ? new KafkaRetryExhaustedError(
  topic2,
@@ -574,6 +583,7 @@ async function executeWithRetry(fn, ctx, deps) {
  isBatch ? envelopes.map((e) => e.headers) : envelopes[0]?.headers ?? {},
  deps
  );
+ deps.onRetry?.(envelopes[0], 1, retry.maxRetries);
  } else if (isLastAttempt) {
  if (dlq) {
  for (let i = 0; i < rawMessages.length; i++) {
@@ -582,6 +592,7 @@ async function executeWithRetry(fn, ctx, deps) {
  attempt,
  originalHeaders: envelopes[i]?.headers
  });
+ deps.onDlq?.(envelopes[i] ?? envelopes[0], "handler-error");
  }
  } else {
  await deps.onMessageLost?.({
@@ -593,6 +604,7 @@ async function executeWithRetry(fn, ctx, deps) {
  }
  } else {
  const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
+ deps.onRetry?.(envelopes[0], attempt, maxAttempts - 1);
  await sleep(Math.floor(Math.random() * cap));
  }
  }
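In the in-process retry branch above, the delay is capped exponential backoff with full jitter: the cap doubles per attempt up to `maxBackoffMs`, and the actual sleep is uniform in `[0, cap)`. A standalone sketch of that calculation (the default values here are illustrative, not the library's):

```ts
// Full-jitter exponential backoff, mirroring the delay computation above.
function retryDelayMs(attempt: number, backoffMs = 100, maxBackoffMs = 30_000): number {
  const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
  // A uniform random sleep in [0, cap) spreads out retries from
  // consumers that all failed at the same moment.
  return Math.floor(Math.random() * cap);
}
```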
@@ -616,6 +628,7 @@ async function applyDeduplication(envelope, raw, dedup, dlq, deps) {
  deps.logger.warn(
  `Duplicate message on ${envelope.topic}[${envelope.partition}]: clock=${incomingClock} <= last=${lastProcessedClock} \u2014 strategy=${strategy}`
  );
+ deps.onDuplicate?.(envelope, strategy);
  if (strategy === "dlq" && dlq) {
  const augmentedHeaders = {
  ...envelope.headers,
@@ -819,6 +832,9 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  producer,
  instrumentation,
  onMessageLost,
+ onRetry,
+ onDlq,
+ onMessage,
  ensureTopic,
  getOrCreateConsumer: getOrCreateConsumer2,
  runningConsumers,
@@ -900,6 +916,7 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  instrumentation
  );
  if (!error) {
+ onMessage?.(envelope);
  await consumer.commitOffsets([nextOffset]);
  return;
  }
@@ -932,12 +949,23 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  await tx.send({ topic: rtTopic, messages: rtMsgs });
  await tx.sendOffsets({
  consumer,
- topics: [{ topic: nextOffset.topic, partitions: [{ partition: nextOffset.partition, offset: nextOffset.offset }] }]
+ topics: [
+ {
+ topic: nextOffset.topic,
+ partitions: [
+ {
+ partition: nextOffset.partition,
+ offset: nextOffset.offset
+ }
+ ]
+ }
+ ]
  });
  await tx.commit();
  logger.warn(
  `Message routed to ${rtTopic} (EOS, level ${nextLevel}/${currentMaxRetries})`
  );
+ onRetry?.(envelope, nextLevel, currentMaxRetries);
  } catch (txErr) {
  try {
  await tx.abort();
@@ -965,10 +993,21 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  await tx.send({ topic: dTopic, messages: dMsgs });
  await tx.sendOffsets({
  consumer,
- topics: [{ topic: nextOffset.topic, partitions: [{ partition: nextOffset.partition, offset: nextOffset.offset }] }]
+ topics: [
+ {
+ topic: nextOffset.topic,
+ partitions: [
+ {
+ partition: nextOffset.partition,
+ offset: nextOffset.offset
+ }
+ ]
+ }
+ ]
  });
  await tx.commit();
  logger.warn(`Message sent to DLQ: ${dTopic} (EOS)`);
+ onDlq?.(envelope, "handler-error");
  } catch (txErr) {
  try {
  await tx.abort();
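Both EOS branches (retry routing and DLQ) now also fire the new hooks, and both follow the same transactional pattern: produce the re-routed message and commit the consumer offset inside one transaction. A minimal sketch of that flow; the `transaction()`/`sendOffsets()` call shapes mirror this diff, while the names and loose types are illustrative:

```ts
// Exactly-once routing sketch: produce to the next retry level (or DLQ)
// and commit the consumer offset atomically. Treat as illustrative.
async function routeExactlyOnce(
  producer: any, // transactional producer (kafkaJS: { transactionalId, idempotent: true, ... })
  consumer: any,
  destination: { topic: string; messages: { value: Buffer; headers?: Record<string, string> }[] },
  nextOffset: { topic: string; partition: number; offset: string }
): Promise<void> {
  const tx = await producer.transaction();
  try {
    await tx.send(destination);
    await tx.sendOffsets({
      consumer,
      topics: [
        {
          topic: nextOffset.topic,
          partitions: [{ partition: nextOffset.partition, offset: nextOffset.offset }]
        }
      ]
    });
    await tx.commit(); // message and offset become visible atomically
  } catch (err) {
    await tx.abort(); // neither the message nor the offset is committed
    throw err;
  }
}
```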
@@ -992,7 +1031,12 @@ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopi
  }
  });
  runningConsumers.set(levelGroupId, "eachMessage");
- await waitForPartitionAssignment(consumer, levelTopics, logger, assignmentTimeoutMs);
+ await waitForPartitionAssignment(
+ consumer,
+ levelTopics,
+ logger,
+ assignmentTimeoutMs
+ );
  logger.log(
  `Retry level ${level}/${retry.maxRetries} consumer started for: ${originalTopics.join(", ")} (group: ${levelGroupId})`
  );
@@ -1022,6 +1066,7 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
 
  // src/client/kafka.client/index.ts
  var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript.KafkaJS;
+ var _activeTransactionalIds = /* @__PURE__ */ new Set();
  var KafkaClient = class {
  kafka;
  producer;
@@ -1047,6 +1092,15 @@
  instrumentation;
  onMessageLost;
  onRebalance;
+ /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
+ txId;
+ /** Internal event counters exposed via `getMetrics()`. */
+ _metrics = {
+ processedCount: 0,
+ retryCount: 0,
+ dlqCount: 0,
+ dedupCount: 0
+ };
  /** Monotonically increasing Lamport clock stamped on every outgoing message. */
  _lamportClock = 0;
  /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
@@ -1070,6 +1124,7 @@
  this.instrumentation = options?.instrumentation ?? [];
  this.onMessageLost = options?.onMessageLost;
  this.onRebalance = options?.onRebalance;
+ this.txId = options?.transactionalId ?? `${clientId}-tx`;
  this.kafka = new KafkaClass({
  kafkaJS: {
  clientId: this.clientId,
@@ -1106,16 +1161,22 @@
  /** Execute multiple sends atomically. Commits on success, aborts on error. */
  async transaction(fn) {
  if (!this.txProducerInitPromise) {
+ if (_activeTransactionalIds.has(this.txId)) {
+ this.logger.warn(
+ `transactionalId "${this.txId}" is already in use by another KafkaClient in this process. Kafka will fence one of the producers. Set a unique \`transactionalId\` (or distinct \`clientId\`) per instance.`
+ );
+ }
  const initPromise = (async () => {
  const p = this.kafka.producer({
  kafkaJS: {
  acks: -1,
  idempotent: true,
- transactionalId: `${this.clientId}-tx`,
+ transactionalId: this.txId,
  maxInFlightRequests: 1
  }
  });
  await p.connect();
+ _activeTransactionalIds.add(this.txId);
  return p;
  })();
  this.txProducerInitPromise = initPromise.catch((err) => {
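The transactional ID, previously hard-coded as `` `${clientId}-tx` ``, is now configurable via `KafkaClientOptions.transactionalId`, and a module-level `Set` of active IDs warns when two clients in one process would collide (Kafka fences all but one producer with a given transactional ID). A hypothetical configuration; only `transactionalId` is confirmed by this diff, the other option names are assumptions:

```ts
import { KafkaClient } from "@drarzter/kafka-client";

// Hypothetical: two clients in one process. Distinct transactionalIds
// avoid the new fencing warning (and actual fencing by the broker).
// clientId/brokers option names are assumptions, not from this diff.
const orders = new KafkaClient({
  clientId: "checkout",
  brokers: ["localhost:9092"],
  transactionalId: "checkout-orders-tx"
});

const billing = new KafkaClient({
  clientId: "checkout",
  brokers: ["localhost:9092"],
  transactionalId: "checkout-billing-tx"
});
```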
@@ -1186,7 +1247,10 @@
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
  const deps = this.messageDeps;
  const timeoutMs = options.handlerTimeoutMs;
- const deduplication = this.resolveDeduplicationContext(gid, options.deduplication);
+ const deduplication = this.resolveDeduplicationContext(
+ gid,
+ options.deduplication
+ );
  await consumer.run({
  eachMessage: (payload) => this.trackInFlight(
  () => handleEachMessage(
@@ -1240,7 +1304,10 @@
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
  const deps = this.messageDeps;
  const timeoutMs = options.handlerTimeoutMs;
- const deduplication = this.resolveDeduplicationContext(gid, options.deduplication);
+ const deduplication = this.resolveDeduplicationContext(
+ gid,
+ options.deduplication
+ );
  await consumer.run({
  eachBatch: (payload) => this.trackInFlight(
  () => handleEachBatch(
@@ -1267,7 +1334,7 @@
  }
  const handleMessageForRetry = (env) => handleBatch([env], {
  partition: env.partition,
- highWatermark: env.offset,
+ highWatermark: null,
  heartbeat: async () => {
  },
  resolveOffset: () => {
@@ -1335,6 +1402,7 @@
  toError(e).message
  )
  );
+ _activeTransactionalIds.delete(txId);
  this.retryTxProducers.delete(txId);
  }
  }
@@ -1410,15 +1478,28 @@
  getClientId() {
  return this.clientId;
  }
+ getMetrics() {
+ return { ...this._metrics };
+ }
+ resetMetrics() {
+ this._metrics.processedCount = 0;
+ this._metrics.retryCount = 0;
+ this._metrics.dlqCount = 0;
+ this._metrics.dedupCount = 0;
+ }
  /** Gracefully disconnect producer, all consumers, and admin. */
  async disconnect(drainTimeoutMs = 3e4) {
  await this.waitForDrain(drainTimeoutMs);
  const tasks = [this.producer.disconnect()];
  if (this.txProducer) {
  tasks.push(this.txProducer.disconnect());
+ _activeTransactionalIds.delete(this.txId);
  this.txProducer = void 0;
  this.txProducerInitPromise = void 0;
  }
+ for (const txId of this.retryTxProducers.keys()) {
+ _activeTransactionalIds.delete(txId);
+ }
  for (const p of this.retryTxProducers.values()) {
  tasks.push(p.disconnect());
  }
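`getMetrics()` returns a spread copy, so callers get a snapshot they cannot mutate, and `resetMetrics()` zeroes the counters for the next window. A hypothetical scrape, assuming both methods appear on the public `KafkaClient` type:

```ts
import type { KafkaClient } from "@drarzter/kafka-client";

// Hypothetical periodic scrape of the new counters; how the client
// instance is constructed and connected is elided.
function scrapeMetrics(client: KafkaClient): void {
  const m = client.getMetrics(); // snapshot copy, safe to hold
  console.log(
    `processed=${m.processedCount} retries=${m.retryCount}`,
    `dlq=${m.dlqCount} dedup=${m.dedupCount}`
  );
  client.resetMetrics(); // next collection window starts from zero
}

// e.g. setInterval(() => scrapeMetrics(client), 60_000);
```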
@@ -1514,6 +1595,30 @@
  }
  }
  }
+ notifyRetry(envelope, attempt, maxRetries) {
+ this._metrics.retryCount++;
+ for (const inst of this.instrumentation) {
+ inst.onRetry?.(envelope, attempt, maxRetries);
+ }
+ }
+ notifyDlq(envelope, reason) {
+ this._metrics.dlqCount++;
+ for (const inst of this.instrumentation) {
+ inst.onDlq?.(envelope, reason);
+ }
+ }
+ notifyDuplicate(envelope, strategy) {
+ this._metrics.dedupCount++;
+ for (const inst of this.instrumentation) {
+ inst.onDuplicate?.(envelope, strategy);
+ }
+ }
+ notifyMessage(envelope) {
+ this._metrics.processedCount++;
+ for (const inst of this.instrumentation) {
+ inst.onMessage?.(envelope);
+ }
+ }
  /**
  * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
  * The handler itself is not cancelled — the warning is diagnostic only.
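Each `notify*` method increments its counter and then fans out to every registered `KafkaInstrumentation`, so the four hooks (`onMessage`, `onRetry`, `onDlq`, `onDuplicate`) are all optional. A sketch of an instrumentation object wired to them; hook names and argument order come from the calls above, while the parameter types are loose assumptions:

```ts
// Sketch of a KafkaInstrumentation using the four optional hooks invoked
// by the notify* fan-out above. Parameter types are loose assumptions.
const loggingInstrumentation = {
  onMessage(envelope: { topic: string }) {
    console.debug(`processed ${envelope.topic}`);
  },
  onRetry(envelope: { topic: string }, attempt: number, maxRetries: number) {
    console.warn(`retry ${attempt}/${maxRetries} on ${envelope.topic}`);
  },
  onDlq(envelope: { topic: string }, reason: string) {
    console.error(`dead-lettered ${envelope.topic}: ${reason}`);
  },
  onDuplicate(envelope: { topic: string }, strategy: string) {
    console.warn(`duplicate on ${envelope.topic} (strategy=${strategy})`);
  }
};

// Registered at construction, per the constructor earlier in this diff:
// new KafkaClient({ instrumentation: [loggingInstrumentation], ... })
```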
@@ -1603,6 +1708,11 @@
  * so Kafka can fence stale producers on restart without affecting other levels.
  */
  async createRetryTxProducer(transactionalId) {
+ if (_activeTransactionalIds.has(transactionalId)) {
+ this.logger.warn(
+ `transactionalId "${transactionalId}" is already in use by another KafkaClient in this process. Kafka will fence one of the producers. Set a unique \`transactionalId\` (or distinct \`clientId\`) per instance.`
+ );
+ }
  const p = this.kafka.producer({
  kafkaJS: {
  acks: -1,
@@ -1612,6 +1722,7 @@
  }
  });
  await p.connect();
+ _activeTransactionalIds.add(transactionalId);
  this.retryTxProducers.set(transactionalId, p);
  return p;
  }
@@ -1732,7 +1843,11 @@
  logger: this.logger,
  producer: this.producer,
  instrumentation: this.instrumentation,
- onMessageLost: this.onMessageLost
+ onMessageLost: this.onMessageLost,
+ onRetry: this.notifyRetry.bind(this),
+ onDlq: this.notifyDlq.bind(this),
+ onDuplicate: this.notifyDuplicate.bind(this),
+ onMessage: this.notifyMessage.bind(this)
  };
  }
  get retryTopicDeps() {
@@ -1741,6 +1856,9 @@
  producer: this.producer,
  instrumentation: this.instrumentation,
  onMessageLost: this.onMessageLost,
+ onRetry: this.notifyRetry.bind(this),
+ onDlq: this.notifyDlq.bind(this),
+ onMessage: this.notifyMessage.bind(this),
  ensureTopic: (t) => this.ensureTopic(t),
  getOrCreateConsumer: (gid, fb, ac) => getOrCreateConsumer(gid, fb, ac, this.consumerOpsDeps),
  runningConsumers: this.runningConsumers,