@drarzter/kafka-client 0.7.0 → 0.7.2

This diff shows the changes between package versions as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
@@ -1,5 +1,5 @@
1
1
  // src/client/kafka.client/index.ts
2
- import { KafkaJS } from "@confluentinc/kafka-javascript";
2
+ import { KafkaJS as KafkaJS2 } from "@confluentinc/kafka-javascript";
3
3
 
4
4
  // src/client/message/envelope.ts
5
5
  import { AsyncLocalStorage } from "async_hooks";
@@ -140,7 +140,7 @@ async function validateMessage(topicOrDesc, message, deps, ctx) {
140
140
  }
141
141
  return message;
142
142
  }
143
- async function buildSendPayload(topicOrDesc, messages, deps) {
143
+ async function buildSendPayload(topicOrDesc, messages, deps, compression) {
144
144
  const topic2 = resolveTopicName(topicOrDesc);
145
145
  const builtMessages = await Promise.all(
146
146
  messages.map(async (m) => {
@@ -170,11 +170,12 @@ async function buildSendPayload(topicOrDesc, messages, deps) {
170
170
  };
171
171
  })
172
172
  );
173
- return { topic: topic2, messages: builtMessages };
173
+ return { topic: topic2, messages: builtMessages, ...compression && { compression } };
174
174
  }
175
175
 
176
176
  // src/client/kafka.client/consumer-ops.ts
177
- function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
177
+ import { KafkaJS } from "@confluentinc/kafka-javascript";
178
+ function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps, partitionAssigner) {
178
179
  const { consumers, consumerCreationOptions, kafka, onRebalance, logger } = deps;
179
180
  if (consumers.has(groupId)) {
180
181
  const prev = consumerCreationOptions.get(groupId);
@@ -186,8 +187,11 @@ function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
186
187
  return consumers.get(groupId);
187
188
  }
188
189
  consumerCreationOptions.set(groupId, { fromBeginning, autoCommit });
190
+ const assigners = [
191
+ partitionAssigner === "roundrobin" ? KafkaJS.PartitionAssigners.roundRobin : partitionAssigner === "range" ? KafkaJS.PartitionAssigners.range : KafkaJS.PartitionAssigners.cooperativeSticky
192
+ ];
189
193
  const config = {
190
- kafkaJS: { groupId, fromBeginning, autoCommit }
194
+ kafkaJS: { groupId, fromBeginning, autoCommit, partitionAssigners: assigners }
191
195
  };
192
196
  if (onRebalance) {
193
197
  const cb = onRebalance;
@@ -732,10 +736,11 @@ async function handleEachMessage(payload, opts, deps) {
732
736
  });
733
737
  deps.onDlq?.(envelope, "ttl-expired");
734
738
  } else {
735
- await deps.onMessageLost?.({
739
+ const ttlHandler = opts.onTtlExpired ?? deps.onTtlExpired;
740
+ await ttlHandler?.({
736
741
  topic: topic2,
737
- error: new Error(`TTL expired: ${ageMs}ms`),
738
- attempt: 0,
742
+ ageMs,
743
+ messageTtlMs: opts.messageTtlMs,
739
744
  headers: envelope.headers
740
745
  });
741
746
  }
@@ -859,10 +864,11 @@ async function handleEachBatch(payload, opts, deps) {
859
864
  });
860
865
  deps.onDlq?.(envelope, "ttl-expired");
861
866
  } else {
862
- await deps.onMessageLost?.({
867
+ const ttlHandler = opts.onTtlExpired ?? deps.onTtlExpired;
868
+ await ttlHandler?.({
863
869
  topic: batch.topic,
864
- error: new Error(`TTL expired: ${ageMs}ms`),
865
- attempt: 0,
870
+ ageMs,
871
+ messageTtlMs: opts.messageTtlMs,
866
872
  headers: envelope.headers
867
873
  });
868
874
  }
@@ -905,6 +911,7 @@ async function handleEachBatch(payload, opts, deps) {
905
911
  async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
906
912
  const maxAttempts = retryOpts?.retries ?? 5;
907
913
  const backoffMs = retryOpts?.backoffMs ?? 5e3;
914
+ const displayTopics = topics.map((t) => t instanceof RegExp ? t.toString() : t).join(", ");
908
915
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
909
916
  try {
910
917
  await consumer.subscribe({ topics });
@@ -914,7 +921,7 @@ async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
914
921
  const msg = toError(error).message;
915
922
  const delay = Math.floor(Math.random() * backoffMs);
916
923
  logger.warn(
917
- `Failed to subscribe to [${topics.join(", ")}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${delay}ms...`
924
+ `Failed to subscribe to [${displayTopics}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${delay}ms...`
918
925
  );
919
926
  await sleep(delay);
920
927
  }
@@ -1176,31 +1183,54 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
1176
1183
  }
1177
1184
 
1178
1185
  // src/client/kafka.client/index.ts
1179
- var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS;
1186
+ var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS2;
1180
1187
  var _activeTransactionalIds = /* @__PURE__ */ new Set();
1181
1188
  var AsyncQueue = class {
1189
+ constructor(highWaterMark = Infinity, onFull = () => {
1190
+ }, onDrained = () => {
1191
+ }) {
1192
+ this.highWaterMark = highWaterMark;
1193
+ this.onFull = onFull;
1194
+ this.onDrained = onDrained;
1195
+ }
1182
1196
  items = [];
1183
1197
  waiting = [];
1184
1198
  closed = false;
1199
+ error;
1200
+ paused = false;
1185
1201
  push(item) {
1186
1202
  if (this.waiting.length > 0) {
1187
- this.waiting.shift()({ value: item, done: false });
1203
+ this.waiting.shift().resolve({ value: item, done: false });
1188
1204
  } else {
1189
1205
  this.items.push(item);
1206
+ if (!this.paused && this.items.length >= this.highWaterMark) {
1207
+ this.paused = true;
1208
+ this.onFull();
1209
+ }
1190
1210
  }
1191
1211
  }
1212
+ fail(err) {
1213
+ this.closed = true;
1214
+ this.error = err;
1215
+ for (const { reject } of this.waiting.splice(0)) reject(err);
1216
+ }
1192
1217
  close() {
1193
1218
  this.closed = true;
1194
- for (const r of this.waiting.splice(0)) {
1195
- r({ value: void 0, done: true });
1196
- }
1219
+ for (const { resolve } of this.waiting.splice(0))
1220
+ resolve({ value: void 0, done: true });
1197
1221
  }
1198
1222
  next() {
1199
- if (this.items.length > 0)
1200
- return Promise.resolve({ value: this.items.shift(), done: false });
1201
- if (this.closed)
1202
- return Promise.resolve({ value: void 0, done: true });
1203
- return new Promise((r) => this.waiting.push(r));
1223
+ if (this.error) return Promise.reject(this.error);
1224
+ if (this.items.length > 0) {
1225
+ const value = this.items.shift();
1226
+ if (this.paused && this.items.length <= Math.floor(this.highWaterMark / 2)) {
1227
+ this.paused = false;
1228
+ this.onDrained();
1229
+ }
1230
+ return Promise.resolve({ value, done: false });
1231
+ }
1232
+ if (this.closed) return Promise.resolve({ value: void 0, done: true });
1233
+ return new Promise((resolve, reject) => this.waiting.push({ resolve, reject }));
1204
1234
  }
1205
1235
  };
1206
1236
  var KafkaClient = class _KafkaClient {
@@ -1227,6 +1257,7 @@ var KafkaClient = class _KafkaClient {
1227
1257
  companionGroupIds = /* @__PURE__ */ new Map();
1228
1258
  instrumentation;
1229
1259
  onMessageLost;
1260
+ onTtlExpired;
1230
1261
  onRebalance;
1231
1262
  /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
1232
1263
  txId;
@@ -1258,6 +1289,7 @@ var KafkaClient = class _KafkaClient {
1258
1289
  this.numPartitions = options?.numPartitions ?? 1;
1259
1290
  this.instrumentation = options?.instrumentation ?? [];
1260
1291
  this.onMessageLost = options?.onMessageLost;
1292
+ this.onTtlExpired = options?.onTtlExpired;
1261
1293
  this.onRebalance = options?.onRebalance;
1262
1294
  this.txId = options?.transactionalId ?? `${clientId}-tx`;
1263
1295
  this.kafka = new KafkaClass({
@@ -1275,21 +1307,54 @@ var KafkaClient = class _KafkaClient {
1275
1307
  this.admin = this.kafka.admin();
1276
1308
  }
1277
1309
  async sendMessage(topicOrDesc, message, options = {}) {
1278
- const payload = await this.preparePayload(topicOrDesc, [
1279
- {
1280
- value: message,
1281
- key: options.key,
1282
- headers: options.headers,
1283
- correlationId: options.correlationId,
1284
- schemaVersion: options.schemaVersion,
1285
- eventId: options.eventId
1286
- }
1287
- ]);
1310
+ const payload = await this.preparePayload(
1311
+ topicOrDesc,
1312
+ [
1313
+ {
1314
+ value: message,
1315
+ key: options.key,
1316
+ headers: options.headers,
1317
+ correlationId: options.correlationId,
1318
+ schemaVersion: options.schemaVersion,
1319
+ eventId: options.eventId
1320
+ }
1321
+ ],
1322
+ options.compression
1323
+ );
1288
1324
  await this.producer.send(payload);
1289
1325
  this.notifyAfterSend(payload.topic, payload.messages.length);
1290
1326
  }
1291
- async sendBatch(topicOrDesc, messages) {
1292
- const payload = await this.preparePayload(topicOrDesc, messages);
1327
+ /**
1328
+ * Send a null-value (tombstone) message. Used with log-compacted topics to signal
1329
+ * that a key's record should be removed during the next compaction cycle.
1330
+ *
1331
+ * Tombstones skip envelope headers, schema validation, and Lamport clock stamping.
1332
+ * Both `beforeSend` and `afterSend` instrumentation hooks are still called so tracing works correctly.
1333
+ *
1334
+ * @param topic Topic name.
1335
+ * @param key Partition key identifying the record to tombstone.
1336
+ * @param headers Optional custom Kafka headers.
1337
+ */
1338
+ async sendTombstone(topic2, key, headers) {
1339
+ const hdrs = { ...headers };
1340
+ for (const inst of this.instrumentation) {
1341
+ inst.beforeSend?.(topic2, hdrs);
1342
+ }
1343
+ await this.ensureTopic(topic2);
1344
+ await this.producer.send({
1345
+ topic: topic2,
1346
+ messages: [{ value: null, key, headers: hdrs }]
1347
+ });
1348
+ for (const inst of this.instrumentation) {
1349
+ inst.afterSend?.(topic2);
1350
+ }
1351
+ }
1352
+ async sendBatch(topicOrDesc, messages, options) {
1353
+ const payload = await this.preparePayload(
1354
+ topicOrDesc,
1355
+ messages,
1356
+ options?.compression
1357
+ );
1293
1358
  await this.producer.send(payload);
1294
1359
  this.notifyAfterSend(payload.topic, payload.messages.length);
1295
1360
  }
@@ -1337,6 +1402,13 @@ var KafkaClient = class _KafkaClient {
1337
1402
  await tx.send(payload);
1338
1403
  this.notifyAfterSend(payload.topic, payload.messages.length);
1339
1404
  },
1405
+ /**
1406
+ * Send multiple messages in a single call to the topic.
1407
+ * All messages in the batch will be sent atomically.
1408
+ * If any message fails to send, the entire batch will be aborted.
1409
+ * @param topicOrDesc - topic name or TopicDescriptor
1410
+ * @param messages - array of messages to send with optional key, headers, correlationId, schemaVersion, and eventId
1411
+ */
1340
1412
  sendBatch: async (topicOrDesc, messages) => {
1341
1413
  const payload = await this.preparePayload(topicOrDesc, messages);
1342
1414
  await tx.send(payload);
@@ -1379,6 +1451,12 @@ var KafkaClient = class _KafkaClient {
1379
1451
  "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
1380
1452
  );
1381
1453
  }
1454
+ const hasRegexTopics = topics.some((t) => t instanceof RegExp);
1455
+ if (options.retryTopics && hasRegexTopics) {
1456
+ throw new Error(
1457
+ "retryTopics is incompatible with regex topic patterns \u2014 retry topics require a fixed topic name to build the retry chain."
1458
+ );
1459
+ }
1382
1460
  const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
1383
1461
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", setupOptions);
1384
1462
  if (options.circuitBreaker)
@@ -1410,6 +1488,7 @@ var KafkaClient = class _KafkaClient {
1410
1488
  wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
1411
1489
  deduplication,
1412
1490
  messageTtlMs: options.messageTtlMs,
1491
+ onTtlExpired: options.onTtlExpired,
1413
1492
  eosMainContext
1414
1493
  },
1415
1494
  deps
@@ -1442,6 +1521,12 @@ var KafkaClient = class _KafkaClient {
1442
1521
  "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
1443
1522
  );
1444
1523
  }
1524
+ const hasRegexTopics = topics.some((t) => t instanceof RegExp);
1525
+ if (options.retryTopics && hasRegexTopics) {
1526
+ throw new Error(
1527
+ "retryTopics is incompatible with regex topic patterns \u2014 retry topics require a fixed topic name to build the retry chain."
1528
+ );
1529
+ }
1445
1530
  if (options.retryTopics) {
1446
1531
  } else if (options.autoCommit !== false) {
1447
1532
  this.logger.debug?.(
@@ -1465,6 +1550,16 @@ var KafkaClient = class _KafkaClient {
1465
1550
  eosMainContext = { txProducer, consumer };
1466
1551
  }
1467
1552
  await consumer.run({
1553
+ /**
1554
+ * eachBatch: called by the consumer for each batch of messages.
1555
+ * Called with the `payload` argument, which is an object containing the
1556
+ * batch of messages and a `BatchMeta` object with offset management controls.
1557
+ *
1558
+ * The function is wrapped with `trackInFlight` and `handleEachBatch` to provide
1559
+ * error handling and offset management.
1560
+ *
1561
+ * @param payload - an object containing the batch of messages and a `BatchMeta` object.
1562
+ */
1468
1563
  eachBatch: (payload) => this.trackInFlight(
1469
1564
  () => handleEachBatch(
1470
1565
  payload,
@@ -1479,6 +1574,7 @@ var KafkaClient = class _KafkaClient {
1479
1574
  wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
1480
1575
  deduplication,
1481
1576
  messageTtlMs: options.messageTtlMs,
1577
+ onTtlExpired: options.onTtlExpired,
1482
1578
  eosMainContext
1483
1579
  },
1484
1580
  deps
@@ -1525,7 +1621,17 @@ var KafkaClient = class _KafkaClient {
1525
1621
  * }
1526
1622
  */
1527
1623
  consume(topic2, options) {
1528
- const queue = new AsyncQueue();
1624
+ if (options?.retryTopics) {
1625
+ throw new Error(
1626
+ "consume() does not support retryTopics (EOS retry chains). Use startConsumer() with retryTopics: true for guaranteed retry delivery."
1627
+ );
1628
+ }
1629
+ const gid = options?.groupId ?? this.defaultGroupId;
1630
+ const queue = new AsyncQueue(
1631
+ options?.queueHighWaterMark,
1632
+ () => this.pauseTopicAllPartitions(gid, topic2),
1633
+ () => this.resumeTopicAllPartitions(gid, topic2)
1634
+ );
1529
1635
  const handlePromise = this.startConsumer(
1530
1636
  [topic2],
1531
1637
  async (envelope) => {
@@ -1533,6 +1639,7 @@ var KafkaClient = class _KafkaClient {
1533
1639
  },
1534
1640
  options
1535
1641
  );
1642
+ handlePromise.catch((err) => queue.fail(err));
1536
1643
  return {
1537
1644
  [Symbol.asyncIterator]() {
1538
1645
  return this;
@@ -1547,6 +1654,14 @@ var KafkaClient = class _KafkaClient {
1547
1654
  };
1548
1655
  }
1549
1656
  // ── Consumer lifecycle ───────────────────────────────────────────
1657
+ /**
1658
+ * Stop all consumers or a specific group.
1659
+ *
1660
+ * If `groupId` is unspecified, all active consumers are stopped.
1661
+ * If `groupId` is specified, only the consumer with that group ID is stopped.
1662
+ *
1663
+ * @throws {Error} if the consumer fails to disconnect.
1664
+ */
1550
1665
  async stopConsumer(groupId) {
1551
1666
  if (groupId !== void 0) {
1552
1667
  const consumer = this.consumers.get(groupId);
@@ -1640,6 +1755,12 @@ var KafkaClient = class _KafkaClient {
1640
1755
  this.logger.log("All consumers disconnected");
1641
1756
  }
1642
1757
  }
1758
+ /**
1759
+ * Temporarily stop delivering messages from specific partitions without disconnecting the consumer.
1760
+ *
1761
+ * @param groupId Consumer group to pause. Defaults to the client's default groupId.
1762
+ * @param assignments Topic-partition pairs to pause.
1763
+ */
1643
1764
  pauseConsumer(groupId, assignments) {
1644
1765
  const gid = groupId ?? this.defaultGroupId;
1645
1766
  const consumer = this.consumers.get(gid);
@@ -1653,6 +1774,12 @@ var KafkaClient = class _KafkaClient {
1653
1774
  )
1654
1775
  );
1655
1776
  }
1777
+ /**
1778
+ * Resume message delivery for previously paused topic-partitions.
1779
+ *
1780
+ * @param {string|undefined} groupId Consumer group to resume. Defaults to the client's default groupId.
1781
+ * @param {Array<{ topic: string; partitions: number[] }>} assignments Topic-partition pairs to resume.
1782
+ */
1656
1783
  resumeConsumer(groupId, assignments) {
1657
1784
  const gid = groupId ?? this.defaultGroupId;
1658
1785
  const consumer = this.consumers.get(gid);
@@ -1666,6 +1793,24 @@ var KafkaClient = class _KafkaClient {
1666
1793
  )
1667
1794
  );
1668
1795
  }
1796
+ /** Pause all assigned partitions of a topic for a consumer group (used for queue backpressure). */
1797
+ pauseTopicAllPartitions(gid, topic2) {
1798
+ const consumer = this.consumers.get(gid);
1799
+ if (!consumer) return;
1800
+ const assignment = consumer.assignment?.() ?? [];
1801
+ const partitions = assignment.filter((a) => a.topic === topic2).map((a) => a.partition);
1802
+ if (partitions.length > 0)
1803
+ consumer.pause(partitions.map((p) => ({ topic: topic2, partitions: [p] })));
1804
+ }
1805
+ /** Resume all assigned partitions of a topic for a consumer group (used for queue backpressure). */
1806
+ resumeTopicAllPartitions(gid, topic2) {
1807
+ const consumer = this.consumers.get(gid);
1808
+ if (!consumer) return;
1809
+ const assignment = consumer.assignment?.() ?? [];
1810
+ const partitions = assignment.filter((a) => a.topic === topic2).map((a) => a.partition);
1811
+ if (partitions.length > 0)
1812
+ consumer.resume(partitions.map((p) => ({ topic: topic2, partitions: [p] })));
1813
+ }
1669
1814
  /** DLQ header keys added by `sendToDlq` — stripped before re-publishing. */
1670
1815
  static DLQ_HEADER_KEYS = /* @__PURE__ */ new Set([
1671
1816
  "x-dlq-original-topic",
@@ -1674,6 +1819,17 @@ var KafkaClient = class _KafkaClient {
1674
1819
  "x-dlq-error-stack",
1675
1820
  "x-dlq-attempt-count"
1676
1821
  ]);
1822
+ /**
1823
+ * Re-publish messages from a dead letter queue back to the original topic.
1824
+ *
1825
+ * Messages are consumed from `<topic>.dlq` and re-published to `<topic>`.
1826
+ * The original topic is determined by the `x-dlq-original-topic` header.
1827
+ * The `x-dlq-*` headers are stripped before re-publishing.
1828
+ *
1829
+ * @param topic - The topic to replay from `<topic>.dlq`
1830
+ * @param options - Options for replay
1831
+ * @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
1832
+ */
1677
1833
  async replayDlq(topic2, options = {}) {
1678
1834
  const dlqTopic = `${topic2}.dlq`;
1679
1835
  await this.ensureAdminConnected();
@@ -1802,6 +1958,68 @@ var KafkaClient = class _KafkaClient {
1802
1958
  );
1803
1959
  }
1804
1960
  }
1961
+ /**
1962
+ * Seek specific topic-partition pairs to the offset nearest to a given timestamp
1963
+ * (in milliseconds) for a stopped consumer group.
1964
+ * Throws if the group is still running — call `stopConsumer(groupId)` first.
1965
+ * Assignments are grouped by topic and committed via `admin.setOffsets`.
1966
+ * If no offset exists at the requested timestamp (e.g. empty partition or
1967
+ * future timestamp), the partition falls back to `-1` (end of topic — new messages only).
1968
+ */
1969
+ async seekToTimestamp(groupId, assignments) {
1970
+ const gid = groupId ?? this.defaultGroupId;
1971
+ if (this.runningConsumers.has(gid)) {
1972
+ throw new Error(
1973
+ `seekToTimestamp: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
1974
+ );
1975
+ }
1976
+ await this.ensureAdminConnected();
1977
+ const byTopic = /* @__PURE__ */ new Map();
1978
+ for (const { topic: topic2, partition, timestamp } of assignments) {
1979
+ const list = byTopic.get(topic2) ?? [];
1980
+ list.push({ partition, timestamp });
1981
+ byTopic.set(topic2, list);
1982
+ }
1983
+ for (const [topic2, parts] of byTopic) {
1984
+ const offsets = await Promise.all(
1985
+ parts.map(async ({ partition, timestamp }) => {
1986
+ const results = await this.admin.fetchTopicOffsetsByTime(
1987
+ topic2,
1988
+ timestamp
1989
+ );
1990
+ const found = results.find(
1991
+ (r) => r.partition === partition
1992
+ );
1993
+ return { partition, offset: found?.offset ?? "-1" };
1994
+ })
1995
+ );
1996
+ await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions: offsets });
1997
+ this.logger.log(
1998
+ `Offsets set by timestamp for group "${gid}" on "${topic2}": ${JSON.stringify(offsets)}`
1999
+ );
2000
+ }
2001
+ }
2002
+ /**
2003
+ * Returns the current circuit breaker state for a specific topic partition.
2004
+ * Returns `undefined` when no circuit state exists — either `circuitBreaker` is not
2005
+ * configured for the group, or the circuit has never been tripped.
2006
+ *
2007
+ * @param topic Topic name.
2008
+ * @param partition Partition index.
2009
+ * @param groupId Consumer group. Defaults to the client's default groupId.
2010
+ *
2011
+ * @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
2012
+ */
2013
+ getCircuitState(topic2, partition, groupId) {
2014
+ const gid = groupId ?? this.defaultGroupId;
2015
+ const state = this.circuitStates.get(`${gid}:${topic2}:${partition}`);
2016
+ if (!state) return void 0;
2017
+ return {
2018
+ status: state.status,
2019
+ failures: state.window.filter((v) => !v).length,
2020
+ windowSize: state.window.length
2021
+ };
2022
+ }
1805
2023
  /**
1806
2024
  * Query consumer group lag per partition.
1807
2025
  * Lag = broker high-watermark − last committed offset.
@@ -1849,9 +2067,62 @@ var KafkaClient = class _KafkaClient {
1849
2067
  };
1850
2068
  }
1851
2069
  }
2070
+ /**
2071
+ * List all consumer groups known to the broker.
2072
+ * Useful for monitoring which groups are active and their current state.
2073
+ */
2074
+ async listConsumerGroups() {
2075
+ await this.ensureAdminConnected();
2076
+ const result = await this.admin.listGroups();
2077
+ return result.groups.map((g) => ({
2078
+ groupId: g.groupId,
2079
+ state: g.state ?? "Unknown"
2080
+ }));
2081
+ }
2082
+ /**
2083
+ * Describe topics — returns partition layout, leader, replicas, and ISR.
2084
+ * @param topics Topic names to describe. Omit to describe all topics.
2085
+ */
2086
+ async describeTopics(topics) {
2087
+ await this.ensureAdminConnected();
2088
+ const result = await this.admin.fetchTopicMetadata(
2089
+ topics ? { topics } : void 0
2090
+ );
2091
+ return result.topics.map((t) => ({
2092
+ name: t.name,
2093
+ partitions: t.partitions.map((p) => ({
2094
+ partition: p.partitionId ?? p.partition,
2095
+ leader: p.leader,
2096
+ replicas: p.replicas.map(
2097
+ (r) => typeof r === "number" ? r : r.nodeId
2098
+ ),
2099
+ isr: p.isr.map(
2100
+ (r) => typeof r === "number" ? r : r.nodeId
2101
+ )
2102
+ }))
2103
+ }));
2104
+ }
2105
+ /**
2106
+ * Delete records from a topic up to (but not including) the given offsets.
2107
+ * All messages with offsets **before** the given offset are deleted.
2108
+ */
2109
+ async deleteRecords(topic2, partitions) {
2110
+ await this.ensureAdminConnected();
2111
+ await this.admin.deleteTopicRecords({ topic: topic2, partitions });
2112
+ }
2113
+ /** Return the client ID provided during `KafkaClient` construction. */
1852
2114
  getClientId() {
1853
2115
  return this.clientId;
1854
2116
  }
2117
+ /**
2118
+ * Return a snapshot of internal event counters accumulated since client creation
2119
+ * (or since the last `resetMetrics()` call).
2120
+ *
2121
+ * @param topic Topic name to scope the snapshot to. When omitted, counters are
2122
+ * aggregated across all topics. If the topic has no recorded events yet, returns
2123
+ * a zero-valued snapshot.
2124
+ * @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
2125
+ */
1855
2126
  getMetrics(topic2) {
1856
2127
  if (topic2 !== void 0) {
1857
2128
  const m = this._topicMetrics.get(topic2);
@@ -1871,6 +2142,11 @@ var KafkaClient = class _KafkaClient {
1871
2142
  }
1872
2143
  return agg;
1873
2144
  }
2145
+ /**
2146
+ * Reset internal event counters to zero.
2147
+ *
2148
+ * @param topic Topic name to reset. When omitted, all topics are reset.
2149
+ */
1874
2150
  resetMetrics(topic2) {
1875
2151
  if (topic2 !== void 0) {
1876
2152
  this._topicMetrics.delete(topic2);
@@ -1942,6 +2218,13 @@ var KafkaClient = class _KafkaClient {
1942
2218
  process.once(signal, handler);
1943
2219
  }
1944
2220
  }
2221
+ /**
2222
+ * Increment the in-flight handler count and return a promise that calls the given handler.
2223
+ * When the promise resolves or rejects, decrement the in flight handler count.
2224
+ * If the in flight handler count reaches 0, call all previously registered drain resolvers.
2225
+ * @param fn The handler to call when the promise is resolved or rejected.
2226
+ * @returns A promise that resolves or rejects with the result of calling the handler.
2227
+ */
1945
2228
  trackInFlight(fn) {
1946
2229
  this.inFlightTotal++;
1947
2230
  return fn().finally(() => {
@@ -1951,6 +2234,12 @@ var KafkaClient = class _KafkaClient {
1951
2234
  }
1952
2235
  });
1953
2236
  }
2237
+ /**
2238
+ * Waits for all in-flight handlers to complete or for a given timeout, whichever comes first.
2239
+ * @param timeoutMs Maximum time to wait in milliseconds.
2240
+ * @returns A promise that resolves when all handlers have completed or the timeout is reached.
2241
+ * @private
2242
+ */
1954
2243
  waitForDrain(timeoutMs) {
1955
2244
  if (this.inFlightTotal === 0) return Promise.resolve();
1956
2245
  return new Promise((resolve) => {
@@ -1971,12 +2260,19 @@ var KafkaClient = class _KafkaClient {
1971
2260
  });
1972
2261
  }
1973
2262
  // ── Private helpers ──────────────────────────────────────────────
1974
- async preparePayload(topicOrDesc, messages) {
2263
+ /**
2264
+ * Prepare a send payload by registering the topic's schema and then building the payload.
2265
+ * @param topicOrDesc - topic name or topic descriptor
2266
+ * @param messages - batch of messages to send
2267
+ * @returns - prepared payload
2268
+ */
2269
+ async preparePayload(topicOrDesc, messages, compression) {
1975
2270
  registerSchema(topicOrDesc, this.schemaRegistry, this.logger);
1976
2271
  const payload = await buildSendPayload(
1977
2272
  topicOrDesc,
1978
2273
  messages,
1979
- this.producerOpsDeps
2274
+ this.producerOpsDeps,
2275
+ compression
1980
2276
  );
1981
2277
  await this.ensureTopic(payload.topic);
1982
2278
  return payload;
@@ -1989,6 +2285,12 @@ var KafkaClient = class _KafkaClient {
1989
2285
  }
1990
2286
  }
1991
2287
  }
2288
+ /**
2289
+ * Returns the KafkaMetrics for a given topic.
2290
+ * If the topic hasn't seen any events, initializes a zero-valued snapshot.
2291
+ * @param topic - name of the topic to get the metrics for
2292
+ * @returns - KafkaMetrics for the given topic
2293
+ */
1992
2294
  metricsFor(topic2) {
1993
2295
  let m = this._topicMetrics.get(topic2);
1994
2296
  if (!m) {
@@ -1997,12 +2299,24 @@ var KafkaClient = class _KafkaClient {
1997
2299
  }
1998
2300
  return m;
1999
2301
  }
2302
+ /**
2303
+ * Notifies instrumentation hooks of a retry event.
2304
+ * @param envelope The original message envelope that triggered the retry.
2305
+ * @param attempt The current retry attempt (1-indexed).
2306
+ * @param maxRetries The maximum number of retries configured for this topic.
2307
+ */
2000
2308
  notifyRetry(envelope, attempt, maxRetries) {
2001
2309
  this.metricsFor(envelope.topic).retryCount++;
2002
2310
  for (const inst of this.instrumentation) {
2003
2311
  inst.onRetry?.(envelope, attempt, maxRetries);
2004
2312
  }
2005
2313
  }
2314
+ /**
2315
+ * Called whenever a message is routed to the dead letter queue.
2316
+ * @param envelope The original message envelope.
2317
+ * @param reason The reason for routing to the dead letter queue.
2318
+ * @param gid The group ID of the consumer that triggered the circuit breaker, if any.
2319
+ */
2006
2320
  notifyDlq(envelope, reason, gid) {
2007
2321
  this.metricsFor(envelope.topic).dlqCount++;
2008
2322
  for (const inst of this.instrumentation) {
@@ -2022,6 +2336,11 @@ var KafkaClient = class _KafkaClient {
2022
2336
  if (state.status === "open") return;
2023
2337
  const openCircuit = () => {
2024
2338
  state.status = "open";
2339
+ state.window = [];
2340
+ state.successes = 0;
2341
+ clearTimeout(state.timer);
2342
+ for (const inst of this.instrumentation)
2343
+ inst.onCircuitOpen?.(envelope.topic, envelope.partition);
2025
2344
  this.pauseConsumer(gid, [
2026
2345
  { topic: envelope.topic, partitions: [envelope.partition] }
2027
2346
  ]);
@@ -2031,6 +2350,8 @@ var KafkaClient = class _KafkaClient {
2031
2350
  this.logger.log(
2032
2351
  `[CircuitBreaker] HALF-OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
2033
2352
  );
2353
+ for (const inst of this.instrumentation)
2354
+ inst.onCircuitHalfOpen?.(envelope.topic, envelope.partition);
2034
2355
  this.resumeConsumer(gid, [
2035
2356
  { topic: envelope.topic, partitions: [envelope.partition] }
2036
2357
  ]);
@@ -2057,12 +2378,26 @@ var KafkaClient = class _KafkaClient {
2057
2378
  openCircuit();
2058
2379
  }
2059
2380
  }
2381
+ /**
2382
+ * Notify all instrumentation hooks about a duplicate message detection.
2383
+ * Invoked by the consumer after a message has been successfully processed
2384
+ * and the Lamport clock detected a duplicate.
2385
+ * @param envelope The processed message envelope.
2386
+ * @param strategy The duplicate detection strategy used.
2387
+ */
2060
2388
  notifyDuplicate(envelope, strategy) {
2061
2389
  this.metricsFor(envelope.topic).dedupCount++;
2062
2390
  for (const inst of this.instrumentation) {
2063
2391
  inst.onDuplicate?.(envelope, strategy);
2064
2392
  }
2065
2393
  }
2394
+ /**
2395
+ * Notify all instrumentation hooks about a successfully processed message.
2396
+ * Invoked by the consumer after a message has been successfully processed
2397
+ * by the handler.
2398
+ * @param envelope The processed message envelope.
2399
+ * @param gid The optional consumer group ID.
2400
+ */
2066
2401
  notifyMessage(envelope, gid) {
2067
2402
  this.metricsFor(envelope.topic).processedCount++;
2068
2403
  for (const inst of this.instrumentation) {
@@ -2086,6 +2421,8 @@ var KafkaClient = class _KafkaClient {
2086
2421
  this.logger.log(
2087
2422
  `[CircuitBreaker] CLOSED \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
2088
2423
  );
2424
+ for (const inst of this.instrumentation)
2425
+ inst.onCircuitClose?.(envelope.topic, envelope.partition);
2089
2426
  }
2090
2427
  } else if (state.status === "closed") {
2091
2428
  const threshold = cfg.threshold ?? 5;
@@ -2203,6 +2540,15 @@ var KafkaClient = class _KafkaClient {
2203
2540
  this.retryTxProducers.set(transactionalId, p);
2204
2541
  return p;
2205
2542
  }
2543
+ /**
2544
+ * Ensure that a topic exists by creating it if it doesn't already exist.
2545
+ * If `autoCreateTopics` is disabled, this method will not create the topic and
2546
+ * will return immediately.
2547
+ * If multiple concurrent calls are made to `ensureTopic` for the same topic,
2548
+ * they are deduplicated to prevent multiple calls to `admin.createTopics()`.
2549
+ * @param topic - The topic to ensure exists.
2550
+ * @returns A promise that resolves when the topic has been created or already exists.
2551
+ */
2206
2552
  async ensureTopic(topic2) {
2207
2553
  if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
2208
2554
  let p = this.ensureTopicPromises.get(topic2);
@@ -2228,6 +2574,9 @@ var KafkaClient = class _KafkaClient {
2228
2574
  interceptors = [],
2229
2575
  schemas: optionSchemas
2230
2576
  } = options;
2577
+ const stringTopics = topics.filter((t) => !(t instanceof RegExp));
2578
+ const regexTopics = topics.filter((t) => t instanceof RegExp);
2579
+ const hasRegex = regexTopics.length > 0;
2231
2580
  const gid = optGroupId || this.defaultGroupId;
2232
2581
  const existingMode = this.runningConsumers.get(gid);
2233
2582
  const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
@@ -2246,15 +2595,20 @@ var KafkaClient = class _KafkaClient {
2246
2595
  gid,
2247
2596
  fromBeginning,
2248
2597
  options.autoCommit ?? true,
2249
- this.consumerOpsDeps
2598
+ this.consumerOpsDeps,
2599
+ options.partitionAssigner
2250
2600
  );
2251
2601
  const schemaMap = buildSchemaMap(
2252
- topics,
2602
+ stringTopics,
2253
2603
  this.schemaRegistry,
2254
2604
  optionSchemas,
2255
2605
  this.logger
2256
2606
  );
2257
- const topicNames = topics.map((t) => resolveTopicName(t));
2607
+ const topicNames = stringTopics.map((t) => resolveTopicName(t));
2608
+ const subscribeTopics = [
2609
+ ...topicNames,
2610
+ ...regexTopics
2611
+ ];
2258
2612
  for (const t of topicNames) {
2259
2613
  await this.ensureTopic(t);
2260
2614
  }
@@ -2262,7 +2616,7 @@ var KafkaClient = class _KafkaClient {
2262
2616
  for (const t of topicNames) {
2263
2617
  await this.ensureTopic(`${t}.dlq`);
2264
2618
  }
2265
- if (!this.autoCreateTopicsEnabled) {
2619
+ if (!this.autoCreateTopicsEnabled && topicNames.length > 0) {
2266
2620
  await this.validateDlqTopicsExist(topicNames);
2267
2621
  }
2268
2622
  }
@@ -2272,21 +2626,22 @@ var KafkaClient = class _KafkaClient {
2272
2626
  for (const t of topicNames) {
2273
2627
  await this.ensureTopic(dest ?? `${t}.duplicates`);
2274
2628
  }
2275
- } else {
2629
+ } else if (topicNames.length > 0) {
2276
2630
  await this.validateDuplicatesTopicsExist(topicNames, dest);
2277
2631
  }
2278
2632
  }
2279
2633
  await consumer.connect();
2280
2634
  await subscribeWithRetry(
2281
2635
  consumer,
2282
- topicNames,
2636
+ subscribeTopics,
2283
2637
  this.logger,
2284
2638
  options.subscribeRetry
2285
2639
  );
2640
+ const displayTopics = subscribeTopics.map((t) => t instanceof RegExp ? t.toString() : t).join(", ");
2286
2641
  this.logger.log(
2287
- `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
2642
+ `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${displayTopics}`
2288
2643
  );
2289
- return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
2644
+ return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry, hasRegex };
2290
2645
  }
2291
2646
  /** Create or retrieve the deduplication context for a consumer group. */
2292
2647
  resolveDeduplicationContext(groupId, options) {
@@ -2297,6 +2652,15 @@ var KafkaClient = class _KafkaClient {
2297
2652
  return { options, state: this.dedupStates.get(groupId) };
2298
2653
  }
2299
2654
  // ── Deps object getters ──────────────────────────────────────────
2655
+ /**
2656
+ * An object containing the necessary dependencies for building a send payload.
2657
+ *
2658
+ * @property {Map<string, SchemaLike>} schemaRegistry - A map of topic names to their schemas.
2659
+ * @property {boolean} strictSchemasEnabled - Whether strict schema validation is enabled.
2660
+ * @property {KafkaInstrumentation} instrumentation - An object for creating a span for instrumentation.
2661
+ * @property {KafkaLogger} logger - A logger for logging messages.
2662
+ * @property {() => number} nextLamportClock - A function that returns the next value of the logical clock.
2663
+ */
2300
2664
  get producerOpsDeps() {
2301
2665
  return {
2302
2666
  schemaRegistry: this.schemaRegistry,
@@ -2306,6 +2670,15 @@ var KafkaClient = class _KafkaClient {
2306
2670
  nextLamportClock: () => ++this._lamportClock
2307
2671
  };
2308
2672
  }
2673
+ /**
2674
+ * ConsumerOpsDeps object properties:
2675
+ *
2676
+ * @property {Map<string, Consumer>} consumers - A map of consumer group IDs to their corresponding consumer instances.
2677
+ * @property {Map<string, { fromBeginning: boolean; autoCommit: boolean }>} consumerCreationOptions - A map of consumer group IDs to their creation options.
2678
+ * @property {Kafka} kafka - The Kafka client instance.
2679
+ * @property {function(string, Partition[]): void} onRebalance - An optional callback function called when a consumer group is rebalanced.
2680
+ * @property {KafkaLogger} logger - The logger instance used for logging consumer operations.
2681
+ */
2309
2682
  get consumerOpsDeps() {
2310
2683
  return {
2311
2684
  consumers: this.consumers,
@@ -2322,12 +2695,28 @@ var KafkaClient = class _KafkaClient {
2322
2695
  producer: this.producer,
2323
2696
  instrumentation: this.instrumentation,
2324
2697
  onMessageLost: this.onMessageLost,
2698
+ onTtlExpired: this.onTtlExpired,
2325
2699
  onRetry: this.notifyRetry.bind(this),
2326
2700
  onDlq: (envelope, reason) => this.notifyDlq(envelope, reason, gid),
2327
2701
  onDuplicate: this.notifyDuplicate.bind(this),
2328
2702
  onMessage: (envelope) => this.notifyMessage(envelope, gid)
2329
2703
  };
2330
2704
  }
2705
+ /**
2706
+ * The dependencies object passed to the retry topic consumers.
2707
+ *
2708
+ * `logger`: The logger instance passed to the retry topic consumers.
2709
+ * `producer`: The producer instance passed to the retry topic consumers.
2710
+ * `instrumentation`: The instrumentation instance passed to the retry topic consumers.
2711
+ * `onMessageLost`: The callback function passed to the retry topic consumers for tracking lost messages.
2712
+ * `onRetry`: The callback function passed to the retry topic consumers for tracking retry attempts.
2713
+ * `onDlq`: The callback function passed to the retry topic consumers for tracking dead-letter queue routing.
2714
+ * `onMessage`: The callback function passed to the retry topic consumers for tracking message delivery.
2715
+ * `ensureTopic`: A function that ensures a topic exists before subscribing to it.
2716
+ * `getOrCreateConsumer`: A function that creates or retrieves a consumer instance.
2717
+ * `runningConsumers`: A map of consumer group IDs to their corresponding consumer instances.
2718
+ * `createRetryTxProducer`: A function that creates a retry transactional producer instance.
2719
+ */
2331
2720
  get retryTopicDeps() {
2332
2721
  return {
2333
2722
  logger: this.logger,
@@ -2379,4 +2768,4 @@ export {
2379
2768
  KafkaClient,
2380
2769
  topic
2381
2770
  };
2382
- //# sourceMappingURL=chunk-MJ342P4R.mjs.map
2771
+ //# sourceMappingURL=chunk-MADAJD2F.mjs.map