@drarzter/kafka-client 0.7.1 → 0.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -57,7 +57,7 @@ __export(index_exports, {
  module.exports = __toCommonJS(index_exports);

  // src/client/kafka.client/index.ts
- var import_kafka_javascript = require("@confluentinc/kafka-javascript");
+ var import_kafka_javascript2 = require("@confluentinc/kafka-javascript");

  // src/client/message/envelope.ts
  var import_node_async_hooks = require("async_hooks");
@@ -198,7 +198,7 @@ async function validateMessage(topicOrDesc, message, deps, ctx) {
  }
  return message;
  }
- async function buildSendPayload(topicOrDesc, messages, deps) {
+ async function buildSendPayload(topicOrDesc, messages, deps, compression) {
  const topic2 = resolveTopicName(topicOrDesc);
  const builtMessages = await Promise.all(
  messages.map(async (m) => {
@@ -228,11 +228,12 @@ async function buildSendPayload(topicOrDesc, messages, deps) {
  };
  })
  );
- return { topic: topic2, messages: builtMessages };
+ return { topic: topic2, messages: builtMessages, ...compression && { compression } };
  }

  // src/client/kafka.client/consumer-ops.ts
- function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
+ var import_kafka_javascript = require("@confluentinc/kafka-javascript");
+ function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps, partitionAssigner) {
  const { consumers, consumerCreationOptions, kafka, onRebalance, logger } = deps;
  if (consumers.has(groupId)) {
  const prev = consumerCreationOptions.get(groupId);
@@ -244,8 +245,11 @@ function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
  return consumers.get(groupId);
  }
  consumerCreationOptions.set(groupId, { fromBeginning, autoCommit });
+ const assigners = [
+ partitionAssigner === "roundrobin" ? import_kafka_javascript.KafkaJS.PartitionAssigners.roundRobin : partitionAssigner === "range" ? import_kafka_javascript.KafkaJS.PartitionAssigners.range : import_kafka_javascript.KafkaJS.PartitionAssigners.cooperativeSticky
+ ];
  const config = {
- kafkaJS: { groupId, fromBeginning, autoCommit }
+ kafkaJS: { groupId, fromBeginning, autoCommit, partitionAssigners: assigners }
  };
  if (onRebalance) {
  const cb = onRebalance;
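
Note: the new `partitionAssigner` option recognizes `"roundrobin"` and `"range"`; any other value (or omitting the option) falls back to `cooperativeSticky`. A minimal usage sketch, assuming a `startConsumer(topics, options)` shape — only the option name and its assigner mapping are confirmed by this diff:

```ts
import { KafkaClient } from "@drarzter/kafka-client";

// Constructor options are illustrative assumptions.
const client = new KafkaClient({ clientId: "orders-svc", brokers: ["localhost:9092"] });

// "roundrobin" | "range" select the matching KafkaJS assigner;
// anything else (including undefined) yields cooperativeSticky.
await client.startConsumer(["orders"], {
  groupId: "orders-workers",
  partitionAssigner: "roundrobin",
  handler: async (envelope) => { /* ... */ }, // handler wiring assumed
});
```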
@@ -790,7 +794,8 @@ async function handleEachMessage(payload, opts, deps) {
  });
  deps.onDlq?.(envelope, "ttl-expired");
  } else {
- await deps.onTtlExpired?.({
+ const ttlHandler = opts.onTtlExpired ?? deps.onTtlExpired;
+ await ttlHandler?.({
  topic: topic2,
  ageMs,
  messageTtlMs: opts.messageTtlMs,
@@ -917,7 +922,8 @@ async function handleEachBatch(payload, opts, deps) {
  });
  deps.onDlq?.(envelope, "ttl-expired");
  } else {
- await deps.onTtlExpired?.({
+ const ttlHandler = opts.onTtlExpired ?? deps.onTtlExpired;
+ await ttlHandler?.({
  topic: batch.topic,
  ageMs,
  messageTtlMs: opts.messageTtlMs,
@@ -963,6 +969,7 @@ async function handleEachBatch(payload, opts, deps) {
  async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
  const maxAttempts = retryOpts?.retries ?? 5;
  const backoffMs = retryOpts?.backoffMs ?? 5e3;
+ const displayTopics = topics.map((t) => t instanceof RegExp ? t.toString() : t).join(", ");
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
  try {
  await consumer.subscribe({ topics });
@@ -972,7 +979,7 @@ async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
  const msg = toError(error).message;
  const delay = Math.floor(Math.random() * backoffMs);
  logger.warn(
- `Failed to subscribe to [${topics.join(", ")}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${delay}ms...`
+ `Failed to subscribe to [${displayTopics}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${delay}ms...`
  );
  await sleep(delay);
  }
@@ -1234,7 +1241,7 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
  }

  // src/client/kafka.client/index.ts
- var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript.KafkaJS;
+ var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript2.KafkaJS;
  var _activeTransactionalIds = /* @__PURE__ */ new Set();
  var AsyncQueue = class {
  constructor(highWaterMark = Infinity, onFull = () => {
@@ -1358,21 +1365,54 @@ var KafkaClient = class _KafkaClient {
  this.admin = this.kafka.admin();
  }
  async sendMessage(topicOrDesc, message, options = {}) {
- const payload = await this.preparePayload(topicOrDesc, [
- {
- value: message,
- key: options.key,
- headers: options.headers,
- correlationId: options.correlationId,
- schemaVersion: options.schemaVersion,
- eventId: options.eventId
- }
- ]);
+ const payload = await this.preparePayload(
+ topicOrDesc,
+ [
+ {
+ value: message,
+ key: options.key,
+ headers: options.headers,
+ correlationId: options.correlationId,
+ schemaVersion: options.schemaVersion,
+ eventId: options.eventId
+ }
+ ],
+ options.compression
+ );
  await this.producer.send(payload);
  this.notifyAfterSend(payload.topic, payload.messages.length);
  }
- async sendBatch(topicOrDesc, messages) {
- const payload = await this.preparePayload(topicOrDesc, messages);
+ /**
+ * Send a null-value (tombstone) message. Used with log-compacted topics to signal
+ * that a key's record should be removed during the next compaction cycle.
+ *
+ * Tombstones skip envelope headers, schema validation, and Lamport clock stamping.
+ * Both `beforeSend` and `afterSend` instrumentation hooks are still called so tracing works correctly.
+ *
+ * @param topic Topic name.
+ * @param key Partition key identifying the record to tombstone.
+ * @param headers Optional custom Kafka headers.
+ */
+ async sendTombstone(topic2, key, headers) {
+ const hdrs = { ...headers };
+ for (const inst of this.instrumentation) {
+ inst.beforeSend?.(topic2, hdrs);
+ }
+ await this.ensureTopic(topic2);
+ await this.producer.send({
+ topic: topic2,
+ messages: [{ value: null, key, headers: hdrs }]
+ });
+ for (const inst of this.instrumentation) {
+ inst.afterSend?.(topic2);
+ }
+ }
+ async sendBatch(topicOrDesc, messages, options) {
+ const payload = await this.preparePayload(
+ topicOrDesc,
+ messages,
+ options?.compression
+ );
  await this.producer.send(payload);
  this.notifyAfterSend(payload.topic, payload.messages.length);
  }
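
Note: `sendMessage` and `sendBatch` now take a `compression` option that is spread into the produced payload, and the new `sendTombstone` publishes a `null`-value record for log-compacted topics. A hedged sketch — the accepted compression values come from the underlying `@confluentinc/kafka-javascript` producer and `"gzip"` is an assumption here:

```ts
// Compression is forwarded as-is to producer.send(); "gzip" is illustrative.
await client.sendMessage("orders", { id: 1 }, { key: "1", compression: "gzip" });

await client.sendBatch(
  "orders",
  [{ value: { id: 2 } }, { value: { id: 3 } }],
  { compression: "gzip" }
);

// Tombstone: null value for a key on a compacted topic; skips envelope
// headers, schema validation, and Lamport stamping (per the JSDoc above).
await client.sendTombstone("orders", "1");
```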
@@ -1420,6 +1460,13 @@ var KafkaClient = class _KafkaClient {
  await tx.send(payload);
  this.notifyAfterSend(payload.topic, payload.messages.length);
  },
+ /**
+ * Send multiple messages in a single call to the topic.
+ * All messages in the batch will be sent atomically.
+ * If any message fails to send, the entire batch will be aborted.
+ * @param topicOrDesc - topic name or TopicDescriptor
+ * @param messages - array of messages to send with optional key, headers, correlationId, schemaVersion, and eventId
+ */
  sendBatch: async (topicOrDesc, messages) => {
  const payload = await this.preparePayload(topicOrDesc, messages);
  await tx.send(payload);
@@ -1462,6 +1509,12 @@ var KafkaClient = class _KafkaClient {
  "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
  );
  }
+ const hasRegexTopics = topics.some((t) => t instanceof RegExp);
+ if (options.retryTopics && hasRegexTopics) {
+ throw new Error(
+ "retryTopics is incompatible with regex topic patterns \u2014 retry topics require a fixed topic name to build the retry chain."
+ );
+ }
  const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", setupOptions);
  if (options.circuitBreaker)
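
Note: consumers may now mix fixed topic names with `RegExp` patterns, but `retryTopics` is rejected when any pattern is present, since the retry chain needs a concrete topic name. Sketch (option shape assumed beyond the names shown in this diff):

```ts
// Pattern subscriptions are passed through to consumer.subscribe().
await client.startConsumer([/^orders\..+/, "payments"], {
  groupId: "order-stream",
});

// Throws: "retryTopics is incompatible with regex topic patterns — ..."
await client.startConsumer([/^orders\..+/], {
  groupId: "order-stream",
  retryTopics: true,
});
```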
@@ -1493,6 +1546,7 @@ var KafkaClient = class _KafkaClient {
  wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
  deduplication,
  messageTtlMs: options.messageTtlMs,
+ onTtlExpired: options.onTtlExpired,
  eosMainContext
  },
  deps
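
Note: the TTL-expiry hook can now be set per consumer via `options.onTtlExpired`, which takes precedence over the client-level handler (`opts.onTtlExpired ?? deps.onTtlExpired`). The callback payload fields used below are the ones visible in this diff; the rest of the options object is assumed:

```ts
await client.startConsumer(["orders"], {
  groupId: "orders-workers",
  messageTtlMs: 60_000,
  // The per-consumer handler wins over any client-wide onTtlExpired.
  onTtlExpired: async ({ topic, ageMs, messageTtlMs }) => {
    console.warn(`skipping ${topic} message: age ${ageMs}ms > ttl ${messageTtlMs}ms`);
  },
});
```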
@@ -1525,6 +1579,12 @@ var KafkaClient = class _KafkaClient {
  "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
  );
  }
+ const hasRegexTopics = topics.some((t) => t instanceof RegExp);
+ if (options.retryTopics && hasRegexTopics) {
+ throw new Error(
+ "retryTopics is incompatible with regex topic patterns \u2014 retry topics require a fixed topic name to build the retry chain."
+ );
+ }
  if (options.retryTopics) {
  } else if (options.autoCommit !== false) {
  this.logger.debug?.(
@@ -1548,6 +1608,16 @@ var KafkaClient = class _KafkaClient {
  eosMainContext = { txProducer, consumer };
  }
  await consumer.run({
+ /**
+ * eachBatch: called by the consumer for each batch of messages.
+ * Called with the `payload` argument, which is an object containing the
+ * batch of messages and a `BatchMeta` object with offset management controls.
+ *
+ * The function is wrapped with `trackInFlight` and `handleEachBatch` to provide
+ * error handling and offset management.
+ *
+ * @param payload - an object containing the batch of messages and a `BatchMeta` object.
+ */
  eachBatch: (payload) => this.trackInFlight(
  () => handleEachBatch(
  payload,
@@ -1562,6 +1632,7 @@ var KafkaClient = class _KafkaClient {
  wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
  deduplication,
  messageTtlMs: options.messageTtlMs,
+ onTtlExpired: options.onTtlExpired,
  eosMainContext
  },
  deps
@@ -1608,6 +1679,11 @@ var KafkaClient = class _KafkaClient {
  * }
  */
  consume(topic2, options) {
+ if (options?.retryTopics) {
+ throw new Error(
+ "consume() does not support retryTopics (EOS retry chains). Use startConsumer() with retryTopics: true for guaranteed retry delivery."
+ );
+ }
  const gid = options?.groupId ?? this.defaultGroupId;
  const queue = new AsyncQueue(
  options?.queueHighWaterMark,
@@ -1636,6 +1712,14 @@ var KafkaClient = class _KafkaClient {
  };
  }
  // ── Consumer lifecycle ───────────────────────────────────────────
+ /**
+ * Stop all consumers or a specific group.
+ *
+ * If `groupId` is unspecified, all active consumers are stopped.
+ * If `groupId` is specified, only the consumer with that group ID is stopped.
+ *
+ * @throws {Error} if the consumer fails to disconnect.
+ */
  async stopConsumer(groupId) {
  if (groupId !== void 0) {
  const consumer = this.consumers.get(groupId);
@@ -1729,6 +1813,12 @@ var KafkaClient = class _KafkaClient {
  this.logger.log("All consumers disconnected");
  }
  }
+ /**
+ * Temporarily stop delivering messages from specific partitions without disconnecting the consumer.
+ *
+ * @param groupId Consumer group to pause. Defaults to the client's default groupId.
+ * @param assignments Topic-partition pairs to pause.
+ */
  pauseConsumer(groupId, assignments) {
  const gid = groupId ?? this.defaultGroupId;
  const consumer = this.consumers.get(gid);
@@ -1742,6 +1832,12 @@ var KafkaClient = class _KafkaClient {
  )
  );
  }
+ /**
+ * Resume message delivery for previously paused topic-partitions.
+ *
+ * @param {string|undefined} groupId Consumer group to resume. Defaults to the client's default groupId.
+ * @param {Array<{ topic: string; partitions: number[] }>} assignments Topic-partition pairs to resume.
+ */
  resumeConsumer(groupId, assignments) {
  const gid = groupId ?? this.defaultGroupId;
  const consumer = this.consumers.get(gid);
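
Note: `pauseConsumer` and `resumeConsumer` operate on a connected consumer without disconnecting it; the assignment shape comes from the JSDoc above. A short sketch:

```ts
// Apply back-pressure on two partitions, then resume them later.
client.pauseConsumer("orders-workers", [{ topic: "orders", partitions: [0, 1] }]);
// ...
client.resumeConsumer("orders-workers", [{ topic: "orders", partitions: [0, 1] }]);
```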
@@ -1781,6 +1877,17 @@ var KafkaClient = class _KafkaClient {
  "x-dlq-error-stack",
  "x-dlq-attempt-count"
  ]);
+ /**
+ * Re-publish messages from a dead letter queue back to the original topic.
+ *
+ * Messages are consumed from `<topic>.dlq` and re-published to `<topic>`.
+ * The original topic is determined by the `x-dlq-original-topic` header.
+ * The `x-dlq-*` headers are stripped before re-publishing.
+ *
+ * @param topic - The topic to replay from `<topic>.dlq`
+ * @param options - Options for replay
+ * @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
+ */
  async replayDlq(topic2, options = {}) {
  const dlqTopic = `${topic2}.dlq`;
  await this.ensureAdminConnected();
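
Note: `replayDlq` reads `<topic>.dlq`, strips the `x-dlq-*` headers, and re-publishes to the topic named in `x-dlq-original-topic`. Sketch (the options object is omitted because its fields are not shown in this diff):

```ts
const { replayed, skipped } = await client.replayDlq("orders");
console.log(`re-published ${replayed} message(s), skipped ${skipped}`);
```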
@@ -1909,6 +2016,14 @@ var KafkaClient = class _KafkaClient {
  );
  }
  }
+ /**
+ * Seek specific topic-partition pairs to the offset nearest to a given timestamp
+ * (in milliseconds) for a stopped consumer group.
+ * Throws if the group is still running — call `stopConsumer(groupId)` first.
+ * Assignments are grouped by topic and committed via `admin.setOffsets`.
+ * If no offset exists at the requested timestamp (e.g. empty partition or
+ * future timestamp), the partition falls back to `-1` (end of topic — new messages only).
+ */
  async seekToTimestamp(groupId, assignments) {
  const gid = groupId ?? this.defaultGroupId;
  if (this.runningConsumers.has(gid)) {
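
Note: `seekToTimestamp` only works on a stopped group and commits offsets via `admin.setOffsets`, falling back to `-1` (end of topic) when no offset exists at the timestamp. The assignment shape below is an assumption inferred from the JSDoc, not confirmed by this diff:

```ts
await client.stopConsumer("orders-workers"); // required; a running group throws
await client.seekToTimestamp("orders-workers", [
  { topic: "orders", partition: 0, timestamp: Date.now() - 3_600_000 }, // shape assumed
]);
```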
@@ -1942,6 +2057,17 @@ var KafkaClient = class _KafkaClient {
  );
  }
  }
+ /**
+ * Returns the current circuit breaker state for a specific topic partition.
+ * Returns `undefined` when no circuit state exists — either `circuitBreaker` is not
+ * configured for the group, or the circuit has never been tripped.
+ *
+ * @param topic Topic name.
+ * @param partition Partition index.
+ * @param groupId Consumer group. Defaults to the client's default groupId.
+ *
+ * @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
+ */
  getCircuitState(topic2, partition, groupId) {
  const gid = groupId ?? this.defaultGroupId;
  const state = this.circuitStates.get(`${gid}:${topic2}:${partition}`);
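
Note: `getCircuitState` exposes the `{ status, failures, windowSize }` snapshot keyed by `groupId:topic:partition`; `undefined` means no breaker state exists yet. Sketch — the `"open"` status value is an assumption, only the field names are documented above:

```ts
const state = client.getCircuitState("orders", 0, "orders-workers");
if (state?.status === "open") { // "open" is an assumed status value
  console.warn(`circuit open: ${state.failures} failures in window ${state.windowSize}`);
}
```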
@@ -1999,9 +2125,62 @@ var KafkaClient = class _KafkaClient {
  };
  }
  }
+ /**
+ * List all consumer groups known to the broker.
+ * Useful for monitoring which groups are active and their current state.
+ */
+ async listConsumerGroups() {
+ await this.ensureAdminConnected();
+ const result = await this.admin.listGroups();
+ return result.groups.map((g) => ({
+ groupId: g.groupId,
+ state: g.state ?? "Unknown"
+ }));
+ }
+ /**
+ * Describe topics — returns partition layout, leader, replicas, and ISR.
+ * @param topics Topic names to describe. Omit to describe all topics.
+ */
+ async describeTopics(topics) {
+ await this.ensureAdminConnected();
+ const result = await this.admin.fetchTopicMetadata(
+ topics ? { topics } : void 0
+ );
+ return result.topics.map((t) => ({
+ name: t.name,
+ partitions: t.partitions.map((p) => ({
+ partition: p.partitionId ?? p.partition,
+ leader: p.leader,
+ replicas: p.replicas.map(
+ (r) => typeof r === "number" ? r : r.nodeId
+ ),
+ isr: p.isr.map(
+ (r) => typeof r === "number" ? r : r.nodeId
+ )
+ }))
+ }));
+ }
+ /**
+ * Delete records from a topic up to (but not including) the given offsets.
+ * All messages with offsets **before** the given offset are deleted.
+ */
+ async deleteRecords(topic2, partitions) {
+ await this.ensureAdminConnected();
+ await this.admin.deleteTopicRecords({ topic: topic2, partitions });
+ }
+ /** Return the client ID provided during `KafkaClient` construction. */
  getClientId() {
  return this.clientId;
  }
+ /**
+ * Return a snapshot of internal event counters accumulated since client creation
+ * (or since the last `resetMetrics()` call).
+ *
+ * @param topic Topic name to scope the snapshot to. When omitted, counters are
+ * aggregated across all topics. If the topic has no recorded events yet, returns
+ * a zero-valued snapshot.
+ * @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
+ */
  getMetrics(topic2) {
  if (topic2 !== void 0) {
  const m = this._topicMetrics.get(topic2);
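
Note: three admin helpers are new in this release. Their return shapes are visible in the code above; the `deleteRecords` partition/offset shape is an assumption based on the KafkaJS-style `deleteTopicRecords` call it wraps:

```ts
const groups = await client.listConsumerGroups(); // [{ groupId, state }]

const [orders] = await client.describeTopics(["orders"]);
console.log(orders.partitions); // [{ partition, leader, replicas, isr }, ...]

// Deletes all records with offsets below the given offset (shape assumed).
await client.deleteRecords("orders", [{ partition: 0, offset: "100" }]);
```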
@@ -2021,6 +2200,11 @@ var KafkaClient = class _KafkaClient {
  }
  return agg;
  }
+ /**
+ * Reset internal event counters to zero.
+ *
+ * @param topic Topic name to reset. When omitted, all topics are reset.
+ */
  resetMetrics(topic2) {
  if (topic2 !== void 0) {
  this._topicMetrics.delete(topic2);
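
Note: `getMetrics` returns a per-topic or aggregated counter snapshot and `resetMetrics` zeroes it; both are documented above. Sketch:

```ts
const m = client.getMetrics("orders");
console.log(m.processedCount, m.retryCount, m.dlqCount, m.dedupCount);

client.resetMetrics("orders"); // omit the argument to reset every topic
```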
@@ -2092,6 +2276,13 @@ var KafkaClient = class _KafkaClient {
  process.once(signal, handler);
  }
  }
+ /**
+ * Increment the in-flight handler count, invoke the given handler, and return its promise.
+ * When that promise settles, decrement the in-flight handler count.
+ * If the in-flight handler count reaches 0, call all previously registered drain resolvers.
+ * @param fn The async handler to invoke and track.
+ * @returns A promise that resolves or rejects with the result of calling the handler.
+ */
  trackInFlight(fn) {
  this.inFlightTotal++;
  return fn().finally(() => {
@@ -2101,6 +2292,12 @@ var KafkaClient = class _KafkaClient {
  }
  });
  }
+ /**
+ * Waits for all in-flight handlers to complete or for a given timeout, whichever comes first.
+ * @param timeoutMs Maximum time to wait in milliseconds.
+ * @returns A promise that resolves when all handlers have completed or the timeout is reached.
+ * @private
+ */
  waitForDrain(timeoutMs) {
  if (this.inFlightTotal === 0) return Promise.resolve();
  return new Promise((resolve) => {
@@ -2121,12 +2318,19 @@ var KafkaClient = class _KafkaClient {
  });
  }
  // ── Private helpers ──────────────────────────────────────────────
- async preparePayload(topicOrDesc, messages) {
+ /**
+ * Prepare a send payload by registering the topic's schema and then building the payload.
+ * @param topicOrDesc - topic name or topic descriptor
+ * @param messages - batch of messages to send
+ * @returns - prepared payload
+ */
+ async preparePayload(topicOrDesc, messages, compression) {
  registerSchema(topicOrDesc, this.schemaRegistry, this.logger);
  const payload = await buildSendPayload(
  topicOrDesc,
  messages,
- this.producerOpsDeps
+ this.producerOpsDeps,
+ compression
  );
  await this.ensureTopic(payload.topic);
  return payload;
@@ -2139,6 +2343,12 @@ var KafkaClient = class _KafkaClient {
  }
  }
  }
+ /**
+ * Returns the KafkaMetrics for a given topic.
+ * If the topic hasn't seen any events, initializes a zero-valued snapshot.
+ * @param topic - name of the topic to get the metrics for
+ * @returns - KafkaMetrics for the given topic
+ */
  metricsFor(topic2) {
  let m = this._topicMetrics.get(topic2);
  if (!m) {
@@ -2147,12 +2357,24 @@ var KafkaClient = class _KafkaClient {
  }
  return m;
  }
+ /**
+ * Notifies instrumentation hooks of a retry event.
+ * @param envelope The original message envelope that triggered the retry.
+ * @param attempt The current retry attempt (1-indexed).
+ * @param maxRetries The maximum number of retries configured for this topic.
+ */
  notifyRetry(envelope, attempt, maxRetries) {
  this.metricsFor(envelope.topic).retryCount++;
  for (const inst of this.instrumentation) {
  inst.onRetry?.(envelope, attempt, maxRetries);
  }
  }
+ /**
+ * Called whenever a message is routed to the dead letter queue.
+ * @param envelope The original message envelope.
+ * @param reason The reason for routing to the dead letter queue.
+ * @param gid The group ID of the consumer that triggered the circuit breaker, if any.
+ */
  notifyDlq(envelope, reason, gid) {
  this.metricsFor(envelope.topic).dlqCount++;
  for (const inst of this.instrumentation) {
@@ -2214,12 +2436,26 @@ var KafkaClient = class _KafkaClient {
  openCircuit();
  }
  }
+ /**
+ * Notify all instrumentation hooks about a duplicate message detection.
+ * Invoked by the consumer after a message has been successfully processed
+ * and the Lamport clock detected a duplicate.
+ * @param envelope The processed message envelope.
+ * @param strategy The duplicate detection strategy used.
+ */
  notifyDuplicate(envelope, strategy) {
  this.metricsFor(envelope.topic).dedupCount++;
  for (const inst of this.instrumentation) {
  inst.onDuplicate?.(envelope, strategy);
  }
  }
+ /**
+ * Notify all instrumentation hooks about a successfully processed message.
+ * Invoked by the consumer after a message has been successfully processed
+ * by the handler.
+ * @param envelope The processed message envelope.
+ * @param gid The optional consumer group ID.
+ */
  notifyMessage(envelope, gid) {
  this.metricsFor(envelope.topic).processedCount++;
  for (const inst of this.instrumentation) {
@@ -2362,6 +2598,15 @@ var KafkaClient = class _KafkaClient {
  this.retryTxProducers.set(transactionalId, p);
  return p;
  }
+ /**
+ * Ensure that a topic exists by creating it if it doesn't already exist.
+ * If `autoCreateTopics` is disabled, this method will not create the topic and
+ * will return immediately.
+ * If multiple concurrent calls are made to `ensureTopic` for the same topic,
+ * they are deduplicated to prevent multiple calls to `admin.createTopics()`.
+ * @param topic - The topic to ensure exists.
+ * @returns A promise that resolves when the topic has been created or already exists.
+ */
  async ensureTopic(topic2) {
  if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
  let p = this.ensureTopicPromises.get(topic2);
@@ -2387,6 +2632,9 @@ var KafkaClient = class _KafkaClient {
  interceptors = [],
  schemas: optionSchemas
  } = options;
+ const stringTopics = topics.filter((t) => !(t instanceof RegExp));
+ const regexTopics = topics.filter((t) => t instanceof RegExp);
+ const hasRegex = regexTopics.length > 0;
  const gid = optGroupId || this.defaultGroupId;
  const existingMode = this.runningConsumers.get(gid);
  const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
@@ -2405,15 +2653,20 @@ var KafkaClient = class _KafkaClient {
  gid,
  fromBeginning,
  options.autoCommit ?? true,
- this.consumerOpsDeps
+ this.consumerOpsDeps,
+ options.partitionAssigner
  );
  const schemaMap = buildSchemaMap(
- topics,
+ stringTopics,
  this.schemaRegistry,
  optionSchemas,
  this.logger
  );
- const topicNames = topics.map((t) => resolveTopicName(t));
+ const topicNames = stringTopics.map((t) => resolveTopicName(t));
+ const subscribeTopics = [
+ ...topicNames,
+ ...regexTopics
+ ];
  for (const t of topicNames) {
  await this.ensureTopic(t);
  }
@@ -2421,7 +2674,7 @@ var KafkaClient = class _KafkaClient {
  for (const t of topicNames) {
  await this.ensureTopic(`${t}.dlq`);
  }
- if (!this.autoCreateTopicsEnabled) {
+ if (!this.autoCreateTopicsEnabled && topicNames.length > 0) {
  await this.validateDlqTopicsExist(topicNames);
  }
  }
@@ -2431,21 +2684,22 @@ var KafkaClient = class _KafkaClient {
  for (const t of topicNames) {
  await this.ensureTopic(dest ?? `${t}.duplicates`);
  }
- } else {
+ } else if (topicNames.length > 0) {
  await this.validateDuplicatesTopicsExist(topicNames, dest);
  }
  }
  await consumer.connect();
  await subscribeWithRetry(
  consumer,
- topicNames,
+ subscribeTopics,
  this.logger,
  options.subscribeRetry
  );
+ const displayTopics = subscribeTopics.map((t) => t instanceof RegExp ? t.toString() : t).join(", ");
  this.logger.log(
- `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
+ `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${displayTopics}`
  );
- return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
+ return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry, hasRegex };
  }
  /** Create or retrieve the deduplication context for a consumer group. */
  resolveDeduplicationContext(groupId, options) {
@@ -2456,6 +2710,15 @@ var KafkaClient = class _KafkaClient {
  return { options, state: this.dedupStates.get(groupId) };
  }
  // ── Deps object getters ──────────────────────────────────────────
+ /**
+ * An object containing the necessary dependencies for building a send payload.
+ *
+ * @property {Map<string, SchemaLike>} schemaRegistry - A map of topic names to their schemas.
+ * @property {boolean} strictSchemasEnabled - Whether strict schema validation is enabled.
+ * @property {KafkaInstrumentation} instrumentation - An object for creating a span for instrumentation.
+ * @property {KafkaLogger} logger - A logger for logging messages.
+ * @property {() => number} nextLamportClock - A function that returns the next value of the logical clock.
+ */
  get producerOpsDeps() {
  return {
  schemaRegistry: this.schemaRegistry,
@@ -2465,6 +2728,15 @@ var KafkaClient = class _KafkaClient {
  nextLamportClock: () => ++this._lamportClock
  };
  }
+ /**
+ * ConsumerOpsDeps object properties:
+ *
+ * @property {Map<string, Consumer>} consumers - A map of consumer group IDs to their corresponding consumer instances.
+ * @property {Map<string, { fromBeginning: boolean; autoCommit: boolean }>} consumerCreationOptions - A map of consumer group IDs to their creation options.
+ * @property {Kafka} kafka - The Kafka client instance.
+ * @property {function(string, Partition[]): void} onRebalance - An optional callback function called when a consumer group is rebalanced.
+ * @property {KafkaLogger} logger - The logger instance used for logging consumer operations.
+ */
  get consumerOpsDeps() {
  return {
  consumers: this.consumers,
@@ -2488,6 +2760,21 @@ var KafkaClient = class _KafkaClient {
  onMessage: (envelope) => this.notifyMessage(envelope, gid)
  };
  }
+ /**
+ * The dependencies object passed to the retry topic consumers.
+ *
+ * `logger`: The logger instance passed to the retry topic consumers.
+ * `producer`: The producer instance passed to the retry topic consumers.
+ * `instrumentation`: The instrumentation instance passed to the retry topic consumers.
+ * `onMessageLost`: The callback function passed to the retry topic consumers for tracking lost messages.
+ * `onRetry`: The callback function passed to the retry topic consumers for tracking retry attempts.
+ * `onDlq`: The callback function passed to the retry topic consumers for tracking dead-letter queue routing.
+ * `onMessage`: The callback function passed to the retry topic consumers for tracking message delivery.
+ * `ensureTopic`: A function that ensures a topic exists before subscribing to it.
+ * `getOrCreateConsumer`: A function that creates or retrieves a consumer instance.
+ * `runningConsumers`: A map of consumer group IDs to their corresponding consumer instances.
+ * `createRetryTxProducer`: A function that creates a retry transactional producer instance.
+ */
  get retryTopicDeps() {
  return {
  logger: this.logger,
@@ -2574,6 +2861,12 @@ var KafkaExplorer = class {
  this.moduleRef = moduleRef;
  }
  logger = new import_common2.Logger(KafkaExplorer.name);
+ /**
+ * Scan all NestJS providers for `@SubscribeTo()` metadata and wire each decorated
+ * method to its Kafka client via `startConsumer` or `startBatchConsumer`.
+ *
+ * Called automatically by the NestJS lifecycle — do not invoke manually.
+ */
  async onModuleInit() {
  const providers = this.discoveryService.getProviders();
  for (const wrapper of providers) {