@drarzter/kafka-client 0.5.2 → 0.5.5

package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { KafkaClient } from './core.js';
  export { KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.js';
- import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, S as SchemaLike, a as ConsumerOptions, b as TopicDescriptor } from './envelope-C66_h8r_.js';
- export { B as BatchMessageItem, c as BatchMeta, d as ConsumerInterceptor, E as EnvelopeHeaderOptions, e as EventEnvelope, H as HEADER_CORRELATION_ID, f as HEADER_EVENT_ID, g as HEADER_SCHEMA_VERSION, h as HEADER_TIMESTAMP, i as HEADER_TRACEPARENT, I as IKafkaClient, j as InferSchema, k as KafkaClientOptions, l as KafkaLogger, M as MessageHeaders, m as MessageLostContext, R as RetryOptions, n as SendOptions, o as SubscribeRetryOptions, p as TTopicMessageMap, q as TopicsFrom, r as TransactionContext, s as buildEnvelopeHeaders, t as decodeHeaders, u as extractEnvelope, v as getEnvelopeContext, w as runWithEnvelopeContext, x as topic } from './envelope-C66_h8r_.js';
+ import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, S as SchemaLike, a as ConsumerOptions, b as TopicDescriptor } from './envelope-BpyKN_WL.js';
+ export { B as BatchMessageItem, c as BatchMeta, d as ConsumerHandle, e as ConsumerInterceptor, E as EnvelopeHeaderOptions, f as EventEnvelope, H as HEADER_CORRELATION_ID, g as HEADER_EVENT_ID, h as HEADER_SCHEMA_VERSION, i as HEADER_TIMESTAMP, j as HEADER_TRACEPARENT, I as IKafkaClient, k as InferSchema, l as KafkaClientOptions, m as KafkaLogger, M as MessageHeaders, n as MessageLostContext, R as RetryOptions, o as SendOptions, p as SubscribeRetryOptions, q as TTopicMessageMap, r as TopicsFrom, s as TransactionContext, t as buildEnvelopeHeaders, u as decodeHeaders, v as extractEnvelope, w as getEnvelopeContext, x as runWithEnvelopeContext, y as topic } from './envelope-BpyKN_WL.js';
  import { DynamicModule, OnModuleInit } from '@nestjs/common';
  import { DiscoveryService, ModuleRef } from '@nestjs/core';

@@ -83,12 +83,15 @@ declare class KafkaExplorer implements OnModuleInit {
  }

  /** Result returned by `KafkaHealthIndicator.check()`. */
- interface KafkaHealthResult {
- status: "up" | "down";
+ type KafkaHealthResult = {
+ status: "up";
  clientId: string;
- topics?: string[];
- error?: string;
- }
+ topics: string[];
+ } | {
+ status: "down";
+ clientId: string;
+ error: string;
+ };
  /** Health check service. Call `check(client)` to verify broker connectivity. */
  declare class KafkaHealthIndicator {
  check<T extends TopicMapConstraint<T>>(client: KafkaClient<T>): Promise<KafkaHealthResult>;
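
KafkaHealthResult is now a discriminated union on `status` rather than an interface with optional fields, so callers can narrow the result instead of checking for undefined. A minimal sketch, assuming an existing `client` instance:

    const indicator = new KafkaHealthIndicator();
    const result = await indicator.check(client);
    if (result.status === "up") {
      // topics is present and required on the "up" branch
      console.log(`${result.clientId} up; topics: ${result.topics.join(", ")}`);
    } else {
      // error is present and required on the "down" branch
      console.error(`${result.clientId} down: ${result.error}`);
    }
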
package/dist/index.js CHANGED
@@ -189,9 +189,20 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
  originalHeaders: deps.originalHeaders
  });
  } else {
- await deps.onMessageLost?.({ topic: topic2, error: validationError, attempt: 0, headers: deps.originalHeaders ?? {} });
+ await deps.onMessageLost?.({
+ topic: topic2,
+ error: validationError,
+ attempt: 0,
+ headers: deps.originalHeaders ?? {}
+ });
  }
- const errorEnvelope = extractEnvelope(message, deps.originalHeaders ?? {}, topic2, -1, "");
+ const errorEnvelope = extractEnvelope(
+ message,
+ deps.originalHeaders ?? {},
+ topic2,
+ -1,
+ ""
+ );
  for (const interceptor of interceptors) {
  await interceptor.onError?.(errorEnvelope, validationError);
  }
@@ -221,9 +232,54 @@ async function sendToDlq(topic2, rawMessage, deps, meta) {
  );
  }
  }
+ var RETRY_HEADER_ATTEMPT = "x-retry-attempt";
+ var RETRY_HEADER_AFTER = "x-retry-after";
+ var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
+ var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
+ async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
+ const retryTopic = `${originalTopic}.retry`;
+ const {
+ [RETRY_HEADER_ATTEMPT]: _a,
+ [RETRY_HEADER_AFTER]: _b,
+ [RETRY_HEADER_MAX_RETRIES]: _c,
+ [RETRY_HEADER_ORIGINAL_TOPIC]: _d,
+ ...userHeaders
+ } = originalHeaders;
+ const headers = {
+ ...userHeaders,
+ [RETRY_HEADER_ATTEMPT]: String(attempt),
+ [RETRY_HEADER_AFTER]: String(Date.now() + delayMs),
+ [RETRY_HEADER_MAX_RETRIES]: String(maxRetries),
+ [RETRY_HEADER_ORIGINAL_TOPIC]: originalTopic
+ };
+ try {
+ for (const raw of rawMessages) {
+ await deps.producer.send({
+ topic: retryTopic,
+ messages: [{ value: raw, headers }]
+ });
+ }
+ deps.logger.warn(
+ `Message queued in retry topic ${retryTopic} (attempt ${attempt}/${maxRetries})`
+ );
+ } catch (error) {
+ deps.logger.error(
+ `Failed to send message to retry topic ${retryTopic}:`,
+ toError(error).stack
+ );
+ }
+ }
  async function executeWithRetry(fn, ctx, deps) {
- const { envelope, rawMessages, interceptors, dlq, retry, isBatch } = ctx;
- const maxAttempts = retry ? retry.maxRetries + 1 : 1;
+ const {
+ envelope,
+ rawMessages,
+ interceptors,
+ dlq,
+ retry,
+ isBatch,
+ retryTopics
+ } = ctx;
+ const maxAttempts = retryTopics ? 1 : retry ? retry.maxRetries + 1 : 1;
  const backoffMs = retry?.backoffMs ?? 1e3;
  const maxBackoffMs = retry?.maxBackoffMs ?? 3e4;
  const envelopes = Array.isArray(envelope) ? envelope : [envelope];
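
To illustrate the new header scheme: a message from a hypothetical topic `orders.created` routed through `sendToRetryTopic` lands on `orders.created.retry` carrying headers along these lines (values are illustrative; any previous x-retry-* headers are stripped before the new ones are set):

    {
      "x-request-id": "abc-123",                  // hypothetical user header, carried over unchanged
      "x-retry-attempt": "1",                     // String(attempt)
      "x-retry-after": "1714070400000",           // String(Date.now() + delayMs), the wake-up time read by the retry consumer
      "x-retry-max-retries": "3",                 // String(maxRetries)
      "x-retry-original-topic": "orders.created"
    }
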
@@ -282,7 +338,19 @@ async function executeWithRetry(fn, ctx, deps) {
  `Error processing ${isBatch ? "batch" : "message"} from topic ${topic2} (attempt ${attempt}/${maxAttempts}):`,
  err.stack
  );
- if (isLastAttempt) {
+ if (retryTopics && retry) {
+ const cap = Math.min(backoffMs, maxBackoffMs);
+ const delay = Math.floor(Math.random() * cap);
+ await sendToRetryTopic(
+ topic2,
+ rawMessages,
+ 1,
+ retry.maxRetries,
+ delay,
+ envelopes[0]?.headers ?? {},
+ deps
+ );
+ } else if (isLastAttempt) {
  if (dlq) {
  const dlqMeta = {
  error: err,
@@ -345,6 +413,7 @@ var KafkaClient = class {
  runningConsumers = /* @__PURE__ */ new Map();
  instrumentation;
  onMessageLost;
+ onRebalance;
  isAdminConnected = false;
  clientId;
  constructor(clientId, groupId, brokers, options) {
@@ -360,6 +429,7 @@ var KafkaClient = class {
  this.numPartitions = options?.numPartitions ?? 1;
  this.instrumentation = options?.instrumentation ?? [];
  this.onMessageLost = options?.onMessageLost;
+ this.onRebalance = options?.onRebalance;
  this.kafka = new KafkaClass({
  kafkaJS: {
  clientId: this.clientId,
@@ -460,8 +530,19 @@ var KafkaClient = class {
  this.logger.log("Producer disconnected");
  }
  async startConsumer(topics, handleMessage, options = {}) {
- const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
- const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
+ if (options.retryTopics && !options.retry) {
+ throw new Error(
+ "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
+ );
+ }
+ const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
+ const deps = {
+ logger: this.logger,
+ producer: this.producer,
+ instrumentation: this.instrumentation,
+ onMessageLost: this.onMessageLost
+ };
+ const timeoutMs = options.handlerTimeoutMs;
  await consumer.run({
  eachMessage: async ({ topic: topic2, partition, message }) => {
  if (!message.value) {
@@ -490,20 +571,51 @@ var KafkaClient = class {
  message.offset
  );
  await executeWithRetry(
- () => runWithEnvelopeContext(
- { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
- () => handleMessage(envelope)
- ),
- { envelope, rawMessages: [raw], interceptors, dlq, retry },
+ () => {
+ const fn = () => runWithEnvelopeContext(
+ {
+ correlationId: envelope.correlationId,
+ traceparent: envelope.traceparent
+ },
+ () => handleMessage(envelope)
+ );
+ return timeoutMs ? this.wrapWithTimeoutWarning(fn, timeoutMs, topic2) : fn();
+ },
+ {
+ envelope,
+ rawMessages: [raw],
+ interceptors,
+ dlq,
+ retry,
+ retryTopics: options.retryTopics
+ },
  deps
  );
  }
  });
  this.runningConsumers.set(gid, "eachMessage");
+ if (options.retryTopics && retry) {
+ await this.startRetryTopicConsumers(
+ topicNames,
+ gid,
+ handleMessage,
+ retry,
+ dlq,
+ interceptors,
+ schemaMap
+ );
+ }
+ return { groupId: gid, stop: () => this.stopConsumer(gid) };
  }
  async startBatchConsumer(topics, handleBatch, options = {}) {
  const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
- const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
+ const deps = {
+ logger: this.logger,
+ producer: this.producer,
+ instrumentation: this.instrumentation,
+ onMessageLost: this.onMessageLost
+ };
+ const timeoutMs = options.handlerTimeoutMs;
  await consumer.run({
  eachBatch: async ({
  batch,
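
startConsumer now rejects `retryTopics` without `retry`, threads `handlerTimeoutMs` into the handler call, and returns a handle. A minimal usage sketch, assuming an existing `client`, a hypothetical topic, and a hypothetical processOrder function:

    const handle = await client.startConsumer(
      ["orders.created"],
      async (envelope) => processOrder(envelope.payload),
      {
        retry: { maxRetries: 3, backoffMs: 1000 }, // required when retryTopics is true
        retryTopics: true,                         // failures are re-queued on "orders.created.retry"
        handlerTimeoutMs: 30000                    // logs a warning (does not cancel) after 30s
      }
    );
    // handle is { groupId, stop }; stop() disconnects only this consumer group
    await handle.stop();
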
@@ -535,7 +647,13 @@ var KafkaClient = class {
  );
  if (validated === null) continue;
  envelopes.push(
- extractEnvelope(validated, headers, batch.topic, batch.partition, message.offset)
+ extractEnvelope(
+ validated,
+ headers,
+ batch.topic,
+ batch.partition,
+ message.offset
+ )
  );
  rawMessages.push(raw);
  }
@@ -548,7 +666,10 @@ var KafkaClient = class {
  commitOffsetsIfNecessary
  };
  await executeWithRetry(
- () => handleBatch(envelopes, meta),
+ () => {
+ const fn = () => handleBatch(envelopes, meta);
+ return timeoutMs ? this.wrapWithTimeoutWarning(fn, timeoutMs, batch.topic) : fn();
+ },
  {
  envelope: envelopes,
  rawMessages: batch.messages.filter((m) => m.value).map((m) => m.value.toString()),
@@ -562,17 +683,59 @@ var KafkaClient = class {
  }
  });
  this.runningConsumers.set(gid, "eachBatch");
+ return { groupId: gid, stop: () => this.stopConsumer(gid) };
  }
  // ── Consumer lifecycle ───────────────────────────────────────────
- async stopConsumer() {
- const tasks = [];
- for (const consumer of this.consumers.values()) {
- tasks.push(consumer.disconnect());
+ async stopConsumer(groupId) {
+ if (groupId !== void 0) {
+ const consumer = this.consumers.get(groupId);
+ if (!consumer) {
+ this.logger.warn(
+ `stopConsumer: no active consumer for group "${groupId}"`
+ );
+ return;
+ }
+ await consumer.disconnect().catch(() => {
+ });
+ this.consumers.delete(groupId);
+ this.runningConsumers.delete(groupId);
+ this.logger.log(`Consumer disconnected: group "${groupId}"`);
+ } else {
+ const tasks = Array.from(this.consumers.values()).map(
+ (c) => c.disconnect().catch(() => {
+ })
+ );
+ await Promise.allSettled(tasks);
+ this.consumers.clear();
+ this.runningConsumers.clear();
+ this.logger.log("All consumers disconnected");
  }
- await Promise.allSettled(tasks);
- this.consumers.clear();
- this.runningConsumers.clear();
- this.logger.log("All consumers disconnected");
+ }
+ /**
+ * Query consumer group lag per partition.
+ * Lag = broker high-watermark − last committed offset.
+ * A committed offset of -1 (nothing committed yet) counts as full lag.
+ */
+ async getConsumerLag(groupId) {
+ const gid = groupId ?? this.defaultGroupId;
+ if (!this.isAdminConnected) {
+ await this.admin.connect();
+ this.isAdminConnected = true;
+ }
+ const committedByTopic = await this.admin.fetchOffsets({ groupId: gid });
+ const result = [];
+ for (const { topic: topic2, partitions } of committedByTopic) {
+ const brokerOffsets = await this.admin.fetchTopicOffsets(topic2);
+ for (const { partition, offset } of partitions) {
+ const broker = brokerOffsets.find((o) => o.partition === partition);
+ if (!broker) continue;
+ const committed = parseInt(offset, 10);
+ const high = parseInt(broker.high, 10);
+ const lag = committed === -1 ? high : Math.max(0, high - committed);
+ result.push({ topic: topic2, partition, lag });
+ }
+ }
+ return result;
  }
  /** Check broker connectivity and return status, clientId, and available topics. */
  async checkStatus() {
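
The new getConsumerLag returns one entry per partition with the shape `{ topic, partition, lag }`, and stopConsumer now optionally takes a group id. A sketch, assuming an existing `client` and a hypothetical group id:

    for (const { topic, partition, lag } of await client.getConsumerLag()) {
      console.log(`${topic}[${partition}] lag: ${lag}`);  // lag = high-watermark minus committed offset
    }
    await client.stopConsumer("orders-group-retry");       // stop a single consumer group
    await client.stopConsumer();                           // no argument: stop all consumers, as before
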
@@ -605,18 +768,219 @@ var KafkaClient = class {
  this.runningConsumers.clear();
  this.logger.log("All connections closed");
  }
+ // ── Retry topic chain ────────────────────────────────────────────
+ /**
+ * Auto-start companion consumers on `<topic>.retry` for each original topic.
+ * Called by `startConsumer` when `retryTopics: true`.
+ *
+ * Flow per message:
+ * 1. Sleep until `x-retry-after` (scheduled by the main consumer or previous retry hop)
+ * 2. Call the original handler
+ * 3. On failure: if retries remain → re-send to `<originalTopic>.retry` with incremented attempt
+ * if exhausted → DLQ or onMessageLost
+ */
+ async startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap) {
+ const retryTopicNames = originalTopics.map((t) => `${t}.retry`);
+ const retryGroupId = `${originalGroupId}-retry`;
+ const backoffMs = retry.backoffMs ?? 1e3;
+ const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
+ const deps = {
+ logger: this.logger,
+ producer: this.producer,
+ instrumentation: this.instrumentation,
+ onMessageLost: this.onMessageLost
+ };
+ for (const rt of retryTopicNames) {
+ await this.ensureTopic(rt);
+ }
+ const consumer = this.getOrCreateConsumer(retryGroupId, false, true);
+ await consumer.connect();
+ await subscribeWithRetry(consumer, retryTopicNames, this.logger);
+ await consumer.run({
+ eachMessage: async ({ topic: retryTopic, partition, message }) => {
+ if (!message.value) return;
+ const raw = message.value.toString();
+ const parsed = parseJsonMessage(raw, retryTopic, this.logger);
+ if (parsed === null) return;
+ const headers = decodeHeaders(message.headers);
+ const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? retryTopic.replace(/\.retry$/, "");
+ const currentAttempt = parseInt(
+ headers[RETRY_HEADER_ATTEMPT] ?? "1",
+ 10
+ );
+ const maxRetries = parseInt(
+ headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
+ 10
+ );
+ const retryAfter = parseInt(
+ headers[RETRY_HEADER_AFTER] ?? "0",
+ 10
+ );
+ const remaining = retryAfter - Date.now();
+ if (remaining > 0) {
+ consumer.pause([{ topic: retryTopic, partitions: [partition] }]);
+ await sleep(remaining);
+ consumer.resume([{ topic: retryTopic, partitions: [partition] }]);
+ }
+ const validated = await validateWithSchema(
+ parsed,
+ raw,
+ originalTopic,
+ schemaMap,
+ interceptors,
+ dlq,
+ { ...deps, originalHeaders: headers }
+ );
+ if (validated === null) return;
+ const envelope = extractEnvelope(
+ validated,
+ headers,
+ originalTopic,
+ partition,
+ message.offset
+ );
+ try {
+ const cleanups = [];
+ for (const inst of this.instrumentation) {
+ const c = inst.beforeConsume?.(envelope);
+ if (typeof c === "function") cleanups.push(c);
+ }
+ for (const interceptor of interceptors)
+ await interceptor.before?.(envelope);
+ await runWithEnvelopeContext(
+ {
+ correlationId: envelope.correlationId,
+ traceparent: envelope.traceparent
+ },
+ () => handleMessage(envelope)
+ );
+ for (const interceptor of interceptors)
+ await interceptor.after?.(envelope);
+ for (const cleanup of cleanups) cleanup();
+ } catch (error) {
+ const err = toError(error);
+ const nextAttempt = currentAttempt + 1;
+ const exhausted = currentAttempt >= maxRetries;
+ for (const inst of this.instrumentation)
+ inst.onConsumeError?.(envelope, err);
+ const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(
+ originalTopic,
+ [envelope.payload],
+ maxRetries,
+ { cause: err }
+ ) : err;
+ for (const interceptor of interceptors) {
+ await interceptor.onError?.(envelope, reportedError);
+ }
+ this.logger.error(
+ `Retry consumer error for ${originalTopic} (attempt ${currentAttempt}/${maxRetries}):`,
+ err.stack
+ );
+ if (!exhausted) {
+ const cap = Math.min(backoffMs * 2 ** currentAttempt, maxBackoffMs);
+ const delay = Math.floor(Math.random() * cap);
+ await sendToRetryTopic(
+ originalTopic,
+ [raw],
+ nextAttempt,
+ maxRetries,
+ delay,
+ headers,
+ deps
+ );
+ } else if (dlq) {
+ await sendToDlq(originalTopic, raw, deps, {
+ error: err,
+ // +1 to account for the main consumer's initial attempt before
+ // routing to the retry topic, making this consistent with the
+ // in-process retry path where attempt counts all tries.
+ attempt: currentAttempt + 1,
+ originalHeaders: headers
+ });
+ } else {
+ await deps.onMessageLost?.({
+ topic: originalTopic,
+ error: err,
+ attempt: currentAttempt,
+ headers
+ });
+ }
+ }
+ }
+ });
+ this.runningConsumers.set(retryGroupId, "eachMessage");
+ await this.waitForPartitionAssignment(consumer, retryTopicNames);
+ this.logger.log(
+ `Retry topic consumers started for: ${originalTopics.join(", ")} (group: ${retryGroupId})`
+ );
+ }
  // ── Private helpers ──────────────────────────────────────────────
+ /**
+ * Poll `consumer.assignment()` until the consumer has received at least one
+ * partition for the given topics, then return. Logs a warning and returns
+ * (rather than throwing) on timeout so that a slow broker does not break
+ * the caller — in the worst case a message sent immediately after would be
+ * missed, which is the same behaviour as before this guard was added.
+ */
+ async waitForPartitionAssignment(consumer, topics, timeoutMs = 1e4) {
+ const topicSet = new Set(topics);
+ const deadline = Date.now() + timeoutMs;
+ while (Date.now() < deadline) {
+ try {
+ const assigned = consumer.assignment();
+ if (assigned.some((a) => topicSet.has(a.topic))) return;
+ } catch {
+ }
+ await sleep(200);
+ }
+ this.logger.warn(
+ `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
+ );
+ }
  getOrCreateConsumer(groupId, fromBeginning, autoCommit) {
  if (!this.consumers.has(groupId)) {
- this.consumers.set(
- groupId,
- this.kafka.consumer({
- kafkaJS: { groupId, fromBeginning, autoCommit }
- })
- );
+ const config = {
+ kafkaJS: { groupId, fromBeginning, autoCommit }
+ };
+ if (this.onRebalance) {
+ const onRebalance = this.onRebalance;
+ config["rebalance_cb"] = (err, assignment) => {
+ const type = err.code === -175 ? "assign" : "revoke";
+ try {
+ onRebalance(
+ type,
+ assignment.map((p) => ({
+ topic: p.topic,
+ partition: p.partition
+ }))
+ );
+ } catch (e) {
+ this.logger.warn(
+ `onRebalance callback threw: ${e.message}`
+ );
+ }
+ };
+ }
+ this.consumers.set(groupId, this.kafka.consumer(config));
  }
  return this.consumers.get(groupId);
  }
+ /**
+ * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
+ * The handler itself is not cancelled — the warning is diagnostic only.
+ */
+ wrapWithTimeoutWarning(fn, timeoutMs, topic2) {
+ let timer;
+ const promise = fn().finally(() => {
+ if (timer !== void 0) clearTimeout(timer);
+ });
+ timer = setTimeout(() => {
+ this.logger.warn(
+ `Handler for topic "${topic2}" has not resolved after ${timeoutMs}ms \u2014 possible stuck handler`
+ );
+ }, timeoutMs);
+ return promise;
+ }
  resolveTopicName(topicOrDescriptor) {
  if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
  if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
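
The new onRebalance option is wired to the underlying consumer's `rebalance_cb` and receives either "assign" or "revoke" plus the affected partitions. A sketch of passing it through the client options (broker address and names are hypothetical, and the constructor argument order follows the diff above):

    const client = new KafkaClient("orders-service", "orders-group", ["localhost:9092"], {
      onRebalance: (type, partitions) => {
        // type: "assign" | "revoke"; partitions: Array<{ topic: string; partition: number }>
        console.log(`rebalance ${type}: ${partitions.map((p) => `${p.topic}[${p.partition}]`).join(", ")}`);
      }
    });
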
@@ -673,7 +1037,9 @@ var KafkaClient = class {
  inst.beforeSend?.(topic2, envelopeHeaders);
  }
  return {
- value: JSON.stringify(await this.validateMessage(topicOrDesc, m.value)),
+ value: JSON.stringify(
+ await this.validateMessage(topicOrDesc, m.value)
+ ),
  key: m.key ?? null,
  headers: envelopeHeaders
  };
@@ -699,7 +1065,11 @@ var KafkaClient = class {
  `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
  );
  }
- const consumer = this.getOrCreateConsumer(gid, fromBeginning, options.autoCommit ?? true);
+ const consumer = this.getOrCreateConsumer(
+ gid,
+ fromBeginning,
+ options.autoCommit ?? true
+ );
  const schemaMap = this.buildSchemaMap(topics, optionSchemas);
  const topicNames = topics.map(
  (t) => this.resolveTopicName(t)
@@ -713,7 +1083,12 @@ var KafkaClient = class {
  }
  }
  await consumer.connect();
- await subscribeWithRetry(consumer, topicNames, this.logger, options.subscribeRetry);
+ await subscribeWithRetry(
+ consumer,
+ topicNames,
+ this.logger,
+ options.subscribeRetry
+ );
  this.logger.log(
  `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
  );