@drarzter/kafka-client 0.5.4 → 0.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/core.d.mts CHANGED
@@ -1,5 +1,5 @@
- import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, k as KafkaClientOptions, b as TopicDescriptor, n as SendOptions, B as BatchMessageItem, r as TransactionContext, e as EventEnvelope, a as ConsumerOptions, c as BatchMeta } from './envelope-BR8d1m8c.mjs';
- export { d as ConsumerInterceptor, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, f as HEADER_EVENT_ID, g as HEADER_SCHEMA_VERSION, h as HEADER_TIMESTAMP, i as HEADER_TRACEPARENT, j as InferSchema, K as KafkaInstrumentation, l as KafkaLogger, M as MessageHeaders, m as MessageLostContext, R as RetryOptions, S as SchemaLike, o as SubscribeRetryOptions, p as TTopicMessageMap, q as TopicsFrom, s as buildEnvelopeHeaders, t as decodeHeaders, u as extractEnvelope, v as getEnvelopeContext, w as runWithEnvelopeContext, x as topic } from './envelope-BR8d1m8c.mjs';
+ import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, l as KafkaClientOptions, b as TopicDescriptor, o as SendOptions, B as BatchMessageItem, s as TransactionContext, f as EventEnvelope, a as ConsumerOptions, d as ConsumerHandle, c as BatchMeta } from './envelope-BpyKN_WL.mjs';
+ export { e as ConsumerInterceptor, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, g as HEADER_EVENT_ID, h as HEADER_SCHEMA_VERSION, i as HEADER_TIMESTAMP, j as HEADER_TRACEPARENT, k as InferSchema, K as KafkaInstrumentation, m as KafkaLogger, M as MessageHeaders, n as MessageLostContext, R as RetryOptions, S as SchemaLike, p as SubscribeRetryOptions, q as TTopicMessageMap, r as TopicsFrom, t as buildEnvelopeHeaders, u as decodeHeaders, v as extractEnvelope, w as getEnvelopeContext, x as runWithEnvelopeContext, y as topic } from './envelope-BpyKN_WL.mjs';
 
  /**
  * Type-safe Kafka client.
@@ -24,6 +24,7 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  private readonly runningConsumers;
  private readonly instrumentation;
  private readonly onMessageLost;
+ private readonly onRebalance;
  private isAdminConnected;
  readonly clientId: ClientId;
  constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
@@ -39,15 +40,25 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  connectProducer(): Promise<void>;
  disconnectProducer(): Promise<void>;
  /** Subscribe to topics and start consuming messages with the given handler. */
- startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
- startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
+ startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
+ startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
  /** Subscribe to topics and consume messages in batches. */
- startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
- startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
+ startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
+ startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
  stopConsumer(groupId?: string): Promise<void>;
+ /**
+ * Query consumer group lag per partition.
+ * Lag = broker high-watermark − last committed offset.
+ * A committed offset of -1 (nothing committed yet) counts as full lag.
+ */
+ getConsumerLag(groupId?: string): Promise<Array<{
+ topic: string;
+ partition: number;
+ lag: number;
+ }>>;
  /** Check broker connectivity and return status, clientId, and available topics. */
  checkStatus(): Promise<{
- status: 'up';
+ status: "up";
  clientId: string;
  topics: string[];
  }>;
@@ -74,6 +85,11 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  */
  private waitForPartitionAssignment;
  private getOrCreateConsumer;
+ /**
+ * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
+ * The handler itself is not cancelled — the warning is diagnostic only.
+ */
+ private wrapWithTimeoutWarning;
  private resolveTopicName;
  private ensureTopic;
  /** Register schema from descriptor into global registry (side-effect). */
@@ -117,4 +133,4 @@ declare class KafkaRetryExhaustedError extends KafkaProcessingError {
  });
  }
 
- export { BatchMessageItem, BatchMeta, ClientId, ConsumerOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, SendOptions, TopicDescriptor, TopicMapConstraint, TransactionContext };
+ export { BatchMessageItem, BatchMeta, ClientId, ConsumerHandle, ConsumerOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, SendOptions, TopicDescriptor, TopicMapConstraint, TransactionContext };
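
As the declarations above show, startConsumer and startBatchConsumer now resolve to a ConsumerHandle instead of void, and a getConsumerLag method has been added. A minimal usage sketch, assuming the package root re-exports KafkaClient and that plain strings are accepted where ClientId and GroupId are expected; the topic map, brokers, and payload shape are illustrative only:

// Hypothetical usage sketch based on the 0.5.5 declarations above; not taken from the package docs.
import { KafkaClient } from '@drarzter/kafka-client';

type TopicMap = { 'user.created': { userId: string } }; // illustrative topic map

const client = new KafkaClient<TopicMap>('billing-svc', 'billing-group', ['localhost:9092']);

// startConsumer now resolves to a ConsumerHandle rather than void.
const handle = await client.startConsumer(['user.created'], async (envelope) => {
  console.log(envelope.payload.userId);
});

// The handle carries the resolved group id and a stop() shortcut,
// equivalent to client.stopConsumer(handle.groupId).
await handle.stop();
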
package/dist/core.d.ts CHANGED
@@ -1,5 +1,5 @@
- import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, k as KafkaClientOptions, b as TopicDescriptor, n as SendOptions, B as BatchMessageItem, r as TransactionContext, e as EventEnvelope, a as ConsumerOptions, c as BatchMeta } from './envelope-BR8d1m8c.js';
- export { d as ConsumerInterceptor, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, f as HEADER_EVENT_ID, g as HEADER_SCHEMA_VERSION, h as HEADER_TIMESTAMP, i as HEADER_TRACEPARENT, j as InferSchema, K as KafkaInstrumentation, l as KafkaLogger, M as MessageHeaders, m as MessageLostContext, R as RetryOptions, S as SchemaLike, o as SubscribeRetryOptions, p as TTopicMessageMap, q as TopicsFrom, s as buildEnvelopeHeaders, t as decodeHeaders, u as extractEnvelope, v as getEnvelopeContext, w as runWithEnvelopeContext, x as topic } from './envelope-BR8d1m8c.js';
+ import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, l as KafkaClientOptions, b as TopicDescriptor, o as SendOptions, B as BatchMessageItem, s as TransactionContext, f as EventEnvelope, a as ConsumerOptions, d as ConsumerHandle, c as BatchMeta } from './envelope-BpyKN_WL.js';
+ export { e as ConsumerInterceptor, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, g as HEADER_EVENT_ID, h as HEADER_SCHEMA_VERSION, i as HEADER_TIMESTAMP, j as HEADER_TRACEPARENT, k as InferSchema, K as KafkaInstrumentation, m as KafkaLogger, M as MessageHeaders, n as MessageLostContext, R as RetryOptions, S as SchemaLike, p as SubscribeRetryOptions, q as TTopicMessageMap, r as TopicsFrom, t as buildEnvelopeHeaders, u as decodeHeaders, v as extractEnvelope, w as getEnvelopeContext, x as runWithEnvelopeContext, y as topic } from './envelope-BpyKN_WL.js';
 
  /**
  * Type-safe Kafka client.
@@ -24,6 +24,7 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  private readonly runningConsumers;
  private readonly instrumentation;
  private readonly onMessageLost;
+ private readonly onRebalance;
  private isAdminConnected;
  readonly clientId: ClientId;
  constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
@@ -39,15 +40,25 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  connectProducer(): Promise<void>;
  disconnectProducer(): Promise<void>;
  /** Subscribe to topics and start consuming messages with the given handler. */
- startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
- startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
+ startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
+ startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
  /** Subscribe to topics and consume messages in batches. */
- startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
- startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
+ startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
+ startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
  stopConsumer(groupId?: string): Promise<void>;
+ /**
+ * Query consumer group lag per partition.
+ * Lag = broker high-watermark − last committed offset.
+ * A committed offset of -1 (nothing committed yet) counts as full lag.
+ */
+ getConsumerLag(groupId?: string): Promise<Array<{
+ topic: string;
+ partition: number;
+ lag: number;
+ }>>;
  /** Check broker connectivity and return status, clientId, and available topics. */
  checkStatus(): Promise<{
- status: 'up';
+ status: "up";
  clientId: string;
  topics: string[];
  }>;
@@ -74,6 +85,11 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  */
  private waitForPartitionAssignment;
  private getOrCreateConsumer;
+ /**
+ * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
+ * The handler itself is not cancelled — the warning is diagnostic only.
+ */
+ private wrapWithTimeoutWarning;
  private resolveTopicName;
  private ensureTopic;
  /** Register schema from descriptor into global registry (side-effect). */
@@ -117,4 +133,4 @@ declare class KafkaRetryExhaustedError extends KafkaProcessingError {
  });
  }
 
- export { BatchMessageItem, BatchMeta, ClientId, ConsumerOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, SendOptions, TopicDescriptor, TopicMapConstraint, TransactionContext };
+ export { BatchMessageItem, BatchMeta, ClientId, ConsumerHandle, ConsumerOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, SendOptions, TopicDescriptor, TopicMapConstraint, TransactionContext };
package/dist/core.js CHANGED
@@ -172,9 +172,20 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
  originalHeaders: deps.originalHeaders
  });
  } else {
- await deps.onMessageLost?.({ topic: topic2, error: validationError, attempt: 0, headers: deps.originalHeaders ?? {} });
+ await deps.onMessageLost?.({
+ topic: topic2,
+ error: validationError,
+ attempt: 0,
+ headers: deps.originalHeaders ?? {}
+ });
  }
- const errorEnvelope = extractEnvelope(message, deps.originalHeaders ?? {}, topic2, -1, "");
+ const errorEnvelope = extractEnvelope(
+ message,
+ deps.originalHeaders ?? {},
+ topic2,
+ -1,
+ ""
+ );
  for (const interceptor of interceptors) {
  await interceptor.onError?.(errorEnvelope, validationError);
  }
@@ -226,7 +237,10 @@ async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries,
  };
  try {
  for (const raw of rawMessages) {
- await deps.producer.send({ topic: retryTopic, messages: [{ value: raw, headers }] });
+ await deps.producer.send({
+ topic: retryTopic,
+ messages: [{ value: raw, headers }]
+ });
  }
  deps.logger.warn(
  `Message queued in retry topic ${retryTopic} (attempt ${attempt}/${maxRetries})`
@@ -239,7 +253,15 @@
  }
  }
  async function executeWithRetry(fn, ctx, deps) {
- const { envelope, rawMessages, interceptors, dlq, retry, isBatch, retryTopics } = ctx;
+ const {
+ envelope,
+ rawMessages,
+ interceptors,
+ dlq,
+ retry,
+ isBatch,
+ retryTopics
+ } = ctx;
  const maxAttempts = retryTopics ? 1 : retry ? retry.maxRetries + 1 : 1;
  const backoffMs = retry?.backoffMs ?? 1e3;
  const maxBackoffMs = retry?.maxBackoffMs ?? 3e4;
@@ -374,6 +396,7 @@ var KafkaClient = class {
  runningConsumers = /* @__PURE__ */ new Map();
  instrumentation;
  onMessageLost;
+ onRebalance;
  isAdminConnected = false;
  clientId;
  constructor(clientId, groupId, brokers, options) {
@@ -389,6 +412,7 @@
  this.numPartitions = options?.numPartitions ?? 1;
  this.instrumentation = options?.instrumentation ?? [];
  this.onMessageLost = options?.onMessageLost;
+ this.onRebalance = options?.onRebalance;
  this.kafka = new KafkaClass({
  kafkaJS: {
  clientId: this.clientId,
@@ -495,7 +519,13 @@
  );
  }
  const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
- const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
+ const deps = {
+ logger: this.logger,
+ producer: this.producer,
+ instrumentation: this.instrumentation,
+ onMessageLost: this.onMessageLost
+ };
+ const timeoutMs = options.handlerTimeoutMs;
  await consumer.run({
  eachMessage: async ({ topic: topic2, partition, message }) => {
  if (!message.value) {
@@ -524,11 +554,24 @@
  message.offset
  );
  await executeWithRetry(
- () => runWithEnvelopeContext(
- { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
- () => handleMessage(envelope)
- ),
- { envelope, rawMessages: [raw], interceptors, dlq, retry, retryTopics: options.retryTopics },
+ () => {
+ const fn = () => runWithEnvelopeContext(
+ {
+ correlationId: envelope.correlationId,
+ traceparent: envelope.traceparent
+ },
+ () => handleMessage(envelope)
+ );
+ return timeoutMs ? this.wrapWithTimeoutWarning(fn, timeoutMs, topic2) : fn();
+ },
+ {
+ envelope,
+ rawMessages: [raw],
+ interceptors,
+ dlq,
+ retry,
+ retryTopics: options.retryTopics
+ },
  deps
  );
  }
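
The hunk above reads handlerTimeoutMs from the consumer options and, when it is set, routes each handler call through wrapWithTimeoutWarning. A brief opt-in sketch, reusing the illustrative client from the earlier example; the option name comes from the code above, while the timeout value and handler body are illustrative:

// Hypothetical sketch: if the handler has not resolved within handlerTimeoutMs,
// a warning is logged; the handler itself keeps running (diagnostic only).
await client.startConsumer(
  ['user.created'],
  async (envelope) => {
    await new Promise((resolve) => setTimeout(resolve, 5_000)); // simulate slow work
    console.log(envelope.payload.userId);
  },
  { handlerTimeoutMs: 30_000 }
);
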
@@ -545,10 +588,17 @@
  schemaMap
  );
  }
+ return { groupId: gid, stop: () => this.stopConsumer(gid) };
  }
  async startBatchConsumer(topics, handleBatch, options = {}) {
  const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
- const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
+ const deps = {
+ logger: this.logger,
+ producer: this.producer,
+ instrumentation: this.instrumentation,
+ onMessageLost: this.onMessageLost
+ };
+ const timeoutMs = options.handlerTimeoutMs;
  await consumer.run({
  eachBatch: async ({
  batch,
@@ -580,7 +630,13 @@
  );
  if (validated === null) continue;
  envelopes.push(
- extractEnvelope(validated, headers, batch.topic, batch.partition, message.offset)
+ extractEnvelope(
+ validated,
+ headers,
+ batch.topic,
+ batch.partition,
+ message.offset
+ )
  );
  rawMessages.push(raw);
  }
@@ -593,7 +649,10 @@
  commitOffsetsIfNecessary
  };
  await executeWithRetry(
- () => handleBatch(envelopes, meta),
+ () => {
+ const fn = () => handleBatch(envelopes, meta);
+ return timeoutMs ? this.wrapWithTimeoutWarning(fn, timeoutMs, batch.topic) : fn();
+ },
  {
  envelope: envelopes,
  rawMessages: batch.messages.filter((m) => m.value).map((m) => m.value.toString()),
@@ -607,13 +666,16 @@
  }
  });
  this.runningConsumers.set(gid, "eachBatch");
+ return { groupId: gid, stop: () => this.stopConsumer(gid) };
  }
  // ── Consumer lifecycle ───────────────────────────────────────────
  async stopConsumer(groupId) {
  if (groupId !== void 0) {
  const consumer = this.consumers.get(groupId);
  if (!consumer) {
- this.logger.warn(`stopConsumer: no active consumer for group "${groupId}"`);
+ this.logger.warn(
+ `stopConsumer: no active consumer for group "${groupId}"`
+ );
  return;
  }
  await consumer.disconnect().catch(() => {
@@ -632,6 +694,32 @@
  this.logger.log("All consumers disconnected");
  }
  }
+ /**
+ * Query consumer group lag per partition.
+ * Lag = broker high-watermark − last committed offset.
+ * A committed offset of -1 (nothing committed yet) counts as full lag.
+ */
+ async getConsumerLag(groupId) {
+ const gid = groupId ?? this.defaultGroupId;
+ if (!this.isAdminConnected) {
+ await this.admin.connect();
+ this.isAdminConnected = true;
+ }
+ const committedByTopic = await this.admin.fetchOffsets({ groupId: gid });
+ const result = [];
+ for (const { topic: topic2, partitions } of committedByTopic) {
+ const brokerOffsets = await this.admin.fetchTopicOffsets(topic2);
+ for (const { partition, offset } of partitions) {
+ const broker = brokerOffsets.find((o) => o.partition === partition);
+ if (!broker) continue;
+ const committed = parseInt(offset, 10);
+ const high = parseInt(broker.high, 10);
+ const lag = committed === -1 ? high : Math.max(0, high - committed);
+ result.push({ topic: topic2, partition, lag });
+ }
+ }
+ return result;
+ }
  /** Check broker connectivity and return status, clientId, and available topics. */
  async checkStatus() {
  if (!this.isAdminConnected) {
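
getConsumerLag, added in the hunk above, fetches the group's committed offsets, compares them with the broker high watermarks, and reports lag per partition. A small monitoring sketch, again reusing the illustrative client; the threshold and logging are illustrative:

// Hypothetical sketch using the getConsumerLag method added above.
// With no argument it falls back to the client's default group id;
// partitions with nothing committed yet (offset -1) report the full high watermark as lag.
const lagByPartition = await client.getConsumerLag();
for (const { topic, partition, lag } of lagByPartition) {
  if (lag > 1000) {
    console.warn(`${topic}[${partition}] is ${lag} messages behind`);
  }
}
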
@@ -740,19 +828,30 @@
  const c = inst.beforeConsume?.(envelope);
  if (typeof c === "function") cleanups.push(c);
  }
- for (const interceptor of interceptors) await interceptor.before?.(envelope);
+ for (const interceptor of interceptors)
+ await interceptor.before?.(envelope);
  await runWithEnvelopeContext(
- { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
+ {
+ correlationId: envelope.correlationId,
+ traceparent: envelope.traceparent
+ },
  () => handleMessage(envelope)
  );
- for (const interceptor of interceptors) await interceptor.after?.(envelope);
+ for (const interceptor of interceptors)
+ await interceptor.after?.(envelope);
  for (const cleanup of cleanups) cleanup();
  } catch (error) {
  const err = toError(error);
  const nextAttempt = currentAttempt + 1;
  const exhausted = currentAttempt >= maxRetries;
- for (const inst of this.instrumentation) inst.onConsumeError?.(envelope, err);
- const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(originalTopic, [envelope.payload], maxRetries, { cause: err }) : err;
+ for (const inst of this.instrumentation)
+ inst.onConsumeError?.(envelope, err);
+ const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(
+ originalTopic,
+ [envelope.payload],
+ maxRetries,
+ { cause: err }
+ ) : err;
  for (const interceptor of interceptors) {
  await interceptor.onError?.(envelope, reportedError);
  }
@@ -823,15 +922,48 @@
  }
  getOrCreateConsumer(groupId, fromBeginning, autoCommit) {
  if (!this.consumers.has(groupId)) {
- this.consumers.set(
- groupId,
- this.kafka.consumer({
- kafkaJS: { groupId, fromBeginning, autoCommit }
- })
- );
+ const config = {
+ kafkaJS: { groupId, fromBeginning, autoCommit }
+ };
+ if (this.onRebalance) {
+ const onRebalance = this.onRebalance;
+ config["rebalance_cb"] = (err, assignment) => {
+ const type = err.code === -175 ? "assign" : "revoke";
+ try {
+ onRebalance(
+ type,
+ assignment.map((p) => ({
+ topic: p.topic,
+ partition: p.partition
+ }))
+ );
+ } catch (e) {
+ this.logger.warn(
+ `onRebalance callback threw: ${e.message}`
+ );
+ }
+ };
+ }
+ this.consumers.set(groupId, this.kafka.consumer(config));
  }
  return this.consumers.get(groupId);
  }
+ /**
+ * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
+ * The handler itself is not cancelled — the warning is diagnostic only.
+ */
+ wrapWithTimeoutWarning(fn, timeoutMs, topic2) {
+ let timer;
+ const promise = fn().finally(() => {
+ if (timer !== void 0) clearTimeout(timer);
+ });
+ timer = setTimeout(() => {
+ this.logger.warn(
+ `Handler for topic "${topic2}" has not resolved after ${timeoutMs}ms \u2014 possible stuck handler`
+ );
+ }, timeoutMs);
+ return promise;
+ }
  resolveTopicName(topicOrDescriptor) {
  if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
  if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
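
The hunk above wires the optional onRebalance callback (read from the client options earlier in this diff) into the underlying consumer's rebalance_cb, translating librdkafka's assign/revoke notification into a simple ('assign' | 'revoke', partitions) call. A sketch of providing the callback; the option placement and payload shape follow the code above, while the client arguments remain illustrative:

// Hypothetical sketch: onRebalance comes from KafkaClientOptions
// (see `this.onRebalance = options?.onRebalance` earlier in this diff).
const rebalancingClient = new KafkaClient<TopicMap>('billing-svc', 'billing-group', ['localhost:9092'], {
  onRebalance: (type, partitions) => {
    // type is "assign" or "revoke"; partitions is an array of { topic, partition }
    console.log(`rebalance ${type}: ${partitions.map((p) => `${p.topic}[${p.partition}]`).join(', ')}`);
  },
});
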
@@ -888,7 +1020,9 @@
  inst.beforeSend?.(topic2, envelopeHeaders);
  }
  return {
- value: JSON.stringify(await this.validateMessage(topicOrDesc, m.value)),
+ value: JSON.stringify(
+ await this.validateMessage(topicOrDesc, m.value)
+ ),
  key: m.key ?? null,
  headers: envelopeHeaders
  };
@@ -914,7 +1048,11 @@
  `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
  );
  }
- const consumer = this.getOrCreateConsumer(gid, fromBeginning, options.autoCommit ?? true);
+ const consumer = this.getOrCreateConsumer(
+ gid,
+ fromBeginning,
+ options.autoCommit ?? true
+ );
  const schemaMap = this.buildSchemaMap(topics, optionSchemas);
  const topicNames = topics.map(
  (t) => this.resolveTopicName(t)
@@ -928,7 +1066,12 @@
  }
  }
  await consumer.connect();
- await subscribeWithRetry(consumer, topicNames, this.logger, options.subscribeRetry);
+ await subscribeWithRetry(
+ consumer,
+ topicNames,
+ this.logger,
+ options.subscribeRetry
+ );
  this.logger.log(
  `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
  );