@drarzter/kafka-client 0.5.6 → 0.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -112,7 +112,7 @@ For standalone usage (Express, Fastify, raw Node), no extra dependencies needed
112
112
  ```typescript
113
113
  import { KafkaClient, topic } from '@drarzter/kafka-client/core';
114
114
 
115
- const OrderCreated = topic('order.created')<{ orderId: string; amount: number }>();
115
+ const OrderCreated = topic('order.created').type<{ orderId: string; amount: number }>();
116
116
 
117
117
  const kafka = new KafkaClient('my-app', 'my-group', ['localhost:9092']);
118
118
  await kafka.connectProducer();
@@ -245,13 +245,13 @@ Instead of a centralized topic map, define each topic as a standalone typed obje
245
245
  ```typescript
246
246
  import { topic, TopicsFrom } from '@drarzter/kafka-client';
247
247
 
248
- export const OrderCreated = topic('order.created')<{
248
+ export const OrderCreated = topic('order.created').type<{
249
249
  orderId: string;
250
250
  userId: string;
251
251
  amount: number;
252
252
  }>();
253
253
 
254
- export const OrderCompleted = topic('order.completed')<{
254
+ export const OrderCompleted = topic('order.completed').type<{
255
255
  orderId: string;
256
256
  completedAt: string;
257
257
  }>();
@@ -713,8 +713,9 @@ Options for `sendMessage()` — the third argument:
713
713
  | `retry.backoffMs` | `1000` | Base delay for exponential backoff in ms |
714
714
  | `retry.maxBackoffMs` | `30000` | Maximum delay cap for exponential backoff in ms |
715
715
  | `dlq` | `false` | Send to `{topic}.dlq` after all retries exhausted — message carries `x-dlq-*` metadata headers |
716
- | `retryTopics` | `false` | Route failed messages through `{topic}.retry` instead of sleeping in-process (see [Retry topic chain](#retry-topic-chain)) |
716
+ | `retryTopics` | `false` | Route failed messages through per-level topics (`{topic}.retry.1`, `{topic}.retry.2`, …) instead of sleeping in-process; at-least-once semantics; requires `retry` (see [Retry topic chain](#retry-topic-chain)) |
717
717
  | `interceptors` | `[]` | Array of before/after/onError hooks |
718
+ | `retryTopicAssignmentTimeoutMs` | `10000` | Timeout (ms) to wait for each retry level consumer to receive partition assignments after connecting; increase for slow brokers |
718
719
  | `handlerTimeoutMs` | — | Log a warning if the handler hasn't resolved within this window (ms) — does not cancel the handler |
719
720
  | `batch` | `false` | (decorator only) Use `startBatchConsumer` instead of `startConsumer` |
720
721
  | `subscribeRetry.retries` | `5` | Max attempts for `consumer.subscribe()` when topic doesn't exist yet |
@@ -826,12 +827,13 @@ const interceptor: ConsumerInterceptor<MyTopics> = {
826
827
 
827
828
  ## Retry topic chain
828
829
 
829
- By default, retry is handled in-process: the consumer sleeps between attempts while holding the partition. With `retryTopics: true`, failed messages are routed to a `<topic>.retry` Kafka topic instead. A companion consumer auto-starts on `<topic>.retry` (group `<groupId>-retry`), waits until the scheduled retry time, then calls the same handler.
830
+ By default, retry is handled in-process: the consumer sleeps between attempts while holding the partition. With `retryTopics: true`, failed messages are routed through a chain of Kafka topics instead — one topic per retry level. A companion consumer auto-starts per level, waits for the scheduled delay using partition pause/resume, then calls the same handler.
830
831
 
831
832
  Benefits over in-process retry:
832
833
 
833
- - **Durable** — retry messages survive a consumer restart
834
- - **Non-blocking** — the original consumer is free immediately; the retry consumer pauses only the specific partition being delayed, so other partitions continue processing
834
+ - **Durable** — retry messages survive a consumer restart (at-least-once semantics)
835
+ - **Non-blocking** — the original consumer is free immediately; each level consumer only pauses its specific partition during the delay window, so other partitions continue processing
836
+ - **Isolated** — each retry level has its own consumer group, so a slow level 3 consumer never blocks a level 1 consumer
835
837
 
836
838
  ```typescript
837
839
  await kafka.startConsumer(['orders.created'], handler, {
@@ -841,18 +843,32 @@ await kafka.startConsumer(['orders.created'], handler, {
841
843
  });
842
844
  ```
843
845
 
844
- Message flow with `maxRetries: 2`:
846
+ With `maxRetries: 3`, this creates three dedicated topics and three companion consumers:
845
847
 
846
848
  ```text
847
- orders.created → handler fails → orders.created.retry (attempt 1, delay ~1 s)
848
- → handler fails → orders.created.retry (attempt 2, delay ~2 s)
849
- handler fails → orders.created.dlq
849
+ orders.created.retry.1 → consumer group: my-group-retry.1 (delay ~1 s)
850
+ orders.created.retry.2 → consumer group: my-group-retry.2 (delay ~2 s)
851
+ orders.created.retry.3 → consumer group: my-group-retry.3 (delay ~4 s)
850
852
  ```
851
853
 
852
- The retry topic messages carry scheduling headers (`x-retry-attempt`, `x-retry-after`, `x-retry-original-topic`, `x-retry-max-retries`) that the companion consumer reads automatically — no manual configuration needed.
854
+ Message flow with `maxRetries: 2` and `dlq: true`:
853
855
 
856
+ ```text
857
+ orders.created → handler fails → orders.created.retry.1 (attempt 1, delay ~1 s)
858
+ orders.created.retry.1 → handler fails → orders.created.retry.2 (attempt 2, delay ~2 s)
859
+ orders.created.retry.2 → handler fails → orders.created.dlq
860
+ ```
861
+
862
+ Each level consumer uses `consumer.pause → sleep(remaining) → consumer.resume` so the partition offset is never committed before the message is processed. On a process crash during sleep or handler execution, the message is redelivered on restart.
863
+
864
+ The retry topic messages carry scheduling headers (`x-retry-attempt`, `x-retry-after`, `x-retry-original-topic`, `x-retry-max-retries`) that each level consumer reads automatically — no manual configuration needed.
865
+
866
+ > **Delivery guarantee:** retry messages are at-least-once. A duplicate can occur in the rare case where a process crashes after routing to the next level but before committing the offset — the message appears twice in the next level topic. Design handlers to be idempotent if duplicates are unacceptable.
867
+ >
854
868
  > **Note:** `retryTopics` requires `retry` to be set — an error is thrown at startup if `retry` is missing. Currently only applies to `startConsumer`; batch consumers (`startBatchConsumer`) use in-process retry regardless.
855
869
 
870
+ `stopConsumer(groupId)` automatically stops all companion retry level consumers started for that group.
871
+
856
872
  ## stopConsumer
857
873
 
858
874
  Stop all consumers or a specific group:
@@ -977,18 +993,18 @@ export const OrderCreated = topic('order.created').schema(z.object({
977
993
  amount: z.number().positive(),
978
994
  }));
979
995
 
980
- // Without schema — explicit generic (still works)
981
- export const OrderAudit = topic('order.audit')<{ orderId: string; action: string }>();
996
+ // Without schema — explicit type via .type<T>()
997
+ export const OrderAudit = topic('order.audit').type<{ orderId: string; action: string }>();
982
998
 
983
999
  export type MyTopics = TopicsFrom<typeof OrderCreated | typeof OrderAudit>;
984
1000
  ```
985
1001
 
986
1002
  ### How it works
987
1003
 
988
- **On send** — `sendMessage`, `sendBatch`, and `transaction` call `schema.parse(message)` before serializing. Invalid messages throw immediately (the schema library's error, e.g. `ZodError`):
1004
+ **On send** — `sendMessage`, `sendBatch`, and `transaction` call `schema.parse(message)` before serializing. Invalid messages throw immediately as `KafkaValidationError` (the original schema error is available as `cause`):
989
1005
 
990
1006
  ```typescript
991
- // This throws ZodError — amount must be positive
1007
+ // This throws KafkaValidationError — amount must be positive
992
1008
  await kafka.sendMessage(OrderCreated, { orderId: '1', userId: '2', amount: -5 });
993
1009
  ```
994
1010
 
@@ -1100,7 +1116,7 @@ expect(kafka.sendMessage).toHaveBeenCalledWith(
1100
1116
  );
1101
1117
 
1102
1118
  // Override return values
1103
- kafka.checkStatus.mockResolvedValueOnce({ topics: ['order.created'] });
1119
+ kafka.checkStatus.mockResolvedValueOnce({ status: 'up', clientId: 'mock-client', topics: ['order.created'] });
1104
1120
 
1105
1121
  // Mock rejections
1106
1122
  kafka.sendMessage.mockRejectedValueOnce(new Error('broker down'));
@@ -61,6 +61,38 @@ function extractEnvelope(payload, headers, topic2, partition, offset) {
61
61
  };
62
62
  }
63
63
 
64
+ // src/client/errors.ts
65
+ var KafkaProcessingError = class extends Error {
66
+ constructor(message, topic2, originalMessage, options) {
67
+ super(message, options);
68
+ this.topic = topic2;
69
+ this.originalMessage = originalMessage;
70
+ this.name = "KafkaProcessingError";
71
+ if (options?.cause) this.cause = options.cause;
72
+ }
73
+ };
74
+ var KafkaValidationError = class extends Error {
75
+ constructor(topic2, originalMessage, options) {
76
+ super(`Schema validation failed for topic "${topic2}"`, options);
77
+ this.topic = topic2;
78
+ this.originalMessage = originalMessage;
79
+ this.name = "KafkaValidationError";
80
+ if (options?.cause) this.cause = options.cause;
81
+ }
82
+ };
83
+ var KafkaRetryExhaustedError = class extends KafkaProcessingError {
84
+ constructor(topic2, originalMessage, attempts, options) {
85
+ super(
86
+ `Message processing failed after ${attempts} attempts on topic "${topic2}"`,
87
+ topic2,
88
+ originalMessage,
89
+ options
90
+ );
91
+ this.attempts = attempts;
92
+ this.name = "KafkaRetryExhaustedError";
93
+ }
94
+ };
95
+
64
96
  // src/client/kafka.client/producer-ops.ts
65
97
  function resolveTopicName(topicOrDescriptor) {
66
98
  if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
@@ -76,17 +108,31 @@ function registerSchema(topicOrDesc, schemaRegistry) {
76
108
  }
77
109
  }
78
110
  async function validateMessage(topicOrDesc, message, deps) {
111
+ const topicName = resolveTopicName(topicOrDesc);
79
112
  if (topicOrDesc?.__schema) {
80
- return await topicOrDesc.__schema.parse(message);
113
+ try {
114
+ return await topicOrDesc.__schema.parse(message);
115
+ } catch (error) {
116
+ throw new KafkaValidationError(topicName, message, {
117
+ cause: error instanceof Error ? error : new Error(String(error))
118
+ });
119
+ }
81
120
  }
82
121
  if (deps.strictSchemasEnabled && typeof topicOrDesc === "string") {
83
122
  const schema = deps.schemaRegistry.get(topicOrDesc);
84
- if (schema) return await schema.parse(message);
123
+ if (schema) {
124
+ try {
125
+ return await schema.parse(message);
126
+ } catch (error) {
127
+ throw new KafkaValidationError(topicName, message, {
128
+ cause: error instanceof Error ? error : new Error(String(error))
129
+ });
130
+ }
131
+ }
85
132
  }
86
133
  return message;
87
134
  }
88
135
  async function buildSendPayload(topicOrDesc, messages, deps) {
89
- registerSchema(topicOrDesc, deps.schemaRegistry);
90
136
  const topic2 = resolveTopicName(topicOrDesc);
91
137
  const builtMessages = await Promise.all(
92
138
  messages.map(async (m) => {
@@ -163,38 +209,6 @@ function buildSchemaMap(topics, schemaRegistry, optionSchemas) {
163
209
  return schemaMap;
164
210
  }
165
211
 
166
- // src/client/errors.ts
167
- var KafkaProcessingError = class extends Error {
168
- constructor(message, topic2, originalMessage, options) {
169
- super(message, options);
170
- this.topic = topic2;
171
- this.originalMessage = originalMessage;
172
- this.name = "KafkaProcessingError";
173
- if (options?.cause) this.cause = options.cause;
174
- }
175
- };
176
- var KafkaValidationError = class extends Error {
177
- constructor(topic2, originalMessage, options) {
178
- super(`Schema validation failed for topic "${topic2}"`, options);
179
- this.topic = topic2;
180
- this.originalMessage = originalMessage;
181
- this.name = "KafkaValidationError";
182
- if (options?.cause) this.cause = options.cause;
183
- }
184
- };
185
- var KafkaRetryExhaustedError = class extends KafkaProcessingError {
186
- constructor(topic2, originalMessage, attempts, options) {
187
- super(
188
- `Message processing failed after ${attempts} attempts on topic "${topic2}"`,
189
- topic2,
190
- originalMessage,
191
- options
192
- );
193
- this.attempts = attempts;
194
- this.name = "KafkaRetryExhaustedError";
195
- }
196
- };
197
-
198
212
  // src/client/consumer/pipeline.ts
199
213
  function toError(error) {
200
214
  return error instanceof Error ? error : new Error(String(error));
@@ -282,7 +296,7 @@ var RETRY_HEADER_AFTER = "x-retry-after";
282
296
  var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
283
297
  var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
284
298
  async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
285
- const retryTopic = `${originalTopic}.retry`;
299
+ const retryTopic = `${originalTopic}.retry.${attempt}`;
286
300
  const {
287
301
  [RETRY_HEADER_ATTEMPT]: _a,
288
302
  [RETRY_HEADER_AFTER]: _b,
@@ -586,7 +600,7 @@ async function waitForPartitionAssignment(consumer, topics, logger, timeoutMs =
586
600
  `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
587
601
  );
588
602
  }
589
- async function startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap, deps) {
603
+ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopics, handleMessage, retry, dlq, interceptors, schemaMap, deps, assignmentTimeoutMs) {
590
604
  const {
591
605
  logger,
592
606
  producer,
@@ -596,43 +610,48 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
596
610
  getOrCreateConsumer: getOrCreateConsumer2,
597
611
  runningConsumers
598
612
  } = deps;
599
- const retryTopicNames = originalTopics.map((t) => `${t}.retry`);
600
- const retryGroupId = `${originalGroupId}-retry`;
601
613
  const backoffMs = retry.backoffMs ?? 1e3;
602
614
  const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
603
615
  const pipelineDeps = { logger, producer, instrumentation, onMessageLost };
604
- for (const rt of retryTopicNames) {
605
- await ensureTopic(rt);
616
+ for (const lt of levelTopics) {
617
+ await ensureTopic(lt);
606
618
  }
607
- const consumer = getOrCreateConsumer2(retryGroupId, false, true);
619
+ const consumer = getOrCreateConsumer2(levelGroupId, false, false);
608
620
  await consumer.connect();
609
- await subscribeWithRetry(consumer, retryTopicNames, logger);
621
+ await subscribeWithRetry(consumer, levelTopics, logger);
610
622
  await consumer.run({
611
- eachMessage: async ({ topic: retryTopic, partition, message }) => {
612
- if (!message.value) return;
613
- const raw = message.value.toString();
614
- const parsed = parseJsonMessage(raw, retryTopic, logger);
615
- if (parsed === null) return;
623
+ eachMessage: async ({ topic: levelTopic, partition, message }) => {
624
+ const nextOffset = {
625
+ topic: levelTopic,
626
+ partition,
627
+ offset: (parseInt(message.offset, 10) + 1).toString()
628
+ };
629
+ if (!message.value) {
630
+ await consumer.commitOffsets([nextOffset]);
631
+ return;
632
+ }
616
633
  const headers = decodeHeaders(message.headers);
617
- const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? retryTopic.replace(/\.retry$/, "");
618
- const currentAttempt = parseInt(
619
- headers[RETRY_HEADER_ATTEMPT] ?? "1",
620
- 10
621
- );
622
- const maxRetries = parseInt(
623
- headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
624
- 10
625
- );
626
634
  const retryAfter = parseInt(
627
635
  headers[RETRY_HEADER_AFTER] ?? "0",
628
636
  10
629
637
  );
630
638
  const remaining = retryAfter - Date.now();
631
639
  if (remaining > 0) {
632
- consumer.pause([{ topic: retryTopic, partitions: [partition] }]);
640
+ consumer.pause([{ topic: levelTopic, partitions: [partition] }]);
633
641
  await sleep(remaining);
634
- consumer.resume([{ topic: retryTopic, partitions: [partition] }]);
642
+ consumer.resume([{ topic: levelTopic, partitions: [partition] }]);
643
+ }
644
+ const raw = message.value.toString();
645
+ const parsed = parseJsonMessage(raw, levelTopic, logger);
646
+ if (parsed === null) {
647
+ await consumer.commitOffsets([nextOffset]);
648
+ return;
635
649
  }
650
+ const currentMaxRetries = parseInt(
651
+ headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
652
+ 10
653
+ );
654
+ const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? levelTopic.replace(/\.retry\.\d+$/, "");
636
655
  const validated = await validateWithSchema(
637
656
  parsed,
638
657
  raw,
@@ -642,7 +661,10 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
642
661
  dlq,
643
662
  { ...pipelineDeps, originalHeaders: headers }
644
663
  );
645
- if (validated === null) return;
664
+ if (validated === null) {
665
+ await consumer.commitOffsets([nextOffset]);
666
+ return;
667
+ }
646
668
  const envelope = extractEnvelope(
647
669
  validated,
648
670
  headers,
@@ -662,62 +684,81 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
662
684
  interceptors,
663
685
  instrumentation
664
686
  );
665
- if (error) {
666
- const nextAttempt = currentAttempt + 1;
667
- const exhausted = currentAttempt >= maxRetries;
668
- const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(
687
+ if (!error) {
688
+ await consumer.commitOffsets([nextOffset]);
689
+ return;
690
+ }
691
+ const exhausted = level >= currentMaxRetries;
692
+ const reportedError = exhausted && currentMaxRetries > 1 ? new KafkaRetryExhaustedError(
693
+ originalTopic,
694
+ [envelope.payload],
695
+ currentMaxRetries,
696
+ { cause: error }
697
+ ) : error;
698
+ await notifyInterceptorsOnError([envelope], interceptors, reportedError);
699
+ logger.error(
700
+ `Retry consumer error for ${originalTopic} (level ${level}/${currentMaxRetries}):`,
701
+ error.stack
702
+ );
703
+ if (!exhausted) {
704
+ const nextLevel = level + 1;
705
+ const cap = Math.min(backoffMs * 2 ** level, maxBackoffMs);
706
+ const delay = Math.floor(Math.random() * cap);
707
+ await sendToRetryTopic(
669
708
  originalTopic,
670
- [envelope.payload],
671
- maxRetries,
672
- { cause: error }
673
- ) : error;
674
- await notifyInterceptorsOnError(
675
- [envelope],
676
- interceptors,
677
- reportedError
678
- );
679
- logger.error(
680
- `Retry consumer error for ${originalTopic} (attempt ${currentAttempt}/${maxRetries}):`,
681
- error.stack
709
+ [raw],
710
+ nextLevel,
711
+ currentMaxRetries,
712
+ delay,
713
+ headers,
714
+ pipelineDeps
682
715
  );
683
- if (!exhausted) {
684
- const cap = Math.min(backoffMs * 2 ** currentAttempt, maxBackoffMs);
685
- const delay = Math.floor(Math.random() * cap);
686
- await sendToRetryTopic(
687
- originalTopic,
688
- [raw],
689
- nextAttempt,
690
- maxRetries,
691
- delay,
692
- headers,
693
- pipelineDeps
694
- );
695
- } else if (dlq) {
696
- await sendToDlq(originalTopic, raw, pipelineDeps, {
697
- error,
698
- // +1 to account for the main consumer's initial attempt before
699
- // routing to the retry topic, making this consistent with the
700
- // in-process retry path where attempt counts all tries.
701
- attempt: currentAttempt + 1,
702
- originalHeaders: headers
703
- });
704
- } else {
705
- await onMessageLost?.({
706
- topic: originalTopic,
707
- error,
708
- attempt: currentAttempt,
709
- headers
710
- });
711
- }
716
+ } else if (dlq) {
717
+ await sendToDlq(originalTopic, raw, pipelineDeps, {
718
+ error,
719
+ // +1 to account for the main consumer's initial attempt before routing.
720
+ attempt: level + 1,
721
+ originalHeaders: headers
722
+ });
723
+ } else {
724
+ await onMessageLost?.({
725
+ topic: originalTopic,
726
+ error,
727
+ attempt: level,
728
+ headers
729
+ });
712
730
  }
731
+ await consumer.commitOffsets([nextOffset]);
713
732
  }
714
733
  });
715
- runningConsumers.set(retryGroupId, "eachMessage");
716
- await waitForPartitionAssignment(consumer, retryTopicNames, logger);
734
+ runningConsumers.set(levelGroupId, "eachMessage");
735
+ await waitForPartitionAssignment(consumer, levelTopics, logger, assignmentTimeoutMs);
717
736
  logger.log(
718
- `Retry topic consumers started for: ${originalTopics.join(", ")} (group: ${retryGroupId})`
737
+ `Retry level ${level}/${retry.maxRetries} consumer started for: ${originalTopics.join(", ")} (group: ${levelGroupId})`
719
738
  );
720
739
  }
740
+ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap, deps, assignmentTimeoutMs) {
741
+ const levelGroupIds = [];
742
+ for (let level = 1; level <= retry.maxRetries; level++) {
743
+ const levelTopics = originalTopics.map((t) => `${t}.retry.${level}`);
744
+ const levelGroupId = `${originalGroupId}-retry.${level}`;
745
+ await startLevelConsumer(
746
+ level,
747
+ levelTopics,
748
+ levelGroupId,
749
+ originalTopics,
750
+ handleMessage,
751
+ retry,
752
+ dlq,
753
+ interceptors,
754
+ schemaMap,
755
+ deps,
756
+ assignmentTimeoutMs
757
+ );
758
+ levelGroupIds.push(levelGroupId);
759
+ }
760
+ return levelGroupIds;
761
+ }
721
762
 
722
763
  // src/client/kafka.client/index.ts
723
764
  var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS;
@@ -736,6 +777,8 @@ var KafkaClient = class {
736
777
  schemaRegistry = /* @__PURE__ */ new Map();
737
778
  runningConsumers = /* @__PURE__ */ new Map();
738
779
  consumerCreationOptions = /* @__PURE__ */ new Map();
780
+ /** Maps each main consumer groupId to its companion retry level groupIds. */
781
+ companionGroupIds = /* @__PURE__ */ new Map();
739
782
  instrumentation;
740
783
  onMessageLost;
741
784
  onRebalance;
@@ -872,7 +915,7 @@ var KafkaClient = class {
872
915
  });
873
916
  this.runningConsumers.set(gid, "eachMessage");
874
917
  if (options.retryTopics && retry) {
875
- await startRetryTopicConsumers(
918
+ const companions = await startRetryTopicConsumers(
876
919
  topicNames,
877
920
  gid,
878
921
  handleMessage,
@@ -880,8 +923,10 @@ var KafkaClient = class {
880
923
  dlq,
881
924
  interceptors,
882
925
  schemaMap,
883
- this.retryTopicDeps
926
+ this.retryTopicDeps,
927
+ options.retryTopicAssignmentTimeoutMs
884
928
  );
929
+ this.companionGroupIds.set(gid, companions);
885
930
  }
886
931
  return { groupId: gid, stop: () => this.stopConsumer(gid) };
887
932
  }
@@ -923,6 +968,19 @@ var KafkaClient = class {
923
968
  this.runningConsumers.delete(groupId);
924
969
  this.consumerCreationOptions.delete(groupId);
925
970
  this.logger.log(`Consumer disconnected: group "${groupId}"`);
971
+ const companions = this.companionGroupIds.get(groupId) ?? [];
972
+ for (const cGroupId of companions) {
973
+ const cConsumer = this.consumers.get(cGroupId);
974
+ if (cConsumer) {
975
+ await cConsumer.disconnect().catch(() => {
976
+ });
977
+ this.consumers.delete(cGroupId);
978
+ this.runningConsumers.delete(cGroupId);
979
+ this.consumerCreationOptions.delete(cGroupId);
980
+ this.logger.log(`Retry consumer disconnected: group "${cGroupId}"`);
981
+ }
982
+ }
983
+ this.companionGroupIds.delete(groupId);
926
984
  } else {
927
985
  const tasks = Array.from(this.consumers.values()).map(
928
986
  (c) => c.disconnect().catch(() => {
@@ -932,6 +990,7 @@ var KafkaClient = class {
932
990
  this.consumers.clear();
933
991
  this.runningConsumers.clear();
934
992
  this.consumerCreationOptions.clear();
993
+ this.companionGroupIds.clear();
935
994
  this.logger.log("All consumers disconnected");
936
995
  }
937
996
  }
@@ -961,14 +1020,22 @@ var KafkaClient = class {
961
1020
  }
962
1021
  return result;
963
1022
  }
964
- /** Check broker connectivity and return status, clientId, and available topics. */
1023
+ /** Check broker connectivity. Never throws — returns a discriminated union. */
965
1024
  async checkStatus() {
966
- if (!this.isAdminConnected) {
967
- await this.admin.connect();
968
- this.isAdminConnected = true;
1025
+ try {
1026
+ if (!this.isAdminConnected) {
1027
+ await this.admin.connect();
1028
+ this.isAdminConnected = true;
1029
+ }
1030
+ const topics = await this.admin.listTopics();
1031
+ return { status: "up", clientId: this.clientId, topics };
1032
+ } catch (error) {
1033
+ return {
1034
+ status: "down",
1035
+ clientId: this.clientId,
1036
+ error: error instanceof Error ? error.message : String(error)
1037
+ };
969
1038
  }
970
- const topics = await this.admin.listTopics();
971
- return { status: "up", clientId: this.clientId, topics };
972
1039
  }
973
1040
  getClientId() {
974
1041
  return this.clientId;
@@ -991,10 +1058,12 @@ var KafkaClient = class {
991
1058
  this.consumers.clear();
992
1059
  this.runningConsumers.clear();
993
1060
  this.consumerCreationOptions.clear();
1061
+ this.companionGroupIds.clear();
994
1062
  this.logger.log("All connections closed");
995
1063
  }
996
1064
  // ── Private helpers ──────────────────────────────────────────────
997
1065
  async preparePayload(topicOrDesc, messages) {
1066
+ registerSchema(topicOrDesc, this.schemaRegistry);
998
1067
  const payload = await buildSendPayload(
999
1068
  topicOrDesc,
1000
1069
  messages,
@@ -1128,16 +1197,18 @@ var KafkaClient = class {
1128
1197
 
1129
1198
  // src/client/message/topic.ts
1130
1199
  function topic(name) {
1131
- const fn = () => ({
1132
- __topic: name,
1133
- __type: void 0
1134
- });
1135
- fn.schema = (schema) => ({
1136
- __topic: name,
1137
- __type: void 0,
1138
- __schema: schema
1139
- });
1140
- return fn;
1200
+ return {
1201
+ /** Provide an explicit message type without a runtime schema. */
1202
+ type: () => ({
1203
+ __topic: name,
1204
+ __type: void 0
1205
+ }),
1206
+ schema: (schema) => ({
1207
+ __topic: name,
1208
+ __type: void 0,
1209
+ __schema: schema
1210
+ })
1211
+ };
1141
1212
  }
1142
1213
 
1143
1214
  export {
@@ -1157,4 +1228,4 @@ export {
1157
1228
  KafkaClient,
1158
1229
  topic
1159
1230
  };
1160
- //# sourceMappingURL=chunk-6B72MJPU.mjs.map
1231
+ //# sourceMappingURL=chunk-TD2AE774.mjs.map