@drarzter/kafka-client 0.5.6 → 0.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -118,6 +118,38 @@ function extractEnvelope(payload, headers, topic2, partition, offset) {
118
118
  };
119
119
  }
120
120
 
121
+ // src/client/errors.ts
122
+ var KafkaProcessingError = class extends Error {
123
+ constructor(message, topic2, originalMessage, options) {
124
+ super(message, options);
125
+ this.topic = topic2;
126
+ this.originalMessage = originalMessage;
127
+ this.name = "KafkaProcessingError";
128
+ if (options?.cause) this.cause = options.cause;
129
+ }
130
+ };
131
+ var KafkaValidationError = class extends Error {
132
+ constructor(topic2, originalMessage, options) {
133
+ super(`Schema validation failed for topic "${topic2}"`, options);
134
+ this.topic = topic2;
135
+ this.originalMessage = originalMessage;
136
+ this.name = "KafkaValidationError";
137
+ if (options?.cause) this.cause = options.cause;
138
+ }
139
+ };
140
+ var KafkaRetryExhaustedError = class extends KafkaProcessingError {
141
+ constructor(topic2, originalMessage, attempts, options) {
142
+ super(
143
+ `Message processing failed after ${attempts} attempts on topic "${topic2}"`,
144
+ topic2,
145
+ originalMessage,
146
+ options
147
+ );
148
+ this.attempts = attempts;
149
+ this.name = "KafkaRetryExhaustedError";
150
+ }
151
+ };
152
+
121
153
  // src/client/kafka.client/producer-ops.ts
122
154
  function resolveTopicName(topicOrDescriptor) {
123
155
  if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
@@ -133,17 +165,31 @@ function registerSchema(topicOrDesc, schemaRegistry) {
133
165
  }
134
166
  }
135
167
  async function validateMessage(topicOrDesc, message, deps) {
168
+ const topicName = resolveTopicName(topicOrDesc);
136
169
  if (topicOrDesc?.__schema) {
137
- return await topicOrDesc.__schema.parse(message);
170
+ try {
171
+ return await topicOrDesc.__schema.parse(message);
172
+ } catch (error) {
173
+ throw new KafkaValidationError(topicName, message, {
174
+ cause: error instanceof Error ? error : new Error(String(error))
175
+ });
176
+ }
138
177
  }
139
178
  if (deps.strictSchemasEnabled && typeof topicOrDesc === "string") {
140
179
  const schema = deps.schemaRegistry.get(topicOrDesc);
141
- if (schema) return await schema.parse(message);
180
+ if (schema) {
181
+ try {
182
+ return await schema.parse(message);
183
+ } catch (error) {
184
+ throw new KafkaValidationError(topicName, message, {
185
+ cause: error instanceof Error ? error : new Error(String(error))
186
+ });
187
+ }
188
+ }
142
189
  }
143
190
  return message;
144
191
  }
145
192
  async function buildSendPayload(topicOrDesc, messages, deps) {
146
- registerSchema(topicOrDesc, deps.schemaRegistry);
147
193
  const topic2 = resolveTopicName(topicOrDesc);
148
194
  const builtMessages = await Promise.all(
149
195
  messages.map(async (m) => {
@@ -220,38 +266,6 @@ function buildSchemaMap(topics, schemaRegistry, optionSchemas) {
220
266
  return schemaMap;
221
267
  }
222
268
 
223
- // src/client/errors.ts
224
- var KafkaProcessingError = class extends Error {
225
- constructor(message, topic2, originalMessage, options) {
226
- super(message, options);
227
- this.topic = topic2;
228
- this.originalMessage = originalMessage;
229
- this.name = "KafkaProcessingError";
230
- if (options?.cause) this.cause = options.cause;
231
- }
232
- };
233
- var KafkaValidationError = class extends Error {
234
- constructor(topic2, originalMessage, options) {
235
- super(`Schema validation failed for topic "${topic2}"`, options);
236
- this.topic = topic2;
237
- this.originalMessage = originalMessage;
238
- this.name = "KafkaValidationError";
239
- if (options?.cause) this.cause = options.cause;
240
- }
241
- };
242
- var KafkaRetryExhaustedError = class extends KafkaProcessingError {
243
- constructor(topic2, originalMessage, attempts, options) {
244
- super(
245
- `Message processing failed after ${attempts} attempts on topic "${topic2}"`,
246
- topic2,
247
- originalMessage,
248
- options
249
- );
250
- this.attempts = attempts;
251
- this.name = "KafkaRetryExhaustedError";
252
- }
253
- };
254
-
255
269
  // src/client/consumer/pipeline.ts
256
270
  function toError(error) {
257
271
  return error instanceof Error ? error : new Error(String(error));
@@ -339,7 +353,7 @@ var RETRY_HEADER_AFTER = "x-retry-after";
339
353
  var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
340
354
  var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
341
355
  async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
342
- const retryTopic = `${originalTopic}.retry`;
356
+ const retryTopic = `${originalTopic}.retry.${attempt}`;
343
357
  const {
344
358
  [RETRY_HEADER_ATTEMPT]: _a,
345
359
  [RETRY_HEADER_AFTER]: _b,
@@ -643,7 +657,7 @@ async function waitForPartitionAssignment(consumer, topics, logger, timeoutMs =
643
657
  `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
644
658
  );
645
659
  }
646
- async function startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap, deps) {
660
+ async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopics, handleMessage, retry, dlq, interceptors, schemaMap, deps, assignmentTimeoutMs) {
647
661
  const {
648
662
  logger,
649
663
  producer,
@@ -653,43 +667,48 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
653
667
  getOrCreateConsumer: getOrCreateConsumer2,
654
668
  runningConsumers
655
669
  } = deps;
656
- const retryTopicNames = originalTopics.map((t) => `${t}.retry`);
657
- const retryGroupId = `${originalGroupId}-retry`;
658
670
  const backoffMs = retry.backoffMs ?? 1e3;
659
671
  const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
660
672
  const pipelineDeps = { logger, producer, instrumentation, onMessageLost };
661
- for (const rt of retryTopicNames) {
662
- await ensureTopic(rt);
673
+ for (const lt of levelTopics) {
674
+ await ensureTopic(lt);
663
675
  }
664
- const consumer = getOrCreateConsumer2(retryGroupId, false, true);
676
+ const consumer = getOrCreateConsumer2(levelGroupId, false, false);
665
677
  await consumer.connect();
666
- await subscribeWithRetry(consumer, retryTopicNames, logger);
678
+ await subscribeWithRetry(consumer, levelTopics, logger);
667
679
  await consumer.run({
668
- eachMessage: async ({ topic: retryTopic, partition, message }) => {
669
- if (!message.value) return;
670
- const raw = message.value.toString();
671
- const parsed = parseJsonMessage(raw, retryTopic, logger);
672
- if (parsed === null) return;
680
+ eachMessage: async ({ topic: levelTopic, partition, message }) => {
681
+ const nextOffset = {
682
+ topic: levelTopic,
683
+ partition,
684
+ offset: (parseInt(message.offset, 10) + 1).toString()
685
+ };
686
+ if (!message.value) {
687
+ await consumer.commitOffsets([nextOffset]);
688
+ return;
689
+ }
673
690
  const headers = decodeHeaders(message.headers);
674
- const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? retryTopic.replace(/\.retry$/, "");
675
- const currentAttempt = parseInt(
676
- headers[RETRY_HEADER_ATTEMPT] ?? "1",
677
- 10
678
- );
679
- const maxRetries = parseInt(
680
- headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
681
- 10
682
- );
683
691
  const retryAfter = parseInt(
684
692
  headers[RETRY_HEADER_AFTER] ?? "0",
685
693
  10
686
694
  );
687
695
  const remaining = retryAfter - Date.now();
688
696
  if (remaining > 0) {
689
- consumer.pause([{ topic: retryTopic, partitions: [partition] }]);
697
+ consumer.pause([{ topic: levelTopic, partitions: [partition] }]);
690
698
  await sleep(remaining);
691
- consumer.resume([{ topic: retryTopic, partitions: [partition] }]);
699
+ consumer.resume([{ topic: levelTopic, partitions: [partition] }]);
692
700
  }
701
+ const raw = message.value.toString();
702
+ const parsed = parseJsonMessage(raw, levelTopic, logger);
703
+ if (parsed === null) {
704
+ await consumer.commitOffsets([nextOffset]);
705
+ return;
706
+ }
707
+ const currentMaxRetries = parseInt(
708
+ headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
709
+ 10
710
+ );
711
+ const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? levelTopic.replace(/\.retry\.\d+$/, "");
693
712
  const validated = await validateWithSchema(
694
713
  parsed,
695
714
  raw,
@@ -699,7 +718,10 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
699
718
  dlq,
700
719
  { ...pipelineDeps, originalHeaders: headers }
701
720
  );
702
- if (validated === null) return;
721
+ if (validated === null) {
722
+ await consumer.commitOffsets([nextOffset]);
723
+ return;
724
+ }
703
725
  const envelope = extractEnvelope(
704
726
  validated,
705
727
  headers,
@@ -719,62 +741,81 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
719
741
  interceptors,
720
742
  instrumentation
721
743
  );
722
- if (error) {
723
- const nextAttempt = currentAttempt + 1;
724
- const exhausted = currentAttempt >= maxRetries;
725
- const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(
744
+ if (!error) {
745
+ await consumer.commitOffsets([nextOffset]);
746
+ return;
747
+ }
748
+ const exhausted = level >= currentMaxRetries;
749
+ const reportedError = exhausted && currentMaxRetries > 1 ? new KafkaRetryExhaustedError(
750
+ originalTopic,
751
+ [envelope.payload],
752
+ currentMaxRetries,
753
+ { cause: error }
754
+ ) : error;
755
+ await notifyInterceptorsOnError([envelope], interceptors, reportedError);
756
+ logger.error(
757
+ `Retry consumer error for ${originalTopic} (level ${level}/${currentMaxRetries}):`,
758
+ error.stack
759
+ );
760
+ if (!exhausted) {
761
+ const nextLevel = level + 1;
762
+ const cap = Math.min(backoffMs * 2 ** level, maxBackoffMs);
763
+ const delay = Math.floor(Math.random() * cap);
764
+ await sendToRetryTopic(
726
765
  originalTopic,
727
- [envelope.payload],
728
- maxRetries,
729
- { cause: error }
730
- ) : error;
731
- await notifyInterceptorsOnError(
732
- [envelope],
733
- interceptors,
734
- reportedError
766
+ [raw],
767
+ nextLevel,
768
+ currentMaxRetries,
769
+ delay,
770
+ headers,
771
+ pipelineDeps
735
772
  );
736
- logger.error(
737
- `Retry consumer error for ${originalTopic} (attempt ${currentAttempt}/${maxRetries}):`,
738
- error.stack
739
- );
740
- if (!exhausted) {
741
- const cap = Math.min(backoffMs * 2 ** currentAttempt, maxBackoffMs);
742
- const delay = Math.floor(Math.random() * cap);
743
- await sendToRetryTopic(
744
- originalTopic,
745
- [raw],
746
- nextAttempt,
747
- maxRetries,
748
- delay,
749
- headers,
750
- pipelineDeps
751
- );
752
- } else if (dlq) {
753
- await sendToDlq(originalTopic, raw, pipelineDeps, {
754
- error,
755
- // +1 to account for the main consumer's initial attempt before
756
- // routing to the retry topic, making this consistent with the
757
- // in-process retry path where attempt counts all tries.
758
- attempt: currentAttempt + 1,
759
- originalHeaders: headers
760
- });
761
- } else {
762
- await onMessageLost?.({
763
- topic: originalTopic,
764
- error,
765
- attempt: currentAttempt,
766
- headers
767
- });
768
- }
773
+ } else if (dlq) {
774
+ await sendToDlq(originalTopic, raw, pipelineDeps, {
775
+ error,
776
+ // +1 to account for the main consumer's initial attempt before routing.
777
+ attempt: level + 1,
778
+ originalHeaders: headers
779
+ });
780
+ } else {
781
+ await onMessageLost?.({
782
+ topic: originalTopic,
783
+ error,
784
+ attempt: level,
785
+ headers
786
+ });
769
787
  }
788
+ await consumer.commitOffsets([nextOffset]);
770
789
  }
771
790
  });
772
- runningConsumers.set(retryGroupId, "eachMessage");
773
- await waitForPartitionAssignment(consumer, retryTopicNames, logger);
791
+ runningConsumers.set(levelGroupId, "eachMessage");
792
+ await waitForPartitionAssignment(consumer, levelTopics, logger, assignmentTimeoutMs);
774
793
  logger.log(
775
- `Retry topic consumers started for: ${originalTopics.join(", ")} (group: ${retryGroupId})`
794
+ `Retry level ${level}/${retry.maxRetries} consumer started for: ${originalTopics.join(", ")} (group: ${levelGroupId})`
776
795
  );
777
796
  }
797
+ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap, deps, assignmentTimeoutMs) {
798
+ const levelGroupIds = [];
799
+ for (let level = 1; level <= retry.maxRetries; level++) {
800
+ const levelTopics = originalTopics.map((t) => `${t}.retry.${level}`);
801
+ const levelGroupId = `${originalGroupId}-retry.${level}`;
802
+ await startLevelConsumer(
803
+ level,
804
+ levelTopics,
805
+ levelGroupId,
806
+ originalTopics,
807
+ handleMessage,
808
+ retry,
809
+ dlq,
810
+ interceptors,
811
+ schemaMap,
812
+ deps,
813
+ assignmentTimeoutMs
814
+ );
815
+ levelGroupIds.push(levelGroupId);
816
+ }
817
+ return levelGroupIds;
818
+ }
778
819
 
779
820
  // src/client/kafka.client/index.ts
780
821
  var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript.KafkaJS;
@@ -793,6 +834,8 @@ var KafkaClient = class {
793
834
  schemaRegistry = /* @__PURE__ */ new Map();
794
835
  runningConsumers = /* @__PURE__ */ new Map();
795
836
  consumerCreationOptions = /* @__PURE__ */ new Map();
837
+ /** Maps each main consumer groupId to its companion retry level groupIds. */
838
+ companionGroupIds = /* @__PURE__ */ new Map();
796
839
  instrumentation;
797
840
  onMessageLost;
798
841
  onRebalance;
@@ -929,7 +972,7 @@ var KafkaClient = class {
929
972
  });
930
973
  this.runningConsumers.set(gid, "eachMessage");
931
974
  if (options.retryTopics && retry) {
932
- await startRetryTopicConsumers(
975
+ const companions = await startRetryTopicConsumers(
933
976
  topicNames,
934
977
  gid,
935
978
  handleMessage,
@@ -937,8 +980,10 @@ var KafkaClient = class {
937
980
  dlq,
938
981
  interceptors,
939
982
  schemaMap,
940
- this.retryTopicDeps
983
+ this.retryTopicDeps,
984
+ options.retryTopicAssignmentTimeoutMs
941
985
  );
986
+ this.companionGroupIds.set(gid, companions);
942
987
  }
943
988
  return { groupId: gid, stop: () => this.stopConsumer(gid) };
944
989
  }
@@ -980,6 +1025,19 @@ var KafkaClient = class {
980
1025
  this.runningConsumers.delete(groupId);
981
1026
  this.consumerCreationOptions.delete(groupId);
982
1027
  this.logger.log(`Consumer disconnected: group "${groupId}"`);
1028
+ const companions = this.companionGroupIds.get(groupId) ?? [];
1029
+ for (const cGroupId of companions) {
1030
+ const cConsumer = this.consumers.get(cGroupId);
1031
+ if (cConsumer) {
1032
+ await cConsumer.disconnect().catch(() => {
1033
+ });
1034
+ this.consumers.delete(cGroupId);
1035
+ this.runningConsumers.delete(cGroupId);
1036
+ this.consumerCreationOptions.delete(cGroupId);
1037
+ this.logger.log(`Retry consumer disconnected: group "${cGroupId}"`);
1038
+ }
1039
+ }
1040
+ this.companionGroupIds.delete(groupId);
983
1041
  } else {
984
1042
  const tasks = Array.from(this.consumers.values()).map(
985
1043
  (c) => c.disconnect().catch(() => {
@@ -989,6 +1047,7 @@ var KafkaClient = class {
989
1047
  this.consumers.clear();
990
1048
  this.runningConsumers.clear();
991
1049
  this.consumerCreationOptions.clear();
1050
+ this.companionGroupIds.clear();
992
1051
  this.logger.log("All consumers disconnected");
993
1052
  }
994
1053
  }
@@ -1018,14 +1077,22 @@ var KafkaClient = class {
1018
1077
  }
1019
1078
  return result;
1020
1079
  }
1021
- /** Check broker connectivity and return status, clientId, and available topics. */
1080
+ /** Check broker connectivity. Never throws; returns a discriminated union. */
1022
1081
  async checkStatus() {
1023
- if (!this.isAdminConnected) {
1024
- await this.admin.connect();
1025
- this.isAdminConnected = true;
1082
+ try {
1083
+ if (!this.isAdminConnected) {
1084
+ await this.admin.connect();
1085
+ this.isAdminConnected = true;
1086
+ }
1087
+ const topics = await this.admin.listTopics();
1088
+ return { status: "up", clientId: this.clientId, topics };
1089
+ } catch (error) {
1090
+ return {
1091
+ status: "down",
1092
+ clientId: this.clientId,
1093
+ error: error instanceof Error ? error.message : String(error)
1094
+ };
1026
1095
  }
1027
- const topics = await this.admin.listTopics();
1028
- return { status: "up", clientId: this.clientId, topics };
1029
1096
  }
1030
1097
  getClientId() {
1031
1098
  return this.clientId;
@@ -1048,10 +1115,12 @@ var KafkaClient = class {
1048
1115
  this.consumers.clear();
1049
1116
  this.runningConsumers.clear();
1050
1117
  this.consumerCreationOptions.clear();
1118
+ this.companionGroupIds.clear();
1051
1119
  this.logger.log("All connections closed");
1052
1120
  }
1053
1121
  // ── Private helpers ──────────────────────────────────────────────
1054
1122
  async preparePayload(topicOrDesc, messages) {
1123
+ registerSchema(topicOrDesc, this.schemaRegistry);
1055
1124
  const payload = await buildSendPayload(
1056
1125
  topicOrDesc,
1057
1126
  messages,
@@ -1185,16 +1254,18 @@ var KafkaClient = class {
1185
1254
 
1186
1255
  // src/client/message/topic.ts
1187
1256
  function topic(name) {
1188
- const fn = () => ({
1189
- __topic: name,
1190
- __type: void 0
1191
- });
1192
- fn.schema = (schema) => ({
1193
- __topic: name,
1194
- __type: void 0,
1195
- __schema: schema
1196
- });
1197
- return fn;
1257
+ return {
1258
+ /** Provide an explicit message type without a runtime schema. */
1259
+ type: () => ({
1260
+ __topic: name,
1261
+ __type: void 0
1262
+ }),
1263
+ schema: (schema) => ({
1264
+ __topic: name,
1265
+ __type: void 0,
1266
+ __schema: schema
1267
+ })
1268
+ };
1198
1269
  }
1199
1270
 
1200
1271
  // src/nest/kafka.module.ts
@@ -1383,15 +1454,7 @@ KafkaModule = __decorateClass([
1383
1454
  var import_common4 = require("@nestjs/common");
1384
1455
  var KafkaHealthIndicator = class {
1385
1456
  async check(client) {
1386
- try {
1387
- return await client.checkStatus();
1388
- } catch (error) {
1389
- return {
1390
- status: "down",
1391
- clientId: client.clientId,
1392
- error: error instanceof Error ? error.message : String(error)
1393
- };
1394
- }
1457
+ return client.checkStatus();
1395
1458
  }
1396
1459
  };
1397
1460
  KafkaHealthIndicator = __decorateClass([