@drarzter/kafka-client 0.5.1 → 0.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +127 -4
- package/dist/{chunk-P7GY4BLV.mjs → chunk-3QXTW66R.mjs} +304 -40
- package/dist/chunk-3QXTW66R.mjs.map +1 -0
- package/dist/core.d.mts +23 -3
- package/dist/core.d.ts +23 -3
- package/dist/core.js +303 -39
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/{envelope-CPX1qudy.d.mts → envelope-BR8d1m8c.d.mts} +43 -4
- package/dist/{envelope-CPX1qudy.d.ts → envelope-BR8d1m8c.d.ts} +43 -4
- package/dist/index.d.mts +10 -7
- package/dist/index.d.ts +10 -7
- package/dist/index.js +303 -39
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/index.mjs.map +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/otel.js +1 -1
- package/dist/otel.js.map +1 -1
- package/dist/otel.mjs +1 -1
- package/dist/otel.mjs.map +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/package.json +1 -1
- package/dist/chunk-P7GY4BLV.mjs.map +0 -1
package/dist/index.js
CHANGED
@@ -172,7 +172,7 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
   const schema = schemaMap.get(topic2);
   if (!schema) return message;
   try {
-    return schema.parse(message);
+    return await schema.parse(message);
   } catch (error) {
     const err = toError(error);
     const validationError = new KafkaValidationError(topic2, message, {
@@ -182,20 +182,36 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
       `Schema validation failed for topic ${topic2}:`,
       err.message
     );
-    if (dlq)
-
+    if (dlq) {
+      await sendToDlq(topic2, raw, deps, {
+        error: validationError,
+        attempt: 0,
+        originalHeaders: deps.originalHeaders
+      });
+    } else {
+      await deps.onMessageLost?.({ topic: topic2, error: validationError, attempt: 0, headers: deps.originalHeaders ?? {} });
+    }
+    const errorEnvelope = extractEnvelope(message, deps.originalHeaders ?? {}, topic2, -1, "");
     for (const interceptor of interceptors) {
       await interceptor.onError?.(errorEnvelope, validationError);
     }
     return null;
   }
 }
-async function sendToDlq(topic2, rawMessage, deps) {
+async function sendToDlq(topic2, rawMessage, deps, meta) {
   const dlqTopic = `${topic2}.dlq`;
+  const headers = {
+    ...meta?.originalHeaders ?? {},
+    "x-dlq-original-topic": topic2,
+    "x-dlq-failed-at": (/* @__PURE__ */ new Date()).toISOString(),
+    "x-dlq-error-message": meta?.error.message ?? "unknown",
+    "x-dlq-error-stack": meta?.error.stack?.slice(0, 2e3) ?? "",
+    "x-dlq-attempt-count": String(meta?.attempt ?? 0)
+  };
   try {
     await deps.producer.send({
       topic: dlqTopic,
-      messages: [{ value: rawMessage }]
+      messages: [{ value: rawMessage, headers }]
     });
     deps.logger.warn(`Message sent to DLQ: ${dlqTopic}`);
   } catch (error) {
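Note on the hunks above: `sendToDlq` now stamps every DLQ record with `x-dlq-*` diagnostic headers, and a validation failure without a DLQ falls through to the new `onMessageLost` hook. A minimal sketch of reading those headers back from a `.dlq` topic; the header names come from the diff, while the handler wiring, envelope shape, and the pre-built `client` are assumptions:

```ts
// Hypothetical DLQ consumer. Only the x-dlq-* header names are taken
// from this diff; everything else is illustrative.
await client.startConsumer(["orders.dlq"], async (envelope) => {
  const h = envelope.headers ?? {};
  console.log({
    originalTopic: h["x-dlq-original-topic"],
    failedAt: h["x-dlq-failed-at"],
    error: h["x-dlq-error-message"],
    attempts: Number(h["x-dlq-attempt-count"] ?? 0),
  });
});
```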
@@ -205,10 +221,45 @@ async function sendToDlq(topic2, rawMessage, deps) {
     );
   }
 }
+var RETRY_HEADER_ATTEMPT = "x-retry-attempt";
+var RETRY_HEADER_AFTER = "x-retry-after";
+var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
+var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
+async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
+  const retryTopic = `${originalTopic}.retry`;
+  const {
+    [RETRY_HEADER_ATTEMPT]: _a,
+    [RETRY_HEADER_AFTER]: _b,
+    [RETRY_HEADER_MAX_RETRIES]: _c,
+    [RETRY_HEADER_ORIGINAL_TOPIC]: _d,
+    ...userHeaders
+  } = originalHeaders;
+  const headers = {
+    ...userHeaders,
+    [RETRY_HEADER_ATTEMPT]: String(attempt),
+    [RETRY_HEADER_AFTER]: String(Date.now() + delayMs),
+    [RETRY_HEADER_MAX_RETRIES]: String(maxRetries),
+    [RETRY_HEADER_ORIGINAL_TOPIC]: originalTopic
+  };
+  try {
+    for (const raw of rawMessages) {
+      await deps.producer.send({ topic: retryTopic, messages: [{ value: raw, headers }] });
+    }
+    deps.logger.warn(
+      `Message queued in retry topic ${retryTopic} (attempt ${attempt}/${maxRetries})`
+    );
+  } catch (error) {
+    deps.logger.error(
+      `Failed to send message to retry topic ${retryTopic}:`,
+      toError(error).stack
+    );
+  }
+}
 async function executeWithRetry(fn, ctx, deps) {
-  const { envelope, rawMessages, interceptors, dlq, retry, isBatch } = ctx;
-  const maxAttempts = retry ? retry.maxRetries + 1 : 1;
+  const { envelope, rawMessages, interceptors, dlq, retry, isBatch, retryTopics } = ctx;
+  const maxAttempts = retryTopics ? 1 : retry ? retry.maxRetries + 1 : 1;
   const backoffMs = retry?.backoffMs ?? 1e3;
+  const maxBackoffMs = retry?.maxBackoffMs ?? 3e4;
   const envelopes = Array.isArray(envelope) ? envelope : [envelope];
   const topic2 = envelopes[0]?.topic ?? "unknown";
   for (let attempt = 1; attempt <= maxAttempts; attempt++) {
@@ -265,14 +316,39 @@ async function executeWithRetry(fn, ctx, deps) {
         `Error processing ${isBatch ? "batch" : "message"} from topic ${topic2} (attempt ${attempt}/${maxAttempts}):`,
         err.stack
       );
-      if (
+      if (retryTopics && retry) {
+        const cap = Math.min(backoffMs, maxBackoffMs);
+        const delay = Math.floor(Math.random() * cap);
+        await sendToRetryTopic(
+          topic2,
+          rawMessages,
+          1,
+          retry.maxRetries,
+          delay,
+          envelopes[0]?.headers ?? {},
+          deps
+        );
+      } else if (isLastAttempt) {
         if (dlq) {
+          const dlqMeta = {
+            error: err,
+            attempt,
+            originalHeaders: envelopes[0]?.headers
+          };
           for (const raw of rawMessages) {
-            await sendToDlq(topic2, raw, deps);
+            await sendToDlq(topic2, raw, deps, dlqMeta);
           }
+        } else {
+          await deps.onMessageLost?.({
+            topic: topic2,
+            error: err,
+            attempt,
+            headers: envelopes[0]?.headers ?? {}
+          });
         }
       } else {
-
+        const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
+        await sleep(Math.random() * cap);
      }
    }
  }
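The in-process retry path above now sleeps with capped exponential backoff plus full jitter: the wait is uniform in `[0, cap)` where `cap = min(backoffMs * 2^(attempt - 1), maxBackoffMs)`. A standalone sketch of that delay computation, using the diff's defaults (1 s base, 30 s cap):

```ts
// Full-jitter exponential backoff, mirroring the diff:
// the cap doubles per attempt, bounded by maxBackoffMs,
// and the actual delay is uniform random below the cap.
function retryDelay(attempt: number, backoffMs = 1_000, maxBackoffMs = 30_000): number {
  const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
  return Math.random() * cap;
}
```

Full jitter de-correlates competing consumers at the cost of occasionally very short waits.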
@@ -314,6 +390,7 @@ var KafkaClient = class {
   schemaRegistry = /* @__PURE__ */ new Map();
   runningConsumers = /* @__PURE__ */ new Map();
   instrumentation;
+  onMessageLost;
   isAdminConnected = false;
   clientId;
   constructor(clientId, groupId, brokers, options) {
@@ -328,6 +405,7 @@ var KafkaClient = class {
     this.strictSchemasEnabled = options?.strictSchemas ?? true;
     this.numPartitions = options?.numPartitions ?? 1;
     this.instrumentation = options?.instrumentation ?? [];
+    this.onMessageLost = options?.onMessageLost;
     this.kafka = new KafkaClass({
       kafkaJS: {
         clientId: this.clientId,
@@ -343,7 +421,7 @@ var KafkaClient = class {
     this.admin = this.kafka.admin();
   }
   async sendMessage(topicOrDesc, message, options = {}) {
-    const payload = this.buildSendPayload(topicOrDesc, [
+    const payload = await this.buildSendPayload(topicOrDesc, [
       {
         value: message,
         key: options.key,
@@ -360,7 +438,7 @@ var KafkaClient = class {
     }
   }
   async sendBatch(topicOrDesc, messages) {
-    const payload = this.buildSendPayload(topicOrDesc, messages);
+    const payload = await this.buildSendPayload(topicOrDesc, messages);
     await this.ensureTopic(payload.topic);
     await this.producer.send(payload);
     for (const inst of this.instrumentation) {
@@ -384,7 +462,7 @@ var KafkaClient = class {
     try {
       const ctx = {
         send: async (topicOrDesc, message, options = {}) => {
-          const payload = this.buildSendPayload(topicOrDesc, [
+          const payload = await this.buildSendPayload(topicOrDesc, [
            {
              value: message,
              key: options.key,
@@ -398,7 +476,7 @@ var KafkaClient = class {
          await tx.send(payload);
        },
        sendBatch: async (topicOrDesc, messages) => {
-          const payload = this.buildSendPayload(topicOrDesc, messages);
+          const payload = await this.buildSendPayload(topicOrDesc, messages);
          await this.ensureTopic(payload.topic);
          await tx.send(payload);
        }
@@ -428,8 +506,13 @@ var KafkaClient = class {
     this.logger.log("Producer disconnected");
   }
   async startConsumer(topics, handleMessage, options = {}) {
-
-
+    if (options.retryTopics && !options.retry) {
+      throw new Error(
+        "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
+      );
+    }
+    const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
+    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
     await consumer.run({
       eachMessage: async ({ topic: topic2, partition, message }) => {
         if (!message.value) {
@@ -439,6 +522,7 @@ var KafkaClient = class {
         const raw = message.value.toString();
         const parsed = parseJsonMessage(raw, topic2, this.logger);
         if (parsed === null) return;
+        const headers = decodeHeaders(message.headers);
         const validated = await validateWithSchema(
           parsed,
           raw,
@@ -446,10 +530,9 @@ var KafkaClient = class {
           schemaMap,
           interceptors,
           dlq,
-          deps
+          { ...deps, originalHeaders: headers }
         );
         if (validated === null) return;
-        const headers = decodeHeaders(message.headers);
         const envelope = extractEnvelope(
           validated,
           headers,
@@ -462,16 +545,27 @@ var KafkaClient = class {
             { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
             () => handleMessage(envelope)
           ),
-          { envelope, rawMessages: [raw], interceptors, dlq, retry },
+          { envelope, rawMessages: [raw], interceptors, dlq, retry, retryTopics: options.retryTopics },
           deps
         );
       }
     });
     this.runningConsumers.set(gid, "eachMessage");
+    if (options.retryTopics && retry) {
+      await this.startRetryTopicConsumers(
+        topicNames,
+        gid,
+        handleMessage,
+        retry,
+        dlq,
+        interceptors,
+        schemaMap
+      );
+    }
   }
   async startBatchConsumer(topics, handleBatch, options = {}) {
     const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
-    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation };
+    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
     await consumer.run({
       eachBatch: async ({
         batch,
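The `startConsumer` hunks above add a hard guard (`retryTopics` without `retry` now throws) and the wiring that spins up companion `<topic>.retry` consumers. A hedged usage sketch; the constructor signature and the `retry`/`retryTopics` option names are taken from this diff, while the import, the `dlq` flag shape, and the handler body are assumptions:

```ts
import { KafkaClient } from "@drarzter/kafka-client"; // assumed export name/path

const client = new KafkaClient("billing", "billing-group", ["localhost:9092"]);

await client.startConsumer(
  ["invoices"],
  async (envelope) => { /* process one message */ },
  {
    // Omitting `retry` while setting `retryTopics` now throws at startup.
    retry: { maxRetries: 3, backoffMs: 1_000, maxBackoffMs: 30_000 },
    retryTopics: true, // failures hop to "invoices.retry" instead of blocking the partition
    dlq: true, // assumed boolean flag; the diff only shows `dlq` used as truthy
  }
);
```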
@@ -491,6 +585,7 @@ var KafkaClient = class {
           const raw = message.value.toString();
           const parsed = parseJsonMessage(raw, batch.topic, this.logger);
           if (parsed === null) continue;
+          const headers = decodeHeaders(message.headers);
           const validated = await validateWithSchema(
             parsed,
             raw,
@@ -498,10 +593,9 @@ var KafkaClient = class {
             schemaMap,
             interceptors,
             dlq,
-            deps
+            { ...deps, originalHeaders: headers }
           );
           if (validated === null) continue;
-          const headers = decodeHeaders(message.headers);
           envelopes.push(
             extractEnvelope(validated, headers, batch.topic, batch.partition, message.offset)
           );
@@ -532,15 +626,28 @@ var KafkaClient = class {
     this.runningConsumers.set(gid, "eachBatch");
   }
   // ── Consumer lifecycle ───────────────────────────────────────────
-  async stopConsumer() {
-
-
-
+  async stopConsumer(groupId) {
+    if (groupId !== void 0) {
+      const consumer = this.consumers.get(groupId);
+      if (!consumer) {
+        this.logger.warn(`stopConsumer: no active consumer for group "${groupId}"`);
+        return;
+      }
+      await consumer.disconnect().catch(() => {
+      });
+      this.consumers.delete(groupId);
+      this.runningConsumers.delete(groupId);
+      this.logger.log(`Consumer disconnected: group "${groupId}"`);
+    } else {
+      const tasks = Array.from(this.consumers.values()).map(
+        (c) => c.disconnect().catch(() => {
+        })
+      );
+      await Promise.allSettled(tasks);
+      this.consumers.clear();
+      this.runningConsumers.clear();
+      this.logger.log("All consumers disconnected");
     }
-    await Promise.allSettled(tasks);
-    this.consumers.clear();
-    this.runningConsumers.clear();
-    this.logger.log("All consumers disconnected");
   }
   /** Check broker connectivity and return status, clientId, and available topics. */
   async checkStatus() {
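`stopConsumer` gained an optional group id: with an argument it disconnects that group only (and warns if the group is not running); with no argument it keeps the old stop-everything behaviour. Illustrative calls, with group ids made up for the example:

```ts
await client.stopConsumer("billing-group");       // stop a single consumer group
await client.stopConsumer("billing-group-retry"); // the auto-started retry group is tracked separately
await client.stopConsumer();                      // no argument: disconnect every consumer
```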
@@ -573,7 +680,164 @@ var KafkaClient = class {
     this.runningConsumers.clear();
     this.logger.log("All connections closed");
   }
+  // ── Retry topic chain ────────────────────────────────────────────
+  /**
+   * Auto-start companion consumers on `<topic>.retry` for each original topic.
+   * Called by `startConsumer` when `retryTopics: true`.
+   *
+   * Flow per message:
+   * 1. Sleep until `x-retry-after` (scheduled by the main consumer or previous retry hop)
+   * 2. Call the original handler
+   * 3. On failure: if retries remain → re-send to `<originalTopic>.retry` with incremented attempt
+   *    if exhausted → DLQ or onMessageLost
+   */
+  async startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap) {
+    const retryTopicNames = originalTopics.map((t) => `${t}.retry`);
+    const retryGroupId = `${originalGroupId}-retry`;
+    const backoffMs = retry.backoffMs ?? 1e3;
+    const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
+    const deps = {
+      logger: this.logger,
+      producer: this.producer,
+      instrumentation: this.instrumentation,
+      onMessageLost: this.onMessageLost
+    };
+    for (const rt of retryTopicNames) {
+      await this.ensureTopic(rt);
+    }
+    const consumer = this.getOrCreateConsumer(retryGroupId, false, true);
+    await consumer.connect();
+    await subscribeWithRetry(consumer, retryTopicNames, this.logger);
+    await consumer.run({
+      eachMessage: async ({ topic: retryTopic, partition, message }) => {
+        if (!message.value) return;
+        const raw = message.value.toString();
+        const parsed = parseJsonMessage(raw, retryTopic, this.logger);
+        if (parsed === null) return;
+        const headers = decodeHeaders(message.headers);
+        const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? retryTopic.replace(/\.retry$/, "");
+        const currentAttempt = parseInt(
+          headers[RETRY_HEADER_ATTEMPT] ?? "1",
+          10
+        );
+        const maxRetries = parseInt(
+          headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
+          10
+        );
+        const retryAfter = parseInt(
+          headers[RETRY_HEADER_AFTER] ?? "0",
+          10
+        );
+        const remaining = retryAfter - Date.now();
+        if (remaining > 0) {
+          consumer.pause([{ topic: retryTopic, partitions: [partition] }]);
+          await sleep(remaining);
+          consumer.resume([{ topic: retryTopic, partitions: [partition] }]);
+        }
+        const validated = await validateWithSchema(
+          parsed,
+          raw,
+          originalTopic,
+          schemaMap,
+          interceptors,
+          dlq,
+          { ...deps, originalHeaders: headers }
+        );
+        if (validated === null) return;
+        const envelope = extractEnvelope(
+          validated,
+          headers,
+          originalTopic,
+          partition,
+          message.offset
+        );
+        try {
+          const cleanups = [];
+          for (const inst of this.instrumentation) {
+            const c = inst.beforeConsume?.(envelope);
+            if (typeof c === "function") cleanups.push(c);
+          }
+          for (const interceptor of interceptors) await interceptor.before?.(envelope);
+          await runWithEnvelopeContext(
+            { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
+            () => handleMessage(envelope)
+          );
+          for (const interceptor of interceptors) await interceptor.after?.(envelope);
+          for (const cleanup of cleanups) cleanup();
+        } catch (error) {
+          const err = toError(error);
+          const nextAttempt = currentAttempt + 1;
+          const exhausted = currentAttempt >= maxRetries;
+          for (const inst of this.instrumentation) inst.onConsumeError?.(envelope, err);
+          const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(originalTopic, [envelope.payload], maxRetries, { cause: err }) : err;
+          for (const interceptor of interceptors) {
+            await interceptor.onError?.(envelope, reportedError);
+          }
+          this.logger.error(
+            `Retry consumer error for ${originalTopic} (attempt ${currentAttempt}/${maxRetries}):`,
+            err.stack
+          );
+          if (!exhausted) {
+            const cap = Math.min(backoffMs * 2 ** currentAttempt, maxBackoffMs);
+            const delay = Math.floor(Math.random() * cap);
+            await sendToRetryTopic(
+              originalTopic,
+              [raw],
+              nextAttempt,
+              maxRetries,
+              delay,
+              headers,
+              deps
+            );
+          } else if (dlq) {
+            await sendToDlq(originalTopic, raw, deps, {
+              error: err,
+              // +1 to account for the main consumer's initial attempt before
+              // routing to the retry topic, making this consistent with the
+              // in-process retry path where attempt counts all tries.
+              attempt: currentAttempt + 1,
+              originalHeaders: headers
+            });
+          } else {
+            await deps.onMessageLost?.({
+              topic: originalTopic,
+              error: err,
+              attempt: currentAttempt,
+              headers
+            });
+          }
+        }
+      }
+    });
+    this.runningConsumers.set(retryGroupId, "eachMessage");
+    await this.waitForPartitionAssignment(consumer, retryTopicNames);
+    this.logger.log(
+      `Retry topic consumers started for: ${originalTopics.join(", ")} (group: ${retryGroupId})`
+    );
+  }
   // ── Private helpers ──────────────────────────────────────────────
+  /**
+   * Poll `consumer.assignment()` until the consumer has received at least one
+   * partition for the given topics, then return. Logs a warning and returns
+   * (rather than throwing) on timeout so that a slow broker does not break
+   * the caller — in the worst case a message sent immediately after would be
+   * missed, which is the same behaviour as before this guard was added.
+   */
+  async waitForPartitionAssignment(consumer, topics, timeoutMs = 1e4) {
+    const topicSet = new Set(topics);
+    const deadline = Date.now() + timeoutMs;
+    while (Date.now() < deadline) {
+      try {
+        const assigned = consumer.assignment();
+        if (assigned.some((a) => topicSet.has(a.topic))) return;
+      } catch {
+      }
+      await sleep(200);
+    }
+    this.logger.warn(
+      `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
+    );
+  }
   getOrCreateConsumer(groupId, fromBeginning, autoCommit) {
     if (!this.consumers.has(groupId)) {
       this.consumers.set(
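The retry-chain hunk above is also where `onMessageLost` becomes the terminal hook: it fires whenever a message is dropped without a DLQ. Its payload shape, `{ topic, error, attempt, headers }`, is read off the call sites in this diff, and the option wiring matches `options?.onMessageLost` in the constructor hunk; the logging body and the pre-built arguments are illustrative:

```ts
const client = new KafkaClient("billing", "billing-group", ["localhost:9092"], {
  // Validation failures arrive with attempt 0; exhausted retries
  // arrive with the final attempt count.
  onMessageLost: async ({ topic, error, attempt, headers }) => {
    console.error(`message lost on ${topic} (attempt ${attempt})`, error.message, headers);
  },
});
```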
@@ -611,13 +875,13 @@ var KafkaClient = class {
     }
   }
   /** Validate message against schema. Pure — no side-effects on registry. */
-  validateMessage(topicOrDesc, message) {
+  async validateMessage(topicOrDesc, message) {
     if (topicOrDesc?.__schema) {
-      return topicOrDesc.__schema.parse(message);
+      return await topicOrDesc.__schema.parse(message);
     }
     if (this.strictSchemasEnabled && typeof topicOrDesc === "string") {
       const schema = this.schemaRegistry.get(topicOrDesc);
-      if (schema) return schema.parse(message);
+      if (schema) return await schema.parse(message);
     }
     return message;
   }
@@ -626,12 +890,11 @@ var KafkaClient = class {
    * Handles: topic resolution, schema registration, validation, JSON serialization,
    * envelope header generation, and instrumentation hooks.
    */
-  buildSendPayload(topicOrDesc, messages) {
+  async buildSendPayload(topicOrDesc, messages) {
     this.registerSchema(topicOrDesc);
     const topic2 = this.resolveTopicName(topicOrDesc);
-
-
-      messages: messages.map((m) => {
+    const builtMessages = await Promise.all(
+      messages.map(async (m) => {
         const envelopeHeaders = buildEnvelopeHeaders({
           correlationId: m.correlationId,
           schemaVersion: m.schemaVersion,
@@ -642,12 +905,13 @@ var KafkaClient = class {
          inst.beforeSend?.(topic2, envelopeHeaders);
        }
        return {
-          value: JSON.stringify(this.validateMessage(topicOrDesc, m.value)),
+          value: JSON.stringify(await this.validateMessage(topicOrDesc, m.value)),
          key: m.key ?? null,
          headers: envelopeHeaders
        };
      })
-
+    );
+    return { topic: topic2, messages: builtMessages };
  }
  /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
  async setupConsumer(topics, mode, options) {
|