@drarzter/kafka-client 0.5.1 → 0.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +127 -4
- package/dist/{chunk-P7GY4BLV.mjs → chunk-3QXTW66R.mjs} +304 -40
- package/dist/chunk-3QXTW66R.mjs.map +1 -0
- package/dist/core.d.mts +23 -3
- package/dist/core.d.ts +23 -3
- package/dist/core.js +303 -39
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/{envelope-CPX1qudy.d.mts → envelope-BR8d1m8c.d.mts} +43 -4
- package/dist/{envelope-CPX1qudy.d.ts → envelope-BR8d1m8c.d.ts} +43 -4
- package/dist/index.d.mts +10 -7
- package/dist/index.d.ts +10 -7
- package/dist/index.js +303 -39
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/index.mjs.map +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/otel.js +1 -1
- package/dist/otel.js.map +1 -1
- package/dist/otel.mjs +1 -1
- package/dist/otel.mjs.map +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/package.json +1 -1
- package/dist/chunk-P7GY4BLV.mjs.map +0 -1
package/dist/core.js
CHANGED
```diff
@@ -155,7 +155,7 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
   const schema = schemaMap.get(topic2);
   if (!schema) return message;
   try {
-    return schema.parse(message);
+    return await schema.parse(message);
   } catch (error) {
     const err = toError(error);
     const validationError = new KafkaValidationError(topic2, message, {
```
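The only functional change in this hunk is the added `await`, but it is load-bearing: `schema.parse` may now return a promise, so schemas with asynchronous validation work on the consume path. A minimal sketch of what that enables, assuming only the duck-typed `{ parse }` contract the code calls against; the zod import and the `isEmailBanned` helper are illustrative, not part of this package:

```ts
import { z } from "zod";

// Hypothetical async check; stands in for any I/O-bound validation step.
async function isEmailBanned(_email: string): Promise<boolean> {
  return false;
}

const userCreated = z
  .object({ userId: z.string(), email: z.string().email() })
  .refine(async (m) => !(await isEmailBanned(m.email)), { message: "banned email" });

// zod requires parseAsync() once a refinement is async, so a thin adapter
// exposes it under the `parse` name that validateWithSchema awaits:
const userCreatedSchema = {
  parse: (message: unknown) => userCreated.parseAsync(message),
};
```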
```diff
@@ -165,20 +165,36 @@ async function validateWithSchema(message, raw, topic2, schemaMap, interceptors,
       `Schema validation failed for topic ${topic2}:`,
       err.message
     );
-    if (dlq)
-      await sendToDlq(topic2, raw, deps);
+    if (dlq) {
+      await sendToDlq(topic2, raw, deps, {
+        error: validationError,
+        attempt: 0,
+        originalHeaders: deps.originalHeaders
+      });
+    } else {
+      await deps.onMessageLost?.({ topic: topic2, error: validationError, attempt: 0, headers: deps.originalHeaders ?? {} });
+    }
+    const errorEnvelope = extractEnvelope(message, deps.originalHeaders ?? {}, topic2, -1, "");
     for (const interceptor of interceptors) {
       await interceptor.onError?.(errorEnvelope, validationError);
     }
     return null;
   }
 }
-async function sendToDlq(topic2, rawMessage, deps) {
+async function sendToDlq(topic2, rawMessage, deps, meta) {
   const dlqTopic = `${topic2}.dlq`;
+  const headers = {
+    ...meta?.originalHeaders ?? {},
+    "x-dlq-original-topic": topic2,
+    "x-dlq-failed-at": (/* @__PURE__ */ new Date()).toISOString(),
+    "x-dlq-error-message": meta?.error.message ?? "unknown",
+    "x-dlq-error-stack": meta?.error.stack?.slice(0, 2e3) ?? "",
+    "x-dlq-attempt-count": String(meta?.attempt ?? 0)
+  };
   try {
     await deps.producer.send({
       topic: dlqTopic,
-      messages: [{ value: rawMessage }]
+      messages: [{ value: rawMessage, headers }]
     });
     deps.logger.warn(`Message sent to DLQ: ${dlqTopic}`);
   } catch (error) {
```
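Dead-lettered messages now keep the original headers and gain diagnostic `x-dlq-*` headers: source topic, failure timestamp, error message, a stack capped at 2000 characters, and the attempt count. A hedged sketch of a DLQ drainer that reads them; it assumes consumer envelopes expose the decoded headers, which the `extractEnvelope(validated, headers, ...)` calls later in this diff suggest:

```ts
// `client` stands for a connected KafkaClient; construction elided.
declare const client: {
  startConsumer(
    topics: string[],
    handler: (envelope: { headers?: Record<string, string>; payload: unknown }) => Promise<void>
  ): Promise<void>;
};

await client.startConsumer(["orders.dlq"], async (envelope) => {
  const h = envelope.headers ?? {};
  console.error("dead-lettered message", {
    originalTopic: h["x-dlq-original-topic"],
    failedAt: h["x-dlq-failed-at"],
    error: h["x-dlq-error-message"],
    attempts: h["x-dlq-attempt-count"],
    payload: envelope.payload,
  });
});
```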
```diff
@@ -188,10 +204,45 @@ async function sendToDlq(topic2, rawMessage, deps) {
     );
   }
 }
+var RETRY_HEADER_ATTEMPT = "x-retry-attempt";
+var RETRY_HEADER_AFTER = "x-retry-after";
+var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
+var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
+async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
+  const retryTopic = `${originalTopic}.retry`;
+  const {
+    [RETRY_HEADER_ATTEMPT]: _a,
+    [RETRY_HEADER_AFTER]: _b,
+    [RETRY_HEADER_MAX_RETRIES]: _c,
+    [RETRY_HEADER_ORIGINAL_TOPIC]: _d,
+    ...userHeaders
+  } = originalHeaders;
+  const headers = {
+    ...userHeaders,
+    [RETRY_HEADER_ATTEMPT]: String(attempt),
+    [RETRY_HEADER_AFTER]: String(Date.now() + delayMs),
+    [RETRY_HEADER_MAX_RETRIES]: String(maxRetries),
+    [RETRY_HEADER_ORIGINAL_TOPIC]: originalTopic
+  };
+  try {
+    for (const raw of rawMessages) {
+      await deps.producer.send({ topic: retryTopic, messages: [{ value: raw, headers }] });
+    }
+    deps.logger.warn(
+      `Message queued in retry topic ${retryTopic} (attempt ${attempt}/${maxRetries})`
+    );
+  } catch (error) {
+    deps.logger.error(
+      `Failed to send message to retry topic ${retryTopic}:`,
+      toError(error).stack
+    );
+  }
+}
 async function executeWithRetry(fn, ctx, deps) {
-  const { envelope, rawMessages, interceptors, dlq, retry, isBatch } = ctx;
-  const maxAttempts = retry ? retry.maxRetries + 1 : 1;
+  const { envelope, rawMessages, interceptors, dlq, retry, isBatch, retryTopics } = ctx;
+  const maxAttempts = retryTopics ? 1 : retry ? retry.maxRetries + 1 : 1;
   const backoffMs = retry?.backoffMs ?? 1e3;
+  const maxBackoffMs = retry?.maxBackoffMs ?? 3e4;
   const envelopes = Array.isArray(envelope) ? envelope : [envelope];
   const topic2 = envelopes[0]?.topic ?? "unknown";
   for (let attempt = 1; attempt <= maxAttempts; attempt++) {
```
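`sendToRetryTopic` deliberately destructures any stale `x-retry-*` keys out of the incoming headers before stamping fresh values, so attempt state never accumulates across hops. The header state a message carries on, say, its second hop looks roughly like this (values illustrative, header names from the constants above):

```ts
const exampleRetryHeaders = {
  "x-retry-attempt": "2",                      // 1-based hop counter
  "x-retry-after": String(Date.now() + 1500),  // absolute epoch-ms "not before" deadline
  "x-retry-max-retries": "3",
  "x-retry-original-topic": "orders",          // topic the handler logically consumes
};
```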
```diff
@@ -248,14 +299,39 @@ async function executeWithRetry(fn, ctx, deps) {
       `Error processing ${isBatch ? "batch" : "message"} from topic ${topic2} (attempt ${attempt}/${maxAttempts}):`,
       err.stack
     );
-    if (isLastAttempt) {
+    if (retryTopics && retry) {
+      const cap = Math.min(backoffMs, maxBackoffMs);
+      const delay = Math.floor(Math.random() * cap);
+      await sendToRetryTopic(
+        topic2,
+        rawMessages,
+        1,
+        retry.maxRetries,
+        delay,
+        envelopes[0]?.headers ?? {},
+        deps
+      );
+    } else if (isLastAttempt) {
       if (dlq) {
+        const dlqMeta = {
+          error: err,
+          attempt,
+          originalHeaders: envelopes[0]?.headers
+        };
         for (const raw of rawMessages) {
-          await sendToDlq(topic2, raw, deps);
+          await sendToDlq(topic2, raw, deps, dlqMeta);
         }
+      } else {
+        await deps.onMessageLost?.({
+          topic: topic2,
+          error: err,
+          attempt,
+          headers: envelopes[0]?.headers ?? {}
+        });
       }
     } else {
-
+      const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
+      await sleep(Math.random() * cap);
     }
   }
 }
```
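Two behavioural changes land in `executeWithRetry`: with `retryTopics` enabled the in-process loop makes exactly one attempt (`maxAttempts` becomes 1) and hands the failure to the retry topic, and the in-process sleep becomes capped full-jitter exponential backoff. The delay math, isolated, with defaults taken from the `?? 1e3` and `?? 3e4` fallbacks above:

```ts
// Full jitter: the cap doubles per attempt and the actual sleep is drawn
// uniformly from [0, cap). The retry-topic path additionally floors the result.
function retryDelayMs(attempt: number, backoffMs = 1_000, maxBackoffMs = 30_000): number {
  const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
  return Math.random() * cap;
}
// attempt 1 → up to 1 s, attempt 2 → up to 2 s, attempt 3 → up to 4 s, capped at 30 s
```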
```diff
@@ -297,6 +373,7 @@ var KafkaClient = class {
   schemaRegistry = /* @__PURE__ */ new Map();
   runningConsumers = /* @__PURE__ */ new Map();
   instrumentation;
+  onMessageLost;
   isAdminConnected = false;
   clientId;
   constructor(clientId, groupId, brokers, options) {
```
```diff
@@ -311,6 +388,7 @@ var KafkaClient = class {
     this.strictSchemasEnabled = options?.strictSchemas ?? true;
     this.numPartitions = options?.numPartitions ?? 1;
     this.instrumentation = options?.instrumentation ?? [];
+    this.onMessageLost = options?.onMessageLost;
     this.kafka = new KafkaClass({
       kafkaJS: {
         clientId: this.clientId,
```
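`onMessageLost` is the new last-resort hook: it fires whenever a message would previously have vanished silently, that is, on validation failure or exhausted retries with no DLQ configured. A sketch of wiring it up; the constructor signature matches the one above, the root `KafkaClient` export is assumed from `dist/index.js`, and `metrics`/`log` are hypothetical stand-ins for your own observability:

```ts
import { KafkaClient } from "@drarzter/kafka-client";

// Hypothetical observability stand-ins, not part of the package.
const metrics = { increment: (_name: string, _tags?: Record<string, string>) => {} };
const log = { error: (...args: unknown[]) => console.error(...args) };

const client = new KafkaClient("billing-svc", "billing-group", ["localhost:9092"], {
  onMessageLost: ({ topic, error, attempt, headers }) => {
    metrics.increment("kafka.message_lost", { topic });
    log.error(`lost on ${topic} after ${attempt} attempt(s): ${error.message}`, headers);
  },
});
```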
```diff
@@ -326,7 +404,7 @@ var KafkaClient = class {
     this.admin = this.kafka.admin();
   }
   async sendMessage(topicOrDesc, message, options = {}) {
-    const payload = this.buildSendPayload(topicOrDesc, [
+    const payload = await this.buildSendPayload(topicOrDesc, [
      {
        value: message,
        key: options.key,
```
```diff
@@ -343,7 +421,7 @@ var KafkaClient = class {
     }
   }
   async sendBatch(topicOrDesc, messages) {
-    const payload = this.buildSendPayload(topicOrDesc, messages);
+    const payload = await this.buildSendPayload(topicOrDesc, messages);
     await this.ensureTopic(payload.topic);
     await this.producer.send(payload);
     for (const inst of this.instrumentation) {
```
```diff
@@ -367,7 +445,7 @@ var KafkaClient = class {
     try {
       const ctx = {
         send: async (topicOrDesc, message, options = {}) => {
-          const payload = this.buildSendPayload(topicOrDesc, [
+          const payload = await this.buildSendPayload(topicOrDesc, [
            {
              value: message,
              key: options.key,
```
```diff
@@ -381,7 +459,7 @@ var KafkaClient = class {
           await tx.send(payload);
         },
         sendBatch: async (topicOrDesc, messages) => {
-          const payload = this.buildSendPayload(topicOrDesc, messages);
+          const payload = await this.buildSendPayload(topicOrDesc, messages);
           await this.ensureTopic(payload.topic);
           await tx.send(payload);
         }
```
```diff
@@ -411,8 +489,13 @@ var KafkaClient = class {
     this.logger.log("Producer disconnected");
   }
   async startConsumer(topics, handleMessage, options = {}) {
-    const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
-    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation };
+    if (options.retryTopics && !options.retry) {
+      throw new Error(
+        "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
+      );
+    }
+    const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
+    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
     await consumer.run({
       eachMessage: async ({ topic: topic2, partition, message }) => {
         if (!message.value) {
```
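Enabling the new option pair looks like this; the guard above makes `retryTopics` without `retry` a hard error. A hedged sketch reusing the `client` from the previous example (the boolean `dlq` shape is an assumption this diff does not confirm):

```ts
await client.startConsumer(["orders"], handleOrder, {
  retryTopics: true, // failures hop to orders.retry instead of retrying in-process
  retry: { maxRetries: 3, backoffMs: 1_000, maxBackoffMs: 30_000 },
  dlq: true,         // exhausted messages land on orders.dlq (shape assumed)
});

async function handleOrder(envelope: { payload: unknown }): Promise<void> {
  // business logic; throwing here routes the raw message to orders.retry
}
```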
```diff
@@ -422,6 +505,7 @@ var KafkaClient = class {
         const raw = message.value.toString();
         const parsed = parseJsonMessage(raw, topic2, this.logger);
         if (parsed === null) return;
+        const headers = decodeHeaders(message.headers);
         const validated = await validateWithSchema(
           parsed,
           raw,
```
```diff
@@ -429,10 +513,9 @@ var KafkaClient = class {
           schemaMap,
           interceptors,
           dlq,
-          deps
+          { ...deps, originalHeaders: headers }
         );
         if (validated === null) return;
-        const headers = decodeHeaders(message.headers);
         const envelope = extractEnvelope(
           validated,
           headers,
```
```diff
@@ -445,16 +528,27 @@ var KafkaClient = class {
             { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
             () => handleMessage(envelope)
           ),
-          { envelope, rawMessages: [raw], interceptors, dlq, retry },
+          { envelope, rawMessages: [raw], interceptors, dlq, retry, retryTopics: options.retryTopics },
           deps
         );
       }
     });
     this.runningConsumers.set(gid, "eachMessage");
+    if (options.retryTopics && retry) {
+      await this.startRetryTopicConsumers(
+        topicNames,
+        gid,
+        handleMessage,
+        retry,
+        dlq,
+        interceptors,
+        schemaMap
+      );
+    }
   }
   async startBatchConsumer(topics, handleBatch, options = {}) {
     const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
-    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation };
+    const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
     await consumer.run({
       eachBatch: async ({
         batch,
```
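With `retryTopics` enabled, a single `startConsumer` call therefore fans out into a small topology; the names below are derived from the calls in this hunk and the retry-chain code further down:

```
orders        ← consumer group <gid>          one attempt, then hand-off
orders.retry  ← consumer group <gid>-retry    delayed re-attempts gated by x-retry-after
orders.dlq    ← producer-only                 written once retries are exhausted (when dlq is set)
```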
```diff
@@ -474,6 +568,7 @@ var KafkaClient = class {
           const raw = message.value.toString();
           const parsed = parseJsonMessage(raw, batch.topic, this.logger);
           if (parsed === null) continue;
+          const headers = decodeHeaders(message.headers);
           const validated = await validateWithSchema(
             parsed,
             raw,
```
```diff
@@ -481,10 +576,9 @@ var KafkaClient = class {
             schemaMap,
             interceptors,
             dlq,
-            deps
+            { ...deps, originalHeaders: headers }
           );
           if (validated === null) continue;
-          const headers = decodeHeaders(message.headers);
           envelopes.push(
             extractEnvelope(validated, headers, batch.topic, batch.partition, message.offset)
           );
```
```diff
@@ -515,15 +609,28 @@ var KafkaClient = class {
     this.runningConsumers.set(gid, "eachBatch");
   }
   // ── Consumer lifecycle ───────────────────────────────────────────
-  async stopConsumer() {
-    const tasks = Array.from(this.consumers.values()).map(
-      (c) => c.disconnect().catch(() => {
-      })
+  async stopConsumer(groupId) {
+    if (groupId !== void 0) {
+      const consumer = this.consumers.get(groupId);
+      if (!consumer) {
+        this.logger.warn(`stopConsumer: no active consumer for group "${groupId}"`);
+        return;
+      }
+      await consumer.disconnect().catch(() => {
+      });
+      this.consumers.delete(groupId);
+      this.runningConsumers.delete(groupId);
+      this.logger.log(`Consumer disconnected: group "${groupId}"`);
+    } else {
+      const tasks = Array.from(this.consumers.values()).map(
+        (c) => c.disconnect().catch(() => {
+        })
+      );
+      await Promise.allSettled(tasks);
+      this.consumers.clear();
+      this.runningConsumers.clear();
+      this.logger.log("All consumers disconnected");
     }
-    await Promise.allSettled(tasks);
-    this.consumers.clear();
-    this.runningConsumers.clear();
-    this.logger.log("All consumers disconnected");
   }
   /** Check broker connectivity and return status, clientId, and available topics. */
   async checkStatus() {
```
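`stopConsumer` now takes an optional group id; called with no argument it keeps the old stop-everything behaviour, and an unknown group logs a warning instead of throwing:

```ts
await client.stopConsumer("billing-group"); // disconnect one consumer group
await client.stopConsumer();                // previous behaviour: disconnect all
```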
```diff
@@ -556,7 +663,164 @@ var KafkaClient = class {
     this.runningConsumers.clear();
     this.logger.log("All connections closed");
   }
+  // ── Retry topic chain ────────────────────────────────────────────
+  /**
+   * Auto-start companion consumers on `<topic>.retry` for each original topic.
+   * Called by `startConsumer` when `retryTopics: true`.
+   *
+   * Flow per message:
+   * 1. Sleep until `x-retry-after` (scheduled by the main consumer or previous retry hop)
+   * 2. Call the original handler
+   * 3. On failure: if retries remain → re-send to `<originalTopic>.retry` with incremented attempt
+   *    if exhausted → DLQ or onMessageLost
+   */
+  async startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap) {
+    const retryTopicNames = originalTopics.map((t) => `${t}.retry`);
+    const retryGroupId = `${originalGroupId}-retry`;
+    const backoffMs = retry.backoffMs ?? 1e3;
+    const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
+    const deps = {
+      logger: this.logger,
+      producer: this.producer,
+      instrumentation: this.instrumentation,
+      onMessageLost: this.onMessageLost
+    };
+    for (const rt of retryTopicNames) {
+      await this.ensureTopic(rt);
+    }
+    const consumer = this.getOrCreateConsumer(retryGroupId, false, true);
+    await consumer.connect();
+    await subscribeWithRetry(consumer, retryTopicNames, this.logger);
+    await consumer.run({
+      eachMessage: async ({ topic: retryTopic, partition, message }) => {
+        if (!message.value) return;
+        const raw = message.value.toString();
+        const parsed = parseJsonMessage(raw, retryTopic, this.logger);
+        if (parsed === null) return;
+        const headers = decodeHeaders(message.headers);
+        const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? retryTopic.replace(/\.retry$/, "");
+        const currentAttempt = parseInt(
+          headers[RETRY_HEADER_ATTEMPT] ?? "1",
+          10
+        );
+        const maxRetries = parseInt(
+          headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
+          10
+        );
+        const retryAfter = parseInt(
+          headers[RETRY_HEADER_AFTER] ?? "0",
+          10
+        );
+        const remaining = retryAfter - Date.now();
+        if (remaining > 0) {
+          consumer.pause([{ topic: retryTopic, partitions: [partition] }]);
+          await sleep(remaining);
+          consumer.resume([{ topic: retryTopic, partitions: [partition] }]);
+        }
+        const validated = await validateWithSchema(
+          parsed,
+          raw,
+          originalTopic,
+          schemaMap,
+          interceptors,
+          dlq,
+          { ...deps, originalHeaders: headers }
+        );
+        if (validated === null) return;
+        const envelope = extractEnvelope(
+          validated,
+          headers,
+          originalTopic,
+          partition,
+          message.offset
+        );
+        try {
+          const cleanups = [];
+          for (const inst of this.instrumentation) {
+            const c = inst.beforeConsume?.(envelope);
+            if (typeof c === "function") cleanups.push(c);
+          }
+          for (const interceptor of interceptors) await interceptor.before?.(envelope);
+          await runWithEnvelopeContext(
+            { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
+            () => handleMessage(envelope)
+          );
+          for (const interceptor of interceptors) await interceptor.after?.(envelope);
+          for (const cleanup of cleanups) cleanup();
+        } catch (error) {
+          const err = toError(error);
+          const nextAttempt = currentAttempt + 1;
+          const exhausted = currentAttempt >= maxRetries;
+          for (const inst of this.instrumentation) inst.onConsumeError?.(envelope, err);
+          const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(originalTopic, [envelope.payload], maxRetries, { cause: err }) : err;
+          for (const interceptor of interceptors) {
+            await interceptor.onError?.(envelope, reportedError);
+          }
+          this.logger.error(
+            `Retry consumer error for ${originalTopic} (attempt ${currentAttempt}/${maxRetries}):`,
+            err.stack
+          );
+          if (!exhausted) {
+            const cap = Math.min(backoffMs * 2 ** currentAttempt, maxBackoffMs);
+            const delay = Math.floor(Math.random() * cap);
+            await sendToRetryTopic(
+              originalTopic,
+              [raw],
+              nextAttempt,
+              maxRetries,
+              delay,
+              headers,
+              deps
+            );
+          } else if (dlq) {
+            await sendToDlq(originalTopic, raw, deps, {
+              error: err,
+              // +1 to account for the main consumer's initial attempt before
+              // routing to the retry topic, making this consistent with the
+              // in-process retry path where attempt counts all tries.
+              attempt: currentAttempt + 1,
+              originalHeaders: headers
+            });
+          } else {
+            await deps.onMessageLost?.({
+              topic: originalTopic,
+              error: err,
+              attempt: currentAttempt,
+              headers
+            });
+          }
+        }
+      }
+    });
+    this.runningConsumers.set(retryGroupId, "eachMessage");
+    await this.waitForPartitionAssignment(consumer, retryTopicNames);
+    this.logger.log(
+      `Retry topic consumers started for: ${originalTopics.join(", ")} (group: ${retryGroupId})`
+    );
+  }
   // ── Private helpers ──────────────────────────────────────────────
+  /**
+   * Poll `consumer.assignment()` until the consumer has received at least one
+   * partition for the given topics, then return. Logs a warning and returns
+   * (rather than throwing) on timeout so that a slow broker does not break
+   * the caller — in the worst case a message sent immediately after would be
+   * missed, which is the same behaviour as before this guard was added.
+   */
+  async waitForPartitionAssignment(consumer, topics, timeoutMs = 1e4) {
+    const topicSet = new Set(topics);
+    const deadline = Date.now() + timeoutMs;
+    while (Date.now() < deadline) {
+      try {
+        const assigned = consumer.assignment();
+        if (assigned.some((a) => topicSet.has(a.topic))) return;
+      } catch {
+      }
+      await sleep(200);
+    }
+    this.logger.warn(
+      `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
+    );
+  }
   getOrCreateConsumer(groupId, fromBeginning, autoCommit) {
     if (!this.consumers.has(groupId)) {
       this.consumers.set(
```
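The retry consumer implements the delay by pausing the partition and sleeping until the `x-retry-after` deadline, which blocks that `eachMessage` invocation until the message is due. The primitive, isolated as a sketch; the `pause`/`resume` shape is taken from the calls above:

```ts
type TopicPartitions = { topic: string; partitions: number[] };

async function delayPartition(
  consumer: { pause(tp: TopicPartitions[]): void; resume(tp: TopicPartitions[]): void },
  topic: string,
  partition: number,
  untilEpochMs: number,
): Promise<void> {
  const remaining = untilEpochMs - Date.now();
  if (remaining <= 0) return; // deadline already passed: process immediately
  consumer.pause([{ topic, partitions: [partition] }]);
  await new Promise((resolve) => setTimeout(resolve, remaining));
  consumer.resume([{ topic, partitions: [partition] }]);
}
```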
```diff
@@ -594,13 +858,13 @@ var KafkaClient = class {
     }
   }
   /** Validate message against schema. Pure — no side-effects on registry. */
-  validateMessage(topicOrDesc, message) {
+  async validateMessage(topicOrDesc, message) {
     if (topicOrDesc?.__schema) {
-      return topicOrDesc.__schema.parse(message);
+      return await topicOrDesc.__schema.parse(message);
     }
     if (this.strictSchemasEnabled && typeof topicOrDesc === "string") {
      const schema = this.schemaRegistry.get(topicOrDesc);
-      if (schema) return schema.parse(message);
+      if (schema) return await schema.parse(message);
     }
     return message;
   }
```
```diff
@@ -609,12 +873,11 @@ var KafkaClient = class {
    * Handles: topic resolution, schema registration, validation, JSON serialization,
    * envelope header generation, and instrumentation hooks.
    */
-  buildSendPayload(topicOrDesc, messages) {
+  async buildSendPayload(topicOrDesc, messages) {
     this.registerSchema(topicOrDesc);
     const topic2 = this.resolveTopicName(topicOrDesc);
-    return {
-      topic: topic2,
-      messages: messages.map((m) => {
+    const builtMessages = await Promise.all(
+      messages.map(async (m) => {
        const envelopeHeaders = buildEnvelopeHeaders({
          correlationId: m.correlationId,
          schemaVersion: m.schemaVersion,
```
```diff
@@ -625,12 +888,13 @@ var KafkaClient = class {
          inst.beforeSend?.(topic2, envelopeHeaders);
        }
        return {
-          value: JSON.stringify(this.validateMessage(topicOrDesc, m.value)),
+          value: JSON.stringify(await this.validateMessage(topicOrDesc, m.value)),
          key: m.key ?? null,
          headers: envelopeHeaders
        };
      })
-    };
+    );
+    return { topic: topic2, messages: builtMessages };
   }
   /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
   async setupConsumer(topics, mode, options) {
```
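`buildSendPayload` had to become async because per-message validation now can be; batch messages are validated concurrently through `Promise.all`, and since `Promise.all` preserves input order, keyed partitioning is unaffected. Caller-facing usage is unchanged, as in this sketch reusing the `client` from earlier (the message shape `{ value, key }` matches the fields destructured above):

```ts
await client.sendBatch("user.created", [
  { value: { userId: "1", email: "one@example.com" }, key: "1" },
  { value: { userId: "2", email: "two@example.com" }, key: "2" },
]);
```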
|