@drarzter/kafka-client 0.5.4 → 0.5.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,947 +0,0 @@
1
- // src/client/kafka.client.ts
2
- import { KafkaJS } from "@confluentinc/kafka-javascript";
3
-
4
- // src/client/envelope.ts
5
- import { AsyncLocalStorage } from "async_hooks";
6
- import { randomUUID } from "crypto";
7
// Envelope header names stamped on every produced message.
const HEADER_EVENT_ID = "x-event-id";
const HEADER_CORRELATION_ID = "x-correlation-id";
const HEADER_TIMESTAMP = "x-timestamp";
const HEADER_SCHEMA_VERSION = "x-schema-version";
const HEADER_TRACEPARENT = "traceparent";
// Per-async-chain store used to propagate correlation/trace context
// from a consumed message into any sends made while handling it.
const envelopeStorage = new AsyncLocalStorage();
13
/** Return the envelope context active on the current async chain, or undefined. */
function getEnvelopeContext() {
  const store = envelopeStorage.getStore();
  return store;
}
16
/**
 * Execute `fn` with `ctx` installed as the ambient envelope context,
 * so sends performed inside `fn` inherit correlationId/traceparent.
 */
function runWithEnvelopeContext(ctx, fn) {
  return envelopeStorage.run(ctx, fn);
}
19
/**
 * Build the standard envelope headers for an outgoing message.
 * Precedence: explicit options > ambient async context > fresh values.
 * `options.headers` (user headers) override everything last.
 */
function buildEnvelopeHeaders(options = {}) {
  const ambient = getEnvelopeContext();
  const headers = {
    [HEADER_EVENT_ID]: options.eventId ?? randomUUID(),
    [HEADER_CORRELATION_ID]:
      options.correlationId ?? ambient?.correlationId ?? randomUUID(),
    [HEADER_TIMESTAMP]: new Date().toISOString(),
    [HEADER_SCHEMA_VERSION]: String(options.schemaVersion ?? 1),
  };
  if (ambient?.traceparent) {
    headers[HEADER_TRACEPARENT] = ambient.traceparent;
  }
  return { ...headers, ...options.headers };
}
36
/**
 * Normalize raw Kafka headers (values may be Buffer, string, arrays of
 * either, or undefined) into a plain string->string record.
 * Array values are joined with ",". Undefined values are dropped.
 */
function decodeHeaders(raw) {
  if (!raw) return {};
  const asText = (v) => (Buffer.isBuffer(v) ? v.toString() : v);
  const decoded = {};
  for (const [name, value] of Object.entries(raw)) {
    if (value === undefined) continue;
    decoded[name] = Array.isArray(value)
      ? value.map(asText).join(",")
      : asText(value);
  }
  return decoded;
}
49
/**
 * Assemble a consumer-side envelope from the decoded payload, headers,
 * and delivery coordinates. Missing envelope headers fall back to fresh
 * UUIDs / current time so downstream code always sees complete metadata.
 */
function extractEnvelope(payload, headers, topicName, partition, offset) {
  return {
    payload,
    topic: topicName,
    partition,
    offset,
    eventId: headers[HEADER_EVENT_ID] ?? randomUUID(),
    correlationId: headers[HEADER_CORRELATION_ID] ?? randomUUID(),
    timestamp: headers[HEADER_TIMESTAMP] ?? new Date().toISOString(),
    schemaVersion: Number(headers[HEADER_SCHEMA_VERSION] ?? 1),
    traceparent: headers[HEADER_TRACEPARENT],
    headers,
  };
}
63
-
64
- // src/client/errors.ts
65
/**
 * Error raised when handling a consumed message fails.
 * Carries the topic and original payload for diagnostics.
 */
var KafkaProcessingError = class extends Error {
  constructor(message, topic, originalMessage, options) {
    super(message, options);
    this.name = "KafkaProcessingError";
    this.topic = topic;
    this.originalMessage = originalMessage;
    // Assign `cause` explicitly for runtimes whose Error constructor
    // ignores options.cause.
    if (options?.cause) this.cause = options.cause;
  }
};
74
/** Error raised when a consumed message fails schema validation for its topic. */
var KafkaValidationError = class extends Error {
  constructor(topic, originalMessage, options) {
    super(`Schema validation failed for topic "${topic}"`, options);
    this.name = "KafkaValidationError";
    this.topic = topic;
    this.originalMessage = originalMessage;
    // Assign `cause` explicitly for runtimes whose Error constructor
    // ignores options.cause.
    if (options?.cause) this.cause = options.cause;
  }
};
83
/** Processing error raised after every configured retry attempt has failed. */
var KafkaRetryExhaustedError = class extends KafkaProcessingError {
  constructor(topic, originalMessage, attempts, options) {
    super(
      `Message processing failed after ${attempts} attempts on topic "${topic}"`,
      topic,
      originalMessage,
      options
    );
    this.name = "KafkaRetryExhaustedError";
    this.attempts = attempts;
  }
};
95
-
96
- // src/client/consumer-pipeline.ts
97
/** Coerce any thrown value into an Error instance (strings, numbers, etc.). */
function toError(error) {
  if (error instanceof Error) return error;
  return new Error(String(error));
}
100
/** Promise that resolves after `ms` milliseconds. */
function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
103
/**
 * Parse a raw message body as JSON.
 * Returns the parsed value, or null (after logging) when the body is
 * not valid JSON — callers treat null as "skip this message".
 */
function parseJsonMessage(raw, topicName, logger) {
  try {
    return JSON.parse(raw);
  } catch (error) {
    logger.error(
      `Failed to parse message from topic ${topicName}:`,
      toError(error).stack
    );
    return null;
  }
}
114
/**
 * Validate a parsed message against the schema registered for its topic.
 * Returns the (possibly transformed) validated payload, or null when
 * validation fails — in which case the raw message is routed to the DLQ
 * (if enabled) or reported via `onMessageLost`, and interceptors'
 * onError hooks are notified with a synthetic envelope.
 */
async function validateWithSchema(message, raw, topicName, schemaMap, interceptors, dlq, deps) {
  const schema = schemaMap.get(topicName);
  if (!schema) return message;
  try {
    return await schema.parse(message);
  } catch (error) {
    const cause = toError(error);
    const validationError = new KafkaValidationError(topicName, message, { cause });
    deps.logger.error(
      `Schema validation failed for topic ${topicName}:`,
      cause.message
    );
    if (dlq) {
      await sendToDlq(topicName, raw, deps, {
        error: validationError,
        attempt: 0,
        originalHeaders: deps.originalHeaders,
      });
    } else {
      await deps.onMessageLost?.({
        topic: topicName,
        error: validationError,
        attempt: 0,
        headers: deps.originalHeaders ?? {},
      });
    }
    // Synthetic envelope (partition -1, empty offset): this never reached a handler.
    const errorEnvelope = extractEnvelope(message, deps.originalHeaders ?? {}, topicName, -1, "");
    for (const interceptor of interceptors) {
      await interceptor.onError?.(errorEnvelope, validationError);
    }
    return null;
  }
}
144
/**
 * Publish a failed raw message to `<topic>.dlq`, annotating the headers
 * with failure metadata (origin topic, timestamp, error, attempt count).
 * Best-effort: publish failures are logged, never rethrown.
 */
async function sendToDlq(topicName, rawMessage, deps, meta) {
  const dlqTopic = `${topicName}.dlq`;
  const headers = {
    ...(meta?.originalHeaders ?? {}),
    "x-dlq-original-topic": topicName,
    "x-dlq-failed-at": new Date().toISOString(),
    "x-dlq-error-message": meta?.error.message ?? "unknown",
    // Stack is truncated to keep header sizes bounded.
    "x-dlq-error-stack": meta?.error.stack?.slice(0, 2e3) ?? "",
    "x-dlq-attempt-count": String(meta?.attempt ?? 0),
  };
  try {
    await deps.producer.send({
      topic: dlqTopic,
      messages: [{ value: rawMessage, headers }],
    });
    deps.logger.warn(`Message sent to DLQ: ${dlqTopic}`);
  } catch (error) {
    deps.logger.error(
      `Failed to send message to DLQ ${dlqTopic}:`,
      toError(error).stack
    );
  }
}
167
// Headers used to coordinate the `<topic>.retry` delivery chain.
const RETRY_HEADER_ATTEMPT = "x-retry-attempt";
const RETRY_HEADER_AFTER = "x-retry-after";
const RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
const RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
171
/**
 * Re-publish raw message bodies to `<originalTopic>.retry`, stamping
 * retry bookkeeping headers: attempt number, earliest-delivery time
 * (now + delayMs), max retries, and the originating topic. Any retry
 * headers from a previous hop are stripped from the carried-over user
 * headers first. Best-effort: publish failures are logged, not thrown.
 */
async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
  const retryTopic = `${originalTopic}.retry`;
  // Copy user headers minus stale retry bookkeeping from earlier hops.
  const userHeaders = { ...originalHeaders };
  delete userHeaders[RETRY_HEADER_ATTEMPT];
  delete userHeaders[RETRY_HEADER_AFTER];
  delete userHeaders[RETRY_HEADER_MAX_RETRIES];
  delete userHeaders[RETRY_HEADER_ORIGINAL_TOPIC];
  const headers = {
    ...userHeaders,
    [RETRY_HEADER_ATTEMPT]: String(attempt),
    [RETRY_HEADER_AFTER]: String(Date.now() + delayMs),
    [RETRY_HEADER_MAX_RETRIES]: String(maxRetries),
    [RETRY_HEADER_ORIGINAL_TOPIC]: originalTopic,
  };
  try {
    for (const raw of rawMessages) {
      await deps.producer.send({ topic: retryTopic, messages: [{ value: raw, headers }] });
    }
    deps.logger.warn(
      `Message queued in retry topic ${retryTopic} (attempt ${attempt}/${maxRetries})`
    );
  } catch (error) {
    deps.logger.error(
      `Failed to send message to retry topic ${retryTopic}:`,
      toError(error).stack
    );
  }
}
201
/**
 * Run a message/batch handler inside the interceptor + instrumentation
 * pipeline with in-process retries (full-jitter exponential backoff).
 *
 * - With `retryTopics` set: a single attempt is made; failures are handed
 *   to the retry-topic chain via `sendToRetryTopic` (first hop, attempt 1).
 * - Otherwise: up to `retry.maxRetries + 1` attempts; the final failure
 *   goes to the DLQ (if enabled) or to `deps.onMessageLost`.
 */
async function executeWithRetry(fn, ctx, deps) {
  const { envelope, rawMessages, interceptors, dlq, retry, isBatch, retryTopics } = ctx;
  const envelopes = Array.isArray(envelope) ? envelope : [envelope];
  const topicName = envelopes[0]?.topic ?? "unknown";
  const maxAttempts = retryTopics ? 1 : retry ? retry.maxRetries + 1 : 1;
  const backoffMs = retry?.backoffMs ?? 1e3;
  const maxBackoffMs = retry?.maxBackoffMs ?? 3e4;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    const cleanups = [];
    try {
      // Instrumentation beforeConsume may return a cleanup (e.g. span closer).
      for (const env of envelopes) {
        for (const inst of deps.instrumentation) {
          const cleanup = inst.beforeConsume?.(env);
          if (typeof cleanup === "function") cleanups.push(cleanup);
        }
      }
      for (const env of envelopes) {
        for (const interceptor of interceptors) {
          await interceptor.before?.(env);
        }
      }
      await fn();
      for (const env of envelopes) {
        for (const interceptor of interceptors) {
          await interceptor.after?.(env);
        }
      }
      for (const cleanup of cleanups) cleanup();
      return;
    } catch (error) {
      const err = toError(error);
      const isLastAttempt = attempt === maxAttempts;
      for (const env of envelopes) {
        for (const inst of deps.instrumentation) {
          inst.onConsumeError?.(env, err);
        }
      }
      for (const cleanup of cleanups) cleanup();
      // Report the richer "exhausted" error only when retries were configured.
      const reported =
        isLastAttempt && maxAttempts > 1
          ? new KafkaRetryExhaustedError(
              topicName,
              envelopes.map((e) => e.payload),
              maxAttempts,
              { cause: err }
            )
          : err;
      for (const env of envelopes) {
        for (const interceptor of interceptors) {
          await interceptor.onError?.(env, reported);
        }
      }
      deps.logger.error(
        `Error processing ${isBatch ? "batch" : "message"} from topic ${topicName} (attempt ${attempt}/${maxAttempts}):`,
        err.stack
      );
      if (retryTopics && retry) {
        // First hop of the retry-topic chain: full-jitter delay up to backoffMs.
        const cap = Math.min(backoffMs, maxBackoffMs);
        const delay = Math.floor(Math.random() * cap);
        await sendToRetryTopic(
          topicName,
          rawMessages,
          1,
          retry.maxRetries,
          delay,
          envelopes[0]?.headers ?? {},
          deps
        );
      } else if (isLastAttempt) {
        if (dlq) {
          const dlqMeta = { error: err, attempt, originalHeaders: envelopes[0]?.headers };
          for (const raw of rawMessages) {
            await sendToDlq(topicName, raw, deps, dlqMeta);
          }
        } else {
          await deps.onMessageLost?.({
            topic: topicName,
            error: err,
            attempt,
            headers: envelopes[0]?.headers ?? {},
          });
        }
      } else {
        // Full-jitter exponential backoff before the next in-process attempt.
        const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
        await sleep(Math.random() * cap);
      }
    }
  }
}
299
-
300
- // src/client/subscribe-retry.ts
301
/**
 * Subscribe with bounded retries to smooth over transient broker errors
 * (e.g. topic metadata not yet propagated). Rethrows the last error once
 * the attempt budget is exhausted.
 */
async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
  const maxAttempts = retryOpts?.retries ?? 5;
  const backoffMs = retryOpts?.backoffMs ?? 5e3;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      await consumer.subscribe({ topics });
      return;
    } catch (error) {
      if (attempt === maxAttempts) throw error;
      const reason = toError(error).message;
      logger.warn(
        `Failed to subscribe to [${topics.join(", ")}] (attempt ${attempt}/${maxAttempts}): ${reason}. Retrying in ${backoffMs}ms...`
      );
      await sleep(backoffMs);
    }
  }
}
318
-
319
// src/client/kafka.client.ts
// The confluent client ships a KafkaJS-compatible facade; pull out the
// constructor and log-level enum under local aliases.
const { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS;
321
- var KafkaClient = class {
322
- kafka;
323
- producer;
324
- txProducer;
325
- consumers = /* @__PURE__ */ new Map();
326
- admin;
327
- logger;
328
- autoCreateTopicsEnabled;
329
- strictSchemasEnabled;
330
- numPartitions;
331
- ensuredTopics = /* @__PURE__ */ new Set();
332
- defaultGroupId;
333
- schemaRegistry = /* @__PURE__ */ new Map();
334
- runningConsumers = /* @__PURE__ */ new Map();
335
- instrumentation;
336
- onMessageLost;
337
- isAdminConnected = false;
338
- clientId;
339
- constructor(clientId, groupId, brokers, options) {
340
- this.clientId = clientId;
341
- this.defaultGroupId = groupId;
342
- this.logger = options?.logger ?? {
343
- log: (msg) => console.log(`[KafkaClient:${clientId}] ${msg}`),
344
- warn: (msg, ...args) => console.warn(`[KafkaClient:${clientId}] ${msg}`, ...args),
345
- error: (msg, ...args) => console.error(`[KafkaClient:${clientId}] ${msg}`, ...args)
346
- };
347
- this.autoCreateTopicsEnabled = options?.autoCreateTopics ?? false;
348
- this.strictSchemasEnabled = options?.strictSchemas ?? true;
349
- this.numPartitions = options?.numPartitions ?? 1;
350
- this.instrumentation = options?.instrumentation ?? [];
351
- this.onMessageLost = options?.onMessageLost;
352
- this.kafka = new KafkaClass({
353
- kafkaJS: {
354
- clientId: this.clientId,
355
- brokers,
356
- logLevel: KafkaLogLevel.ERROR
357
- }
358
- });
359
- this.producer = this.kafka.producer({
360
- kafkaJS: {
361
- acks: -1
362
- }
363
- });
364
- this.admin = this.kafka.admin();
365
- }
366
- async sendMessage(topicOrDesc, message, options = {}) {
367
- const payload = await this.buildSendPayload(topicOrDesc, [
368
- {
369
- value: message,
370
- key: options.key,
371
- headers: options.headers,
372
- correlationId: options.correlationId,
373
- schemaVersion: options.schemaVersion,
374
- eventId: options.eventId
375
- }
376
- ]);
377
- await this.ensureTopic(payload.topic);
378
- await this.producer.send(payload);
379
- for (const inst of this.instrumentation) {
380
- inst.afterSend?.(payload.topic);
381
- }
382
- }
383
- async sendBatch(topicOrDesc, messages) {
384
- const payload = await this.buildSendPayload(topicOrDesc, messages);
385
- await this.ensureTopic(payload.topic);
386
- await this.producer.send(payload);
387
- for (const inst of this.instrumentation) {
388
- inst.afterSend?.(payload.topic);
389
- }
390
- }
391
- /** Execute multiple sends atomically. Commits on success, aborts on error. */
392
- async transaction(fn) {
393
- if (!this.txProducer) {
394
- this.txProducer = this.kafka.producer({
395
- kafkaJS: {
396
- acks: -1,
397
- idempotent: true,
398
- transactionalId: `${this.clientId}-tx`,
399
- maxInFlightRequests: 1
400
- }
401
- });
402
- await this.txProducer.connect();
403
- }
404
- const tx = await this.txProducer.transaction();
405
- try {
406
- const ctx = {
407
- send: async (topicOrDesc, message, options = {}) => {
408
- const payload = await this.buildSendPayload(topicOrDesc, [
409
- {
410
- value: message,
411
- key: options.key,
412
- headers: options.headers,
413
- correlationId: options.correlationId,
414
- schemaVersion: options.schemaVersion,
415
- eventId: options.eventId
416
- }
417
- ]);
418
- await this.ensureTopic(payload.topic);
419
- await tx.send(payload);
420
- },
421
- sendBatch: async (topicOrDesc, messages) => {
422
- const payload = await this.buildSendPayload(topicOrDesc, messages);
423
- await this.ensureTopic(payload.topic);
424
- await tx.send(payload);
425
- }
426
- };
427
- await fn(ctx);
428
- await tx.commit();
429
- } catch (error) {
430
- try {
431
- await tx.abort();
432
- } catch (abortError) {
433
- this.logger.error(
434
- "Failed to abort transaction:",
435
- toError(abortError).message
436
- );
437
- }
438
- throw error;
439
- }
440
- }
441
- // ── Producer lifecycle ───────────────────────────────────────────
442
- /** Connect the idempotent producer. Called automatically by `KafkaModule.register()`. */
443
- async connectProducer() {
444
- await this.producer.connect();
445
- this.logger.log("Producer connected");
446
- }
447
- async disconnectProducer() {
448
- await this.producer.disconnect();
449
- this.logger.log("Producer disconnected");
450
- }
451
- async startConsumer(topics, handleMessage, options = {}) {
452
- if (options.retryTopics && !options.retry) {
453
- throw new Error(
454
- "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
455
- );
456
- }
457
- const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
458
- const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
459
- await consumer.run({
460
- eachMessage: async ({ topic: topic2, partition, message }) => {
461
- if (!message.value) {
462
- this.logger.warn(`Received empty message from topic ${topic2}`);
463
- return;
464
- }
465
- const raw = message.value.toString();
466
- const parsed = parseJsonMessage(raw, topic2, this.logger);
467
- if (parsed === null) return;
468
- const headers = decodeHeaders(message.headers);
469
- const validated = await validateWithSchema(
470
- parsed,
471
- raw,
472
- topic2,
473
- schemaMap,
474
- interceptors,
475
- dlq,
476
- { ...deps, originalHeaders: headers }
477
- );
478
- if (validated === null) return;
479
- const envelope = extractEnvelope(
480
- validated,
481
- headers,
482
- topic2,
483
- partition,
484
- message.offset
485
- );
486
- await executeWithRetry(
487
- () => runWithEnvelopeContext(
488
- { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
489
- () => handleMessage(envelope)
490
- ),
491
- { envelope, rawMessages: [raw], interceptors, dlq, retry, retryTopics: options.retryTopics },
492
- deps
493
- );
494
- }
495
- });
496
- this.runningConsumers.set(gid, "eachMessage");
497
- if (options.retryTopics && retry) {
498
- await this.startRetryTopicConsumers(
499
- topicNames,
500
- gid,
501
- handleMessage,
502
- retry,
503
- dlq,
504
- interceptors,
505
- schemaMap
506
- );
507
- }
508
- }
509
- async startBatchConsumer(topics, handleBatch, options = {}) {
510
- const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
511
- const deps = { logger: this.logger, producer: this.producer, instrumentation: this.instrumentation, onMessageLost: this.onMessageLost };
512
- await consumer.run({
513
- eachBatch: async ({
514
- batch,
515
- heartbeat,
516
- resolveOffset,
517
- commitOffsetsIfNecessary
518
- }) => {
519
- const envelopes = [];
520
- const rawMessages = [];
521
- for (const message of batch.messages) {
522
- if (!message.value) {
523
- this.logger.warn(
524
- `Received empty message from topic ${batch.topic}`
525
- );
526
- continue;
527
- }
528
- const raw = message.value.toString();
529
- const parsed = parseJsonMessage(raw, batch.topic, this.logger);
530
- if (parsed === null) continue;
531
- const headers = decodeHeaders(message.headers);
532
- const validated = await validateWithSchema(
533
- parsed,
534
- raw,
535
- batch.topic,
536
- schemaMap,
537
- interceptors,
538
- dlq,
539
- { ...deps, originalHeaders: headers }
540
- );
541
- if (validated === null) continue;
542
- envelopes.push(
543
- extractEnvelope(validated, headers, batch.topic, batch.partition, message.offset)
544
- );
545
- rawMessages.push(raw);
546
- }
547
- if (envelopes.length === 0) return;
548
- const meta = {
549
- partition: batch.partition,
550
- highWatermark: batch.highWatermark,
551
- heartbeat,
552
- resolveOffset,
553
- commitOffsetsIfNecessary
554
- };
555
- await executeWithRetry(
556
- () => handleBatch(envelopes, meta),
557
- {
558
- envelope: envelopes,
559
- rawMessages: batch.messages.filter((m) => m.value).map((m) => m.value.toString()),
560
- interceptors,
561
- dlq,
562
- retry,
563
- isBatch: true
564
- },
565
- deps
566
- );
567
- }
568
- });
569
- this.runningConsumers.set(gid, "eachBatch");
570
- }
571
- // ── Consumer lifecycle ───────────────────────────────────────────
572
- async stopConsumer(groupId) {
573
- if (groupId !== void 0) {
574
- const consumer = this.consumers.get(groupId);
575
- if (!consumer) {
576
- this.logger.warn(`stopConsumer: no active consumer for group "${groupId}"`);
577
- return;
578
- }
579
- await consumer.disconnect().catch(() => {
580
- });
581
- this.consumers.delete(groupId);
582
- this.runningConsumers.delete(groupId);
583
- this.logger.log(`Consumer disconnected: group "${groupId}"`);
584
- } else {
585
- const tasks = Array.from(this.consumers.values()).map(
586
- (c) => c.disconnect().catch(() => {
587
- })
588
- );
589
- await Promise.allSettled(tasks);
590
- this.consumers.clear();
591
- this.runningConsumers.clear();
592
- this.logger.log("All consumers disconnected");
593
- }
594
- }
595
- /** Check broker connectivity and return status, clientId, and available topics. */
596
- async checkStatus() {
597
- if (!this.isAdminConnected) {
598
- await this.admin.connect();
599
- this.isAdminConnected = true;
600
- }
601
- const topics = await this.admin.listTopics();
602
- return { status: "up", clientId: this.clientId, topics };
603
- }
604
- getClientId() {
605
- return this.clientId;
606
- }
607
- /** Gracefully disconnect producer, all consumers, and admin. */
608
- async disconnect() {
609
- const tasks = [this.producer.disconnect()];
610
- if (this.txProducer) {
611
- tasks.push(this.txProducer.disconnect());
612
- this.txProducer = void 0;
613
- }
614
- for (const consumer of this.consumers.values()) {
615
- tasks.push(consumer.disconnect());
616
- }
617
- if (this.isAdminConnected) {
618
- tasks.push(this.admin.disconnect());
619
- this.isAdminConnected = false;
620
- }
621
- await Promise.allSettled(tasks);
622
- this.consumers.clear();
623
- this.runningConsumers.clear();
624
- this.logger.log("All connections closed");
625
- }
626
- // ── Retry topic chain ────────────────────────────────────────────
627
- /**
628
- * Auto-start companion consumers on `<topic>.retry` for each original topic.
629
- * Called by `startConsumer` when `retryTopics: true`.
630
- *
631
- * Flow per message:
632
- * 1. Sleep until `x-retry-after` (scheduled by the main consumer or previous retry hop)
633
- * 2. Call the original handler
634
- * 3. On failure: if retries remain → re-send to `<originalTopic>.retry` with incremented attempt
635
- * if exhausted → DLQ or onMessageLost
636
- */
637
- async startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap) {
638
- const retryTopicNames = originalTopics.map((t) => `${t}.retry`);
639
- const retryGroupId = `${originalGroupId}-retry`;
640
- const backoffMs = retry.backoffMs ?? 1e3;
641
- const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
642
- const deps = {
643
- logger: this.logger,
644
- producer: this.producer,
645
- instrumentation: this.instrumentation,
646
- onMessageLost: this.onMessageLost
647
- };
648
- for (const rt of retryTopicNames) {
649
- await this.ensureTopic(rt);
650
- }
651
- const consumer = this.getOrCreateConsumer(retryGroupId, false, true);
652
- await consumer.connect();
653
- await subscribeWithRetry(consumer, retryTopicNames, this.logger);
654
- await consumer.run({
655
- eachMessage: async ({ topic: retryTopic, partition, message }) => {
656
- if (!message.value) return;
657
- const raw = message.value.toString();
658
- const parsed = parseJsonMessage(raw, retryTopic, this.logger);
659
- if (parsed === null) return;
660
- const headers = decodeHeaders(message.headers);
661
- const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? retryTopic.replace(/\.retry$/, "");
662
- const currentAttempt = parseInt(
663
- headers[RETRY_HEADER_ATTEMPT] ?? "1",
664
- 10
665
- );
666
- const maxRetries = parseInt(
667
- headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
668
- 10
669
- );
670
- const retryAfter = parseInt(
671
- headers[RETRY_HEADER_AFTER] ?? "0",
672
- 10
673
- );
674
- const remaining = retryAfter - Date.now();
675
- if (remaining > 0) {
676
- consumer.pause([{ topic: retryTopic, partitions: [partition] }]);
677
- await sleep(remaining);
678
- consumer.resume([{ topic: retryTopic, partitions: [partition] }]);
679
- }
680
- const validated = await validateWithSchema(
681
- parsed,
682
- raw,
683
- originalTopic,
684
- schemaMap,
685
- interceptors,
686
- dlq,
687
- { ...deps, originalHeaders: headers }
688
- );
689
- if (validated === null) return;
690
- const envelope = extractEnvelope(
691
- validated,
692
- headers,
693
- originalTopic,
694
- partition,
695
- message.offset
696
- );
697
- try {
698
- const cleanups = [];
699
- for (const inst of this.instrumentation) {
700
- const c = inst.beforeConsume?.(envelope);
701
- if (typeof c === "function") cleanups.push(c);
702
- }
703
- for (const interceptor of interceptors) await interceptor.before?.(envelope);
704
- await runWithEnvelopeContext(
705
- { correlationId: envelope.correlationId, traceparent: envelope.traceparent },
706
- () => handleMessage(envelope)
707
- );
708
- for (const interceptor of interceptors) await interceptor.after?.(envelope);
709
- for (const cleanup of cleanups) cleanup();
710
- } catch (error) {
711
- const err = toError(error);
712
- const nextAttempt = currentAttempt + 1;
713
- const exhausted = currentAttempt >= maxRetries;
714
- for (const inst of this.instrumentation) inst.onConsumeError?.(envelope, err);
715
- const reportedError = exhausted && maxRetries > 1 ? new KafkaRetryExhaustedError(originalTopic, [envelope.payload], maxRetries, { cause: err }) : err;
716
- for (const interceptor of interceptors) {
717
- await interceptor.onError?.(envelope, reportedError);
718
- }
719
- this.logger.error(
720
- `Retry consumer error for ${originalTopic} (attempt ${currentAttempt}/${maxRetries}):`,
721
- err.stack
722
- );
723
- if (!exhausted) {
724
- const cap = Math.min(backoffMs * 2 ** currentAttempt, maxBackoffMs);
725
- const delay = Math.floor(Math.random() * cap);
726
- await sendToRetryTopic(
727
- originalTopic,
728
- [raw],
729
- nextAttempt,
730
- maxRetries,
731
- delay,
732
- headers,
733
- deps
734
- );
735
- } else if (dlq) {
736
- await sendToDlq(originalTopic, raw, deps, {
737
- error: err,
738
- // +1 to account for the main consumer's initial attempt before
739
- // routing to the retry topic, making this consistent with the
740
- // in-process retry path where attempt counts all tries.
741
- attempt: currentAttempt + 1,
742
- originalHeaders: headers
743
- });
744
- } else {
745
- await deps.onMessageLost?.({
746
- topic: originalTopic,
747
- error: err,
748
- attempt: currentAttempt,
749
- headers
750
- });
751
- }
752
- }
753
- }
754
- });
755
- this.runningConsumers.set(retryGroupId, "eachMessage");
756
- await this.waitForPartitionAssignment(consumer, retryTopicNames);
757
- this.logger.log(
758
- `Retry topic consumers started for: ${originalTopics.join(", ")} (group: ${retryGroupId})`
759
- );
760
- }
761
- // ── Private helpers ──────────────────────────────────────────────
762
- /**
763
- * Poll `consumer.assignment()` until the consumer has received at least one
764
- * partition for the given topics, then return. Logs a warning and returns
765
- * (rather than throwing) on timeout so that a slow broker does not break
766
- * the caller — in the worst case a message sent immediately after would be
767
- * missed, which is the same behaviour as before this guard was added.
768
- */
769
- async waitForPartitionAssignment(consumer, topics, timeoutMs = 1e4) {
770
- const topicSet = new Set(topics);
771
- const deadline = Date.now() + timeoutMs;
772
- while (Date.now() < deadline) {
773
- try {
774
- const assigned = consumer.assignment();
775
- if (assigned.some((a) => topicSet.has(a.topic))) return;
776
- } catch {
777
- }
778
- await sleep(200);
779
- }
780
- this.logger.warn(
781
- `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
782
- );
783
- }
784
- getOrCreateConsumer(groupId, fromBeginning, autoCommit) {
785
- if (!this.consumers.has(groupId)) {
786
- this.consumers.set(
787
- groupId,
788
- this.kafka.consumer({
789
- kafkaJS: { groupId, fromBeginning, autoCommit }
790
- })
791
- );
792
- }
793
- return this.consumers.get(groupId);
794
- }
795
- resolveTopicName(topicOrDescriptor) {
796
- if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
797
- if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
798
- return topicOrDescriptor.__topic;
799
- }
800
- return String(topicOrDescriptor);
801
- }
802
- async ensureTopic(topic2) {
803
- if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
804
- if (!this.isAdminConnected) {
805
- await this.admin.connect();
806
- this.isAdminConnected = true;
807
- }
808
- await this.admin.createTopics({
809
- topics: [{ topic: topic2, numPartitions: this.numPartitions }]
810
- });
811
- this.ensuredTopics.add(topic2);
812
- }
813
- /** Register schema from descriptor into global registry (side-effect). */
814
- registerSchema(topicOrDesc) {
815
- if (topicOrDesc?.__schema) {
816
- const topic2 = this.resolveTopicName(topicOrDesc);
817
- this.schemaRegistry.set(topic2, topicOrDesc.__schema);
818
- }
819
- }
820
- /** Validate message against schema. Pure — no side-effects on registry. */
821
- async validateMessage(topicOrDesc, message) {
822
- if (topicOrDesc?.__schema) {
823
- return await topicOrDesc.__schema.parse(message);
824
- }
825
- if (this.strictSchemasEnabled && typeof topicOrDesc === "string") {
826
- const schema = this.schemaRegistry.get(topicOrDesc);
827
- if (schema) return await schema.parse(message);
828
- }
829
- return message;
830
- }
831
- /**
832
- * Build a kafkajs-ready send payload.
833
- * Handles: topic resolution, schema registration, validation, JSON serialization,
834
- * envelope header generation, and instrumentation hooks.
835
- */
836
- async buildSendPayload(topicOrDesc, messages) {
837
- this.registerSchema(topicOrDesc);
838
- const topic2 = this.resolveTopicName(topicOrDesc);
839
- const builtMessages = await Promise.all(
840
- messages.map(async (m) => {
841
- const envelopeHeaders = buildEnvelopeHeaders({
842
- correlationId: m.correlationId,
843
- schemaVersion: m.schemaVersion,
844
- eventId: m.eventId,
845
- headers: m.headers
846
- });
847
- for (const inst of this.instrumentation) {
848
- inst.beforeSend?.(topic2, envelopeHeaders);
849
- }
850
- return {
851
- value: JSON.stringify(await this.validateMessage(topicOrDesc, m.value)),
852
- key: m.key ?? null,
853
- headers: envelopeHeaders
854
- };
855
- })
856
- );
857
- return { topic: topic2, messages: builtMessages };
858
- }
859
- /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
860
- async setupConsumer(topics, mode, options) {
861
- const {
862
- groupId: optGroupId,
863
- fromBeginning = false,
864
- retry,
865
- dlq = false,
866
- interceptors = [],
867
- schemas: optionSchemas
868
- } = options;
869
- const gid = optGroupId || this.defaultGroupId;
870
- const existingMode = this.runningConsumers.get(gid);
871
- const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
872
- if (existingMode === oppositeMode) {
873
- throw new Error(
874
- `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
875
- );
876
- }
877
- const consumer = this.getOrCreateConsumer(gid, fromBeginning, options.autoCommit ?? true);
878
- const schemaMap = this.buildSchemaMap(topics, optionSchemas);
879
- const topicNames = topics.map(
880
- (t) => this.resolveTopicName(t)
881
- );
882
- for (const t of topicNames) {
883
- await this.ensureTopic(t);
884
- }
885
- if (dlq) {
886
- for (const t of topicNames) {
887
- await this.ensureTopic(`${t}.dlq`);
888
- }
889
- }
890
- await consumer.connect();
891
- await subscribeWithRetry(consumer, topicNames, this.logger, options.subscribeRetry);
892
- this.logger.log(
893
- `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
894
- );
895
- return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
896
- }
897
- buildSchemaMap(topics, optionSchemas) {
898
- const schemaMap = /* @__PURE__ */ new Map();
899
- for (const t of topics) {
900
- if (t?.__schema) {
901
- const name = this.resolveTopicName(t);
902
- schemaMap.set(name, t.__schema);
903
- this.schemaRegistry.set(name, t.__schema);
904
- }
905
- }
906
- if (optionSchemas) {
907
- for (const [k, v] of optionSchemas) {
908
- schemaMap.set(k, v);
909
- this.schemaRegistry.set(k, v);
910
- }
911
- }
912
- return schemaMap;
913
- }
914
- };
915
-
916
// src/client/topic.ts
/**
 * Create a typed topic descriptor factory: `topic(name)()` yields a plain
 * descriptor, `topic(name).schema(s)` yields one with a validation schema.
 */
function topic(name) {
  const fn = () => ({ __topic: name, __type: undefined });
  fn.schema = (schema) => ({ __topic: name, __type: undefined, __schema: schema });
  return fn;
}
929
-
930
- export {
931
- HEADER_EVENT_ID,
932
- HEADER_CORRELATION_ID,
933
- HEADER_TIMESTAMP,
934
- HEADER_SCHEMA_VERSION,
935
- HEADER_TRACEPARENT,
936
- getEnvelopeContext,
937
- runWithEnvelopeContext,
938
- buildEnvelopeHeaders,
939
- decodeHeaders,
940
- extractEnvelope,
941
- KafkaProcessingError,
942
- KafkaValidationError,
943
- KafkaRetryExhaustedError,
944
- KafkaClient,
945
- topic
946
- };
947
- //# sourceMappingURL=chunk-3QXTW66R.mjs.map