@drarzter/kafka-client 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -4,17 +4,17 @@
4
4
  [![CI](https://github.com/drarzter/kafka-client/actions/workflows/publish.yml/badge.svg)](https://github.com/drarzter/kafka-client/actions/workflows/publish.yml)
5
5
  [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
6
6
 
7
- Type-safe Kafka client wrapper for NestJS. Built on top of [kafkajs](https://kafka.js.org/).
7
+ Type-safe Kafka client for Node.js. Framework-agnostic core with a first-class NestJS adapter. Built on top of [kafkajs](https://kafka.js.org/).
8
8
 
9
9
  ## What is this?
10
10
 
11
- An opinionated wrapper around kafkajs that integrates with NestJS as a DynamicModule. Not a full-featured framework — just a clean, typed abstraction for producing and consuming Kafka messages.
11
+ An opinionated, type-safe abstraction over kafkajs. Works standalone (Express, Fastify, raw Node) or as a NestJS DynamicModule. Not a full-featured framework — just a clean, typed layer for producing and consuming Kafka messages.
12
12
 
13
13
  ## Why?
14
14
 
15
15
 - **Typed topics** — you define a map of topic -> message shape, and the compiler won't let you send the wrong data to the wrong topic
16
16
  - **Topic descriptors** — `topic()` DX sugar lets you define topics as standalone typed objects instead of string keys
17
- - **NestJS-native** — `register()` / `registerAsync()`, DI injection, lifecycle hooks out of the box
17
+ - **Framework-agnostic** — use standalone or with NestJS (`register()` / `registerAsync()`, DI, lifecycle hooks)
18
18
  - **Idempotent producer** — `acks: -1`, `idempotent: true` by default
19
19
  - **Retry + DLQ** — configurable retries with backoff, dead letter queue for failed messages
20
20
  - **Batch sending** — send multiple messages in a single request
@@ -29,17 +29,43 @@ An opinionated wrapper around kafkajs that integrates with NestJS as a DynamicMo
29
29
  - **Multiple consumer groups** — named clients for different bounded contexts
30
30
  - **Declarative & imperative** — use `@SubscribeTo()` decorator or `startConsumer()` directly
31
31
 
32
+ See the [Roadmap](./ROADMAP.md) for upcoming features and version history.
33
+
32
34
  ## Installation
33
35
 
34
36
  ```bash
35
37
  npm install @drarzter/kafka-client
36
- # or
37
- pnpm add @drarzter/kafka-client
38
38
  ```
39
39
 
40
- Peer dependencies: `@nestjs/common`, `@nestjs/core`, `reflect-metadata`, `rxjs`
40
+ For NestJS projects, install peer dependencies: `@nestjs/common`, `@nestjs/core`, `reflect-metadata`, `rxjs`.
41
+
42
+ For standalone usage (Express, Fastify, raw Node), no extra dependencies are needed — import from `@drarzter/kafka-client/core`.
43
+
44
+ ## Standalone usage (no NestJS)
45
+
46
+ ```typescript
47
+ import { KafkaClient, topic } from '@drarzter/kafka-client/core';
48
+
49
+ const OrderCreated = topic('order.created')<{ orderId: string; amount: number }>();
50
+
51
+ const kafka = new KafkaClient('my-app', 'my-group', ['localhost:9092']);
52
+ await kafka.connectProducer();
53
+
54
+ // Send
55
+ await kafka.sendMessage(OrderCreated, { orderId: '123', amount: 100 });
56
+
57
+ // Consume
58
+ await kafka.startConsumer([OrderCreated], async (message, topic) => {
59
+ console.log(`${topic}:`, message.orderId);
60
+ });
61
+
62
+ // Custom logger (winston, pino, etc.)
63
+ const kafka2 = new KafkaClient('my-app', 'my-group', ['localhost:9092'], {
64
+ logger: myWinstonLogger,
65
+ });
66
+ ```
41
67
 
42
- ## Quick start
68
+ ## Quick start (NestJS)
43
69
 
44
70
  Send and receive a message in 3 files:
45
71
 
@@ -551,6 +577,7 @@ Passed to `KafkaModule.register()` or returned from `registerAsync()` factory:
551
577
  | `name` | — | Named client identifier for multi-client setups |
552
578
  | `isGlobal` | `false` | Make the client available in all modules without re-importing |
553
579
  | `autoCreateTopics` | `false` | Auto-create topics on first send (dev only) |
580
+ | `numPartitions` | `1` | Number of partitions for auto-created topics |
554
581
  | `strictSchemas` | `true` | Validate string topic keys against schemas registered via TopicDescriptor |
555
582
 
556
583
  **Module-scoped** (default) — import `KafkaModule` in each module that needs it:
@@ -766,11 +793,10 @@ Both suites run in CI on every push to `main`.
766
793
 
767
794
  ```
768
795
  src/
769
- ├── client/ # KafkaClient, types, topic(), error classes
770
- ├── module/ # KafkaModule, KafkaExplorer, DI constants
771
- ├── decorators/ # @InjectKafkaClient(), @SubscribeTo()
772
- ├── health/ # KafkaHealthIndicator
773
- └── index.ts # Public API re-exports
796
+ ├── client/ # Core — KafkaClient, types, topic(), error classes (0 framework deps)
797
+ ├── nest/ # NestJS adapter — Module, Explorer, decorators, health
798
+ ├── core.ts # Standalone entrypoint (@drarzter/kafka-client/core)
799
+ └── index.ts # Full entrypoint — core + NestJS adapter
774
800
  ```
775
801
 
776
802
  All exported types and methods have JSDoc comments — your IDE will show inline docs and autocomplete.
@@ -0,0 +1,545 @@
1
+ var __defProp = Object.defineProperty;
2
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
3
+ var __decorateClass = (decorators, target, key, kind) => {
4
+ var result = kind > 1 ? void 0 : kind ? __getOwnPropDesc(target, key) : target;
5
+ for (var i = decorators.length - 1, decorator; i >= 0; i--)
6
+ if (decorator = decorators[i])
7
+ result = (kind ? decorator(target, key, result) : decorator(result)) || result;
8
+ if (kind && result) __defProp(target, key, result);
9
+ return result;
10
+ };
11
+ var __decorateParam = (index, decorator) => (target, key) => decorator(target, key, index);
12
+
13
+ // src/client/kafka.client.ts
14
+ import { Kafka, Partitioners } from "kafkajs";
15
+
16
+ // src/client/errors.ts
17
+ var KafkaProcessingError = class extends Error {
18
+ constructor(message, topic2, originalMessage, options) {
19
+ super(message, options);
20
+ this.topic = topic2;
21
+ this.originalMessage = originalMessage;
22
+ this.name = "KafkaProcessingError";
23
+ if (options?.cause) this.cause = options.cause;
24
+ }
25
+ };
26
+ var KafkaValidationError = class extends Error {
27
+ constructor(topic2, originalMessage, options) {
28
+ super(`Schema validation failed for topic "${topic2}"`, options);
29
+ this.topic = topic2;
30
+ this.originalMessage = originalMessage;
31
+ this.name = "KafkaValidationError";
32
+ if (options?.cause) this.cause = options.cause;
33
+ }
34
+ };
35
+ var KafkaRetryExhaustedError = class extends KafkaProcessingError {
36
+ constructor(topic2, originalMessage, attempts, options) {
37
+ super(
38
+ `Message processing failed after ${attempts} attempts on topic "${topic2}"`,
39
+ topic2,
40
+ originalMessage,
41
+ options
42
+ );
43
+ this.attempts = attempts;
44
+ this.name = "KafkaRetryExhaustedError";
45
+ }
46
+ };
47
+
48
+ // src/client/kafka.client.ts
49
// kafkajs encodes acks=all (wait for full ISR acknowledgement) as -1.
var ACKS_ALL = -1;
/** Normalize any thrown value into a proper Error instance. */
function toError(error) {
  if (error instanceof Error) return error;
  return new Error(String(error));
}
53
+ var KafkaClient = class {
54
+ kafka;
55
+ producer;
56
+ consumers = /* @__PURE__ */ new Map();
57
+ admin;
58
+ logger;
59
+ autoCreateTopicsEnabled;
60
+ strictSchemasEnabled;
61
+ numPartitions;
62
+ ensuredTopics = /* @__PURE__ */ new Set();
63
+ defaultGroupId;
64
+ schemaRegistry = /* @__PURE__ */ new Map();
65
+ runningConsumers = /* @__PURE__ */ new Map();
66
+ isAdminConnected = false;
67
+ clientId;
68
+ constructor(clientId, groupId, brokers, options) {
69
+ this.clientId = clientId;
70
+ this.defaultGroupId = groupId;
71
+ this.logger = options?.logger ?? {
72
+ log: (msg) => console.log(`[KafkaClient:${clientId}] ${msg}`),
73
+ warn: (msg, ...args) => console.warn(`[KafkaClient:${clientId}] ${msg}`, ...args),
74
+ error: (msg, ...args) => console.error(`[KafkaClient:${clientId}] ${msg}`, ...args)
75
+ };
76
+ this.autoCreateTopicsEnabled = options?.autoCreateTopics ?? false;
77
+ this.strictSchemasEnabled = options?.strictSchemas ?? true;
78
+ this.numPartitions = options?.numPartitions ?? 1;
79
+ this.kafka = new Kafka({
80
+ clientId: this.clientId,
81
+ brokers
82
+ });
83
+ this.producer = this.kafka.producer({
84
+ createPartitioner: Partitioners.DefaultPartitioner,
85
+ idempotent: true,
86
+ transactionalId: `${clientId}-tx`,
87
+ maxInFlightRequests: 1
88
+ });
89
+ this.admin = this.kafka.admin();
90
+ }
91
+ async sendMessage(topicOrDesc, message, options = {}) {
92
+ const payload = this.buildSendPayload(topicOrDesc, [
93
+ { value: message, key: options.key, headers: options.headers }
94
+ ]);
95
+ await this.ensureTopic(payload.topic);
96
+ await this.producer.send(payload);
97
+ }
98
+ async sendBatch(topicOrDesc, messages) {
99
+ const payload = this.buildSendPayload(topicOrDesc, messages);
100
+ await this.ensureTopic(payload.topic);
101
+ await this.producer.send(payload);
102
+ }
103
+ /** Execute multiple sends atomically. Commits on success, aborts on error. */
104
+ async transaction(fn) {
105
+ const tx = await this.producer.transaction();
106
+ try {
107
+ const ctx = {
108
+ send: async (topicOrDesc, message, options = {}) => {
109
+ const payload = this.buildSendPayload(topicOrDesc, [
110
+ { value: message, key: options.key, headers: options.headers }
111
+ ]);
112
+ await this.ensureTopic(payload.topic);
113
+ await tx.send(payload);
114
+ },
115
+ sendBatch: async (topicOrDesc, messages) => {
116
+ const payload = this.buildSendPayload(topicOrDesc, messages);
117
+ await this.ensureTopic(payload.topic);
118
+ await tx.send(payload);
119
+ }
120
+ };
121
+ await fn(ctx);
122
+ await tx.commit();
123
+ } catch (error) {
124
+ try {
125
+ await tx.abort();
126
+ } catch (abortError) {
127
+ this.logger.error(
128
+ "Failed to abort transaction:",
129
+ toError(abortError).message
130
+ );
131
+ }
132
+ throw error;
133
+ }
134
+ }
135
+ // ── Producer lifecycle ───────────────────────────────────────────
136
+ /** Connect the idempotent producer. Called automatically by `KafkaModule.register()`. */
137
+ async connectProducer() {
138
+ await this.producer.connect();
139
+ this.logger.log("Producer connected");
140
+ }
141
+ async disconnectProducer() {
142
+ await this.producer.disconnect();
143
+ this.logger.log("Producer disconnected");
144
+ }
145
+ async startConsumer(topics, handleMessage, options = {}) {
146
+ const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
147
+ await consumer.run({
148
+ autoCommit: options.autoCommit ?? true,
149
+ eachMessage: async ({ topic: topic2, message }) => {
150
+ if (!message.value) {
151
+ this.logger.warn(`Received empty message from topic ${topic2}`);
152
+ return;
153
+ }
154
+ const raw = message.value.toString();
155
+ const parsed = this.parseJsonMessage(raw, topic2);
156
+ if (parsed === null) return;
157
+ const validated = await this.validateWithSchema(
158
+ parsed,
159
+ raw,
160
+ topic2,
161
+ schemaMap,
162
+ interceptors,
163
+ dlq
164
+ );
165
+ if (validated === null) return;
166
+ await this.executeWithRetry(
167
+ () => handleMessage(validated, topic2),
168
+ { topic: topic2, messages: validated, rawMessages: [raw], interceptors, dlq, retry }
169
+ );
170
+ }
171
+ });
172
+ this.runningConsumers.set(gid, "eachMessage");
173
+ }
174
+ async startBatchConsumer(topics, handleBatch, options = {}) {
175
+ const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
176
+ await consumer.run({
177
+ autoCommit: options.autoCommit ?? true,
178
+ eachBatch: async ({
179
+ batch,
180
+ heartbeat,
181
+ resolveOffset,
182
+ commitOffsetsIfNecessary
183
+ }) => {
184
+ const validMessages = [];
185
+ const rawMessages = [];
186
+ for (const message of batch.messages) {
187
+ if (!message.value) {
188
+ this.logger.warn(
189
+ `Received empty message from topic ${batch.topic}`
190
+ );
191
+ continue;
192
+ }
193
+ const raw = message.value.toString();
194
+ const parsed = this.parseJsonMessage(raw, batch.topic);
195
+ if (parsed === null) continue;
196
+ const validated = await this.validateWithSchema(
197
+ parsed,
198
+ raw,
199
+ batch.topic,
200
+ schemaMap,
201
+ interceptors,
202
+ dlq
203
+ );
204
+ if (validated === null) continue;
205
+ validMessages.push(validated);
206
+ rawMessages.push(raw);
207
+ }
208
+ if (validMessages.length === 0) return;
209
+ const meta = {
210
+ partition: batch.partition,
211
+ highWatermark: batch.highWatermark,
212
+ heartbeat,
213
+ resolveOffset,
214
+ commitOffsetsIfNecessary
215
+ };
216
+ await this.executeWithRetry(
217
+ () => handleBatch(validMessages, batch.topic, meta),
218
+ {
219
+ topic: batch.topic,
220
+ messages: validMessages,
221
+ rawMessages: batch.messages.filter((m) => m.value).map((m) => m.value.toString()),
222
+ interceptors,
223
+ dlq,
224
+ retry,
225
+ isBatch: true
226
+ }
227
+ );
228
+ }
229
+ });
230
+ this.runningConsumers.set(gid, "eachBatch");
231
+ }
232
+ // ── Consumer lifecycle ───────────────────────────────────────────
233
+ async stopConsumer() {
234
+ const tasks = [];
235
+ for (const consumer of this.consumers.values()) {
236
+ tasks.push(consumer.disconnect());
237
+ }
238
+ await Promise.allSettled(tasks);
239
+ this.consumers.clear();
240
+ this.runningConsumers.clear();
241
+ this.logger.log("All consumers disconnected");
242
+ }
243
+ /** Check broker connectivity and return available topics. */
244
+ async checkStatus() {
245
+ if (!this.isAdminConnected) {
246
+ await this.admin.connect();
247
+ this.isAdminConnected = true;
248
+ }
249
+ const topics = await this.admin.listTopics();
250
+ return { topics };
251
+ }
252
+ getClientId() {
253
+ return this.clientId;
254
+ }
255
+ /** Gracefully disconnect producer, all consumers, and admin. */
256
+ async disconnect() {
257
+ const tasks = [this.producer.disconnect()];
258
+ for (const consumer of this.consumers.values()) {
259
+ tasks.push(consumer.disconnect());
260
+ }
261
+ if (this.isAdminConnected) {
262
+ tasks.push(this.admin.disconnect());
263
+ this.isAdminConnected = false;
264
+ }
265
+ await Promise.allSettled(tasks);
266
+ this.consumers.clear();
267
+ this.runningConsumers.clear();
268
+ this.logger.log("All connections closed");
269
+ }
270
+ // ── Private helpers ──────────────────────────────────────────────
271
+ getOrCreateConsumer(groupId) {
272
+ const gid = groupId || this.defaultGroupId;
273
+ if (!this.consumers.has(gid)) {
274
+ this.consumers.set(gid, this.kafka.consumer({ groupId: gid }));
275
+ }
276
+ return this.consumers.get(gid);
277
+ }
278
+ resolveTopicName(topicOrDescriptor) {
279
+ if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
280
+ if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
281
+ return topicOrDescriptor.__topic;
282
+ }
283
+ return String(topicOrDescriptor);
284
+ }
285
+ async ensureTopic(topic2) {
286
+ if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
287
+ if (!this.isAdminConnected) {
288
+ await this.admin.connect();
289
+ this.isAdminConnected = true;
290
+ }
291
+ await this.admin.createTopics({
292
+ topics: [{ topic: topic2, numPartitions: this.numPartitions }]
293
+ });
294
+ this.ensuredTopics.add(topic2);
295
+ }
296
+ /** Register schema from descriptor into global registry (side-effect). */
297
+ registerSchema(topicOrDesc) {
298
+ if (topicOrDesc?.__schema) {
299
+ const topic2 = this.resolveTopicName(topicOrDesc);
300
+ this.schemaRegistry.set(topic2, topicOrDesc.__schema);
301
+ }
302
+ }
303
+ /** Validate message against schema. Pure — no side-effects on registry. */
304
+ validateMessage(topicOrDesc, message) {
305
+ if (topicOrDesc?.__schema) {
306
+ return topicOrDesc.__schema.parse(message);
307
+ }
308
+ if (this.strictSchemasEnabled && typeof topicOrDesc === "string") {
309
+ const schema = this.schemaRegistry.get(topicOrDesc);
310
+ if (schema) return schema.parse(message);
311
+ }
312
+ return message;
313
+ }
314
+ /**
315
+ * Build a kafkajs-ready send payload.
316
+ * Handles: topic resolution, schema registration, validation, JSON serialization.
317
+ */
318
+ buildSendPayload(topicOrDesc, messages) {
319
+ this.registerSchema(topicOrDesc);
320
+ const topic2 = this.resolveTopicName(topicOrDesc);
321
+ return {
322
+ topic: topic2,
323
+ messages: messages.map((m) => ({
324
+ value: JSON.stringify(this.validateMessage(topicOrDesc, m.value)),
325
+ key: m.key ?? null,
326
+ headers: m.headers
327
+ })),
328
+ acks: ACKS_ALL
329
+ };
330
+ }
331
+ /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
332
+ async setupConsumer(topics, mode, options) {
333
+ const {
334
+ groupId: optGroupId,
335
+ fromBeginning = false,
336
+ retry,
337
+ dlq = false,
338
+ interceptors = [],
339
+ schemas: optionSchemas
340
+ } = options;
341
+ const gid = optGroupId || this.defaultGroupId;
342
+ const existingMode = this.runningConsumers.get(gid);
343
+ const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
344
+ if (existingMode === oppositeMode) {
345
+ throw new Error(
346
+ `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
347
+ );
348
+ }
349
+ const consumer = this.getOrCreateConsumer(optGroupId);
350
+ const schemaMap = this.buildSchemaMap(topics, optionSchemas);
351
+ const topicNames = topics.map(
352
+ (t) => this.resolveTopicName(t)
353
+ );
354
+ await consumer.connect();
355
+ await this.subscribeWithRetry(consumer, topicNames, fromBeginning, options.subscribeRetry);
356
+ this.logger.log(
357
+ `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
358
+ );
359
+ return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
360
+ }
361
+ buildSchemaMap(topics, optionSchemas) {
362
+ const schemaMap = /* @__PURE__ */ new Map();
363
+ for (const t of topics) {
364
+ if (t?.__schema) {
365
+ const name = this.resolveTopicName(t);
366
+ schemaMap.set(name, t.__schema);
367
+ this.schemaRegistry.set(name, t.__schema);
368
+ }
369
+ }
370
+ if (optionSchemas) {
371
+ for (const [k, v] of optionSchemas) {
372
+ schemaMap.set(k, v);
373
+ this.schemaRegistry.set(k, v);
374
+ }
375
+ }
376
+ return schemaMap;
377
+ }
378
+ /** Parse raw message as JSON. Returns null on failure (logs error). */
379
+ parseJsonMessage(raw, topic2) {
380
+ try {
381
+ return JSON.parse(raw);
382
+ } catch (error) {
383
+ this.logger.error(
384
+ `Failed to parse message from topic ${topic2}:`,
385
+ toError(error).stack
386
+ );
387
+ return null;
388
+ }
389
+ }
390
+ /**
391
+ * Validate a parsed message against the schema map.
392
+ * On failure: logs error, sends to DLQ if enabled, calls interceptor.onError.
393
+ * Returns validated message or null.
394
+ */
395
+ async validateWithSchema(message, raw, topic2, schemaMap, interceptors, dlq) {
396
+ const schema = schemaMap.get(topic2);
397
+ if (!schema) return message;
398
+ try {
399
+ return schema.parse(message);
400
+ } catch (error) {
401
+ const err = toError(error);
402
+ const validationError = new KafkaValidationError(topic2, message, {
403
+ cause: err
404
+ });
405
+ this.logger.error(
406
+ `Schema validation failed for topic ${topic2}:`,
407
+ err.message
408
+ );
409
+ if (dlq) await this.sendToDlq(topic2, raw);
410
+ for (const interceptor of interceptors) {
411
+ await interceptor.onError?.(message, topic2, validationError);
412
+ }
413
+ return null;
414
+ }
415
+ }
416
+ /**
417
+ * Execute a handler with retry, interceptors, and DLQ support.
418
+ * Used by both single-message and batch consumers.
419
+ */
420
+ async executeWithRetry(fn, ctx) {
421
+ const { topic: topic2, messages, rawMessages, interceptors, dlq, retry, isBatch } = ctx;
422
+ const maxAttempts = retry ? retry.maxRetries + 1 : 1;
423
+ const backoffMs = retry?.backoffMs ?? 1e3;
424
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
425
+ try {
426
+ if (isBatch) {
427
+ for (const interceptor of interceptors) {
428
+ for (const msg of messages) {
429
+ await interceptor.before?.(msg, topic2);
430
+ }
431
+ }
432
+ } else {
433
+ for (const interceptor of interceptors) {
434
+ await interceptor.before?.(messages, topic2);
435
+ }
436
+ }
437
+ await fn();
438
+ if (isBatch) {
439
+ for (const interceptor of interceptors) {
440
+ for (const msg of messages) {
441
+ await interceptor.after?.(msg, topic2);
442
+ }
443
+ }
444
+ } else {
445
+ for (const interceptor of interceptors) {
446
+ await interceptor.after?.(messages, topic2);
447
+ }
448
+ }
449
+ return;
450
+ } catch (error) {
451
+ const err = toError(error);
452
+ const isLastAttempt = attempt === maxAttempts;
453
+ if (isLastAttempt && maxAttempts > 1) {
454
+ const exhaustedError = new KafkaRetryExhaustedError(
455
+ topic2,
456
+ messages,
457
+ maxAttempts,
458
+ { cause: err }
459
+ );
460
+ for (const interceptor of interceptors) {
461
+ await interceptor.onError?.(messages, topic2, exhaustedError);
462
+ }
463
+ } else {
464
+ for (const interceptor of interceptors) {
465
+ await interceptor.onError?.(messages, topic2, err);
466
+ }
467
+ }
468
+ this.logger.error(
469
+ `Error processing ${isBatch ? "batch" : "message"} from topic ${topic2} (attempt ${attempt}/${maxAttempts}):`,
470
+ err.stack
471
+ );
472
+ if (isLastAttempt) {
473
+ if (dlq) {
474
+ for (const raw of rawMessages) {
475
+ await this.sendToDlq(topic2, raw);
476
+ }
477
+ }
478
+ } else {
479
+ await this.sleep(backoffMs * attempt);
480
+ }
481
+ }
482
+ }
483
+ }
484
+ async sendToDlq(topic2, rawMessage) {
485
+ const dlqTopic = `${topic2}.dlq`;
486
+ try {
487
+ await this.producer.send({
488
+ topic: dlqTopic,
489
+ messages: [{ value: rawMessage }],
490
+ acks: ACKS_ALL
491
+ });
492
+ this.logger.warn(`Message sent to DLQ: ${dlqTopic}`);
493
+ } catch (error) {
494
+ this.logger.error(
495
+ `Failed to send message to DLQ ${dlqTopic}:`,
496
+ toError(error).stack
497
+ );
498
+ }
499
+ }
500
+ async subscribeWithRetry(consumer, topics, fromBeginning, retryOpts) {
501
+ const maxAttempts = retryOpts?.retries ?? 5;
502
+ const backoffMs = retryOpts?.backoffMs ?? 5e3;
503
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
504
+ try {
505
+ await consumer.subscribe({ topics, fromBeginning });
506
+ return;
507
+ } catch (error) {
508
+ if (attempt === maxAttempts) throw error;
509
+ const msg = toError(error).message;
510
+ this.logger.warn(
511
+ `Failed to subscribe to [${topics.join(", ")}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${backoffMs}ms...`
512
+ );
513
+ await this.sleep(backoffMs);
514
+ }
515
+ }
516
+ }
517
+ sleep(ms) {
518
+ return new Promise((resolve) => setTimeout(resolve, ms));
519
+ }
520
+ };
521
+
522
+ // src/client/topic.ts
523
+ function topic(name) {
524
+ const fn = () => ({
525
+ __topic: name,
526
+ __type: void 0
527
+ });
528
+ fn.schema = (schema) => ({
529
+ __topic: name,
530
+ __type: void 0,
531
+ __schema: schema
532
+ });
533
+ return fn;
534
+ }
535
+
536
+ // Chunk exports: decorator runtime helpers (used by the NestJS adapter),
+ // error classes, the framework-agnostic KafkaClient, and topic().
+ export {
537
+ __decorateClass,
538
+ __decorateParam,
539
+ KafkaProcessingError,
540
+ KafkaValidationError,
541
+ KafkaRetryExhaustedError,
542
+ KafkaClient,
543
+ topic
544
+ };
545
+ //# sourceMappingURL=chunk-UDOHIMAZ.mjs.map