@drarzter/kafka-client 0.1.9 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,6 +18,7 @@ An opinionated wrapper around kafkajs that integrates with NestJS as a DynamicMo
18
18
  - **Idempotent producer** — `acks: -1`, `idempotent: true` by default
19
19
  - **Retry + DLQ** — configurable retries with backoff, dead letter queue for failed messages
20
20
  - **Batch sending** — send multiple messages in a single request
21
+ - **Batch consuming** — `startBatchConsumer()` for high-throughput `eachBatch` processing
21
22
  - **Partition key support** — route related messages to the same partition
22
23
  - **Custom headers** — attach metadata headers to messages
23
24
  - **Transactions** — exactly-once semantics with `producer.transaction()`
@@ -314,6 +315,31 @@ export class OrdersService implements OnModuleInit {
314
315
 
315
316
  ## Multiple consumer groups
316
317
 
318
+ ### Per-consumer groupId
319
+
320
+ Override the default consumer group for specific consumers. Each unique `groupId` creates a separate kafkajs Consumer internally:
321
+
322
+ ```typescript
323
+ // Default group from constructor
324
+ await kafka.startConsumer(['orders'], handler);
325
+
326
+ // Custom group — receives its own copy of the messages
327
+ await kafka.startConsumer(['orders'], auditHandler, { groupId: 'orders-audit' });
328
+
329
+ // Works with @SubscribeTo too
330
+ @SubscribeTo('orders', { groupId: 'orders-audit' })
331
+ async auditOrders(message) { ... }
332
+ ```
333
+
334
+ **Important:** You cannot mix `eachMessage` and `eachBatch` consumers on the same `groupId`. The library throws a clear error if you try:
335
+
336
+ ```text
337
+ Cannot use eachBatch on consumer group "my-group" — it is already running with eachMessage.
338
+ Use a different groupId for this consumer.
339
+ ```
340
+
341
+ ### Named clients
342
+
317
343
  Register multiple named clients for different bounded contexts:
318
344
 
319
345
  ```typescript
@@ -406,6 +432,38 @@ await this.kafka.sendBatch('order.created', [
406
432
  ]);
407
433
  ```
408
434
 
435
+ ## Batch consuming
436
+
437
+ Process messages in batches for higher throughput. The handler receives an array of parsed messages and a `BatchMeta` object with offset management controls:
438
+
439
+ ```typescript
440
+ await this.kafka.startBatchConsumer(
441
+ ['order.created'],
442
+ async (messages, topic, meta) => {
443
+ // messages: OrdersTopicMap['order.created'][]
444
+ for (const msg of messages) {
445
+ await processOrder(msg);
446
+ meta.resolveOffset(/* ... */);
447
+ }
448
+ await meta.commitOffsetsIfNecessary();
449
+ },
450
+ { retry: { maxRetries: 3 }, dlq: true },
451
+ );
452
+ ```
453
+
454
+ With `@SubscribeTo()`:
455
+
456
+ ```typescript
457
+ @SubscribeTo('order.created', { batch: true })
458
+ async handleOrders(messages: OrdersTopicMap['order.created'][], topic: string) {
459
+ // messages is an array
460
+ }
461
+ ```
462
+
463
+ Schema validation runs per-message — invalid messages are skipped (DLQ'd if enabled), valid ones are passed to the handler. Retry applies to the whole batch.
464
+
465
+ `BatchMeta` exposes: `partition`, `highWatermark`, `heartbeat()`, `resolveOffset(offset)`, `commitOffsetsIfNecessary()`.
466
+
409
467
  ## Transactions
410
468
 
411
469
  Send multiple messages atomically with exactly-once semantics:
@@ -453,27 +511,86 @@ await this.kafka.startConsumer(['order.created'], handler, {
453
511
 
454
512
  Multiple interceptors run in order. All hooks are optional.
455
513
 
456
- ## Consumer options
514
+ ## Options reference
515
+
516
+ ### Send options
517
+
518
+ Options for `sendMessage()` — the third argument:
519
+
520
+ | Option | Default | Description |
521
+ |-----------|---------|--------------------------------------------------|
522
+ | `key` | — | Partition key for message routing |
523
+ | `headers` | — | Custom metadata headers (`Record<string, string>`) |
524
+
525
+ `sendBatch()` accepts `key` and `headers` per message inside the array items.
526
+
527
+ ### Consumer options
457
528
 
458
529
  | Option | Default | Description |
459
530
  |--------|---------|-------------|
531
+ | `groupId` | constructor value | Override consumer group for this subscription |
460
532
  | `fromBeginning` | `false` | Read from the beginning of the topic |
461
533
  | `autoCommit` | `true` | Auto-commit offsets |
462
534
  | `retry.maxRetries` | — | Number of retry attempts |
463
535
  | `retry.backoffMs` | `1000` | Base delay between retries (multiplied by attempt number) |
464
536
  | `dlq` | `false` | Send to `{topic}.dlq` after all retries exhausted |
465
537
  | `interceptors` | `[]` | Array of before/after/onError hooks |
538
+ | `batch` | `false` | (decorator only) Use `startBatchConsumer` instead of `startConsumer` |
539
+ | `subscribeRetry.retries` | `5` | Max attempts for `consumer.subscribe()` when topic doesn't exist yet |
540
+ | `subscribeRetry.backoffMs` | `5000` | Delay between subscribe retry attempts (ms) |
466
541
 
467
542
  ### Module options
468
543
 
544
+ Passed to `KafkaModule.register()` or returned from `registerAsync()` factory:
545
+
469
546
  | Option | Default | Description |
470
547
  |--------|---------|-------------|
471
- | `clientId` | — | Kafka client identifier |
472
- | `groupId` | — | Consumer group ID |
473
- | `brokers` | — | Array of broker addresses |
474
- | `name` | — | Named client for multi-group setups |
475
- | `isGlobal` | `false` | Make the client available in all modules |
476
- | `autoCreateTopics` | `false` | Auto-create topics on first send/consume |
548
+ | `clientId` | — | Kafka client identifier (required) |
549
+ | `groupId` | — | Default consumer group ID (required) |
550
+ | `brokers` | — | Array of broker addresses (required) |
551
+ | `name` | — | Named client identifier for multi-client setups |
552
+ | `isGlobal` | `false` | Make the client available in all modules without re-importing |
553
+ | `autoCreateTopics` | `false` | Auto-create topics on first send (dev only) |
554
+ | `strictSchemas` | `true` | Validate string topic keys against schemas registered via TopicDescriptor |
555
+
556
+ **Module-scoped** (default) — import `KafkaModule` in each module that needs it:
557
+
558
+ ```typescript
559
+ // orders.module.ts
560
+ @Module({
561
+ imports: [
562
+ KafkaModule.register<OrdersTopicMap>({
563
+ clientId: 'orders',
564
+ groupId: 'orders-group',
565
+ brokers: ['localhost:9092'],
566
+ }),
567
+ ],
568
+ })
569
+ export class OrdersModule {}
570
+ ```
571
+
572
+ **App-wide** — register once in `AppModule` with `isGlobal: true`, inject anywhere:
573
+
574
+ ```typescript
575
+ // app.module.ts
576
+ @Module({
577
+ imports: [
578
+ KafkaModule.register<MyTopics>({
579
+ clientId: 'my-app',
580
+ groupId: 'my-group',
581
+ brokers: ['localhost:9092'],
582
+ isGlobal: true,
583
+ }),
584
+ ],
585
+ })
586
+ export class AppModule {}
587
+
588
+ // any module — no KafkaModule import needed
589
+ @Injectable()
590
+ export class PaymentService {
591
+ constructor(@InjectKafkaClient() private readonly kafka: KafkaClient<MyTopics>) {}
592
+ }
593
+ ```
477
594
 
478
595
  ## Error classes
479
596
 
@@ -508,6 +625,99 @@ const interceptor: ConsumerInterceptor<MyTopics> = {
508
625
 
509
626
  When `retry.maxRetries` is set and all attempts fail, `KafkaRetryExhaustedError` is passed to `onError` interceptors automatically.
510
627
 
628
+ **`KafkaValidationError`** — thrown when schema validation fails on the consumer side. Has `topic`, `originalMessage`, and `cause`:
629
+
630
+ ```typescript
631
+ import { KafkaValidationError } from '@drarzter/kafka-client';
632
+
633
+ const interceptor: ConsumerInterceptor<MyTopics> = {
634
+ onError: (message, topic, error) => {
635
+ if (error instanceof KafkaValidationError) {
636
+ console.log(`Bad message on ${error.topic}:`, error.cause?.message);
637
+ }
638
+ },
639
+ };
640
+ ```
641
+
642
+ ## Schema validation
643
+
644
+ Add runtime message validation using any library with a `.parse()` method — Zod, Valibot, ArkType, or a custom validator. No extra dependency required.
645
+
646
+ ### Defining topics with schemas
647
+
648
+ ```typescript
649
+ import { topic, TopicsFrom } from '@drarzter/kafka-client';
650
+ import { z } from 'zod'; // or valibot, arktype, etc.
651
+
652
+ // Schema-validated — type inferred from schema, no generic needed
653
+ export const OrderCreated = topic('order.created').schema(z.object({
654
+ orderId: z.string(),
655
+ userId: z.string(),
656
+ amount: z.number().positive(),
657
+ }));
658
+
659
+ // Without schema — explicit generic (still works)
660
+ export const OrderAudit = topic('order.audit')<{ orderId: string; action: string }>();
661
+
662
+ export type MyTopics = TopicsFrom<typeof OrderCreated | typeof OrderAudit>;
663
+ ```
664
+
665
+ ### How it works
666
+
667
+ **On send** — `sendMessage`, `sendBatch`, and `transaction` call `schema.parse(message)` before serializing. Sending an invalid message throws immediately with the schema library's own error (e.g. `ZodError`):
668
+
669
+ ```typescript
670
+ // This throws ZodError — amount must be positive
671
+ await kafka.sendMessage(OrderCreated, { orderId: '1', userId: '2', amount: -5 });
672
+ ```
673
+
674
+ **On consume** — after `JSON.parse`, the consumer validates each message against the schema. Invalid messages are:
675
+
676
+ 1. Logged as errors
677
+ 2. Sent to DLQ if `dlq: true`
678
+ 3. Passed to `onError` interceptors as `KafkaValidationError`
679
+ 4. Skipped (handler is NOT called)
680
+
681
+ ```typescript
682
+ @SubscribeTo(OrderCreated, { dlq: true })
683
+ async handleOrder(message) {
684
+ // `message` is guaranteed to match the schema
685
+ console.log(message.orderId); // string — validated at runtime
686
+ }
687
+ ```
688
+
689
+ ### Strict schema mode
690
+
691
+ By default (`strictSchemas: true`), once a schema is registered via a TopicDescriptor, string topic keys are also validated against it:
692
+
693
+ ```typescript
694
+ // First call registers the schema in the internal registry
695
+ await kafka.sendMessage(OrderCreated, { orderId: '1', userId: '2', amount: 100 });
696
+
697
+ // Now this is ALSO validated — throws if data doesn't match OrderCreated's schema
698
+ await kafka.sendMessage('order.created', { orderId: 123, userId: null, amount: -5 });
699
+ ```
700
+
701
+ Disable with `strictSchemas: false` in `KafkaModule.register()` options if you want the old behavior (string topics bypass validation).
702
+
703
+ ### Bring your own validator
704
+
705
+ Any object with `parse(data: unknown): T` works:
706
+
707
+ ```typescript
708
+ import { SchemaLike } from '@drarzter/kafka-client';
709
+
710
+ const customValidator: SchemaLike<{ id: string }> = {
711
+ parse(data: unknown) {
712
+ const d = data as any;
713
+ if (typeof d?.id !== 'string') throw new Error('id must be a string');
714
+ return { id: d.id };
715
+ },
716
+ };
717
+
718
+ const MyTopic = topic('my.topic').schema(customValidator);
719
+ ```
720
+
511
721
  ## Health check
512
722
 
513
723
  Monitor Kafka connectivity with the built-in health indicator:
package/dist/index.d.mts CHANGED
@@ -1,6 +1,21 @@
1
1
  import { DynamicModule, OnModuleInit } from '@nestjs/common';
2
2
  import { DiscoveryService, ModuleRef } from '@nestjs/core';
3
3
 
4
+ /**
5
+ * Any validation library with a `.parse()` method.
6
+ * Works with Zod, Valibot, ArkType, or any custom validator.
7
+ *
8
+ * @example
9
+ * ```ts
10
+ * import { z } from 'zod';
11
+ * const schema: SchemaLike<{ id: string }> = z.object({ id: z.string() });
12
+ * ```
13
+ */
14
+ interface SchemaLike<T = any> {
15
+ parse(data: unknown): T;
16
+ }
17
+ /** Infer the output type from a SchemaLike. */
18
+ type InferSchema<S extends SchemaLike> = S extends SchemaLike<infer T> ? T : never;
4
19
  /**
5
20
  * A typed topic descriptor that pairs a topic name with its message type.
6
21
  * Created via the `topic()` factory function.
@@ -12,14 +27,23 @@ interface TopicDescriptor<N extends string = string, M extends Record<string, an
12
27
  readonly __topic: N;
13
28
  /** @internal Phantom type — never has a real value at runtime. */
14
29
  readonly __type: M;
30
+ /** Runtime schema validator. Present only when created via `topic().schema()`. */
31
+ readonly __schema?: SchemaLike<M>;
15
32
  }
16
33
  /**
17
34
  * Define a typed topic descriptor.
18
35
  *
19
36
  * @example
20
37
  * ```ts
38
+ * // Without schema — type provided explicitly:
21
39
  * const OrderCreated = topic('order.created')<{ orderId: string; amount: number }>();
22
40
  *
41
+ * // With schema — type inferred from schema:
42
+ * const OrderCreated = topic('order.created').schema(z.object({
43
+ * orderId: z.string(),
44
+ * amount: z.number(),
45
+ * }));
46
+ *
23
47
  * // Use with KafkaClient:
24
48
  * await kafka.sendMessage(OrderCreated, { orderId: '123', amount: 100 });
25
49
  *
@@ -28,7 +52,10 @@ interface TopicDescriptor<N extends string = string, M extends Record<string, an
28
52
  * async handleOrder(msg) { ... }
29
53
  * ```
30
54
  */
31
- declare function topic<N extends string>(name: N): <M extends Record<string, any>>() => TopicDescriptor<N, M>;
55
+ declare function topic<N extends string>(name: N): {
56
+ <M extends Record<string, any>>(): TopicDescriptor<N, M>;
57
+ schema<S extends SchemaLike<Record<string, any>>>(schema: S): TopicDescriptor<N, InferSchema<S>>;
58
+ };
32
59
  /**
33
60
  * Build a topic-message map type from a union of TopicDescriptors.
34
61
  *
@@ -83,8 +110,23 @@ interface SendOptions {
83
110
  /** Custom headers attached to the message. */
84
111
  headers?: MessageHeaders;
85
112
  }
113
+ /** Metadata exposed to batch consumer handlers. */
114
+ interface BatchMeta {
115
+ /** Partition number for this batch. */
116
+ partition: number;
117
+ /** Highest offset available on the broker for this partition. */
118
+ highWatermark: string;
119
+ /** Send a heartbeat to the broker to prevent session timeout. */
120
+ heartbeat(): Promise<void>;
121
+ /** Mark an offset as processed (for manual offset management). */
122
+ resolveOffset(offset: string): void;
123
+ /** Commit offsets if the auto-commit threshold has been reached. */
124
+ commitOffsetsIfNecessary(): Promise<void>;
125
+ }
86
126
  /** Options for configuring a Kafka consumer. */
87
127
  interface ConsumerOptions<T extends TopicMapConstraint<T> = TTopicMessageMap> {
128
+ /** Override the default consumer group ID from the constructor. */
129
+ groupId?: string;
88
130
  /** Start reading from earliest offset. Default: `false`. */
89
131
  fromBeginning?: boolean;
90
132
  /** Automatically commit offsets. Default: `true`. */
@@ -95,6 +137,10 @@ interface ConsumerOptions<T extends TopicMapConstraint<T> = TTopicMessageMap> {
95
137
  dlq?: boolean;
96
138
  /** Interceptors called before/after each message. */
97
139
  interceptors?: ConsumerInterceptor<T>[];
140
+ /** @internal Schema map populated by @SubscribeTo when descriptors have schemas. */
141
+ schemas?: Map<string, SchemaLike>;
142
+ /** Retry config for `consumer.subscribe()` when the topic doesn't exist yet. */
143
+ subscribeRetry?: SubscribeRetryOptions;
98
144
  }
99
145
  /** Configuration for consumer retry behavior. */
100
146
  interface RetryOptions {
@@ -137,6 +183,8 @@ interface IKafkaClient<T extends TopicMapConstraint<T>> {
137
183
  }>;
138
184
  startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (message: T[K[number]], topic: K[number]) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
139
185
  startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (message: D["__type"], topic: D["__topic"]) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
186
+ startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (messages: T[K[number]][], topic: K[number], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
187
+ startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (messages: D["__type"][], topic: D["__topic"], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
140
188
  stopConsumer(): Promise<void>;
141
189
  sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
142
190
  sendBatch<K extends keyof T>(topic: K, messages: Array<{
@@ -150,8 +198,17 @@ interface IKafkaClient<T extends TopicMapConstraint<T>> {
150
198
  }
151
199
  /** Options for `KafkaClient` constructor. */
152
200
  interface KafkaClientOptions {
153
- /** Auto-create topics via admin before the first `sendMessage`, `sendBatch`, `transaction`, or `startConsumer` for each topic. Useful for development — not recommended in production. */
201
+ /** Auto-create topics via admin before the first `sendMessage`, `sendBatch`, or `transaction` for each topic. Useful for development — not recommended in production. */
154
202
  autoCreateTopics?: boolean;
203
+ /** When `true`, string topic keys are validated against any schema previously registered via a TopicDescriptor. Default: `true`. */
204
+ strictSchemas?: boolean;
205
+ }
206
+ /** Options for consumer subscribe retry when topic doesn't exist yet. */
207
+ interface SubscribeRetryOptions {
208
+ /** Maximum number of subscribe attempts. Default: `5`. */
209
+ retries?: number;
210
+ /** Delay between retries in ms. Default: `5000`. */
211
+ backoffMs?: number;
155
212
  }
156
213
 
157
214
  /**
@@ -163,16 +220,22 @@ interface KafkaClientOptions {
163
220
  declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClient<T> {
164
221
  private readonly kafka;
165
222
  private readonly producer;
166
- private readonly consumer;
223
+ private readonly consumers;
167
224
  private readonly admin;
168
225
  private readonly logger;
169
226
  private readonly autoCreateTopicsEnabled;
227
+ private readonly strictSchemasEnabled;
170
228
  private readonly ensuredTopics;
229
+ private readonly defaultGroupId;
230
+ private readonly schemaRegistry;
231
+ private readonly runningConsumers;
171
232
  private isAdminConnected;
172
233
  readonly clientId: ClientId;
173
234
  constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
235
+ private getOrCreateConsumer;
174
236
  private resolveTopicName;
175
237
  private ensureTopic;
238
+ private validateMessage;
176
239
  /** Send a single typed message. Accepts a topic key or a TopicDescriptor. */
177
240
  sendMessage<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, message: D["__type"], options?: SendOptions): Promise<void>;
178
241
  sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
@@ -195,16 +258,21 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
195
258
  /** Subscribe to topics and start consuming messages with the given handler. */
196
259
  startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (message: T[K[number]], topic: K[number]) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
197
260
  startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (message: D["__type"], topic: D["__topic"]) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
261
+ /** Subscribe to topics and consume messages in batches. */
262
+ startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (messages: T[K[number]][], topic: K[number], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
263
+ startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (messages: D["__type"][], topic: D["__topic"], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<void>;
198
264
  stopConsumer(): Promise<void>;
199
265
  /** Check broker connectivity and return available topics. */
200
266
  checkStatus(): Promise<{
201
267
  topics: string[];
202
268
  }>;
203
269
  getClientId(): ClientId;
204
- /** Gracefully disconnect producer, consumer, and admin. */
270
+ /** Gracefully disconnect producer, all consumers, and admin. */
205
271
  disconnect(): Promise<void>;
272
+ private buildSchemaMap;
206
273
  private processMessage;
207
274
  private sendToDlq;
275
+ private subscribeWithRetry;
208
276
  private sleep;
209
277
  }
210
278
 
@@ -254,6 +322,15 @@ declare class KafkaProcessingError extends Error {
254
322
  cause?: Error;
255
323
  });
256
324
  }
325
+ /** Error thrown when schema validation fails on send or consume. */
326
+ declare class KafkaValidationError extends Error {
327
+ readonly topic: string;
328
+ readonly originalMessage: unknown;
329
+ readonly cause?: Error;
330
+ constructor(topic: string, originalMessage: unknown, options?: {
331
+ cause?: Error;
332
+ });
333
+ }
257
334
  /** Error thrown when all retry attempts are exhausted for a message. */
258
335
  declare class KafkaRetryExhaustedError extends KafkaProcessingError {
259
336
  readonly attempts: number;
@@ -270,8 +347,11 @@ declare const getKafkaClientToken: (name?: string) => string;
270
347
  declare const KAFKA_SUBSCRIBER_METADATA = "KAFKA_SUBSCRIBER_METADATA";
271
348
  interface KafkaSubscriberMetadata {
272
349
  topics: string[];
350
+ schemas?: Map<string, SchemaLike>;
273
351
  options?: ConsumerOptions;
274
352
  clientName?: string;
353
+ batch?: boolean;
354
+ methodName?: string | symbol;
275
355
  }
276
356
  /** Inject a `KafkaClient` instance. Pass a name to target a specific named client. */
277
357
  declare const InjectKafkaClient: (name?: string) => ParameterDecorator;
@@ -281,6 +361,7 @@ declare const InjectKafkaClient: (name?: string) => ParameterDecorator;
281
361
  */
282
362
  declare const SubscribeTo: (topics: string | string[] | TopicDescriptor | TopicDescriptor[] | (string | TopicDescriptor)[], options?: ConsumerOptions & {
283
363
  clientName?: string;
364
+ batch?: boolean;
284
365
  }) => MethodDecorator;
285
366
 
286
367
  /** Discovers `@SubscribeTo()` decorators and wires them to their Kafka clients on startup. */
@@ -304,4 +385,4 @@ declare class KafkaHealthIndicator {
304
385
  check<T extends TopicMapConstraint<T>>(client: KafkaClient<T>): Promise<KafkaHealthResult>;
305
386
  }
306
387
 
307
- export { type ClientId, type ConsumerInterceptor, type ConsumerOptions, type GroupId, type IKafkaClient, InjectKafkaClient, KAFKA_CLIENT, KAFKA_SUBSCRIBER_METADATA, KafkaClient, type KafkaClientOptions, KafkaExplorer, KafkaHealthIndicator, type KafkaHealthResult, KafkaModule, type KafkaModuleAsyncOptions, type KafkaModuleOptions, KafkaProcessingError, KafkaRetryExhaustedError, type KafkaSubscriberMetadata, type MessageHeaders, type RetryOptions, type SendOptions, SubscribeTo, type TTopicMessageMap, type TopicDescriptor, type TopicMapConstraint, type TopicsFrom, type TransactionContext, getKafkaClientToken, topic };
388
+ export { type BatchMeta, type ClientId, type ConsumerInterceptor, type ConsumerOptions, type GroupId, type IKafkaClient, type InferSchema, InjectKafkaClient, KAFKA_CLIENT, KAFKA_SUBSCRIBER_METADATA, KafkaClient, type KafkaClientOptions, KafkaExplorer, KafkaHealthIndicator, type KafkaHealthResult, KafkaModule, type KafkaModuleAsyncOptions, type KafkaModuleOptions, KafkaProcessingError, KafkaRetryExhaustedError, type KafkaSubscriberMetadata, KafkaValidationError, type MessageHeaders, type RetryOptions, type SchemaLike, type SendOptions, type SubscribeRetryOptions, SubscribeTo, type TTopicMessageMap, type TopicDescriptor, type TopicMapConstraint, type TopicsFrom, type TransactionContext, getKafkaClientToken, topic };