@drarzter/kafka-client 0.7.4 → 0.8.0

package/dist/core.d.mts CHANGED
@@ -1,5 +1,5 @@
- import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, a as KafkaClientOptions, c as TopicDescriptor, z as SendOptions, M as MessageHeaders, B as BatchMessageItem, f as BatchSendOptions, O as TransactionContext, o as EventEnvelope, b as ConsumerOptions, k as ConsumerHandle, e as BatchMeta, n as DlqReplayOptions, d as KafkaHealthResult, j as ConsumerGroupSummary, J as TopicDescription, w as KafkaMetrics } from './types-Db7qSbZP.mjs';
- export { g as BeforeConsumeResult, h as CircuitBreakerOptions, i as CompressionType, l as ConsumerInterceptor, D as DeduplicationOptions, m as DlqReason, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, p as HEADER_EVENT_ID, q as HEADER_LAMPORT_CLOCK, r as HEADER_SCHEMA_VERSION, s as HEADER_TIMESTAMP, t as HEADER_TRACEPARENT, u as InferSchema, K as KafkaInstrumentation, v as KafkaLogger, x as MessageLostContext, R as RetryOptions, S as SchemaLike, y as SchemaParseContext, A as SubscribeRetryOptions, F as TTopicMessageMap, L as TopicPartitionInfo, N as TopicsFrom, P as TtlExpiredContext, Q as buildEnvelopeHeaders, U as decodeHeaders, V as extractEnvelope, W as getEnvelopeContext, X as runWithEnvelopeContext, Y as topic } from './types-Db7qSbZP.mjs';
+ import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, a as KafkaClientOptions, c as TopicDescriptor, O as SendOptions, M as MessageHeaders, B as BatchMessageItem, f as BatchSendOptions, X as TransactionContext, r as EventEnvelope, b as ConsumerOptions, n as ConsumerHandle, e as BatchMeta, $ as WindowMeta, _ as WindowConsumerOptions, L as RoutingOptions, Y as TransactionalHandlerContext, q as DlqReplayOptions, R as ReadSnapshotOptions, j as CheckpointResult, F as RestoreCheckpointOptions, i as CheckpointRestoreResult, d as KafkaHealthResult, m as ConsumerGroupSummary, U as TopicDescription, z as KafkaMetrics } from './types-CNfeoF3_.mjs';
+ export { g as BeforeConsumeResult, h as CheckpointEntry, k as CircuitBreakerOptions, l as CompressionType, o as ConsumerInterceptor, D as DeduplicationOptions, p as DlqReason, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, s as HEADER_EVENT_ID, t as HEADER_LAMPORT_CLOCK, u as HEADER_SCHEMA_VERSION, v as HEADER_TIMESTAMP, w as HEADER_TRACEPARENT, x as InferSchema, K as KafkaInstrumentation, y as KafkaLogger, A as MessageLostContext, J as RetryOptions, S as SchemaLike, N as SchemaParseContext, P as SubscribeRetryOptions, Q as TTopicMessageMap, V as TopicPartitionInfo, W as TopicsFrom, Z as TtlExpiredContext, a0 as buildEnvelopeHeaders, a1 as decodeHeaders, a2 as extractEnvelope, a3 as getEnvelopeContext, a4 as runWithEnvelopeContext, a5 as topic } from './types-CNfeoF3_.mjs';
 
 /**
  * Type-safe Kafka client.
@@ -39,6 +39,12 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 private _lamportClock;
 /** Topics to scan for the highest Lamport clock value on `connectProducer()`. */
 private readonly clockRecoveryTopics;
+ /** Lag-throttle configuration — set when `lagThrottle` is configured. */
+ private readonly lagThrottleOpts;
+ /** `true` while the observed consumer group lag exceeds `lagThrottle.maxLag`. */
+ private _lagThrottled;
+ /** Background polling timer for lag throttle. */
+ private _lagThrottleTimer;
 /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
 private readonly dedupStates;
 private readonly circuitBreaker;
@@ -51,6 +57,21 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 private readonly _retryTopicDeps;
 /** DLQ header keys added by the pipeline — stripped before re-publishing. */
 private static readonly DLQ_HEADER_KEYS;
+ /**
+ * Create a new KafkaClient.
+ * @param clientId Unique client identifier (used in Kafka metadata and logs).
+ * @param groupId Default consumer group ID for this client.
+ * @param brokers Array of broker addresses, e.g. `['localhost:9092']`.
+ * @param options Optional client-wide configuration.
+ * @example
+ * ```ts
+ * const kafka = new KafkaClient('my-service', 'my-service-group', ['localhost:9092'], {
+ * lagThrottle: { maxLag: 10_000 },
+ * onMessageLost: (ctx) => logger.error('Message lost', ctx),
+ * });
+ * await kafka.connectProducer();
+ * ```
+ */
 constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
 /**
 * Send a single typed message. Accepts a topic key or a `TopicDescriptor`.
@@ -58,6 +79,10 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
 * @param message Message payload — validated against the topic schema when one is registered.
 * @param options Optional per-send settings: `key`, `headers`, `correlationId`, `compression`, etc.
+ * @example
+ * ```ts
+ * await kafka.sendMessage('orders.created', { orderId: '123', amount: 99 });
+ * ```
 */
 sendMessage<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, message: D["__type"], options?: SendOptions): Promise<void>;
 sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
@@ -71,6 +96,10 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param topic Topic name.
 * @param key Partition key identifying the record to tombstone.
 * @param headers Optional custom Kafka headers.
+ * @example
+ * ```ts
+ * await kafka.sendTombstone('users.state', 'user-42');
+ * ```
 */
 sendTombstone(topic: string, key: string, headers?: MessageHeaders): Promise<void>;
 /**
@@ -82,16 +111,35 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
 * @param messages Array of messages to send.
 * @param options Optional batch-level settings: `compression` codec.
+ * @example
+ * ```ts
+ * await kafka.sendBatch('orders.created', [
+ * { value: { orderId: '1', amount: 10 }, key: 'order-1' },
+ * ]);
+ * ```
 */
 sendBatch<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, messages: Array<BatchMessageItem<D["__type"]>>, options?: BatchSendOptions): Promise<void>;
 sendBatch<K extends keyof T>(topic: K, messages: Array<BatchMessageItem<T[K]>>, options?: BatchSendOptions): Promise<void>;
- /** Execute multiple sends atomically. Commits on success, aborts on error. */
+ /**
+ * Execute multiple sends atomically. Commits on success, aborts on error.
+ * @example
+ * ```ts
+ * await kafka.transaction(async (tx) => {
+ * await tx.send('orders.created', { orderId: '123' });
+ * await tx.send('inventory.reserved', { itemId: 'a', qty: 1 });
+ * });
+ * ```
+ */
 transaction(fn: (ctx: TransactionContext<T>) => Promise<void>): Promise<void>;
 /**
 * Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
 * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
 */
 connectProducer(): Promise<void>;
+ /** Start the background lag-polling loop for producer throttling. */
+ private startLagThrottlePoller;
+ /** Wait until lag drops below the threshold (or maxWaitMs is exceeded). */
+ private waitIfThrottled;
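
> How the new lag-throttle pieces fit together, as far as this diff shows: the constructor `@example` configures `lagThrottle: { maxLag: 10_000 }`, `connectProducer()` starts the background poller, and sends wait in `waitIfThrottled` while group lag exceeds `maxLag`. A hedged usage sketch; the `maxWaitMs` cap is mentioned in the doc comment above, but its exact option name and placement are an assumption:

```ts
// Hedged sketch of producer-side lag throttling (new in 0.8.0).
const kafka = new KafkaClient<TopicMap>('my-service', 'my-service-group', ['localhost:9092'], {
  lagThrottle: { maxLag: 10_000 },
});
await kafka.connectProducer(); // background poller starts watching group lag

// While observed lag exceeds maxLag, this send presumably waits until the
// lag drains (or the wait cap is exceeded), smoothing producer bursts.
await kafka.sendMessage('orders.created', { orderId: '123', amount: 99 });

// The same lag data the poller uses is also available directly:
const lag = await kafka.getConsumerLag();
```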
 /**
 * Recover the Lamport clock from the last message across the given topics.
 *
@@ -121,6 +169,13 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param handleMessage Async handler called for every message. Throw to trigger retries.
 * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `circuitBreaker`, etc.
 * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
+ * @example
+ * ```ts
+ * const handle = await kafka.startConsumer(['orders.created'], async (envelope) => {
+ * await processOrder(envelope.payload);
+ * }, { retry: { maxRetries: 3 }, dlq: true });
+ * await handle.stop();
+ * ```
 */
 startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
 startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
@@ -138,6 +193,13 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param handleBatch Async handler called with each batch of decoded messages.
 * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `autoCommit`, etc.
 * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
+ * @example
+ * ```ts
+ * await kafka.startBatchConsumer(['metrics'], async (envelopes, meta) => {
+ * await db.insertMany(envelopes.map(e => e.payload));
+ * meta.resolveOffset(envelopes.at(-1)!.offset);
+ * }, { autoCommit: false });
+ * ```
 */
 startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
 startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
@@ -151,6 +213,55 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * }
 */
 consume<K extends keyof T & string>(topic: K, options?: ConsumerOptions<T>): AsyncIterableIterator<EventEnvelope<T[K]>>;
+ /**
+ * Accumulate messages into a window and flush the handler when either
+ * `maxMessages` is reached or `maxMs` has elapsed — whichever fires first.
+ * Remaining messages are flushed before the consumer disconnects on `stop()`.
+ * @example
+ * ```ts
+ * await kafka.startWindowConsumer('events', async (batch, meta) => {
+ * await db.insertMany(batch.map(e => e.payload));
+ * }, { maxMessages: 100, maxMs: 5_000 });
+ * ```
+ */
+ startWindowConsumer<K extends keyof T & string>(topic: K, handler: (envelopes: EventEnvelope<T[K]>[], meta: WindowMeta) => Promise<void>, options: WindowConsumerOptions<T>): Promise<ConsumerHandle>;
+ /**
+ * Subscribe to topics and dispatch each message to a handler based on the value
+ * of a specific Kafka header. A thin, zero-overhead wrapper over `startConsumer`.
+ *
+ * All `ConsumerOptions` (retry, DLQ, deduplication, circuit breaker, etc.) apply
+ * uniformly across every route.
+ * @example
+ * ```ts
+ * await kafka.startRoutedConsumer(['domain.events'], {
+ * header: 'x-event-type',
+ * routes: {
+ * 'order.created': async (e) => handleOrderCreated(e.payload),
+ * },
+ * });
+ * ```
+ */
+ startRoutedConsumer<K extends Array<keyof T>>(topics: K, routing: RoutingOptions<T[K[number]]>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
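
> Since the doc above notes that `ConsumerOptions` apply uniformly across routes, a slightly fuller sketch than the inline example, with retry and DLQ enabled. The handlers and the second route are illustrative, not part of the package:

```ts
// Hypothetical handlers for the sketch.
const handleOrderCreated = async (p: unknown) => { /* ... */ };
const handleOrderCancelled = async (p: unknown) => { /* ... */ };

// One subscription, dispatched on the 'x-event-type' header; the
// retry/dlq options apply to every route alike.
await kafka.startRoutedConsumer(['domain.events'], {
  header: 'x-event-type',
  routes: {
    'order.created': async (e) => handleOrderCreated(e.payload),
    'order.cancelled': async (e) => handleOrderCancelled(e.payload),
  },
}, { retry: { maxRetries: 3 }, dlq: true });
```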
+ /**
+ * Subscribe to topics and consume messages with exactly-once semantics for
+ * read-process-write pipelines.
+ *
+ * Each message is handled inside a dedicated Kafka transaction.
+ * The handler receives a `TransactionalHandlerContext` whose `send` / `sendBatch`
+ * methods stage outgoing messages inside that transaction. On handler success the
+ * source offset commit and all staged sends are committed atomically. On handler
+ * failure the transaction is aborted and the source message is redelivered — no
+ * partial writes become visible to downstream consumers.
+ *
+ * Incompatible with `retryTopics: true` — throws at startup if set.
+ * @example
+ * ```ts
+ * await kafka.startTransactionalConsumer(['orders.created'], async (envelope, tx) => {
+ * await tx.send('inventory.reserved', { orderId: envelope.payload.orderId, qty: 1 });
+ * });
+ * ```
+ */
+ startTransactionalConsumer<K extends Array<keyof T>>(topics: K, handler: (envelope: EventEnvelope<T[K[number]]>, tx: TransactionalHandlerContext<T>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
 /**
 * Stop all consumers or a specific group.
 *
@@ -158,24 +269,19 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * If `groupId` is specified, only the consumer with that group ID is stopped.
 *
 * @throws {Error} if the consumer fails to disconnect.
+ * @example
+ * ```ts
+ * await kafka.stopConsumer('billing-service'); // stop one group
+ * await kafka.stopConsumer(); // stop all
+ * ```
 */
 stopConsumer(groupId?: string): Promise<void>;
- /**
- * Temporarily stop delivering messages from specific partitions without disconnecting the consumer.
- *
- * @param groupId Consumer group to pause. Defaults to the client's default groupId.
- * @param assignments Topic-partition pairs to pause.
- */
+ /** @inheritDoc */
 pauseConsumer(groupId: string | undefined, assignments: Array<{
 topic: string;
 partitions: number[];
 }>): void;
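
> With the prose docs for `pauseConsumer` / `resumeConsumer` now delegated to `@inheritDoc` on `IKafkaClient`, a short reminder of the call shape, based on the 0.7.4 docs removed in this hunk (topic and partitions are illustrative):

```ts
// Temporarily stop delivery from hot partitions without disconnecting the
// consumer; undefined selects the client's default groupId (per 0.7.4 docs).
kafka.pauseConsumer(undefined, [{ topic: 'orders.created', partitions: [0, 1] }]);
// ...drain downstream pressure...
kafka.resumeConsumer(undefined, [{ topic: 'orders.created', partitions: [0, 1] }]);
```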
- /**
- * Resume message delivery for previously paused topic-partitions.
- *
- * @param {string|undefined} groupId Consumer group to resume. Defaults to the client's default groupId.
- * @param {Array<{ topic: string; partitions: number[] }>} assignments Topic-partition pairs to resume.
- */
+ /** @inheritDoc */
 resumeConsumer(groupId: string | undefined, assignments: Array<{
 topic: string;
 partitions: number[];
@@ -194,30 +300,56 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param topic - The topic to replay from `<topic>.dlq`
 * @param options - Options for replay
 * @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
+ * @example
+ * ```ts
+ * const { replayed, skipped } = await kafka.replayDlq('orders.created');
+ * ```
 */
 replayDlq(topic: string, options?: DlqReplayOptions): Promise<{
 replayed: number;
 skipped: number;
 }>;
- resetOffsets(groupId: string | undefined, topic: string, position: "earliest" | "latest"): Promise<void>;
 /**
- * Seek specific topic-partition pairs to explicit offsets for a stopped consumer group.
- * Throws if the group is still running — call `stopConsumer(groupId)` first.
- * Assignments are grouped by topic and committed via `admin.setOffsets`.
+ * Read a compacted topic from the beginning to its current high-watermark.
+ * Returns a `Map<key, EventEnvelope>` with the latest value per key.
+ * Tombstone messages (null value) remove the key from the map.
+ * @example
+ * ```ts
+ * const snapshot = await kafka.readSnapshot('users.state');
+ * const user = snapshot.get('user-42')?.payload;
+ * ```
 */
+ readSnapshot<K extends keyof T & string>(topic: K, options?: ReadSnapshotOptions): Promise<Map<string, EventEnvelope<T[K]>>>;
+ private applySnapshotMessage;
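
> `readSnapshot` pairs naturally with `sendTombstone` from earlier in this file: a tombstoned key drops out of the returned map. A hedged sketch; the topic and key are illustrative:

```ts
// Rebuild latest-value state from a compacted topic (new in 0.8.0).
const snapshot = await kafka.readSnapshot('users.state');
console.log(snapshot.get('user-42')?.payload); // latest value, if any

// After a tombstone, the key should no longer appear in a fresh snapshot.
await kafka.sendTombstone('users.state', 'user-42');
const after = await kafka.readSnapshot('users.state');
console.log(after.has('user-42')); // expected: false
```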
+ /**
+ * Snapshot the current committed offsets of a consumer group into a Kafka topic.
+ * Each call appends a new record — the checkpoint topic is an append-only audit log.
+ * @example
+ * ```ts
+ * const result = await kafka.checkpointOffsets(undefined, 'checkpoints');
+ * console.log(`Saved ${result.partitionCount} offsets`);
+ * ```
+ */
+ checkpointOffsets(groupId: string | undefined, checkpointTopic: string): Promise<CheckpointResult>;
+ /**
+ * Restore a consumer group's committed offsets from the nearest checkpoint in `checkpointTopic`.
+ * Requires the consumer group to be stopped.
+ * @example
+ * ```ts
+ * await kafka.stopConsumer();
+ * await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
+ * ```
+ */
+ restoreFromCheckpoint(groupId: string | undefined, checkpointTopic: string, options?: RestoreCheckpointOptions): Promise<CheckpointRestoreResult>;
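
> The two checkpoint methods above compose into a backup-and-rewind workflow. A sketch combining their inline examples; the 'checkpoints' topic name is illustrative, and `undefined` selects the client's default group:

```ts
// Periodically snapshot committed offsets into the append-only checkpoint topic.
const saved = await kafka.checkpointOffsets(undefined, 'checkpoints');
console.log(`Saved ${saved.partitionCount} offsets`);

// Later: rewind the group to the nearest checkpoint. Per the doc above,
// the group must be stopped first.
await kafka.stopConsumer();
await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
// ...restart consumers from the restored offsets...
```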
+ /** @inheritDoc */
+ resetOffsets(groupId: string | undefined, topic: string, position: "earliest" | "latest"): Promise<void>;
+ /** @inheritDoc */
 seekToOffset(groupId: string | undefined, assignments: Array<{
 topic: string;
 partition: number;
 offset: string;
 }>): Promise<void>;
- /**
- * Seek specific topic-partition pairs to the offset nearest to a given timestamp
- * (in milliseconds) for a stopped consumer group.
- * Throws if the group is still running — call `stopConsumer(groupId)` first.
- * Assignments are grouped by topic and committed via `admin.setOffsets`.
- * If no offset exists at the requested timestamp (e.g. empty partition or
- * future timestamp), the partition falls back to `-1` (end of topic — new messages only).
- */
+ /** @inheritDoc */
 seekToTimestamp(groupId: string | undefined, assignments: Array<{
 topic: string;
 partition: number;
@@ -233,6 +365,11 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * @param groupId Consumer group. Defaults to the client's default groupId.
 *
 * @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
+ * @example
+ * ```ts
+ * const state = kafka.getCircuitState('orders.created', 0);
+ * if (state?.status === 'open') console.warn('Circuit open!');
+ * ```
 */
 getCircuitState(topic: string, partition: number, groupId?: string): {
 status: "closed" | "open" | "half-open";
@@ -249,33 +386,35 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * or group not yet assigned). This is a Kafka protocol limitation:
 * `fetchOffsets` only returns data for topic-partitions that have at least one
 * committed offset. Use `checkStatus()` to verify broker connectivity in that case.
+ * @example
+ * ```ts
+ * const lag = await kafka.getConsumerLag();
+ * const total = lag.reduce((sum, p) => sum + p.lag, 0);
+ * ```
 */
 getConsumerLag(groupId?: string): Promise<Array<{
 topic: string;
 partition: number;
 lag: number;
 }>>;
- /** Check broker connectivity. Never throws — returns a discriminated union. */
+ /** @inheritDoc */
 checkStatus(): Promise<KafkaHealthResult>;
- /**
- * List all consumer groups known to the broker.
- * Useful for monitoring which groups are active and their current state.
- */
+ /** @inheritDoc */
 listConsumerGroups(): Promise<ConsumerGroupSummary[]>;
- /**
- * Describe topics — returns partition layout, leader, replicas, and ISR.
- * @param topics Topic names to describe. Omit to describe all topics.
- */
+ /** @inheritDoc */
 describeTopics(topics?: string[]): Promise<TopicDescription[]>;
- /**
- * Delete records from a topic up to (but not including) the given offsets.
- * All messages with offsets **before** the given offset are deleted.
- */
+ /** @inheritDoc */
 deleteRecords(topic: string, partitions: Array<{
 partition: number;
 offset: string;
 }>): Promise<void>;
- /** Return the client ID provided during `KafkaClient` construction. */
+ /**
+ * Return the client ID provided during `KafkaClient` construction.
+ * @example
+ * ```ts
+ * const id = kafka.getClientId(); // e.g. 'my-service'
+ * ```
+ */
 getClientId(): ClientId;
 /**
 * Return a snapshot of internal event counters accumulated since client creation
@@ -285,15 +424,16 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * aggregated across all topics. If the topic has no recorded events yet, returns
 * a zero-valued snapshot.
 * @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
+ * @example
+ * ```ts
+ * const { processedCount, dlqCount } = kafka.getMetrics();
+ * const topicMetrics = kafka.getMetrics('orders.created');
+ * ```
 */
 getMetrics(topic?: string): Readonly<KafkaMetrics>;
- /**
- * Reset internal event counters to zero.
- *
- * @param topic Topic name to reset. When omitted, all topics are reset.
- */
+ /** @inheritDoc */
 resetMetrics(topic?: string): void;
- /** Gracefully disconnect producer, all consumers, and admin. */
+ /** @inheritDoc */
 disconnect(drainTimeoutMs?: number): Promise<void>;
 /**
 * NestJS lifecycle hook — called automatically when the host module is torn down.
@@ -301,11 +441,7 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 * `KafkaModule` relies on this method; no separate destroy provider is needed.
 */
 onModuleDestroy(): Promise<void>;
- /**
- * Register SIGTERM / SIGINT handlers that drain in-flight messages before
- * disconnecting. Call this once after constructing the client in non-NestJS apps.
- * NestJS apps get drain for free via `onModuleDestroy` → `disconnect()`.
- */
+ /** @inheritDoc */
 enableGracefulShutdown(signals?: NodeJS.Signals[], drainTimeoutMs?: number): void;
 private preparePayload;
 /**
@@ -331,6 +467,8 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 private resolveDeduplicationContext;
 /** Guard checks shared by startConsumer and startBatchConsumer. */
 private validateTopicConsumerOpts;
+ /** Ensure all required topics exist for a consumer: base, DLQ, and dedup topics. */
+ private ensureConsumerTopics;
 /** Create EOS transactional producer context for atomic main → retry.1 routing. */
 private makeEosMainContext;
 /** Start companion retry-level consumers and register them under the main groupId. */
@@ -341,7 +479,20 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
 private buildRetryTopicDeps;
 }
 
- /** Error thrown when a consumer message handler fails. */
+ /**
+ * Error thrown when a consumer message handler fails.
+ * @example
+ * ```ts
+ * await kafka.startConsumer(['orders'], async (envelope) => {
+ * try { await process(envelope); }
+ * catch (err) {
+ * if (err instanceof KafkaProcessingError) {
+ * console.error(err.topic, err.originalMessage);
+ * }
+ * }
+ * });
+ * ```
+ */
 declare class KafkaProcessingError extends Error {
 readonly topic: string;
 readonly originalMessage: unknown;
@@ -350,7 +501,18 @@ declare class KafkaProcessingError extends Error {
 cause?: Error;
 });
 }
- /** Error thrown when schema validation fails on send or consume. */
+ /**
+ * Error thrown when schema validation fails on send or consume.
+ * @example
+ * ```ts
+ * try { await kafka.sendMessage('orders.created', invalidPayload); }
+ * catch (err) {
+ * if (err instanceof KafkaValidationError) {
+ * console.error('Validation failed for topic:', err.topic);
+ * }
+ * }
+ * ```
+ */
 declare class KafkaValidationError extends Error {
 readonly topic: string;
 readonly originalMessage: unknown;
@@ -359,7 +521,17 @@ declare class KafkaValidationError extends Error {
 cause?: Error;
 });
 }
- /** Error thrown when all retry attempts are exhausted for a message. */
+ /**
+ * Error thrown when all retry attempts are exhausted for a message.
+ * @example
+ * ```ts
+ * const kafka = new KafkaClient(clientId, groupId, brokers, { onMessageLost: (ctx) => {
+ * if (ctx.error instanceof KafkaRetryExhaustedError) {
+ * console.error(`Exhausted after ${ctx.error.attempts} attempts on ${ctx.error.topic}`);
+ * }
+ * }});
+ * ```
+ */
 declare class KafkaRetryExhaustedError extends KafkaProcessingError {
 readonly attempts: number;
 constructor(topic: string, originalMessage: unknown, attempts: number, options?: {
@@ -367,4 +539,4 @@ declare class KafkaRetryExhaustedError extends KafkaProcessingError {
 });
 }
 
- export { BatchMessageItem, BatchMeta, BatchSendOptions, ClientId, ConsumerGroupSummary, ConsumerHandle, ConsumerOptions, DlqReplayOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaHealthResult, KafkaMetrics, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, MessageHeaders, SendOptions, TopicDescription, TopicDescriptor, TopicMapConstraint, TransactionContext };
+ export { BatchMessageItem, BatchMeta, BatchSendOptions, CheckpointRestoreResult, CheckpointResult, ClientId, ConsumerGroupSummary, ConsumerHandle, ConsumerOptions, DlqReplayOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaHealthResult, KafkaMetrics, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, MessageHeaders, ReadSnapshotOptions, RestoreCheckpointOptions, RoutingOptions, SendOptions, TopicDescription, TopicDescriptor, TopicMapConstraint, TransactionContext, TransactionalHandlerContext, WindowConsumerOptions, WindowMeta };