@drarzter/kafka-client 0.8.0 → 0.9.2

This diff shows the content of the two publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
package/dist/core.d.ts CHANGED
@@ -1,5 +1,5 @@
- import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, a as KafkaClientOptions, c as TopicDescriptor, O as SendOptions, M as MessageHeaders, B as BatchMessageItem, f as BatchSendOptions, X as TransactionContext, r as EventEnvelope, b as ConsumerOptions, n as ConsumerHandle, e as BatchMeta, $ as WindowMeta, _ as WindowConsumerOptions, L as RoutingOptions, Y as TransactionalHandlerContext, q as DlqReplayOptions, R as ReadSnapshotOptions, j as CheckpointResult, F as RestoreCheckpointOptions, i as CheckpointRestoreResult, d as KafkaHealthResult, m as ConsumerGroupSummary, U as TopicDescription, z as KafkaMetrics } from './types-CNfeoF3_.js';
- export { g as BeforeConsumeResult, h as CheckpointEntry, k as CircuitBreakerOptions, l as CompressionType, o as ConsumerInterceptor, D as DeduplicationOptions, p as DlqReason, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, s as HEADER_EVENT_ID, t as HEADER_LAMPORT_CLOCK, u as HEADER_SCHEMA_VERSION, v as HEADER_TIMESTAMP, w as HEADER_TRACEPARENT, x as InferSchema, K as KafkaInstrumentation, y as KafkaLogger, A as MessageLostContext, J as RetryOptions, S as SchemaLike, N as SchemaParseContext, P as SubscribeRetryOptions, Q as TTopicMessageMap, V as TopicPartitionInfo, W as TopicsFrom, Z as TtlExpiredContext, a0 as buildEnvelopeHeaders, a1 as decodeHeaders, a2 as extractEnvelope, a3 as getEnvelopeContext, a4 as runWithEnvelopeContext, a5 as topic } from './types-CNfeoF3_.js';
+ import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, s as KafkaClientOptions, u as TopicDescriptor, a7 as SendOptions, a0 as MessageHeaders, B as BatchMessageItem, x as BatchSendOptions, ad as TransactionContext, Q as EventEnvelope, t as ConsumerOptions, J as ConsumerHandle, w as BatchMeta, ah as WindowMeta, ag as WindowConsumerOptions, a5 as RoutingOptions, ae as TransactionalHandlerContext, O as DlqReplayOptions, a2 as ReadSnapshotOptions, D as CheckpointResult, a3 as RestoreCheckpointOptions, A as CheckpointRestoreResult, v as KafkaHealthResult, H as ConsumerGroupSummary, aa as TopicDescription, $ as KafkaMetrics } from './types-4XNxkici.js';
+ export { y as BeforeConsumeResult, z as CheckpointEntry, E as CircuitBreakerOptions, F as CompressionType, L as ConsumerInterceptor, M as DeduplicationOptions, N as DlqReason, P as EnvelopeHeaderOptions, R as HEADER_CORRELATION_ID, U as HEADER_EVENT_ID, V as HEADER_LAMPORT_CLOCK, W as HEADER_SCHEMA_VERSION, X as HEADER_TIMESTAMP, Y as HEADER_TRACEPARENT, Z as InferSchema, K as KafkaInstrumentation, _ as KafkaLogger, a1 as MessageLostContext, a4 as RetryOptions, S as SchemaLike, a6 as SchemaParseContext, a8 as SubscribeRetryOptions, a9 as TTopicMessageMap, ab as TopicPartitionInfo, ac as TopicsFrom, af as TtlExpiredContext, ai as buildEnvelopeHeaders, aj as decodeHeaders, ak as extractEnvelope, al as getEnvelopeContext, am as runWithEnvelopeContext, an as topic } from './types-4XNxkici.js';
 
  /**
  * Type-safe Kafka client.
@@ -9,54 +9,8 @@ export { g as BeforeConsumeResult, h as CheckpointEntry, k as CircuitBreakerOpti
  * @typeParam T - Topic-to-message type mapping for compile-time safety.
  */
  declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClient<T> {
- private readonly kafka;
- private readonly producer;
- private txProducer;
- private txProducerInitPromise;
- /** Maps transactionalId → Producer for each active retry level consumer. */
- private readonly retryTxProducers;
- private readonly consumers;
- private readonly logger;
- private readonly autoCreateTopicsEnabled;
- private readonly strictSchemasEnabled;
- private readonly numPartitions;
- private readonly ensuredTopics;
- /** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
- private readonly ensureTopicPromises;
- private readonly defaultGroupId;
- private readonly schemaRegistry;
- private readonly runningConsumers;
- private readonly consumerCreationOptions;
- /** Maps each main consumer groupId to its companion retry level groupIds. */
- private readonly companionGroupIds;
- private readonly instrumentation;
- private readonly onMessageLost;
- private readonly onTtlExpired;
- private readonly onRebalance;
- /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
- private readonly txId;
- /** Monotonically increasing Lamport clock stamped on every outgoing message. */
- private _lamportClock;
- /** Topics to scan for the highest Lamport clock value on `connectProducer()`. */
- private readonly clockRecoveryTopics;
- /** Lag-throttle configuration — set when `lagThrottle` is configured. */
- private readonly lagThrottleOpts;
- /** `true` while the observed consumer group lag exceeds `lagThrottle.maxLag`. */
- private _lagThrottled;
- /** Background polling timer for lag throttle. */
- private _lagThrottleTimer;
- /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
- private readonly dedupStates;
- private readonly circuitBreaker;
- private readonly adminOps;
- private readonly metrics;
- private readonly inFlight;
  readonly clientId: ClientId;
- private readonly _producerOpsDeps;
- private readonly _consumerOpsDeps;
- private readonly _retryTopicDeps;
- /** DLQ header keys added by the pipeline — stripped before re-publishing. */
- private static readonly DLQ_HEADER_KEYS;
+ private readonly ctx;
  /**
  * Create a new KafkaClient.
  * @param clientId Unique client identifier (used in Kafka metadata and logs).
@@ -73,208 +27,35 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  * ```
  */
  constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
- /**
- * Send a single typed message. Accepts a topic key or a `TopicDescriptor`.
- *
- * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
- * @param message Message payload — validated against the topic schema when one is registered.
- * @param options Optional per-send settings: `key`, `headers`, `correlationId`, `compression`, etc.
- * @example
- * ```ts
- * await kafka.sendMessage('orders.created', { orderId: '123', amount: 99 });
- * ```
- */
+ /** @inheritDoc */
  sendMessage<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, message: D["__type"], options?: SendOptions): Promise<void>;
  sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
- /**
- * Send a null-value (tombstone) message. Used with log-compacted topics to signal
- * that a key's record should be removed during the next compaction cycle.
- *
- * Tombstones skip envelope headers, schema validation, and Lamport clock stamping.
- * Both `beforeSend` and `afterSend` instrumentation hooks are still called so tracing works correctly.
- *
- * @param topic Topic name.
- * @param key Partition key identifying the record to tombstone.
- * @param headers Optional custom Kafka headers.
- * @example
- * ```ts
- * await kafka.sendTombstone('users.state', 'user-42');
- * ```
- */
+ /** @inheritDoc */
  sendTombstone(topic: string, key: string, headers?: MessageHeaders): Promise<void>;
- /**
- * Send multiple typed messages in a single Kafka produce request. Accepts a topic key or a `TopicDescriptor`.
- *
- * Each item in `messages` can carry its own `key`, `headers`, `correlationId`, and `schemaVersion`.
- * The `key` is used for partition routing — messages with the same key always land on the same partition.
- *
- * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
- * @param messages Array of messages to send.
- * @param options Optional batch-level settings: `compression` codec.
- * @example
- * ```ts
- * await kafka.sendBatch('orders.created', [
- * { value: { orderId: '1', amount: 10 }, key: 'order-1' },
- * ]);
- * ```
- */
+ /** @inheritDoc */
  sendBatch<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, messages: Array<BatchMessageItem<D["__type"]>>, options?: BatchSendOptions): Promise<void>;
  sendBatch<K extends keyof T>(topic: K, messages: Array<BatchMessageItem<T[K]>>, options?: BatchSendOptions): Promise<void>;
- /**
- * Execute multiple sends atomically. Commits on success, aborts on error.
- * @example
- * ```ts
- * await kafka.transaction(async (tx) => {
- * await tx.send('orders.created', { orderId: '123' });
- * await tx.send('inventory.reserved', { itemId: 'a', qty: 1 });
- * });
- * ```
- */
+ /** @inheritDoc */
  transaction(fn: (ctx: TransactionContext<T>) => Promise<void>): Promise<void>;
- /**
- * Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
- * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
- */
+ /** @inheritDoc */
  connectProducer(): Promise<void>;
- /** Start the background lag-polling loop for producer throttling. */
- private startLagThrottlePoller;
- /** Wait until lag drops below the threshold (or maxWaitMs is exceeded). */
- private waitIfThrottled;
- /**
- * Recover the Lamport clock from the last message across the given topics.
- *
- * For each topic, fetches partition high-watermarks via admin, creates a
- * short-lived consumer, seeks every non-empty partition to its last offset
- * (`highWatermark − 1`), reads one message per partition, and extracts the
- * maximum `x-lamport-clock` header value. On completion `_lamportClock` is
- * set to that maximum so the next `++_lamportClock` yields a strictly greater
- * value than any previously sent clock.
- *
- * Topics that are empty or missing are silently skipped.
- */
- private recoverLamportClock;
- /**
- * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
- */
+ /** @internal */
  disconnectProducer(): Promise<void>;
- /**
- * Subscribe to one or more topics and start consuming messages one at a time.
- *
- * Each message is delivered to `handleMessage` as a fully-decoded `EventEnvelope`.
- * The call blocks until the consumer is connected and the subscription is set up,
- * then returns a `ConsumerHandle` with a `stop()` method for clean shutdown.
- *
- * @param topics Array of topic keys, `TopicDescriptor` objects, or `RegExp` patterns.
- * Regex patterns cannot be combined with `retryTopics: true`.
- * @param handleMessage Async handler called for every message. Throw to trigger retries.
- * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `circuitBreaker`, etc.
- * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
- * @example
- * ```ts
- * const handle = await kafka.startConsumer(['orders.created'], async (envelope) => {
- * await processOrder(envelope.payload);
- * }, { retry: { maxRetries: 3 }, dlq: true });
- * await handle.stop();
- * ```
- */
+ /** @inheritDoc */
  startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
  startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
- /**
- * Subscribe to one or more topics and consume messages in batches.
- *
- * `handleBatch` receives an array of decoded `EventEnvelope` objects together with
- * batch metadata (topic, partition, high-watermark offset). Prefer this over
- * `startConsumer` when throughput matters more than per-message latency.
- *
- * Set `autoCommit: false` in options when the handler calls `resolveOffset()` or
- * `commitOffsetsIfNecessary()` directly, to avoid offset conflicts.
- *
- * @param topics Array of topic keys, `TopicDescriptor` objects, or `RegExp` patterns.
- * @param handleBatch Async handler called with each batch of decoded messages.
- * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `autoCommit`, etc.
- * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
- * @example
- * ```ts
- * await kafka.startBatchConsumer(['metrics'], async (envelopes, meta) => {
- * await db.insertMany(envelopes.map(e => e.payload));
- * meta.resolveOffset(envelopes.at(-1)!.offset);
- * }, { autoCommit: false });
- * ```
- */
+ /** @inheritDoc */
  startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
  startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
- /**
- * Consume messages from a topic as an AsyncIterableIterator.
- * Use with `for await` — breaking out of the loop automatically stops the consumer.
- *
- * @example
- * for await (const envelope of kafka.consume('my.topic')) {
- * console.log(envelope.data);
- * }
- */
+ /** @inheritDoc */
  consume<K extends keyof T & string>(topic: K, options?: ConsumerOptions<T>): AsyncIterableIterator<EventEnvelope<T[K]>>;
- /**
- * Accumulate messages into a window and flush the handler when either
- * `maxMessages` is reached or `maxMs` has elapsed — whichever fires first.
- * Remaining messages are flushed before the consumer disconnects on `stop()`.
- * @example
- * ```ts
- * await kafka.startWindowConsumer('events', async (batch, meta) => {
- * await db.insertMany(batch.map(e => e.payload));
- * }, { maxMessages: 100, maxMs: 5_000 });
- * ```
- */
+ /** @inheritDoc */
  startWindowConsumer<K extends keyof T & string>(topic: K, handler: (envelopes: EventEnvelope<T[K]>[], meta: WindowMeta) => Promise<void>, options: WindowConsumerOptions<T>): Promise<ConsumerHandle>;
- /**
- * Subscribe to topics and dispatch each message to a handler based on the value
- * of a specific Kafka header. A thin, zero-overhead wrapper over `startConsumer`.
- *
- * All `ConsumerOptions` (retry, DLQ, deduplication, circuit breaker, etc.) apply
- * uniformly across every route.
- * @example
- * ```ts
- * await kafka.startRoutedConsumer(['domain.events'], {
- * header: 'x-event-type',
- * routes: {
- * 'order.created': async (e) => handleOrderCreated(e.payload),
- * },
- * });
- * ```
- */
+ /** @inheritDoc */
  startRoutedConsumer<K extends Array<keyof T>>(topics: K, routing: RoutingOptions<T[K[number]]>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
- /**
- * Subscribe to topics and consume messages with exactly-once semantics for
- * read-process-write pipelines.
- *
- * Each message is handled inside a dedicated Kafka transaction.
- * The handler receives a `TransactionalHandlerContext` whose `send` / `sendBatch`
- * methods stage outgoing messages inside that transaction. On handler success the
- * source offset commit and all staged sends are committed atomically. On handler
- * failure the transaction is aborted and the source message is redelivered — no
- * partial writes become visible to downstream consumers.
- *
- * Incompatible with `retryTopics: true` — throws at startup if set.
- * @example
- * ```ts
- * await kafka.startTransactionalConsumer(['orders.created'], async (envelope, tx) => {
- * await tx.send('inventory.reserved', { orderId: envelope.payload.orderId, qty: 1 });
- * });
- * ```
- */
+ /** @inheritDoc */
  startTransactionalConsumer<K extends Array<keyof T>>(topics: K, handler: (envelope: EventEnvelope<T[K[number]]>, tx: TransactionalHandlerContext<T>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
- /**
- * Stop all consumers or a specific group.
- *
- * If `groupId` is unspecified, all active consumers are stopped.
- * If `groupId` is specified, only the consumer with that group ID is stopped.
- *
- * @throws {Error} if the consumer fails to disconnect.
- * @example
- * ```ts
- * await kafka.stopConsumer('billing-service'); // stop one group
- * await kafka.stopConsumer(); // stop all
- * ```
- */
+ /** @inheritDoc */
  stopConsumer(groupId?: string): Promise<void>;
  /** @inheritDoc */
  pauseConsumer(groupId: string | undefined, assignments: Array<{
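Note: this hunk collapses the inline method docs to `/** @inheritDoc */`, so the `@example` blocks no longer appear in `core.d.ts` (presumably they are now inherited from `IKafkaClient` at doc-generation time). The declared signatures are unchanged; the sketch below consolidates the deleted producer and consumer examples against those signatures. The import paths, topic map, broker address, and the casts to the `ClientId`/`GroupId` types are illustrative assumptions, not part of the package.

```ts
import { KafkaClient } from '@drarzter/kafka-client';
import type { ClientId, GroupId } from '@drarzter/kafka-client';

// Hypothetical topic map; 'users.state' is a compacted topic reused in the
// operational sketch after the next hunk.
type TopicMap = {
  'orders.created': { orderId: string; amount: number };
  'inventory.reserved': { itemId: string; qty: number };
  'users.state': { name: string };
};

async function main(): Promise<void> {
  // The casts assume ClientId/GroupId are branded string types.
  const kafka = new KafkaClient<TopicMap>(
    'my-service' as ClientId,
    'billing-service' as GroupId,
    ['localhost:9092'],
  );
  await kafka.connectProducer();

  // Single typed send (from the deleted sendMessage example).
  await kafka.sendMessage('orders.created', { orderId: '123', amount: 99 });

  // Atomic multi-topic send: commits on success, aborts on error.
  await kafka.transaction(async (tx) => {
    await tx.send('orders.created', { orderId: '124', amount: 10 });
    await tx.send('inventory.reserved', { itemId: 'a', qty: 1 });
  });

  // Per-message consumer with retries and a DLQ (from the deleted startConsumer example).
  const handle = await kafka.startConsumer(['orders.created'], async (envelope) => {
    console.log('processing order', envelope.payload.orderId);
  }, { retry: { maxRetries: 3 }, dlq: true });

  await handle.stop();
  await kafka.disconnect();
}

main().catch(console.error);
```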
@@ -286,60 +67,16 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  topic: string;
  partitions: number[];
  }>): void;
- /** Pause all assigned partitions of a topic for a consumer group (used for queue backpressure). */
- private pauseTopicAllPartitions;
- /** Resume all assigned partitions of a topic for a consumer group (used for queue backpressure). */
- private resumeTopicAllPartitions;
- /**
- * Re-publish messages from a dead letter queue back to the original topic.
- *
- * Messages are consumed from `<topic>.dlq` and re-published to `<topic>`.
- * The original topic is determined by the `x-dlq-original-topic` header.
- * The `x-dlq-*` headers are stripped before re-publishing.
- *
- * @param topic - The topic to replay from `<topic>.dlq`
- * @param options - Options for replay
- * @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
- * @example
- * ```ts
- * const { replayed, skipped } = await kafka.replayDlq('orders.created');
- * ```
- */
+ /** @inheritDoc */
  replayDlq(topic: string, options?: DlqReplayOptions): Promise<{
  replayed: number;
  skipped: number;
  }>;
- /**
- * Read a compacted topic from the beginning to its current high-watermark.
- * Returns a `Map<key, EventEnvelope>` with the latest value per key.
- * Tombstone messages (null value) remove the key from the map.
- * @example
- * ```ts
- * const snapshot = await kafka.readSnapshot('users.state');
- * const user = snapshot.get('user-42')?.payload;
- * ```
- */
+ /** @inheritDoc */
  readSnapshot<K extends keyof T & string>(topic: K, options?: ReadSnapshotOptions): Promise<Map<string, EventEnvelope<T[K]>>>;
- private applySnapshotMessage;
- /**
- * Snapshot the current committed offsets of a consumer group into a Kafka topic.
- * Each call appends a new record — the checkpoint topic is an append-only audit log.
- * @example
- * ```ts
- * const result = await kafka.checkpointOffsets(undefined, 'checkpoints');
- * console.log(`Saved ${result.partitionCount} offsets`);
- * ```
- */
+ /** @inheritDoc */
  checkpointOffsets(groupId: string | undefined, checkpointTopic: string): Promise<CheckpointResult>;
- /**
- * Restore a consumer group's committed offsets from the nearest checkpoint in `checkpointTopic`.
- * Requires the consumer group to be stopped.
- * @example
- * ```ts
- * await kafka.stopConsumer();
- * await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
- * ```
- */
+ /** @inheritDoc */
  restoreFromCheckpoint(groupId: string | undefined, checkpointTopic: string, options?: RestoreCheckpointOptions): Promise<CheckpointRestoreResult>;
  /** @inheritDoc */
  resetOffsets(groupId: string | undefined, topic: string, position: "earliest" | "latest"): Promise<void>;
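The same applies to the operational APIs in this hunk. The deleted `@example` blocks for `replayDlq`, `readSnapshot`, `checkpointOffsets`, and `restoreFromCheckpoint` are gathered below; the fragment assumes the `kafka` client and `TopicMap` from the previous sketch and runs inside the same async context.

```ts
// DLQ replay: consumes '<topic>.dlq' and re-publishes to the original topic,
// stripping the x-dlq-* headers (deleted replayDlq example).
const { replayed, skipped } = await kafka.replayDlq('orders.created');
console.log(`replayed=${replayed}, skipped=${skipped}`);

// Compacted-topic snapshot: latest value per key; tombstones remove the key.
const snapshot = await kafka.readSnapshot('users.state');
console.log(snapshot.get('user-42')?.payload.name);

// Checkpoint the group's committed offsets into an append-only topic...
const result = await kafka.checkpointOffsets(undefined, 'checkpoints');
console.log(`Saved ${result.partitionCount} offsets`);

// ...and restore them later; the consumer group must be stopped first.
await kafka.stopConsumer();
await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
```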
@@ -355,43 +92,7 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  partition: number;
  timestamp: number;
  }>): Promise<void>;
- /**
- * Returns the current circuit breaker state for a specific topic partition.
- * Returns `undefined` when no circuit state exists — either `circuitBreaker` is not
- * configured for the group, or the circuit has never been tripped.
- *
- * @param topic Topic name.
- * @param partition Partition index.
- * @param groupId Consumer group. Defaults to the client's default groupId.
- *
- * @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
- * @example
- * ```ts
- * const state = kafka.getCircuitState('orders.created', 0);
- * if (state?.status === 'open') console.warn('Circuit open!');
- * ```
- */
- getCircuitState(topic: string, partition: number, groupId?: string): {
- status: "closed" | "open" | "half-open";
- failures: number;
- windowSize: number;
- } | undefined;
- /**
- * Query consumer group lag per partition.
- * Lag = broker high-watermark − last committed offset.
- * A committed offset of -1 (nothing committed yet) counts as full lag.
- *
- * Returns an empty array when the consumer group has never committed any
- * offsets (freshly created group, `autoCommit: false` with no manual commits,
- * or group not yet assigned). This is a Kafka protocol limitation:
- * `fetchOffsets` only returns data for topic-partitions that have at least one
- * committed offset. Use `checkStatus()` to verify broker connectivity in that case.
- * @example
- * ```ts
- * const lag = await kafka.getConsumerLag();
- * const total = lag.reduce((sum, p) => sum + p.lag, 0);
- * ```
- */
+ /** @inheritDoc */
  getConsumerLag(groupId?: string): Promise<Array<{
  topic: string;
  partition: number;
@@ -408,75 +109,23 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
  partition: number;
  offset: string;
  }>): Promise<void>;
- /**
- * Return the client ID provided during `KafkaClient` construction.
- * @example
- * ```ts
- * const id = kafka.getClientId(); // e.g. 'my-service'
- * ```
- */
- getClientId(): ClientId;
- /**
- * Return a snapshot of internal event counters accumulated since client creation
- * (or since the last `resetMetrics()` call).
- *
- * @param topic Topic name to scope the snapshot to. When omitted, counters are
- * aggregated across all topics. If the topic has no recorded events yet, returns
- * a zero-valued snapshot.
- * @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
- * @example
- * ```ts
- * const { processedCount, dlqCount } = kafka.getMetrics();
- * const topicMetrics = kafka.getMetrics('orders.created');
- * ```
- */
+ /** @inheritDoc */
+ getCircuitState(topic: string, partition: number, groupId?: string): {
+ status: "closed" | "open" | "half-open";
+ failures: number;
+ windowSize: number;
+ } | undefined;
+ /** @inheritDoc */
  getMetrics(topic?: string): Readonly<KafkaMetrics>;
  /** @inheritDoc */
  resetMetrics(topic?: string): void;
+ getClientId(): ClientId;
  /** @inheritDoc */
  disconnect(drainTimeoutMs?: number): Promise<void>;
- /**
- * NestJS lifecycle hook — called automatically when the host module is torn down.
- * Drains in-flight handlers and disconnects all producers, consumers, and admin.
- * `KafkaModule` relies on this method; no separate destroy provider is needed.
- */
+ /** NestJS lifecycle hook — called automatically on module teardown. */
  onModuleDestroy(): Promise<void>;
  /** @inheritDoc */
  enableGracefulShutdown(signals?: NodeJS.Signals[], drainTimeoutMs?: number): void;
- private preparePayload;
- /**
- * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
- * The handler itself is not cancelled — the warning is diagnostic only.
- */
- private wrapWithTimeoutWarning;
- /**
- * Create and connect a transactional producer for EOS retry routing.
- * Each retry level consumer gets its own producer with a unique `transactionalId`
- * so Kafka can fence stale producers on restart without affecting other levels.
- */
- private createRetryTxProducer;
- /**
- * Ensure that a topic exists by creating it if it doesn't already exist.
- * If `autoCreateTopics` is disabled, returns immediately.
- * Concurrent calls for the same topic are deduplicated.
- */
- private ensureTopic;
- /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
- private setupConsumer;
- /** Create or retrieve the deduplication context for a consumer group. */
- private resolveDeduplicationContext;
- /** Guard checks shared by startConsumer and startBatchConsumer. */
- private validateTopicConsumerOpts;
- /** Ensure all required topics exist for a consumer: base, DLQ, and dedup topics. */
- private ensureConsumerTopics;
- /** Create EOS transactional producer context for atomic main → retry.1 routing. */
- private makeEosMainContext;
- /** Start companion retry-level consumers and register them under the main groupId. */
- private launchRetryChain;
- /** Build MessageHandlerDeps with circuit breaker callbacks bound to the given groupId. */
- private messageDepsFor;
- /** Build the deps object passed to retry topic consumers. */
- private buildRetryTopicDeps;
  }
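The last two hunks also drop the monitoring examples (`getCircuitState`, `getConsumerLag`, `getMetrics`). A consolidated sketch, continuing with the same `kafka` client; the `p.lag` field and the `processedCount`/`dlqCount` counters come from the deleted docs.

```ts
// Circuit breaker snapshot for one partition; undefined until the breaker
// has state for that partition (deleted getCircuitState example).
const state = kafka.getCircuitState('orders.created', 0);
if (state?.status === 'open') console.warn('Circuit open!');

// Per-partition lag: broker high-watermark minus last committed offset.
// Empty until the group has committed at least one offset.
const lag = await kafka.getConsumerLag();
const total = lag.reduce((sum, p) => sum + p.lag, 0);
console.log(`total lag: ${total}`);

// Event counters since client creation or the last resetMetrics() call.
const { processedCount, dlqCount } = kafka.getMetrics();
const topicMetrics = kafka.getMetrics('orders.created');
console.log({ processedCount, dlqCount, topicMetrics });
```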
 
  /**