@drarzter/kafka-client 0.7.0 → 0.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/core.d.ts CHANGED
@@ -1,5 +1,5 @@
1
- import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, a as KafkaClientOptions, c as TopicDescriptor, w as SendOptions, B as BatchMessageItem, A as TransactionContext, l as EventEnvelope, b as ConsumerOptions, h as ConsumerHandle, e as BatchMeta, k as DlqReplayOptions, d as KafkaHealthResult, t as KafkaMetrics } from './types-DqQ7IXZr.js';
2
- export { f as BeforeConsumeResult, g as CircuitBreakerOptions, i as ConsumerInterceptor, D as DeduplicationOptions, j as DlqReason, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, m as HEADER_EVENT_ID, n as HEADER_LAMPORT_CLOCK, o as HEADER_SCHEMA_VERSION, p as HEADER_TIMESTAMP, q as HEADER_TRACEPARENT, r as InferSchema, K as KafkaInstrumentation, s as KafkaLogger, M as MessageHeaders, u as MessageLostContext, R as RetryOptions, S as SchemaLike, v as SchemaParseContext, x as SubscribeRetryOptions, y as TTopicMessageMap, z as TopicsFrom, F as buildEnvelopeHeaders, J as decodeHeaders, L as extractEnvelope, N as getEnvelopeContext, O as runWithEnvelopeContext, P as topic } from './types-DqQ7IXZr.js';
1
+ import { T as TopicMapConstraint, I as IKafkaClient, C as ClientId, G as GroupId, a as KafkaClientOptions, c as TopicDescriptor, z as SendOptions, M as MessageHeaders, B as BatchMessageItem, f as BatchSendOptions, O as TransactionContext, o as EventEnvelope, b as ConsumerOptions, k as ConsumerHandle, e as BatchMeta, n as DlqReplayOptions, d as KafkaHealthResult, j as ConsumerGroupSummary, J as TopicDescription, w as KafkaMetrics } from './types-4qWrf2aJ.js';
2
+ export { g as BeforeConsumeResult, h as CircuitBreakerOptions, i as CompressionType, l as ConsumerInterceptor, D as DeduplicationOptions, m as DlqReason, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, p as HEADER_EVENT_ID, q as HEADER_LAMPORT_CLOCK, r as HEADER_SCHEMA_VERSION, s as HEADER_TIMESTAMP, t as HEADER_TRACEPARENT, u as InferSchema, K as KafkaInstrumentation, v as KafkaLogger, x as MessageLostContext, R as RetryOptions, S as SchemaLike, y as SchemaParseContext, A as SubscribeRetryOptions, F as TTopicMessageMap, L as TopicPartitionInfo, N as TopicsFrom, P as TtlExpiredContext, Q as buildEnvelopeHeaders, U as decodeHeaders, V as extractEnvelope, W as getEnvelopeContext, X as runWithEnvelopeContext, Y as topic } from './types-4qWrf2aJ.js';
3
3
 
4
4
  /**
5
5
  * Type-safe Kafka client.
@@ -32,6 +32,7 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
32
32
  private readonly companionGroupIds;
33
33
  private readonly instrumentation;
34
34
  private readonly onMessageLost;
35
+ private readonly onTtlExpired;
35
36
  private readonly onRebalance;
36
37
  /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
37
38
  private readonly txId;
@@ -50,12 +51,39 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
50
51
  private readonly drainResolvers;
51
52
  readonly clientId: ClientId;
52
53
  constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
53
- /** Send a single typed message. Accepts a topic key or a TopicDescriptor. */
54
+ /**
55
+ * Send a single typed message. Accepts a topic key or a `TopicDescriptor`.
56
+ *
57
+ * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
58
+ * @param message Message payload — validated against the topic schema when one is registered.
59
+ * @param options Optional per-send settings: `key`, `headers`, `correlationId`, `compression`, etc.
60
+ */
54
61
  sendMessage<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, message: D["__type"], options?: SendOptions): Promise<void>;
55
62
  sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
56
- /** Send multiple typed messages in one call. Accepts a topic key or a TopicDescriptor. */
57
- sendBatch<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, messages: Array<BatchMessageItem<D["__type"]>>): Promise<void>;
58
- sendBatch<K extends keyof T>(topic: K, messages: Array<BatchMessageItem<T[K]>>): Promise<void>;
63
+ /**
64
+ * Send a null-value (tombstone) message. Used with log-compacted topics to signal
65
+ * that a key's record should be removed during the next compaction cycle.
66
+ *
67
+ * Tombstones skip envelope headers, schema validation, and Lamport clock stamping.
68
+ * Both `beforeSend` and `afterSend` instrumentation hooks are still called so tracing works correctly.
69
+ *
70
+ * @param topic Topic name.
71
+ * @param key Partition key identifying the record to tombstone.
72
+ * @param headers Optional custom Kafka headers.
73
+ */
74
+ sendTombstone(topic: string, key: string, headers?: MessageHeaders): Promise<void>;
75
+ /**
76
+ * Send multiple typed messages in a single Kafka produce request. Accepts a topic key or a `TopicDescriptor`.
77
+ *
78
+ * Each item in `messages` can carry its own `key`, `headers`, `correlationId`, and `schemaVersion`.
79
+ * The `key` is used for partition routing — messages with the same key always land on the same partition.
80
+ *
81
+ * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
82
+ * @param messages Array of messages to send.
83
+ * @param options Optional batch-level settings: `compression` codec.
84
+ */
85
+ sendBatch<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, messages: Array<BatchMessageItem<D["__type"]>>, options?: BatchSendOptions): Promise<void>;
86
+ sendBatch<K extends keyof T>(topic: K, messages: Array<BatchMessageItem<T[K]>>, options?: BatchSendOptions): Promise<void>;
59
87
  /** Execute multiple sends atomically. Commits on success, aborts on error. */
60
88
  transaction(fn: (ctx: TransactionContext<T>) => Promise<void>): Promise<void>;
61
89
  /**
@@ -67,10 +95,36 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
67
95
  * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
68
96
  */
69
97
  disconnectProducer(): Promise<void>;
70
- /** Subscribe to topics and start consuming messages with the given handler. */
98
+ /**
99
+ * Subscribe to one or more topics and start consuming messages one at a time.
100
+ *
101
+ * Each message is delivered to `handleMessage` as a fully-decoded `EventEnvelope`.
102
+ * The call blocks until the consumer is connected and the subscription is set up,
103
+ * then returns a `ConsumerHandle` with a `stop()` method for clean shutdown.
104
+ *
105
+ * @param topics Array of topic keys, `TopicDescriptor` objects, or `RegExp` patterns.
106
+ * Regex patterns cannot be combined with `retryTopics: true`.
107
+ * @param handleMessage Async handler called for every message. Throw to trigger retries.
108
+ * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `circuitBreaker`, etc.
109
+ * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
110
+ */
71
111
  startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
72
112
  startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
73
- /** Subscribe to topics and consume messages in batches. */
113
+ /**
114
+ * Subscribe to one or more topics and consume messages in batches.
115
+ *
116
+ * `handleBatch` receives an array of decoded `EventEnvelope` objects together with
117
+ * batch metadata (topic, partition, high-watermark offset). Prefer this over
118
+ * `startConsumer` when throughput matters more than per-message latency.
119
+ *
120
+ * Set `autoCommit: false` in options when the handler calls `resolveOffset()` or
121
+ * `commitOffsetsIfNecessary()` directly, to avoid offset conflicts.
122
+ *
123
+ * @param topics Array of topic keys, `TopicDescriptor` objects, or `RegExp` patterns.
124
+ * @param handleBatch Async handler called with each batch of decoded messages.
125
+ * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `autoCommit`, etc.
126
+ * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
127
+ */
74
128
  startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
75
129
  startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
76
130
  /**
@@ -83,17 +137,52 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
83
137
  * }
84
138
  */
85
139
  consume<K extends keyof T & string>(topic: K, options?: ConsumerOptions<T>): AsyncIterableIterator<EventEnvelope<T[K]>>;
140
+ /**
141
+ * Stop all consumers or a specific group.
142
+ *
143
+ * If `groupId` is unspecified, all active consumers are stopped.
144
+ * If `groupId` is specified, only the consumer with that group ID is stopped.
145
+ *
146
+ * @throws {Error} if the consumer fails to disconnect.
147
+ */
86
148
  stopConsumer(groupId?: string): Promise<void>;
149
+ /**
150
+ * Temporarily stop delivering messages from specific partitions without disconnecting the consumer.
151
+ *
152
+ * @param groupId Consumer group to pause. Defaults to the client's default groupId.
153
+ * @param assignments Topic-partition pairs to pause.
154
+ */
87
155
  pauseConsumer(groupId: string | undefined, assignments: Array<{
88
156
  topic: string;
89
157
  partitions: number[];
90
158
  }>): void;
159
+ /**
160
+ * Resume message delivery for previously paused topic-partitions.
161
+ *
162
+ * @param groupId Consumer group to resume. Defaults to the client's default groupId.
163
+ * @param assignments Topic-partition pairs to resume.
164
+ */
91
165
  resumeConsumer(groupId: string | undefined, assignments: Array<{
92
166
  topic: string;
93
167
  partitions: number[];
94
168
  }>): void;
169
+ /** Pause all assigned partitions of a topic for a consumer group (used for queue backpressure). */
170
+ private pauseTopicAllPartitions;
171
+ /** Resume all assigned partitions of a topic for a consumer group (used for queue backpressure). */
172
+ private resumeTopicAllPartitions;
95
173
  /** DLQ header keys added by `sendToDlq` — stripped before re-publishing. */
96
174
  private static readonly DLQ_HEADER_KEYS;
175
+ /**
176
+ * Re-publish messages from a dead letter queue back to the original topic.
177
+ *
178
+ * Messages are consumed from `<topic>.dlq` and re-published to `<topic>`.
179
+ * The original topic is determined by the `x-dlq-original-topic` header.
180
+ * The `x-dlq-*` headers are stripped before re-publishing.
181
+ *
182
+ * @param topic - The topic to replay from `<topic>.dlq`
183
+ * @param options - Options for replay
184
+ * @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
185
+ */
97
186
  replayDlq(topic: string, options?: DlqReplayOptions): Promise<{
98
187
  replayed: number;
99
188
  skipped: number;
@@ -109,6 +198,35 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
109
198
  partition: number;
110
199
  offset: string;
111
200
  }>): Promise<void>;
201
+ /**
202
+ * Seek specific topic-partition pairs to the offset nearest to a given timestamp
203
+ * (in milliseconds) for a stopped consumer group.
204
+ * Throws if the group is still running — call `stopConsumer(groupId)` first.
205
+ * Assignments are grouped by topic and committed via `admin.setOffsets`.
206
+ * If no offset exists at the requested timestamp (e.g. empty partition or
207
+ * future timestamp), the partition falls back to `-1` (end of topic — new messages only).
208
+ */
209
+ seekToTimestamp(groupId: string | undefined, assignments: Array<{
210
+ topic: string;
211
+ partition: number;
212
+ timestamp: number;
213
+ }>): Promise<void>;
214
+ /**
215
+ * Returns the current circuit breaker state for a specific topic partition.
216
+ * Returns `undefined` when no circuit state exists — either `circuitBreaker` is not
217
+ * configured for the group, or the circuit has never been tripped.
218
+ *
219
+ * @param topic Topic name.
220
+ * @param partition Partition index.
221
+ * @param groupId Consumer group. Defaults to the client's default groupId.
222
+ *
223
+ * @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
224
+ */
225
+ getCircuitState(topic: string, partition: number, groupId?: string): {
226
+ status: "closed" | "open" | "half-open";
227
+ failures: number;
228
+ windowSize: number;
229
+ } | undefined;
112
230
  /**
113
231
  * Query consumer group lag per partition.
114
232
  * Lag = broker high-watermark − last committed offset.
@@ -127,8 +245,41 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
127
245
  }>>;
128
246
  /** Check broker connectivity. Never throws — returns a discriminated union. */
129
247
  checkStatus(): Promise<KafkaHealthResult>;
248
+ /**
249
+ * List all consumer groups known to the broker.
250
+ * Useful for monitoring which groups are active and their current state.
251
+ */
252
+ listConsumerGroups(): Promise<ConsumerGroupSummary[]>;
253
+ /**
254
+ * Describe topics — returns partition layout, leader, replicas, and ISR.
255
+ * @param topics Topic names to describe. Omit to describe all topics.
256
+ */
257
+ describeTopics(topics?: string[]): Promise<TopicDescription[]>;
258
+ /**
259
+ * Delete records from a topic up to (but not including) the given offsets.
260
+ * All messages with offsets **before** the given offset are deleted.
261
+ */
262
+ deleteRecords(topic: string, partitions: Array<{
263
+ partition: number;
264
+ offset: string;
265
+ }>): Promise<void>;
266
+ /** Return the client ID provided during `KafkaClient` construction. */
130
267
  getClientId(): ClientId;
268
+ /**
269
+ * Return a snapshot of internal event counters accumulated since client creation
270
+ * (or since the last `resetMetrics()` call).
271
+ *
272
+ * @param topic Topic name to scope the snapshot to. When omitted, counters are
273
+ * aggregated across all topics. If the topic has no recorded events yet, returns
274
+ * a zero-valued snapshot.
275
+ * @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
276
+ */
131
277
  getMetrics(topic?: string): Readonly<KafkaMetrics>;
278
+ /**
279
+ * Reset internal event counters to zero.
280
+ *
281
+ * @param topic Topic name to reset. When omitted, all topics are reset.
282
+ */
132
283
  resetMetrics(topic?: string): void;
133
284
  /** Gracefully disconnect producer, all consumers, and admin. */
134
285
  disconnect(drainTimeoutMs?: number): Promise<void>;
@@ -144,14 +295,65 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
144
295
  * NestJS apps get drain for free via `onModuleDestroy` → `disconnect()`.
145
296
  */
146
297
  enableGracefulShutdown(signals?: NodeJS.Signals[], drainTimeoutMs?: number): void;
298
+ /**
299
+ * Increment the in-flight handler count and return a promise that calls the given handler.
300
+ * When the promise resolves or rejects, decrement the in-flight handler count.
301
+ * If the in-flight handler count reaches 0, call all previously registered drain resolvers.
302
+ * @param fn The async handler to invoke while it is tracked as in-flight.
303
+ * @returns A promise that resolves or rejects with the result of calling the handler.
304
+ */
147
305
  private trackInFlight;
306
+ /**
307
+ * Waits for all in-flight handlers to complete or for a given timeout, whichever comes first.
308
+ * @param timeoutMs Maximum time to wait in milliseconds.
309
+ * @returns A promise that resolves when all handlers have completed or the timeout is reached.
310
+ * @private
311
+ */
148
312
  private waitForDrain;
313
+ /**
314
+ * Prepare a send payload by registering the topic's schema and then building the payload.
315
+ * @param topicOrDesc - topic name or topic descriptor
316
+ * @param messages - batch of messages to send
317
+ * @returns - prepared payload
318
+ */
149
319
  private preparePayload;
150
320
  private notifyAfterSend;
321
+ /**
322
+ * Returns the KafkaMetrics for a given topic.
323
+ * If the topic hasn't seen any events, initializes a zero-valued snapshot.
324
+ * @param topic - name of the topic to get the metrics for
325
+ * @returns - KafkaMetrics for the given topic
326
+ */
151
327
  private metricsFor;
328
+ /**
329
+ * Notifies instrumentation hooks of a retry event.
330
+ * @param envelope The original message envelope that triggered the retry.
331
+ * @param attempt The current retry attempt (1-indexed).
332
+ * @param maxRetries The maximum number of retries configured for this topic.
333
+ */
152
334
  private notifyRetry;
335
+ /**
336
+ * Called whenever a message is routed to the dead letter queue.
337
+ * @param envelope The original message envelope.
338
+ * @param reason The reason for routing to the dead letter queue.
339
+ * @param gid The group ID of the consumer that triggered the circuit breaker, if any.
340
+ */
153
341
  private notifyDlq;
342
+ /**
343
+ * Notify all instrumentation hooks about a duplicate message detection.
344
+ * Invoked by the consumer after a message has been successfully processed
345
+ * and the Lamport clock detected a duplicate.
346
+ * @param envelope The processed message envelope.
347
+ * @param strategy The duplicate detection strategy used.
348
+ */
154
349
  private notifyDuplicate;
350
+ /**
351
+ * Notify all instrumentation hooks about a successfully processed message.
352
+ * Invoked by the consumer after a message has been successfully processed
353
+ * by the handler.
354
+ * @param envelope The processed message envelope.
355
+ * @param gid The optional consumer group ID.
356
+ */
155
357
  private notifyMessage;
156
358
  /**
157
359
  * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
@@ -188,15 +390,57 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
188
390
  * so Kafka can fence stale producers on restart without affecting other levels.
189
391
  */
190
392
  private createRetryTxProducer;
393
+ /**
394
+ * Ensure that a topic exists by creating it if it doesn't already exist.
395
+ * If `autoCreateTopics` is disabled, this method will not create the topic and
396
+ * will return immediately.
397
+ * If multiple concurrent calls are made to `ensureTopic` for the same topic,
398
+ * they are deduplicated to prevent multiple calls to `admin.createTopics()`.
399
+ * @param topic - The topic to ensure exists.
400
+ * @returns A promise that resolves when the topic has been created or already exists.
401
+ */
191
402
  private ensureTopic;
192
403
  /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
193
404
  private setupConsumer;
194
405
  /** Create or retrieve the deduplication context for a consumer group. */
195
406
  private resolveDeduplicationContext;
407
+ /**
408
+ * An object containing the necessary dependencies for building a send payload.
409
+ *
410
+ * @property {Map<string, SchemaLike>} schemaRegistry - A map of topic names to their schemas.
411
+ * @property {boolean} strictSchemasEnabled - Whether strict schema validation is enabled.
412
+ * @property {KafkaInstrumentation} instrumentation - An object for creating a span for instrumentation.
413
+ * @property {KafkaLogger} logger - A logger for logging messages.
414
+ * @property {() => number} nextLamportClock - A function that returns the next value of the logical clock.
415
+ */
196
416
  private get producerOpsDeps();
417
+ /**
418
+ * ConsumerOpsDeps object properties:
419
+ *
420
+ * @property {Map<string, Consumer>} consumers - A map of consumer group IDs to their corresponding consumer instances.
421
+ * @property {Map<string, { fromBeginning: boolean; autoCommit: boolean }>} consumerCreationOptions - A map of consumer group IDs to their creation options.
422
+ * @property {Kafka} kafka - The Kafka client instance.
423
+ * @property {function(string, Partition[]): void} onRebalance - An optional callback function called when a consumer group is rebalanced.
424
+ * @property {KafkaLogger} logger - The logger instance used for logging consumer operations.
425
+ */
197
426
  private get consumerOpsDeps();
198
427
  /** Build MessageHandlerDeps with circuit breaker callbacks bound to the given groupId. */
199
428
  private messageDepsFor;
429
+ /**
430
+ * The dependencies object passed to the retry topic consumers.
431
+ *
432
+ * `logger`: The logger instance passed to the retry topic consumers.
433
+ * `producer`: The producer instance passed to the retry topic consumers.
434
+ * `instrumentation`: The instrumentation instance passed to the retry topic consumers.
435
+ * `onMessageLost`: The callback function passed to the retry topic consumers for tracking lost messages.
436
+ * `onRetry`: The callback function passed to the retry topic consumers for tracking retry attempts.
437
+ * `onDlq`: The callback function passed to the retry topic consumers for tracking dead-letter queue routing.
438
+ * `onMessage`: The callback function passed to the retry topic consumers for tracking message delivery.
439
+ * `ensureTopic`: A function that ensures a topic exists before subscribing to it.
440
+ * `getOrCreateConsumer`: A function that creates or retrieves a consumer instance.
441
+ * `runningConsumers`: A map of consumer group IDs to their corresponding consumer instances.
442
+ * `createRetryTxProducer`: A function that creates a retry transactional producer instance.
443
+ */
200
444
  private get retryTopicDeps();
201
445
  }
202
446
 
@@ -226,4 +470,4 @@ declare class KafkaRetryExhaustedError extends KafkaProcessingError {
226
470
  });
227
471
  }
228
472
 
229
- export { BatchMessageItem, BatchMeta, ClientId, ConsumerHandle, ConsumerOptions, DlqReplayOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaHealthResult, KafkaMetrics, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, SendOptions, TopicDescriptor, TopicMapConstraint, TransactionContext };
473
+ export { BatchMessageItem, BatchMeta, BatchSendOptions, ClientId, ConsumerGroupSummary, ConsumerHandle, ConsumerOptions, DlqReplayOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaHealthResult, KafkaMetrics, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, MessageHeaders, SendOptions, TopicDescription, TopicDescriptor, TopicMapConstraint, TransactionContext };