@drarzter/kafka-client 0.9.2 → 0.9.4

@@ -0,0 +1,751 @@
import { M as MessageHeaders, S as SchemaLike, T as TopicMapConstraint, v as SendOptions, b as TopicDescriptor, B as BatchMessageItem, d as BatchSendOptions, z as TransactionContext, k as EventEnvelope, a as ConsumerOptions, h as ConsumerHandle, c as BatchMeta, J as WindowMeta, W as WindowConsumerOptions, t as RoutingOptions, A as TransactionalHandlerContext, C as ClientId, r as KafkaMetrics } from './consumer.types-fFCag3VJ.mjs';

/**
 * Options for `readSnapshot`.
 *
 * @example
 * ```ts
 * const snapshot = await kafka.readSnapshot('users.state', {
 *   schema: UserSchema,
 *   onTombstone: (key) => console.log(`Key ${key} was compacted away`),
 * });
 * ```
 */
interface ReadSnapshotOptions {
    /**
     * Schema to validate each message payload against (Zod, Valibot, ArkType, or any `.parse()` shape).
     * Messages that fail validation are skipped with a warning log — they do not throw.
     */
    schema?: SchemaLike;
    /**
     * Called when a tombstone record (null-value message) is encountered.
     * The corresponding key is removed from the snapshot automatically.
     * Use this for auditing or logging which keys were compacted away.
     */
    onTombstone?: (key: string) => void;
}
/** A single partition offset entry stored in a checkpoint record. */
interface CheckpointEntry {
    topic: string;
    partition: number;
    offset: string;
}
/** Result returned by a successful `checkpointOffsets` call. */
interface CheckpointResult {
    /** Consumer group whose offsets were saved. */
    groupId: string;
    /** Topics included in the checkpoint. */
    topics: string[];
    /** Total number of topic-partition pairs saved. */
    partitionCount: number;
    /** Unix timestamp (ms) when the checkpoint was created. */
    savedAt: number;
}
/** Options for `restoreFromCheckpoint`. */
interface RestoreCheckpointOptions {
    /**
     * Target Unix timestamp (ms). The newest checkpoint whose `savedAt` is **≤ this value**
     * is selected. Defaults to the latest available checkpoint when omitted.
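     *
     * A minimal point-in-time sketch — the `'checkpoints'` topic and `'billing-service'`
     * group names are illustrative assumptions, not part of the API:
     * ```ts
     * // Reposition the group to the newest checkpoint taken at or before noon UTC.
     * const noon = new Date('2025-01-01T12:00:00Z').getTime();
     * await kafka.restoreFromCheckpoint('billing-service', 'checkpoints', { timestamp: noon });
     * ```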
     */
    timestamp?: number;
}
/** Result returned by a successful `restoreFromCheckpoint` call. */
interface CheckpointRestoreResult {
    /** Consumer group that was repositioned. */
    groupId: string;
    /** The committed offsets restored from the checkpoint. */
    offsets: CheckpointEntry[];
    /** Unix timestamp (ms) recorded when the checkpoint was originally saved. */
    restoredAt: number;
    /** Age of the restored checkpoint in milliseconds (now − `restoredAt`). */
    checkpointAge: number;
}
/**
 * Options for `replayDlq`.
 *
 * @example
 * ```ts
 * await kafka.replayDlq('orders.created', {
 *   targetTopic: 'orders.retry-manual',
 *   dryRun: false,
 *   filter: (headers, value) =>
 *     headers['x-dlq-reason'] === 'handler-error' &&
 *     JSON.parse(value).amount > 0,
 * });
 * ```
 */
interface DlqReplayOptions {
    /**
     * Override the target topic to re-publish to.
     * Default: reads the `x-dlq-original-topic` header from each DLQ message.
     */
    targetTopic?: string;
    /**
     * Dry-run mode — log what would be replayed without actually sending.
     * Increments the `replayed` counter so you can see what would happen.
     */
    dryRun?: boolean;
    /**
     * Optional filter — return `false` to skip a message.
     * @param headers All headers on the DLQ message (including `x-dlq-*` metadata).
     * @param value Raw message value (JSON string).
     */
    filter?: (headers: MessageHeaders, value: string) => boolean;
    /**
     * Seek to the earliest available offset before consuming, regardless of any
     * previously committed offsets for the replay consumer group.
     * Default: `true` — full replay of all DLQ messages on every call.
     * Set to `false` to replay only messages added since the previous `replayDlq` call.
     */
    fromBeginning?: boolean;
}
/** Result returned by `KafkaClient.checkStatus()`. */
type KafkaHealthResult = {
    status: "up";
    clientId: string;
    topics: string[];
} | {
    status: "down";
    clientId: string;
    error: string;
};
/** Summary of a consumer group returned by `listConsumerGroups`. */
interface ConsumerGroupSummary {
    /** Consumer group ID. */
    groupId: string;
    /**
     * Current broker-reported state of the group.
     * Common values: `'Empty'`, `'Stable'`, `'PreparingRebalance'`, `'CompletingRebalance'`, `'Dead'`.
     */
    state: string;
}
/** Partition-level metadata for a topic. */
interface TopicPartitionInfo {
    /** Partition index (0-based). */
    partition: number;
    /** Node ID of the partition leader broker. */
    leader: number;
    /** Node IDs of all replica brokers. */
    replicas: number[];
    /** Node IDs of in-sync replicas. */
    isr: number[];
}
/** Topic metadata returned by `describeTopics`. */
interface TopicDescription {
    /** Topic name. */
    name: string;
    /** Per-partition metadata. */
    partitions: TopicPartitionInfo[];
}

/** A topic-partition pair. */
type ITopicPartition = {
    topic: string;
    partition: number;
};
/** A topic-partition pair with an absolute offset string. */
type ITopicPartitionOffset = {
    topic: string;
    partition: number;
    offset: string;
};
/** Pause / resume assignment shape: one topic + its partition list. */
type ITopicPartitions = {
    topic: string;
    partitions: number[];
};
/** A single message in a produce request. */
type IProducerMessage = {
    value: string | null;
    key?: string | null;
    headers?: Record<string, string | Buffer | string[]>;
};
/** Produce request payload for one topic. */
type IProducerRecord = {
    topic: string;
    messages: IProducerMessage[];
};
/** Options for creating a producer. */
type IProducerCreationOptions = {
    /** When set, the producer uses idempotent + exactly-once semantics. */
    transactionalId?: string;
    /** Enable idempotent writes (required for `transactionalId`). */
    idempotent?: boolean;
};
/** An open Kafka transaction. */
interface ITransaction {
    send(record: IProducerRecord): Promise<void>;
    /**
     * Atomically commit offsets for `consumer` as part of this transaction.
     * The `consumer` parameter must be the `IConsumer` whose offsets are being committed.
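     *
     * A hedged sketch of one transactional read-process-write step — `transport` and
     * the handler-scope variables (`consumer`, `topic`, `partition`, `message`, `result`)
     * are assumptions for illustration:
     * ```ts
     * const producer = transport.producer({ transactionalId: 'pipeline-1', idempotent: true });
     * const tx = await producer.transaction();
     * await tx.send({ topic: 'orders.enriched', messages: [{ value: JSON.stringify(result) }] });
     * // Commit the source offset atomically with the send above
     * // (committed offset = last processed offset + 1).
     * await tx.sendOffsets({
     *   consumer,
     *   topics: [{ topic, partitions: [{ partition, offset: String(Number(message.offset) + 1) }] }],
     * });
     * await tx.commit();
     * ```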
     */
    sendOffsets(options: {
        consumer: IConsumer;
        topics: Array<{
            topic: string;
            partitions: Array<{
                partition: number;
                offset: string;
            }>;
        }>;
    }): Promise<void>;
    commit(): Promise<void>;
    abort(): Promise<void>;
}
/** A Kafka producer. */
interface IProducer {
    connect(): Promise<void>;
    disconnect(): Promise<void>;
    send(record: IProducerRecord): Promise<void>;
    transaction(): Promise<ITransaction>;
}
/** A single message in an `eachMessage` callback. */
type IMessage = {
    value: Buffer | null;
    /** Header map as returned by librdkafka — values may be arrays. */
    headers: Record<string, any>;
    offset: string;
    key: Buffer | null;
};
/** Payload passed to the `eachMessage` handler. */
type IEachMessagePayload = {
    topic: string;
    partition: number;
    message: IMessage;
};
/** A batch of messages from one topic-partition. */
type IMessageBatch = {
    topic: string;
    partition: number;
    messages: IMessage[];
    highWatermark: string;
};
/**
 * Payload passed to the `eachBatch` handler.
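 *
 * A minimal handler sketch showing how the three callbacks cooperate — the
 * `handle` function is an assumption for illustration:
 * ```ts
 * await consumer.run({
 *   eachBatch: async ({ batch, heartbeat, resolveOffset, commitOffsetsIfNecessary }) => {
 *     for (const message of batch.messages) {
 *       await handle(message);
 *       resolveOffset(message.offset);   // mark progress without committing
 *       await heartbeat();               // keep the session alive on slow batches
 *     }
 *     await commitOffsetsIfNecessary();  // commit once per batch if thresholds allow
 *   },
 * });
 * ```
 */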
type IEachBatchPayload = {
    batch: IMessageBatch;
    /** Send a heartbeat to the broker to prevent session timeout. */
    heartbeat: () => Promise<void>;
    /** Mark `offset` as processed (without committing). */
    resolveOffset: (offset: string) => void;
    /** Commit if the auto-commit threshold has been reached. */
    commitOffsetsIfNecessary: () => Promise<void>;
};
/** Configuration passed to `IConsumer.run()`. */
type IConsumerRunConfig = {
    eachMessage?: (payload: IEachMessagePayload) => Promise<void>;
    eachBatch?: (payload: IEachBatchPayload) => Promise<void>;
};
/** Options for creating a consumer. */
type IConsumerCreationOptions = {
    groupId: string;
    fromBeginning?: boolean;
    autoCommit?: boolean;
    partitionAssigner?: "cooperative-sticky" | "roundrobin" | "range";
    /**
     * Fired on every partition assign/revoke.
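     *
     * A small logging-only sketch — `transport` and the group name are illustrative:
     * ```ts
     * const consumer = transport.consumer({
     *   groupId: 'billing-service',
     *   onRebalance: (type, assignments) => {
     *     for (const { topic, partition } of assignments) {
     *       console.log(`${type}: ${topic}[${partition}]`);
     *     }
     *   },
     * });
     * ```
     */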
    onRebalance?: (type: "assign" | "revoke", assignments: ITopicPartition[]) => void;
};
/** A Kafka consumer. */
interface IConsumer {
    connect(): Promise<void>;
    disconnect(): Promise<void>;
    subscribe(options: {
        topics: (string | RegExp)[];
    }): Promise<void>;
    run(config: IConsumerRunConfig): Promise<void>;
    pause(assignments: ITopicPartitions[]): void;
    resume(assignments: ITopicPartitions[]): void;
    /** Seek a partition to an explicit offset. */
    seek(options: ITopicPartitionOffset): void;
    /** Current partition assignment for this consumer. */
    assignment(): ITopicPartition[];
    commitOffsets(offsets: ITopicPartitionOffset[]): Promise<void>;
    /** Stop processing (alias for disconnect in some usages). */
    stop(): Promise<void>;
}
/** Low/current/high watermark offsets for one partition. */
type IPartitionWatermarks = {
    partition: number;
    low: string;
    high: string;
};
/** A partition → offset pair. */
type IPartitionOffset = {
    partition: number;
    offset: string;
};
/** Committed offsets for a group's topic. */
type IGroupTopicOffsets = {
    topic: string;
    partitions: IPartitionOffset[];
};
/** A consumer group descriptor. */
type IGroupDescription = {
    groupId: string;
    state?: string;
};
/** Partition metadata. */
type IPartitionMetadata = {
    partitionId?: number;
    partition?: number;
    leader?: number;
    replicas?: (number | {
        nodeId: number;
    })[];
    isr?: (number | {
        nodeId: number;
    })[];
};
/** Topic metadata. */
type ITopicMetadata = {
    name: string;
    partitions: IPartitionMetadata[];
};
/** A Kafka admin client. */
interface IAdmin {
    connect(): Promise<void>;
    disconnect(): Promise<void>;
    createTopics(options: {
        topics: Array<{
            topic: string;
            numPartitions: number;
        }>;
    }): Promise<void>;
    fetchTopicOffsets(topic: string): Promise<IPartitionWatermarks[]>;
    fetchTopicOffsetsByTimestamp(topic: string, timestamp: number): Promise<IPartitionOffset[]>;
    fetchOffsets(options: {
        groupId: string;
    }): Promise<IGroupTopicOffsets[]>;
    setOffsets(options: {
        groupId: string;
        topic: string;
        partitions: IPartitionOffset[];
    }): Promise<void>;
    listTopics(): Promise<string[]>;
    listGroups(): Promise<{
        groups: IGroupDescription[];
    }>;
    fetchTopicMetadata(options?: {
        topics?: string[];
    }): Promise<{
        topics: ITopicMetadata[];
    }>;
    deleteGroups(groupIds: string[]): Promise<void>;
    deleteTopicRecords(options: {
        topic: string;
        partitions: IPartitionOffset[];
    }): Promise<void>;
}
/**
 * Factory that creates connected Kafka primitives.
 * The default implementation wraps `@confluentinc/kafka-javascript` via
 * `ConfluentTransport`. Inject a custom transport (e.g. a fake) via
 * `KafkaClientOptions.transport` for testing or alternative broker support.
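 *
 * A bare-bones in-memory fake for unit tests — a sketch, not a helper shipped by
 * this package; only `producer()` is stubbed meaningfully here:
 * ```ts
 * const sent: IProducerRecord[] = [];
 * const fakeTransport: Partial<KafkaTransport> = {
 *   producer: () => ({
 *     connect: async () => {},
 *     disconnect: async () => {},
 *     send: async (record) => { sent.push(record); },  // capture instead of publishing
 *     transaction: async () => { throw new Error('not implemented in fake'); },
 *   }),
 * };
 * ```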
 */
interface KafkaTransport {
    producer(options?: IProducerCreationOptions): IProducer;
    consumer(options: IConsumerCreationOptions): IConsumer;
    admin(): IAdmin;
}

/** Producer methods of `IKafkaClient`. */
interface IKafkaProducer<T extends TopicMapConstraint<T>> {
    /**
     * @example
     * ```ts
     * await kafka.sendMessage('orders.created', { orderId: '123', amount: 99 });
     * ```
     */
    sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
    sendMessage<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, message: D["__type"], options?: SendOptions): Promise<void>;
    /**
     * Send a null-value (tombstone) message to a topic.
     * Tombstones are used with log-compacted topics to signal that a key's record
     * should be removed during the next compaction cycle.
     *
     * Unlike `sendMessage`, tombstones carry no payload, no envelope headers, and
     * skip schema validation. Only the partition `key` and optional custom `headers`
     * are forwarded to Kafka.
     *
     * @example
     * ```ts
     * await kafka.sendTombstone('users.state', 'user-42');
     * ```
     */
    sendTombstone(topic: string, key: string, headers?: MessageHeaders): Promise<void>;
    /**
     * @example
     * ```ts
     * await kafka.sendBatch('orders.created', [
     *   { value: { orderId: '1', amount: 10 }, key: 'order-1' },
     *   { value: { orderId: '2', amount: 20 }, key: 'order-2' },
     * ]);
     * ```
     */
    sendBatch<K extends keyof T>(topic: K, messages: Array<BatchMessageItem<T[K]>>, options?: BatchSendOptions): Promise<void>;
    sendBatch<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, messages: Array<BatchMessageItem<D["__type"]>>, options?: BatchSendOptions): Promise<void>;
    /**
     * @example
     * ```ts
     * await kafka.transaction(async (tx) => {
     *   await tx.send('orders.created', { orderId: '123', amount: 99 });
     *   await tx.send('inventory.reserved', { itemId: 'a', qty: 1 });
     * });
     * ```
     */
    transaction(fn: (ctx: TransactionContext<T>) => Promise<void>): Promise<void>;
}

/** Consumer methods of `IKafkaClient`. */
interface IKafkaConsumer<T extends TopicMapConstraint<T>> {
    /**
     * @example
     * ```ts
     * const handle = await kafka.startConsumer(['orders.created'], async (envelope) => {
     *   await processOrder(envelope.payload);
     * }, { retry: { maxRetries: 3 }, dlq: true });
     *
     * // on shutdown:
     * await handle.stop();
     * ```
     */
    startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * Subscribe using regex topic patterns (or a mix of strings and patterns).
     * Note: type-safety is reduced to the union of all topic payloads when using regex.
     * Incompatible with `retryTopics: true`.
     */
    startConsumer(topics: (string | RegExp)[], handleMessage: (envelope: EventEnvelope<T[keyof T]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * @example
     * ```ts
     * await kafka.startBatchConsumer(['metrics'], async (envelopes, meta) => {
     *   await db.insertMany(envelopes.map(e => e.payload));
     *   meta.resolveOffset(envelopes.at(-1)!.offset);
     * }, { autoCommit: false });
     * ```
     */
    startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * Subscribe using regex topic patterns (or a mix of strings and patterns).
     * Note: type-safety is reduced to the union of all topic payloads when using regex.
     * Incompatible with `retryTopics: true`.
     */
    startBatchConsumer(topics: (string | RegExp)[], handleBatch: (envelopes: EventEnvelope<T[keyof T]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * Subscribe to a topic and accumulate messages into a window, flushing the handler
     * when **either** `maxMessages` messages have accumulated **or** `maxMs` milliseconds
     * have elapsed — whichever fires first. On `handle.stop()`, any remaining buffered
     * messages are flushed before the consumer disconnects.
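     *
     * A sketch of time/count-bounded batching — `'metrics'` and `db` are illustrative;
     * only `maxMessages` and `maxMs` are taken from the description above:
     * ```ts
     * const handle = await kafka.startWindowConsumer('metrics', async (batch, meta) => {
     *   await db.insertMany(batch.map((e) => e.payload));
     * }, { maxMessages: 500, maxMs: 5_000 });
     * ```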
     */
    startWindowConsumer<K extends keyof T & string>(topic: K, handler: (batch: EventEnvelope<T[K]>[], meta: WindowMeta) => Promise<void>, options: WindowConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * Subscribe to topics and dispatch each message to a handler based on a Kafka header value.
     * Messages whose header is absent or doesn't match any route key are forwarded to `fallback`
     * (or silently skipped when no fallback is set).
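     *
     * A hedged sketch — the field names (`header`, `routes`, `fallback`) follow the
     * description above but should be checked against `RoutingOptions`; the handler
     * functions are illustrative:
     * ```ts
     * await kafka.startRoutedConsumer(['orders.events'], {
     *   header: 'x-event-type',
     *   routes: {
     *     created: async (envelope) => handleCreated(envelope.payload),
     *     cancelled: async (envelope) => handleCancelled(envelope.payload),
     *   },
     *   fallback: async (envelope) => console.warn('unrouted event', envelope),
     * });
     * ```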
     */
    startRoutedConsumer<K extends Array<keyof T>>(topics: K, routing: RoutingOptions<T[K[number]]>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * Subscribe to topics and consume messages with **exactly-once semantics** for
     * read-process-write pipelines. Each message is processed inside a Kafka transaction;
     * on handler success the source offset commit and all staged sends are committed
     * atomically. Incompatible with `retryTopics: true`.
     *
     * @example
     * ```ts
     * await kafka.startTransactionalConsumer(['orders.created'], async (envelope, tx) => {
     *   await tx.send('inventory.reserved', { orderId: envelope.payload.orderId, qty: 1 });
     * });
     * ```
     */
    startTransactionalConsumer<K extends Array<keyof T>>(topics: K, handler: (envelope: EventEnvelope<T[K[number]]>, tx: TransactionalHandlerContext<T>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
    /**
     * Stop consumer(s).
     * - `stopConsumer(groupId)` — disconnect and remove the consumer for a specific group.
     * - `stopConsumer()` — disconnect and remove all consumers.
     */
    stopConsumer(groupId?: string): Promise<void>;
    /**
     * Consume messages as an async iterator. Useful for scripts, migrations, and
     * one-off processing. Breaking out of the loop stops the consumer automatically.
     * Does **not** support `retryTopics: true`.
     *
     * @example
     * ```ts
     * for await (const envelope of kafka.consume('orders')) {
     *   await process(envelope);
     * }
     * ```
     */
    consume<K extends keyof T & string>(topic: K, options?: ConsumerOptions<T>): AsyncIterableIterator<EventEnvelope<T[K]>>;
    /**
     * Pause message delivery for specific topic-partitions. The consumer remains
     * connected; only polling is suspended. Call `resumeConsumer` to restart.
     *
     * @example
     * ```ts
     * kafka.pauseConsumer(undefined, [{ topic: 'orders.created', partitions: [0, 1] }]);
     * ```
     */
    pauseConsumer(groupId: string | undefined, assignments: Array<{
        topic: string;
        partitions: number[];
    }>): void;
    /**
     * Resume message delivery for previously paused topic-partitions.
     *
     * @example
     * ```ts
     * kafka.resumeConsumer(undefined, [{ topic: 'orders.created', partitions: [0, 1] }]);
     * ```
     */
    resumeConsumer(groupId: string | undefined, assignments: Array<{
        topic: string;
        partitions: number[];
    }>): void;
}

/** Admin, offset-management, and state methods of `IKafkaClient`. */
interface IKafkaAdmin<T extends TopicMapConstraint<T>> {
    /**
     * @example
     * ```ts
     * const status = await kafka.checkStatus();
     * if (status.status === 'down') console.error(status.error);
     * ```
     */
    checkStatus(): Promise<KafkaHealthResult>;
    /**
     * List all consumer groups known to the broker.
     *
     * @example
     * ```ts
     * const groups = await kafka.listConsumerGroups();
     * console.log(groups.map(g => `${g.groupId}: ${g.state}`));
     * ```
     */
    listConsumerGroups(): Promise<ConsumerGroupSummary[]>;
    /**
     * Describe topics — returns partition layout, leader, replicas, and ISR for each topic.
     * Omit `topics` to describe all topics visible to this client.
     *
     * @example
     * ```ts
     * const [desc] = await kafka.describeTopics(['orders.created']);
     * console.log(desc.partitions.length);
     * ```
     */
    describeTopics(topics?: string[]): Promise<TopicDescription[]>;
    /**
     * Delete records from a topic up to (but not including) the specified offsets.
     *
     * @example
     * ```ts
     * await kafka.deleteRecords('orders.created', [
     *   { partition: 0, offset: '1000' },
     *   { partition: 1, offset: '500' },
     * ]);
     * ```
     */
    deleteRecords(topic: string, partitions: Array<{
        partition: number;
        offset: string;
    }>): Promise<void>;
    /**
     * Query the consumer group lag per partition.
     * Lag = (broker high-watermark offset) − (last committed offset).
     *
     * @example
     * ```ts
     * const lag = await kafka.getConsumerLag();
     * const total = lag.reduce((sum, p) => sum + p.lag, 0);
     * ```
     */
    getConsumerLag(groupId?: string): Promise<Array<{
        topic: string;
        partition: number;
        lag: number;
    }>>;
    /**
     * Consume all messages in `{topic}.dlq` and re-publish each to its original topic
     * (or `options.targetTopic`). The DLQ topic itself is not modified.
     *
     * @returns `{ replayed, skipped }` counts.
     *
     * @example
     * ```ts
     * const { replayed } = await kafka.replayDlq('orders.created');
     * ```
     */
    replayDlq(topic: string, options?: DlqReplayOptions): Promise<{
        replayed: number;
        skipped: number;
    }>;
    /**
     * Reset committed offsets to `'earliest'` or `'latest'`.
     * The consumer group must be inactive — call `stopConsumer(groupId)` first.
     *
     * @example
     * ```ts
     * await kafka.stopConsumer('billing-service');
     * await kafka.resetOffsets('billing-service', 'orders.created', 'earliest');
     * ```
     */
    resetOffsets(groupId: string | undefined, topic: string, position: "earliest" | "latest"): Promise<void>;
    /**
     * Seek specific partitions to explicit offsets.
     * The consumer group must be inactive.
     *
     * @example
     * ```ts
     * await kafka.seekToOffset('billing-service', [
     *   { topic: 'orders.created', partition: 0, offset: '1000' },
     * ]);
     * ```
     */
    seekToOffset(groupId: string | undefined, assignments: Array<{
        topic: string;
        partition: number;
        offset: string;
    }>): Promise<void>;
    /**
     * Seek partitions to the offset nearest to a given Unix timestamp (ms).
     * The consumer group must be inactive.
     *
     * @example
     * ```ts
     * const midnight = new Date('2025-01-01').getTime();
     * await kafka.seekToTimestamp('billing-service', [
     *   { topic: 'orders.created', partition: 0, timestamp: midnight },
     * ]);
     * ```
     */
    seekToTimestamp(groupId: string | undefined, assignments: Array<{
        topic: string;
        partition: number;
        timestamp: number;
    }>): Promise<void>;
    /**
     * Returns the current circuit breaker state for a topic partition.
     * Returns `undefined` when `circuitBreaker` is not configured or never tripped.
     *
     * @example
     * ```ts
     * const state = kafka.getCircuitState('orders.created', 0);
     * if (state?.status === 'open') console.warn('Circuit open!', state.failures);
     * ```
     */
    getCircuitState(topic: string, partition: number, groupId?: string): {
        status: "closed" | "open" | "half-open";
        failures: number;
        windowSize: number;
    } | undefined;
    /**
     * Read a compacted topic from the beginning to its high-watermark and return a
     * `Map<key, EventEnvelope<T>>` with the latest value per key.
     * Tombstone records remove the key from the map.
     *
     * @example
     * ```ts
     * const orders = await kafka.readSnapshot('orders.state');
     * // orders.get('order-123') → latest EventEnvelope for that key
     * ```
     */
    readSnapshot<K extends keyof T & string>(topic: K, options?: ReadSnapshotOptions): Promise<Map<string, EventEnvelope<T[K]>>>;
    /**
     * Snapshot the current committed offsets of a consumer group into a Kafka topic.
     * Requires `connectProducer()` to have been called.
     *
     * @example
     * ```ts
     * const result = await kafka.checkpointOffsets(undefined, 'checkpoints');
     * console.log(`Saved ${result.partitionCount} offsets at ${result.savedAt}`);
     * ```
     */
    checkpointOffsets(groupId: string | undefined, checkpointTopic: string): Promise<CheckpointResult>;
    /**
     * Restore a consumer group's committed offsets from the nearest checkpoint.
     * The consumer group must be stopped before calling this method.
     *
     * @example
     * ```ts
     * await kafka.stopConsumer('billing-service');
     * const result = await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
     * ```
     */
    restoreFromCheckpoint(groupId: string | undefined, checkpointTopic: string, options?: RestoreCheckpointOptions): Promise<CheckpointRestoreResult>;
}

/** Lifecycle and observability methods of `IKafkaClient`. */
interface IKafkaLifecycle {
    /**
     * @example
     * ```ts
     * const id = kafka.getClientId(); // e.g. 'my-service'
     * ```
     */
    getClientId(): ClientId;
    /**
     * Return a snapshot of internal event counters (retry / DLQ / dedup).
     * - `getMetrics()` — aggregate across all topics.
     * - `getMetrics(topic)` — counters for a specific topic only; returns all-zero
     *   if no events have been observed for that topic yet.
     *
     * Counters accumulate since client creation or the last `resetMetrics()` call.
     *
     * @example
     * ```ts
     * const { processedCount, dlqCount, retryCount } = kafka.getMetrics();
     * console.log(`Processed: ${processedCount}, DLQ: ${dlqCount}`);
     * ```
     */
    getMetrics(topic?: string): Readonly<KafkaMetrics>;
    /**
     * Reset internal event counters to zero.
     * - `resetMetrics()` — reset all topics.
     * - `resetMetrics(topic)` — reset a single topic only.
     *
     * @example
     * ```ts
     * kafka.resetMetrics();
     * kafka.resetMetrics('orders.created');
     * ```
     */
    resetMetrics(topic?: string): void;
    /**
     * Drain in-flight handlers, then disconnect all producers, consumers, and admin.
     * @param drainTimeoutMs Max ms to wait for in-flight handlers (default 30 000).
     *
     * @example
     * ```ts
     * await kafka.disconnect();
     * ```
     */
    disconnect(drainTimeoutMs?: number): Promise<void>;
    /**
     * Register SIGTERM / SIGINT signal handlers that drain in-flight messages before
     * disconnecting. Call once after constructing the client in non-NestJS apps.
     * NestJS apps get drain automatically via `onModuleDestroy` → `disconnect()`.
     *
     * @example
     * ```ts
     * kafka.enableGracefulShutdown(['SIGTERM', 'SIGINT'], 30_000);
     * ```
     */
    enableGracefulShutdown(signals?: NodeJS.Signals[], drainTimeoutMs?: number): void;
}

/**
 * Full Kafka client interface — the union of all role-specific sub-interfaces.
 *
 * Compose sub-interfaces directly when you need a narrower dependency:
 * ```ts
 * function sendOrder(producer: IKafkaProducer<MyTopics>) { ... }
 * function startWorker(consumer: IKafkaConsumer<MyTopics>) { ... }
 * ```
 */
interface IKafkaClient<T extends TopicMapConstraint<T>> extends IKafkaProducer<T>, IKafkaConsumer<T>, IKafkaAdmin<T>, IKafkaLifecycle {
}

export type { TopicPartitionInfo as A, CheckpointEntry as C, DlqReplayOptions as D, IKafkaClient as I, KafkaTransport as K, ReadSnapshotOptions as R, TopicDescription as T, IAdmin as a, IPartitionWatermarks as b, IGroupTopicOffsets as c, IPartitionOffset as d, IGroupDescription as e, ITopicMetadata as f, IConsumer as g, IConsumerCreationOptions as h, IConsumerRunConfig as i, ITopicPartitions as j, ITopicPartitionOffset as k, ITopicPartition as l, IMessage as m, IProducer as n, IProducerRecord as o, ITransaction as p, IProducerCreationOptions as q, KafkaHealthResult as r, CheckpointRestoreResult as s, CheckpointResult as t, ConsumerGroupSummary as u, IKafkaAdmin as v, IKafkaConsumer as w, IKafkaLifecycle as x, IKafkaProducer as y, RestoreCheckpointOptions as z };