@drarzter/kafka-client 0.8.0 → 0.9.3
- package/README.md +47 -10
- package/dist/{chunk-CHFLNQXK.mjs → chunk-TPIP5VV7.mjs} +2915 -2871
- package/dist/chunk-TPIP5VV7.mjs.map +1 -0
- package/dist/client-CBBUDDtu.d.ts +751 -0
- package/dist/client-D-SxYV2b.d.mts +751 -0
- package/dist/{types-CNfeoF3_.d.mts → consumer.types-fFCag3VJ.d.mts} +416 -1148
- package/dist/{types-CNfeoF3_.d.ts → consumer.types-fFCag3VJ.d.ts} +416 -1148
- package/dist/core.d.mts +154 -382
- package/dist/core.d.ts +154 -382
- package/dist/core.js +2914 -2870
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/index.d.mts +5 -2
- package/dist/index.d.ts +5 -2
- package/dist/index.js +2914 -2870
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/testing.d.mts +216 -2
- package/dist/testing.d.ts +216 -2
- package/dist/testing.js +300 -4
- package/dist/testing.js.map +1 -1
- package/dist/testing.mjs +295 -4
- package/dist/testing.mjs.map +1 -1
- package/package.json +1 -1
- package/dist/chunk-CHFLNQXK.mjs.map +0 -1
package/dist/core.d.ts
CHANGED
@@ -1,5 +1,128 @@
-import {
-export {
+import { q as KafkaLogger, K as KafkaInstrumentation, s as MessageLostContext, F as TtlExpiredContext, T as TopicMapConstraint, C as ClientId, G as GroupId, b as TopicDescriptor, v as SendOptions, M as MessageHeaders, B as BatchMessageItem, d as BatchSendOptions, z as TransactionContext, k as EventEnvelope, a as ConsumerOptions, h as ConsumerHandle, c as BatchMeta, J as WindowMeta, W as WindowConsumerOptions, t as RoutingOptions, A as TransactionalHandlerContext, r as KafkaMetrics } from './consumer.types-fFCag3VJ.js';
+export { e as BeforeConsumeResult, f as CircuitBreakerOptions, g as CompressionType, i as ConsumerInterceptor, D as DeduplicationOptions, j as DlqReason, E as EnvelopeHeaderOptions, H as HEADER_CORRELATION_ID, l as HEADER_EVENT_ID, m as HEADER_LAMPORT_CLOCK, n as HEADER_SCHEMA_VERSION, o as HEADER_TIMESTAMP, p as HEADER_TRACEPARENT, I as InferSchema, R as RetryOptions, S as SchemaLike, u as SchemaParseContext, w as SubscribeRetryOptions, x as TTopicMessageMap, y as TopicsFrom, L as buildEnvelopeHeaders, N as decodeHeaders, O as extractEnvelope, P as getEnvelopeContext, Q as runWithEnvelopeContext, U as topic } from './consumer.types-fFCag3VJ.js';
+import { K as KafkaTransport, I as IKafkaClient, D as DlqReplayOptions, R as ReadSnapshotOptions, t as CheckpointResult, z as RestoreCheckpointOptions, s as CheckpointRestoreResult, r as KafkaHealthResult, u as ConsumerGroupSummary, T as TopicDescription } from './client-CBBUDDtu.js';
+export { C as CheckpointEntry, v as IKafkaAdmin, w as IKafkaConsumer, x as IKafkaLifecycle, y as IKafkaProducer, A as TopicPartitionInfo } from './client-CBBUDDtu.js';
+
+/**
+ * Options for `KafkaClient` constructor.
+ *
+ * @example
+ * ```ts
+ * const kafka = new KafkaClient(kafkaConfig, 'my-service', {
+ *   transactionalId: `my-service-tx-${replicaIndex}`,
+ *   lagThrottle: { maxLag: 10_000, pollIntervalMs: 3_000 },
+ *   clockRecovery: { topics: ['orders.created'] },
+ *   onMessageLost: (ctx) => alerting.fire('kafka.message-lost', ctx),
+ *   instrumentation: [otelInstrumentation()],
+ * });
+ * ```
+ */
+interface KafkaClientOptions {
+    /** Auto-create topics via admin before the first `sendMessage`, `sendBatch`, or `transaction` for each topic. Useful for development — not recommended in production. */
+    autoCreateTopics?: boolean;
+    /** When `true`, string topic keys are validated against any schema previously registered via a TopicDescriptor. Default: `true`. */
+    strictSchemas?: boolean;
+    /** Custom logger. Defaults to console with `[KafkaClient:<clientId>]` prefix. */
+    logger?: KafkaLogger;
+    /** Number of partitions for auto-created topics. Default: `1`. */
+    numPartitions?: number;
+    /** Client-wide instrumentation hooks (e.g. OTel). Applied to both send and consume paths. */
+    instrumentation?: KafkaInstrumentation[];
+    /**
+     * Override the transactional producer ID used by `transaction()`.
+     * Defaults to `${clientId}-tx`.
+     *
+     * The transactional ID must be **unique per producer instance** across the
+     * entire Kafka cluster. Two `KafkaClient` instances with the same ID will
+     * cause Kafka to fence one of the producers — the fenced producer will fail
+     * on the next `transaction()` call. Set a distinct value per replica when
+     * running multiple instances of the same service.
+     */
+    transactionalId?: string;
+    /**
+     * Called when a message is dropped without being sent to a DLQ.
+     * Fires when the handler throws after all retries, or schema validation fails — and `dlq` is not enabled.
+     * Use this to alert, log to external systems, or trigger fallback logic.
+     */
+    onMessageLost?: (ctx: MessageLostContext) => void | Promise<void>;
+    /**
+     * Called when a message is dropped due to TTL expiration (`messageTtlMs`).
+     * Fires instead of `onMessageLost` for expired messages when `dlq` is not enabled.
+     * When `dlq: true`, expired messages go to the DLQ and this callback is NOT called.
+     *
+     * **Client-wide fallback**: if `ConsumerOptions.onTtlExpired` is set on the consumer,
+     * it takes precedence over this client-level callback.
+     */
+    onTtlExpired?: (ctx: TtlExpiredContext) => void | Promise<void>;
+    /**
+     * Called whenever a consumer group rebalance occurs.
+     * - `'assign'` — new partitions were granted to this instance.
+     * - `'revoke'` — partitions were taken away (e.g. another consumer joined).
+     *
+     * Applied to every consumer created by this client. If you need per-consumer
+     * rebalance handling, use separate `KafkaClient` instances.
+     */
+    onRebalance?: (type: "assign" | "revoke", partitions: Array<{
+        topic: string;
+        partition: number;
+    }>) => void;
+    /**
+     * Recover the Lamport clock from the last message in the given topics on `connectProducer()`.
+     *
+     * On startup the producer creates a short-lived consumer, seeks each partition to its
+     * last message (`highWatermark − 1`), reads the `x-lamport-clock` header, then
+     * initialises `_lamportClock` to the maximum value found. This guarantees monotonic
+     * clock values across restarts without an external store.
+     *
+     * Topics that do not exist or are empty are silently skipped.
+     */
+    clockRecovery?: {
+        /** Topic names to scan for the highest Lamport clock. */
+        topics: string[];
+    };
+    /**
+     * Delay `sendMessage` / `sendBatch` / `sendTombstone` when the observed lag of a
+     * consumer group exceeds `maxLag`. Resumes immediately when lag drops below the threshold.
+     *
+     * Lag is polled via `getConsumerLag()` every `pollIntervalMs` in the background;
+     * no admin call is made on each individual send.
+     *
+     * When `maxWaitMs` is exceeded the send is unblocked with a warning — this is
+     * best-effort throttling, not hard back-pressure.
+     *
+     * Requires `connectProducer()` to have been called to start the polling loop.
+     */
+    lagThrottle?: {
+        /** Consumer group whose lag is monitored. Defaults to the client's default group. */
+        groupId?: string;
+        /** Lag threshold (number of messages) above which sends are delayed. */
+        maxLag: number;
+        /** How often to poll `getConsumerLag()`. Default: `5000` ms. */
+        pollIntervalMs?: number;
+        /**
+         * Maximum time (ms) a send will wait while throttled before proceeding anyway.
+         * Default: `30_000` ms.
+         */
+        maxWaitMs?: number;
+    };
+    /**
+     * Custom transport implementation.
+     *
+     * By default `KafkaClient` uses `ConfluentTransport` which wraps
+     * `@confluentinc/kafka-javascript` (librdkafka). Inject a different
+     * `KafkaTransport` to target an alternative broker library, or to supply
+     * a deterministic fake in unit tests without mocking the confluentinc module.
+     *
+     * @example
+     * ```ts
+     * // In tests — no jest.mock() needed
+     * const kafka = new KafkaClient('svc', 'grp', [], {
+     *   transport: new FakeTransport(),
+     * });
+     * ```
+     */
+    transport?: KafkaTransport;
+}
 
 /**
  * Type-safe Kafka client.
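For orientation, a minimal sketch of constructing a client against the new options surface, based only on the `KafkaClientOptions` docblocks and the four-argument constructor shown in this diff. The `Topics` map, broker address, and `REPLICA_INDEX` variable are illustrative assumptions, and if `ClientId`/`GroupId` are branded types the string arguments may need a cast:

```ts
import { KafkaClient, type KafkaClientOptions } from '@drarzter/kafka-client';

// Hypothetical topic-to-payload map; anything satisfying TopicMapConstraint works.
// It also declares the topics used in the later sketches below.
type Topics = {
  'orders.created': { orderId: string; amount: number };
  'inventory.reserved': { orderId: string; qty: number };
  'users.state': { userId: string; name: string };
};

// Assumed deployment detail: one numeric index per running replica.
const replicaIndex = Number(process.env.REPLICA_INDEX ?? '0');

const options: KafkaClientOptions = {
  // Must be unique per producer instance, or Kafka fences one of them.
  transactionalId: `orders-service-tx-${replicaIndex}`,
  // Best-effort throttling: sends wait up to maxWaitMs while lag exceeds maxLag.
  lagThrottle: { maxLag: 10_000, pollIntervalMs: 3_000, maxWaitMs: 30_000 },
  // Re-seed the Lamport clock from the newest message on connectProducer().
  clockRecovery: { topics: ['orders.created'] },
  // Fires only when a message is dropped and no DLQ is configured.
  onMessageLost: (ctx) => console.error('kafka.message-lost', ctx),
};

const kafka = new KafkaClient<Topics>(
  'orders-service',       // clientId
  'orders-service-group', // groupId
  ['localhost:9092'],     // brokers
  options,
);
```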
@@ -9,54 +132,8 @@ export { g as BeforeConsumeResult, h as CheckpointEntry, k as CircuitBreakerOpti
  * @typeParam T - Topic-to-message type mapping for compile-time safety.
  */
 declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClient<T> {
-    private readonly kafka;
-    private readonly producer;
-    private txProducer;
-    private txProducerInitPromise;
-    /** Maps transactionalId → Producer for each active retry level consumer. */
-    private readonly retryTxProducers;
-    private readonly consumers;
-    private readonly logger;
-    private readonly autoCreateTopicsEnabled;
-    private readonly strictSchemasEnabled;
-    private readonly numPartitions;
-    private readonly ensuredTopics;
-    /** Pending topic-creation promises keyed by topic name. Prevents duplicate createTopics calls. */
-    private readonly ensureTopicPromises;
-    private readonly defaultGroupId;
-    private readonly schemaRegistry;
-    private readonly runningConsumers;
-    private readonly consumerCreationOptions;
-    /** Maps each main consumer groupId to its companion retry level groupIds. */
-    private readonly companionGroupIds;
-    private readonly instrumentation;
-    private readonly onMessageLost;
-    private readonly onTtlExpired;
-    private readonly onRebalance;
-    /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
-    private readonly txId;
-    /** Monotonically increasing Lamport clock stamped on every outgoing message. */
-    private _lamportClock;
-    /** Topics to scan for the highest Lamport clock value on `connectProducer()`. */
-    private readonly clockRecoveryTopics;
-    /** Lag-throttle configuration — set when `lagThrottle` is configured. */
-    private readonly lagThrottleOpts;
-    /** `true` while the observed consumer group lag exceeds `lagThrottle.maxLag`. */
-    private _lagThrottled;
-    /** Background polling timer for lag throttle. */
-    private _lagThrottleTimer;
-    /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
-    private readonly dedupStates;
-    private readonly circuitBreaker;
-    private readonly adminOps;
-    private readonly metrics;
-    private readonly inFlight;
     readonly clientId: ClientId;
-    private readonly
-    private readonly _consumerOpsDeps;
-    private readonly _retryTopicDeps;
-    /** DLQ header keys added by the pipeline — stripped before re-publishing. */
-    private static readonly DLQ_HEADER_KEYS;
+    private readonly ctx;
     /**
      * Create a new KafkaClient.
      * @param clientId Unique client identifier (used in Kafka metadata and logs).
@@ -73,208 +150,35 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
      * ```
      */
     constructor(clientId: ClientId, groupId: GroupId, brokers: string[], options?: KafkaClientOptions);
-    /**
-     * Send a single typed message. Accepts a topic key or a `TopicDescriptor`.
-     *
-     * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
-     * @param message Message payload — validated against the topic schema when one is registered.
-     * @param options Optional per-send settings: `key`, `headers`, `correlationId`, `compression`, etc.
-     * @example
-     * ```ts
-     * await kafka.sendMessage('orders.created', { orderId: '123', amount: 99 });
-     * ```
-     */
+    /** @inheritDoc */
     sendMessage<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, message: D["__type"], options?: SendOptions): Promise<void>;
     sendMessage<K extends keyof T>(topic: K, message: T[K], options?: SendOptions): Promise<void>;
-    /**
-     * Send a null-value (tombstone) message. Used with log-compacted topics to signal
-     * that a key's record should be removed during the next compaction cycle.
-     *
-     * Tombstones skip envelope headers, schema validation, and Lamport clock stamping.
-     * Both `beforeSend` and `afterSend` instrumentation hooks are still called so tracing works correctly.
-     *
-     * @param topic Topic name.
-     * @param key Partition key identifying the record to tombstone.
-     * @param headers Optional custom Kafka headers.
-     * @example
-     * ```ts
-     * await kafka.sendTombstone('users.state', 'user-42');
-     * ```
-     */
+    /** @inheritDoc */
     sendTombstone(topic: string, key: string, headers?: MessageHeaders): Promise<void>;
-    /**
-     * Send multiple typed messages in a single Kafka produce request. Accepts a topic key or a `TopicDescriptor`.
-     *
-     * Each item in `messages` can carry its own `key`, `headers`, `correlationId`, and `schemaVersion`.
-     * The `key` is used for partition routing — messages with the same key always land on the same partition.
-     *
-     * @param topic Topic key from the `TopicMessageMap` or a `TopicDescriptor` object.
-     * @param messages Array of messages to send.
-     * @param options Optional batch-level settings: `compression` codec.
-     * @example
-     * ```ts
-     * await kafka.sendBatch('orders.created', [
-     *   { value: { orderId: '1', amount: 10 }, key: 'order-1' },
-     * ]);
-     * ```
-     */
+    /** @inheritDoc */
     sendBatch<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(descriptor: D, messages: Array<BatchMessageItem<D["__type"]>>, options?: BatchSendOptions): Promise<void>;
     sendBatch<K extends keyof T>(topic: K, messages: Array<BatchMessageItem<T[K]>>, options?: BatchSendOptions): Promise<void>;
-    /**
-     * Execute multiple sends atomically. Commits on success, aborts on error.
-     * @example
-     * ```ts
-     * await kafka.transaction(async (tx) => {
-     *   await tx.send('orders.created', { orderId: '123' });
-     *   await tx.send('inventory.reserved', { itemId: 'a', qty: 1 });
-     * });
-     * ```
-     */
+    /** @inheritDoc */
     transaction(fn: (ctx: TransactionContext<T>) => Promise<void>): Promise<void>;
-    /**
-     * Connect the idempotent producer. Called automatically by `KafkaModule.register()`.
-     * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
-     */
+    /** @inheritDoc */
     connectProducer(): Promise<void>;
-    /**
-    private startLagThrottlePoller;
-    /** Wait until lag drops below the threshold (or maxWaitMs is exceeded). */
-    private waitIfThrottled;
-    /**
-     * Recover the Lamport clock from the last message across the given topics.
-     *
-     * For each topic, fetches partition high-watermarks via admin, creates a
-     * short-lived consumer, seeks every non-empty partition to its last offset
-     * (`highWatermark − 1`), reads one message per partition, and extracts the
-     * maximum `x-lamport-clock` header value. On completion `_lamportClock` is
-     * set to that maximum so the next `++_lamportClock` yields a strictly greater
-     * value than any previously sent clock.
-     *
-     * Topics that are empty or missing are silently skipped.
-     */
-    private recoverLamportClock;
-    /**
-     * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
-     */
+    /** @internal */
     disconnectProducer(): Promise<void>;
-    /**
-     * Subscribe to one or more topics and start consuming messages one at a time.
-     *
-     * Each message is delivered to `handleMessage` as a fully-decoded `EventEnvelope`.
-     * The call blocks until the consumer is connected and the subscription is set up,
-     * then returns a `ConsumerHandle` with a `stop()` method for clean shutdown.
-     *
-     * @param topics Array of topic keys, `TopicDescriptor` objects, or `RegExp` patterns.
-     * Regex patterns cannot be combined with `retryTopics: true`.
-     * @param handleMessage Async handler called for every message. Throw to trigger retries.
-     * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `circuitBreaker`, etc.
-     * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
-     * @example
-     * ```ts
-     * const handle = await kafka.startConsumer(['orders.created'], async (envelope) => {
-     *   await processOrder(envelope.payload);
-     * }, { retry: { maxRetries: 3 }, dlq: true });
-     * await handle.stop();
-     * ```
-     */
+    /** @inheritDoc */
     startConsumer<K extends Array<keyof T>>(topics: K, handleMessage: (envelope: EventEnvelope<T[K[number]]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
     startConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleMessage: (envelope: EventEnvelope<D["__type"]>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
-    /**
-     * Subscribe to one or more topics and consume messages in batches.
-     *
-     * `handleBatch` receives an array of decoded `EventEnvelope` objects together with
-     * batch metadata (topic, partition, high-watermark offset). Prefer this over
-     * `startConsumer` when throughput matters more than per-message latency.
-     *
-     * Set `autoCommit: false` in options when the handler calls `resolveOffset()` or
-     * `commitOffsetsIfNecessary()` directly, to avoid offset conflicts.
-     *
-     * @param topics Array of topic keys, `TopicDescriptor` objects, or `RegExp` patterns.
-     * @param handleBatch Async handler called with each batch of decoded messages.
-     * @param options Consumer configuration — `groupId`, `retry`, `dlq`, `autoCommit`, etc.
-     * @returns A handle with `{ groupId, stop() }` for managing the consumer lifecycle.
-     * @example
-     * ```ts
-     * await kafka.startBatchConsumer(['metrics'], async (envelopes, meta) => {
-     *   await db.insertMany(envelopes.map(e => e.payload));
-     *   meta.resolveOffset(envelopes.at(-1)!.offset);
-     * }, { autoCommit: false });
-     * ```
-     */
+    /** @inheritDoc */
     startBatchConsumer<K extends Array<keyof T>>(topics: K, handleBatch: (envelopes: EventEnvelope<T[K[number]]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
     startBatchConsumer<D extends TopicDescriptor<string & keyof T, T[string & keyof T]>>(topics: D[], handleBatch: (envelopes: EventEnvelope<D["__type"]>[], meta: BatchMeta) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
-    /**
-     * Consume messages from a topic as an AsyncIterableIterator.
-     * Use with `for await` — breaking out of the loop automatically stops the consumer.
-     *
-     * @example
-     * for await (const envelope of kafka.consume('my.topic')) {
-     *   console.log(envelope.data);
-     * }
-     */
+    /** @inheritDoc */
     consume<K extends keyof T & string>(topic: K, options?: ConsumerOptions<T>): AsyncIterableIterator<EventEnvelope<T[K]>>;
-    /**
-     * Accumulate messages into a window and flush the handler when either
-     * `maxMessages` is reached or `maxMs` has elapsed — whichever fires first.
-     * Remaining messages are flushed before the consumer disconnects on `stop()`.
-     * @example
-     * ```ts
-     * await kafka.startWindowConsumer('events', async (batch, meta) => {
-     *   await db.insertMany(batch.map(e => e.payload));
-     * }, { maxMessages: 100, maxMs: 5_000 });
-     * ```
-     */
+    /** @inheritDoc */
     startWindowConsumer<K extends keyof T & string>(topic: K, handler: (envelopes: EventEnvelope<T[K]>[], meta: WindowMeta) => Promise<void>, options: WindowConsumerOptions<T>): Promise<ConsumerHandle>;
-    /**
-     * Subscribe to topics and dispatch each message to a handler based on the value
-     * of a specific Kafka header. A thin, zero-overhead wrapper over `startConsumer`.
-     *
-     * All `ConsumerOptions` (retry, DLQ, deduplication, circuit breaker, etc.) apply
-     * uniformly across every route.
-     * @example
-     * ```ts
-     * await kafka.startRoutedConsumer(['domain.events'], {
-     *   header: 'x-event-type',
-     *   routes: {
-     *     'order.created': async (e) => handleOrderCreated(e.payload),
-     *   },
-     * });
-     * ```
-     */
+    /** @inheritDoc */
     startRoutedConsumer<K extends Array<keyof T>>(topics: K, routing: RoutingOptions<T[K[number]]>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
-    /**
-     * Subscribe to topics and consume messages with exactly-once semantics for
-     * read-process-write pipelines.
-     *
-     * Each message is handled inside a dedicated Kafka transaction.
-     * The handler receives a `TransactionalHandlerContext` whose `send` / `sendBatch`
-     * methods stage outgoing messages inside that transaction. On handler success the
-     * source offset commit and all staged sends are committed atomically. On handler
-     * failure the transaction is aborted and the source message is redelivered — no
-     * partial writes become visible to downstream consumers.
-     *
-     * Incompatible with `retryTopics: true` — throws at startup if set.
-     * @example
-     * ```ts
-     * await kafka.startTransactionalConsumer(['orders.created'], async (envelope, tx) => {
-     *   await tx.send('inventory.reserved', { orderId: envelope.payload.orderId, qty: 1 });
-     * });
-     * ```
-     */
+    /** @inheritDoc */
     startTransactionalConsumer<K extends Array<keyof T>>(topics: K, handler: (envelope: EventEnvelope<T[K[number]]>, tx: TransactionalHandlerContext<T>) => Promise<void>, options?: ConsumerOptions<T>): Promise<ConsumerHandle>;
-    /**
-     * Stop all consumers or a specific group.
-     *
-     * If `groupId` is unspecified, all active consumers are stopped.
-     * If `groupId` is specified, only the consumer with that group ID is stopped.
-     *
-     * @throws {Error} if the consumer fails to disconnect.
-     * @example
-     * ```ts
-     * await kafka.stopConsumer('billing-service'); // stop one group
-     * await kafka.stopConsumer();                  // stop all
-     * ```
-     */
+    /** @inheritDoc */
     stopConsumer(groupId?: string): Promise<void>;
     /** @inheritDoc */
     pauseConsumer(groupId: string | undefined, assignments: Array<{
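The removed docblocks above are replaced with `/** @inheritDoc */`, so per-method documentation now resolves from the `IKafkaClient` interfaces added in `client-CBBUDDtu.d.ts`; the contracts themselves read as unchanged. A sketch restating the exactly-once and retry/DLQ paths using only the signatures and examples visible in this hunk, where `processOrder` is an assumed application function:

```ts
// Exactly-once read-process-write: the source offset commit and all staged
// sends commit atomically; a thrown error aborts and redelivers the message.
const eos = await kafka.startTransactionalConsumer(['orders.created'], async (envelope, tx) => {
  await tx.send('inventory.reserved', { orderId: envelope.payload.orderId, qty: 1 });
});

// Per-message consumer with retries, then DLQ once retries are exhausted.
const orders = await kafka.startConsumer(['orders.created'], async (envelope) => {
  await processOrder(envelope.payload); // assumed application function
}, { retry: { maxRetries: 3 }, dlq: true });

await orders.stop();
await eos.stop();
```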
@@ -286,60 +190,16 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
         topic: string;
         partitions: number[];
     }>): void;
-    /**
-    private pauseTopicAllPartitions;
-    /** Resume all assigned partitions of a topic for a consumer group (used for queue backpressure). */
-    private resumeTopicAllPartitions;
-    /**
-     * Re-publish messages from a dead letter queue back to the original topic.
-     *
-     * Messages are consumed from `<topic>.dlq` and re-published to `<topic>`.
-     * The original topic is determined by the `x-dlq-original-topic` header.
-     * The `x-dlq-*` headers are stripped before re-publishing.
-     *
-     * @param topic - The topic to replay from `<topic>.dlq`
-     * @param options - Options for replay
-     * @returns { replayed: number; skipped: number } - counts of re-published vs skipped messages
-     * @example
-     * ```ts
-     * const { replayed, skipped } = await kafka.replayDlq('orders.created');
-     * ```
-     */
+    /** @inheritDoc */
     replayDlq(topic: string, options?: DlqReplayOptions): Promise<{
         replayed: number;
         skipped: number;
     }>;
-    /**
-     * Read a compacted topic from the beginning to its current high-watermark.
-     * Returns a `Map<key, EventEnvelope>` with the latest value per key.
-     * Tombstone messages (null value) remove the key from the map.
-     * @example
-     * ```ts
-     * const snapshot = await kafka.readSnapshot('users.state');
-     * const user = snapshot.get('user-42')?.payload;
-     * ```
-     */
+    /** @inheritDoc */
     readSnapshot<K extends keyof T & string>(topic: K, options?: ReadSnapshotOptions): Promise<Map<string, EventEnvelope<T[K]>>>;
-
-    /**
-     * Snapshot the current committed offsets of a consumer group into a Kafka topic.
-     * Each call appends a new record — the checkpoint topic is an append-only audit log.
-     * @example
-     * ```ts
-     * const result = await kafka.checkpointOffsets(undefined, 'checkpoints');
-     * console.log(`Saved ${result.partitionCount} offsets`);
-     * ```
-     */
+    /** @inheritDoc */
     checkpointOffsets(groupId: string | undefined, checkpointTopic: string): Promise<CheckpointResult>;
-    /**
-     * Restore a consumer group's committed offsets from the nearest checkpoint in `checkpointTopic`.
-     * Requires the consumer group to be stopped.
-     * @example
-     * ```ts
-     * await kafka.stopConsumer();
-     * await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
-     * ```
-     */
+    /** @inheritDoc */
     restoreFromCheckpoint(groupId: string | undefined, checkpointTopic: string, options?: RestoreCheckpointOptions): Promise<CheckpointRestoreResult>;
     /** @inheritDoc */
     resetOffsets(groupId: string | undefined, topic: string, position: "earliest" | "latest"): Promise<void>;
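A sketch of the DLQ-replay, snapshot, and checkpoint operations documented in the removed blocks above. The `'checkpoints'` topic name comes from those examples; everything else is assumed from the earlier sketches:

```ts
// Re-publish dead-lettered messages from orders.created.dlq back to the
// original topic; the x-dlq-* headers are stripped before re-publishing.
const { replayed, skipped } = await kafka.replayDlq('orders.created');
console.log(`replayed=${replayed} skipped=${skipped}`);

// Latest value per key from a compacted topic; tombstones delete keys.
const snapshot = await kafka.readSnapshot('users.state');
const user = snapshot.get('user-42')?.payload;

// Append an offset checkpoint, then restore it (group must be stopped first).
await kafka.checkpointOffsets(undefined, 'checkpoints');
await kafka.stopConsumer();
await kafka.restoreFromCheckpoint(undefined, 'checkpoints');
```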
@@ -355,43 +215,7 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
         partition: number;
         timestamp: number;
     }>): Promise<void>;
-    /**
-     * Returns the current circuit breaker state for a specific topic partition.
-     * Returns `undefined` when no circuit state exists — either `circuitBreaker` is not
-     * configured for the group, or the circuit has never been tripped.
-     *
-     * @param topic Topic name.
-     * @param partition Partition index.
-     * @param groupId Consumer group. Defaults to the client's default groupId.
-     *
-     * @returns `{ status, failures, windowSize }` snapshot for a given partition or `undefined` if no state exists.
-     * @example
-     * ```ts
-     * const state = kafka.getCircuitState('orders.created', 0);
-     * if (state?.status === 'open') console.warn('Circuit open!');
-     * ```
-     */
-    getCircuitState(topic: string, partition: number, groupId?: string): {
-        status: "closed" | "open" | "half-open";
-        failures: number;
-        windowSize: number;
-    } | undefined;
-    /**
-     * Query consumer group lag per partition.
-     * Lag = broker high-watermark − last committed offset.
-     * A committed offset of -1 (nothing committed yet) counts as full lag.
-     *
-     * Returns an empty array when the consumer group has never committed any
-     * offsets (freshly created group, `autoCommit: false` with no manual commits,
-     * or group not yet assigned). This is a Kafka protocol limitation:
-     * `fetchOffsets` only returns data for topic-partitions that have at least one
-     * committed offset. Use `checkStatus()` to verify broker connectivity in that case.
-     * @example
-     * ```ts
-     * const lag = await kafka.getConsumerLag();
-     * const total = lag.reduce((sum, p) => sum + p.lag, 0);
-     * ```
-     */
+    /** @inheritDoc */
     getConsumerLag(groupId?: string): Promise<Array<{
         topic: string;
         partition: number;
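The `getConsumerLag()` contract, including the empty-array case for groups that have never committed, also moved behind `@inheritDoc`. A small sketch of the aggregate-lag pattern from the removed example; the `10_000` threshold is an arbitrary assumption:

```ts
// Per-partition lag: broker high-watermark minus last committed offset.
const lag = await kafka.getConsumerLag();
const total = lag.reduce((sum, p) => sum + p.lag, 0);

// An empty array means the group has never committed offsets; the 0.8.0
// docblock suggests checkStatus() to verify broker connectivity in that case.
if (total > 10_000) {
  console.warn(`consumer lag high: ${total} messages`);
}
```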
@@ -408,75 +232,23 @@ declare class KafkaClient<T extends TopicMapConstraint<T>> implements IKafkaClie
         partition: number;
         offset: string;
     }>): Promise<void>;
-    /**
-
-
-
-
-
-
-    getClientId(): ClientId;
-    /**
-     * Return a snapshot of internal event counters accumulated since client creation
-     * (or since the last `resetMetrics()` call).
-     *
-     * @param topic Topic name to scope the snapshot to. When omitted, counters are
-     * aggregated across all topics. If the topic has no recorded events yet, returns
-     * a zero-valued snapshot.
-     * @returns Read-only `KafkaMetrics` snapshot: `processedCount`, `retryCount`, `dlqCount`, `dedupCount`.
-     * @example
-     * ```ts
-     * const { processedCount, dlqCount } = kafka.getMetrics();
-     * const topicMetrics = kafka.getMetrics('orders.created');
-     * ```
-     */
+    /** @inheritDoc */
+    getCircuitState(topic: string, partition: number, groupId?: string): {
+        status: "closed" | "open" | "half-open";
+        failures: number;
+        windowSize: number;
+    } | undefined;
+    /** @inheritDoc */
     getMetrics(topic?: string): Readonly<KafkaMetrics>;
     /** @inheritDoc */
     resetMetrics(topic?: string): void;
+    getClientId(): ClientId;
     /** @inheritDoc */
     disconnect(drainTimeoutMs?: number): Promise<void>;
-    /**
-     * NestJS lifecycle hook — called automatically when the host module is torn down.
-     * Drains in-flight handlers and disconnects all producers, consumers, and admin.
-     * `KafkaModule` relies on this method; no separate destroy provider is needed.
-     */
+    /** NestJS lifecycle hook — called automatically on module teardown. */
     onModuleDestroy(): Promise<void>;
     /** @inheritDoc */
     enableGracefulShutdown(signals?: NodeJS.Signals[], drainTimeoutMs?: number): void;
-    private preparePayload;
-    /**
-     * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
-     * The handler itself is not cancelled — the warning is diagnostic only.
-     */
-    private wrapWithTimeoutWarning;
-    /**
-     * Create and connect a transactional producer for EOS retry routing.
-     * Each retry level consumer gets its own producer with a unique `transactionalId`
-     * so Kafka can fence stale producers on restart without affecting other levels.
-     */
-    private createRetryTxProducer;
-    /**
-     * Ensure that a topic exists by creating it if it doesn't already exist.
-     * If `autoCreateTopics` is disabled, returns immediately.
-     * Concurrent calls for the same topic are deduplicated.
-     */
-    private ensureTopic;
-    /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
-    private setupConsumer;
-    /** Create or retrieve the deduplication context for a consumer group. */
-    private resolveDeduplicationContext;
-    /** Guard checks shared by startConsumer and startBatchConsumer. */
-    private validateTopicConsumerOpts;
-    /** Ensure all required topics exist for a consumer: base, DLQ, and dedup topics. */
-    private ensureConsumerTopics;
-    /** Create EOS transactional producer context for atomic main → retry.1 routing. */
-    private makeEosMainContext;
-    /** Start companion retry-level consumers and register them under the main groupId. */
-    private launchRetryChain;
-    /** Build MessageHandlerDeps with circuit breaker callbacks bound to the given groupId. */
-    private messageDepsFor;
-    /** Build the deps object passed to retry topic consumers. */
-    private buildRetryTopicDeps;
 }
 
 /**
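`getCircuitState()` is declared later in the class body now but keeps the same signature. A sketch of the circuit and metrics inspection pattern from the removed examples:

```ts
// Per-partition circuit breaker snapshot; undefined when circuitBreaker is
// not configured or the circuit has never been tripped for this partition.
const state = kafka.getCircuitState('orders.created', 0);
if (state?.status === 'open') {
  console.warn(`circuit open after ${state.failures}/${state.windowSize} failures`);
}

// Counters accumulated since client creation or the last resetMetrics() call.
const { processedCount, retryCount, dlqCount, dedupCount } = kafka.getMetrics();
kafka.resetMetrics('orders.created'); // reset a single topic's counters
```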
@@ -539,4 +311,4 @@ declare class KafkaRetryExhaustedError extends KafkaProcessingError {
     });
 }
 
-export { BatchMessageItem, BatchMeta, BatchSendOptions, CheckpointRestoreResult, CheckpointResult, ClientId, ConsumerGroupSummary, ConsumerHandle, ConsumerOptions, DlqReplayOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, KafkaClientOptions, KafkaHealthResult, KafkaMetrics, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, MessageHeaders, ReadSnapshotOptions, RestoreCheckpointOptions, RoutingOptions, SendOptions, TopicDescription, TopicDescriptor, TopicMapConstraint, TransactionContext, TransactionalHandlerContext, WindowConsumerOptions, WindowMeta };
+export { BatchMessageItem, BatchMeta, BatchSendOptions, CheckpointRestoreResult, CheckpointResult, ClientId, ConsumerGroupSummary, ConsumerHandle, ConsumerOptions, DlqReplayOptions, EventEnvelope, GroupId, IKafkaClient, KafkaClient, type KafkaClientOptions, KafkaHealthResult, KafkaInstrumentation, KafkaLogger, KafkaMetrics, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError, MessageHeaders, MessageLostContext, ReadSnapshotOptions, RestoreCheckpointOptions, RoutingOptions, SendOptions, TopicDescription, TopicDescriptor, TopicMapConstraint, TransactionContext, TransactionalHandlerContext, TtlExpiredContext, WindowConsumerOptions, WindowMeta };
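The widened export list means the callback context types can now be imported directly, and `KafkaClientOptions` is re-exported with an explicit `type` modifier, which matters under `isolatedModules` / `verbatimModuleSyntax`. A sketch, assuming the package root re-exports this `core` entry point:

```ts
import type {
  KafkaClientOptions,
  MessageLostContext,
  TtlExpiredContext,
} from '@drarzter/kafka-client';

// Typed callbacks can be declared without reaching into internal chunk files.
const onMessageLost = (ctx: MessageLostContext): void => {
  console.error('kafka.message-lost', ctx);
};
```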