@drarzter/kafka-client 0.2.2 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +104 -12
- package/dist/chunk-A56D7HXR.mjs +545 -0
- package/dist/chunk-A56D7HXR.mjs.map +1 -0
- package/dist/chunk-EQQGB2QZ.mjs +17 -0
- package/dist/chunk-EQQGB2QZ.mjs.map +1 -0
- package/dist/core.d.mts +118 -0
- package/dist/core.d.ts +118 -0
- package/dist/core.js +575 -0
- package/dist/core.js.map +1 -0
- package/dist/core.mjs +16 -0
- package/dist/core.mjs.map +1 -0
- package/dist/index.d.mts +5 -324
- package/dist/index.d.ts +5 -324
- package/dist/index.js +73 -41
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +29 -534
- package/dist/index.mjs.map +1 -1
- package/dist/testing.d.mts +104 -0
- package/dist/testing.d.ts +104 -0
- package/dist/testing.js +151 -0
- package/dist/testing.js.map +1 -0
- package/dist/testing.mjs +127 -0
- package/dist/testing.mjs.map +1 -0
- package/dist/types-CtwJihJ3.d.mts +224 -0
- package/dist/types-CtwJihJ3.d.ts +224 -0
- package/package.json +23 -1
package/README.md
CHANGED
|
@@ -4,17 +4,17 @@
|
|
|
4
4
|
[](https://github.com/drarzter/kafka-client/actions/workflows/publish.yml)
|
|
5
5
|
[](https://opensource.org/licenses/MIT)
|
|
6
6
|
|
|
7
|
-
Type-safe Kafka client
|
|
7
|
+
Type-safe Kafka client for Node.js. Framework-agnostic core with a first-class NestJS adapter. Built on top of [kafkajs](https://kafka.js.org/).
|
|
8
8
|
|
|
9
9
|
## What is this?
|
|
10
10
|
|
|
11
|
-
An opinionated
|
|
11
|
+
An opinionated, type-safe abstraction over kafkajs. Works standalone (Express, Fastify, raw Node) or as a NestJS DynamicModule. Not a full-featured framework — just a clean, typed layer for producing and consuming Kafka messages.
|
|
12
12
|
|
|
13
13
|
## Why?
|
|
14
14
|
|
|
15
15
|
- **Typed topics** — you define a map of topic -> message shape, and the compiler won't let you send wrong data to wrong topic
|
|
16
16
|
- **Topic descriptors** — `topic()` DX sugar lets you define topics as standalone typed objects instead of string keys
|
|
17
|
-
- **
|
|
17
|
+
- **Framework-agnostic** — use standalone or with NestJS (`register()` / `registerAsync()`, DI, lifecycle hooks)
|
|
18
18
|
- **Idempotent producer** — `acks: -1`, `idempotent: true` by default
|
|
19
19
|
- **Retry + DLQ** — configurable retries with backoff, dead letter queue for failed messages
|
|
20
20
|
- **Batch sending** — send multiple messages in a single request
|
|
@@ -29,17 +29,43 @@ An opinionated wrapper around kafkajs that integrates with NestJS as a DynamicMo
|
|
|
29
29
|
- **Multiple consumer groups** — named clients for different bounded contexts
|
|
30
30
|
- **Declarative & imperative** — use `@SubscribeTo()` decorator or `startConsumer()` directly
|
|
31
31
|
|
|
32
|
+
See the [Roadmap](./ROADMAP.md) for upcoming features and version history.
|
|
33
|
+
|
|
32
34
|
## Installation
|
|
33
35
|
|
|
34
36
|
```bash
|
|
35
37
|
npm install @drarzter/kafka-client
|
|
36
|
-
# or
|
|
37
|
-
pnpm add @drarzter/kafka-client
|
|
38
38
|
```
|
|
39
39
|
|
|
40
|
-
|
|
40
|
+
For NestJS projects, install peer dependencies: `@nestjs/common`, `@nestjs/core`, `reflect-metadata`, `rxjs`.
|
|
41
|
+
|
|
42
|
+
For standalone usage (Express, Fastify, raw Node), no extra dependencies needed — import from `@drarzter/kafka-client/core`.
|
|
43
|
+
|
|
44
|
+
## Standalone usage (no NestJS)
|
|
45
|
+
|
|
46
|
+
```typescript
|
|
47
|
+
import { KafkaClient, topic } from '@drarzter/kafka-client/core';
|
|
48
|
+
|
|
49
|
+
const OrderCreated = topic('order.created')<{ orderId: string; amount: number }>();
|
|
50
|
+
|
|
51
|
+
const kafka = new KafkaClient('my-app', 'my-group', ['localhost:9092']);
|
|
52
|
+
await kafka.connectProducer();
|
|
53
|
+
|
|
54
|
+
// Send
|
|
55
|
+
await kafka.sendMessage(OrderCreated, { orderId: '123', amount: 100 });
|
|
56
|
+
|
|
57
|
+
// Consume
|
|
58
|
+
await kafka.startConsumer([OrderCreated], async (message, topic) => {
|
|
59
|
+
console.log(`${topic}:`, message.orderId);
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
// Custom logger (winston, pino, etc.)
|
|
63
|
+
const kafka2 = new KafkaClient('my-app', 'my-group', ['localhost:9092'], {
|
|
64
|
+
logger: myWinstonLogger,
|
|
65
|
+
});
|
|
66
|
+
```
|
|
41
67
|
|
|
42
|
-
## Quick start
|
|
68
|
+
## Quick start (NestJS)
|
|
43
69
|
|
|
44
70
|
Send and receive a message in 3 files:
|
|
45
71
|
|
|
@@ -551,6 +577,7 @@ Passed to `KafkaModule.register()` or returned from `registerAsync()` factory:
|
|
|
551
577
|
| `name` | — | Named client identifier for multi-client setups |
|
|
552
578
|
| `isGlobal` | `false` | Make the client available in all modules without re-importing |
|
|
553
579
|
| `autoCreateTopics` | `false` | Auto-create topics on first send (dev only) |
|
|
580
|
+
| `numPartitions` | `1` | Number of partitions for auto-created topics |
|
|
554
581
|
| `strictSchemas` | `true` | Validate string topic keys against schemas registered via TopicDescriptor |
|
|
555
582
|
|
|
556
583
|
**Module-scoped** (default) — import `KafkaModule` in each module that needs it:
|
|
@@ -746,6 +773,70 @@ export class HealthService {
|
|
|
746
773
|
|
|
747
774
|
## Testing
|
|
748
775
|
|
|
776
|
+
### Testing utilities
|
|
777
|
+
|
|
778
|
+
Import from `@drarzter/kafka-client/testing` — it adds no runtime dependencies of its own; `jest` and `@testcontainers/kafka` are required only as peer dependencies of the utilities you use.
|
|
779
|
+
|
|
780
|
+
#### `createMockKafkaClient<T>()`
|
|
781
|
+
|
|
782
|
+
Fully typed mock with `jest.fn()` on every `IKafkaClient` method. All methods resolve to sensible defaults:
|
|
783
|
+
|
|
784
|
+
```typescript
|
|
785
|
+
import { createMockKafkaClient } from '@drarzter/kafka-client/testing';
|
|
786
|
+
|
|
787
|
+
const kafka = createMockKafkaClient<MyTopics>();
|
|
788
|
+
|
|
789
|
+
const service = new OrdersService(kafka);
|
|
790
|
+
await service.createOrder();
|
|
791
|
+
|
|
792
|
+
expect(kafka.sendMessage).toHaveBeenCalledWith(
|
|
793
|
+
'order.created',
|
|
794
|
+
expect.objectContaining({ orderId: '123' }),
|
|
795
|
+
);
|
|
796
|
+
|
|
797
|
+
// Override return values
|
|
798
|
+
kafka.checkStatus.mockResolvedValueOnce({ topics: ['order.created'] });
|
|
799
|
+
|
|
800
|
+
// Mock rejections
|
|
801
|
+
kafka.sendMessage.mockRejectedValueOnce(new Error('broker down'));
|
|
802
|
+
```
|
|
803
|
+
|
|
804
|
+
#### `KafkaTestContainer`
|
|
805
|
+
|
|
806
|
+
Thin wrapper around `@testcontainers/kafka` that handles common setup pain points — transaction coordinator warmup, topic pre-creation:
|
|
807
|
+
|
|
808
|
+
```typescript
|
|
809
|
+
import { KafkaTestContainer } from '@drarzter/kafka-client/testing';
|
|
810
|
+
import { KafkaClient } from '@drarzter/kafka-client/core';
|
|
811
|
+
|
|
812
|
+
let container: KafkaTestContainer;
|
|
813
|
+
let brokers: string[];
|
|
814
|
+
|
|
815
|
+
beforeAll(async () => {
|
|
816
|
+
container = new KafkaTestContainer({
|
|
817
|
+
topics: ['orders', { topic: 'payments', numPartitions: 3 }],
|
|
818
|
+
});
|
|
819
|
+
brokers = await container.start();
|
|
820
|
+
}, 120_000);
|
|
821
|
+
|
|
822
|
+
afterAll(() => container.stop());
|
|
823
|
+
|
|
824
|
+
it('sends and receives', async () => {
|
|
825
|
+
const kafka = new KafkaClient('test', 'test-group', brokers);
|
|
826
|
+
// ...
|
|
827
|
+
});
|
|
828
|
+
```
|
|
829
|
+
|
|
830
|
+
Options:
|
|
831
|
+
|
|
832
|
+
| Option | Default | Description |
|
|
833
|
+
|--------|---------|-------------|
|
|
834
|
+
| `image` | `"confluentinc/cp-kafka:7.7.0"` | Docker image |
|
|
835
|
+
| `transactionWarmup` | `true` | Warm up transaction coordinator on start |
|
|
836
|
+
| `topics` | `[]` | Topics to pre-create (string or `{ topic, numPartitions }`) |
|
|
837
|
+
|
|
838
|
+
### Running tests
|
|
839
|
+
|
|
749
840
|
Unit tests (mocked kafkajs):
|
|
750
841
|
|
|
751
842
|
```bash
|
|
@@ -766,11 +857,12 @@ Both suites run in CI on every push to `main`.
|
|
|
766
857
|
|
|
767
858
|
```
|
|
768
859
|
src/
|
|
769
|
-
├── client/ # KafkaClient, types, topic(), error classes
|
|
770
|
-
├──
|
|
771
|
-
├──
|
|
772
|
-
├──
|
|
773
|
-
|
|
860
|
+
├── client/ # Core — KafkaClient, types, topic(), error classes (0 framework deps)
|
|
861
|
+
├── nest/ # NestJS adapter — Module, Explorer, decorators, health
|
|
862
|
+
├── testing/ # Testing utilities — mock client, testcontainer wrapper
|
|
863
|
+
├── core.ts # Standalone entrypoint (@drarzter/kafka-client/core)
|
|
864
|
+
├── testing.ts # Testing entrypoint (@drarzter/kafka-client/testing)
|
|
865
|
+
└── index.ts # Full entrypoint — core + NestJS adapter
|
|
774
866
|
```
|
|
775
867
|
|
|
776
868
|
All exported types and methods have JSDoc comments — your IDE will show inline docs and autocomplete.
|
|
@@ -0,0 +1,545 @@
|
|
|
1
|
+
// src/client/kafka.client.ts
|
|
2
|
+
import { Kafka, Partitioners, logLevel as KafkaLogLevel } from "kafkajs";
|
|
3
|
+
|
|
4
|
+
// src/client/errors.ts
|
|
5
|
+
var KafkaProcessingError = class extends Error {
|
|
6
|
+
constructor(message, topic2, originalMessage, options) {
|
|
7
|
+
super(message, options);
|
|
8
|
+
this.topic = topic2;
|
|
9
|
+
this.originalMessage = originalMessage;
|
|
10
|
+
this.name = "KafkaProcessingError";
|
|
11
|
+
if (options?.cause) this.cause = options.cause;
|
|
12
|
+
}
|
|
13
|
+
};
|
|
14
|
+
var KafkaValidationError = class extends Error {
|
|
15
|
+
constructor(topic2, originalMessage, options) {
|
|
16
|
+
super(`Schema validation failed for topic "${topic2}"`, options);
|
|
17
|
+
this.topic = topic2;
|
|
18
|
+
this.originalMessage = originalMessage;
|
|
19
|
+
this.name = "KafkaValidationError";
|
|
20
|
+
if (options?.cause) this.cause = options.cause;
|
|
21
|
+
}
|
|
22
|
+
};
|
|
23
|
+
var KafkaRetryExhaustedError = class extends KafkaProcessingError {
|
|
24
|
+
constructor(topic2, originalMessage, attempts, options) {
|
|
25
|
+
super(
|
|
26
|
+
`Message processing failed after ${attempts} attempts on topic "${topic2}"`,
|
|
27
|
+
topic2,
|
|
28
|
+
originalMessage,
|
|
29
|
+
options
|
|
30
|
+
);
|
|
31
|
+
this.attempts = attempts;
|
|
32
|
+
this.name = "KafkaRetryExhaustedError";
|
|
33
|
+
}
|
|
34
|
+
};
|
|
35
|
+
|
|
36
|
+
// src/client/kafka.client.ts
|
|
37
|
+
// Kafka `acks: -1`: wait for acknowledgement from all in-sync replicas
// before considering a send successful (strongest durability setting).
var ACKS_ALL = -1;
|
|
38
|
+
/**
 * Normalize an arbitrary thrown value into an `Error` instance.
 * Real `Error`s pass through untouched; anything else (strings, numbers,
 * objects) is stringified and wrapped so callers can rely on `.message`/`.stack`.
 */
function toError(error) {
  if (error instanceof Error) {
    return error;
  }
  return new Error(String(error));
}
|
|
41
|
+
/**
 * Type-erased (bundled) core Kafka client built on kafkajs.
 *
 * Responsibilities visible in this block:
 * - one idempotent, transactional producer per client (`acks: -1`);
 * - lazily-created consumers, one per consumer group id;
 * - optional topic auto-creation via the admin client;
 * - schema registration/validation for topic descriptors that expose
 *   a `__schema` object with a `.parse()` method (zod-like — the code
 *   only relies on `.parse()` throwing on invalid input);
 * - retry with linear backoff, interceptors, and dead-letter-queue routing.
 */
var KafkaClient = class {
  // kafkajs Kafka instance (created in the constructor).
  kafka;
  // Single shared producer (idempotent + transactional).
  producer;
  // groupId -> kafkajs Consumer; consumers are created lazily per group.
  consumers = /* @__PURE__ */ new Map();
  // Admin client, connected lazily (see `isAdminConnected`).
  admin;
  // Pluggable logger with log/warn/error; defaults to console with a prefix.
  logger;
  // When true, topics are created on first send via the admin client.
  autoCreateTopicsEnabled;
  // When true, plain-string topic keys are validated against registered schemas.
  strictSchemasEnabled;
  // Partition count used for auto-created topics.
  numPartitions;
  // Topics already created (or confirmed) by `ensureTopic` — avoids repeat admin calls.
  ensuredTopics = /* @__PURE__ */ new Set();
  // Group id used when a consumer call does not specify one.
  defaultGroupId;
  // topic name -> schema; populated from descriptors and explicit schema maps.
  schemaRegistry = /* @__PURE__ */ new Map();
  // groupId -> "eachMessage" | "eachBatch"; guards against mixing modes on one group.
  runningConsumers = /* @__PURE__ */ new Map();
  // Tracks admin connection state so connect/disconnect happen at most once.
  isAdminConnected = false;
  clientId;
  /**
   * @param clientId  kafkajs clientId; also used as log prefix and `${clientId}-tx` transactional id.
   * @param groupId   default consumer group id.
   * @param brokers   broker address list passed straight to kafkajs.
   * @param options   optional: logger, autoCreateTopics, strictSchemas, numPartitions.
   */
  constructor(clientId, groupId, brokers, options) {
    this.clientId = clientId;
    this.defaultGroupId = groupId;
    this.logger = options?.logger ?? {
      log: (msg) => console.log(`[KafkaClient:${clientId}] ${msg}`),
      warn: (msg, ...args) => console.warn(`[KafkaClient:${clientId}] ${msg}`, ...args),
      error: (msg, ...args) => console.error(`[KafkaClient:${clientId}] ${msg}`, ...args)
    };
    this.autoCreateTopicsEnabled = options?.autoCreateTopics ?? false;
    this.strictSchemasEnabled = options?.strictSchemas ?? true;
    this.numPartitions = options?.numPartitions ?? 1;
    this.kafka = new Kafka({
      clientId: this.clientId,
      brokers,
      logLevel: KafkaLogLevel.WARN,
      // Bridge kafkajs logs into our logger. Known-transient ERROR-level
      // messages (coordinator/topic-creation races) are downgraded to warn
      // so startup noise does not look like real failures.
      logCreator: () => ({ level, log }) => {
        const msg = `[kafkajs] ${log.message}`;
        if (level === KafkaLogLevel.ERROR) {
          const text = log.message ?? "";
          const isRetriable = text.includes("TOPIC_ALREADY_EXISTS") || text.includes("GROUP_COORDINATOR_NOT_AVAILABLE") || text.includes("NOT_COORDINATOR") || text.includes("Response GroupCoordinator") || text.includes("Response CreateTopics");
          if (isRetriable) this.logger.warn(msg);
          else this.logger.error(msg);
        } else if (level === KafkaLogLevel.WARN) {
          this.logger.warn(msg);
        } else {
          this.logger.log(msg);
        }
      }
    });
    // Idempotent producer: maxInFlightRequests=1 preserves ordering under retries.
    this.producer = this.kafka.producer({
      createPartitioner: Partitioners.DefaultPartitioner,
      idempotent: true,
      transactionalId: `${clientId}-tx`,
      maxInFlightRequests: 1
    });
    this.admin = this.kafka.admin();
  }
  /** Validate, serialize, and send a single message (auto-creating the topic if enabled). */
  async sendMessage(topicOrDesc, message, options = {}) {
    const payload = this.buildSendPayload(topicOrDesc, [
      { value: message, key: options.key, headers: options.headers }
    ]);
    await this.ensureTopic(payload.topic);
    await this.producer.send(payload);
  }
  /** Send many messages to one topic in a single producer request. */
  async sendBatch(topicOrDesc, messages) {
    const payload = this.buildSendPayload(topicOrDesc, messages);
    await this.ensureTopic(payload.topic);
    await this.producer.send(payload);
  }
  /** Execute multiple sends atomically. Commits on success, aborts on error. */
  async transaction(fn) {
    const tx = await this.producer.transaction();
    try {
      // `ctx` mirrors sendMessage/sendBatch but routes through the transaction.
      const ctx = {
        send: async (topicOrDesc, message, options = {}) => {
          const payload = this.buildSendPayload(topicOrDesc, [
            { value: message, key: options.key, headers: options.headers }
          ]);
          await this.ensureTopic(payload.topic);
          await tx.send(payload);
        },
        sendBatch: async (topicOrDesc, messages) => {
          const payload = this.buildSendPayload(topicOrDesc, messages);
          await this.ensureTopic(payload.topic);
          await tx.send(payload);
        }
      };
      await fn(ctx);
      await tx.commit();
    } catch (error) {
      // Best-effort abort: log a failed abort, but always rethrow the original error.
      try {
        await tx.abort();
      } catch (abortError) {
        this.logger.error(
          "Failed to abort transaction:",
          toError(abortError).message
        );
      }
      throw error;
    }
  }
  // ── Producer lifecycle ───────────────────────────────────────────
  /** Connect the idempotent producer. Called automatically by `KafkaModule.register()`. */
  async connectProducer() {
    await this.producer.connect();
    this.logger.log("Producer connected");
  }
  /** Disconnect only the producer (consumers/admin untouched). */
  async disconnectProducer() {
    await this.producer.disconnect();
    this.logger.log("Producer disconnected");
  }
  /**
   * Subscribe and run a per-message consumer.
   * Pipeline per message: empty-check → JSON parse → schema validation →
   * handler with retry/interceptors/DLQ. Failures at each stage skip the message.
   */
  async startConsumer(topics, handleMessage, options = {}) {
    const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
    await consumer.run({
      autoCommit: options.autoCommit ?? true,
      eachMessage: async ({ topic: topic2, message }) => {
        if (!message.value) {
          this.logger.warn(`Received empty message from topic ${topic2}`);
          return;
        }
        const raw = message.value.toString();
        const parsed = this.parseJsonMessage(raw, topic2);
        if (parsed === null) return;
        const validated = await this.validateWithSchema(
          parsed,
          raw,
          topic2,
          schemaMap,
          interceptors,
          dlq
        );
        if (validated === null) return;
        // NOTE: `messages` here is the single validated message (not an array);
        // executeWithRetry passes it through to interceptors as-is when !isBatch.
        await this.executeWithRetry(
          () => handleMessage(validated, topic2),
          { topic: topic2, messages: validated, rawMessages: [raw], interceptors, dlq, retry }
        );
      }
    });
    this.runningConsumers.set(gid, "eachMessage");
  }
  /**
   * Subscribe and run a batch consumer (kafkajs `eachBatch`).
   * Invalid/empty/unparseable messages are filtered out; the handler receives
   * only validated messages plus batch metadata (partition, offsets, heartbeat).
   */
  async startBatchConsumer(topics, handleBatch, options = {}) {
    const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
    await consumer.run({
      autoCommit: options.autoCommit ?? true,
      eachBatch: async ({
        batch,
        heartbeat,
        resolveOffset,
        commitOffsetsIfNecessary
      }) => {
        const validMessages = [];
        const rawMessages = [];
        for (const message of batch.messages) {
          if (!message.value) {
            this.logger.warn(
              `Received empty message from topic ${batch.topic}`
            );
            continue;
          }
          const raw = message.value.toString();
          const parsed = this.parseJsonMessage(raw, batch.topic);
          if (parsed === null) continue;
          const validated = await this.validateWithSchema(
            parsed,
            raw,
            batch.topic,
            schemaMap,
            interceptors,
            dlq
          );
          if (validated === null) continue;
          validMessages.push(validated);
          rawMessages.push(raw);
        }
        if (validMessages.length === 0) return;
        // Batch control surface handed to the user handler.
        const meta = {
          partition: batch.partition,
          highWatermark: batch.highWatermark,
          heartbeat,
          resolveOffset,
          commitOffsetsIfNecessary
        };
        // NOTE(review): rawMessages for DLQ is recomputed from batch.messages
        // (all non-empty raws), not the filtered `rawMessages` above — so on
        // retry exhaustion even messages that failed validation earlier are
        // DLQ'd again alongside valid ones. Presumably intentional; verify.
        await this.executeWithRetry(
          () => handleBatch(validMessages, batch.topic, meta),
          {
            topic: batch.topic,
            messages: validMessages,
            rawMessages: batch.messages.filter((m) => m.value).map((m) => m.value.toString()),
            interceptors,
            dlq,
            retry,
            isBatch: true
          }
        );
      }
    });
    this.runningConsumers.set(gid, "eachBatch");
  }
  // ── Consumer lifecycle ───────────────────────────────────────────
  /** Disconnect every consumer (all groups) and clear consumer bookkeeping. */
  async stopConsumer() {
    const tasks = [];
    for (const consumer of this.consumers.values()) {
      tasks.push(consumer.disconnect());
    }
    // allSettled: one failing disconnect must not block the others.
    await Promise.allSettled(tasks);
    this.consumers.clear();
    this.runningConsumers.clear();
    this.logger.log("All consumers disconnected");
  }
  /** Check broker connectivity and return available topics. */
  async checkStatus() {
    if (!this.isAdminConnected) {
      await this.admin.connect();
      this.isAdminConnected = true;
    }
    const topics = await this.admin.listTopics();
    return { topics };
  }
  /** The clientId this instance was constructed with. */
  getClientId() {
    return this.clientId;
  }
  /** Gracefully disconnect producer, all consumers, and admin. */
  async disconnect() {
    const tasks = [this.producer.disconnect()];
    for (const consumer of this.consumers.values()) {
      tasks.push(consumer.disconnect());
    }
    if (this.isAdminConnected) {
      tasks.push(this.admin.disconnect());
      this.isAdminConnected = false;
    }
    await Promise.allSettled(tasks);
    this.consumers.clear();
    this.runningConsumers.clear();
    this.logger.log("All connections closed");
  }
  // ── Private helpers ──────────────────────────────────────────────
  /** Return the consumer for `groupId` (default group if falsy), creating it lazily. */
  getOrCreateConsumer(groupId) {
    const gid = groupId || this.defaultGroupId;
    if (!this.consumers.has(gid)) {
      this.consumers.set(gid, this.kafka.consumer({ groupId: gid }));
    }
    return this.consumers.get(gid);
  }
  /** Accepts a plain topic name or a `{ __topic }` descriptor; returns the name. */
  resolveTopicName(topicOrDescriptor) {
    if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
    if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
      return topicOrDescriptor.__topic;
    }
    return String(topicOrDescriptor);
  }
  /** Create the topic via admin (once per topic) when autoCreateTopics is on; else no-op. */
  async ensureTopic(topic2) {
    if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
    if (!this.isAdminConnected) {
      await this.admin.connect();
      this.isAdminConnected = true;
    }
    await this.admin.createTopics({
      topics: [{ topic: topic2, numPartitions: this.numPartitions }]
    });
    this.ensuredTopics.add(topic2);
  }
  /** Register schema from descriptor into global registry (side-effect). */
  registerSchema(topicOrDesc) {
    if (topicOrDesc?.__schema) {
      const topic2 = this.resolveTopicName(topicOrDesc);
      this.schemaRegistry.set(topic2, topicOrDesc.__schema);
    }
  }
  /** Validate message against schema. Pure — no side-effects on registry. */
  validateMessage(topicOrDesc, message) {
    // Descriptor schema wins; otherwise fall back to the registry (strict mode only).
    if (topicOrDesc?.__schema) {
      return topicOrDesc.__schema.parse(message);
    }
    if (this.strictSchemasEnabled && typeof topicOrDesc === "string") {
      const schema = this.schemaRegistry.get(topicOrDesc);
      if (schema) return schema.parse(message);
    }
    return message;
  }
  /**
   * Build a kafkajs-ready send payload.
   * Handles: topic resolution, schema registration, validation, JSON serialization.
   */
  buildSendPayload(topicOrDesc, messages) {
    this.registerSchema(topicOrDesc);
    const topic2 = this.resolveTopicName(topicOrDesc);
    return {
      topic: topic2,
      messages: messages.map((m) => ({
        value: JSON.stringify(this.validateMessage(topicOrDesc, m.value)),
        key: m.key ?? null,
        headers: m.headers
      })),
      acks: ACKS_ALL
    };
  }
  /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
  async setupConsumer(topics, mode, options) {
    const {
      groupId: optGroupId,
      fromBeginning = false,
      retry,
      dlq = false,
      interceptors = [],
      schemas: optionSchemas
    } = options;
    const gid = optGroupId || this.defaultGroupId;
    // A kafkajs consumer can only run in one mode; reject mixing modes per group.
    const existingMode = this.runningConsumers.get(gid);
    const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
    if (existingMode === oppositeMode) {
      throw new Error(
        `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
      );
    }
    const consumer = this.getOrCreateConsumer(optGroupId);
    const schemaMap = this.buildSchemaMap(topics, optionSchemas);
    const topicNames = topics.map(
      (t) => this.resolveTopicName(t)
    );
    await consumer.connect();
    await this.subscribeWithRetry(consumer, topicNames, fromBeginning, options.subscribeRetry);
    this.logger.log(
      `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
    );
    return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
  }
  /** Merge descriptor schemas + option-provided schemas; also mirrors into the registry. */
  buildSchemaMap(topics, optionSchemas) {
    const schemaMap = /* @__PURE__ */ new Map();
    for (const t of topics) {
      if (t?.__schema) {
        const name = this.resolveTopicName(t);
        schemaMap.set(name, t.__schema);
        this.schemaRegistry.set(name, t.__schema);
      }
    }
    // Explicit option schemas override descriptor schemas for the same key.
    if (optionSchemas) {
      for (const [k, v] of optionSchemas) {
        schemaMap.set(k, v);
        this.schemaRegistry.set(k, v);
      }
    }
    return schemaMap;
  }
  /** Parse raw message as JSON. Returns null on failure (logs error). */
  parseJsonMessage(raw, topic2) {
    try {
      return JSON.parse(raw);
    } catch (error) {
      this.logger.error(
        `Failed to parse message from topic ${topic2}:`,
        toError(error).stack
      );
      return null;
    }
  }
  /**
   * Validate a parsed message against the schema map.
   * On failure: logs error, sends to DLQ if enabled, calls interceptor.onError.
   * Returns validated message or null.
   */
  async validateWithSchema(message, raw, topic2, schemaMap, interceptors, dlq) {
    const schema = schemaMap.get(topic2);
    if (!schema) return message;
    try {
      return schema.parse(message);
    } catch (error) {
      const err = toError(error);
      const validationError = new KafkaValidationError(topic2, message, {
        cause: err
      });
      this.logger.error(
        `Schema validation failed for topic ${topic2}:`,
        err.message
      );
      if (dlq) await this.sendToDlq(topic2, raw);
      for (const interceptor of interceptors) {
        await interceptor.onError?.(message, topic2, validationError);
      }
      return null;
    }
  }
  /**
   * Execute a handler with retry, interceptors, and DLQ support.
   * Used by both single-message and batch consumers.
   * Backoff is linear: backoffMs * attempt. On the final failed attempt of a
   * retried run, interceptors receive a KafkaRetryExhaustedError; otherwise
   * they receive the raw error.
   */
  async executeWithRetry(fn, ctx) {
    const { topic: topic2, messages, rawMessages, interceptors, dlq, retry, isBatch } = ctx;
    const maxAttempts = retry ? retry.maxRetries + 1 : 1;
    const backoffMs = retry?.backoffMs ?? 1e3;
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        // before-hooks: per message for batches, once for a single message.
        if (isBatch) {
          for (const interceptor of interceptors) {
            for (const msg of messages) {
              await interceptor.before?.(msg, topic2);
            }
          }
        } else {
          for (const interceptor of interceptors) {
            await interceptor.before?.(messages, topic2);
          }
        }
        await fn();
        // after-hooks mirror the before-hooks.
        if (isBatch) {
          for (const interceptor of interceptors) {
            for (const msg of messages) {
              await interceptor.after?.(msg, topic2);
            }
          }
        } else {
          for (const interceptor of interceptors) {
            await interceptor.after?.(messages, topic2);
          }
        }
        return;
      } catch (error) {
        const err = toError(error);
        const isLastAttempt = attempt === maxAttempts;
        if (isLastAttempt && maxAttempts > 1) {
          const exhaustedError = new KafkaRetryExhaustedError(
            topic2,
            messages,
            maxAttempts,
            { cause: err }
          );
          for (const interceptor of interceptors) {
            await interceptor.onError?.(messages, topic2, exhaustedError);
          }
        } else {
          for (const interceptor of interceptors) {
            await interceptor.onError?.(messages, topic2, err);
          }
        }
        this.logger.error(
          `Error processing ${isBatch ? "batch" : "message"} from topic ${topic2} (attempt ${attempt}/${maxAttempts}):`,
          err.stack
        );
        if (isLastAttempt) {
          // Exhausted: route raw payloads to the DLQ if enabled; error is not rethrown.
          if (dlq) {
            for (const raw of rawMessages) {
              await this.sendToDlq(topic2, raw);
            }
          }
        } else {
          await this.sleep(backoffMs * attempt);
        }
      }
    }
  }
  /** Forward a raw payload to `<topic>.dlq`. Failures are logged, never thrown. */
  async sendToDlq(topic2, rawMessage) {
    const dlqTopic = `${topic2}.dlq`;
    try {
      await this.producer.send({
        topic: dlqTopic,
        messages: [{ value: rawMessage }],
        acks: ACKS_ALL
      });
      this.logger.warn(`Message sent to DLQ: ${dlqTopic}`);
    } catch (error) {
      this.logger.error(
        `Failed to send message to DLQ ${dlqTopic}:`,
        toError(error).stack
      );
    }
  }
  /** Subscribe with fixed-interval retries (default 5 tries, 5s apart); rethrows on final failure. */
  async subscribeWithRetry(consumer, topics, fromBeginning, retryOpts) {
    const maxAttempts = retryOpts?.retries ?? 5;
    const backoffMs = retryOpts?.backoffMs ?? 5e3;
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        await consumer.subscribe({ topics, fromBeginning });
        return;
      } catch (error) {
        if (attempt === maxAttempts) throw error;
        const msg = toError(error).message;
        this.logger.warn(
          `Failed to subscribe to [${topics.join(", ")}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${backoffMs}ms...`
        );
        await this.sleep(backoffMs);
      }
    }
  }
  /** Promise-based delay helper. */
  sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
};
|
|
523
|
+
|
|
524
|
+
// src/client/topic.ts
|
|
525
|
+
function topic(name) {
|
|
526
|
+
const fn = () => ({
|
|
527
|
+
__topic: name,
|
|
528
|
+
__type: void 0
|
|
529
|
+
});
|
|
530
|
+
fn.schema = (schema) => ({
|
|
531
|
+
__topic: name,
|
|
532
|
+
__type: void 0,
|
|
533
|
+
__schema: schema
|
|
534
|
+
});
|
|
535
|
+
return fn;
|
|
536
|
+
}
|
|
537
|
+
|
|
538
|
+
export {
|
|
539
|
+
KafkaProcessingError,
|
|
540
|
+
KafkaValidationError,
|
|
541
|
+
KafkaRetryExhaustedError,
|
|
542
|
+
KafkaClient,
|
|
543
|
+
topic
|
|
544
|
+
};
|
|
545
|
+
//# sourceMappingURL=chunk-A56D7HXR.mjs.map
|