@saga-bus/transport-kafka 0.1.0
- package/LICENSE +21 -0
- package/README.md +89 -0
- package/dist/index.cjs +223 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +123 -0
- package/dist/index.d.ts +123 -0
- package/dist/index.js +196 -0
- package/dist/index.js.map +1 -0
- package/package.json +59 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Dean Foran

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,89 @@
# @saga-bus/transport-kafka

Apache Kafka transport for saga-bus using KafkaJS.

## Installation

```bash
pnpm add @saga-bus/transport-kafka kafkajs
```

## Usage

```typescript
import { Kafka } from "kafkajs";
import { KafkaTransport } from "@saga-bus/transport-kafka";
import { createBus } from "@saga-bus/core";

const kafka = new Kafka({
  clientId: "my-app",
  brokers: ["localhost:9092"],
});

const transport = new KafkaTransport({
  kafka,
  groupId: "my-consumer-group",
  defaultTopic: "saga-events",
});

const bus = createBus({
  transport,
  sagas: [...],
});

await bus.start();
```

## Features

- Topic-based message routing
- Consumer group coordination
- Automatic topic creation (optional)
- Configurable partitions and replication
- Message key routing via correlation ID
- Offset management

## Message Format

Messages are published with:

| Field | Value |
|-------|-------|
| `key` | Correlation ID (for partition affinity) |
| `value` | JSON-serialized message envelope |
| `headers.type` | Message type |
| `headers.correlationId` | Correlation ID |
| `headers.messageId` | Unique message ID |
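
Reading the compiled `KafkaTransport` later in this diff: the transport itself stamps `messageId` and `messageType` headers, spreads whatever headers the caller passes on top (which is where `correlationId` and `type` would come from), and the Kafka message key falls back to the generated message ID when no key is supplied. A sketch of the resulting record, with illustrative values only, not normative output:

```typescript
// Roughly what the transport hands to producer.send() for one message.
// Field names follow the bundled source; header values beyond messageId
// and messageType depend on what the bus layer passes in publish options.
const record = {
  key: "order-123", // options.key, falling back to the envelope id
  value: JSON.stringify({
    id: "a1b2c3d4-...",          // randomUUID()
    type: "OrderCreated",
    payload: { type: "OrderCreated", orderId: "123" }, // the message itself
    headers: { "x-message-type": "OrderCreated" },     // plus caller headers
    timestamp: new Date(),       // serialized to an ISO string by JSON.stringify
    partitionKey: "order-123",
  }),
  headers: { messageId: "a1b2c3d4-...", messageType: "OrderCreated" },
};
```
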

## Configuration

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `kafka` | `Kafka` | required | KafkaJS instance |
| `defaultTopic` | `string` | - | Default topic |
| `groupId` | `string` | - | Consumer group ID |
| `createTopics` | `boolean` | `false` | Auto-create topics |
| `numPartitions` | `number` | `3` | Partitions for new topics |
| `replicationFactor` | `number` | `1` | Replication factor |
| `fromBeginning` | `boolean` | `false` | Start from earliest |
| `sessionTimeout` | `number` | `30000` | Session timeout (ms) |
| `heartbeatInterval` | `number` | `3000` | Heartbeat interval (ms) |

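For reference, here is a transport with every option spelled out. This is a sketch with the documented defaults written explicitly, not a recommended production configuration:

```typescript
const transport = new KafkaTransport({
  kafka,                          // required KafkaJS instance
  defaultTopic: "saga-events",    // used when publish omits an endpoint
  groupId: "my-consumer-group",   // required once you subscribe
  createTopics: false,            // true = auto-create missing topics
  numPartitions: 3,               // only used when creating topics
  replicationFactor: 1,           // only used when creating topics
  fromBeginning: false,           // true = start from the earliest offset
  sessionTimeout: 30000,          // ms
  heartbeatInterval: 3000,        // ms
});
```
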
## Topic Routing

Override the default topic per-publish:

```typescript
await bus.publish(
  { type: "OrderCreated", payload: { orderId: "123" } },
  { endpoint: "orders-topic" }
);
```

## Partition Affinity

Messages with the same correlation ID are routed to the same partition, ensuring ordered processing per saga instance.

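For example, publishing through the transport directly (a sketch mirroring the example in the bundled type declarations; the correlation-ID routing described above maps onto the transport's `key` publish option):

```typescript
// Same key, same partition: the consumer sees these in publish order.
await transport.publish(
  { type: "OrderSubmitted", orderId: "123" },
  { endpoint: "saga-bus.orders", key: "order-123" }
);
await transport.publish(
  { type: "OrderPaid", orderId: "123" },
  { endpoint: "saga-bus.orders", key: "order-123" }
);
```
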
## License

MIT

package/dist/index.cjs
ADDED
@@ -0,0 +1,223 @@
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var index_exports = {};
__export(index_exports, {
  KafkaTransport: () => KafkaTransport
});
module.exports = __toCommonJS(index_exports);

// src/KafkaTransport.ts
var import_node_crypto = require("crypto");
var KafkaTransport = class {
  kafka;
  defaultTopic;
  groupId;
  createTopics;
  numPartitions;
  replicationFactor;
  fromBeginning;
  sessionTimeout;
  heartbeatInterval;
  producer = null;
  consumer = null;
  handlers = /* @__PURE__ */ new Map();
  subscriptions = [];
  isRunning = false;
  constructor(options) {
    this.kafka = options.kafka;
    this.defaultTopic = options.defaultTopic;
    this.groupId = options.groupId;
    this.createTopics = options.createTopics ?? false;
    this.numPartitions = options.numPartitions ?? 3;
    this.replicationFactor = options.replicationFactor ?? 1;
    this.fromBeginning = options.fromBeginning ?? false;
    this.sessionTimeout = options.sessionTimeout ?? 3e4;
    this.heartbeatInterval = options.heartbeatInterval ?? 3e3;
  }
  async start() {
    if (this.isRunning) {
      return;
    }
    this.producer = this.kafka.producer();
    await this.producer.connect();
    if (this.subscriptions.length > 0) {
      if (!this.groupId) {
        throw new Error("groupId is required for consuming");
      }
      if (this.createTopics) {
        await this.ensureTopics();
      }
      this.consumer = this.kafka.consumer({
        groupId: this.groupId,
        sessionTimeout: this.sessionTimeout,
        heartbeatInterval: this.heartbeatInterval
      });
      await this.consumer.connect();
      for (const sub of this.subscriptions) {
        await this.consumer.subscribe({
          topic: sub.topic,
          fromBeginning: this.fromBeginning
        });
      }
      this.isRunning = true;
      await this.consumer.run({
        autoCommit: false,
        eachMessage: async (payload) => {
          await this.processMessage(payload);
        }
      });
    } else {
      this.isRunning = true;
    }
  }
  async stop() {
    this.isRunning = false;
    if (this.consumer) {
      await this.consumer.disconnect();
      this.consumer = null;
    }
    if (this.producer) {
      await this.producer.disconnect();
      this.producer = null;
    }
  }
  async subscribe(options, handler) {
    const topic = options.endpoint;
    if (!topic) {
      throw new Error("endpoint (topic) is required for subscribing");
    }
    if (!this.groupId) {
      throw new Error("groupId is required for subscribing");
    }
    this.handlers.set(
      topic,
      handler
    );
    this.subscriptions.push({
      topic,
      concurrency: options.concurrency ?? 1
    });
  }
  async publish(message, options) {
    const topic = options.endpoint || this.defaultTopic;
    if (!topic) {
      throw new Error("endpoint (topic) is required for publishing");
    }
    if (!this.producer) {
      this.producer = this.kafka.producer();
      await this.producer.connect();
    }
    const { key, headers = {} } = options;
    const envelope = {
      id: (0, import_node_crypto.randomUUID)(),
      type: message.type,
      payload: message,
      headers: {
        ...headers,
        "x-message-type": message.type
      },
      timestamp: /* @__PURE__ */ new Date(),
      partitionKey: key
    };
    const partitionKey = key ?? envelope.id;
    const kafkaHeaders = {
      messageId: envelope.id,
      messageType: envelope.type,
      ...headers
    };
    await this.producer.send({
      topic,
      messages: [
        {
          key: partitionKey,
          value: JSON.stringify(envelope),
          headers: kafkaHeaders
        }
      ]
    });
  }
  async ensureTopics() {
    const admin = this.kafka.admin();
    await admin.connect();
    try {
      const existingTopics = await admin.listTopics();
      const topicsToCreate = this.subscriptions.map((s) => s.topic).filter((topic) => !existingTopics.includes(topic));
      if (topicsToCreate.length > 0) {
        await admin.createTopics({
          topics: topicsToCreate.map((topic) => ({
            topic,
            numPartitions: this.numPartitions,
            replicationFactor: this.replicationFactor
          }))
        });
      }
    } finally {
      await admin.disconnect();
    }
  }
  async processMessage(payload) {
    const { topic, partition, message } = payload;
    if (!message.value) {
      return;
    }
    try {
      const parsed = JSON.parse(message.value.toString());
      const envelope = {
        ...parsed,
        timestamp: new Date(parsed.timestamp)
      };
      const handler = this.handlers.get(topic);
      if (handler) {
        await handler(envelope);
      }
      await this.consumer.commitOffsets([
        {
          topic,
          partition,
          offset: (BigInt(message.offset) + 1n).toString()
        }
      ]);
    } catch (error) {
      console.error("[Kafka] Message processing error:", error);
      throw error;
    }
  }
  /**
   * Check if the transport is running.
   */
  isStarted() {
    return this.isRunning;
  }
  /**
   * Get transport statistics.
   */
  getStats() {
    return {
      subscriptionCount: this.subscriptions.length,
      isRunning: this.isRunning
    };
  }
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  KafkaTransport
});
//# sourceMappingURL=index.cjs.map

package/dist/index.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/index.ts","../src/KafkaTransport.ts"],"sourcesContent":["export { KafkaTransport } from \"./KafkaTransport.js\";\nexport type { KafkaTransportOptions } from \"./types.js\";\n","import { randomUUID } from \"node:crypto\";\nimport type {\n Kafka,\n Producer,\n Consumer,\n EachMessagePayload,\n Admin,\n} from \"kafkajs\";\nimport type {\n Transport,\n TransportSubscribeOptions,\n TransportPublishOptions,\n BaseMessage,\n MessageEnvelope,\n} from \"@saga-bus/core\";\nimport type { KafkaTransportOptions, KafkaSubscription } from \"./types.js\";\n\n/**\n * Kafka transport for saga-bus using KafkaJS.\n *\n * Uses partition keys for message ordering within a saga.\n * Consumer groups enable horizontal scaling.\n *\n * @example\n * ```typescript\n * import { Kafka } from \"kafkajs\";\n *\n * const kafka = new Kafka({\n * clientId: \"my-app\",\n * brokers: [\"localhost:9092\"],\n * });\n *\n * const transport = new KafkaTransport({\n * kafka,\n * defaultTopic: \"saga-bus.events\",\n * groupId: \"order-processor\",\n * });\n *\n * await transport.start();\n *\n * await transport.subscribe(\n * { endpoint: \"saga-bus.orders\", concurrency: 5 },\n * async (envelope) => { ... }\n * );\n *\n * await transport.publish(\n * { type: \"OrderSubmitted\", orderId: \"123\" },\n * { endpoint: \"saga-bus.orders\", key: \"order-123\" }\n * );\n * ```\n */\nexport class KafkaTransport implements Transport {\n private readonly kafka: Kafka;\n private readonly defaultTopic: string | undefined;\n private readonly groupId: string | undefined;\n private readonly createTopics: boolean;\n private readonly numPartitions: number;\n private readonly replicationFactor: number;\n private readonly fromBeginning: boolean;\n private readonly sessionTimeout: number;\n private readonly heartbeatInterval: number;\n\n private producer: Producer | null = null;\n private consumer: Consumer | null = null;\n private readonly handlers = new Map<\n string,\n (envelope: MessageEnvelope) => Promise<void>\n >();\n private readonly subscriptions: KafkaSubscription[] = [];\n private isRunning = false;\n\n constructor(options: KafkaTransportOptions) {\n this.kafka = options.kafka;\n this.defaultTopic = options.defaultTopic;\n this.groupId = options.groupId;\n this.createTopics = options.createTopics ?? false;\n this.numPartitions = options.numPartitions ?? 3;\n this.replicationFactor = options.replicationFactor ?? 1;\n this.fromBeginning = options.fromBeginning ?? false;\n this.sessionTimeout = options.sessionTimeout ?? 30000;\n this.heartbeatInterval = options.heartbeatInterval ?? 
3000;\n }\n\n async start(): Promise<void> {\n if (this.isRunning) {\n return;\n }\n\n // Connect producer\n this.producer = this.kafka.producer();\n await this.producer.connect();\n\n // If we have subscriptions, start consumer\n if (this.subscriptions.length > 0) {\n if (!this.groupId) {\n throw new Error(\"groupId is required for consuming\");\n }\n\n // Create topics if needed\n if (this.createTopics) {\n await this.ensureTopics();\n }\n\n // Create and connect consumer\n this.consumer = this.kafka.consumer({\n groupId: this.groupId,\n sessionTimeout: this.sessionTimeout,\n heartbeatInterval: this.heartbeatInterval,\n });\n\n await this.consumer.connect();\n\n // Subscribe to all topics\n for (const sub of this.subscriptions) {\n await this.consumer.subscribe({\n topic: sub.topic,\n fromBeginning: this.fromBeginning,\n });\n }\n\n this.isRunning = true;\n\n // Start consuming with manual commit\n await this.consumer.run({\n autoCommit: false,\n eachMessage: async (payload: EachMessagePayload) => {\n await this.processMessage(payload);\n },\n });\n } else {\n this.isRunning = true;\n }\n }\n\n async stop(): Promise<void> {\n this.isRunning = false;\n\n if (this.consumer) {\n await this.consumer.disconnect();\n this.consumer = null;\n }\n\n if (this.producer) {\n await this.producer.disconnect();\n this.producer = null;\n }\n }\n\n async subscribe<TMessage extends BaseMessage>(\n options: TransportSubscribeOptions,\n handler: (envelope: MessageEnvelope<TMessage>) => Promise<void>\n ): Promise<void> {\n const topic = options.endpoint;\n\n if (!topic) {\n throw new Error(\"endpoint (topic) is required for subscribing\");\n }\n\n if (!this.groupId) {\n throw new Error(\"groupId is required for subscribing\");\n }\n\n // Store handler\n this.handlers.set(\n topic,\n handler as (envelope: MessageEnvelope) => Promise<void>\n );\n\n this.subscriptions.push({\n topic,\n concurrency: options.concurrency ?? 1,\n });\n }\n\n async publish<TMessage extends BaseMessage>(\n message: TMessage,\n options: TransportPublishOptions\n ): Promise<void> {\n const topic = options.endpoint || this.defaultTopic;\n\n if (!topic) {\n throw new Error(\"endpoint (topic) is required for publishing\");\n }\n\n // Lazily connect producer if not started\n if (!this.producer) {\n this.producer = this.kafka.producer();\n await this.producer.connect();\n }\n\n const { key, headers = {} } = options;\n\n // Create envelope\n const envelope: MessageEnvelope<TMessage> = {\n id: randomUUID(),\n type: message.type,\n payload: message,\n headers: {\n ...headers,\n \"x-message-type\": message.type,\n },\n timestamp: new Date(),\n partitionKey: key,\n };\n\n // Partition key for ordering\n const partitionKey = key ?? 
envelope.id;\n\n // Convert headers to Kafka format (Buffer values)\n const kafkaHeaders: Record<string, string> = {\n messageId: envelope.id,\n messageType: envelope.type,\n ...headers,\n };\n\n await this.producer.send({\n topic,\n messages: [\n {\n key: partitionKey,\n value: JSON.stringify(envelope),\n headers: kafkaHeaders,\n },\n ],\n });\n }\n\n private async ensureTopics(): Promise<void> {\n const admin: Admin = this.kafka.admin();\n await admin.connect();\n\n try {\n const existingTopics = await admin.listTopics();\n const topicsToCreate = this.subscriptions\n .map((s) => s.topic)\n .filter((topic) => !existingTopics.includes(topic));\n\n if (topicsToCreate.length > 0) {\n await admin.createTopics({\n topics: topicsToCreate.map((topic) => ({\n topic,\n numPartitions: this.numPartitions,\n replicationFactor: this.replicationFactor,\n })),\n });\n }\n } finally {\n await admin.disconnect();\n }\n }\n\n private async processMessage(payload: EachMessagePayload): Promise<void> {\n const { topic, partition, message } = payload;\n\n if (!message.value) {\n return;\n }\n\n try {\n // Parse envelope\n const parsed = JSON.parse(message.value.toString()) as MessageEnvelope;\n\n // Reconstruct Date objects\n const envelope: MessageEnvelope = {\n ...parsed,\n timestamp: new Date(parsed.timestamp),\n };\n\n // Find handler for this topic\n const handler = this.handlers.get(topic);\n\n if (handler) {\n await handler(envelope);\n }\n\n // Commit offset on success\n await this.consumer!.commitOffsets([\n {\n topic,\n partition,\n offset: (BigInt(message.offset) + 1n).toString(),\n },\n ]);\n } catch (error) {\n console.error(\"[Kafka] Message processing error:\", error);\n // Don't commit - message will be redelivered\n throw error;\n }\n }\n\n /**\n * Check if the transport is running.\n */\n isStarted(): boolean {\n return this.isRunning;\n }\n\n /**\n * Get transport statistics.\n */\n getStats(): { subscriptionCount: number; isRunning: boolean } {\n return {\n subscriptionCount: this.subscriptions.length,\n isRunning: this.isRunning,\n };\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,yBAA2B;AAmDpB,IAAM,iBAAN,MAA0C;AAAA,EAC9B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,WAA4B;AAAA,EAC5B,WAA4B;AAAA,EACnB,WAAW,oBAAI,IAG9B;AAAA,EACe,gBAAqC,CAAC;AAAA,EAC/C,YAAY;AAAA,EAEpB,YAAY,SAAgC;AAC1C,SAAK,QAAQ,QAAQ;AACrB,SAAK,eAAe,QAAQ;AAC5B,SAAK,UAAU,QAAQ;AACvB,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,gBAAgB,QAAQ,iBAAiB;AAC9C,SAAK,oBAAoB,QAAQ,qBAAqB;AACtD,SAAK,gBAAgB,QAAQ,iBAAiB;AAC9C,SAAK,iBAAiB,QAAQ,kBAAkB;AAChD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EACxD;AAAA,EAEA,MAAM,QAAuB;AAC3B,QAAI,KAAK,WAAW;AAClB;AAAA,IACF;AAGA,SAAK,WAAW,KAAK,MAAM,SAAS;AACpC,UAAM,KAAK,SAAS,QAAQ;AAG5B,QAAI,KAAK,cAAc,SAAS,GAAG;AACjC,UAAI,CAAC,KAAK,SAAS;AACjB,cAAM,IAAI,MAAM,mCAAmC;AAAA,MACrD;AAGA,UAAI,KAAK,cAAc;AACrB,cAAM,KAAK,aAAa;AAAA,MAC1B;AAGA,WAAK,WAAW,KAAK,MAAM,SAAS;AAAA,QAClC,SAAS,KAAK;AAAA,QACd,gBAAgB,KAAK;AAAA,QACrB,mBAAmB,KAAK;AAAA,MAC1B,CAAC;AAED,YAAM,KAAK,SAAS,QAAQ;AAG5B,iBAAW,OAAO,KAAK,eAAe;AACpC,cAAM,KAAK,SAAS,UAAU;AAAA,UAC5B,OAAO,IAAI;AAAA,UACX,eAAe,KAAK;AAAA,QACtB,CAAC;AAAA,MACH;AAEA,WAAK,YAAY;AAGjB,YAAM,KAAK,SAAS,IAAI;AAAA,QACtB,YAAY;AAAA,QACZ,aAAa,OAAO,YAAgC;AAClD,gBAAM,KAAK,eAAe,OAAO;AAAA,QACnC;AAAA,MACF,CAAC;AAAA,IACH,OAAO;AACL,WAAK,YAAY;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,MAAM,OAAsB;AAC1B,SAAK,YAAY;AAEjB,QAAI,KAAK,UAAU;AACjB,YAAM,KAAK,SAAS,WAAW;AAC/B,WAAK,WAAW;AAAA,IAClB;AAEA,QAAI,KAAK,UAAU;AACjB,YAAM,KAAK,SAAS,WAAW;AAC/B,WAAK,WAAW;AAAA,IAClB;AAAA,EACF;AAAA,EAEA,MAAM,UACJ,SACA,SACe;AACf,UAAM,QAAQ,QAAQ;AAEtB,QAAI,CAAC,OAAO;AACV,YAAM,IAAI,MAAM,8CAA8C;AAAA,IAChE;AAEA,QAAI,CAAC,KAAK,SAAS;AACjB,YAAM,IAAI,MAAM,qCAAqC;AAAA,IACvD;AAGA,SAAK,SAAS;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAEA,SAAK,cAAc,KAAK;AAAA,MACtB;AAAA,MACA,aAAa,QAAQ,eAAe;AAAA,IACtC,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,QACJ,SACA,SACe;AACf,UAAM,QAAQ,QAAQ,YAAY,KAAK;AAEvC,QAAI,CAAC,OAAO;AACV,YAAM,IAAI,MAAM,6CAA6C;AAAA,IAC/D;AAGA,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,WAAW,KAAK,MAAM,SAAS;AACpC,YAAM,KAAK,SAAS,QAAQ;AAAA,IAC9B;AAEA,UAAM,EAAE,KAAK,UAAU,CAAC,EAAE,IAAI;AAG9B,UAAM,WAAsC;AAAA,MAC1C,QAAI,+BAAW;AAAA,MACf,MAAM,QAAQ;AAAA,MACd,SAAS;AAAA,MACT,SAAS;AAAA,QACP,GAAG;AAAA,QACH,kBAAkB,QAAQ;AAAA,MAC5B;AAAA,MACA,WAAW,oBAAI,KAAK;AAAA,MACpB,cAAc;AAAA,IAChB;AAGA,UAAM,eAAe,OAAO,SAAS;AAGrC,UAAM,eAAuC;AAAA,MAC3C,WAAW,SAAS;AAAA,MACpB,aAAa,SAAS;AAAA,MACtB,GAAG;AAAA,IACL;AAEA,UAAM,KAAK,SAAS,KAAK;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,QACR;AAAA,UACE,KAAK;AAAA,UACL,OAAO,KAAK,UAAU,QAAQ;AAAA,UAC9B,SAAS;AAAA,QACX;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAc,eAA8B;AAC1C,UAAM,QAAe,KAAK,MAAM,MAAM;AACtC,UAAM,MAAM,QAAQ;AAEpB,QAAI;AACF,YAAM,iBAAiB,MAAM,MAAM,WAAW;AAC9C,YAAM,iBAAiB,KAAK,cACzB,IAAI,CAAC,MAAM,EAAE,KAAK,EAClB,OAAO,CAAC,UAAU,CAAC,eAAe,SAAS,KAAK,CAAC;AAEpD,UAAI,eAAe,SAAS,GAAG;AAC7B,cAAM,MAAM,aAAa;AAAA,UACvB,QAAQ,eAAe,IAAI,CAAC,WAAW;AAAA,YACrC;AAAA,YACA,eAAe,KAAK;AAAA,YACpB,mBAAmB,KAAK;AAAA,UAC1B,EAAE;AAAA,QACJ,CAAC;AAAA,MACH;AAAA,IACF,UAAE;AACA,YAAM,MAAM,WAAW;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,MAAc,eAAe,SAA4C;AACvE,UAAM,EAAE,OAAO,WAAW,QAAQ,IAAI;AAEtC,QAAI,CAAC,QAAQ,OAAO;AAClB;AAAA,IACF;AAEA,QAAI;AAEF,YAAM,SAAS,KAAK,MAAM,QAAQ,MAAM,SAAS,CAAC;AAGlD,YAAM,WAA4B;AAAA,QAChC,GAAG;AAAA,QACH,WAAW,IAAI,KAAK,OAAO,SAAS;AAAA,MACtC;AAGA,YAAM,UAAU,KAAK,SAAS,IAAI,KAAK;AAEvC,UAAI,SAAS;AACX,cAAM,QAAQ,QAAQ;AAAA,MACxB;AAGA,YAAM,KAAK,SAAU,cAAc;AAAA,QACjC;AAAA,UACE;AAAA,UACA;AAAA,UACA,SAAS,OAAO,QAAQ,MAAM,IAAI,IAAI,SAAS;AAAA,QACjD;AAAA,MACF,CAAC;AAAA,IACH,SAAS,OAAO;AACd,cAAQ,MAAM,qCAAqC,KAAK;AAExD,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,YAAqB;AACnB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,WAA8D;AAC5D
,WAAO;AAAA,MACL,mBAAmB,KAAK,cAAc;AAAA,MACtC,WAAW,KAAK;AAAA,IAClB;AAAA,EACF;AACF;","names":[]}
package/dist/index.d.cts
ADDED
@@ -0,0 +1,123 @@
import { Transport, BaseMessage, TransportSubscribeOptions, MessageEnvelope, TransportPublishOptions } from '@saga-bus/core';
import { Kafka } from 'kafkajs';

/**
 * Kafka transport configuration options.
 */
interface KafkaTransportOptions {
    /**
     * KafkaJS Kafka instance.
     */
    kafka: Kafka;
    /**
     * Default topic for publish/subscribe.
     * Can be overridden per-operation via endpoint.
     */
    defaultTopic?: string;
    /**
     * Consumer group ID.
     * Required for subscribing.
     */
    groupId?: string;
    /**
     * Whether to create topics if they don't exist.
     * @default false
     */
    createTopics?: boolean;
    /**
     * Number of partitions when creating topics.
     * @default 3
     */
    numPartitions?: number;
    /**
     * Replication factor when creating topics.
     * @default 1
     */
    replicationFactor?: number;
    /**
     * Whether to start consuming from the beginning.
     * @default false (latest)
     */
    fromBeginning?: boolean;
    /**
     * Session timeout in milliseconds.
     * @default 30000
     */
    sessionTimeout?: number;
    /**
     * Heartbeat interval in milliseconds.
     * @default 3000
     */
    heartbeatInterval?: number;
}

/**
 * Kafka transport for saga-bus using KafkaJS.
 *
 * Uses partition keys for message ordering within a saga.
 * Consumer groups enable horizontal scaling.
 *
 * @example
 * ```typescript
 * import { Kafka } from "kafkajs";
 *
 * const kafka = new Kafka({
 *   clientId: "my-app",
 *   brokers: ["localhost:9092"],
 * });
 *
 * const transport = new KafkaTransport({
 *   kafka,
 *   defaultTopic: "saga-bus.events",
 *   groupId: "order-processor",
 * });
 *
 * await transport.start();
 *
 * await transport.subscribe(
 *   { endpoint: "saga-bus.orders", concurrency: 5 },
 *   async (envelope) => { ... }
 * );
 *
 * await transport.publish(
 *   { type: "OrderSubmitted", orderId: "123" },
 *   { endpoint: "saga-bus.orders", key: "order-123" }
 * );
 * ```
 */
declare class KafkaTransport implements Transport {
    private readonly kafka;
    private readonly defaultTopic;
    private readonly groupId;
    private readonly createTopics;
    private readonly numPartitions;
    private readonly replicationFactor;
    private readonly fromBeginning;
    private readonly sessionTimeout;
    private readonly heartbeatInterval;
    private producer;
    private consumer;
    private readonly handlers;
    private readonly subscriptions;
    private isRunning;
    constructor(options: KafkaTransportOptions);
    start(): Promise<void>;
    stop(): Promise<void>;
    subscribe<TMessage extends BaseMessage>(options: TransportSubscribeOptions, handler: (envelope: MessageEnvelope<TMessage>) => Promise<void>): Promise<void>;
    publish<TMessage extends BaseMessage>(message: TMessage, options: TransportPublishOptions): Promise<void>;
    private ensureTopics;
    private processMessage;
    /**
     * Check if the transport is running.
     */
    isStarted(): boolean;
    /**
     * Get transport statistics.
     */
    getStats(): {
        subscriptionCount: number;
        isRunning: boolean;
    };
}

export { KafkaTransport, type KafkaTransportOptions };

package/dist/index.d.ts
ADDED
@@ -0,0 +1,123 @@
import { Transport, BaseMessage, TransportSubscribeOptions, MessageEnvelope, TransportPublishOptions } from '@saga-bus/core';
import { Kafka } from 'kafkajs';

/**
 * Kafka transport configuration options.
 */
interface KafkaTransportOptions {
    /**
     * KafkaJS Kafka instance.
     */
    kafka: Kafka;
    /**
     * Default topic for publish/subscribe.
     * Can be overridden per-operation via endpoint.
     */
    defaultTopic?: string;
    /**
     * Consumer group ID.
     * Required for subscribing.
     */
    groupId?: string;
    /**
     * Whether to create topics if they don't exist.
     * @default false
     */
    createTopics?: boolean;
    /**
     * Number of partitions when creating topics.
     * @default 3
     */
    numPartitions?: number;
    /**
     * Replication factor when creating topics.
     * @default 1
     */
    replicationFactor?: number;
    /**
     * Whether to start consuming from the beginning.
     * @default false (latest)
     */
    fromBeginning?: boolean;
    /**
     * Session timeout in milliseconds.
     * @default 30000
     */
    sessionTimeout?: number;
    /**
     * Heartbeat interval in milliseconds.
     * @default 3000
     */
    heartbeatInterval?: number;
}

/**
 * Kafka transport for saga-bus using KafkaJS.
 *
 * Uses partition keys for message ordering within a saga.
 * Consumer groups enable horizontal scaling.
 *
 * @example
 * ```typescript
 * import { Kafka } from "kafkajs";
 *
 * const kafka = new Kafka({
 *   clientId: "my-app",
 *   brokers: ["localhost:9092"],
 * });
 *
 * const transport = new KafkaTransport({
 *   kafka,
 *   defaultTopic: "saga-bus.events",
 *   groupId: "order-processor",
 * });
 *
 * await transport.start();
 *
 * await transport.subscribe(
 *   { endpoint: "saga-bus.orders", concurrency: 5 },
 *   async (envelope) => { ... }
 * );
 *
 * await transport.publish(
 *   { type: "OrderSubmitted", orderId: "123" },
 *   { endpoint: "saga-bus.orders", key: "order-123" }
 * );
 * ```
 */
declare class KafkaTransport implements Transport {
    private readonly kafka;
    private readonly defaultTopic;
    private readonly groupId;
    private readonly createTopics;
    private readonly numPartitions;
    private readonly replicationFactor;
    private readonly fromBeginning;
    private readonly sessionTimeout;
    private readonly heartbeatInterval;
    private producer;
    private consumer;
    private readonly handlers;
    private readonly subscriptions;
    private isRunning;
    constructor(options: KafkaTransportOptions);
    start(): Promise<void>;
    stop(): Promise<void>;
    subscribe<TMessage extends BaseMessage>(options: TransportSubscribeOptions, handler: (envelope: MessageEnvelope<TMessage>) => Promise<void>): Promise<void>;
    publish<TMessage extends BaseMessage>(message: TMessage, options: TransportPublishOptions): Promise<void>;
    private ensureTopics;
    private processMessage;
    /**
     * Check if the transport is running.
     */
    isStarted(): boolean;
    /**
     * Get transport statistics.
     */
    getStats(): {
        subscriptionCount: number;
        isRunning: boolean;
    };
}

export { KafkaTransport, type KafkaTransportOptions };

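The declarations above make `subscribe` generic over the message type. Below is a minimal typed subscriber against this surface (a sketch: `OrderSubmitted` is a hypothetical message type, and the `BaseMessage`/`MessageEnvelope` shapes are assumed from @saga-bus/core). Note that subscriptions must be registered before `start()`, since the compiled `start()` only creates a consumer for topics already registered:

```typescript
import { Kafka } from "kafkajs";
import { KafkaTransport } from "@saga-bus/transport-kafka";

// Hypothetical message type; assumes BaseMessage requires a string `type`.
interface OrderSubmitted {
  type: "OrderSubmitted";
  orderId: string;
}

const transport = new KafkaTransport({
  kafka: new Kafka({ clientId: "worker", brokers: ["localhost:9092"] }),
  groupId: "order-processor",
});

// Register the subscription first...
await transport.subscribe<OrderSubmitted>(
  { endpoint: "saga-bus.orders" },
  async (envelope) => {
    // envelope.payload is typed as OrderSubmitted
    console.log(envelope.payload.orderId);
  }
);

// ...then start, which connects the consumer and begins processing.
await transport.start();
```
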
package/dist/index.js
ADDED
@@ -0,0 +1,196 @@
// src/KafkaTransport.ts
import { randomUUID } from "crypto";
var KafkaTransport = class {
  kafka;
  defaultTopic;
  groupId;
  createTopics;
  numPartitions;
  replicationFactor;
  fromBeginning;
  sessionTimeout;
  heartbeatInterval;
  producer = null;
  consumer = null;
  handlers = /* @__PURE__ */ new Map();
  subscriptions = [];
  isRunning = false;
  constructor(options) {
    this.kafka = options.kafka;
    this.defaultTopic = options.defaultTopic;
    this.groupId = options.groupId;
    this.createTopics = options.createTopics ?? false;
    this.numPartitions = options.numPartitions ?? 3;
    this.replicationFactor = options.replicationFactor ?? 1;
    this.fromBeginning = options.fromBeginning ?? false;
    this.sessionTimeout = options.sessionTimeout ?? 3e4;
    this.heartbeatInterval = options.heartbeatInterval ?? 3e3;
  }
  async start() {
    if (this.isRunning) {
      return;
    }
    this.producer = this.kafka.producer();
    await this.producer.connect();
    if (this.subscriptions.length > 0) {
      if (!this.groupId) {
        throw new Error("groupId is required for consuming");
      }
      if (this.createTopics) {
        await this.ensureTopics();
      }
      this.consumer = this.kafka.consumer({
        groupId: this.groupId,
        sessionTimeout: this.sessionTimeout,
        heartbeatInterval: this.heartbeatInterval
      });
      await this.consumer.connect();
      for (const sub of this.subscriptions) {
        await this.consumer.subscribe({
          topic: sub.topic,
          fromBeginning: this.fromBeginning
        });
      }
      this.isRunning = true;
      await this.consumer.run({
        autoCommit: false,
        eachMessage: async (payload) => {
          await this.processMessage(payload);
        }
      });
    } else {
      this.isRunning = true;
    }
  }
  async stop() {
    this.isRunning = false;
    if (this.consumer) {
      await this.consumer.disconnect();
      this.consumer = null;
    }
    if (this.producer) {
      await this.producer.disconnect();
      this.producer = null;
    }
  }
  async subscribe(options, handler) {
    const topic = options.endpoint;
    if (!topic) {
      throw new Error("endpoint (topic) is required for subscribing");
    }
    if (!this.groupId) {
      throw new Error("groupId is required for subscribing");
    }
    this.handlers.set(
      topic,
      handler
    );
    this.subscriptions.push({
      topic,
      concurrency: options.concurrency ?? 1
    });
  }
  async publish(message, options) {
    const topic = options.endpoint || this.defaultTopic;
    if (!topic) {
      throw new Error("endpoint (topic) is required for publishing");
    }
    if (!this.producer) {
      this.producer = this.kafka.producer();
      await this.producer.connect();
    }
    const { key, headers = {} } = options;
    const envelope = {
      id: randomUUID(),
      type: message.type,
      payload: message,
      headers: {
        ...headers,
        "x-message-type": message.type
      },
      timestamp: /* @__PURE__ */ new Date(),
      partitionKey: key
    };
    const partitionKey = key ?? envelope.id;
    const kafkaHeaders = {
      messageId: envelope.id,
      messageType: envelope.type,
      ...headers
    };
    await this.producer.send({
      topic,
      messages: [
        {
          key: partitionKey,
          value: JSON.stringify(envelope),
          headers: kafkaHeaders
        }
      ]
    });
  }
  async ensureTopics() {
    const admin = this.kafka.admin();
    await admin.connect();
    try {
      const existingTopics = await admin.listTopics();
      const topicsToCreate = this.subscriptions.map((s) => s.topic).filter((topic) => !existingTopics.includes(topic));
      if (topicsToCreate.length > 0) {
        await admin.createTopics({
          topics: topicsToCreate.map((topic) => ({
            topic,
            numPartitions: this.numPartitions,
            replicationFactor: this.replicationFactor
          }))
        });
      }
    } finally {
      await admin.disconnect();
    }
  }
  async processMessage(payload) {
    const { topic, partition, message } = payload;
    if (!message.value) {
      return;
    }
    try {
      const parsed = JSON.parse(message.value.toString());
      const envelope = {
        ...parsed,
        timestamp: new Date(parsed.timestamp)
      };
      const handler = this.handlers.get(topic);
      if (handler) {
        await handler(envelope);
      }
      await this.consumer.commitOffsets([
        {
          topic,
          partition,
          offset: (BigInt(message.offset) + 1n).toString()
        }
      ]);
    } catch (error) {
      console.error("[Kafka] Message processing error:", error);
      throw error;
    }
  }
  /**
   * Check if the transport is running.
   */
  isStarted() {
    return this.isRunning;
  }
  /**
   * Get transport statistics.
   */
  getStats() {
    return {
      subscriptionCount: this.subscriptions.length,
      isRunning: this.isRunning
    };
  }
};
export {
  KafkaTransport
};
//# sourceMappingURL=index.js.map

package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/KafkaTransport.ts"],"sourcesContent":["import { randomUUID } from \"node:crypto\";\nimport type {\n Kafka,\n Producer,\n Consumer,\n EachMessagePayload,\n Admin,\n} from \"kafkajs\";\nimport type {\n Transport,\n TransportSubscribeOptions,\n TransportPublishOptions,\n BaseMessage,\n MessageEnvelope,\n} from \"@saga-bus/core\";\nimport type { KafkaTransportOptions, KafkaSubscription } from \"./types.js\";\n\n/**\n * Kafka transport for saga-bus using KafkaJS.\n *\n * Uses partition keys for message ordering within a saga.\n * Consumer groups enable horizontal scaling.\n *\n * @example\n * ```typescript\n * import { Kafka } from \"kafkajs\";\n *\n * const kafka = new Kafka({\n * clientId: \"my-app\",\n * brokers: [\"localhost:9092\"],\n * });\n *\n * const transport = new KafkaTransport({\n * kafka,\n * defaultTopic: \"saga-bus.events\",\n * groupId: \"order-processor\",\n * });\n *\n * await transport.start();\n *\n * await transport.subscribe(\n * { endpoint: \"saga-bus.orders\", concurrency: 5 },\n * async (envelope) => { ... }\n * );\n *\n * await transport.publish(\n * { type: \"OrderSubmitted\", orderId: \"123\" },\n * { endpoint: \"saga-bus.orders\", key: \"order-123\" }\n * );\n * ```\n */\nexport class KafkaTransport implements Transport {\n private readonly kafka: Kafka;\n private readonly defaultTopic: string | undefined;\n private readonly groupId: string | undefined;\n private readonly createTopics: boolean;\n private readonly numPartitions: number;\n private readonly replicationFactor: number;\n private readonly fromBeginning: boolean;\n private readonly sessionTimeout: number;\n private readonly heartbeatInterval: number;\n\n private producer: Producer | null = null;\n private consumer: Consumer | null = null;\n private readonly handlers = new Map<\n string,\n (envelope: MessageEnvelope) => Promise<void>\n >();\n private readonly subscriptions: KafkaSubscription[] = [];\n private isRunning = false;\n\n constructor(options: KafkaTransportOptions) {\n this.kafka = options.kafka;\n this.defaultTopic = options.defaultTopic;\n this.groupId = options.groupId;\n this.createTopics = options.createTopics ?? false;\n this.numPartitions = options.numPartitions ?? 3;\n this.replicationFactor = options.replicationFactor ?? 1;\n this.fromBeginning = options.fromBeginning ?? false;\n this.sessionTimeout = options.sessionTimeout ?? 30000;\n this.heartbeatInterval = options.heartbeatInterval ?? 
3000;\n }\n\n async start(): Promise<void> {\n if (this.isRunning) {\n return;\n }\n\n // Connect producer\n this.producer = this.kafka.producer();\n await this.producer.connect();\n\n // If we have subscriptions, start consumer\n if (this.subscriptions.length > 0) {\n if (!this.groupId) {\n throw new Error(\"groupId is required for consuming\");\n }\n\n // Create topics if needed\n if (this.createTopics) {\n await this.ensureTopics();\n }\n\n // Create and connect consumer\n this.consumer = this.kafka.consumer({\n groupId: this.groupId,\n sessionTimeout: this.sessionTimeout,\n heartbeatInterval: this.heartbeatInterval,\n });\n\n await this.consumer.connect();\n\n // Subscribe to all topics\n for (const sub of this.subscriptions) {\n await this.consumer.subscribe({\n topic: sub.topic,\n fromBeginning: this.fromBeginning,\n });\n }\n\n this.isRunning = true;\n\n // Start consuming with manual commit\n await this.consumer.run({\n autoCommit: false,\n eachMessage: async (payload: EachMessagePayload) => {\n await this.processMessage(payload);\n },\n });\n } else {\n this.isRunning = true;\n }\n }\n\n async stop(): Promise<void> {\n this.isRunning = false;\n\n if (this.consumer) {\n await this.consumer.disconnect();\n this.consumer = null;\n }\n\n if (this.producer) {\n await this.producer.disconnect();\n this.producer = null;\n }\n }\n\n async subscribe<TMessage extends BaseMessage>(\n options: TransportSubscribeOptions,\n handler: (envelope: MessageEnvelope<TMessage>) => Promise<void>\n ): Promise<void> {\n const topic = options.endpoint;\n\n if (!topic) {\n throw new Error(\"endpoint (topic) is required for subscribing\");\n }\n\n if (!this.groupId) {\n throw new Error(\"groupId is required for subscribing\");\n }\n\n // Store handler\n this.handlers.set(\n topic,\n handler as (envelope: MessageEnvelope) => Promise<void>\n );\n\n this.subscriptions.push({\n topic,\n concurrency: options.concurrency ?? 1,\n });\n }\n\n async publish<TMessage extends BaseMessage>(\n message: TMessage,\n options: TransportPublishOptions\n ): Promise<void> {\n const topic = options.endpoint || this.defaultTopic;\n\n if (!topic) {\n throw new Error(\"endpoint (topic) is required for publishing\");\n }\n\n // Lazily connect producer if not started\n if (!this.producer) {\n this.producer = this.kafka.producer();\n await this.producer.connect();\n }\n\n const { key, headers = {} } = options;\n\n // Create envelope\n const envelope: MessageEnvelope<TMessage> = {\n id: randomUUID(),\n type: message.type,\n payload: message,\n headers: {\n ...headers,\n \"x-message-type\": message.type,\n },\n timestamp: new Date(),\n partitionKey: key,\n };\n\n // Partition key for ordering\n const partitionKey = key ?? 
envelope.id;\n\n // Convert headers to Kafka format (Buffer values)\n const kafkaHeaders: Record<string, string> = {\n messageId: envelope.id,\n messageType: envelope.type,\n ...headers,\n };\n\n await this.producer.send({\n topic,\n messages: [\n {\n key: partitionKey,\n value: JSON.stringify(envelope),\n headers: kafkaHeaders,\n },\n ],\n });\n }\n\n private async ensureTopics(): Promise<void> {\n const admin: Admin = this.kafka.admin();\n await admin.connect();\n\n try {\n const existingTopics = await admin.listTopics();\n const topicsToCreate = this.subscriptions\n .map((s) => s.topic)\n .filter((topic) => !existingTopics.includes(topic));\n\n if (topicsToCreate.length > 0) {\n await admin.createTopics({\n topics: topicsToCreate.map((topic) => ({\n topic,\n numPartitions: this.numPartitions,\n replicationFactor: this.replicationFactor,\n })),\n });\n }\n } finally {\n await admin.disconnect();\n }\n }\n\n private async processMessage(payload: EachMessagePayload): Promise<void> {\n const { topic, partition, message } = payload;\n\n if (!message.value) {\n return;\n }\n\n try {\n // Parse envelope\n const parsed = JSON.parse(message.value.toString()) as MessageEnvelope;\n\n // Reconstruct Date objects\n const envelope: MessageEnvelope = {\n ...parsed,\n timestamp: new Date(parsed.timestamp),\n };\n\n // Find handler for this topic\n const handler = this.handlers.get(topic);\n\n if (handler) {\n await handler(envelope);\n }\n\n // Commit offset on success\n await this.consumer!.commitOffsets([\n {\n topic,\n partition,\n offset: (BigInt(message.offset) + 1n).toString(),\n },\n ]);\n } catch (error) {\n console.error(\"[Kafka] Message processing error:\", error);\n // Don't commit - message will be redelivered\n throw error;\n }\n }\n\n /**\n * Check if the transport is running.\n */\n isStarted(): boolean {\n return this.isRunning;\n }\n\n /**\n * Get transport statistics.\n */\n getStats(): { subscriptionCount: number; isRunning: boolean } {\n return {\n subscriptionCount: this.subscriptions.length,\n isRunning: this.isRunning,\n };\n 
}\n}\n"],"mappings":";AAAA,SAAS,kBAAkB;AAmDpB,IAAM,iBAAN,MAA0C;AAAA,EAC9B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,WAA4B;AAAA,EAC5B,WAA4B;AAAA,EACnB,WAAW,oBAAI,IAG9B;AAAA,EACe,gBAAqC,CAAC;AAAA,EAC/C,YAAY;AAAA,EAEpB,YAAY,SAAgC;AAC1C,SAAK,QAAQ,QAAQ;AACrB,SAAK,eAAe,QAAQ;AAC5B,SAAK,UAAU,QAAQ;AACvB,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,gBAAgB,QAAQ,iBAAiB;AAC9C,SAAK,oBAAoB,QAAQ,qBAAqB;AACtD,SAAK,gBAAgB,QAAQ,iBAAiB;AAC9C,SAAK,iBAAiB,QAAQ,kBAAkB;AAChD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EACxD;AAAA,EAEA,MAAM,QAAuB;AAC3B,QAAI,KAAK,WAAW;AAClB;AAAA,IACF;AAGA,SAAK,WAAW,KAAK,MAAM,SAAS;AACpC,UAAM,KAAK,SAAS,QAAQ;AAG5B,QAAI,KAAK,cAAc,SAAS,GAAG;AACjC,UAAI,CAAC,KAAK,SAAS;AACjB,cAAM,IAAI,MAAM,mCAAmC;AAAA,MACrD;AAGA,UAAI,KAAK,cAAc;AACrB,cAAM,KAAK,aAAa;AAAA,MAC1B;AAGA,WAAK,WAAW,KAAK,MAAM,SAAS;AAAA,QAClC,SAAS,KAAK;AAAA,QACd,gBAAgB,KAAK;AAAA,QACrB,mBAAmB,KAAK;AAAA,MAC1B,CAAC;AAED,YAAM,KAAK,SAAS,QAAQ;AAG5B,iBAAW,OAAO,KAAK,eAAe;AACpC,cAAM,KAAK,SAAS,UAAU;AAAA,UAC5B,OAAO,IAAI;AAAA,UACX,eAAe,KAAK;AAAA,QACtB,CAAC;AAAA,MACH;AAEA,WAAK,YAAY;AAGjB,YAAM,KAAK,SAAS,IAAI;AAAA,QACtB,YAAY;AAAA,QACZ,aAAa,OAAO,YAAgC;AAClD,gBAAM,KAAK,eAAe,OAAO;AAAA,QACnC;AAAA,MACF,CAAC;AAAA,IACH,OAAO;AACL,WAAK,YAAY;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,MAAM,OAAsB;AAC1B,SAAK,YAAY;AAEjB,QAAI,KAAK,UAAU;AACjB,YAAM,KAAK,SAAS,WAAW;AAC/B,WAAK,WAAW;AAAA,IAClB;AAEA,QAAI,KAAK,UAAU;AACjB,YAAM,KAAK,SAAS,WAAW;AAC/B,WAAK,WAAW;AAAA,IAClB;AAAA,EACF;AAAA,EAEA,MAAM,UACJ,SACA,SACe;AACf,UAAM,QAAQ,QAAQ;AAEtB,QAAI,CAAC,OAAO;AACV,YAAM,IAAI,MAAM,8CAA8C;AAAA,IAChE;AAEA,QAAI,CAAC,KAAK,SAAS;AACjB,YAAM,IAAI,MAAM,qCAAqC;AAAA,IACvD;AAGA,SAAK,SAAS;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAEA,SAAK,cAAc,KAAK;AAAA,MACtB;AAAA,MACA,aAAa,QAAQ,eAAe;AAAA,IACtC,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,QACJ,SACA,SACe;AACf,UAAM,QAAQ,QAAQ,YAAY,KAAK;AAEvC,QAAI,CAAC,OAAO;AACV,YAAM,IAAI,MAAM,6CAA6C;AAAA,IAC/D;AAGA,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,WAAW,KAAK,MAAM,SAAS;AACpC,YAAM,KAAK,SAAS,QAAQ;AAAA,IAC9B;AAEA,UAAM,EAAE,KAAK,UAAU,CAAC,EAAE,IAAI;AAG9B,UAAM,WAAsC;AAAA,MAC1C,IAAI,WAAW;AAAA,MACf,MAAM,QAAQ;AAAA,MACd,SAAS;AAAA,MACT,SAAS;AAAA,QACP,GAAG;AAAA,QACH,kBAAkB,QAAQ;AAAA,MAC5B;AAAA,MACA,WAAW,oBAAI,KAAK;AAAA,MACpB,cAAc;AAAA,IAChB;AAGA,UAAM,eAAe,OAAO,SAAS;AAGrC,UAAM,eAAuC;AAAA,MAC3C,WAAW,SAAS;AAAA,MACpB,aAAa,SAAS;AAAA,MACtB,GAAG;AAAA,IACL;AAEA,UAAM,KAAK,SAAS,KAAK;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,QACR;AAAA,UACE,KAAK;AAAA,UACL,OAAO,KAAK,UAAU,QAAQ;AAAA,UAC9B,SAAS;AAAA,QACX;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAc,eAA8B;AAC1C,UAAM,QAAe,KAAK,MAAM,MAAM;AACtC,UAAM,MAAM,QAAQ;AAEpB,QAAI;AACF,YAAM,iBAAiB,MAAM,MAAM,WAAW;AAC9C,YAAM,iBAAiB,KAAK,cACzB,IAAI,CAAC,MAAM,EAAE,KAAK,EAClB,OAAO,CAAC,UAAU,CAAC,eAAe,SAAS,KAAK,CAAC;AAEpD,UAAI,eAAe,SAAS,GAAG;AAC7B,cAAM,MAAM,aAAa;AAAA,UACvB,QAAQ,eAAe,IAAI,CAAC,WAAW;AAAA,YACrC;AAAA,YACA,eAAe,KAAK;AAAA,YACpB,mBAAmB,KAAK;AAAA,UAC1B,EAAE;AAAA,QACJ,CAAC;AAAA,MACH;AAAA,IACF,UAAE;AACA,YAAM,MAAM,WAAW;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,MAAc,eAAe,SAA4C;AACvE,UAAM,EAAE,OAAO,WAAW,QAAQ,IAAI;AAEtC,QAAI,CAAC,QAAQ,OAAO;AAClB;AAAA,IACF;AAEA,QAAI;AAEF,YAAM,SAAS,KAAK,MAAM,QAAQ,MAAM,SAAS,CAAC;AAGlD,YAAM,WAA4B;AAAA,QAChC,GAAG;AAAA,QACH,WAAW,IAAI,KAAK,OAAO,SAAS;AAAA,MACtC;AAGA,YAAM,UAAU,KAAK,SAAS,IAAI,KAAK;AAEvC,UAAI,SAAS;AACX,cAAM,QAAQ,QAAQ;AAAA,MACxB;AAGA,YAAM,KAAK,SAAU,cAAc;AAAA,QACjC;AAAA,UACE;AAAA,UACA;AAAA,UACA,SAAS,OAAO,QAAQ,MAAM,IAAI,IAAI,SAAS;AAAA,QACjD;AAAA,MACF,CAAC;AAAA,IACH,SAAS,OAAO;AACd,cAAQ,MAAM,qCAAqC,KAAK;AAExD,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,YAAqB;AACnB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,WAA8D;AAC5D,WAAO;AAAA,MACL,mBAAmB,KAAK,cAAc;AAAA,MACt
C,WAAW,KAAK;AAAA,IAClB;AAAA,EACF;AACF;","names":[]}
package/package.json
ADDED
@@ -0,0 +1,59 @@
{
  "name": "@saga-bus/transport-kafka",
  "version": "0.1.0",
  "description": "Kafka transport for saga-bus",
  "type": "module",
  "main": "./dist/index.cjs",
  "module": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "import": "./dist/index.js",
      "require": "./dist/index.cjs"
    }
  },
  "files": [
    "dist",
    "README.md"
  ],
  "publishConfig": {
    "access": "public"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/deanforan/saga-bus.git",
    "directory": "packages/transport-kafka"
  },
  "keywords": [
    "saga",
    "message-bus",
    "transport",
    "kafka",
    "kafkajs"
  ],
  "dependencies": {
    "kafkajs": "^2.2.4",
    "@saga-bus/core": "0.1.0"
  },
  "devDependencies": {
    "@testcontainers/kafka": "^10.0.0",
    "@types/node": "^20.0.0",
    "tsup": "^8.0.0",
    "typescript": "^5.9.2",
    "vitest": "^3.0.0",
    "@repo/eslint-config": "0.0.0",
    "@repo/typescript-config": "0.0.0"
  },
  "peerDependencies": {
    "kafkajs": ">=2.0.0"
  },
  "scripts": {
    "build": "tsup",
    "dev": "tsup --watch",
    "lint": "eslint src/",
    "check-types": "tsc --noEmit",
    "test": "vitest run",
    "test:watch": "vitest"
  }
}
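
The `exports` map above ships dual formats: Node's `"import"` condition resolves to `dist/index.js` (ESM) and `"require"` to `dist/index.cjs`, with `"types"` pointing at `dist/index.d.ts` (a matching `index.d.cts` is also shipped for CJS resolution modes). A sketch of the two consumption styles, which is standard Node resolution rather than anything package-specific:

```typescript
// In an ESM file ("type": "module" or .mts), resolves dist/index.js:
import { KafkaTransport } from "@saga-bus/transport-kafka";

// In a CommonJS file (.cts or plain require), resolves dist/index.cjs:
// const { KafkaTransport } = require("@saga-bus/transport-kafka");
```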