@fbsm/saga-transport-kafka 0.0.1-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,141 @@
1
+ # @fbsm/saga-transport-kafka
2
+
3
+ KafkaJS-based transport adapter for `@fbsm/saga-core`. Provides `eachBatch` consumption, key-based message grouping, and watermark-based offset tracking.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @fbsm/saga-transport-kafka kafkajs
9
+ ```
10
+
11
+ ## Usage
12
+
13
+ ### With NestJS (`@fbsm/saga-nestjs`)
14
+
15
+ ```typescript
16
+ import { Module } from '@nestjs/common';
17
+ import { SagaModule } from '@fbsm/saga-nestjs';
18
+ import { KafkaTransport } from '@fbsm/saga-transport-kafka';
19
+
20
+ @Module({
21
+ imports: [
22
+ SagaModule.forRoot({
23
+ serviceName: 'my-service',
24
+ transport: new KafkaTransport({
25
+ brokers: ['localhost:9092'],
26
+ clientId: 'my-service',
27
+ autoCreateTopics: true,
28
+ }),
29
+ }),
30
+ ],
31
+ })
32
+ export class AppModule {}
33
+ ```
34
+
35
+ ### Standalone (without NestJS)
36
+
37
+ ```typescript
38
+ import { SagaPublisher, SagaRunner, SagaRegistry, SagaParser, createOtelContext } from '@fbsm/saga-core';
39
+ import { KafkaTransport } from '@fbsm/saga-transport-kafka';
40
+
41
+ const transport = new KafkaTransport({
42
+ brokers: ['localhost:9092'],
43
+ clientId: 'my-service',
44
+ });
45
+
46
+ const otelCtx = createOtelContext();
47
+ const registry = new SagaRegistry();
48
+ const parser = new SagaParser();
49
+ const publisher = new SagaPublisher(transport, otelCtx);
50
+ const runner = new SagaRunner(registry, transport, publisher, parser, {
51
+ serviceName: 'my-service',
52
+ });
53
+
54
+ // Register participants...
55
+ // registry.register(participant);
56
+
57
+ await runner.start();
58
+ ```
59
+
60
+ ## API Reference
61
+
62
+ ### `KafkaTransport`
63
+
64
+ ```typescript
65
+ import { KafkaTransport } from '@fbsm/saga-transport-kafka';
66
+
67
+ const transport = new KafkaTransport({
68
+ brokers: ['localhost:9092'],
69
+ clientId: 'my-service',
70
+ ssl: true,
71
+ sasl: {
72
+ mechanism: 'scram-sha-256',
73
+ username: 'user',
74
+ password: 'pass',
75
+ },
76
+ partitionsConsumedConcurrently: 3,
77
+ autoCreateTopics: true,
78
+ });
79
+ ```
80
+
81
+ ### `KafkaTransportOptions`
82
+
83
+ | Field | Type | Default | Description |
84
+ |-------|------|---------|-------------|
85
+ | `brokers` | `string[]` | — | Kafka broker addresses (**required**) |
86
+ | `clientId` | `string` | `'saga-client'` | Client identifier |
87
+ | `ssl` | `tls.ConnectionOptions \| boolean` | — | TLS/SSL config. Pass `true` for default TLS |
88
+ | `sasl` | `SASLOptions` | — | SASL auth (PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, AWS) |
89
+ | `connectionTimeout` | `number` | `1000` | Connection timeout (ms) |
90
+ | `authenticationTimeout` | `number` | — | Authentication timeout (ms) |
91
+ | `requestTimeout` | `number` | — | Request timeout (ms) |
92
+ | `retry` | `RetryOptions` | `{ initialRetryTime: 300, retries: 10 }` | KafkaJS client retry policy |
93
+ | `logLevel` | `logLevel` | `WARN` | KafkaJS log level |
94
+ | `socketFactory` | `ISocketFactory` | — | Custom socket factory (e.g., SOCKS proxy) |
95
+ | `partitionsConsumedConcurrently` | `number` | `3` | Partitions consumed concurrently |
96
+ | `enableOtelInstrumentation` | `boolean` | `true` if OTel available | Register KafkaJS OTel instrumentation |
97
+ | `autoCreateTopics` | `boolean` | `false` | Auto-create missing topics via admin client |
98
+ | `logger` | `SagaLogger` | `ConsoleSagaLogger` | Custom logger |
99
+
100
+ ## Features
101
+
102
+ ### eachBatch with Key-Based Grouping
103
+
104
+ Messages are consumed via KafkaJS `eachBatch` for high throughput. Within each batch, messages are grouped by key (default: `rootSagaId`):
105
+
106
+ - **Parallel** across different keys (different saga trees)
107
+ - **Sequential** within the same key (same saga tree)
108
+
109
+ This ensures event ordering within a saga while maximizing throughput.
110
+
111
+ ### Watermark-Based Offset Tracking
112
+
113
+ Offsets are committed using a watermark strategy: only the lowest unprocessed offset is committed. This prevents message loss if a later message in the batch completes before an earlier one.
114
+
115
+ ### Topic Naming
116
+
117
+ Topics are derived from event types with an optional prefix:
118
+
119
+ ```
120
+ topic = topicPrefix + eventType
121
+ ```
122
+
123
+ Example with `topicPrefix: 'prod.'`:
124
+ - `order.created` → topic `prod.order.created`
125
+ - `payment.completed` → topic `prod.payment.completed`
126
+
127
+ ### Auto-Create Topics
128
+
129
+ When `autoCreateTopics: true`, the transport uses the Kafka admin client to create any missing topics before subscribing. Useful for development environments.
130
+
131
+ ### Header-Based Metadata
132
+
133
+ All saga context metadata is propagated via Kafka headers. See [Kafka Headers](../saga-core/README.md#kafka-headers) for the full list.
134
+
135
+ ---
136
+
137
+ ## Further Reading
138
+
139
+ - [@fbsm/saga-core](../saga-core/README.md) — Core library reference
140
+ - [@fbsm/saga-nestjs](../saga-nestjs/README.md) — NestJS integration
141
+ - [Custom Transport](../doc/custom-transport.md) — Implement your own transport
package/dist/index.cjs ADDED
@@ -0,0 +1,218 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define lazy, enumerable getters on `target` for every key of `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping `except`
// and anything `to` already defines.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Tag the export object with `__esModule` so ESM-aware consumers treat it
// as a transpiled ES module.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var index_exports = {};
__export(index_exports, {
  KafkaTransport: () => KafkaTransport
});
module.exports = __toCommonJS(index_exports);

// src/kafka.transport.ts
var import_kafkajs = require("kafkajs");
var import_saga_core = require("@fbsm/saga-core");
30
+
31
// src/watermark-tracker.ts
/**
 * Tracks per-batch offset completion and yields the "watermark": the last
 * offset of the contiguous completed prefix (in arrival order). Resolving
 * only the watermark guarantees no message is committed past an unfinished
 * earlier message.
 */
var WatermarkTracker = class {
  arrivedOffsets = [];
  completed = new Set();

  /** Begin tracking a fresh batch of offsets, discarding prior state. */
  reset(offsets) {
    this.arrivedOffsets = offsets;
    this.completed.clear();
  }

  /** Record that the message at `offset` finished processing. */
  markCompleted(offset) {
    this.completed.add(offset);
  }

  /**
   * Return the highest offset such that it and every earlier arrived
   * offset are complete, or null while the first offset is still pending.
   */
  getCommittableOffset() {
    let doneCount = 0;
    while (
      doneCount < this.arrivedOffsets.length &&
      this.completed.has(this.arrivedOffsets[doneCount])
    ) {
      doneCount += 1;
    }
    return doneCount === 0 ? null : this.arrivedOffsets[doneCount - 1];
  }
};
54
+
55
// src/kafka.transport.ts
/**
 * KafkaJS-backed saga transport.
 *
 * Publishes outbound messages through a shared producer and consumes via
 * `eachBatch`, grouping batch messages by key so different keys run in
 * parallel while messages sharing a key stay strictly ordered. Offsets are
 * resolved with a watermark strategy (see WatermarkTracker) so only a
 * contiguous prefix of completed messages is ever committed.
 */
var KafkaTransport = class {
  /**
   * @param options KafkaTransportOptions; `brokers` is required, all other
   *   fields fall back to the defaults visible below.
   */
  constructor(options) {
    this.options = options;
    this.logger = options.logger ?? new import_saga_core.ConsoleSagaLogger();
    this.kafka = new import_kafkajs.Kafka({
      clientId: options.clientId ?? "saga-client",
      brokers: options.brokers,
      ssl: options.ssl,
      sasl: options.sasl,
      connectionTimeout: options.connectionTimeout,
      authenticationTimeout: options.authenticationTimeout,
      requestTimeout: options.requestTimeout,
      // Client-level retry policy; overridable via options.retry.
      retry: options.retry ?? {
        initialRetryTime: 300,
        retries: 10
      },
      logLevel: options.logLevel ?? import_kafkajs.logLevel.WARN,
      socketFactory: options.socketFactory
    });
    this.producer = this.kafka.producer();
  }
  // Field declarations emitted by the bundler (assigned in the constructor).
  kafka;
  producer;
  consumer = null; // created lazily in connectConsumerWithRetry()
  logger;
  /** Connect the shared producer. The consumer connects in subscribe(). */
  async connect() {
    await this.producer.connect();
  }
  /** Disconnect the producer and, when one was started, the consumer. */
  async disconnect() {
    await this.producer.disconnect();
    if (this.consumer) {
      await this.consumer.disconnect();
    }
  }
  /** Publish one outbound message (key, value, headers) to its topic. */
  async publish(message) {
    await this.producer.send({
      topic: message.topic,
      messages: [
        {
          key: message.key,
          value: message.value,
          headers: message.headers
        }
      ]
    });
  }
  /**
   * Subscribe to `topics` and run `handler` for every inbound message,
   * optionally auto-creating missing topics first.
   * NOTE(review): a second subscribe() call replaces this.consumer without
   * disconnecting the previous one — presumably called once per instance;
   * confirm with callers.
   */
  async subscribe(topics, handler, options) {
    const groupId = options?.groupId ?? "saga-default-group";
    if (this.options.autoCreateTopics) {
      await this.ensureTopicsExist(topics);
    }
    await this.connectConsumerWithRetry(groupId, topics, handler, options);
  }
  /** Create any missing topics (3 partitions, replication factor 1). */
  async ensureTopicsExist(topics) {
    const admin = this.kafka.admin();
    await admin.connect();
    try {
      const existing = await admin.listTopics();
      const missing = topics.filter((t) => !existing.includes(t));
      if (missing.length > 0) {
        await admin.createTopics({
          topics: missing.map((t) => ({ topic: t, numPartitions: 3, replicationFactor: 1 }))
        });
      }
    } finally {
      // Always release the admin connection, even when creation fails.
      await admin.disconnect();
    }
  }
  /**
   * Create, connect, subscribe and run the consumer, retrying the whole
   * sequence with exponential backoff (1s, 2s, 4s, ... capped at 10s) up to
   * `maxAttempts` times. Rethrows the last error when every attempt fails.
   */
  async connectConsumerWithRetry(groupId, topics, handler, options, maxAttempts = 10) {
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        this.consumer = this.kafka.consumer({
          groupId,
          retry: { initialRetryTime: 500, retries: 8 }
        });
        await this.consumer.connect();
        for (const topic of topics) {
          await this.consumer.subscribe({
            topic,
            fromBeginning: options?.fromBeginning ?? false
          });
        }
        await this.consumer.run({
          partitionsConsumedConcurrently: this.options.partitionsConsumedConcurrently ?? 3,
          eachBatch: async (payload) => {
            await this.processBatch(payload, handler);
          }
        });
        return;
      } catch (err) {
        this.logger.warn(
          `[KafkaTransport] Consumer failed to start (attempt ${attempt}/${maxAttempts}): ${err.message}`
        );
        // Best-effort cleanup of the half-started consumer before retrying.
        try {
          await this.consumer?.disconnect();
        } catch {
          // ignore disconnect errors during cleanup
        }
        this.consumer = null;
        if (attempt === maxAttempts) {
          throw err;
        }
        const delay = Math.min(1e3 * Math.pow(2, attempt - 1), 1e4);
        await new Promise((r) => setTimeout(r, delay));
      }
    }
  }
  /**
   * Process one batch: group messages by key, handle groups in parallel and
   * messages within a group sequentially, resolving only the watermark
   * (highest contiguous completed offset) after each handled message.
   */
  async processBatch(payload, handler) {
    const { batch, resolveOffset, heartbeat, isRunning, isStale } = payload;
    const tracker = new WatermarkTracker();
    const offsets = batch.messages.map((m) => m.offset);
    tracker.reset(offsets);
    // Group by key so messages of the same saga tree keep their order.
    const groups = /* @__PURE__ */ new Map();
    for (const message of batch.messages) {
      const key = message.key?.toString() ?? "__no_key__";
      if (!groups.has(key)) {
        groups.set(key, []);
      }
      groups.get(key).push(message);
    }
    await Promise.all(
      Array.from(groups.entries()).map(async ([, messages]) => {
        for (const message of messages) {
          // Bail out on rebalance/shutdown; unresolved offsets are redelivered.
          if (!isRunning() || isStale()) {
            return;
          }
          const inbound = {
            topic: batch.topic,
            key: message.key?.toString() ?? "",
            value: message.value?.toString() ?? "",
            headers: this.convertHeaders(message.headers)
          };
          await handler(inbound);
          tracker.markCompleted(message.offset);
          const committable = tracker.getCommittableOffset();
          if (committable) {
            resolveOffset(committable);
          }
          // Keep the group membership alive during long batches.
          await heartbeat();
        }
      })
    );
  }
  /** Normalize KafkaJS header values (Buffer | string | array) to strings. */
  convertHeaders(headers) {
    const result = {};
    if (!headers) return result;
    for (const [key, value] of Object.entries(headers)) {
      if (Buffer.isBuffer(value)) {
        result[key] = value.toString();
      } else if (typeof value === "string") {
        result[key] = value;
      } else if (Array.isArray(value) && value.length > 0) {
        // Multi-value header: only the first entry is kept.
        const first = value[0];
        result[key] = Buffer.isBuffer(first) ? first.toString() : String(first);
      }
    }
    return result;
  }
};
214
// Annotate the CommonJS export names for ESM import in node:
// NOTE: the `0 &&` expression is dead at runtime but is scanned statically
// so Node can expose named imports from this CJS bundle — do not remove.
0 && (module.exports = {
  KafkaTransport
});
//# sourceMappingURL=index.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts","../src/kafka.transport.ts","../src/watermark-tracker.ts"],"sourcesContent":["export { KafkaTransport } from './kafka.transport';\nexport type { KafkaTransportOptions } from './kafka-transport-options';\n","import { Kafka, Producer, Consumer, EachBatchPayload, IHeaders, logLevel } from 'kafkajs';\nimport type {\n SagaTransport,\n InboundMessage,\n OutboundMessage,\n TransportSubscribeOptions,\n SagaLogger,\n} from '@fbsm/saga-core';\nimport { ConsoleSagaLogger } from '@fbsm/saga-core';\nimport { WatermarkTracker } from './watermark-tracker';\nimport type { KafkaTransportOptions } from './kafka-transport-options';\n\nexport class KafkaTransport implements SagaTransport {\n private kafka: Kafka;\n private producer: Producer;\n private consumer: Consumer | null = null;\n private logger: SagaLogger;\n\n constructor(private options: KafkaTransportOptions) {\n this.logger = options.logger ?? new ConsoleSagaLogger();\n this.kafka = new Kafka({\n clientId: options.clientId ?? 'saga-client',\n brokers: options.brokers,\n ssl: options.ssl,\n sasl: options.sasl,\n connectionTimeout: options.connectionTimeout,\n authenticationTimeout: options.authenticationTimeout,\n requestTimeout: options.requestTimeout,\n retry: options.retry ?? {\n initialRetryTime: 300,\n retries: 10,\n },\n logLevel: options.logLevel ?? 
logLevel.WARN,\n socketFactory: options.socketFactory,\n });\n this.producer = this.kafka.producer();\n }\n\n async connect(): Promise<void> {\n await this.producer.connect();\n }\n\n async disconnect(): Promise<void> {\n await this.producer.disconnect();\n if (this.consumer) {\n await this.consumer.disconnect();\n }\n }\n\n async publish(message: OutboundMessage): Promise<void> {\n await this.producer.send({\n topic: message.topic,\n messages: [\n {\n key: message.key,\n value: message.value,\n headers: message.headers,\n },\n ],\n });\n }\n\n async subscribe(\n topics: string[],\n handler: (message: InboundMessage) => Promise<void>,\n options?: TransportSubscribeOptions,\n ): Promise<void> {\n const groupId = options?.groupId ?? 'saga-default-group';\n\n if (this.options.autoCreateTopics) {\n await this.ensureTopicsExist(topics);\n }\n\n await this.connectConsumerWithRetry(groupId, topics, handler, options);\n }\n\n private async ensureTopicsExist(topics: string[]): Promise<void> {\n const admin = this.kafka.admin();\n await admin.connect();\n try {\n const existing = await admin.listTopics();\n const missing = topics.filter((t) => !existing.includes(t));\n if (missing.length > 0) {\n await admin.createTopics({\n topics: missing.map((t) => ({ topic: t, numPartitions: 3, replicationFactor: 1 })),\n });\n }\n } finally {\n await admin.disconnect();\n }\n }\n\n private async connectConsumerWithRetry(\n groupId: string,\n topics: string[],\n handler: (message: InboundMessage) => Promise<void>,\n options?: TransportSubscribeOptions,\n maxAttempts = 10,\n ): Promise<void> {\n for (let attempt = 1; attempt <= maxAttempts; attempt++) {\n try {\n this.consumer = this.kafka.consumer({\n groupId,\n retry: { initialRetryTime: 500, retries: 8 },\n });\n\n await this.consumer.connect();\n\n for (const topic of topics) {\n await this.consumer.subscribe({\n topic,\n fromBeginning: options?.fromBeginning ?? 
false,\n });\n }\n\n await this.consumer.run({\n partitionsConsumedConcurrently: this.options.partitionsConsumedConcurrently ?? 3,\n eachBatch: async (payload: EachBatchPayload) => {\n await this.processBatch(payload, handler);\n },\n });\n\n return;\n } catch (err) {\n this.logger.warn(\n `[KafkaTransport] Consumer failed to start (attempt ${attempt}/${maxAttempts}): ${(err as Error).message}`,\n );\n\n try {\n await this.consumer?.disconnect();\n } catch {\n // ignore disconnect errors\n }\n this.consumer = null;\n\n if (attempt === maxAttempts) {\n throw err;\n }\n\n const delay = Math.min(1000 * Math.pow(2, attempt - 1), 10000);\n await new Promise((r) => setTimeout(r, delay));\n }\n }\n }\n\n private async processBatch(\n payload: EachBatchPayload,\n handler: (message: InboundMessage) => Promise<void>,\n ): Promise<void> {\n const { batch, resolveOffset, heartbeat, isRunning, isStale } = payload;\n const tracker = new WatermarkTracker();\n\n const offsets = batch.messages.map((m) => m.offset);\n tracker.reset(offsets);\n\n // Group messages by key (sagaId) for ordered processing within saga\n const groups = new Map<string, typeof batch.messages>();\n\n for (const message of batch.messages) {\n const key = message.key?.toString() ?? '__no_key__';\n if (!groups.has(key)) {\n groups.set(key, []);\n }\n groups.get(key)!.push(message);\n }\n\n // Process groups in parallel, messages within each group sequentially\n await Promise.all(\n Array.from(groups.entries()).map(async ([, messages]) => {\n for (const message of messages) {\n if (!isRunning() || isStale()) {\n return;\n }\n\n const inbound: InboundMessage = {\n topic: batch.topic,\n key: message.key?.toString() ?? '',\n value: message.value?.toString() ?? 
'',\n headers: this.convertHeaders(message.headers),\n };\n\n await handler(inbound);\n\n tracker.markCompleted(message.offset);\n const committable = tracker.getCommittableOffset();\n if (committable) {\n resolveOffset(committable);\n }\n\n await heartbeat();\n }\n }),\n );\n }\n\n private convertHeaders(headers?: IHeaders): Record<string, string> {\n const result: Record<string, string> = {};\n if (!headers) return result;\n\n for (const [key, value] of Object.entries(headers)) {\n if (Buffer.isBuffer(value)) {\n result[key] = value.toString();\n } else if (typeof value === 'string') {\n result[key] = value;\n } else if (Array.isArray(value) && value.length > 0) {\n const first = value[0];\n result[key] = Buffer.isBuffer(first) ? first.toString() : String(first);\n }\n }\n\n return result;\n }\n}\n","export class WatermarkTracker {\n private arrivedOffsets: string[] = [];\n private completed = new Set<string>();\n\n reset(offsets: string[]): void {\n this.arrivedOffsets = offsets;\n this.completed.clear();\n }\n\n markCompleted(offset: string): void {\n this.completed.add(offset);\n }\n\n getCommittableOffset(): string | null {\n let lastSafe: string | null = null;\n for (const offset of this.arrivedOffsets) {\n if (this.completed.has(offset)) {\n lastSafe = offset;\n } else {\n break;\n }\n }\n return lastSafe;\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,qBAAgF;AAQhF,uBAAkC;;;ACR3B,IAAM,mBAAN,MAAuB;AAAA,EACpB,iBAA2B,CAAC;AAAA,EAC5B,YAAY,oBAAI,IAAY;AAAA,EAEpC,MAAM,SAAyB;AAC7B,SAAK,iBAAiB;AACtB,SAAK,UAAU,MAAM;AAAA,EACvB;AAAA,EAEA,cAAc,QAAsB;AAClC,SAAK,UAAU,IAAI,MAAM;AAAA,EAC3B;AAAA,EAEA,uBAAsC;AACpC,QAAI,WAA0B;AAC9B,eAAW,UAAU,KAAK,gBAAgB;AACxC,UAAI,KAAK,UAAU,IAAI,MAAM,GAAG;AAC9B,mBAAW;AAAA,MACb,OAAO;AACL;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;;;ADZO,IAAM,iBAAN,MAA8C;AAAA,EAMnD,YAAoB,SAAgC;AAAhC;AAClB,SAAK,SAAS,QAAQ,UAAU,IAAI,mCAAkB;AACtD,SAAK,QAAQ,IAAI,qBAAM;AAAA,MACrB,UAAU,QAAQ,YAAY;AAAA,MAC9B,SAAS,QAAQ;AAAA,MACjB,KAAK,QAAQ;AAAA,MACb,MAAM,QAAQ;AAAA,MACd,mBAAmB,QAAQ;AAAA,MAC3B,uBAAuB,QAAQ;AAAA,MAC/B,gBAAgB,QAAQ;AAAA,MACxB,OAAO,QAAQ,SAAS;AAAA,QACtB,kBAAkB;AAAA,QAClB,SAAS;AAAA,MACX;AAAA,MACA,UAAU,QAAQ,YAAY,wBAAS;AAAA,MACvC,eAAe,QAAQ;AAAA,IACzB,CAAC;AACD,SAAK,WAAW,KAAK,MAAM,SAAS;AAAA,EACtC;AAAA,EAvBQ;AAAA,EACA;AAAA,EACA,WAA4B;AAAA,EAC5B;AAAA,EAsBR,MAAM,UAAyB;AAC7B,UAAM,KAAK,SAAS,QAAQ;AAAA,EAC9B;AAAA,EAEA,MAAM,aAA4B;AAChC,UAAM,KAAK,SAAS,WAAW;AAC/B,QAAI,KAAK,UAAU;AACjB,YAAM,KAAK,SAAS,WAAW;AAAA,IACjC;AAAA,EACF;AAAA,EAEA,MAAM,QAAQ,SAAyC;AACrD,UAAM,KAAK,SAAS,KAAK;AAAA,MACvB,OAAO,QAAQ;AAAA,MACf,UAAU;AAAA,QACR;AAAA,UACE,KAAK,QAAQ;AAAA,UACb,OAAO,QAAQ;AAAA,UACf,SAAS,QAAQ;AAAA,QACnB;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,UACJ,QACA,SACA,SACe;AACf,UAAM,UAAU,SAAS,WAAW;AAEpC,QAAI,KAAK,QAAQ,kBAAkB;AACjC,YAAM,KAAK,kBAAkB,MAAM;AAAA,IACrC;AAEA,UAAM,KAAK,yBAAyB,SAAS,QAAQ,SAAS,OAAO;AAAA,EACvE;AAAA,EAEA,MAAc,kBAAkB,QAAiC;AAC/D,UAAM,QAAQ,KAAK,MAAM,MAAM;AAC/B,UAAM,MAAM,QAAQ;AACpB,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,WAAW;AACxC,YAAM,UAAU,OAAO,OAAO,CAAC,MAAM,CAAC,SAAS,SAAS,CAAC,CAAC;AAC1D,UAAI,QAAQ,SAAS,GAAG;AACtB,cAAM,MAAM,aAAa;AAAA,UACvB,QAAQ,QAAQ,IAAI,CAAC,OAAO,EAAE,OAAO,GAAG,eAAe,GAAG,mBAAmB,EAAE,EAAE;AAAA,QACnF,CAAC;AAAA,MACH;AAAA,IACF,UAAE;AACA,YAAM,MAAM,WAAW;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,MAAc,yBACZ,SACA,QACA,SACA,SACA,cAAc,IACC;AACf,aAAS,UAAU,GAAG,WAAW,aAA
a,WAAW;AACvD,UAAI;AACF,aAAK,WAAW,KAAK,MAAM,SAAS;AAAA,UAClC;AAAA,UACA,OAAO,EAAE,kBAAkB,KAAK,SAAS,EAAE;AAAA,QAC7C,CAAC;AAED,cAAM,KAAK,SAAS,QAAQ;AAE5B,mBAAW,SAAS,QAAQ;AAC1B,gBAAM,KAAK,SAAS,UAAU;AAAA,YAC5B;AAAA,YACA,eAAe,SAAS,iBAAiB;AAAA,UAC3C,CAAC;AAAA,QACH;AAEA,cAAM,KAAK,SAAS,IAAI;AAAA,UACtB,gCAAgC,KAAK,QAAQ,kCAAkC;AAAA,UAC/E,WAAW,OAAO,YAA8B;AAC9C,kBAAM,KAAK,aAAa,SAAS,OAAO;AAAA,UAC1C;AAAA,QACF,CAAC;AAED;AAAA,MACF,SAAS,KAAK;AACZ,aAAK,OAAO;AAAA,UACV,sDAAsD,OAAO,IAAI,WAAW,MAAO,IAAc,OAAO;AAAA,QAC1G;AAEA,YAAI;AACF,gBAAM,KAAK,UAAU,WAAW;AAAA,QAClC,QAAQ;AAAA,QAER;AACA,aAAK,WAAW;AAEhB,YAAI,YAAY,aAAa;AAC3B,gBAAM;AAAA,QACR;AAEA,cAAM,QAAQ,KAAK,IAAI,MAAO,KAAK,IAAI,GAAG,UAAU,CAAC,GAAG,GAAK;AAC7D,cAAM,IAAI,QAAQ,CAAC,MAAM,WAAW,GAAG,KAAK,CAAC;AAAA,MAC/C;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAc,aACZ,SACA,SACe;AACf,UAAM,EAAE,OAAO,eAAe,WAAW,WAAW,QAAQ,IAAI;AAChE,UAAM,UAAU,IAAI,iBAAiB;AAErC,UAAM,UAAU,MAAM,SAAS,IAAI,CAAC,MAAM,EAAE,MAAM;AAClD,YAAQ,MAAM,OAAO;AAGrB,UAAM,SAAS,oBAAI,IAAmC;AAEtD,eAAW,WAAW,MAAM,UAAU;AACpC,YAAM,MAAM,QAAQ,KAAK,SAAS,KAAK;AACvC,UAAI,CAAC,OAAO,IAAI,GAAG,GAAG;AACpB,eAAO,IAAI,KAAK,CAAC,CAAC;AAAA,MACpB;AACA,aAAO,IAAI,GAAG,EAAG,KAAK,OAAO;AAAA,IAC/B;AAGA,UAAM,QAAQ;AAAA,MACZ,MAAM,KAAK,OAAO,QAAQ,CAAC,EAAE,IAAI,OAAO,CAAC,EAAE,QAAQ,MAAM;AACvD,mBAAW,WAAW,UAAU;AAC9B,cAAI,CAAC,UAAU,KAAK,QAAQ,GAAG;AAC7B;AAAA,UACF;AAEA,gBAAM,UAA0B;AAAA,YAC9B,OAAO,MAAM;AAAA,YACb,KAAK,QAAQ,KAAK,SAAS,KAAK;AAAA,YAChC,OAAO,QAAQ,OAAO,SAAS,KAAK;AAAA,YACpC,SAAS,KAAK,eAAe,QAAQ,OAAO;AAAA,UAC9C;AAEA,gBAAM,QAAQ,OAAO;AAErB,kBAAQ,cAAc,QAAQ,MAAM;AACpC,gBAAM,cAAc,QAAQ,qBAAqB;AACjD,cAAI,aAAa;AACf,0BAAc,WAAW;AAAA,UAC3B;AAEA,gBAAM,UAAU;AAAA,QAClB;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA,EAEQ,eAAe,SAA4C;AACjE,UAAM,SAAiC,CAAC;AACxC,QAAI,CAAC,QAAS,QAAO;AAErB,eAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,OAAO,GAAG;AAClD,UAAI,OAAO,SAAS,KAAK,GAAG;AAC1B,eAAO,GAAG,IAAI,MAAM,SAAS;AAAA,MAC/B,WAAW,OAAO,UAAU,UAAU;AACpC,eAAO,GAAG,IAAI;AAAA,MAChB,WAAW,MAAM,QAAQ,KAAK,KAAK,MAAM,SAAS,GAAG;AACnD,cAAM,QAAQ,MAAM,CAAC;AACrB,eAAO,GAAG,IAAI,OAAO,SAAS,KAAK,IA
AI,MAAM,SAAS,IAAI,OAAO,KAAK;AAAA,MACxE;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AACF;","names":[]}
@@ -0,0 +1,52 @@
1
import { SagaLogger, SagaTransport, OutboundMessage, InboundMessage, TransportSubscribeOptions } from '@fbsm/saga-core';
import { KafkaConfig, logLevel } from 'kafkajs';

/**
 * Construction options for KafkaTransport. Only `brokers` is required;
 * every other field has a documented default.
 */
interface KafkaTransportOptions {
  /** Kafka broker addresses */
  brokers: string[] | KafkaConfig['brokers'];
  /** Client identifier for Kafka connections. Default: 'saga-client' */
  clientId?: string;
  /** TLS/SSL configuration. Pass `true` for default TLS or a tls.ConnectionOptions object */
  ssl?: KafkaConfig['ssl'];
  /** SASL authentication configuration (PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, AWS) */
  sasl?: KafkaConfig['sasl'];
  /** Connection timeout in milliseconds. Default: KafkaJS default (1000) */
  connectionTimeout?: number;
  /** Authentication timeout in milliseconds */
  authenticationTimeout?: number;
  /** Request timeout in milliseconds */
  requestTimeout?: number;
  /** KafkaJS retry options for the client-level retry policy */
  retry?: KafkaConfig['retry'];
  /** KafkaJS log level. Default: WARN */
  logLevel?: logLevel;
  /** Custom socket factory for advanced networking (e.g., SOCKS proxy) */
  socketFactory?: KafkaConfig['socketFactory'];
  /** Number of partitions consumed concurrently. Default: 3 */
  partitionsConsumedConcurrently?: number;
  /** Register KafkaJsInstrumentation with OTel SDK. Default: true if OTel available */
  enableOtelInstrumentation?: boolean;
  /** Auto-create topics via admin client before subscribing. Default: false */
  autoCreateTopics?: boolean;
  /** Custom logger. Default: ConsoleSagaLogger */
  logger?: SagaLogger;
}

/**
 * KafkaJS-based SagaTransport: shared producer for publishing plus an
 * `eachBatch` consumer with key-grouped, watermark-resolved processing.
 */
declare class KafkaTransport implements SagaTransport {
  private options;
  private kafka;
  private producer;
  private consumer;
  private logger;
  constructor(options: KafkaTransportOptions);
  /** Connect the underlying producer. */
  connect(): Promise<void>;
  /** Disconnect the producer and, when started, the consumer. */
  disconnect(): Promise<void>;
  /** Publish one outbound message to its topic. */
  publish(message: OutboundMessage): Promise<void>;
  /** Subscribe to `topics` and invoke `handler` for each inbound message. */
  subscribe(topics: string[], handler: (message: InboundMessage) => Promise<void>, options?: TransportSubscribeOptions): Promise<void>;
  private ensureTopicsExist;
  private connectConsumerWithRetry;
  private processBatch;
  private convertHeaders;
}

export { KafkaTransport, type KafkaTransportOptions };
@@ -0,0 +1,52 @@
1
import { SagaLogger, SagaTransport, OutboundMessage, InboundMessage, TransportSubscribeOptions } from '@fbsm/saga-core';
import { KafkaConfig, logLevel } from 'kafkajs';

/**
 * Construction options for KafkaTransport. Only `brokers` is required;
 * every other field has a documented default.
 */
interface KafkaTransportOptions {
  /** Kafka broker addresses */
  brokers: string[] | KafkaConfig['brokers'];
  /** Client identifier for Kafka connections. Default: 'saga-client' */
  clientId?: string;
  /** TLS/SSL configuration. Pass `true` for default TLS or a tls.ConnectionOptions object */
  ssl?: KafkaConfig['ssl'];
  /** SASL authentication configuration (PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, AWS) */
  sasl?: KafkaConfig['sasl'];
  /** Connection timeout in milliseconds. Default: KafkaJS default (1000) */
  connectionTimeout?: number;
  /** Authentication timeout in milliseconds */
  authenticationTimeout?: number;
  /** Request timeout in milliseconds */
  requestTimeout?: number;
  /** KafkaJS retry options for the client-level retry policy */
  retry?: KafkaConfig['retry'];
  /** KafkaJS log level. Default: WARN */
  logLevel?: logLevel;
  /** Custom socket factory for advanced networking (e.g., SOCKS proxy) */
  socketFactory?: KafkaConfig['socketFactory'];
  /** Number of partitions consumed concurrently. Default: 3 */
  partitionsConsumedConcurrently?: number;
  /** Register KafkaJsInstrumentation with OTel SDK. Default: true if OTel available */
  enableOtelInstrumentation?: boolean;
  /** Auto-create topics via admin client before subscribing. Default: false */
  autoCreateTopics?: boolean;
  /** Custom logger. Default: ConsoleSagaLogger */
  logger?: SagaLogger;
}

/**
 * KafkaJS-based SagaTransport: shared producer for publishing plus an
 * `eachBatch` consumer with key-grouped, watermark-resolved processing.
 */
declare class KafkaTransport implements SagaTransport {
  private options;
  private kafka;
  private producer;
  private consumer;
  private logger;
  constructor(options: KafkaTransportOptions);
  /** Connect the underlying producer. */
  connect(): Promise<void>;
  /** Disconnect the producer and, when started, the consumer. */
  disconnect(): Promise<void>;
  /** Publish one outbound message to its topic. */
  publish(message: OutboundMessage): Promise<void>;
  /** Subscribe to `topics` and invoke `handler` for each inbound message. */
  subscribe(topics: string[], handler: (message: InboundMessage) => Promise<void>, options?: TransportSubscribeOptions): Promise<void>;
  private ensureTopicsExist;
  private connectConsumerWithRetry;
  private processBatch;
  private convertHeaders;
}

export { KafkaTransport, type KafkaTransportOptions };
package/dist/index.js ADDED
@@ -0,0 +1,191 @@
1
+ // src/kafka.transport.ts
2
+ import { Kafka, logLevel } from "kafkajs";
3
+ import { ConsoleSagaLogger } from "@fbsm/saga-core";
4
+
5
// src/watermark-tracker.ts
/**
 * Records which offsets of the current batch finished processing and
 * reports the watermark: the last offset of the completed prefix, walking
 * the offsets in arrival order.
 */
var WatermarkTracker = class {
  arrivedOffsets = [];
  completed = new Set();

  // Start tracking a new batch; all previous completion state is dropped.
  reset(offsets) {
    this.arrivedOffsets = offsets;
    this.completed.clear();
  }

  // Mark a single offset as fully handled.
  markCompleted(offset) {
    this.completed.add(offset);
  }

  // Return the last offset of the contiguous completed prefix, or null
  // when the very first arrived offset is still outstanding.
  getCommittableOffset() {
    let watermark = null;
    for (const candidate of this.arrivedOffsets) {
      if (!this.completed.has(candidate)) {
        return watermark;
      }
      watermark = candidate;
    }
    return watermark;
  }
};
28
+
29
// src/kafka.transport.ts
/**
 * KafkaJS-backed saga transport (ESM build).
 *
 * Publishes outbound messages through a shared producer and consumes via
 * `eachBatch`, grouping batch messages by key so different keys run in
 * parallel while messages sharing a key stay strictly ordered. Offsets are
 * resolved with a watermark strategy (see WatermarkTracker) so only a
 * contiguous prefix of completed messages is ever committed.
 */
var KafkaTransport = class {
  /**
   * @param options KafkaTransportOptions; `brokers` is required, all other
   *   fields fall back to the defaults visible below.
   */
  constructor(options) {
    this.options = options;
    this.logger = options.logger ?? new ConsoleSagaLogger();
    this.kafka = new Kafka({
      clientId: options.clientId ?? "saga-client",
      brokers: options.brokers,
      ssl: options.ssl,
      sasl: options.sasl,
      connectionTimeout: options.connectionTimeout,
      authenticationTimeout: options.authenticationTimeout,
      requestTimeout: options.requestTimeout,
      // Client-level retry policy; overridable via options.retry.
      retry: options.retry ?? {
        initialRetryTime: 300,
        retries: 10
      },
      logLevel: options.logLevel ?? logLevel.WARN,
      socketFactory: options.socketFactory
    });
    this.producer = this.kafka.producer();
  }
  // Field declarations emitted by the bundler (assigned in the constructor).
  kafka;
  producer;
  consumer = null; // created lazily in connectConsumerWithRetry()
  logger;
  /** Connect the shared producer. The consumer connects in subscribe(). */
  async connect() {
    await this.producer.connect();
  }
  /** Disconnect the producer and, when one was started, the consumer. */
  async disconnect() {
    await this.producer.disconnect();
    if (this.consumer) {
      await this.consumer.disconnect();
    }
  }
  /** Publish one outbound message (key, value, headers) to its topic. */
  async publish(message) {
    await this.producer.send({
      topic: message.topic,
      messages: [
        {
          key: message.key,
          value: message.value,
          headers: message.headers
        }
      ]
    });
  }
  /**
   * Subscribe to `topics` and run `handler` for every inbound message,
   * optionally auto-creating missing topics first.
   * NOTE(review): a second subscribe() call replaces this.consumer without
   * disconnecting the previous one — presumably called once per instance;
   * confirm with callers.
   */
  async subscribe(topics, handler, options) {
    const groupId = options?.groupId ?? "saga-default-group";
    if (this.options.autoCreateTopics) {
      await this.ensureTopicsExist(topics);
    }
    await this.connectConsumerWithRetry(groupId, topics, handler, options);
  }
  /** Create any missing topics (3 partitions, replication factor 1). */
  async ensureTopicsExist(topics) {
    const admin = this.kafka.admin();
    await admin.connect();
    try {
      const existing = await admin.listTopics();
      const missing = topics.filter((t) => !existing.includes(t));
      if (missing.length > 0) {
        await admin.createTopics({
          topics: missing.map((t) => ({ topic: t, numPartitions: 3, replicationFactor: 1 }))
        });
      }
    } finally {
      // Always release the admin connection, even when creation fails.
      await admin.disconnect();
    }
  }
  /**
   * Create, connect, subscribe and run the consumer, retrying the whole
   * sequence with exponential backoff (1s, 2s, 4s, ... capped at 10s) up to
   * `maxAttempts` times. Rethrows the last error when every attempt fails.
   */
  async connectConsumerWithRetry(groupId, topics, handler, options, maxAttempts = 10) {
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        this.consumer = this.kafka.consumer({
          groupId,
          retry: { initialRetryTime: 500, retries: 8 }
        });
        await this.consumer.connect();
        for (const topic of topics) {
          await this.consumer.subscribe({
            topic,
            fromBeginning: options?.fromBeginning ?? false
          });
        }
        await this.consumer.run({
          partitionsConsumedConcurrently: this.options.partitionsConsumedConcurrently ?? 3,
          eachBatch: async (payload) => {
            await this.processBatch(payload, handler);
          }
        });
        return;
      } catch (err) {
        this.logger.warn(
          `[KafkaTransport] Consumer failed to start (attempt ${attempt}/${maxAttempts}): ${err.message}`
        );
        // Best-effort cleanup of the half-started consumer before retrying.
        try {
          await this.consumer?.disconnect();
        } catch {
          // ignore disconnect errors during cleanup
        }
        this.consumer = null;
        if (attempt === maxAttempts) {
          throw err;
        }
        const delay = Math.min(1e3 * Math.pow(2, attempt - 1), 1e4);
        await new Promise((r) => setTimeout(r, delay));
      }
    }
  }
  /**
   * Process one batch: group messages by key, handle groups in parallel and
   * messages within a group sequentially, resolving only the watermark
   * (highest contiguous completed offset) after each handled message.
   */
  async processBatch(payload, handler) {
    const { batch, resolveOffset, heartbeat, isRunning, isStale } = payload;
    const tracker = new WatermarkTracker();
    const offsets = batch.messages.map((m) => m.offset);
    tracker.reset(offsets);
    // Group by key so messages of the same saga tree keep their order.
    const groups = /* @__PURE__ */ new Map();
    for (const message of batch.messages) {
      const key = message.key?.toString() ?? "__no_key__";
      if (!groups.has(key)) {
        groups.set(key, []);
      }
      groups.get(key).push(message);
    }
    await Promise.all(
      Array.from(groups.entries()).map(async ([, messages]) => {
        for (const message of messages) {
          // Bail out on rebalance/shutdown; unresolved offsets are redelivered.
          if (!isRunning() || isStale()) {
            return;
          }
          const inbound = {
            topic: batch.topic,
            key: message.key?.toString() ?? "",
            value: message.value?.toString() ?? "",
            headers: this.convertHeaders(message.headers)
          };
          await handler(inbound);
          tracker.markCompleted(message.offset);
          const committable = tracker.getCommittableOffset();
          if (committable) {
            resolveOffset(committable);
          }
          // Keep the group membership alive during long batches.
          await heartbeat();
        }
      })
    );
  }
  /** Normalize KafkaJS header values (Buffer | string | array) to strings. */
  convertHeaders(headers) {
    const result = {};
    if (!headers) return result;
    for (const [key, value] of Object.entries(headers)) {
      if (Buffer.isBuffer(value)) {
        result[key] = value.toString();
      } else if (typeof value === "string") {
        result[key] = value;
      } else if (Array.isArray(value) && value.length > 0) {
        // Multi-value header: only the first entry is kept.
        const first = value[0];
        result[key] = Buffer.isBuffer(first) ? first.toString() : String(first);
      }
    }
    return result;
  }
};
188
+ export {
189
+ KafkaTransport
190
+ };
191
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/kafka.transport.ts","../src/watermark-tracker.ts"],"sourcesContent":["import { Kafka, Producer, Consumer, EachBatchPayload, IHeaders, logLevel } from 'kafkajs';\nimport type {\n SagaTransport,\n InboundMessage,\n OutboundMessage,\n TransportSubscribeOptions,\n SagaLogger,\n} from '@fbsm/saga-core';\nimport { ConsoleSagaLogger } from '@fbsm/saga-core';\nimport { WatermarkTracker } from './watermark-tracker';\nimport type { KafkaTransportOptions } from './kafka-transport-options';\n\nexport class KafkaTransport implements SagaTransport {\n private kafka: Kafka;\n private producer: Producer;\n private consumer: Consumer | null = null;\n private logger: SagaLogger;\n\n constructor(private options: KafkaTransportOptions) {\n this.logger = options.logger ?? new ConsoleSagaLogger();\n this.kafka = new Kafka({\n clientId: options.clientId ?? 'saga-client',\n brokers: options.brokers,\n ssl: options.ssl,\n sasl: options.sasl,\n connectionTimeout: options.connectionTimeout,\n authenticationTimeout: options.authenticationTimeout,\n requestTimeout: options.requestTimeout,\n retry: options.retry ?? {\n initialRetryTime: 300,\n retries: 10,\n },\n logLevel: options.logLevel ?? logLevel.WARN,\n socketFactory: options.socketFactory,\n });\n this.producer = this.kafka.producer();\n }\n\n async connect(): Promise<void> {\n await this.producer.connect();\n }\n\n async disconnect(): Promise<void> {\n await this.producer.disconnect();\n if (this.consumer) {\n await this.consumer.disconnect();\n }\n }\n\n async publish(message: OutboundMessage): Promise<void> {\n await this.producer.send({\n topic: message.topic,\n messages: [\n {\n key: message.key,\n value: message.value,\n headers: message.headers,\n },\n ],\n });\n }\n\n async subscribe(\n topics: string[],\n handler: (message: InboundMessage) => Promise<void>,\n options?: TransportSubscribeOptions,\n ): Promise<void> {\n const groupId = options?.groupId ?? 
'saga-default-group';\n\n if (this.options.autoCreateTopics) {\n await this.ensureTopicsExist(topics);\n }\n\n await this.connectConsumerWithRetry(groupId, topics, handler, options);\n }\n\n private async ensureTopicsExist(topics: string[]): Promise<void> {\n const admin = this.kafka.admin();\n await admin.connect();\n try {\n const existing = await admin.listTopics();\n const missing = topics.filter((t) => !existing.includes(t));\n if (missing.length > 0) {\n await admin.createTopics({\n topics: missing.map((t) => ({ topic: t, numPartitions: 3, replicationFactor: 1 })),\n });\n }\n } finally {\n await admin.disconnect();\n }\n }\n\n private async connectConsumerWithRetry(\n groupId: string,\n topics: string[],\n handler: (message: InboundMessage) => Promise<void>,\n options?: TransportSubscribeOptions,\n maxAttempts = 10,\n ): Promise<void> {\n for (let attempt = 1; attempt <= maxAttempts; attempt++) {\n try {\n this.consumer = this.kafka.consumer({\n groupId,\n retry: { initialRetryTime: 500, retries: 8 },\n });\n\n await this.consumer.connect();\n\n for (const topic of topics) {\n await this.consumer.subscribe({\n topic,\n fromBeginning: options?.fromBeginning ?? false,\n });\n }\n\n await this.consumer.run({\n partitionsConsumedConcurrently: this.options.partitionsConsumedConcurrently ?? 
3,\n eachBatch: async (payload: EachBatchPayload) => {\n await this.processBatch(payload, handler);\n },\n });\n\n return;\n } catch (err) {\n this.logger.warn(\n `[KafkaTransport] Consumer failed to start (attempt ${attempt}/${maxAttempts}): ${(err as Error).message}`,\n );\n\n try {\n await this.consumer?.disconnect();\n } catch {\n // ignore disconnect errors\n }\n this.consumer = null;\n\n if (attempt === maxAttempts) {\n throw err;\n }\n\n const delay = Math.min(1000 * Math.pow(2, attempt - 1), 10000);\n await new Promise((r) => setTimeout(r, delay));\n }\n }\n }\n\n private async processBatch(\n payload: EachBatchPayload,\n handler: (message: InboundMessage) => Promise<void>,\n ): Promise<void> {\n const { batch, resolveOffset, heartbeat, isRunning, isStale } = payload;\n const tracker = new WatermarkTracker();\n\n const offsets = batch.messages.map((m) => m.offset);\n tracker.reset(offsets);\n\n // Group messages by key (sagaId) for ordered processing within saga\n const groups = new Map<string, typeof batch.messages>();\n\n for (const message of batch.messages) {\n const key = message.key?.toString() ?? '__no_key__';\n if (!groups.has(key)) {\n groups.set(key, []);\n }\n groups.get(key)!.push(message);\n }\n\n // Process groups in parallel, messages within each group sequentially\n await Promise.all(\n Array.from(groups.entries()).map(async ([, messages]) => {\n for (const message of messages) {\n if (!isRunning() || isStale()) {\n return;\n }\n\n const inbound: InboundMessage = {\n topic: batch.topic,\n key: message.key?.toString() ?? '',\n value: message.value?.toString() ?? 
'',\n headers: this.convertHeaders(message.headers),\n };\n\n await handler(inbound);\n\n tracker.markCompleted(message.offset);\n const committable = tracker.getCommittableOffset();\n if (committable) {\n resolveOffset(committable);\n }\n\n await heartbeat();\n }\n }),\n );\n }\n\n private convertHeaders(headers?: IHeaders): Record<string, string> {\n const result: Record<string, string> = {};\n if (!headers) return result;\n\n for (const [key, value] of Object.entries(headers)) {\n if (Buffer.isBuffer(value)) {\n result[key] = value.toString();\n } else if (typeof value === 'string') {\n result[key] = value;\n } else if (Array.isArray(value) && value.length > 0) {\n const first = value[0];\n result[key] = Buffer.isBuffer(first) ? first.toString() : String(first);\n }\n }\n\n return result;\n }\n}\n","export class WatermarkTracker {\n private arrivedOffsets: string[] = [];\n private completed = new Set<string>();\n\n reset(offsets: string[]): void {\n this.arrivedOffsets = offsets;\n this.completed.clear();\n }\n\n markCompleted(offset: string): void {\n this.completed.add(offset);\n }\n\n getCommittableOffset(): string | null {\n let lastSafe: string | null = null;\n for (const offset of this.arrivedOffsets) {\n if (this.completed.has(offset)) {\n lastSafe = offset;\n } else {\n break;\n }\n }\n return lastSafe;\n 
}\n}\n"],"mappings":";AAAA,SAAS,OAAuD,gBAAgB;AAQhF,SAAS,yBAAyB;;;ACR3B,IAAM,mBAAN,MAAuB;AAAA,EACpB,iBAA2B,CAAC;AAAA,EAC5B,YAAY,oBAAI,IAAY;AAAA,EAEpC,MAAM,SAAyB;AAC7B,SAAK,iBAAiB;AACtB,SAAK,UAAU,MAAM;AAAA,EACvB;AAAA,EAEA,cAAc,QAAsB;AAClC,SAAK,UAAU,IAAI,MAAM;AAAA,EAC3B;AAAA,EAEA,uBAAsC;AACpC,QAAI,WAA0B;AAC9B,eAAW,UAAU,KAAK,gBAAgB;AACxC,UAAI,KAAK,UAAU,IAAI,MAAM,GAAG;AAC9B,mBAAW;AAAA,MACb,OAAO;AACL;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;;;ADZO,IAAM,iBAAN,MAA8C;AAAA,EAMnD,YAAoB,SAAgC;AAAhC;AAClB,SAAK,SAAS,QAAQ,UAAU,IAAI,kBAAkB;AACtD,SAAK,QAAQ,IAAI,MAAM;AAAA,MACrB,UAAU,QAAQ,YAAY;AAAA,MAC9B,SAAS,QAAQ;AAAA,MACjB,KAAK,QAAQ;AAAA,MACb,MAAM,QAAQ;AAAA,MACd,mBAAmB,QAAQ;AAAA,MAC3B,uBAAuB,QAAQ;AAAA,MAC/B,gBAAgB,QAAQ;AAAA,MACxB,OAAO,QAAQ,SAAS;AAAA,QACtB,kBAAkB;AAAA,QAClB,SAAS;AAAA,MACX;AAAA,MACA,UAAU,QAAQ,YAAY,SAAS;AAAA,MACvC,eAAe,QAAQ;AAAA,IACzB,CAAC;AACD,SAAK,WAAW,KAAK,MAAM,SAAS;AAAA,EACtC;AAAA,EAvBQ;AAAA,EACA;AAAA,EACA,WAA4B;AAAA,EAC5B;AAAA,EAsBR,MAAM,UAAyB;AAC7B,UAAM,KAAK,SAAS,QAAQ;AAAA,EAC9B;AAAA,EAEA,MAAM,aAA4B;AAChC,UAAM,KAAK,SAAS,WAAW;AAC/B,QAAI,KAAK,UAAU;AACjB,YAAM,KAAK,SAAS,WAAW;AAAA,IACjC;AAAA,EACF;AAAA,EAEA,MAAM,QAAQ,SAAyC;AACrD,UAAM,KAAK,SAAS,KAAK;AAAA,MACvB,OAAO,QAAQ;AAAA,MACf,UAAU;AAAA,QACR;AAAA,UACE,KAAK,QAAQ;AAAA,UACb,OAAO,QAAQ;AAAA,UACf,SAAS,QAAQ;AAAA,QACnB;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,UACJ,QACA,SACA,SACe;AACf,UAAM,UAAU,SAAS,WAAW;AAEpC,QAAI,KAAK,QAAQ,kBAAkB;AACjC,YAAM,KAAK,kBAAkB,MAAM;AAAA,IACrC;AAEA,UAAM,KAAK,yBAAyB,SAAS,QAAQ,SAAS,OAAO;AAAA,EACvE;AAAA,EAEA,MAAc,kBAAkB,QAAiC;AAC/D,UAAM,QAAQ,KAAK,MAAM,MAAM;AAC/B,UAAM,MAAM,QAAQ;AACpB,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,WAAW;AACxC,YAAM,UAAU,OAAO,OAAO,CAAC,MAAM,CAAC,SAAS,SAAS,CAAC,CAAC;AAC1D,UAAI,QAAQ,SAAS,GAAG;AACtB,cAAM,MAAM,aAAa;AAAA,UACvB,QAAQ,QAAQ,IAAI,CAAC,OAAO,EAAE,OAAO,GAAG,eAAe,GAAG,mBAAmB,EAAE,EAAE;AAAA,QACnF,CAAC;AAAA,MACH;AAAA,IACF,UAAE;AACA,YAAM,MAAM,WAAW;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,MAAc,yBACZ,SACA,QACA,SACA,SACA,cAAc,IACC;AACf,aAAS,UAAU,GAAG,WAAW,aAAa,WAAW;AACvD,UAAI;AACF,aAAK,WAAW
,KAAK,MAAM,SAAS;AAAA,UAClC;AAAA,UACA,OAAO,EAAE,kBAAkB,KAAK,SAAS,EAAE;AAAA,QAC7C,CAAC;AAED,cAAM,KAAK,SAAS,QAAQ;AAE5B,mBAAW,SAAS,QAAQ;AAC1B,gBAAM,KAAK,SAAS,UAAU;AAAA,YAC5B;AAAA,YACA,eAAe,SAAS,iBAAiB;AAAA,UAC3C,CAAC;AAAA,QACH;AAEA,cAAM,KAAK,SAAS,IAAI;AAAA,UACtB,gCAAgC,KAAK,QAAQ,kCAAkC;AAAA,UAC/E,WAAW,OAAO,YAA8B;AAC9C,kBAAM,KAAK,aAAa,SAAS,OAAO;AAAA,UAC1C;AAAA,QACF,CAAC;AAED;AAAA,MACF,SAAS,KAAK;AACZ,aAAK,OAAO;AAAA,UACV,sDAAsD,OAAO,IAAI,WAAW,MAAO,IAAc,OAAO;AAAA,QAC1G;AAEA,YAAI;AACF,gBAAM,KAAK,UAAU,WAAW;AAAA,QAClC,QAAQ;AAAA,QAER;AACA,aAAK,WAAW;AAEhB,YAAI,YAAY,aAAa;AAC3B,gBAAM;AAAA,QACR;AAEA,cAAM,QAAQ,KAAK,IAAI,MAAO,KAAK,IAAI,GAAG,UAAU,CAAC,GAAG,GAAK;AAC7D,cAAM,IAAI,QAAQ,CAAC,MAAM,WAAW,GAAG,KAAK,CAAC;AAAA,MAC/C;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAc,aACZ,SACA,SACe;AACf,UAAM,EAAE,OAAO,eAAe,WAAW,WAAW,QAAQ,IAAI;AAChE,UAAM,UAAU,IAAI,iBAAiB;AAErC,UAAM,UAAU,MAAM,SAAS,IAAI,CAAC,MAAM,EAAE,MAAM;AAClD,YAAQ,MAAM,OAAO;AAGrB,UAAM,SAAS,oBAAI,IAAmC;AAEtD,eAAW,WAAW,MAAM,UAAU;AACpC,YAAM,MAAM,QAAQ,KAAK,SAAS,KAAK;AACvC,UAAI,CAAC,OAAO,IAAI,GAAG,GAAG;AACpB,eAAO,IAAI,KAAK,CAAC,CAAC;AAAA,MACpB;AACA,aAAO,IAAI,GAAG,EAAG,KAAK,OAAO;AAAA,IAC/B;AAGA,UAAM,QAAQ;AAAA,MACZ,MAAM,KAAK,OAAO,QAAQ,CAAC,EAAE,IAAI,OAAO,CAAC,EAAE,QAAQ,MAAM;AACvD,mBAAW,WAAW,UAAU;AAC9B,cAAI,CAAC,UAAU,KAAK,QAAQ,GAAG;AAC7B;AAAA,UACF;AAEA,gBAAM,UAA0B;AAAA,YAC9B,OAAO,MAAM;AAAA,YACb,KAAK,QAAQ,KAAK,SAAS,KAAK;AAAA,YAChC,OAAO,QAAQ,OAAO,SAAS,KAAK;AAAA,YACpC,SAAS,KAAK,eAAe,QAAQ,OAAO;AAAA,UAC9C;AAEA,gBAAM,QAAQ,OAAO;AAErB,kBAAQ,cAAc,QAAQ,MAAM;AACpC,gBAAM,cAAc,QAAQ,qBAAqB;AACjD,cAAI,aAAa;AACf,0BAAc,WAAW;AAAA,UAC3B;AAEA,gBAAM,UAAU;AAAA,QAClB;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA,EAEQ,eAAe,SAA4C;AACjE,UAAM,SAAiC,CAAC;AACxC,QAAI,CAAC,QAAS,QAAO;AAErB,eAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,OAAO,GAAG;AAClD,UAAI,OAAO,SAAS,KAAK,GAAG;AAC1B,eAAO,GAAG,IAAI,MAAM,SAAS;AAAA,MAC/B,WAAW,OAAO,UAAU,UAAU;AACpC,eAAO,GAAG,IAAI;AAAA,MAChB,WAAW,MAAM,QAAQ,KAAK,KAAK,MAAM,SAAS,GAAG;AACnD,cAAM,QAAQ,MAAM,CAAC;AACrB,eAAO,GAAG,IAAI,OAAO,SAAS,KAAK,IAAI,MAAM,SAAS,IAAI,OAAO,KAAK;AAAA
,MACxE;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AACF;","names":[]}
package/package.json ADDED
@@ -0,0 +1,59 @@
1
+ {
2
+ "name": "@fbsm/saga-transport-kafka",
3
+ "version": "0.0.1-beta.0",
4
+ "description": "KafkaJS transport adapter for saga choreography: eachBatch, watermark offset tracking",
5
+ "license": "MIT",
6
+ "author": "fbsm",
7
+ "repository": {
8
+ "type": "git",
9
+ "url": "https://github.com/fbsm/saga-library",
10
+ "directory": "packages/saga-transport-kafka"
11
+ },
12
+ "keywords": [
13
+ "saga",
14
+ "kafka",
15
+ "kafkajs",
16
+ "transport",
17
+ "microservices",
18
+ "event-driven"
19
+ ],
20
+ "publishConfig": {
21
+ "access": "public"
22
+ },
23
+ "type": "module",
24
+ "main": "./dist/index.cjs",
25
+ "module": "./dist/index.js",
26
+ "types": "./dist/index.d.ts",
27
+ "exports": {
28
+ ".": {
29
+ "import": {
30
+ "types": "./dist/index.d.ts",
31
+ "default": "./dist/index.js"
32
+ },
33
+ "require": {
34
+ "types": "./dist/index.d.cts",
35
+ "default": "./dist/index.cjs"
36
+ }
37
+ }
38
+ },
39
+ "files": [
40
+ "dist"
41
+ ],
42
+ "dependencies": {
43
+ "kafkajs": "^2.2.0",
44
+ "@fbsm/saga-core": "0.0.1-beta.0"
45
+ },
46
+ "peerDependencies": {
47
+ "@opentelemetry/instrumentation-kafkajs": "^0.7.0"
48
+ },
49
+ "peerDependenciesMeta": {
50
+ "@opentelemetry/instrumentation-kafkajs": {
51
+ "optional": true
52
+ }
53
+ },
54
+ "scripts": {
55
+ "build": "tsup",
56
+ "dev": "tsup --watch",
57
+ "typecheck": "tsc --noEmit"
58
+ }
59
+ }