@forklaunch/implementation-worker-kafka 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/LICENSE +21 -0
  2. package/lib/__test__/schemaEquality.test.d.ts +2 -0
  3. package/lib/__test__/schemaEquality.test.d.ts.map +1 -0
  4. package/lib/__test__/schemaEquality.test.js +16 -0
  5. package/lib/consumers/index.d.ts +2 -0
  6. package/lib/consumers/index.d.ts.map +1 -0
  7. package/lib/consumers/index.js +1 -0
  8. package/lib/consumers/kafkaWorker.consumer.d.ts +19 -0
  9. package/lib/consumers/kafkaWorker.consumer.d.ts.map +1 -0
  10. package/lib/consumers/kafkaWorker.consumer.js +149 -0
  11. package/lib/eject/consumers/index.ts +1 -0
  12. package/lib/eject/consumers/kafkaWorker.consumer.ts +180 -0
  13. package/lib/eject/domain/schemas/index.ts +1 -0
  14. package/lib/eject/domain/schemas/kafkaWorker.schema.ts +10 -0
  15. package/lib/eject/producers/index.ts +1 -0
  16. package/lib/eject/producers/kafkaWorker.producer.ts +33 -0
  17. package/lib/eject/types/index.ts +1 -0
  18. package/lib/eject/types/kafkaWorker.types.ts +8 -0
  19. package/lib/jest.config.d.ts +4 -0
  20. package/lib/jest.config.d.ts.map +1 -0
  21. package/lib/jest.config.js +19 -0
  22. package/lib/producers/index.d.ts +2 -0
  23. package/lib/producers/index.d.ts.map +1 -0
  24. package/lib/producers/index.js +1 -0
  25. package/lib/producers/kafkaWorker.producer.d.ts +11 -0
  26. package/lib/producers/kafkaWorker.producer.d.ts.map +1 -0
  27. package/lib/producers/kafkaWorker.producer.js +28 -0
  28. package/lib/schemas/index.d.ts +2 -0
  29. package/lib/schemas/index.d.ts.map +1 -0
  30. package/lib/schemas/index.js +1 -0
  31. package/lib/schemas/kafka.schema.d.ts +18 -0
  32. package/lib/schemas/kafka.schema.d.ts.map +1 -0
  33. package/lib/schemas/kafka.schema.js +4 -0
  34. package/lib/schemas/typebox/kafkaWorker.schema.d.ts +9 -0
  35. package/lib/schemas/typebox/kafkaWorker.schema.d.ts.map +1 -0
  36. package/lib/schemas/typebox/kafkaWorker.schema.js +9 -0
  37. package/lib/schemas/zod/kafkaWorker.schema.d.ts +9 -0
  38. package/lib/schemas/zod/kafkaWorker.schema.d.ts.map +1 -0
  39. package/lib/schemas/zod/kafkaWorker.schema.js +9 -0
  40. package/lib/tsconfig.tsbuildinfo +1 -0
  41. package/lib/types/index.d.ts +2 -0
  42. package/lib/types/index.d.ts.map +1 -0
  43. package/lib/types/index.js +1 -0
  44. package/lib/types/kafkaWorker.types.d.ts +9 -0
  45. package/lib/types/kafkaWorker.types.d.ts.map +1 -0
  46. package/lib/types/kafkaWorker.types.js +1 -0
  47. package/lib/vitest.config.d.ts +3 -0
  48. package/lib/vitest.config.d.ts.map +1 -0
  49. package/lib/vitest.config.js +7 -0
  50. package/package.json +65 -0
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 forklaunch
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/lib/__test__/schemaEquality.test.d.ts ADDED
@@ -0,0 +1,2 @@
+ export {};
+ //# sourceMappingURL=schemaEquality.test.d.ts.map
package/lib/__test__/schemaEquality.test.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"schemaEquality.test.d.ts","sourceRoot":"","sources":["../../__test__/schemaEquality.test.ts"],"names":[],"mappings":""}
package/lib/__test__/schemaEquality.test.js ADDED
@@ -0,0 +1,16 @@
+ import { isTrue } from '@forklaunch/common';
+ import { testSchemaEquality } from '@forklaunch/core/test';
+ import { KafkaWorkerOptionsSchema as TypeboxKafkaWorkerOptionsSchema } from '../schemas/typebox/kafkaWorker.schema';
+ import { KafkaWorkerOptionsSchema as ZodKafkaWorkerOptionsSchema } from '../schemas/zod/kafkaWorker.schema';
+ describe('schema equality', () => {
+     it('should be equal for kafka worker', () => {
+         expect(isTrue(testSchemaEquality(ZodKafkaWorkerOptionsSchema, TypeboxKafkaWorkerOptionsSchema, {
+             brokers: ['localhost:9092'],
+             clientId: 'test',
+             groupId: 'test',
+             retries: 1,
+             interval: 1000,
+             peekCount: 1
+         }))).toBeTruthy();
+     });
+ });
package/lib/consumers/index.d.ts ADDED
@@ -0,0 +1,2 @@
+ export * from './kafkaWorker.consumer';
+ //# sourceMappingURL=index.d.ts.map
package/lib/consumers/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../consumers/index.ts"],"names":[],"mappings":"AAAA,cAAc,wBAAwB,CAAC"}
package/lib/consumers/index.js ADDED
@@ -0,0 +1 @@
+ export * from './kafkaWorker.consumer';
package/lib/consumers/kafkaWorker.consumer.d.ts ADDED
@@ -0,0 +1,19 @@
+ import { WorkerConsumer } from '@forklaunch/interfaces-worker/interfaces';
+ import { WorkerEventEntity, WorkerFailureHandler, WorkerProcessFunction } from '@forklaunch/interfaces-worker/types';
+ import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
+ export declare class KafkaWorkerConsumer<EventEntity extends WorkerEventEntity> implements WorkerConsumer<EventEntity> {
+     protected readonly queueName: string;
+     protected readonly options: KafkaWorkerOptions;
+     protected readonly processEventsFunction: WorkerProcessFunction<EventEntity>;
+     protected readonly failureHandler: WorkerFailureHandler<EventEntity>;
+     private kafka;
+     private producer;
+     private consumer;
+     private processedMessages;
+     constructor(queueName: string, options: KafkaWorkerOptions, processEventsFunction: WorkerProcessFunction<EventEntity>, failureHandler: WorkerFailureHandler<EventEntity>);
+     private setupConsumer;
+     peekEvents(): Promise<EventEntity[]>;
+     start(): Promise<void>;
+     close(): Promise<void>;
+ }
+ //# sourceMappingURL=kafkaWorker.consumer.d.ts.map
package/lib/consumers/kafkaWorker.consumer.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"kafkaWorker.consumer.d.ts","sourceRoot":"","sources":["../../consumers/kafkaWorker.consumer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,0CAA0C,CAAC;AAC1E,OAAO,EACL,iBAAiB,EACjB,oBAAoB,EACpB,qBAAqB,EACtB,MAAM,qCAAqC,CAAC;AAE7C,OAAO,EAAE,kBAAkB,EAAE,MAAM,4BAA4B,CAAC;AAEhE,qBAAa,mBAAmB,CAAC,WAAW,SAAS,iBAAiB,CACpE,YAAW,cAAc,CAAC,WAAW,CAAC;IAQpC,SAAS,CAAC,QAAQ,CAAC,SAAS,EAAE,MAAM;IACpC,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,kBAAkB;IAC9C,SAAS,CAAC,QAAQ,CAAC,qBAAqB,EAAE,qBAAqB,CAAC,WAAW,CAAC;IAC5E,SAAS,CAAC,QAAQ,CAAC,cAAc,EAAE,oBAAoB,CAAC,WAAW,CAAC;IATtE,OAAO,CAAC,KAAK,CAAQ;IACrB,OAAO,CAAC,QAAQ,CAAW;IAC3B,OAAO,CAAC,QAAQ,CAAW;IAC3B,OAAO,CAAC,iBAAiB,CAA0B;gBAG9B,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,kBAAkB,EAC3B,qBAAqB,EAAE,qBAAqB,CAAC,WAAW,CAAC,EACzD,cAAc,EAAE,oBAAoB,CAAC,WAAW,CAAC;YAaxD,aAAa;IA4DrB,UAAU,IAAI,OAAO,CAAC,WAAW,EAAE,CAAC;IA4EpC,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAKtB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;CAI7B"}
package/lib/consumers/kafkaWorker.consumer.js ADDED
@@ -0,0 +1,149 @@
+ import { Kafka } from 'kafkajs';
+ export class KafkaWorkerConsumer {
+     queueName;
+     options;
+     processEventsFunction;
+     failureHandler;
+     kafka;
+     producer;
+     consumer;
+     processedMessages = new Set();
+     constructor(queueName, options, processEventsFunction, failureHandler) {
+         this.queueName = queueName;
+         this.options = options;
+         this.processEventsFunction = processEventsFunction;
+         this.failureHandler = failureHandler;
+         this.kafka = new Kafka({
+             clientId: this.options.clientId,
+             brokers: this.options.brokers
+         });
+         this.producer = this.kafka.producer();
+         this.consumer = this.kafka.consumer({
+             groupId: this.options.groupId
+         });
+     }
+     async setupConsumer() {
+         await this.consumer.connect();
+         await this.consumer.subscribe({
+             topic: this.queueName,
+             fromBeginning: false
+         });
+         await this.consumer.run({
+             eachMessage: async ({ topic, partition, message }) => {
+                 if (!message.value)
+                     return;
+                 const messageKey = `${topic}-${partition}-${message.offset}`;
+                 if (this.processedMessages.has(messageKey)) {
+                     return;
+                 }
+                 const events = JSON.parse(message.value.toString());
+                 try {
+                     await this.processEventsFunction(events);
+                     this.processedMessages.add(messageKey);
+                     await this.consumer.commitOffsets([
+                         {
+                             topic,
+                             partition,
+                             offset: (parseInt(message.offset) + 1).toString()
+                         }
+                     ]);
+                 }
+                 catch (error) {
+                     this.failureHandler([
+                         {
+                             value: events[0],
+                             error: error
+                         }
+                     ]);
+                     for (const event of events) {
+                         if (event.retryCount <= this.options.retries) {
+                             await this.producer.send({
+                                 topic: this.queueName,
+                                 messages: [
+                                     {
+                                         value: JSON.stringify([
+                                             {
+                                                 ...event,
+                                                 retryCount: event.retryCount + 1
+                                             }
+                                         ]),
+                                         key: event.id
+                                     }
+                                 ]
+                             });
+                         }
+                     }
+                 }
+             }
+         });
+     }
+     async peekEvents() {
+         const events = [];
+         const admin = this.kafka.admin();
+         await admin.connect();
+         try {
+             // Get topic metadata to find partitions
+             const metadata = await admin.fetchTopicMetadata({
+                 topics: [this.queueName]
+             });
+             const topic = metadata.topics[0];
+             if (!topic) {
+                 return events;
+             }
+             // For each partition, get the latest offset
+             for (const partition of topic.partitions) {
+                 const offsets = await admin.fetchTopicOffsets(this.queueName);
+                 const partitionOffset = offsets.find((o) => o.partition === partition.partitionId);
+                 if (!partitionOffset) {
+                     continue;
+                 }
+                 // Create a temporary consumer to read messages
+                 const peekConsumer = this.kafka.consumer({
+                     groupId: `${this.options.groupId}-peek-${Date.now()}`
+                 });
+                 try {
+                     await peekConsumer.connect();
+                     await peekConsumer.subscribe({
+                         topic: this.queueName,
+                         fromBeginning: false
+                     });
+                     const messagePromise = new Promise((resolve) => {
+                         peekConsumer.run({
+                             eachMessage: async ({ message }) => {
+                                 if (message.value && events.length < this.options.peekCount) {
+                                     const messageEvents = JSON.parse(message.value.toString());
+                                     events.push(...messageEvents);
+                                     if (events.length >= this.options.peekCount) {
+                                         resolve();
+                                     }
+                                 }
+                             }
+                         });
+                     });
+                     await Promise.race([
+                         messagePromise,
+                         new Promise((resolve) => setTimeout(resolve, 5000))
+                     ]);
+                     if (events.length >= this.options.peekCount) {
+                         break;
+                     }
+                 }
+                 finally {
+                     await peekConsumer.disconnect();
+                 }
+             }
+             return events;
+         }
+         finally {
+             await admin.disconnect();
+         }
+     }
+     async start() {
+         await this.setupConsumer();
+         await this.producer.connect();
+     }
+     async close() {
+         await this.producer.disconnect();
+         await this.consumer.disconnect();
+     }
+ }
package/lib/eject/consumers/index.ts ADDED
@@ -0,0 +1 @@
+ export * from './kafkaWorker.consumer';
package/lib/eject/consumers/kafkaWorker.consumer.ts ADDED
@@ -0,0 +1,180 @@
+ import { WorkerConsumer } from '@forklaunch/interfaces-worker/interfaces';
+ import {
+   WorkerEventEntity,
+   WorkerFailureHandler,
+   WorkerProcessFunction
+ } from '@forklaunch/interfaces-worker/types';
+ import { Consumer, Kafka, Producer } from 'kafkajs';
+ import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
+
+ export class KafkaWorkerConsumer<EventEntity extends WorkerEventEntity>
+   implements WorkerConsumer<EventEntity>
+ {
+   private kafka: Kafka;
+   private producer: Producer;
+   private consumer: Consumer;
+   private processedMessages: Set<string> = new Set();
+
+   constructor(
+     protected readonly queueName: string,
+     protected readonly options: KafkaWorkerOptions,
+     protected readonly processEventsFunction: WorkerProcessFunction<EventEntity>,
+     protected readonly failureHandler: WorkerFailureHandler<EventEntity>
+   ) {
+     this.kafka = new Kafka({
+       clientId: this.options.clientId,
+       brokers: this.options.brokers
+     });
+
+     this.producer = this.kafka.producer();
+     this.consumer = this.kafka.consumer({
+       groupId: this.options.groupId
+     });
+   }
+
+   private async setupConsumer() {
+     await this.consumer.connect();
+     await this.consumer.subscribe({
+       topic: this.queueName,
+       fromBeginning: false
+     });
+
+     await this.consumer.run({
+       eachMessage: async ({ topic, partition, message }) => {
+         if (!message.value) return;
+
+         const messageKey = `${topic}-${partition}-${message.offset}`;
+
+         if (this.processedMessages.has(messageKey)) {
+           return;
+         }
+
+         const events = JSON.parse(message.value.toString()) as EventEntity[];
+
+         try {
+           await this.processEventsFunction(events);
+           this.processedMessages.add(messageKey);
+
+           await this.consumer.commitOffsets([
+             {
+               topic,
+               partition,
+               offset: (parseInt(message.offset) + 1).toString()
+             }
+           ]);
+         } catch (error) {
+           this.failureHandler([
+             {
+               value: events[0],
+               error: error as Error
+             }
+           ]);
+           for (const event of events) {
+             if (event.retryCount <= this.options.retries) {
+               await this.producer.send({
+                 topic: this.queueName,
+                 messages: [
+                   {
+                     value: JSON.stringify([
+                       {
+                         ...event,
+                         retryCount: event.retryCount + 1
+                       }
+                     ]),
+                     key: event.id
+                   }
+                 ]
+               });
+             }
+           }
+         }
+       }
+     });
+   }
+
+   async peekEvents(): Promise<EventEntity[]> {
+     const events: EventEntity[] = [];
+
+     const admin = this.kafka.admin();
+     await admin.connect();
+
+     try {
+       // Get topic metadata to find partitions
+       const metadata = await admin.fetchTopicMetadata({
+         topics: [this.queueName]
+       });
+       const topic = metadata.topics[0];
+
+       if (!topic) {
+         return events;
+       }
+
+       // For each partition, get the latest offset
+       for (const partition of topic.partitions) {
+         const offsets = await admin.fetchTopicOffsets(this.queueName);
+         const partitionOffset = offsets.find(
+           (o) => o.partition === partition.partitionId
+         );
+
+         if (!partitionOffset) {
+           continue;
+         }
+
+         // Create a temporary consumer to read messages
+         const peekConsumer = this.kafka.consumer({
+           groupId: `${this.options.groupId}-peek-${Date.now()}`
+         });
+
+         try {
+           await peekConsumer.connect();
+           await peekConsumer.subscribe({
+             topic: this.queueName,
+             fromBeginning: false
+           });
+
+           const messagePromise = new Promise<void>((resolve) => {
+             peekConsumer.run({
+               eachMessage: async ({ message }) => {
+                 if (message.value && events.length < this.options.peekCount) {
+                   const messageEvents = JSON.parse(
+                     message.value.toString()
+                   ) as EventEntity[];
+                   events.push(...messageEvents);
+
+                   if (events.length >= this.options.peekCount) {
+                     resolve();
+                   }
+                 }
+               }
+             });
+           });
+
+           await Promise.race([
+             messagePromise,
+             new Promise((resolve) => setTimeout(resolve, 5000))
+           ]);
+
+           if (events.length >= this.options.peekCount) {
+             break;
+           }
+         } finally {
+           await peekConsumer.disconnect();
+         }
+       }
+
+       return events;
+     } finally {
+       await admin.disconnect();
+     }
+   }
+
+   async start(): Promise<void> {
+     await this.setupConsumer();
+     await this.producer.connect();
+   }
+
+   async close(): Promise<void> {
+     await this.producer.disconnect();
+     await this.consumer.disconnect();
+   }
+ }
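For orientation, here is a minimal usage sketch of the consumer class above (not part of the package). The import paths, topic name, event shape and handler bodies are illustrative assumptions; the real WorkerEventEntity constraint from @forklaunch/interfaces-worker may require more fields than the `id` and `retryCount` that the retry path in this consumer actually touches.

import { KafkaWorkerConsumer } from './consumers/kafkaWorker.consumer';
import { KafkaWorkerOptions } from './types/kafkaWorker.types';

// Hypothetical event shape; the consumer's retry logic only relies on `id` and `retryCount`.
type EmailEvent = { id: string; retryCount: number; to: string };

const options: KafkaWorkerOptions = {
  brokers: ['localhost:9092'],
  clientId: 'email-worker',
  groupId: 'email-worker-group',
  retries: 3,
  interval: 1000,
  peekCount: 10
};

const consumer = new KafkaWorkerConsumer<EmailEvent>(
  'email-events', // queueName doubles as the Kafka topic
  options,
  async (events) => {
    // WorkerProcessFunction: receives the batch parsed from one message value
    for (const event of events) {
      console.log('processing', event.id, '->', event.to);
    }
  },
  async (failures) => {
    // WorkerFailureHandler: receives { value, error } pairs when processing throws
    for (const { value, error } of failures) {
      console.error('failed event', value?.id, error);
    }
  }
);

await consumer.start(); // connects the consumer and producer, subscribes to the topic
// ...on shutdown:
await consumer.close(); // disconnects both Kafka clients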
package/lib/eject/domain/schemas/index.ts ADDED
@@ -0,0 +1 @@
+ export * from './kafka.schema';
package/lib/eject/domain/schemas/kafkaWorker.schema.ts ADDED
@@ -0,0 +1,10 @@
+ import { array, number, string } from '@{{app_name}}/core';
+
+ export const KafkaWorkerOptionsSchema = {
+   brokers: array(string),
+   clientId: string,
+   groupId: string,
+   retries: number,
+   interval: number,
+   peekCount: number
+ };
package/lib/eject/producers/index.ts ADDED
@@ -0,0 +1 @@
+ export * from './kafkaWorker.producer';
package/lib/eject/producers/kafkaWorker.producer.ts ADDED
@@ -0,0 +1,33 @@
+ import { WorkerEventEntity } from '@forklaunch/interfaces-worker/types';
+ import { Kafka } from 'kafkajs';
+ import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
+
+ export class KafkaWorkerProducer<EventEntity extends WorkerEventEntity> {
+   private producer;
+
+   constructor(
+     private readonly queueName: string,
+     private readonly options: KafkaWorkerOptions
+   ) {
+     const kafka = new Kafka({
+       clientId: this.options.clientId,
+       brokers: this.options.brokers
+     });
+     this.producer = kafka.producer();
+     this.producer.connect();
+   }
+
+   async enqueueJob(event: EventEntity): Promise<void> {
+     await this.producer.send({
+       topic: this.queueName,
+       messages: [{ value: JSON.stringify([event]) }]
+     });
+   }
+
+   async enqueueBatchJobs(events: EventEntity[]): Promise<void> {
+     await this.producer.send({
+       topic: this.queueName,
+       messages: events.map((event) => ({ value: JSON.stringify(event) }))
+     });
+   }
+ }
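A corresponding sketch for the producer above, reusing the hypothetical EmailEvent type and options object from the consumer sketch earlier; topic name and payloads are illustrative.

import { KafkaWorkerProducer } from './producers/kafkaWorker.producer';

const producer = new KafkaWorkerProducer<EmailEvent>('email-events', options);

// enqueueJob wraps the single event in a one-element array before serializing it
await producer.enqueueJob({ id: '1', retryCount: 0, to: 'a@example.com' });

// enqueueBatchJobs sends one Kafka message per event, each serialized on its own
await producer.enqueueBatchJobs([
  { id: '2', retryCount: 0, to: 'b@example.com' },
  { id: '3', retryCount: 0, to: 'c@example.com' }
]);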
package/lib/eject/types/index.ts ADDED
@@ -0,0 +1 @@
+ export * from './kafkaWorker.types';
package/lib/eject/types/kafkaWorker.types.ts ADDED
@@ -0,0 +1,8 @@
+ export type KafkaWorkerOptions = {
+   brokers: string[];
+   clientId: string;
+   groupId: string;
+   retries: number;
+   interval: number;
+   peekCount: number;
+ };
package/lib/jest.config.d.ts ADDED
@@ -0,0 +1,4 @@
+ import type { JestConfigWithTsJest } from 'ts-jest';
+ declare const jestConfig: JestConfigWithTsJest;
+ export default jestConfig;
+ //# sourceMappingURL=jest.config.d.ts.map
package/lib/jest.config.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"jest.config.d.ts","sourceRoot":"","sources":["../jest.config.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,oBAAoB,EAAE,MAAM,SAAS,CAAC;AAEpD,QAAA,MAAM,UAAU,EAAE,oBAiBjB,CAAC;AAEF,eAAe,UAAU,CAAC"}
package/lib/jest.config.js ADDED
@@ -0,0 +1,19 @@
+ const jestConfig = {
+     preset: 'ts-jest/presets/default-esm', // or other ESM presets
+     moduleNameMapper: {
+         '^(\\.{1,2}/.*)\\.js$': '$1'
+     },
+     transform: {
+         // '^.+\\.[tj]sx?$' to process ts,js,tsx,jsx with `ts-jest`
+         // '^.+\\.m?[tj]sx?$' to process ts,js,tsx,jsx,mts,mjs,mtsx,mjsx with `ts-jest`
+         '^.+\\.[tj]sx?$': [
+             'ts-jest',
+             {
+                 useESM: true
+             }
+         ],
+         '^.+\\.js$': 'babel-jest'
+     },
+     testPathIgnorePatterns: ['.*dist/', '.*node_modules/']
+ };
+ export default jestConfig;
package/lib/producers/index.d.ts ADDED
@@ -0,0 +1,2 @@
+ export * from './kafkaWorker.producer';
+ //# sourceMappingURL=index.d.ts.map
package/lib/producers/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../producers/index.ts"],"names":[],"mappings":"AAAA,cAAc,wBAAwB,CAAC"}
package/lib/producers/index.js ADDED
@@ -0,0 +1 @@
+ export * from './kafkaWorker.producer';
package/lib/producers/kafkaWorker.producer.d.ts ADDED
@@ -0,0 +1,11 @@
+ import { WorkerEventEntity } from '@forklaunch/interfaces-worker/types';
+ import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
+ export declare class KafkaWorkerProducer<EventEntity extends WorkerEventEntity> {
+     private readonly queueName;
+     private readonly options;
+     private producer;
+     constructor(queueName: string, options: KafkaWorkerOptions);
+     enqueueJob(event: EventEntity): Promise<void>;
+     enqueueBatchJobs(events: EventEntity[]): Promise<void>;
+ }
+ //# sourceMappingURL=kafkaWorker.producer.d.ts.map
package/lib/producers/kafkaWorker.producer.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"kafkaWorker.producer.d.ts","sourceRoot":"","sources":["../../producers/kafkaWorker.producer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,iBAAiB,EAAE,MAAM,qCAAqC,CAAC;AAExE,OAAO,EAAE,kBAAkB,EAAE,MAAM,4BAA4B,CAAC;AAEhE,qBAAa,mBAAmB,CAAC,WAAW,SAAS,iBAAiB;IAIlE,OAAO,CAAC,QAAQ,CAAC,SAAS;IAC1B,OAAO,CAAC,QAAQ,CAAC,OAAO;IAJ1B,OAAO,CAAC,QAAQ,CAAC;gBAGE,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,kBAAkB;IAUxC,UAAU,CAAC,KAAK,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC;IAO7C,gBAAgB,CAAC,MAAM,EAAE,WAAW,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;CAM7D"}
package/lib/producers/kafkaWorker.producer.js ADDED
@@ -0,0 +1,28 @@
+ import { Kafka } from 'kafkajs';
+ export class KafkaWorkerProducer {
+     queueName;
+     options;
+     producer;
+     constructor(queueName, options) {
+         this.queueName = queueName;
+         this.options = options;
+         const kafka = new Kafka({
+             clientId: this.options.clientId,
+             brokers: this.options.brokers
+         });
+         this.producer = kafka.producer();
+         this.producer.connect();
+     }
+     async enqueueJob(event) {
+         await this.producer.send({
+             topic: this.queueName,
+             messages: [{ value: JSON.stringify([event]) }]
+         });
+     }
+     async enqueueBatchJobs(events) {
+         await this.producer.send({
+             topic: this.queueName,
+             messages: events.map((event) => ({ value: JSON.stringify(event) }))
+         });
+     }
+ }
package/lib/schemas/index.d.ts ADDED
@@ -0,0 +1,2 @@
+ export * from './kafka.schema';
+ //# sourceMappingURL=index.d.ts.map
package/lib/schemas/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../schemas/index.ts"],"names":[],"mappings":"AAAA,cAAc,gBAAgB,CAAC"}
package/lib/schemas/index.js ADDED
@@ -0,0 +1 @@
+ export * from './kafka.schema';
package/lib/schemas/kafka.schema.d.ts ADDED
@@ -0,0 +1,18 @@
+ export declare const KafkaWorkerSchemas: <SchemaValidator extends import("@forklaunch/validator").AnySchemaValidator>(options: Record<string, unknown> & {
+     validator: SchemaValidator;
+ }) => import("@forklaunch/core/mappers").SchemasByValidator<SchemaValidator, (options: Record<string, unknown>) => {
+     brokers: import("@sinclair/typebox").TArray<import("@sinclair/typebox").TString>;
+     clientId: import("@sinclair/typebox").TString;
+     groupId: import("@sinclair/typebox").TString;
+     retries: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+     interval: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+     peekCount: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+ }, (options: Record<string, unknown>) => {
+     brokers: import("zod").ZodArray<import("zod").ZodString, "many">;
+     clientId: import("zod").ZodString;
+     groupId: import("zod").ZodString;
+     retries: import("zod").ZodNumber;
+     interval: import("zod").ZodNumber;
+     peekCount: import("zod").ZodNumber;
+ }>;
+ //# sourceMappingURL=kafka.schema.d.ts.map
package/lib/schemas/kafka.schema.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"kafka.schema.d.ts","sourceRoot":"","sources":["../../schemas/kafka.schema.ts"],"names":[],"mappings":"AAIA,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;EAG9B,CAAC"}
package/lib/schemas/kafka.schema.js ADDED
@@ -0,0 +1,4 @@
+ import { serviceSchemaResolver } from '@forklaunch/core/mappers';
+ import { KafkaWorkerOptionsSchema as TypeBoxSchemas } from './typebox/kafkaWorker.schema';
+ import { KafkaWorkerOptionsSchema as ZodSchemas } from './zod/kafkaWorker.schema';
+ export const KafkaWorkerSchemas = serviceSchemaResolver(() => TypeBoxSchemas, () => ZodSchemas);
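A minimal sketch of how the resolver exported above might be consumed, based on the signature declared in kafka.schema.d.ts; the import path and the `schemaValidator` placeholder are assumptions, with the validator instance expected to come from the consuming application.

import { AnySchemaValidator } from '@forklaunch/validator';
import { KafkaWorkerSchemas } from './schemas/kafka.schema';

// Placeholder: any Zod- or TypeBox-backed validator instance used by the application.
declare const schemaValidator: AnySchemaValidator;

// Resolves to the KafkaWorkerOptionsSchema variant matching the validator's flavor.
const kafkaWorkerOptionsSchemas = KafkaWorkerSchemas({ validator: schemaValidator });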
package/lib/schemas/typebox/kafkaWorker.schema.d.ts ADDED
@@ -0,0 +1,9 @@
+ export declare const KafkaWorkerOptionsSchema: {
+     brokers: import("@sinclair/typebox").TArray<import("@sinclair/typebox").TString>;
+     clientId: import("@sinclair/typebox").TString;
+     groupId: import("@sinclair/typebox").TString;
+     retries: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+     interval: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+     peekCount: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+ };
+ //# sourceMappingURL=kafkaWorker.schema.d.ts.map
package/lib/schemas/typebox/kafkaWorker.schema.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"kafkaWorker.schema.d.ts","sourceRoot":"","sources":["../../../schemas/typebox/kafkaWorker.schema.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,wBAAwB;;;;;;;CAOpC,CAAC"}
package/lib/schemas/typebox/kafkaWorker.schema.js ADDED
@@ -0,0 +1,9 @@
+ import { array, number, string } from '@forklaunch/validator/typebox';
+ export const KafkaWorkerOptionsSchema = {
+     brokers: array(string),
+     clientId: string,
+     groupId: string,
+     retries: number,
+     interval: number,
+     peekCount: number
+ };
package/lib/schemas/zod/kafkaWorker.schema.d.ts ADDED
@@ -0,0 +1,9 @@
+ export declare const KafkaWorkerOptionsSchema: {
+     brokers: import("zod").ZodArray<import("zod").ZodString, "many">;
+     clientId: import("zod").ZodString;
+     groupId: import("zod").ZodString;
+     retries: import("zod").ZodNumber;
+     interval: import("zod").ZodNumber;
+     peekCount: import("zod").ZodNumber;
+ };
+ //# sourceMappingURL=kafkaWorker.schema.d.ts.map
package/lib/schemas/zod/kafkaWorker.schema.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"kafkaWorker.schema.d.ts","sourceRoot":"","sources":["../../../schemas/zod/kafkaWorker.schema.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,wBAAwB;;;;;;;CAOpC,CAAC"}