@forklaunch/implementation-worker-kafka 0.1.1 → 0.1.3
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- package/package.json +3 -3
- package/lib/__test__/schemaEquality.test.d.ts +0 -2
- package/lib/__test__/schemaEquality.test.d.ts.map +0 -1
- package/lib/__test__/schemaEquality.test.js +0 -24
- package/lib/consumers/index.d.ts +0 -2
- package/lib/consumers/index.d.ts.map +0 -1
- package/lib/consumers/index.js +0 -1
- package/lib/consumers/kafkaWorker.consumer.d.ts +0 -32
- package/lib/consumers/kafkaWorker.consumer.d.ts.map +0 -1
- package/lib/consumers/kafkaWorker.consumer.js +0 -147
- package/lib/eject/consumers/index.ts +0 -1
- package/lib/eject/consumers/kafkaWorker.consumer.ts +0 -182
- package/lib/eject/domain/schemas/index.ts +0 -1
- package/lib/eject/domain/schemas/kafkaWorker.schema.ts +0 -10
- package/lib/eject/producers/index.ts +0 -1
- package/lib/eject/producers/kafkaWorker.producer.ts +0 -36
- package/lib/eject/types/index.ts +0 -1
- package/lib/eject/types/kafkaWorker.types.ts +0 -8
- package/lib/jest.config.d.ts +0 -4
- package/lib/jest.config.d.ts.map +0 -1
- package/lib/jest.config.js +0 -19
- package/lib/producers/index.d.ts +0 -2
- package/lib/producers/index.d.ts.map +0 -1
- package/lib/producers/index.js +0 -1
- package/lib/producers/kafkaWorker.producer.d.ts +0 -14
- package/lib/producers/kafkaWorker.producer.d.ts.map +0 -1
- package/lib/producers/kafkaWorker.producer.js +0 -28
- package/lib/schemas/index.d.ts +0 -2
- package/lib/schemas/index.d.ts.map +0 -1
- package/lib/schemas/index.js +0 -1
- package/lib/schemas/kafka.schema.d.ts +0 -64
- package/lib/schemas/kafka.schema.d.ts.map +0 -1
- package/lib/schemas/kafka.schema.js +0 -7
- package/lib/schemas/typebox/kafkaWorker.schema.d.ts +0 -47
- package/lib/schemas/typebox/kafkaWorker.schema.d.ts.map +0 -1
- package/lib/schemas/typebox/kafkaWorker.schema.js +0 -9
- package/lib/schemas/zod/kafkaWorker.schema.d.ts +0 -9
- package/lib/schemas/zod/kafkaWorker.schema.d.ts.map +0 -1
- package/lib/schemas/zod/kafkaWorker.schema.js +0 -9
- package/lib/tsconfig.tsbuildinfo +0 -1
- package/lib/types/index.d.ts +0 -2
- package/lib/types/index.d.ts.map +0 -1
- package/lib/types/index.js +0 -1
- package/lib/types/kafkaWorker.types.d.ts +0 -9
- package/lib/types/kafkaWorker.types.d.ts.map +0 -1
- package/lib/types/kafkaWorker.types.js +0 -1
- package/lib/vitest.config.d.ts +0 -3
- package/lib/vitest.config.d.ts.map +0 -1
- package/lib/vitest.config.js +0 -7
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@forklaunch/implementation-worker-kafka",
-  "version": "0.1.1",
+  "version": "0.1.3",
   "description": "Kafka implementation for forklaunch workers",
   "homepage": "https://github.com/forklaunch/forklaunch-js#readme",
   "bugs": {
@@ -38,11 +38,11 @@
     "lib/**"
   ],
   "dependencies": {
-    "@forklaunch/core": "
+    "@forklaunch/core": "^0.7.4",
     "@sinclair/typebox": "^0.34.33",
     "kafkajs": "^2.2.4",
     "zod": "^3.24.3",
-    "@forklaunch/interfaces-worker": "0.1.
+    "@forklaunch/interfaces-worker": "0.1.3"
   },
   "devDependencies": {
     "depcheck": "^1.4.7",
package/lib/__test__/schemaEquality.test.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"schemaEquality.test.d.ts","sourceRoot":"","sources":["../../__test__/schemaEquality.test.ts"],"names":[],"mappings":""}
package/lib/__test__/schemaEquality.test.js
DELETED
@@ -1,24 +0,0 @@
-import { isTrue } from '@forklaunch/common';
-import { testSchemaEquality } from '@forklaunch/core/test';
-import { KafkaWorkerOptionsSchema as TypeboxKafkaWorkerOptionsSchema } from '../schemas/typebox/kafkaWorker.schema';
-import { KafkaWorkerOptionsSchema as ZodKafkaWorkerOptionsSchema } from '../schemas/zod/kafkaWorker.schema';
-describe('schema equality', () => {
-  it('should be equal for bullmq worker', () => {
-    expect(
-      isTrue(
-        testSchemaEquality(
-          ZodKafkaWorkerOptionsSchema,
-          TypeboxKafkaWorkerOptionsSchema,
-          {
-            brokers: ['localhost:9092'],
-            clientId: 'test',
-            groupId: 'test',
-            retries: 1,
-            interval: 1000,
-            peekCount: 1
-          }
-        )
-      )
-    ).toBeTruthy();
-  });
-});
package/lib/consumers/index.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../consumers/index.ts"],"names":[],"mappings":"AAAA,cAAc,wBAAwB,CAAC"}
package/lib/consumers/index.js
DELETED
@@ -1 +0,0 @@
-export * from './kafkaWorker.consumer';
package/lib/consumers/kafkaWorker.consumer.d.ts
DELETED
@@ -1,32 +0,0 @@
-import { WorkerConsumer } from '@forklaunch/interfaces-worker/interfaces';
-import {
-  WorkerEventEntity,
-  WorkerFailureHandler,
-  WorkerProcessFunction
-} from '@forklaunch/interfaces-worker/types';
-import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
-export declare class KafkaWorkerConsumer<
-  EventEntity extends WorkerEventEntity,
-  Options extends KafkaWorkerOptions
-> implements WorkerConsumer<EventEntity>
-{
-  protected readonly queueName: string;
-  protected readonly options: Options;
-  protected readonly processEventsFunction: WorkerProcessFunction<EventEntity>;
-  protected readonly failureHandler: WorkerFailureHandler<EventEntity>;
-  private kafka;
-  private producer;
-  private consumer;
-  private processedMessages;
-  constructor(
-    queueName: string,
-    options: Options,
-    processEventsFunction: WorkerProcessFunction<EventEntity>,
-    failureHandler: WorkerFailureHandler<EventEntity>
-  );
-  private setupConsumer;
-  peekEvents(): Promise<EventEntity[]>;
-  start(): Promise<void>;
-  close(): Promise<void>;
-}
-//# sourceMappingURL=kafkaWorker.consumer.d.ts.map
package/lib/consumers/kafkaWorker.consumer.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"kafkaWorker.consumer.d.ts","sourceRoot":"","sources":["../../consumers/kafkaWorker.consumer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,0CAA0C,CAAC;AAC1E,OAAO,EACL,iBAAiB,EACjB,oBAAoB,EACpB,qBAAqB,EACtB,MAAM,qCAAqC,CAAC;AAE7C,OAAO,EAAE,kBAAkB,EAAE,MAAM,4BAA4B,CAAC;AAEhE,qBAAa,mBAAmB,CAC9B,WAAW,SAAS,iBAAiB,EACrC,OAAO,SAAS,kBAAkB,CAClC,YAAW,cAAc,CAAC,WAAW,CAAC;IAQpC,SAAS,CAAC,QAAQ,CAAC,SAAS,EAAE,MAAM;IACpC,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,OAAO;IACnC,SAAS,CAAC,QAAQ,CAAC,qBAAqB,EAAE,qBAAqB,CAAC,WAAW,CAAC;IAC5E,SAAS,CAAC,QAAQ,CAAC,cAAc,EAAE,oBAAoB,CAAC,WAAW,CAAC;IATtE,OAAO,CAAC,KAAK,CAAQ;IACrB,OAAO,CAAC,QAAQ,CAAW;IAC3B,OAAO,CAAC,QAAQ,CAAW;IAC3B,OAAO,CAAC,iBAAiB,CAA0B;gBAG9B,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,OAAO,EAChB,qBAAqB,EAAE,qBAAqB,CAAC,WAAW,CAAC,EACzD,cAAc,EAAE,oBAAoB,CAAC,WAAW,CAAC;YAaxD,aAAa;IA4DrB,UAAU,IAAI,OAAO,CAAC,WAAW,EAAE,CAAC;IA4EpC,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAKtB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;CAI7B"}
package/lib/consumers/kafkaWorker.consumer.js
DELETED
@@ -1,147 +0,0 @@
-import { Kafka } from 'kafkajs';
-export class KafkaWorkerConsumer {
-  queueName;
-  options;
-  processEventsFunction;
-  failureHandler;
-  kafka;
-  producer;
-  consumer;
-  processedMessages = new Set();
-  constructor(queueName, options, processEventsFunction, failureHandler) {
-    this.queueName = queueName;
-    this.options = options;
-    this.processEventsFunction = processEventsFunction;
-    this.failureHandler = failureHandler;
-    this.kafka = new Kafka({
-      clientId: this.options.clientId,
-      brokers: this.options.brokers
-    });
-    this.producer = this.kafka.producer();
-    this.consumer = this.kafka.consumer({
-      groupId: this.options.groupId
-    });
-  }
-  async setupConsumer() {
-    await this.consumer.connect();
-    await this.consumer.subscribe({
-      topic: this.queueName,
-      fromBeginning: false
-    });
-    await this.consumer.run({
-      eachMessage: async ({ topic, partition, message }) => {
-        if (!message.value) return;
-        const messageKey = `${topic}-${partition}-${message.offset}`;
-        if (this.processedMessages.has(messageKey)) {
-          return;
-        }
-        const events = JSON.parse(message.value.toString());
-        try {
-          await this.processEventsFunction(events);
-          this.processedMessages.add(messageKey);
-          await this.consumer.commitOffsets([
-            {
-              topic,
-              partition,
-              offset: (parseInt(message.offset) + 1).toString()
-            }
-          ]);
-        } catch (error) {
-          this.failureHandler([
-            {
-              value: events[0],
-              error: error
-            }
-          ]);
-          for (const event of events) {
-            if (event.retryCount <= this.options.retries) {
-              await this.producer.send({
-                topic: this.queueName,
-                messages: [
-                  {
-                    value: JSON.stringify([
-                      {
-                        ...event,
-                        retryCount: event.retryCount + 1
-                      }
-                    ]),
-                    key: event.id
-                  }
-                ]
-              });
-            }
-          }
-        }
-      }
-    });
-  }
-  async peekEvents() {
-    const events = [];
-    const admin = this.kafka.admin();
-    await admin.connect();
-    try {
-      // Get topic metadata to find partitions
-      const metadata = await admin.fetchTopicMetadata({
-        topics: [this.queueName]
-      });
-      const topic = metadata.topics[0];
-      if (!topic) {
-        return events;
-      }
-      // For each partition, get the latest offset
-      for (const partition of topic.partitions) {
-        const offsets = await admin.fetchTopicOffsets(this.queueName);
-        const partitionOffset = offsets.find(
-          (o) => o.partition === partition.partitionId
-        );
-        if (!partitionOffset) {
-          continue;
-        }
-        // Create a temporary consumer to read messages
-        const peekConsumer = this.kafka.consumer({
-          groupId: `${this.options.groupId}-peek-${Date.now()}`
-        });
-        try {
-          await peekConsumer.connect();
-          await peekConsumer.subscribe({
-            topic: this.queueName,
-            fromBeginning: false
-          });
-          const messagePromise = new Promise((resolve) => {
-            peekConsumer.run({
-              eachMessage: async ({ message }) => {
-                if (message.value && events.length < this.options.peekCount) {
-                  const messageEvents = JSON.parse(message.value.toString());
-                  events.push(...messageEvents);
-                  if (events.length >= this.options.peekCount) {
-                    resolve();
-                  }
-                }
-              }
-            });
-          });
-          await Promise.race([
-            messagePromise,
-            new Promise((resolve) => setTimeout(resolve, 5000))
-          ]);
-          if (events.length >= this.options.peekCount) {
-            break;
-          }
-        } finally {
-          await peekConsumer.disconnect();
-        }
-      }
-      return events;
-    } finally {
-      await admin.disconnect();
-    }
-  }
-  async start() {
-    await this.setupConsumer();
-    await this.producer.connect();
-  }
-  async close() {
-    await this.producer.disconnect();
-    await this.consumer.disconnect();
-  }
-}
package/lib/eject/consumers/index.ts
DELETED
@@ -1 +0,0 @@
-export * from './kafkaWorker.consumer';
package/lib/eject/consumers/kafkaWorker.consumer.ts
DELETED
@@ -1,182 +0,0 @@
-import { WorkerConsumer } from '@forklaunch/interfaces-worker/interfaces';
-import {
-  WorkerEventEntity,
-  WorkerFailureHandler,
-  WorkerProcessFunction
-} from '@forklaunch/interfaces-worker/types';
-import { Consumer, Kafka, Producer } from 'kafkajs';
-import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
-
-export class KafkaWorkerConsumer<
-  EventEntity extends WorkerEventEntity,
-  Options extends KafkaWorkerOptions
-> implements WorkerConsumer<EventEntity>
-{
-  private kafka: Kafka;
-  private producer: Producer;
-  private consumer: Consumer;
-  private processedMessages: Set<string> = new Set();
-
-  constructor(
-    protected readonly queueName: string,
-    protected readonly options: Options,
-    protected readonly processEventsFunction: WorkerProcessFunction<EventEntity>,
-    protected readonly failureHandler: WorkerFailureHandler<EventEntity>
-  ) {
-    this.kafka = new Kafka({
-      clientId: this.options.clientId,
-      brokers: this.options.brokers
-    });
-
-    this.producer = this.kafka.producer();
-    this.consumer = this.kafka.consumer({
-      groupId: this.options.groupId
-    });
-  }
-
-  private async setupConsumer() {
-    await this.consumer.connect();
-    await this.consumer.subscribe({
-      topic: this.queueName,
-      fromBeginning: false
-    });
-
-    await this.consumer.run({
-      eachMessage: async ({ topic, partition, message }) => {
-        if (!message.value) return;
-
-        const messageKey = `${topic}-${partition}-${message.offset}`;
-
-        if (this.processedMessages.has(messageKey)) {
-          return;
-        }
-
-        const events = JSON.parse(message.value.toString()) as EventEntity[];
-
-        try {
-          await this.processEventsFunction(events);
-          this.processedMessages.add(messageKey);
-
-          await this.consumer.commitOffsets([
-            {
-              topic,
-              partition,
-              offset: (parseInt(message.offset) + 1).toString()
-            }
-          ]);
-        } catch (error) {
-          this.failureHandler([
-            {
-              value: events[0],
-              error: error as Error
-            }
-          ]);
-          for (const event of events) {
-            if (event.retryCount <= this.options.retries) {
-              await this.producer.send({
-                topic: this.queueName,
-                messages: [
-                  {
-                    value: JSON.stringify([
-                      {
-                        ...event,
-                        retryCount: event.retryCount + 1
-                      }
-                    ]),
-                    key: event.id
-                  }
-                ]
-              });
-            }
-          }
-        }
-      }
-    });
-  }
-
-  async peekEvents(): Promise<EventEntity[]> {
-    const events: EventEntity[] = [];
-
-    const admin = this.kafka.admin();
-    await admin.connect();
-
-    try {
-      // Get topic metadata to find partitions
-      const metadata = await admin.fetchTopicMetadata({
-        topics: [this.queueName]
-      });
-      const topic = metadata.topics[0];
-
-      if (!topic) {
-        return events;
-      }
-
-      // For each partition, get the latest offset
-      for (const partition of topic.partitions) {
-        const offsets = await admin.fetchTopicOffsets(this.queueName);
-        const partitionOffset = offsets.find(
-          (o) => o.partition === partition.partitionId
-        );
-
-        if (!partitionOffset) {
-          continue;
-        }
-
-        // Create a temporary consumer to read messages
-        const peekConsumer = this.kafka.consumer({
-          groupId: `${this.options.groupId}-peek-${Date.now()}`
-        });
-
-        try {
-          await peekConsumer.connect();
-          await peekConsumer.subscribe({
-            topic: this.queueName,
-            fromBeginning: false
-          });
-
-          const messagePromise = new Promise<void>((resolve) => {
-            peekConsumer.run({
-              eachMessage: async ({ message }) => {
-                if (message.value && events.length < this.options.peekCount) {
-                  const messageEvents = JSON.parse(
-                    message.value.toString()
-                  ) as EventEntity[];
-                  events.push(...messageEvents);
-
-                  if (events.length >= this.options.peekCount) {
-                    resolve();
-                  }
-                }
-              }
-            });
-          });
-
-          await Promise.race([
-            messagePromise,
-            new Promise((resolve) => setTimeout(resolve, 5000))
-          ]);
-
-          if (events.length >= this.options.peekCount) {
-            break;
-          }
-        } finally {
-          await peekConsumer.disconnect();
-        }
-      }
-
-      return events;
-    } finally {
-      await admin.disconnect();
-    }
-  }
-
-  async start(): Promise<void> {
-    await this.setupConsumer();
-    await this.producer.connect();
-  }
-
-  async close(): Promise<void> {
-    await this.producer.disconnect();
-    await this.consumer.disconnect();
-  }
-}
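For orientation, a minimal usage sketch of the KafkaWorkerConsumer removed above. The constructor signature, option fields, and start/close methods come from the deleted source; the import path, event shape, topic name, and handler bodies are illustrative assumptions, not part of the package.

// Sketch only: the import path and EmailEvent type are assumed, not taken from the package's exports.
import { KafkaWorkerConsumer } from '@forklaunch/implementation-worker-kafka/consumers';

// Hypothetical event shape; any WorkerEventEntity-compatible type with id/retryCount would do.
type EmailEvent = { id: string; retryCount: number; to: string };

const consumer = new KafkaWorkerConsumer(
  'email-events', // Kafka topic used as the queue name
  {
    brokers: ['localhost:9092'],
    clientId: 'email-worker',
    groupId: 'email-worker-group',
    retries: 3, // failed events are re-published while event.retryCount <= retries
    interval: 1000,
    peekCount: 10 // upper bound on events accumulated by peekEvents()
  },
  async (events: EmailEvent[]) => {
    // process a batch of decoded events
  },
  async (failures: { value: EmailEvent; error: Error }[]) => {
    // inspect events whose processing threw
  }
);

await consumer.start(); // connects producer and consumer, then begins the eachMessage loop
// ...
await consumer.close(); // disconnects both Kafka clients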
package/lib/eject/domain/schemas/index.ts
DELETED
@@ -1 +0,0 @@
-export * from './kafka.schema';
package/lib/eject/producers/index.ts
DELETED
@@ -1 +0,0 @@
-export * from './kafkaWorker.producer';
package/lib/eject/producers/kafkaWorker.producer.ts
DELETED
@@ -1,36 +0,0 @@
-import { WorkerEventEntity } from '@forklaunch/interfaces-worker/types';
-import { Kafka } from 'kafkajs';
-import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
-
-export class KafkaWorkerProducer<
-  EventEntity extends WorkerEventEntity,
-  Options extends KafkaWorkerOptions
-> {
-  private producer;
-
-  constructor(
-    private readonly queueName: string,
-    private readonly options: Options
-  ) {
-    const kafka = new Kafka({
-      clientId: this.options.clientId,
-      brokers: this.options.brokers
-    });
-    this.producer = kafka.producer();
-    this.producer.connect();
-  }
-
-  async enqueueJob(event: EventEntity): Promise<void> {
-    await this.producer.send({
-      topic: this.queueName,
-      messages: [{ value: JSON.stringify([event]) }]
-    });
-  }
-
-  async enqueueBatchJobs(events: EventEntity[]): Promise<void> {
-    await this.producer.send({
-      topic: this.queueName,
-      messages: events.map((event) => ({ value: JSON.stringify(event) }))
-    });
-  }
-}
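A corresponding sketch for the producer above. KafkaWorkerProducer is the class defined in the deleted file; the topic name, option values, and event fields are placeholders.

// Sketch only: option values and the event shape are illustrative, not documented defaults.
const producer = new KafkaWorkerProducer('email-events', {
  brokers: ['localhost:9092'],
  clientId: 'email-api',
  groupId: 'email-api-group',
  retries: 3,
  interval: 1000,
  peekCount: 10
});

// enqueueJob wraps the single event in an array and JSON-stringifies it into one Kafka message.
await producer.enqueueJob({ id: 'evt-1', retryCount: 0, to: 'user@example.com' });

// enqueueBatchJobs sends one Kafka message per event in a single producer.send call.
await producer.enqueueBatchJobs([
  { id: 'evt-2', retryCount: 0, to: 'a@example.com' },
  { id: 'evt-3', retryCount: 0, to: 'b@example.com' }
]);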
package/lib/eject/types/index.ts
DELETED
@@ -1 +0,0 @@
-export * from './kafkaWorker.types';
package/lib/jest.config.d.ts
DELETED
package/lib/jest.config.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"jest.config.d.ts","sourceRoot":"","sources":["../jest.config.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,oBAAoB,EAAE,MAAM,SAAS,CAAC;AAEpD,QAAA,MAAM,UAAU,EAAE,oBAiBjB,CAAC;AAEF,eAAe,UAAU,CAAC"}
package/lib/jest.config.js
DELETED
@@ -1,19 +0,0 @@
-const jestConfig = {
-  preset: 'ts-jest/presets/default-esm', // or other ESM presets
-  moduleNameMapper: {
-    '^(\\.{1,2}/.*)\\.js$': '$1'
-  },
-  transform: {
-    // '^.+\\.[tj]sx?$' to process ts,js,tsx,jsx with `ts-jest`
-    // '^.+\\.m?[tj]sx?$' to process ts,js,tsx,jsx,mts,mjs,mtsx,mjsx with `ts-jest`
-    '^.+\\.[tj]sx?$': [
-      'ts-jest',
-      {
-        useESM: true
-      }
-    ],
-    '^.+\\.js$': 'babel-jest'
-  },
-  testPathIgnorePatterns: ['.*dist/', '.*node_modules/']
-};
-export default jestConfig;
package/lib/producers/index.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../producers/index.ts"],"names":[],"mappings":"AAAA,cAAc,wBAAwB,CAAC"}
package/lib/producers/index.js
DELETED
@@ -1 +0,0 @@
-export * from './kafkaWorker.producer';
package/lib/producers/kafkaWorker.producer.d.ts
DELETED
@@ -1,14 +0,0 @@
-import { WorkerEventEntity } from '@forklaunch/interfaces-worker/types';
-import { KafkaWorkerOptions } from '../types/kafkaWorker.types';
-export declare class KafkaWorkerProducer<
-  EventEntity extends WorkerEventEntity,
-  Options extends KafkaWorkerOptions
-> {
-  private readonly queueName;
-  private readonly options;
-  private producer;
-  constructor(queueName: string, options: Options);
-  enqueueJob(event: EventEntity): Promise<void>;
-  enqueueBatchJobs(events: EventEntity[]): Promise<void>;
-}
-//# sourceMappingURL=kafkaWorker.producer.d.ts.map
package/lib/producers/kafkaWorker.producer.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"kafkaWorker.producer.d.ts","sourceRoot":"","sources":["../../producers/kafkaWorker.producer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,iBAAiB,EAAE,MAAM,qCAAqC,CAAC;AAExE,OAAO,EAAE,kBAAkB,EAAE,MAAM,4BAA4B,CAAC;AAEhE,qBAAa,mBAAmB,CAC9B,WAAW,SAAS,iBAAiB,EACrC,OAAO,SAAS,kBAAkB;IAKhC,OAAO,CAAC,QAAQ,CAAC,SAAS;IAC1B,OAAO,CAAC,QAAQ,CAAC,OAAO;IAJ1B,OAAO,CAAC,QAAQ,CAAC;gBAGE,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,OAAO;IAU7B,UAAU,CAAC,KAAK,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC;IAO7C,gBAAgB,CAAC,MAAM,EAAE,WAAW,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;CAM7D"}
package/lib/producers/kafkaWorker.producer.js
DELETED
@@ -1,28 +0,0 @@
-import { Kafka } from 'kafkajs';
-export class KafkaWorkerProducer {
-  queueName;
-  options;
-  producer;
-  constructor(queueName, options) {
-    this.queueName = queueName;
-    this.options = options;
-    const kafka = new Kafka({
-      clientId: this.options.clientId,
-      brokers: this.options.brokers
-    });
-    this.producer = kafka.producer();
-    this.producer.connect();
-  }
-  async enqueueJob(event) {
-    await this.producer.send({
-      topic: this.queueName,
-      messages: [{ value: JSON.stringify([event]) }]
-    });
-  }
-  async enqueueBatchJobs(events) {
-    await this.producer.send({
-      topic: this.queueName,
-      messages: events.map((event) => ({ value: JSON.stringify(event) }))
-    });
-  }
-}
package/lib/schemas/index.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../schemas/index.ts"],"names":[],"mappings":"AAAA,cAAc,gBAAgB,CAAC"}
package/lib/schemas/index.js
DELETED
@@ -1 +0,0 @@
-export * from './kafka.schema';
package/lib/schemas/kafka.schema.d.ts
DELETED
@@ -1,64 +0,0 @@
-export declare const KafkaWorkerSchemas: <
-  SchemaValidator extends import('@forklaunch/validator').AnySchemaValidator
->(
-  options: Record<string, unknown> & {
-    validator: SchemaValidator;
-  }
-) => import('@forklaunch/core/mappers').SchemasByValidator<
-  SchemaValidator,
-  (options: Record<string, unknown>) => {
-    brokers: import('@sinclair/typebox').TArray<
-      import('@sinclair/typebox').TString
-    >;
-    clientId: import('@sinclair/typebox').TString;
-    groupId: import('@sinclair/typebox').TString;
-    retries: import('@sinclair/typebox').TTransform<
-      import('@sinclair/typebox').TUnion<
-        [
-          import('@sinclair/typebox').TNumber,
-          import('@sinclair/typebox').TString,
-          import('@sinclair/typebox').TBoolean,
-          import('@sinclair/typebox').TNull,
-          import('@sinclair/typebox').TDate,
-          import('@sinclair/typebox').TBigInt
-        ]
-      >,
-      number
-    >;
-    interval: import('@sinclair/typebox').TTransform<
-      import('@sinclair/typebox').TUnion<
-        [
-          import('@sinclair/typebox').TNumber,
-          import('@sinclair/typebox').TString,
-          import('@sinclair/typebox').TBoolean,
-          import('@sinclair/typebox').TNull,
-          import('@sinclair/typebox').TDate,
-          import('@sinclair/typebox').TBigInt
-        ]
-      >,
-      number
-    >;
-    peekCount: import('@sinclair/typebox').TTransform<
-      import('@sinclair/typebox').TUnion<
-        [
-          import('@sinclair/typebox').TNumber,
-          import('@sinclair/typebox').TString,
-          import('@sinclair/typebox').TBoolean,
-          import('@sinclair/typebox').TNull,
-          import('@sinclair/typebox').TDate,
-          import('@sinclair/typebox').TBigInt
-        ]
-      >,
-      number
-    >;
-  },
-  (options: Record<string, unknown>) => {
-    brokers: import('zod').ZodArray<import('zod').ZodString, 'many'>;
-    clientId: import('zod').ZodString;
-    groupId: import('zod').ZodString;
-    retries: import('zod').ZodNumber;
-    interval: import('zod').ZodNumber;
-    peekCount: import('zod').ZodNumber;
-  }
->;
-//# sourceMappingURL=kafka.schema.d.ts.map
package/lib/schemas/kafka.schema.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"kafka.schema.d.ts","sourceRoot":"","sources":["../../schemas/kafka.schema.ts"],"names":[],"mappings":"AAIA,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;EAG9B,CAAC"}
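Both the TypeBox and Zod declarations above describe the same worker options object. As a rough sketch, the shape they imply is below; field names come from the declarations, while the comments are assumptions based on how the fields are used elsewhere in this diff, and the real KafkaWorkerOptions type lives in lib/types/kafkaWorker.types (not shown in this section).

// Sketch of the options object validated by the deleted kafka.schema.d.ts declarations.
type KafkaWorkerOptionsShape = {
  brokers: string[]; // e.g. ['localhost:9092'], as in the package's own test fixture
  clientId: string;
  groupId: string;
  retries: number;   // the consumer re-publishes a failed event while event.retryCount <= retries
  interval: number;  // numeric; 1000 in the test fixture (its exact semantics are not shown here)
  peekCount: number; // maximum number of events peekEvents() accumulates before resolving
};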