@forklaunch/implementation-worker-kafka 0.1.2 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/__test__/schemaEquality.test.d.ts +1 -1
- package/lib/__test__/schemaEquality.test.js +4 -12
- package/lib/consumers/index.d.ts +1 -1
- package/lib/consumers/kafkaWorker.consumer.d.ts +16 -29
- package/lib/consumers/kafkaWorker.consumer.js +139 -137
- package/lib/jest.config.d.ts +1 -1
- package/lib/jest.config.js +16 -16
- package/lib/producers/index.d.ts +1 -1
- package/lib/producers/kafkaWorker.producer.d.ts +8 -11
- package/lib/producers/kafkaWorker.producer.js +25 -25
- package/lib/schemas/index.d.ts +1 -1
- package/lib/schemas/kafka.schema.d.ts +17 -63
- package/lib/schemas/kafka.schema.js +1 -4
- package/lib/schemas/typebox/kafkaWorker.schema.d.ts +7 -45
- package/lib/schemas/typebox/kafkaWorker.schema.js +6 -6
- package/lib/schemas/zod/kafkaWorker.schema.d.ts +7 -7
- package/lib/schemas/zod/kafkaWorker.schema.js +6 -6
- package/lib/tsconfig.tsbuildinfo +1 -1
- package/lib/types/index.d.ts +1 -1
- package/lib/types/kafkaWorker.types.d.ts +7 -7
- package/lib/vitest.config.d.ts +2 -2
- package/lib/vitest.config.js +4 -4
- package/package.json +3 -3
package/lib/__test__/schemaEquality.test.d.ts
CHANGED
@@ -1,2 +1,2 @@
 export {};
-//# sourceMappingURL=schemaEquality.test.d.ts.map
+//# sourceMappingURL=schemaEquality.test.d.ts.map
package/lib/__test__/schemaEquality.test.js
CHANGED
@@ -3,22 +3,14 @@ import { testSchemaEquality } from '@forklaunch/core/test';
 import { KafkaWorkerOptionsSchema as TypeboxKafkaWorkerOptionsSchema } from '../schemas/typebox/kafkaWorker.schema';
 import { KafkaWorkerOptionsSchema as ZodKafkaWorkerOptionsSchema } from '../schemas/zod/kafkaWorker.schema';
 describe('schema equality', () => {
-  it('should be equal for bullmq worker', () => {
-    expect(
-      isTrue(
-        testSchemaEquality(
-          ZodKafkaWorkerOptionsSchema,
-          TypeboxKafkaWorkerOptionsSchema,
-          {
+    it('should be equal for bullmq worker', () => {
+        expect(isTrue(testSchemaEquality(ZodKafkaWorkerOptionsSchema, TypeboxKafkaWorkerOptionsSchema, {
             brokers: ['localhost:9092'],
             clientId: 'test',
             groupId: 'test',
             retries: 1,
             interval: 1000,
             peekCount: 1
-          }
-        )
-      )
-    ).toBeTruthy();
-  });
+        }))).toBeTruthy();
+    });
 });
package/lib/consumers/index.d.ts
CHANGED
@@ -1,2 +1,2 @@
 export * from './kafkaWorker.consumer';
-//# sourceMappingURL=index.d.ts.map
+//# sourceMappingURL=index.d.ts.map
package/lib/consumers/kafkaWorker.consumer.d.ts
CHANGED
@@ -1,32 +1,19 @@
 import { WorkerConsumer } from '@forklaunch/interfaces-worker/interfaces';
-import {
-  WorkerEventEntity,
-  WorkerFailureHandler,
-  WorkerProcessFunction
-} from '@forklaunch/interfaces-worker/types';
+import { WorkerEventEntity, WorkerFailureHandler, WorkerProcessFunction } from '@forklaunch/interfaces-worker/types';
 import { WorkerOptions } from '../types/kafkaWorker.types';
-export declare class KafkaWorkerConsumer<
- …
-    queueName: string,
-    options: Options,
-    processEventsFunction: WorkerProcessFunction<EventEntity>,
-    failureHandler: WorkerFailureHandler<EventEntity>
-  );
-  private setupConsumer;
-  peekEvents(): Promise<EventEntity[]>;
-  start(): Promise<void>;
-  close(): Promise<void>;
+export declare class KafkaWorkerConsumer<EventEntity extends WorkerEventEntity, Options extends WorkerOptions> implements WorkerConsumer<EventEntity> {
+    protected readonly queueName: string;
+    protected readonly options: Options;
+    protected readonly processEventsFunction: WorkerProcessFunction<EventEntity>;
+    protected readonly failureHandler: WorkerFailureHandler<EventEntity>;
+    private kafka;
+    private producer;
+    private consumer;
+    private processedMessages;
+    constructor(queueName: string, options: Options, processEventsFunction: WorkerProcessFunction<EventEntity>, failureHandler: WorkerFailureHandler<EventEntity>);
+    private setupConsumer;
+    peekEvents(): Promise<EventEntity[]>;
+    start(): Promise<void>;
+    close(): Promise<void>;
 }
-//# sourceMappingURL=kafkaWorker.consumer.d.ts.map
+//# sourceMappingURL=kafkaWorker.consumer.d.ts.map
package/lib/consumers/kafkaWorker.consumer.js
CHANGED
@@ -1,147 +1,149 @@
 import { Kafka } from 'kafkajs';
 export class KafkaWorkerConsumer {
- …
-    });
-    this.producer = this.kafka.producer();
-    this.consumer = this.kafka.consumer({
-      groupId: this.options.groupId
-    });
-  }
-  async setupConsumer() {
-    await this.consumer.connect();
-    await this.consumer.subscribe({
-      topic: this.queueName,
-      fromBeginning: false
-    });
-    await this.consumer.run({
-      eachMessage: async ({ topic, partition, message }) => {
-        if (!message.value) return;
-        const messageKey = `${topic}-${partition}-${message.offset}`;
-        if (this.processedMessages.has(messageKey)) {
-          return;
-        }
-        const events = JSON.parse(message.value.toString());
-        try {
-          await this.processEventsFunction(events);
-          this.processedMessages.add(messageKey);
-          await this.consumer.commitOffsets([
-            {
-              topic,
-              partition,
-              offset: (parseInt(message.offset) + 1).toString()
-            }
-          ]);
-        } catch (error) {
-          this.failureHandler([
-            {
-              value: events[0],
-              error: error
-            }
-          ]);
-          for (const event of events) {
-            if (event.retryCount <= this.options.retries) {
-              await this.producer.send({
-                topic: this.queueName,
-                messages: [
-                  {
-                    value: JSON.stringify([
-                      {
-                        ...event,
-                        retryCount: event.retryCount + 1
-                      }
-                    ]),
-                    key: event.id
-                  }
-                ]
-              });
-            }
-          }
-        }
-      }
-    });
-  }
-  async peekEvents() {
-    const events = [];
-    const admin = this.kafka.admin();
-    await admin.connect();
-    try {
-      // Get topic metadata to find partitions
-      const metadata = await admin.fetchTopicMetadata({
-        topics: [this.queueName]
-      });
-      const topic = metadata.topics[0];
-      if (!topic) {
-        return events;
-      }
-      // For each partition, get the latest offset
-      for (const partition of topic.partitions) {
-        const offsets = await admin.fetchTopicOffsets(this.queueName);
-        const partitionOffset = offsets.find(
-          (o) => o.partition === partition.partitionId
-        );
-        if (!partitionOffset) {
-          continue;
-        }
-        // Create a temporary consumer to read messages
-        const peekConsumer = this.kafka.consumer({
-          groupId: `${this.options.groupId}-peek-${Date.now()}`
+    queueName;
+    options;
+    processEventsFunction;
+    failureHandler;
+    kafka;
+    producer;
+    consumer;
+    processedMessages = new Set();
+    constructor(queueName, options, processEventsFunction, failureHandler) {
+        this.queueName = queueName;
+        this.options = options;
+        this.processEventsFunction = processEventsFunction;
+        this.failureHandler = failureHandler;
+        this.kafka = new Kafka({
+            clientId: this.options.clientId,
+            brokers: this.options.brokers
         });
- …
+        this.producer = this.kafka.producer();
+        this.consumer = this.kafka.consumer({
+            groupId: this.options.groupId
+        });
+    }
+    async setupConsumer() {
+        await this.consumer.connect();
+        await this.consumer.subscribe({
             topic: this.queueName,
             fromBeginning: false
- …
+        });
+        await this.consumer.run({
+            eachMessage: async ({ topic, partition, message }) => {
+                if (!message.value)
+                    return;
+                const messageKey = `${topic}-${partition}-${message.offset}`;
+                if (this.processedMessages.has(messageKey)) {
+                    return;
+                }
+                const events = JSON.parse(message.value.toString());
+                try {
+                    await this.processEventsFunction(events);
+                    this.processedMessages.add(messageKey);
+                    await this.consumer.commitOffsets([
+                        {
+                            topic,
+                            partition,
+                            offset: (parseInt(message.offset) + 1).toString()
+                        }
+                    ]);
+                }
+                catch (error) {
+                    this.failureHandler([
+                        {
+                            value: events[0],
+                            error: error
+                        }
+                    ]);
+                    for (const event of events) {
+                        if (event.retryCount <= this.options.retries) {
+                            await this.producer.send({
+                                topic: this.queueName,
+                                messages: [
+                                    {
+                                        value: JSON.stringify([
+                                            {
+                                                ...event,
+                                                retryCount: event.retryCount + 1
+                                            }
+                                        ]),
+                                        key: event.id
+                                    }
+                                ]
+                            });
+                        }
+                    }
                }
- …
+            }
+        });
+    }
+    async peekEvents() {
+        const events = [];
+        const admin = this.kafka.admin();
+        await admin.connect();
+        try {
+            // Get topic metadata to find partitions
+            const metadata = await admin.fetchTopicMetadata({
+                topics: [this.queueName]
             });
- …
+            const topic = metadata.topics[0];
+            if (!topic) {
+                return events;
+            }
+            // For each partition, get the latest offset
+            for (const partition of topic.partitions) {
+                const offsets = await admin.fetchTopicOffsets(this.queueName);
+                const partitionOffset = offsets.find((o) => o.partition === partition.partitionId);
+                if (!partitionOffset) {
+                    continue;
+                }
+                // Create a temporary consumer to read messages
+                const peekConsumer = this.kafka.consumer({
+                    groupId: `${this.options.groupId}-peek-${Date.now()}`
+                });
+                try {
+                    await peekConsumer.connect();
+                    await peekConsumer.subscribe({
+                        topic: this.queueName,
+                        fromBeginning: false
+                    });
+                    const messagePromise = new Promise((resolve) => {
+                        peekConsumer.run({
+                            eachMessage: async ({ message }) => {
+                                if (message.value && events.length < this.options.peekCount) {
+                                    const messageEvents = JSON.parse(message.value.toString());
+                                    events.push(...messageEvents);
+                                    if (events.length >= this.options.peekCount) {
+                                        resolve();
+                                    }
+                                }
+                            }
+                        });
+                    });
+                    await Promise.race([
+                        messagePromise,
+                        new Promise((resolve) => setTimeout(resolve, 5000))
+                    ]);
+                    if (events.length >= this.options.peekCount) {
+                        break;
+                    }
+                }
+                finally {
+                    await peekConsumer.disconnect();
+                }
+            }
+            return events;
         }
- …
+        finally {
+            await admin.disconnect();
+        }
+    }
+    async start() {
+        await this.setupConsumer();
+        await this.producer.connect();
+    }
+    async close() {
+        await this.producer.disconnect();
+        await this.consumer.disconnect();
     }
-  }
-  async start() {
-    await this.setupConsumer();
-    await this.producer.connect();
-  }
-  async close() {
-    await this.producer.disconnect();
-    await this.consumer.disconnect();
-  }
 }
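The compiled consumer is driven entirely through its four constructor arguments, per the declaration in kafkaWorker.consumer.d.ts above. A minimal usage sketch follows; the `consumers` import subpath and all event fields other than `id` and `retryCount` (the two the compiled retry logic actually reads) are assumptions:

    import { KafkaWorkerConsumer } from '@forklaunch/implementation-worker-kafka/consumers';

    // Assumed event shape: the compiled consumer reads `retryCount` and uses
    // `id` as the Kafka message key; `payload` is purely illustrative.
    type MyEvent = { id: string; retryCount: number; payload: string };

    const consumer = new KafkaWorkerConsumer(
        'my-queue', // topic name
        {
            brokers: ['localhost:9092'],
            clientId: 'my-client',
            groupId: 'my-group',
            retries: 3, // events with retryCount above this are not re-enqueued
            interval: 1000,
            peekCount: 10 // cap on events returned by peekEvents()
        },
        async (events: MyEvent[]) => {
            // batch handler; throwing here routes the batch to the failure handler
            for (const event of events) {
                console.log(event.payload);
            }
        },
        (failures: { value: MyEvent; error: unknown }[]) => {
            // invoked with { value, error } records when processing throws
            console.error(failures);
        }
    );

    await consumer.start(); // subscribes, starts the run loop, connects the producer
    // ...
    await consumer.close(); // disconnects producer and consumer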
package/lib/jest.config.d.ts
CHANGED
package/lib/jest.config.js
CHANGED
@@ -1,19 +1,19 @@
 const jestConfig = {
- …
+    preset: 'ts-jest/presets/default-esm', // or other ESM presets
+    moduleNameMapper: {
+        '^(\\.{1,2}/.*)\\.js$': '$1'
+    },
+    transform: {
+        // '^.+\\.[tj]sx?$' to process ts,js,tsx,jsx with `ts-jest`
+        // '^.+\\.m?[tj]sx?$' to process ts,js,tsx,jsx,mts,mjs,mtsx,mjsx with `ts-jest`
+        '^.+\\.[tj]sx?$': [
+            'ts-jest',
+            {
+                useESM: true
+            }
+        ],
+        '^.+\\.js$': 'babel-jest'
+    },
+    testPathIgnorePatterns: ['.*dist/', '.*node_modules/']
 };
 export default jestConfig;
package/lib/producers/index.d.ts
CHANGED
@@ -1,2 +1,2 @@
 export * from './kafkaWorker.producer';
-//# sourceMappingURL=index.d.ts.map
+//# sourceMappingURL=index.d.ts.map
package/lib/producers/kafkaWorker.producer.d.ts
CHANGED
@@ -1,14 +1,11 @@
 import { WorkerEventEntity } from '@forklaunch/interfaces-worker/types';
 import { WorkerOptions } from '../types/kafkaWorker.types';
-export declare class KafkaWorkerProducer<
- …
-  constructor(queueName: string, options: Options);
-  enqueueJob(event: EventEntity): Promise<void>;
-  enqueueBatchJobs(events: EventEntity[]): Promise<void>;
+export declare class KafkaWorkerProducer<EventEntity extends WorkerEventEntity, Options extends WorkerOptions> {
+    private readonly queueName;
+    private readonly options;
+    private producer;
+    constructor(queueName: string, options: Options);
+    enqueueJob(event: EventEntity): Promise<void>;
+    enqueueBatchJobs(events: EventEntity[]): Promise<void>;
 }
-//# sourceMappingURL=kafkaWorker.producer.d.ts.map
+//# sourceMappingURL=kafkaWorker.producer.d.ts.map
package/lib/producers/kafkaWorker.producer.js
CHANGED
@@ -1,28 +1,28 @@
 import { Kafka } from 'kafkajs';
 export class KafkaWorkerProducer {
- …
+    queueName;
+    options;
+    producer;
+    constructor(queueName, options) {
+        this.queueName = queueName;
+        this.options = options;
+        const kafka = new Kafka({
+            clientId: this.options.clientId,
+            brokers: this.options.brokers
+        });
+        this.producer = kafka.producer();
+        this.producer.connect();
+    }
+    async enqueueJob(event) {
+        await this.producer.send({
+            topic: this.queueName,
+            messages: [{ value: JSON.stringify([event]) }]
+        });
+    }
+    async enqueueBatchJobs(events) {
+        await this.producer.send({
+            topic: this.queueName,
+            messages: events.map((event) => ({ value: JSON.stringify(event) }))
+        });
+    }
 }
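A matching sketch for the producer side, again assuming the `producers` import subpath and an illustrative event shape. Two details are worth noting from the compiled output above: the constructor fires `producer.connect()` without awaiting it, and `enqueueJob` serializes the event wrapped in an array (the batch format the consumer parses) while `enqueueBatchJobs` serializes each event individually:

    import { KafkaWorkerProducer } from '@forklaunch/implementation-worker-kafka/producers';

    const producer = new KafkaWorkerProducer('my-queue', {
        brokers: ['localhost:9092'],
        clientId: 'my-client',
        groupId: 'my-group', // required by the options schema, though the producer never joins a group
        retries: 3,
        interval: 1000,
        peekCount: 10
    });

    // Sent as JSON.stringify([event]) -- one message carrying a one-event batch.
    await producer.enqueueJob({ id: '1', retryCount: 0, payload: 'hello' });

    // Each event becomes its own message, sent as JSON.stringify(event).
    await producer.enqueueBatchJobs([
        { id: '2', retryCount: 0, payload: 'a' },
        { id: '3', retryCount: 0, payload: 'b' }
    ]);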
package/lib/schemas/index.d.ts
CHANGED
@@ -1,2 +1,2 @@
 export * from './kafka.schema';
-//# sourceMappingURL=index.d.ts.map
+//# sourceMappingURL=index.d.ts.map
package/lib/schemas/kafka.schema.d.ts
CHANGED
@@ -1,64 +1,18 @@
-export declare const KafkaWorkerSchemas: <
-  SchemaValidator extends import('@forklaunch/validator').AnySchemaValidator
->(
-  options: Record<string, unknown> & {
+export declare const KafkaWorkerSchemas: <SchemaValidator extends import("@forklaunch/validator").AnySchemaValidator>(options: Record<string, unknown> & {
     validator: SchemaValidator;
- …
-    >;
- …
-          import('@sinclair/typebox').TDate,
-          import('@sinclair/typebox').TBigInt
-        ]
-      >,
-      number
-    >;
-    interval: import('@sinclair/typebox').TTransform<
-      import('@sinclair/typebox').TUnion<
-        [
-          import('@sinclair/typebox').TNumber,
-          import('@sinclair/typebox').TString,
-          import('@sinclair/typebox').TBoolean,
-          import('@sinclair/typebox').TNull,
-          import('@sinclair/typebox').TDate,
-          import('@sinclair/typebox').TBigInt
-        ]
-      >,
-      number
-    >;
-    peekCount: import('@sinclair/typebox').TTransform<
-      import('@sinclair/typebox').TUnion<
-        [
-          import('@sinclair/typebox').TNumber,
-          import('@sinclair/typebox').TString,
-          import('@sinclair/typebox').TBoolean,
-          import('@sinclair/typebox').TNull,
-          import('@sinclair/typebox').TDate,
-          import('@sinclair/typebox').TBigInt
-        ]
-      >,
-      number
-    >;
-  },
-  (options: Record<string, unknown>) => {
-    brokers: import('zod').ZodArray<import('zod').ZodString, 'many'>;
-    clientId: import('zod').ZodString;
-    groupId: import('zod').ZodString;
-    retries: import('zod').ZodNumber;
-    interval: import('zod').ZodNumber;
-    peekCount: import('zod').ZodNumber;
-  }
->;
-//# sourceMappingURL=kafka.schema.d.ts.map
+}) => import("@forklaunch/core/mappers").SchemasByValidator<SchemaValidator, (options: Record<string, unknown>) => {
+    brokers: import("@sinclair/typebox").TArray<import("@sinclair/typebox").TString>;
+    clientId: import("@sinclair/typebox").TString;
+    groupId: import("@sinclair/typebox").TString;
+    retries: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+    interval: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+    peekCount: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+}, (options: Record<string, unknown>) => {
+    brokers: import("zod").ZodArray<import("zod").ZodString, "many">;
+    clientId: import("zod").ZodString;
+    groupId: import("zod").ZodString;
+    retries: import("zod").ZodNumber;
+    interval: import("zod").ZodNumber;
+    peekCount: import("zod").ZodNumber;
+}>;
+//# sourceMappingURL=kafka.schema.d.ts.map
package/lib/schemas/kafka.schema.js
CHANGED
@@ -1,7 +1,4 @@
 import { serviceSchemaResolver } from '@forklaunch/core/mappers';
 import { KafkaWorkerOptionsSchema as TypeBoxSchemas } from './typebox/kafkaWorker.schema';
 import { KafkaWorkerOptionsSchema as ZodSchemas } from './zod/kafkaWorker.schema';
-export const KafkaWorkerSchemas = serviceSchemaResolver(
-  () => TypeBoxSchemas,
-  () => ZodSchemas
-);
+export const KafkaWorkerSchemas = serviceSchemaResolver(() => TypeBoxSchemas, () => ZodSchemas);
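Per the declaration in kafka.schema.d.ts above, KafkaWorkerSchemas takes an options bag carrying a validator and resolves to whichever schema set (TypeBox or Zod) matches it. A minimal sketch of the call shape; constructing a concrete validator is the job of @forklaunch/validator and is left abstract here, and the `schemas` import subpath is an assumption:

    import { KafkaWorkerSchemas } from '@forklaunch/implementation-worker-kafka/schemas';
    import { AnySchemaValidator } from '@forklaunch/validator';

    declare const validator: AnySchemaValidator; // a zod- or typebox-backed validator

    // SchemasByValidator selects the schema factory matching the validator;
    // the factory yields the option field schemas shown above
    // (brokers, clientId, groupId, retries, interval, peekCount).
    const schemas = KafkaWorkerSchemas({ validator });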
package/lib/schemas/typebox/kafkaWorker.schema.d.ts
CHANGED
@@ -1,47 +1,9 @@
 export declare const KafkaWorkerOptionsSchema: {
- …
-    import(
- …
-    import('@sinclair/typebox').TUnion<
-      [
-        import('@sinclair/typebox').TNumber,
-        import('@sinclair/typebox').TString,
-        import('@sinclair/typebox').TBoolean,
-        import('@sinclair/typebox').TNull,
-        import('@sinclair/typebox').TDate,
-        import('@sinclair/typebox').TBigInt
-      ]
-    >,
-    number
-  >;
-  interval: import('@sinclair/typebox').TTransform<
-    import('@sinclair/typebox').TUnion<
-      [
-        import('@sinclair/typebox').TNumber,
-        import('@sinclair/typebox').TString,
-        import('@sinclair/typebox').TBoolean,
-        import('@sinclair/typebox').TNull,
-        import('@sinclair/typebox').TDate,
-        import('@sinclair/typebox').TBigInt
-      ]
-    >,
-    number
-  >;
-  peekCount: import('@sinclair/typebox').TTransform<
-    import('@sinclair/typebox').TUnion<
-      [
-        import('@sinclair/typebox').TNumber,
-        import('@sinclair/typebox').TString,
-        import('@sinclair/typebox').TBoolean,
-        import('@sinclair/typebox').TNull,
-        import('@sinclair/typebox').TDate,
-        import('@sinclair/typebox').TBigInt
-      ]
-    >,
-    number
-  >;
+    brokers: import("@sinclair/typebox").TArray<import("@sinclair/typebox").TString>;
+    clientId: import("@sinclair/typebox").TString;
+    groupId: import("@sinclair/typebox").TString;
+    retries: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+    interval: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
+    peekCount: import("@sinclair/typebox").TTransform<import("@sinclair/typebox").TUnion<[import("@sinclair/typebox").TNumber, import("@sinclair/typebox").TString, import("@sinclair/typebox").TBoolean, import("@sinclair/typebox").TNull, import("@sinclair/typebox").TDate, import("@sinclair/typebox").TBigInt]>, number>;
 };
-//# sourceMappingURL=kafkaWorker.schema.d.ts.map
+//# sourceMappingURL=kafkaWorker.schema.d.ts.map