kafka-ts 0.0.17 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -39
- package/dist/api/fetch.d.ts +27 -27
- package/dist/api/fetch.js +4 -19
- package/dist/api/index.d.ts +27 -27
- package/dist/consumer/consumer.d.ts +2 -7
- package/dist/consumer/consumer.js +7 -26
- package/dist/consumer/fetch-manager.d.ts +5 -7
- package/dist/consumer/fetch-manager.js +38 -54
- package/dist/utils/tracer.d.ts +1 -1
- package/package.json +2 -1
package/README.md
CHANGED
@@ -27,8 +27,8 @@ export const kafka = createKafkaClient({
 const consumer = await kafka.startConsumer({
     groupId: 'my-consumer-group',
     topics: ['my-topic'],
-    onMessage: (message) => {
-        console.log(message);
+    onBatch: (messages) => {
+        console.log(messages);
     },
 });
 ```
@@ -87,7 +87,7 @@ By default KafkaTS logs out using a JSON logger. This can be globally replaced b

 #### Retries

-By default KafkaTS retries onBatch
+By default KafkaTS retries `onBatch` using an exponential backoff delay up to 5 times (see [src/utils/retrier.ts](./src/utils/retrier.ts)). In case of failure the consumer is restarted.

 In case you want to skip failed messages or implement a DLQ-like mechanism, you can overwrite `retrier` on `startConsumer()` and execute your own logic `onFailure`.

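For context, overriding the retrier could look roughly like the sketch below. The exact `Retrier` shape lives in src/utils/retrier.ts and is not shown in this diff, so the "async function wrapping the handler" form here is an assumption:

```ts
// Hypothetical DLQ-style retrier: swallow the failure instead of
// rethrowing (per the README text above, a failure restarts the consumer).
const skipFailedBatches = async (fn: () => unknown) => {
    try {
        await fn();
    } catch (error) {
        console.error('onBatch failed, skipping batch', error); // e.g. produce to a DLQ topic here
    }
};

await kafka.startConsumer({
    topics: ['my-topic'],
    groupId: 'my-consumer-group',
    retrier: skipFailedBatches,
    onBatch: async (messages) => { /* ... */ },
});
```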
@@ -100,22 +100,6 @@ await kafka.startConsumer({
 });
 ```

-#### Concurrency control
-
-Depending on the use case, you might want to control `concurrency` and `batchGranularity`.
-
-When subscribing to a topic, the consumer group leader will distribute all subscribed topic partitions to consumers within the group. Each consumer will then fetch messages only from partitions assigned to them.
-
-`batchGranularity` controls how messages are split into batches from a fetch response:
-
-- **broker** - (default) all messages received from a single kafka broker will be included in a single batch.
-- **topic** - all messages received from a single broker and topic will be included in a single batch.
-- **partition** - a batch will only include messages from a single partition.
-
-After each batch is processed, the consumer will commit offsets for the processed messages. The more granual the batch is, the more often offsets are committed.
-
-`concurrency` controls how many aforementioned batches are processed concurrently.
-
 #### Partitioning

 By default, messages are partitioned by message key or round-robin if the key is null or undefined. Partition can be overwritten by `partition` property in the message. You can also override the default partitioner per producer instance `kafka.createProducer({ partitioner: customPartitioner })`.
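A custom partitioner, as referenced in the retained paragraph above, might be sketched as follows. The `Partitioner` signature is not shown in this diff, so the message-in, partition-index-out shape is an assumption:

```ts
// Hypothetical partitioner: pin one hot key to a fixed partition and
// hash everything else. Assumes a message shape with a nullable key.
const customPartitioner = (message: { key: string | null }): number => {
    if (message.key === 'audit') return 0; // pin a hot key
    const hash = [...(message.key ?? '')].reduce((h, c) => h * 31 + c.charCodeAt(0), 7);
    return Math.abs(hash) % 8; // assuming the topic has 8 partitions
};

const producer = kafka.createProducer({ partitioner: customPartitioner });
```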
@@ -168,25 +152,23 @@ Custom SASL mechanisms can be implemented following the `SASLProvider` interface

 ### `kafka.startConsumer()`

-| Name | Type | Required | Default | Description
-| ---------------------- | -------------------------------------- | -------- | ------------------------------- |
-| topics | string[] | true | | List of topics to subscribe to
-| groupId | string | false | _null_ | Consumer group id
-| groupInstanceId | string | false | _null_ | Consumer group instance id
-| rackId | string | false | _null_ | Rack id
-| isolationLevel | IsolationLevel | false | IsolationLevel.READ_UNCOMMITTED | Isolation level
-| sessionTimeoutMs | number | false | 30000 | Session timeout in milliseconds
-| rebalanceTimeoutMs | number | false | 60000 | Rebalance timeout in milliseconds
-| maxWaitMs | number | false | 5000 | Fetch long poll timeout in milliseconds
-| minBytes | number | false | 1 | Minimum number of bytes to wait for before returning a fetch response
-| maxBytes | number | false | 1_048_576 | Maximum number of bytes to return in the fetch response
-| partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response
-| allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist
-
-
-
-| onMessage | (message: Message) => Promise<unknown> | true | | Callback executed on every message |
-| onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed on every batch of messages (based on **batchGranuality**) |
+| Name | Type | Required | Default | Description |
+| ---------------------- | -------------------------------------- | -------- | ------------------------------- | ------------------------------------------------------------------------------------ |
+| topics | string[] | true | | List of topics to subscribe to |
+| groupId | string | false | _null_ | Consumer group id |
+| groupInstanceId | string | false | _null_ | Consumer group instance id |
+| rackId | string | false | _null_ | Rack id |
+| isolationLevel | IsolationLevel | false | IsolationLevel.READ_UNCOMMITTED | Isolation level |
+| sessionTimeoutMs | number | false | 30000 | Session timeout in milliseconds |
+| rebalanceTimeoutMs | number | false | 60000 | Rebalance timeout in milliseconds |
+| maxWaitMs | number | false | 5000 | Fetch long poll timeout in milliseconds |
+| minBytes | number | false | 1 | Minimum number of bytes to wait for before returning a fetch response |
+| maxBytes | number | false | 1_048_576 | Maximum number of bytes to return in the fetch response |
+| partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response |
+| allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+| fromTimestamp | bigint | false | -1 | Start consuming messages from timestamp (-1 = latest offsets, -2 = earliest offsets) |
+| batchSize | number | false | null | Maximum number of records passed to a single `onBatch` call |
+| onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed when a batch of messages is received |

 ### `kafka.createProducer()`

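To illustrate the new rows in the `startConsumer()` table above (`fromTimestamp`, `batchSize`), a consumer opting into both might look like this:

```ts
const consumer = await kafka.startConsumer({
    topics: ['my-topic'],
    groupId: 'my-consumer-group',
    fromTimestamp: -2n, // -2 = earliest offsets, per the table above
    batchSize: 100,     // cap each onBatch call at 100 records
    onBatch: async (messages) => {
        for (const message of messages) {
            // handle each record
        }
    },
});
```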
@@ -195,7 +177,7 @@ Custom SASL mechanisms can be implemented following the `SASLProvider` interface
 | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
 | partitioner | Partitioner | false | defaultPartitioner | Custom partitioner function. By default, it uses a default java-compatible partitioner. |

-### `producer.send(messages: Message[])`
+### `producer.send(messages: Message[], options?: { acks?: -1 | 1 })`

 <!-- export type Message = {
     topic: string;
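`producer.send()` now accepts an options argument with `acks`. Assuming standard Kafka semantics (`-1` waits for all in-sync replicas, `1` for the leader only; the library's default is not stated in this diff), usage could look like:

```ts
const producer = kafka.createProducer();

await producer.send(
    [{ topic: 'my-topic', key: 'user-1', value: JSON.stringify({ hello: 'world' }) }],
    { acks: 1 }, // leader-only acknowledgement for lower latency
);
```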
package/dist/api/fetch.d.ts
CHANGED
@@ -27,20 +27,25 @@ export declare const FETCH: import("../utils/api").Api<{
     }[];
     rackId: string;
 }, {
+    _tag: void;
+    throttleTimeMs: number;
+    errorCode: number;
+    sessionId: number;
     responses: {
+        topicId: string;
         partitions: {
+            partitionIndex: number;
+            errorCode: number;
+            highWatermark: bigint;
+            lastStableOffset: bigint;
+            logStartOffset: bigint;
+            abortedTransactions: {
+                producerId: bigint;
+                firstOffset: bigint;
+                _tag: void;
+            }[];
+            preferredReadReplica: number;
             records: {
-                records: {
-                    attributes: number;
-                    timestampDelta: bigint;
-                    offsetDelta: number;
-                    key: string | null;
-                    value: string | null;
-                    headers: {
-                        key: string;
-                        value: string;
-                    }[];
-                }[];
                 baseOffset: bigint;
                 batchLength: number;
                 partitionLeaderEpoch: number;
@@ -58,26 +63,21 @@ export declare const FETCH: import("../utils/api").Api<{
                 producerId: bigint;
                 producerEpoch: number;
                 baseSequence: number;
+                records: {
+                    attributes: number;
+                    timestampDelta: bigint;
+                    offsetDelta: number;
+                    key: string | null;
+                    value: string | null;
+                    headers: {
+                        key: string;
+                        value: string;
+                    }[];
+                }[];
             }[];
-            partitionIndex: number;
-            errorCode: number;
-            highWatermark: bigint;
-            lastStableOffset: bigint;
-            logStartOffset: bigint;
-            abortedTransactions: {
-                producerId: bigint;
-                firstOffset: bigint;
-                _tag: void;
-            }[];
-            preferredReadReplica: number;
             _tag: void;
         }[];
-        topicId: string;
         _tag: void;
     }[];
-    _tag: void;
-    throttleTimeMs: number;
-    errorCode: number;
-    sessionId: number;
     _tag2: void;
 }>;
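The response type now carries the decoded `records` inside each batch entry, with per-record deltas relative to the batch's base values. A small sketch of recovering absolute offsets and timestamps from that shape (field names follow the declaration above; `baseTimestamp` appears in the compiled fetch manager later in this diff):

```ts
type RecordBatch = {
    baseOffset: bigint;
    baseTimestamp: bigint;
    records: { offsetDelta: number; timestampDelta: bigint; value: string | null }[];
};

// Absolute position = batch base + per-record delta.
const toAbsolute = (batch: RecordBatch) =>
    batch.records.map((r) => ({
        offset: batch.baseOffset + BigInt(r.offsetDelta),
        timestamp: batch.baseTimestamp + r.timestampDelta,
        value: r.value,
    }));
```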
package/dist/api/fetch.js
CHANGED
@@ -1,7 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.FETCH = void 0;
-const codecs_1 = require("../codecs");
 const api_1 = require("../utils/api");
 const decoder_1 = require("../utils/decoder");
 const error_1 = require("../utils/error");
@@ -69,19 +68,7 @@ exports.FETCH = (0, api_1.createApi)({
             throw new error_1.KafkaTSApiError(partition.errorCode, null, result);
         });
     });
-
-            ...response,
-            partitions: await Promise.all(response.partitions.map(async (partition) => ({
-                ...partition,
-                records: await Promise.all(partition.records.map(async ({ recordsLength, compressedRecords, ...record }) => {
-                    const { decompress } = (0, codecs_1.findCodec)(record.compression);
-                    const decompressedRecords = await decompress(compressedRecords);
-                    const decompressedDecoder = new decoder_1.Decoder(Buffer.concat([recordsLength, decompressedRecords]));
-                    return { ...record, records: decodeRecord(decompressedDecoder) };
-                })),
-            }))),
-        })));
-        return { ...result, responses: decompressedResponses };
+        return result;
     },
 });
 const decodeRecordBatch = (decoder) => {
@@ -125,8 +112,7 @@ const decodeRecordBatch = (decoder) => {
     const producerId = batchDecoder.readInt64();
     const producerEpoch = batchDecoder.readInt16();
     const baseSequence = batchDecoder.readInt32();
-    const
-    const compressedRecords = batchDecoder.read();
+    const records = decodeRecords(batchDecoder);
     results.push({
         baseOffset,
         batchLength,
@@ -145,13 +131,12 @@ const decodeRecordBatch = (decoder) => {
         producerId,
         producerEpoch,
         baseSequence,
-
-        compressedRecords,
+        records,
     });
 }
 return results;
 };
-const
+const decodeRecords = (decoder) => decoder.readRecords((record) => ({
     attributes: record.readInt8(),
     timestampDelta: record.readVarLong(),
     offsetDelta: record.readVarInt(),
package/dist/api/index.d.ts
CHANGED
@@ -90,20 +90,25 @@ export declare const API: {
     }[];
     rackId: string;
 }, {
+    _tag: void;
+    throttleTimeMs: number;
+    errorCode: number;
+    sessionId: number;
     responses: {
+        topicId: string;
         partitions: {
+            partitionIndex: number;
+            errorCode: number;
+            highWatermark: bigint;
+            lastStableOffset: bigint;
+            logStartOffset: bigint;
+            abortedTransactions: {
+                producerId: bigint;
+                firstOffset: bigint;
+                _tag: void;
+            }[];
+            preferredReadReplica: number;
             records: {
-                records: {
-                    attributes: number;
-                    timestampDelta: bigint;
-                    offsetDelta: number;
-                    key: string | null;
-                    value: string | null;
-                    headers: {
-                        key: string;
-                        value: string;
-                    }[];
-                }[];
                 baseOffset: bigint;
                 batchLength: number;
                 partitionLeaderEpoch: number;
@@ -121,27 +126,22 @@ export declare const API: {
                 producerId: bigint;
                 producerEpoch: number;
                 baseSequence: number;
+                records: {
+                    attributes: number;
+                    timestampDelta: bigint;
+                    offsetDelta: number;
+                    key: string | null;
+                    value: string | null;
+                    headers: {
+                        key: string;
+                        value: string;
+                    }[];
+                }[];
             }[];
-            partitionIndex: number;
-            errorCode: number;
-            highWatermark: bigint;
-            lastStableOffset: bigint;
-            logStartOffset: bigint;
-            abortedTransactions: {
-                producerId: bigint;
-                firstOffset: bigint;
-                _tag: void;
-            }[];
-            preferredReadReplica: number;
             _tag: void;
         }[];
-        topicId: string;
         _tag: void;
     }[];
-    _tag: void;
-    throttleTimeMs: number;
-    errorCode: number;
-    sessionId: number;
     _tag2: void;
 }>;
 FIND_COORDINATOR: Api<{
package/dist/consumer/consumer.d.ts
CHANGED

@@ -4,7 +4,6 @@ import { IsolationLevel } from '../api/fetch';
 import { Cluster } from '../cluster';
 import { Message } from '../types';
 import { Retrier } from '../utils/retrier';
-import { BatchGranularity } from './fetch-manager';
 export type ConsumerOptions = {
     topics: string[];
     groupId?: string | null;
@@ -20,14 +19,10 @@ export type ConsumerOptions = {
     allowTopicAutoCreation?: boolean;
     fromBeginning?: boolean;
     fromTimestamp?: bigint;
-
-    concurrency?: number;
+    batchSize?: number | null;
     retrier?: Retrier;
-} & ({
     onBatch: (messages: Required<Message>[]) => unknown;
-}
-    onMessage: (message: Required<Message>) => unknown;
-});
+};
 export declare class Consumer extends EventEmitter<{
     offsetCommit: [];
     heartbeat: [];
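`onMessage` is gone from `ConsumerOptions` and `onBatch` is now the single required callback. A hedged migration sketch (`handle` is a placeholder for your existing per-message handler); note that offsets are now resolved after each batch rather than after each message:

```ts
// 0.0.x style (removed):
// await kafka.startConsumer({ topics: ['my-topic'], onMessage: (message) => handle(message) });

// 1.0.0 style: wrap the per-message handler in a batch loop.
await kafka.startConsumer({
    topics: ['my-topic'],
    onBatch: async (messages) => {
        for (const message of messages) {
            await handle(message); // hypothetical existing handler
        }
    },
});
```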
package/dist/consumer/consumer.js
CHANGED

@@ -52,8 +52,7 @@ class Consumer extends events_1.default {
         allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
         fromBeginning: options.fromBeginning ?? false,
         fromTimestamp: options.fromTimestamp ?? (options.fromBeginning ? -2n : -1n),
-
-        concurrency: options.concurrency ?? 1,
+        batchSize: options.batchSize ?? null,
         retrier: options.retrier ?? retrier_1.defaultRetrier,
     };
     this.metadata = new consumer_metadata_1.ConsumerMetadata({ cluster: this.cluster });
@@ -107,7 +106,7 @@ class Consumer extends events_1.default {
     await this.cluster.disconnect().catch((error) => logger_1.log.debug(`Failed to disconnect: ${error.message}`));
 }
 async startFetchManager() {
-    const { groupId,
+    const { groupId, batchSize } = this.options;
     while (!this.stopHook) {
         try {
             await this.consumerGroup?.join();
@@ -119,16 +118,13 @@ class Consumer extends events_1.default {
                 Object.keys(partitions).map(Number),
             ])),
         }));
-        const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
-        const numProcessors = Math.min(concurrency, numPartitions);
         this.fetchManager = new fetch_manager_1.FetchManager({
             fetch: this.fetch.bind(this),
             process: this.process.bind(this),
             metadata: this.metadata,
             consumerGroup: this.consumerGroup,
+            batchSize,
             nodeAssignments,
-            batchGranularity,
-            concurrency: numProcessors,
         });
         await this.fetchManager.start();
         if (!nodeAssignments.length) {
@@ -177,25 +173,10 @@ class Consumer extends events_1.default {
         topicPartitions[topic] ??= new Set();
         topicPartitions[topic].add(partition);
     }
-
-
-
-
-    if ('onBatch' in options) {
-        await retrier(() => options.onBatch(messages));
-        messages.forEach(({ topic, partition, offset }) => this.offsetManager.resolve(topic, partition, offset + 1n));
-    }
-    else if ('onMessage' in options) {
-        for (const message of messages) {
-            await retrier(() => options.onMessage(message)).catch(async (error) => {
-                await commit().catch();
-                throw error;
-            });
-            const { topic, partition, offset } = message;
-            this.offsetManager.resolve(topic, partition, offset + 1n);
-        }
-    }
-    await commit();
+    await retrier(() => options.onBatch(messages));
+    messages.forEach(({ topic, partition, offset }) => this.offsetManager.resolve(topic, partition, offset + 1n));
+    await this.consumerGroup?.offsetCommit(topicPartitions);
+    this.offsetManager.flush(topicPartitions);
 }
 fetch(nodeId, assignment) {
     const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
package/dist/consumer/fetch-manager.d.ts
CHANGED

@@ -1,33 +1,31 @@
 import { FetchResponse } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { Metadata } from '../metadata';
-import { Batch } from '../types';
+import { Batch, Message } from '../types';
 import { ConsumerGroup } from './consumer-group';
-export type BatchGranularity = 'partition' | 'topic' | 'broker';
 type FetchManagerOptions = {
     fetch: (nodeId: number, assignment: Assignment) => Promise<FetchResponse>;
     process: (batch: Batch) => Promise<void>;
+    batchSize?: number | null;
     metadata: Metadata;
     consumerGroup?: ConsumerGroup;
     nodeAssignments: {
         nodeId: number;
         assignment: Assignment;
     }[];
-    batchGranularity: BatchGranularity;
-    concurrency: number;
 };
 export declare class FetchManager {
     private options;
     private queue;
     private isRunning;
     private fetchers;
-    private
-    private
+    private processor;
+    private pollCallback;
     private fetcherCallbacks;
     constructor(options: FetchManagerOptions);
     start(): Promise<void>;
     stop(): Promise<void>;
-    poll(): Promise<
+    poll(): Promise<Required<Message>[]>;
     private onResponse;
 }
 export {};
package/dist/consumer/fetch-manager.js
CHANGED

@@ -10,7 +10,6 @@ var __metadata = (this && this.__metadata) || function (k, v) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.FetchManager = void 0;
-const error_1 = require("../utils/error");
 const tracer_1 = require("../utils/tracer");
 const fetcher_1 = require("./fetcher");
 const processor_1 = require("./processor");
@@ -20,28 +19,25 @@ class FetchManager {
     queue = [];
     isRunning = false;
     fetchers;
-
-
+    processor;
+    pollCallback;
     fetcherCallbacks = {};
     constructor(options) {
         this.options = options;
-        const { fetch, process, nodeAssignments
+        const { fetch, process, nodeAssignments } = this.options;
         this.fetchers = nodeAssignments.map(({ nodeId, assignment }, index) => new fetcher_1.Fetcher(index, {
             nodeId,
             assignment,
             fetch,
             onResponse: this.onResponse.bind(this),
         }));
-        this.
+        this.processor = new processor_1.Processor({ process, poll: this.poll.bind(this) });
     }
     async start() {
         this.queue = [];
         this.isRunning = true;
         try {
-            await Promise.all([
-                ...this.fetchers.map((fetcher) => fetcher.loop()),
-                ...this.processors.map((processor) => processor.loop()),
-            ]);
+            await Promise.all([...this.fetchers.map((fetcher) => fetcher.loop()), this.processor.loop()]);
         }
         finally {
             await this.stop();
@@ -51,10 +47,9 @@ class FetchManager {
         this.isRunning = false;
         const stopPromise = Promise.all([
             ...this.fetchers.map((fetcher) => fetcher.stop()),
-
+            this.processor.stop(),
         ]);
-        this.
-        this.pollQueue = [];
+        this.pollCallback?.();
         Object.values(this.fetcherCallbacks).forEach((callback) => callback());
         this.fetcherCallbacks = {};
         await stopPromise;
@@ -63,35 +58,37 @@ class FetchManager {
         if (!this.isRunning) {
             return [];
         }
-        const { consumerGroup } = this.options;
+        const { consumerGroup, batchSize } = this.options;
         consumerGroup?.handleLastHeartbeat();
-        const batch = this.queue.
-        if (!batch) {
-
-            await new Promise((resolve) => {
-                this.pollQueue.push(resolve);
-            });
+        const batch = this.queue.splice(0, batchSize ?? undefined);
+        if (!batch.length) {
+            await new Promise((resolve) => (this.pollCallback = resolve));
             return this.poll();
         }
-
-
-
-        }
-        this.pollQueue?.shift()?.();
-        return batch;
+        const [checkpoints, messages] = partition(batch, (entry) => 'kind' in entry && entry.kind === 'checkpoint');
+        checkpoints.forEach(({ fetcherId }) => this.fetcherCallbacks[fetcherId]?.());
+        return messages;
     }
     async onResponse(fetcherId, response) {
-        const { metadata,
+        const { metadata, consumerGroup } = this.options;
         consumerGroup?.handleLastHeartbeat();
-        const
-
+        const messages = response.responses.flatMap(({ topicId, partitions }) => partitions.flatMap(({ partitionIndex, records }) => records.flatMap(({ baseTimestamp, baseOffset, records }) => records.flatMap((message) => ({
+            topic: metadata.getTopicNameById(topicId),
+            partition: partitionIndex,
+            key: message.key ?? null,
+            value: message.value ?? null,
+            headers: Object.fromEntries(message.headers.map(({ key, value }) => [key, value])),
+            timestamp: baseTimestamp + BigInt(message.timestampDelta),
+            offset: baseOffset + BigInt(message.offsetDelta),
+        })))));
+        if (!messages.length) {
             return;
         }
         // wait until all broker batches have been processed or fetch manager is requested to stop
         await new Promise((resolve) => {
             this.fetcherCallbacks[fetcherId] = resolve;
-            this.queue.push(...
-            this.
+            this.queue.push(...messages, { kind: 'checkpoint', fetcherId });
+            this.pollCallback?.();
         });
         consumerGroup?.handleLastHeartbeat();
     }
@@ -115,29 +112,16 @@ __decorate([
     __metadata("design:paramtypes", [Number, Object]),
     __metadata("design:returntype", Promise)
 ], FetchManager.prototype, "onResponse", null);
-const
-const
-
-
-
-
-
-
-
-
-    switch (batchGranularity) {
-        case 'broker':
-            const messages = brokerTopics.flatMap((topicPartition) => topicPartition.flatMap((partitionMessages) => partitionMessages));
-            return messages.length ? [messages] : [];
-        case 'topic':
-            return brokerTopics
-                .map((topicPartition) => topicPartition.flatMap((partitionMessages) => partitionMessages))
-                .filter((messages) => messages.length);
-        case 'partition':
-            return brokerTopics
-                .flatMap((topicPartition) => topicPartition.map((partitionMessages) => partitionMessages))
-                .filter((messages) => messages.length);
-        default:
-            throw new error_1.KafkaTSError(`Unhandled batch granularity: ${batchGranularity}`);
+const partition = (batch, predicate) => {
+    const checkpoints = [];
+    const messages = [];
+    for (const entry of batch) {
+        if (predicate(entry)) {
+            checkpoints.push(entry);
+        }
+        else {
+            messages.push(entry);
+        }
     }
+    return [checkpoints, messages];
 };
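The `batchGranularity` machinery is replaced by a single shared queue: `onResponse` appends its messages plus a sentinel `{ kind: 'checkpoint', fetcherId }` entry, and `poll()` drains up to `batchSize` entries, firing a fetcher's callback once its checkpoint is consumed. A stripped-down, self-contained illustration of that pattern (not the library's code):

```ts
type Checkpoint = { kind: 'checkpoint'; fetcherId: number };
type Entry = string | Checkpoint; // string stands in for Required<Message>

const isCheckpoint = (e: Entry): e is Checkpoint =>
    typeof e === 'object' && e.kind === 'checkpoint';

const fetcherCallbacks: Record<number, () => void> = {
    0: () => console.log('fetcher 0 unblocked, may fetch again'),
};
const queue: Entry[] = ['m1', 'm2', { kind: 'checkpoint', fetcherId: 0 }, 'm3'];

// poll(): drain up to batchSize entries, ack consumed checkpoints,
// and hand the remaining messages to the processor.
const batch = queue.splice(0, 10 /* batchSize */);
batch.filter(isCheckpoint).forEach(({ fetcherId }) => fetcherCallbacks[fetcherId]?.());
const messages = batch.filter((e): e is string => !isCheckpoint(e));
console.log(messages); // ['m1', 'm2', 'm3']
```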
package/dist/utils/tracer.d.ts
CHANGED
@@ -1,5 +1,5 @@
 export interface Tracer {
     startActiveSpan<T>(module: string, method: string, metadata: Record<string, unknown>, callback: () => T): T;
 }
-export declare const setTracer:
+export declare const setTracer: (newTracer: Tracer) => void;
 export declare const createTracer: (module: string) => (fn?: (...args: any[]) => Record<string, unknown> | undefined) => (target: any, propertyKey: string, descriptor: PropertyDescriptor) => void;
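`setTracer` is now explicitly typed to take a `Tracer`. A minimal sketch of plugging one in (assuming both names are re-exported from the package root):

```ts
import { setTracer, Tracer } from 'kafka-ts'; // assumed re-export path

const consoleTracer: Tracer = {
    startActiveSpan(module, method, metadata, callback) {
        console.log(`[${module}] ${method}`, metadata);
        return callback(); // must return the callback's result to satisfy <T>
    },
};

setTracer(consoleTracer);
```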
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "kafka-ts",
-    "version": "0.0.17",
+    "version": "1.0.0",
     "main": "dist/index.js",
     "author": "Priit Käärd",
     "license": "MIT",
@@ -13,6 +13,7 @@
     "down": "KAFKA_VERSION=3.7.1 docker-compose down",
     "version:prerelease": "npm version prerelease --preid=beta",
     "version:patch": "npm version patch",
+    "version:major": "npm version major",
     "format": "prettier --write .",
     "build": "tsc",
     "watch": "tsc -w",