kafka-ts 1.2.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -48,6 +48,9 @@ class ConsumerGroup {
             }
             catch (error) {
                 this.heartbeatError = error;
+                if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
+                    this.options.consumer.emit('rebalanceInProgress');
+                }
             }
         }, 5000);
     }
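With this change, a heartbeat that fails with REBALANCE_IN_PROGRESS is surfaced as an event on the consumer instead of only being stored on heartbeatError. A minimal sketch of subscribing to it; the import assumes Consumer is exported from the package root, and `consumer` stands in for an instance created by the application:

// Sketch only, not part of the package.
import { Consumer } from 'kafka-ts';

declare const consumer: Consumer; // assumed to be an already-created consumer

consumer.on('rebalanceInProgress', () => {
    // Emitted when a heartbeat fails with REBALANCE_IN_PROGRESS; the in-flight
    // onBatch call is signalled to stop via its abortSignal (see below).
    console.warn('Consumer group rebalance in progress');
});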
@@ -21,11 +21,13 @@ export type ConsumerOptions = {
     retrier?: Retrier;
     onBatch: (messages: Required<Message>[], context: {
         resolveOffset: (message: Pick<Required<Message>, 'topic' | 'partition' | 'offset'>) => void;
+        abortSignal: AbortSignal;
     }) => unknown;
 };
 export declare class Consumer extends EventEmitter<{
     offsetCommit: [];
     heartbeat: [];
+    rebalanceInProgress: [];
 }> {
     private cluster;
     private options;
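The onBatch context now carries an AbortSignal that is tripped when a rebalance starts, so a handler can stop work it no longer owns. A hedged sketch of a handler that checks the signal between messages; `processMessage` is a hypothetical application function, and the import path assumes these types are re-exported from the package root:

import type { ConsumerOptions, Message } from 'kafka-ts';

// Hypothetical application-level processing function.
const processMessage = async (message: Required<Message>): Promise<void> => {
    // ... application logic ...
};

const onBatch: ConsumerOptions['onBatch'] = async (messages, { resolveOffset, abortSignal }) => {
    for (const message of messages) {
        if (abortSignal.aborted) {
            // A rebalance is in progress: stop early; offsets resolved so far
            // are committed by the consumer.
            return;
        }
        await processMessage(message);
        resolveOffset(message); // mark this message's offset as processed
    }
};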
@@ -75,6 +75,7 @@ class Consumer extends events_1.default {
                 consumer: this,
             })
             : undefined;
+        this.setMaxListeners(Infinity);
     }
     async start() {
         this.stopHook = undefined;
@@ -191,26 +192,34 @@ class Consumer extends events_1.default {
         if (!messages.length) {
             return;
         }
+        const commitOffset = () => this.consumerGroup?.offsetCommit(topicPartitions).then(() => this.offsetManager.flush(topicPartitions));
         const resolveOffset = (message) => this.offsetManager.resolve(message.topic, message.partition, message.offset + 1n);
+        const abortController = new AbortController();
+        const onRebalance = () => {
+            abortController.abort();
+            commitOffset()?.catch();
+        };
+        this.once('rebalanceInProgress', onRebalance);
         try {
-            await retrier(() => options.onBatch(messages.filter((message) => !this.offsetManager.isResolved(message)), { resolveOffset }));
+            await retrier(() => options.onBatch(messages.filter((message) => !this.offsetManager.isResolved(message)), { resolveOffset, abortSignal: abortController.signal }));
         }
         catch (error) {
-            await this.consumerGroup
-                ?.offsetCommit(topicPartitions)
-                .then(() => this.offsetManager.flush(topicPartitions))
-                .catch();
+            await commitOffset()?.catch();
             throw error;
         }
-        response.responses.forEach(({ topicId, partitions }) => {
-            partitions.forEach(({ partitionIndex, records }) => {
-                records.forEach(({ baseOffset, lastOffsetDelta }) => {
-                    this.offsetManager.resolve(this.metadata.getTopicNameById(topicId), partitionIndex, baseOffset + BigInt(lastOffsetDelta) + 1n);
+        finally {
+            this.off('rebalanceInProgress', onRebalance);
+        }
+        if (!abortController.signal.aborted) {
+            response.responses.forEach(({ topicId, partitions }) => {
+                partitions.forEach(({ partitionIndex, records }) => {
+                    records.forEach(({ baseOffset, lastOffsetDelta }) => {
+                        this.offsetManager.resolve(this.metadata.getTopicNameById(topicId), partitionIndex, baseOffset + BigInt(lastOffsetDelta) + 1n);
+                    });
                 });
             });
-        });
-        await this.consumerGroup?.offsetCommit(topicPartitions);
-        this.offsetManager.flush(topicPartitions);
+        }
+        await commitOffset();
     }
     async fetch(nodeId, assignment) {
         return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
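The batch path now builds the commit-and-flush step once (commitOffset), registers a one-shot rebalanceInProgress listener that aborts the batch and commits whatever has been resolved, and always detaches the listener in finally; setMaxListeners(Infinity) in the constructor (above) presumably keeps Node from warning about these per-batch listeners. Post-batch offset resolution is skipped when the batch was aborted. A distilled sketch of the listener-plus-AbortController pattern, using placeholder names (emitter, doWork) rather than kafka-ts internals:

import { EventEmitter } from 'node:events';

// Generic form of the pattern above: one AbortController per unit of work and
// a once-listener that is removed no matter how the work ends.
const runAbortable = async (
    emitter: EventEmitter,
    doWork: (signal: AbortSignal) => Promise<void>,
) => {
    const controller = new AbortController();
    const onRebalance = () => controller.abort();
    emitter.once('rebalanceInProgress', onRebalance);
    try {
        await doWork(controller.signal);
    } finally {
        // Detach whether doWork resolved, threw, or was aborted, so listeners
        // do not pile up across repeated batches.
        emitter.off('rebalanceInProgress', onRebalance);
    }
};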
package/dist/utils/decoder.js
CHANGED
@@ -110,21 +110,23 @@ class Decoder {
     }
     readArray(callback) {
         const length = this.readInt32();
-        const results = new Array(length);
+        const results = new Array(Math.max(length, 0));
         for (let i = 0; i < length; i++)
             results[i] = callback(this);
         return results;
     }
     readCompactArray(callback) {
         const length = this.readUVarInt() - 1;
-        const results = new Array(length);
+        const results = new Array(Math.max(length, 0));
         for (let i = 0; i < length; i++)
             results[i] = callback(this);
         return results;
     }
     readVarIntArray(callback) {
         const length = this.readVarInt();
-        const results = Array.
+        const results = new Array(Math.max(length, 0));
+        for (let i = 0; i < length; i++)
+            results[i] = callback(this);
         return results;
     }
     readRecords(callback) {
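The clamp matters because the Kafka wire protocol encodes null arrays with a negative length (a plain array uses -1, and a compact array encodes 0, which becomes -1 after the readUVarInt() - 1 step), and new Array(-1) throws a RangeError. The loop condition already handles non-positive lengths, so clamping the allocation is enough. A standalone sketch of the failure mode, not the package's Decoder:

// new Array(-1) throws, so a nullable (length -1) array must be clamped.
const readArraySketch = <T>(length: number, readElement: () => T): T[] => {
    const results = new Array<T>(Math.max(length, 0)); // 0 slots for null/empty
    for (let i = 0; i < length; i++) results[i] = readElement();
    return results;
};

console.log(readArraySketch(-1, () => 0)); // [] instead of a RangeError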
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "kafka-ts",
-  "version": "1.2.0",
+  "version": "1.3.0",
   "main": "dist/index.js",
   "author": "Priit Käärd",
   "license": "MIT",
@@ -13,6 +13,7 @@
   "down": "KAFKA_VERSION=4.0.0 docker-compose down",
   "version:prerelease": "npm version prerelease --preid=beta",
   "version:patch": "npm version patch",
+  "version:minor": "npm version minor",
   "version:major": "npm version major",
   "format": "prettier --write .",
   "build": "tsc",