kafka-ts 0.0.1-beta.4 → 0.0.1-beta.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/release.yml +19 -6
- package/README.md +15 -21
- package/dist/api/api-versions.d.ts +9 -0
- package/dist/api/api-versions.js +24 -0
- package/dist/api/create-topics.d.ts +38 -0
- package/dist/api/create-topics.js +53 -0
- package/dist/api/delete-topics.d.ts +18 -0
- package/dist/api/delete-topics.js +33 -0
- package/dist/api/fetch.d.ts +84 -0
- package/dist/api/fetch.js +142 -0
- package/dist/api/find-coordinator.d.ts +21 -0
- package/dist/api/find-coordinator.js +39 -0
- package/dist/api/heartbeat.d.ts +11 -0
- package/dist/api/heartbeat.js +27 -0
- package/dist/api/index.d.ts +578 -0
- package/dist/api/index.js +165 -0
- package/dist/api/init-producer-id.d.ts +13 -0
- package/dist/api/init-producer-id.js +29 -0
- package/dist/api/join-group.d.ts +34 -0
- package/dist/api/join-group.js +51 -0
- package/dist/api/leave-group.d.ts +19 -0
- package/dist/api/leave-group.js +39 -0
- package/dist/api/list-offsets.d.ts +29 -0
- package/dist/api/list-offsets.js +48 -0
- package/dist/api/metadata.d.ts +40 -0
- package/dist/api/metadata.js +58 -0
- package/dist/api/offset-commit.d.ts +28 -0
- package/dist/api/offset-commit.js +48 -0
- package/dist/api/offset-fetch.d.ts +33 -0
- package/dist/api/offset-fetch.js +57 -0
- package/dist/api/produce.d.ts +54 -0
- package/dist/api/produce.js +126 -0
- package/dist/api/sasl-authenticate.d.ts +11 -0
- package/dist/api/sasl-authenticate.js +23 -0
- package/dist/api/sasl-handshake.d.ts +6 -0
- package/dist/api/sasl-handshake.js +19 -0
- package/dist/api/sync-group.d.ts +24 -0
- package/dist/api/sync-group.js +36 -0
- package/dist/auth/index.d.ts +2 -0
- package/dist/auth/index.js +8 -0
- package/dist/auth/plain.d.ts +5 -0
- package/dist/auth/plain.js +12 -0
- package/dist/auth/scram.d.ts +9 -0
- package/dist/auth/scram.js +40 -0
- package/dist/broker.d.ts +30 -0
- package/dist/broker.js +55 -0
- package/dist/client.d.ts +23 -0
- package/dist/client.js +36 -0
- package/dist/cluster.d.ts +27 -0
- package/dist/cluster.js +70 -0
- package/dist/cluster.test.d.ts +1 -0
- package/dist/cluster.test.js +345 -0
- package/dist/codecs/gzip.d.ts +2 -0
- package/dist/codecs/gzip.js +8 -0
- package/dist/codecs/index.d.ts +2 -0
- package/dist/codecs/index.js +17 -0
- package/dist/codecs/none.d.ts +2 -0
- package/dist/codecs/none.js +7 -0
- package/dist/codecs/types.d.ts +5 -0
- package/dist/codecs/types.js +2 -0
- package/dist/connection.d.ts +26 -0
- package/dist/connection.js +175 -0
- package/dist/consumer/consumer-group.d.ts +41 -0
- package/dist/consumer/consumer-group.js +217 -0
- package/dist/consumer/consumer-metadata.d.ts +7 -0
- package/dist/consumer/consumer-metadata.js +14 -0
- package/dist/consumer/consumer.d.ts +44 -0
- package/dist/consumer/consumer.js +225 -0
- package/dist/consumer/fetch-manager.d.ts +33 -0
- package/dist/consumer/fetch-manager.js +140 -0
- package/dist/consumer/fetcher.d.ts +25 -0
- package/dist/consumer/fetcher.js +64 -0
- package/dist/consumer/offset-manager.d.ts +22 -0
- package/dist/consumer/offset-manager.js +66 -0
- package/dist/consumer/processor.d.ts +19 -0
- package/dist/consumer/processor.js +59 -0
- package/dist/distributors/assignments-to-replicas.d.ts +16 -0
- package/dist/distributors/assignments-to-replicas.js +59 -0
- package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
- package/dist/distributors/assignments-to-replicas.test.js +40 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/distributors/partitioner.d.ts +7 -0
- package/dist/distributors/partitioner.js +23 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.js +26 -0
- package/dist/metadata.d.ts +24 -0
- package/dist/metadata.js +106 -0
- package/dist/producer/producer.d.ts +24 -0
- package/dist/producer/producer.js +131 -0
- package/dist/types.d.ts +11 -0
- package/dist/types.js +2 -0
- package/dist/utils/api.d.ts +9 -0
- package/dist/utils/api.js +5 -0
- package/dist/utils/crypto.d.ts +8 -0
- package/dist/utils/crypto.js +18 -0
- package/dist/utils/decoder.d.ts +30 -0
- package/dist/utils/decoder.js +152 -0
- package/dist/utils/delay.d.ts +1 -0
- package/dist/utils/delay.js +5 -0
- package/dist/utils/encoder.d.ts +28 -0
- package/dist/utils/encoder.js +125 -0
- package/dist/utils/error.d.ts +11 -0
- package/dist/utils/error.js +27 -0
- package/dist/utils/logger.d.ts +9 -0
- package/dist/utils/logger.js +32 -0
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/murmur2.d.ts +3 -0
- package/dist/utils/murmur2.js +40 -0
- package/dist/utils/retrier.d.ts +10 -0
- package/dist/utils/retrier.js +22 -0
- package/dist/utils/tracer.d.ts +5 -0
- package/dist/utils/tracer.js +39 -0
- package/docker-compose.yml +3 -3
- package/examples/package-lock.json +3501 -3
- package/examples/package.json +8 -1
- package/examples/src/benchmark/common.ts +98 -0
- package/examples/src/benchmark/kafka-ts.ts +67 -0
- package/examples/src/benchmark/kafkajs.ts +51 -0
- package/examples/src/client.ts +4 -1
- package/examples/src/opentelemetry.ts +46 -0
- package/examples/src/producer.ts +11 -11
- package/package.json +4 -2
- package/scripts/create-scram-user.sh +4 -2
- package/scripts/generate-certs.sh +2 -0
- package/src/__snapshots__/cluster.test.ts.snap +35 -185
- package/src/api/fetch.ts +6 -1
- package/src/api/index.ts +3 -1
- package/src/api/metadata.ts +1 -1
- package/src/api/produce.ts +7 -10
- package/src/cluster.test.ts +2 -2
- package/src/cluster.ts +9 -16
- package/src/connection.ts +28 -15
- package/src/consumer/consumer-group.ts +35 -15
- package/src/consumer/consumer.ts +28 -18
- package/src/consumer/fetch-manager.ts +29 -45
- package/src/consumer/fetcher.ts +21 -14
- package/src/consumer/offset-manager.ts +18 -7
- package/src/consumer/processor.ts +14 -10
- package/src/distributors/assignments-to-replicas.ts +1 -3
- package/src/index.ts +1 -1
- package/src/metadata.ts +4 -0
- package/src/producer/producer.ts +11 -6
- package/src/utils/decoder.ts +0 -4
- package/src/utils/encoder.ts +26 -19
- package/src/utils/logger.ts +4 -4
- package/src/utils/tracer.ts +39 -23
- package/certs/ca.key +0 -52
- package/certs/ca.srl +0 -1
- package/certs/kafka.crt +0 -29
- package/certs/kafka.csr +0 -26
- package/certs/kafka.key +0 -52
- package/src/utils/mutex.ts +0 -31
package/src/connection.ts
CHANGED

@@ -6,11 +6,12 @@ import { Api } from './utils/api';
 import { Decoder } from './utils/decoder';
 import { Encoder } from './utils/encoder';
 import { ConnectionError } from './utils/error';
+import { log } from './utils/logger';
 import { createTracer } from './utils/tracer';
 
 const trace = createTracer('Connection');
 
-
+type ConnectionOptions = {
     clientId: string | null;
     connection: TcpSocketConnectOpts;
     ssl: TLSSocketOptions | null;
@@ -24,14 +25,14 @@ export class Connection {
         [correlationId: number]: { resolve: (response: RawResonse) => void; reject: (error: Error) => void };
     } = {};
     private lastCorrelationId = 0;
-    private
+    private chunks: Buffer[] = [];
 
     constructor(private options: ConnectionOptions) {}
 
     @trace()
     public async connect() {
         this.queue = {};
-        this.
+        this.chunks = [];
 
         await new Promise<void>((resolve, reject) => {
             const { ssl, connection } = this.options;
@@ -73,6 +74,7 @@ export class Connection {
     @trace((api, body) => ({ message: getApiName(api), body }))
     public async sendRequest<Request, Response>(api: Api<Request, Response>, body: Request): Promise<Response> {
         const correlationId = this.nextCorrelationId();
+        const apiName = getApiName(api);
 
         const encoder = new Encoder()
             .writeInt16(api.apiKey)
@@ -80,17 +82,24 @@
             .writeInt32(correlationId)
             .writeString(this.options.clientId);
 
-        const request = api.request(encoder, body)
-        const requestEncoder = new Encoder().writeInt32(request.
+        const request = api.request(encoder, body);
+        const requestEncoder = new Encoder().writeInt32(request.getByteLength()).writeEncoder(request);
 
+        let timeout: NodeJS.Timeout | undefined;
         const { responseDecoder, responseSize } = await new Promise<RawResonse>(async (resolve, reject) => {
+            timeout = setTimeout(() => {
+                delete this.queue[correlationId];
+                reject(new ConnectionError(`${apiName} timed out`));
+            }, 30_000);
+
            try {
-                await this.write(requestEncoder.value());
                this.queue[correlationId] = { resolve, reject };
+                await this.write(requestEncoder.value());
            } catch (error) {
                reject(error);
            }
        });
+        clearTimeout(timeout);
        const response = await api.response(responseDecoder);
 
        assert(
@@ -116,12 +125,13 @@
     }
 
     private handleData(buffer: Buffer) {
-        this.
-
+        this.chunks.push(buffer);
+
+        const decoder = new Decoder(Buffer.concat(this.chunks));
+        if (decoder.getBufferLength() < 4) {
            return;
        }
 
-        const decoder = new Decoder(this.buffer);
        const size = decoder.readInt32();
        if (size !== decoder.getBufferLength() - 4) {
            return;
@@ -129,15 +139,18 @@
 
        const correlationId = decoder.readInt32();
 
-        const
-
-
-
-
+        const context = this.queue[correlationId];
+        if (context) {
+            delete this.queue[correlationId];
+            context.resolve({ responseDecoder: decoder, responseSize: size });
+        } else {
+            log.debug('Could not find pending request for correlationId', { correlationId });
+        }
+        this.chunks = [];
    }
 
    private nextCorrelationId() {
-        return
+        return this.lastCorrelationId++;
    }
 }
 
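The handleData rewrite above switches from a single response buffer to accumulating TCP chunks until a full length-prefixed frame has arrived, and sendRequest now rejects a queued request after 30 seconds. Below is a minimal standalone sketch of the chunk-accumulation idea; the FrameAccumulator name is illustrative and not part of kafka-ts.

```ts
import { Buffer } from 'node:buffer';

// Kafka responses are length-prefixed: a 4-byte big-endian size followed by the payload.
// push() collects chunks until the declared size matches what has been received.
class FrameAccumulator {
    private chunks: Buffer[] = [];

    public push(chunk: Buffer): Buffer | null {
        this.chunks.push(chunk);
        const buffer = Buffer.concat(this.chunks);
        if (buffer.length < 4) return null;

        const size = buffer.readInt32BE(0);
        if (size !== buffer.length - 4) return null; // frame still incomplete

        this.chunks = []; // reset for the next response
        return buffer.subarray(4); // payload without the size prefix
    }
}
```

Feeding two half-frames returns null for the first and the full payload for the second, which mirrors how handleData now waits before resolving the queued correlation id.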

package/src/consumer/consumer-group.ts
CHANGED

@@ -1,11 +1,15 @@
+import EventEmitter from 'events';
 import { API, API_ERROR } from '../api';
 import { KEY_TYPE } from '../api/find-coordinator';
 import { Assignment, MemberAssignment } from '../api/sync-group';
 import { Cluster } from '../cluster';
 import { KafkaTSApiError, KafkaTSError } from '../utils/error';
+import { createTracer } from '../utils/tracer';
 import { ConsumerMetadata } from './consumer-metadata';
 import { OffsetManager } from './offset-manager';
 
+const trace = createTracer('ConsumerGroup');
+
 type ConsumerGroupOptions = {
     cluster: Cluster;
     topics: string[];
@@ -17,7 +21,7 @@ type ConsumerGroupOptions = {
     offsetManager: OffsetManager;
 };
 
-export class ConsumerGroup {
+export class ConsumerGroup extends EventEmitter<{ offsetCommit: [] }> {
     private coordinatorId = -1;
     private memberId = '';
     private generationId = -1;
@@ -26,12 +30,16 @@ export class ConsumerGroup {
     private heartbeatInterval: NodeJS.Timeout | null = null;
     private heartbeatError: KafkaTSError | null = null;
 
-    constructor(private options: ConsumerGroupOptions) {
+    constructor(private options: ConsumerGroupOptions) {
+        super();
+    }
 
+    @trace()
     public async join() {
         await this.findCoordinator();
         await this.options.cluster.setSeedBroker(this.coordinatorId);
 
+        this.memberId = '';
         await this.joinGroup();
         await this.syncGroup();
         await this.offsetFetch();
@@ -55,12 +63,16 @@
         }
     }
 
-    public
+    public handleLastHeartbeat() {
         if (this.heartbeatError) {
             throw this.heartbeatError;
         }
     }
 
+    public resetHeartbeat() {
+        this.heartbeatError = null;
+    }
+
     private async findCoordinator() {
         const { coordinators } = await this.options.cluster.sendRequest(API.FIND_COORDINATOR, {
             keyType: KEY_TYPE.GROUP,
@@ -147,30 +159,34 @@
         if (!request.groups.length) return;
 
         const response = await cluster.sendRequest(API.OFFSET_FETCH, request);
+
+        const topicPartitions: Record<string, Set<number>> = {};
         response.groups.forEach((group) => {
             group.topics.forEach((topic) => {
-                topic.
-
-
-
-
+                topicPartitions[topic.name] ??= new Set();
+                topic.partitions.forEach(({ partitionIndex, committedOffset }) => {
+                    if (committedOffset >= 0) {
+                        topicPartitions[topic.name].add(partitionIndex);
+                        offsetManager.resolve(topic.name, partitionIndex, committedOffset);
+                    }
+                });
             });
         });
-        offsetManager.flush();
+        offsetManager.flush(topicPartitions);
     }
 
-    public async offsetCommit() {
+    public async offsetCommit(topicPartitions: Record<string, Set<number>>) {
         const { cluster, groupId, groupInstanceId, offsetManager } = this.options;
         const request = {
             groupId,
             groupInstanceId,
             memberId: this.memberId,
             generationIdOrMemberEpoch: this.generationId,
-            topics: Object.entries(
+            topics: Object.entries(topicPartitions).map(([topic, partitions]) => ({
                 name: topic,
-                partitions:
-                    partitionIndex
-                    committedOffset:
+                partitions: [...partitions].map((partitionIndex) => ({
+                    partitionIndex,
+                    committedOffset: offsetManager.pendingOffsets[topic][partitionIndex],
                     committedLeaderEpoch: -1,
                     committedMetadata: null,
                 })),
@@ -180,7 +196,7 @@
             return;
         }
         await cluster.sendRequest(API.OFFSET_COMMIT, request);
-
+        this.emit('offsetCommit');
     }
 
     public async heartbeat() {
@@ -194,6 +210,10 @@
     }
 
     public async leaveGroup() {
+        if (this.coordinatorId === -1) {
+            return;
+        }
+
         const { cluster, groupId, groupInstanceId } = this.options;
         this.stopHeartbeater();
         try {
package/src/consumer/consumer.ts
CHANGED

@@ -1,3 +1,4 @@
+import EventEmitter from 'events';
 import { API, API_ERROR } from '../api';
 import { IsolationLevel } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
@@ -33,7 +34,7 @@ export type ConsumerOptions = {
     concurrency?: number;
 } & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
 
-export class Consumer {
+export class Consumer extends EventEmitter<{ offsetCommit: [] }> {
     private options: Required<ConsumerOptions>;
     private metadata: ConsumerMetadata;
     private consumerGroup: ConsumerGroup | undefined;
@@ -45,6 +46,8 @@ export class Consumer {
         private cluster: Cluster,
         options: ConsumerOptions,
     ) {
+        super();
+
         this.options = {
             ...options,
             groupId: options.groupId ?? null,
@@ -81,8 +84,10 @@
                   offsetManager: this.offsetManager,
               })
             : undefined;
+        this.consumerGroup?.on('offsetCommit', () => this.emit('offsetCommit'));
    }
 
+    @trace()
     public async start(): Promise<void> {
         const { topics, allowTopicAutoCreation, fromBeginning } = this.options;
 
@@ -95,7 +100,7 @@
             await this.offsetManager.fetchOffsets({ fromBeginning });
             await this.consumerGroup?.join();
         } catch (error) {
-            log.
+            log.warn('Failed to start consumer', error);
             log.debug(`Restarting consumer in 1 second...`);
             await delay(1000);
 
@@ -113,14 +118,16 @@
                 await this.fetchManager?.stop();
            });
        }
-        await this.consumerGroup?.leaveGroup().catch((error) => log.
-        await this.cluster.disconnect().catch((error) => log.
+        await this.consumerGroup?.leaveGroup().catch((error) => log.debug(`Failed to leave group: ${error.message}`));
+        await this.cluster.disconnect().catch((error) => log.debug(`Failed to disconnect: ${error.message}`));
    }
 
-    private
+    private async startFetchManager() {
        const { batchGranularity, concurrency } = this.options;
 
        while (!this.stopHook) {
+            this.consumerGroup?.resetHeartbeat();
+
            // TODO: If leader is not available, find another read replica
            const nodeAssignments = Object.entries(
                distributeMessagesToTopicPartitionLeaders(
@@ -158,7 +165,7 @@
            if (!nodeAssignments.length) {
                log.debug('No partitions assigned. Waiting for reassignment...');
                await delay(this.options.maxWaitMs);
-
+                this.consumerGroup?.handleLastHeartbeat();
            }
        } catch (error) {
            await this.fetchManager.stop();
@@ -186,12 +193,18 @@
            }
        }
        this.stopHook?.();
-    }
+    }
 
-    @trace()
+    @trace((messages) => ({ count: messages.length }))
    private async process(messages: Required<Message>[]) {
        const { options } = this;
 
+        const topicPartitions: Record<string, Set<number>> = {};
+        for (const { topic, partition } of messages) {
+            topicPartitions[topic] ??= new Set();
+            topicPartitions[topic].add(partition);
+        }
+
        if ('onBatch' in options) {
            await options.onBatch(messages);
 
@@ -199,19 +212,16 @@
                this.offsetManager.resolve(topic, partition, offset + 1n),
            );
        } else if ('onMessage' in options) {
-
-
-            await options.onMessage(message);
+            for (const message of messages) {
+                await options.onMessage(message);
 
-
-
-            }
-        } catch (error) {
-            await this.consumerGroup?.offsetCommit().catch(() => {});
-            throw error;
+                const { topic, partition, offset } = message;
+                this.offsetManager.resolve(topic, partition, offset + 1n);
            }
        }
-
+
+        await this.consumerGroup?.offsetCommit(topicPartitions);
+        this.offsetManager.flush(topicPartitions);
    }
 
    private fetch(nodeId: number, assignment: Assignment) {
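The reworked process() collects the topic/partitions touched by a batch before invoking the handler, then commits and flushes only those. A small sketch of that collection step, with Message as a minimal stand-in type:

```ts
type Message = { topic: string; partition: number; offset: bigint };

// Group the partitions seen in a batch by topic, as the new process() does
// before calling offsetCommit(topicPartitions) and offsetManager.flush(topicPartitions).
const collectTopicPartitions = (messages: Message[]): Record<string, Set<number>> => {
    const topicPartitions: Record<string, Set<number>> = {};
    for (const { topic, partition } of messages) {
        topicPartitions[topic] ??= new Set();
        topicPartitions[topic].add(partition);
    }
    return topicPartitions;
};

// collectTopicPartitions([{ topic: 'orders', partition: 0, offset: 1n }])
// -> { orders: Set { 0 } }
```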
package/src/consumer/fetch-manager.ts
CHANGED

@@ -1,5 +1,4 @@
-import
-import { API } from '../api';
+import { FetchResponse } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { Metadata } from '../metadata';
 import { Batch, Message } from '../types';
@@ -14,7 +13,7 @@ const trace = createTracer('FetchManager');
 export type BatchGranularity = 'partition' | 'topic' | 'broker';
 
 type FetchManagerOptions = {
-    fetch: (nodeId: number, assignment: Assignment) => Promise<
+    fetch: (nodeId: number, assignment: Assignment) => Promise<FetchResponse>;
     process: (batch: Batch) => Promise<void>;
     metadata: Metadata;
     consumerGroup?: ConsumerGroup;
@@ -26,15 +25,15 @@
 type Checkpoint = { kind: 'checkpoint'; fetcherId: number };
 type Entry = Batch | Checkpoint;
 
-export class FetchManager
+export class FetchManager {
     private queue: Entry[] = [];
     private isRunning = false;
     private fetchers: Fetcher[];
     private processors: Processor[];
+    private pollQueue: (() => void)[] = [];
+    private fetcherCallbacks: Record<number, () => void> = {};
 
     constructor(private options: FetchManagerOptions) {
-        super();
-
         const { fetch, process, consumerGroup, nodeAssignments, concurrency } = this.options;
 
         this.fetchers = nodeAssignments.map(
@@ -52,6 +51,7 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
         );
     }
 
+    @trace(() => ({ root: true }))
     public async start() {
         this.queue = [];
         this.isRunning = true;
@@ -62,20 +62,25 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
                 ...this.processors.map((processor) => processor.loop()),
             ]);
         } finally {
-            this.
-            this.emit('stop');
+            await this.stop();
        }
    }
 
-    @trace()
    public async stop() {
        this.isRunning = false;
-        this.emit('stop');
 
-
+        const stopPromise = Promise.all([
            ...this.fetchers.map((fetcher) => fetcher.stop()),
            ...this.processors.map((processor) => processor.stop()),
        ]);
+
+        this.pollQueue.forEach((resolve) => resolve());
+        this.pollQueue = [];
+
+        Object.values(this.fetcherCallbacks).forEach((callback) => callback());
+        this.fetcherCallbacks = {};
+
+        await stopPromise;
    }
 
    @trace()
@@ -88,29 +93,23 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
        if (!batch) {
            // wait until new data is available or fetch manager is requested to stop
            await new Promise<void>((resolve) => {
-
-                this.removeListener('stop', onStop);
-                resolve();
-                };
-                const onStop = () => {
-                    this.removeListener('data', onData);
-                    resolve();
-                };
-                this.once('data', onData);
-                this.once('stop', onStop);
+                this.pollQueue.push(resolve);
            });
            return this.poll();
        }
 
        if ('kind' in batch && batch.kind === 'checkpoint') {
-            this.
+            this.fetcherCallbacks[batch.fetcherId]?.();
            return this.poll();
        }
 
+        this.pollQueue?.shift()?.();
+
        return batch as Exclude<Entry, Checkpoint>;
    }
 
-
+    @trace()
+    private async onResponse(fetcherId: number, response: FetchResponse) {
        const { metadata, batchGranularity } = this.options;
 
        const batches = fetchResponseToBatches(response, batchGranularity, metadata);
@@ -120,30 +119,15 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
 
        // wait until all broker batches have been processed or fetch manager is requested to stop
        await new Promise<void>((resolve) => {
-
-
-
-                this.removeListener('stop', onStop);
-                resolve();
-            }
-            };
-            const onStop = () => {
-                this.removeListener('checkpoint', onCheckpoint);
-                resolve();
-            };
-            this.on('checkpoint', onCheckpoint);
-            this.once('stop', onStop);
-
-            this.queue.push(...batches);
-            this.queue.push({ kind: 'checkpoint', fetcherId });
-
-            this.emit('data');
+            this.fetcherCallbacks[fetcherId] = resolve;
+            this.queue.push(...batches, { kind: 'checkpoint', fetcherId });
+            this.pollQueue?.shift()?.();
        });
    }
 }
 
 const fetchResponseToBatches = (
-    batch:
+    batch: FetchResponse,
     batchGranularity: BatchGranularity,
     metadata: Metadata,
 ): Batch[] => {
@@ -176,9 +160,9 @@ const fetchResponseToBatches = (
            .map((topicPartition) => topicPartition.flatMap((partitionMessages) => partitionMessages))
            .filter((messages) => messages.length);
        case 'partition':
-            return brokerTopics
-                topicPartition.map((partitionMessages) => partitionMessages)
-
+            return brokerTopics
+                .flatMap((topicPartition) => topicPartition.map((partitionMessages) => partitionMessages))
+                .filter((messages) => messages.length);
        default:
            throw new KafkaTSError(`Unhandled batch granularity: ${batchGranularity}`);
    }
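FetchManager no longer extends EventEmitter; poll() and onResponse() now coordinate through stored promise resolvers (pollQueue and fetcherCallbacks) instead of 'data'/'stop'/'checkpoint' events. A condensed sketch of that resolver-queue pattern, with an illustrative class name:

```ts
// Waiters park themselves by pushing a resolver; producers wake one waiter per
// queued entry, and stop() wakes everyone so pending poll() calls can exit.
class PollSignal {
    private waiters: (() => void)[] = [];

    public wait(): Promise<void> {
        return new Promise<void>((resolve) => this.waiters.push(resolve));
    }

    public notifyOne(): void {
        this.waiters.shift()?.();
    }

    public notifyAll(): void {
        this.waiters.forEach((resolve) => resolve());
        this.waiters = [];
    }
}
```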
package/src/consumer/fetcher.ts
CHANGED

@@ -1,5 +1,5 @@
 import { EventEmitter } from 'stream';
-import {
+import { FetchResponse } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { createTracer } from '../utils/tracer';
 import { ConsumerGroup } from './consumer-group';
@@ -10,11 +10,11 @@ type FetcherOptions = {
     nodeId: number;
     assignment: Assignment;
     consumerGroup?: ConsumerGroup;
-    fetch: (nodeId: number, assignment: Assignment) => Promise<
-    onResponse: (fetcherId: number, response:
+    fetch: (nodeId: number, assignment: Assignment) => Promise<FetchResponse>;
+    onResponse: (fetcherId: number, response: FetchResponse) => Promise<void>;
 };
 
-export class Fetcher extends EventEmitter<{
+export class Fetcher extends EventEmitter<{ stopped: [] }> {
     private isRunning = false;
 
     constructor(
@@ -25,17 +25,11 @@ export class Fetcher extends EventEmitter<{ stop: []; stopped: []; data: []; dra
     }
 
     public async loop() {
-        const { nodeId, assignment, consumerGroup, fetch, onResponse } = this.options;
-
         this.isRunning = true;
-
-
+
         try {
             while (this.isRunning) {
-
-                await consumerGroup?.handleLastHeartbeat();
-                await onResponse(this.fetcherId, response);
-                await consumerGroup?.handleLastHeartbeat();
+                await this.step();
             }
         } finally {
             this.isRunning = false;
@@ -44,14 +38,27 @@
     }
 
     @trace()
+    private async step() {
+        const { nodeId, assignment, consumerGroup, fetch, onResponse } = this.options;
+
+        const response = await fetch(nodeId, assignment);
+        if (!this.isRunning) {
+            return;
+        }
+        consumerGroup?.handleLastHeartbeat();
+        await onResponse(this.fetcherId, response);
+        consumerGroup?.handleLastHeartbeat();
+    }
+
     public async stop() {
         if (!this.isRunning) {
             return;
         }
 
-
-        return new Promise<void>((resolve) => {
+        const stopPromise = new Promise<void>((resolve) => {
             this.once('stopped', resolve);
         });
+        this.isRunning = false;
+        return stopPromise;
     }
 }
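Fetcher.stop() now registers its 'stopped' listener before clearing isRunning, so the loop's final notification cannot be missed. A simplified sketch of that handshake, assuming the loop emits 'stopped' when it exits; the Worker name and the exact emit placement are assumptions, not the package's code:

```ts
import { EventEmitter } from 'node:events';

class Worker extends EventEmitter<{ stopped: [] }> {
    private isRunning = false;

    public async loop(step: () => Promise<void>) {
        this.isRunning = true;
        try {
            while (this.isRunning) {
                await step(); // one fetch/onResponse round, like Fetcher.step()
            }
        } finally {
            this.isRunning = false;
            this.emit('stopped'); // assumed notification point
        }
    }

    public async stop() {
        if (!this.isRunning) return;
        // Subscribe first, then flip the flag, then wait for the loop to wind down.
        const stopPromise = new Promise<void>((resolve) => this.once('stopped', resolve));
        this.isRunning = false;
        return stopPromise;
    }
}
```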
package/src/consumer/offset-manager.ts
CHANGED

@@ -3,8 +3,11 @@ import { IsolationLevel } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { Cluster } from '../cluster';
 import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
+import { createTracer } from '../utils/tracer';
 import { ConsumerMetadata } from './consumer-metadata';
 
+const trace = createTracer('OffsetManager');
+
 type OffsetManagerOptions = {
     cluster: Cluster;
     metadata: ConsumerMetadata;
@@ -24,13 +27,18 @@ export class OffsetManager {
     public resolve(topic: string, partition: number, offset: bigint) {
         this.pendingOffsets[topic] ??= {};
         this.pendingOffsets[topic][partition] = offset;
-
-        this.currentOffsets[topic] ??= {};
-        this.currentOffsets[topic][partition] = offset;
     }
 
-    public flush() {
-
+    public flush(topicPartitions: Record<string, Set<number>>) {
+        Object.entries(topicPartitions).forEach(([topic, partitions]) => {
+            this.currentOffsets[topic] ??= {};
+            partitions.forEach((partition) => {
+                if (this.pendingOffsets[topic]?.[partition]) {
+                    this.currentOffsets[topic][partition] = this.pendingOffsets[topic][partition];
+                    delete this.pendingOffsets[topic][partition];
+                }
+            });
+        });
     }
 
     public async fetchOffsets(options: { fromBeginning: boolean }) {
@@ -58,7 +66,6 @@
                }),
            ),
        );
-        this.flush();
    }
 
    private async listOffsets({
@@ -83,11 +90,15 @@
            })),
        });
 
+        const topicPartitions: Record<string, Set<number>> = {};
        offsets.topics.forEach(({ name, partitions }) => {
+            topicPartitions[name] ??= new Set();
            partitions.forEach(({ partitionIndex, offset }) => {
+                topicPartitions[name].add(partitionIndex);
                this.resolve(name, partitionIndex, fromBeginning ? 0n : offset);
            });
        });
-
+
+        this.flush(topicPartitions);
    }
 }