kafka-ts 0.0.3-beta → 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +68 -8
- package/dist/api/api-versions.d.ts +9 -0
- package/dist/api/api-versions.js +24 -0
- package/dist/api/create-topics.d.ts +38 -0
- package/dist/api/create-topics.js +53 -0
- package/dist/api/delete-topics.d.ts +18 -0
- package/dist/api/delete-topics.js +33 -0
- package/dist/api/fetch.d.ts +84 -0
- package/dist/api/fetch.js +142 -0
- package/dist/api/find-coordinator.d.ts +21 -0
- package/dist/api/find-coordinator.js +39 -0
- package/dist/api/heartbeat.d.ts +11 -0
- package/dist/api/heartbeat.js +27 -0
- package/dist/api/index.d.ts +576 -0
- package/dist/api/index.js +165 -0
- package/dist/api/init-producer-id.d.ts +13 -0
- package/dist/api/init-producer-id.js +29 -0
- package/dist/api/join-group.d.ts +34 -0
- package/dist/api/join-group.js +51 -0
- package/dist/api/leave-group.d.ts +19 -0
- package/dist/api/leave-group.js +39 -0
- package/dist/api/list-offsets.d.ts +29 -0
- package/dist/api/list-offsets.js +48 -0
- package/dist/api/metadata.d.ts +40 -0
- package/dist/api/metadata.js +58 -0
- package/dist/api/offset-commit.d.ts +28 -0
- package/dist/api/offset-commit.js +48 -0
- package/dist/api/offset-fetch.d.ts +31 -0
- package/dist/api/offset-fetch.js +55 -0
- package/dist/api/produce.d.ts +54 -0
- package/dist/api/produce.js +126 -0
- package/dist/api/sasl-authenticate.d.ts +11 -0
- package/dist/api/sasl-authenticate.js +23 -0
- package/dist/api/sasl-handshake.d.ts +6 -0
- package/dist/api/sasl-handshake.js +19 -0
- package/dist/api/sync-group.d.ts +24 -0
- package/dist/api/sync-group.js +36 -0
- package/dist/auth/index.d.ts +2 -0
- package/dist/auth/index.js +8 -0
- package/dist/auth/plain.d.ts +5 -0
- package/dist/auth/plain.js +12 -0
- package/dist/auth/scram.d.ts +9 -0
- package/dist/auth/scram.js +40 -0
- package/dist/broker.d.ts +30 -0
- package/dist/broker.js +55 -0
- package/dist/client.d.ts +23 -0
- package/dist/client.js +36 -0
- package/dist/cluster.d.ts +27 -0
- package/dist/cluster.js +70 -0
- package/dist/cluster.test.d.ts +1 -0
- package/dist/cluster.test.js +343 -0
- package/dist/codecs/gzip.d.ts +2 -0
- package/dist/codecs/gzip.js +8 -0
- package/dist/codecs/index.d.ts +2 -0
- package/dist/codecs/index.js +17 -0
- package/dist/codecs/none.d.ts +2 -0
- package/dist/codecs/none.js +7 -0
- package/dist/codecs/types.d.ts +5 -0
- package/dist/codecs/types.js +2 -0
- package/dist/connection.d.ts +26 -0
- package/dist/connection.js +175 -0
- package/dist/consumer/consumer-group.d.ts +41 -0
- package/dist/consumer/consumer-group.js +215 -0
- package/dist/consumer/consumer-metadata.d.ts +7 -0
- package/dist/consumer/consumer-metadata.js +14 -0
- package/dist/consumer/consumer.d.ts +44 -0
- package/dist/consumer/consumer.js +225 -0
- package/dist/consumer/fetch-manager.d.ts +33 -0
- package/dist/consumer/fetch-manager.js +140 -0
- package/dist/consumer/fetcher.d.ts +25 -0
- package/dist/consumer/fetcher.js +64 -0
- package/dist/consumer/offset-manager.d.ts +22 -0
- package/dist/consumer/offset-manager.js +66 -0
- package/dist/consumer/processor.d.ts +19 -0
- package/dist/consumer/processor.js +59 -0
- package/dist/distributors/assignments-to-replicas.d.ts +16 -0
- package/dist/distributors/assignments-to-replicas.js +59 -0
- package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
- package/dist/distributors/assignments-to-replicas.test.js +40 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/distributors/partitioner.d.ts +7 -0
- package/dist/distributors/partitioner.js +23 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.js +26 -0
- package/dist/metadata.d.ts +24 -0
- package/dist/metadata.js +106 -0
- package/dist/producer/producer.d.ts +24 -0
- package/dist/producer/producer.js +131 -0
- package/dist/types.d.ts +11 -0
- package/dist/types.js +2 -0
- package/dist/utils/api.d.ts +9 -0
- package/dist/utils/api.js +5 -0
- package/dist/utils/crypto.d.ts +8 -0
- package/dist/utils/crypto.js +18 -0
- package/dist/utils/decoder.d.ts +30 -0
- package/dist/utils/decoder.js +152 -0
- package/dist/utils/delay.d.ts +1 -0
- package/dist/utils/delay.js +5 -0
- package/dist/utils/encoder.d.ts +28 -0
- package/dist/utils/encoder.js +125 -0
- package/dist/utils/error.d.ts +11 -0
- package/dist/utils/error.js +27 -0
- package/dist/utils/logger.d.ts +9 -0
- package/dist/utils/logger.js +32 -0
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/murmur2.d.ts +3 -0
- package/dist/utils/murmur2.js +40 -0
- package/dist/utils/retrier.d.ts +10 -0
- package/dist/utils/retrier.js +22 -0
- package/dist/utils/tracer.d.ts +5 -0
- package/dist/utils/tracer.js +39 -0
- package/package.json +11 -2
- package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap} +329 -26
- package/src/api/fetch.ts +84 -29
- package/src/api/index.ts +3 -1
- package/src/api/metadata.ts +1 -1
- package/src/api/offset-commit.ts +1 -1
- package/src/api/offset-fetch.ts +1 -5
- package/src/api/produce.ts +15 -18
- package/src/auth/index.ts +2 -0
- package/src/auth/plain.ts +10 -0
- package/src/auth/scram.ts +52 -0
- package/src/broker.ts +7 -9
- package/src/client.ts +2 -2
- package/src/cluster.test.ts +16 -14
- package/src/cluster.ts +38 -40
- package/src/codecs/gzip.ts +9 -0
- package/src/codecs/index.ts +16 -0
- package/src/codecs/none.ts +6 -0
- package/src/codecs/types.ts +4 -0
- package/src/connection.ts +31 -17
- package/src/consumer/consumer-group.ts +43 -23
- package/src/consumer/consumer.ts +64 -43
- package/src/consumer/fetch-manager.ts +43 -53
- package/src/consumer/fetcher.ts +20 -13
- package/src/consumer/offset-manager.ts +18 -7
- package/src/consumer/processor.ts +14 -8
- package/src/distributors/assignments-to-replicas.ts +1 -3
- package/src/distributors/partitioner.ts +27 -0
- package/src/index.ts +7 -2
- package/src/metadata.ts +4 -0
- package/src/producer/producer.ts +22 -12
- package/src/types.ts +3 -3
- package/src/utils/api.ts +1 -1
- package/src/utils/crypto.ts +15 -0
- package/src/utils/decoder.ts +11 -5
- package/src/utils/encoder.ts +29 -22
- package/src/utils/logger.ts +37 -0
- package/src/utils/murmur2.ts +44 -0
- package/src/utils/tracer.ts +40 -22
- package/.github/workflows/release.yml +0 -17
- package/certs/ca.crt +0 -29
- package/certs/ca.key +0 -52
- package/certs/ca.srl +0 -1
- package/certs/kafka.crt +0 -29
- package/certs/kafka.csr +0 -26
- package/certs/kafka.key +0 -52
- package/certs/kafka.keystore.jks +0 -0
- package/certs/kafka.truststore.jks +0 -0
- package/docker-compose.yml +0 -104
- package/examples/package-lock.json +0 -31
- package/examples/package.json +0 -14
- package/examples/src/client.ts +0 -9
- package/examples/src/consumer.ts +0 -18
- package/examples/src/create-topic.ts +0 -44
- package/examples/src/producer.ts +0 -24
- package/examples/src/replicator.ts +0 -25
- package/examples/src/utils/delay.ts +0 -1
- package/examples/src/utils/json.ts +0 -1
- package/examples/tsconfig.json +0 -7
- package/log4j.properties +0 -95
- package/scripts/generate-certs.sh +0 -24
- package/src/utils/debug.ts +0 -9
package/src/consumer/consumer-group.ts
CHANGED

```diff
@@ -1,11 +1,15 @@
+import EventEmitter from 'events';
 import { API, API_ERROR } from '../api';
 import { KEY_TYPE } from '../api/find-coordinator';
 import { Assignment, MemberAssignment } from '../api/sync-group';
 import { Cluster } from '../cluster';
 import { KafkaTSApiError, KafkaTSError } from '../utils/error';
+import { createTracer } from '../utils/tracer';
 import { ConsumerMetadata } from './consumer-metadata';
 import { OffsetManager } from './offset-manager';
 
+const trace = createTracer('ConsumerGroup');
+
 type ConsumerGroupOptions = {
     cluster: Cluster;
     topics: string[];
@@ -17,7 +21,7 @@ type ConsumerGroupOptions = {
     offsetManager: OffsetManager;
 };
 
-export class ConsumerGroup {
+export class ConsumerGroup extends EventEmitter<{ offsetCommit: [] }> {
     private coordinatorId = -1;
     private memberId = '';
     private generationId = -1;
@@ -26,10 +30,16 @@
     private heartbeatInterval: NodeJS.Timeout | null = null;
     private heartbeatError: KafkaTSError | null = null;
 
-    constructor(private options: ConsumerGroupOptions) {
+    constructor(private options: ConsumerGroupOptions) {
+        super();
+    }
 
+    @trace()
     public async join() {
         await this.findCoordinator();
+        await this.options.cluster.setSeedBroker(this.coordinatorId);
+
+        this.memberId = '';
         await this.joinGroup();
         await this.syncGroup();
         await this.offsetFetch();
@@ -53,12 +63,16 @@
         }
     }
 
-    public
+    public handleLastHeartbeat() {
         if (this.heartbeatError) {
             throw this.heartbeatError;
         }
     }
 
+    public resetHeartbeat() {
+        this.heartbeatError = null;
+    }
+
     private async findCoordinator() {
         const { coordinators } = await this.options.cluster.sendRequest(API.FIND_COORDINATOR, {
             keyType: KEY_TYPE.GROUP,
@@ -70,7 +84,7 @@
     private async joinGroup(): Promise<void> {
         const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
         try {
-            const response = await cluster.
+            const response = await cluster.sendRequest(API.JOIN_GROUP, {
                 groupId,
                 groupInstanceId,
                 memberId: this.memberId,
@@ -113,7 +127,7 @@
             assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({ memberId, assignment }));
         }
 
-        const response = await cluster.
+        const response = await cluster.sendRequest(API.SYNC_GROUP, {
             groupId,
             groupInstanceId,
             memberId: this.memberId,
@@ -133,8 +147,6 @@
             groups: [
                 {
                     groupId,
-                    memberId: this.memberId,
-                    memberEpoch: -1,
                     topics: topics
                         .map((topic) => ({ name: topic, partitionIndexes: assignment[topic] ?? [] }))
                         .filter(({ partitionIndexes }) => partitionIndexes.length),
@@ -144,31 +156,35 @@
         };
         if (!request.groups.length) return;
 
-        const response = await cluster.
+        const response = await cluster.sendRequest(API.OFFSET_FETCH, request);
+
+        const topicPartitions: Record<string, Set<number>> = {};
         response.groups.forEach((group) => {
             group.topics.forEach((topic) => {
-                topic.
-
-
-
-
+                topicPartitions[topic.name] ??= new Set();
+                topic.partitions.forEach(({ partitionIndex, committedOffset }) => {
+                    if (committedOffset >= 0) {
+                        topicPartitions[topic.name].add(partitionIndex);
+                        offsetManager.resolve(topic.name, partitionIndex, committedOffset);
+                    }
+                });
             });
         });
-        offsetManager.flush();
+        offsetManager.flush(topicPartitions);
     }
 
-    public async offsetCommit() {
+    public async offsetCommit(topicPartitions: Record<string, Set<number>>) {
        const { cluster, groupId, groupInstanceId, offsetManager } = this.options;
        const request = {
            groupId,
            groupInstanceId,
            memberId: this.memberId,
            generationIdOrMemberEpoch: this.generationId,
-            topics: Object.entries(
+            topics: Object.entries(topicPartitions).map(([topic, partitions]) => ({
                name: topic,
-                partitions:
-                    partitionIndex
-                    committedOffset:
+                partitions: [...partitions].map((partitionIndex) => ({
+                    partitionIndex,
+                    committedOffset: offsetManager.pendingOffsets[topic][partitionIndex],
                    committedLeaderEpoch: -1,
                    committedMetadata: null,
                })),
@@ -177,13 +193,13 @@
        if (!request.topics.length) {
            return;
        }
-        await cluster.
-
+        await cluster.sendRequest(API.OFFSET_COMMIT, request);
+        this.emit('offsetCommit');
    }
 
    public async heartbeat() {
        const { cluster, groupId, groupInstanceId } = this.options;
-        await cluster.
+        await cluster.sendRequest(API.HEARTBEAT, {
            groupId,
            groupInstanceId,
            memberId: this.memberId,
@@ -192,10 +208,14 @@
    }
 
    public async leaveGroup() {
+        if (this.coordinatorId === -1) {
+            return;
+        }
+
        const { cluster, groupId, groupInstanceId } = this.options;
        this.stopHeartbeater();
        try {
-            await cluster.
+            await cluster.sendRequest(API.LEAVE_GROUP, {
                groupId,
                members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
            });
```
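The user-visible part of these hunks is that `ConsumerGroup` now extends `EventEmitter` and emits `offsetCommit` after each successful `OFFSET_COMMIT` request, and that `offsetCommit()` takes an explicit `topicPartitions` map instead of committing everything pending. A usage sketch follows; the `createKafkaClient`/`startConsumer` surface is assumed from the package README and is not part of this diff, so treat those names as illustrative.

```ts
// Hypothetical usage sketch: the client factory and startConsumer() shape are
// assumptions; only the 'offsetCommit' event itself is confirmed by this diff.
import { createKafkaClient } from 'kafka-ts';

const kafka = createKafkaClient({
    clientId: 'example-app',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
});

const consumer = await kafka.startConsumer({
    groupId: 'example-group',
    topics: ['example-topic'],
    onMessage: async (message) => console.log(message.value?.toString()),
});

// Consumer re-emits the ConsumerGroup event (see the consumer.ts hunks below),
// so commits can be observed without touching internals, e.g. for metrics.
consumer.on('offsetCommit', () => {
    console.log('consumer group offsets committed');
});
```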
package/src/consumer/consumer.ts
CHANGED

```diff
@@ -1,17 +1,21 @@
+import EventEmitter from 'events';
 import { API, API_ERROR } from '../api';
 import { IsolationLevel } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { Cluster } from '../cluster';
-import {
+import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
 import { Message } from '../types';
 import { delay } from '../utils/delay';
 import { ConnectionError, KafkaTSApiError } from '../utils/error';
-import {
+import { log } from '../utils/logger';
+import { createTracer } from '../utils/tracer';
 import { ConsumerGroup } from './consumer-group';
 import { ConsumerMetadata } from './consumer-metadata';
-import {
+import { BatchGranularity, FetchManager } from './fetch-manager';
 import { OffsetManager } from './offset-manager';
 
+const trace = createTracer('Consumer');
+
 export type ConsumerOptions = {
     topics: string[];
     groupId?: string | null;
@@ -26,12 +30,11 @@ export type ConsumerOptions = {
     partitionMaxBytes?: number;
     allowTopicAutoCreation?: boolean;
     fromBeginning?: boolean;
-
-    granularity?: Granularity;
+    batchGranularity?: BatchGranularity;
     concurrency?: number;
 } & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
 
-export class Consumer {
+export class Consumer extends EventEmitter<{ offsetCommit: [] }> {
     private options: Required<ConsumerOptions>;
     private metadata: ConsumerMetadata;
     private consumerGroup: ConsumerGroup | undefined;
@@ -43,6 +46,8 @@
         private cluster: Cluster,
         options: ConsumerOptions,
     ) {
+        super();
+
         this.options = {
             ...options,
             groupId: options.groupId ?? null,
@@ -52,13 +57,12 @@
             rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
             maxWaitMs: options.maxWaitMs ?? 5000,
             minBytes: options.minBytes ?? 1,
-            maxBytes: options.maxBytes ??
-            partitionMaxBytes: options.partitionMaxBytes ??
+            maxBytes: options.maxBytes ?? 1_048_576,
+            partitionMaxBytes: options.partitionMaxBytes ?? 1_048_576,
             isolationLevel: options.isolationLevel ?? IsolationLevel.READ_UNCOMMITTED,
             allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
             fromBeginning: options.fromBeginning ?? false,
-
-            granularity: options.granularity ?? 'broker',
+            batchGranularity: options.batchGranularity ?? 'partition',
             concurrency: options.concurrency ?? 1,
         };
 
@@ -80,8 +84,10 @@
                 offsetManager: this.offsetManager,
             })
             : undefined;
+        this.consumerGroup?.on('offsetCommit', () => this.emit('offsetCommit'));
     }
 
+    @trace()
     public async start(): Promise<void> {
         const { topics, allowTopicAutoCreation, fromBeginning } = this.options;
 
@@ -94,16 +100,17 @@
             await this.offsetManager.fetchOffsets({ fromBeginning });
             await this.consumerGroup?.join();
         } catch (error) {
-
-
+            log.warn('Failed to start consumer', error);
+            log.debug(`Restarting consumer in 1 second...`);
             await delay(1000);
 
             if (this.stopHook) return (this.stopHook as () => void)();
             return this.close(true).then(() => this.start());
         }
-
+        this.startFetchManager();
     }
 
+    @trace()
     public async close(force = false): Promise<void> {
         if (!force) {
             await new Promise<void>(async (resolve) => {
@@ -111,22 +118,33 @@
                 await this.fetchManager?.stop();
             });
         }
-        await this.consumerGroup
-
-            .catch((error) => console.warn(`Failed to leave group: ${error.message}`));
-        await this.cluster.disconnect().catch((error) => console.warn(`Failed to disconnect: ${error.message}`));
+        await this.consumerGroup?.leaveGroup().catch((error) => log.debug(`Failed to leave group: ${error.message}`));
+        await this.cluster.disconnect().catch((error) => log.debug(`Failed to disconnect: ${error.message}`));
     }
 
-    private
-        const {
+    private async startFetchManager() {
+        const { batchGranularity, concurrency } = this.options;
 
         while (!this.stopHook) {
+            this.consumerGroup?.resetHeartbeat();
+
+            // TODO: If leader is not available, find another read replica
             const nodeAssignments = Object.entries(
-
-                this.metadata.getAssignment(),
-
+                distributeMessagesToTopicPartitionLeaders(
+                    Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) =>
+                        partitions.map((partition) => ({ topic, partition })),
+                    ),
+                    this.metadata.getTopicPartitionLeaderIds(),
                 ),
-            ).map(([nodeId, assignment]) => ({
+            ).map(([nodeId, assignment]) => ({
+                nodeId: parseInt(nodeId),
+                assignment: Object.fromEntries(
+                    Object.entries(assignment).map(([topic, partitions]) => [
+                        topic,
+                        Object.keys(partitions).map(Number),
+                    ]),
+                ),
+            }));
 
             const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
             const numProcessors = Math.min(concurrency, numPartitions);
@@ -137,7 +155,7 @@
                 metadata: this.metadata,
                 consumerGroup: this.consumerGroup,
                 nodeAssignments,
-
+                batchGranularity,
                 concurrency: numProcessors,
             });
 
@@ -145,19 +163,19 @@
                 await this.fetchManager.start();
 
                 if (!nodeAssignments.length) {
-
+                    log.debug('No partitions assigned. Waiting for reassignment...');
                     await delay(this.options.maxWaitMs);
-
+                    this.consumerGroup?.handleLastHeartbeat();
                 }
             } catch (error) {
                 await this.fetchManager.stop();
 
                 if ((error as KafkaTSApiError).errorCode === API_ERROR.REBALANCE_IN_PROGRESS) {
-
+                    log.debug('Rebalance in progress...');
                    continue;
                }
                if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
-
+                    log.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
                    this.close();
                    break;
                }
@@ -165,42 +183,45 @@
                error instanceof ConnectionError ||
                (error instanceof KafkaTSApiError && error.errorCode === API_ERROR.NOT_COORDINATOR)
            ) {
-
+                log.debug(`${error.message}. Restarting consumer...`);
                this.close().then(() => this.start());
                break;
            }
-
+            log.error((error as Error).message, error);
            this.close();
            break;
        }
        }
        this.stopHook?.();
-    }
+    }
 
+    @trace((messages) => ({ count: messages.length }))
    private async process(messages: Required<Message>[]) {
        const { options } = this;
-
+
+        const topicPartitions: Record<string, Set<number>> = {};
+        for (const { topic, partition } of messages) {
+            topicPartitions[topic] ??= new Set();
+            topicPartitions[topic].add(partition);
+        }
 
        if ('onBatch' in options) {
-            await
+            await options.onBatch(messages);
 
            messages.forEach(({ topic, partition, offset }) =>
                this.offsetManager.resolve(topic, partition, offset + 1n),
            );
        } else if ('onMessage' in options) {
-
-
-            await retrier(() => options.onMessage(message));
+            for (const message of messages) {
+                await options.onMessage(message);
 
-
-
-            }
-        } catch (error) {
-            await this.consumerGroup?.offsetCommit().catch(() => {});
-            throw error;
+                const { topic, partition, offset } = message;
+                this.offsetManager.resolve(topic, partition, offset + 1n);
            }
        }
-
+
+        await this.consumerGroup?.offsetCommit(topicPartitions);
+        this.offsetManager.flush(topicPartitions);
    }
 
    private fetch(nodeId: number, assignment: Assignment) {
```
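The option rename is the user-visible change here: `granularity` becomes `batchGranularity`, its default moves from `'broker'` to `'partition'`, and in 0.0.3 `maxBytes`/`partitionMaxBytes` both default to `1_048_576` (1 MiB). A migration sketch, with the client factory assumed as in the previous example:

```ts
import { createKafkaClient } from 'kafka-ts'; // factory name assumed, as above

const kafka = createKafkaClient({
    clientId: 'example-app',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
});

// 0.0.3-beta accepted { granularity: ... } with a 'broker' default.
// 0.0.3 renames it to batchGranularity ('partition' | 'topic' | 'broker')
// and defaults to 'partition', i.e. one onBatch call per partition.
await kafka.startConsumer({
    groupId: 'example-group',
    topics: ['example-topic'],
    batchGranularity: 'topic', // was: granularity: 'topic'
    onBatch: async (messages) => {
        // with 'topic' granularity, each invocation spans a single topic
        console.log(`received ${messages.length} messages`);
    },
});
```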
package/src/consumer/fetch-manager.ts
CHANGED

```diff
@@ -1,5 +1,4 @@
-import
-import { API } from '../api';
+import { FetchResponse } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { Metadata } from '../metadata';
 import { Batch, Message } from '../types';
@@ -11,30 +10,30 @@ import { Processor } from './processor';
 
 const trace = createTracer('FetchManager');
 
-export type
+export type BatchGranularity = 'partition' | 'topic' | 'broker';
 
 type FetchManagerOptions = {
-    fetch: (nodeId: number, assignment: Assignment) => Promise<
+    fetch: (nodeId: number, assignment: Assignment) => Promise<FetchResponse>;
     process: (batch: Batch) => Promise<void>;
     metadata: Metadata;
     consumerGroup?: ConsumerGroup;
     nodeAssignments: { nodeId: number; assignment: Assignment }[];
-
+    batchGranularity: BatchGranularity;
     concurrency: number;
 };
 
 type Checkpoint = { kind: 'checkpoint'; fetcherId: number };
 type Entry = Batch | Checkpoint;
 
-export class FetchManager
+export class FetchManager {
     private queue: Entry[] = [];
     private isRunning = false;
     private fetchers: Fetcher[];
     private processors: Processor[];
+    private pollQueue: (() => void)[] = [];
+    private fetcherCallbacks: Record<number, () => void> = {};
 
     constructor(private options: FetchManagerOptions) {
-        super();
-
         const { fetch, process, consumerGroup, nodeAssignments, concurrency } = this.options;
 
         this.fetchers = nodeAssignments.map(
@@ -52,6 +51,7 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
         );
     }
 
+    @trace(() => ({ root: true }))
     public async start() {
         this.queue = [];
         this.isRunning = true;
@@ -62,20 +62,25 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
                 ...this.processors.map((processor) => processor.loop()),
             ]);
         } finally {
-            this.
-            this.emit('stop');
+            await this.stop();
         }
     }
 
-    @trace()
     public async stop() {
         this.isRunning = false;
-        this.emit('stop');
 
-
+        const stopPromise = Promise.all([
             ...this.fetchers.map((fetcher) => fetcher.stop()),
             ...this.processors.map((processor) => processor.stop()),
         ]);
+
+        this.pollQueue.forEach((resolve) => resolve());
+        this.pollQueue = [];
+
+        Object.values(this.fetcherCallbacks).forEach((callback) => callback());
+        this.fetcherCallbacks = {};
+
+        await stopPromise;
     }
 
     @trace()
@@ -86,59 +91,44 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
 
         const batch = this.queue.shift();
         if (!batch) {
+            // wait until new data is available or fetch manager is requested to stop
             await new Promise<void>((resolve) => {
-
-                    this.removeListener('stop', onStop);
-                    resolve();
-                };
-                const onStop = () => {
-                    this.removeListener('data', onData);
-                    resolve();
-                };
-                this.once('data', onData);
-                this.once('stop', onStop);
+                this.pollQueue.push(resolve);
             });
             return this.poll();
         }
 
         if ('kind' in batch && batch.kind === 'checkpoint') {
-            this.
+            this.fetcherCallbacks[batch.fetcherId]?.();
             return this.poll();
         }
 
+        this.pollQueue?.shift()?.();
+
         return batch as Exclude<Entry, Checkpoint>;
     }
 
-
-
-
-        const batches = fetchResponseToBatches(response, granularity, metadata);
-        if (batches.length) {
-            this.queue.push(...batches);
-            this.queue.push({ kind: 'checkpoint', fetcherId });
+    @trace()
+    private async onResponse(fetcherId: number, response: FetchResponse) {
+        const { metadata, batchGranularity } = this.options;
 
-
-
-
-                if (id === fetcherId) {
-                    this.removeListener('stop', onStop);
-                    resolve();
-                }
-            };
-            const onStop = () => {
-                this.removeListener('checkpoint', onCheckpoint);
-                resolve();
-            };
-            this.once('checkpoint', onCheckpoint);
-            this.once('stop', onStop);
-        });
+        const batches = fetchResponseToBatches(response, batchGranularity, metadata);
+        if (!batches.length) {
+            return;
         }
+
+        // wait until all broker batches have been processed or fetch manager is requested to stop
+        await new Promise<void>((resolve) => {
+            this.fetcherCallbacks[fetcherId] = resolve;
+            this.queue.push(...batches, { kind: 'checkpoint', fetcherId });
+            this.pollQueue?.shift()?.();
+        });
     }
 }
 
 const fetchResponseToBatches = (
-    batch:
-
+    batch: FetchResponse,
+    batchGranularity: BatchGranularity,
     metadata: Metadata,
 ): Batch[] => {
     const brokerTopics = batch.responses.map(({ topicId, partitions }) =>
@@ -159,7 +149,7 @@ const fetchResponseToBatches = (
         ),
     );
 
-    switch (
+    switch (batchGranularity) {
         case 'broker':
             const messages = brokerTopics.flatMap((topicPartition) =>
                 topicPartition.flatMap((partitionMessages) => partitionMessages),
@@ -170,10 +160,10 @@
             .map((topicPartition) => topicPartition.flatMap((partitionMessages) => partitionMessages))
             .filter((messages) => messages.length);
         case 'partition':
-            return brokerTopics
-                topicPartition.map((partitionMessages) => partitionMessages)
-
+            return brokerTopics
+                .flatMap((topicPartition) => topicPartition.map((partitionMessages) => partitionMessages))
+                .filter((messages) => messages.length);
         default:
-            throw new KafkaTSError(`Unhandled batch granularity: ${
+            throw new KafkaTSError(`Unhandled batch granularity: ${batchGranularity}`);
     }
 };
```
package/src/consumer/fetcher.ts
CHANGED

```diff
@@ -1,5 +1,5 @@
 import { EventEmitter } from 'stream';
-import {
+import { FetchResponse } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { createTracer } from '../utils/tracer';
 import { ConsumerGroup } from './consumer-group';
@@ -10,11 +10,11 @@ type FetcherOptions = {
     nodeId: number;
     assignment: Assignment;
     consumerGroup?: ConsumerGroup;
-    fetch: (nodeId: number, assignment: Assignment) => Promise<
-    onResponse: (fetcherId: number, response:
+    fetch: (nodeId: number, assignment: Assignment) => Promise<FetchResponse>;
+    onResponse: (fetcherId: number, response: FetchResponse) => Promise<void>;
 };
 
-export class Fetcher extends EventEmitter<{
+export class Fetcher extends EventEmitter<{ stopped: [] }> {
     private isRunning = false;
 
     constructor(
@@ -25,17 +25,11 @@ export class Fetcher extends EventEmitter<{ stop: []; stopped: []; data: []; dra
     }
 
     public async loop() {
-        const { nodeId, assignment, consumerGroup, fetch, onResponse } = this.options;
-
         this.isRunning = true;
-        this.once('stop', () => (this.isRunning = false));
 
         try {
             while (this.isRunning) {
-
-                await consumerGroup?.handleLastHeartbeat();
-                await onResponse(this.fetcherId, response);
-                await consumerGroup?.handleLastHeartbeat();
+                await this.step();
             }
         } finally {
             this.isRunning = false;
@@ -44,14 +38,27 @@
     }
 
     @trace()
+    private async step() {
+        const { nodeId, assignment, consumerGroup, fetch, onResponse } = this.options;
+
+        const response = await fetch(nodeId, assignment);
+        if (!this.isRunning) {
+            return;
+        }
+        consumerGroup?.handleLastHeartbeat();
+        await onResponse(this.fetcherId, response);
+        consumerGroup?.handleLastHeartbeat();
+    }
+
     public async stop() {
         if (!this.isRunning) {
             return;
         }
 
-
-        return new Promise<void>((resolve) => {
+        const stopPromise = new Promise<void>((resolve) => {
             this.once('stopped', resolve);
         });
+        this.isRunning = false;
+        return stopPromise;
     }
 }
```