kafka-ts 0.0.3 → 0.0.4
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- package/README.md +4 -0
- package/dist/client.d.ts +1 -2
- package/package.json +1 -1
- package/.prettierrc +0 -8
- package/src/__snapshots__/cluster.test.ts.snap +0 -1281
- package/src/api/api-versions.ts +0 -21
- package/src/api/create-topics.ts +0 -78
- package/src/api/delete-topics.ts +0 -42
- package/src/api/fetch.ts +0 -198
- package/src/api/find-coordinator.ts +0 -39
- package/src/api/heartbeat.ts +0 -33
- package/src/api/index.ts +0 -166
- package/src/api/init-producer-id.ts +0 -35
- package/src/api/join-group.ts +0 -67
- package/src/api/leave-group.ts +0 -48
- package/src/api/list-offsets.ts +0 -65
- package/src/api/metadata.ts +0 -66
- package/src/api/offset-commit.ts +0 -67
- package/src/api/offset-fetch.ts +0 -70
- package/src/api/produce.ts +0 -170
- package/src/api/sasl-authenticate.ts +0 -21
- package/src/api/sasl-handshake.ts +0 -16
- package/src/api/sync-group.ts +0 -54
- package/src/auth/index.ts +0 -2
- package/src/auth/plain.ts +0 -10
- package/src/auth/scram.ts +0 -52
- package/src/broker.ts +0 -72
- package/src/client.ts +0 -47
- package/src/cluster.test.ts +0 -371
- package/src/cluster.ts +0 -85
- package/src/codecs/gzip.ts +0 -9
- package/src/codecs/index.ts +0 -16
- package/src/codecs/none.ts +0 -6
- package/src/codecs/types.ts +0 -4
- package/src/connection.ts +0 -157
- package/src/consumer/consumer-group.ts +0 -229
- package/src/consumer/consumer-metadata.ts +0 -14
- package/src/consumer/consumer.ts +0 -252
- package/src/consumer/fetch-manager.ts +0 -169
- package/src/consumer/fetcher.ts +0 -64
- package/src/consumer/offset-manager.ts +0 -104
- package/src/consumer/processor.ts +0 -53
- package/src/distributors/assignments-to-replicas.test.ts +0 -43
- package/src/distributors/assignments-to-replicas.ts +0 -83
- package/src/distributors/messages-to-topic-partition-leaders.test.ts +0 -32
- package/src/distributors/messages-to-topic-partition-leaders.ts +0 -19
- package/src/distributors/partitioner.ts +0 -27
- package/src/index.ts +0 -9
- package/src/metadata.ts +0 -126
- package/src/producer/producer.ts +0 -142
- package/src/types.ts +0 -11
- package/src/utils/api.ts +0 -11
- package/src/utils/crypto.ts +0 -15
- package/src/utils/decoder.ts +0 -174
- package/src/utils/delay.ts +0 -1
- package/src/utils/encoder.ts +0 -148
- package/src/utils/error.ts +0 -21
- package/src/utils/logger.ts +0 -37
- package/src/utils/memo.ts +0 -12
- package/src/utils/murmur2.ts +0 -44
- package/src/utils/retrier.ts +0 -39
- package/src/utils/tracer.ts +0 -49
- package/tsconfig.json +0 -17
package/src/connection.ts
DELETED
@@ -1,157 +0,0 @@
-import assert from 'assert';
-import net, { isIP, Socket, TcpSocketConnectOpts } from 'net';
-import tls, { TLSSocketOptions } from 'tls';
-import { getApiName } from './api';
-import { Api } from './utils/api';
-import { Decoder } from './utils/decoder';
-import { Encoder } from './utils/encoder';
-import { ConnectionError } from './utils/error';
-import { log } from './utils/logger';
-import { createTracer } from './utils/tracer';
-
-const trace = createTracer('Connection');
-
-type ConnectionOptions = {
-    clientId: string | null;
-    connection: TcpSocketConnectOpts;
-    ssl: TLSSocketOptions | null;
-};
-
-type RawResonse = { responseDecoder: Decoder; responseSize: number };
-
-export class Connection {
-    private socket = new Socket();
-    private queue: {
-        [correlationId: number]: { resolve: (response: RawResonse) => void; reject: (error: Error) => void };
-    } = {};
-    private lastCorrelationId = 0;
-    private chunks: Buffer[] = [];
-
-    constructor(private options: ConnectionOptions) {}
-
-    @trace()
-    public async connect() {
-        this.queue = {};
-        this.chunks = [];
-
-        await new Promise<void>((resolve, reject) => {
-            const { ssl, connection } = this.options;
-
-            this.socket = ssl
-                ? tls.connect(
-                      {
-                          ...connection,
-                          ...ssl,
-                          ...(connection.host && !isIP(connection.host) && { servername: connection.host }),
-                      },
-                      resolve,
-                  )
-                : net.connect(connection, resolve);
-            this.socket.once('error', reject);
-        });
-        this.socket.removeAllListeners('error');
-
-        this.socket.on('data', (data) => this.handleData(data));
-        this.socket.once('close', async () => {
-            Object.values(this.queue).forEach(({ reject }) => {
-                reject(new ConnectionError('Socket closed unexpectedly'));
-            });
-            this.queue = {};
-        });
-    }
-
-    @trace()
-    public disconnect() {
-        this.socket.removeAllListeners();
-        return new Promise<void>((resolve) => {
-            if (this.socket.pending) {
-                return resolve();
-            }
-            this.socket.end(resolve);
-        });
-    }
-
-    @trace((api, body) => ({ message: getApiName(api), body }))
-    public async sendRequest<Request, Response>(api: Api<Request, Response>, body: Request): Promise<Response> {
-        const correlationId = this.nextCorrelationId();
-        const apiName = getApiName(api);
-
-        const encoder = new Encoder()
-            .writeInt16(api.apiKey)
-            .writeInt16(api.apiVersion)
-            .writeInt32(correlationId)
-            .writeString(this.options.clientId);
-
-        const request = api.request(encoder, body);
-        const requestEncoder = new Encoder().writeInt32(request.getByteLength()).writeEncoder(request);
-
-        let timeout: NodeJS.Timeout | undefined;
-        const { responseDecoder, responseSize } = await new Promise<RawResonse>(async (resolve, reject) => {
-            timeout = setTimeout(() => {
-                delete this.queue[correlationId];
-                reject(new ConnectionError(`${apiName} timed out`));
-            }, 30_000);
-
-            try {
-                this.queue[correlationId] = { resolve, reject };
-                await this.write(requestEncoder.value());
-            } catch (error) {
-                reject(error);
-            }
-        });
-        clearTimeout(timeout);
-        const response = await api.response(responseDecoder);
-
-        assert(
-            responseDecoder.getOffset() - 4 === responseSize,
-            `Buffer not correctly consumed: ${responseDecoder.getOffset() - 4} !== ${responseSize}`,
-        );
-
-        return response;
-    }
-
-    private write(buffer: Buffer) {
-        return new Promise<void>((resolve, reject) => {
-            const { stack } = new Error('Write error');
-            this.socket.write(buffer, (error) => {
-                if (error) {
-                    const err = new ConnectionError(error.message);
-                    err.stack += `\n${stack}`;
-                    return reject(err);
-                }
-                resolve();
-            });
-        });
-    }
-
-    private handleData(buffer: Buffer) {
-        this.chunks.push(buffer);
-
-        const decoder = new Decoder(Buffer.concat(this.chunks));
-        if (decoder.getBufferLength() < 4) {
-            return;
-        }
-
-        const size = decoder.readInt32();
-        if (size !== decoder.getBufferLength() - 4) {
-            return;
-        }
-
-        const correlationId = decoder.readInt32();
-
-        const context = this.queue[correlationId];
-        if (context) {
-            delete this.queue[correlationId];
-            context.resolve({ responseDecoder: decoder, responseSize: size });
-        } else {
-            log.debug('Could not find pending request for correlationId', { correlationId });
-        }
-        this.chunks = [];
-    }
-
-    private nextCorrelationId() {
-        return this.lastCorrelationId++;
-    }
-}
-
-export type SendRequest = typeof Connection.prototype.sendRequest;
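The framing this deleted class implements is the standard Kafka one: every request is a 4-byte big-endian size prefix, then a header of apiKey (int16), apiVersion (int16), correlationId (int32) and a nullable clientId string, then the API-specific body; the broker echoes the correlationId, which is how `handleData` pairs a response buffer with the pending promise in `this.queue`. A minimal standalone sketch of that framing, using plain `Buffer` calls rather than the package's `Encoder` (names here are illustrative, not part of kafka-ts):

```ts
import { Buffer } from 'node:buffer';

// Header = apiKey (int16) + apiVersion (int16) + correlationId (int32)
// + nullable string clientId (int16 length, -1 for null, then UTF-8 bytes).
const encodeRequestHeader = (
    apiKey: number,
    apiVersion: number,
    correlationId: number,
    clientId: string | null,
): Buffer => {
    const id = clientId === null ? Buffer.alloc(0) : Buffer.from(clientId, 'utf-8');
    const header = Buffer.alloc(10 + id.length);
    header.writeInt16BE(apiKey, 0);
    header.writeInt16BE(apiVersion, 2);
    header.writeInt32BE(correlationId, 4);
    header.writeInt16BE(clientId === null ? -1 : id.length, 8);
    id.copy(header, 10);
    return header;
};

// Frame = size prefix + header + body, mirroring
// `new Encoder().writeInt32(request.getByteLength()).writeEncoder(request)`.
const frame = (header: Buffer, body: Buffer): Buffer => {
    const size = Buffer.alloc(4);
    size.writeInt32BE(header.length + body.length, 0);
    return Buffer.concat([size, header, body]);
};
```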
package/src/consumer/consumer-group.ts
DELETED
@@ -1,229 +0,0 @@
-import EventEmitter from 'events';
-import { API, API_ERROR } from '../api';
-import { KEY_TYPE } from '../api/find-coordinator';
-import { Assignment, MemberAssignment } from '../api/sync-group';
-import { Cluster } from '../cluster';
-import { KafkaTSApiError, KafkaTSError } from '../utils/error';
-import { createTracer } from '../utils/tracer';
-import { ConsumerMetadata } from './consumer-metadata';
-import { OffsetManager } from './offset-manager';
-
-const trace = createTracer('ConsumerGroup');
-
-type ConsumerGroupOptions = {
-    cluster: Cluster;
-    topics: string[];
-    groupId: string;
-    groupInstanceId: string | null;
-    sessionTimeoutMs: number;
-    rebalanceTimeoutMs: number;
-    metadata: ConsumerMetadata;
-    offsetManager: OffsetManager;
-};
-
-export class ConsumerGroup extends EventEmitter<{ offsetCommit: [] }> {
-    private coordinatorId = -1;
-    private memberId = '';
-    private generationId = -1;
-    private leaderId = '';
-    private memberIds: string[] = [];
-    private heartbeatInterval: NodeJS.Timeout | null = null;
-    private heartbeatError: KafkaTSError | null = null;
-
-    constructor(private options: ConsumerGroupOptions) {
-        super();
-    }
-
-    @trace()
-    public async join() {
-        await this.findCoordinator();
-        await this.options.cluster.setSeedBroker(this.coordinatorId);
-
-        this.memberId = '';
-        await this.joinGroup();
-        await this.syncGroup();
-        await this.offsetFetch();
-        this.startHeartbeater();
-    }
-
-    private async startHeartbeater() {
-        this.heartbeatInterval = setInterval(async () => {
-            try {
-                await this.heartbeat();
-            } catch (error) {
-                this.heartbeatError = error as KafkaTSError;
-            }
-        }, 5000);
-    }
-
-    private async stopHeartbeater() {
-        if (this.heartbeatInterval) {
-            clearInterval(this.heartbeatInterval);
-            this.heartbeatInterval = null;
-        }
-    }
-
-    public handleLastHeartbeat() {
-        if (this.heartbeatError) {
-            throw this.heartbeatError;
-        }
-    }
-
-    public resetHeartbeat() {
-        this.heartbeatError = null;
-    }
-
-    private async findCoordinator() {
-        const { coordinators } = await this.options.cluster.sendRequest(API.FIND_COORDINATOR, {
-            keyType: KEY_TYPE.GROUP,
-            keys: [this.options.groupId],
-        });
-        this.coordinatorId = coordinators[0].nodeId;
-    }
-
-    private async joinGroup(): Promise<void> {
-        const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
-        try {
-            const response = await cluster.sendRequest(API.JOIN_GROUP, {
-                groupId,
-                groupInstanceId,
-                memberId: this.memberId,
-                sessionTimeoutMs,
-                rebalanceTimeoutMs,
-                protocolType: 'consumer',
-                protocols: [{ name: 'RoundRobinAssigner', metadata: { version: 0, topics } }],
-                reason: null,
-            });
-            this.memberId = response.memberId;
-            this.generationId = response.generationId;
-            this.leaderId = response.leader;
-            this.memberIds = response.members.map((member) => member.memberId);
-        } catch (error) {
-            if ((error as KafkaTSApiError).errorCode === API_ERROR.MEMBER_ID_REQUIRED) {
-                this.memberId = (error as KafkaTSApiError).response.memberId;
-                return this.joinGroup();
-            }
-            throw error;
-        }
-    }
-
-    private async syncGroup() {
-        const { cluster, metadata, groupId, groupInstanceId } = this.options;
-
-        let assignments: MemberAssignment[] = [];
-        if (this.memberId === this.leaderId) {
-            const memberAssignments = Object.entries(metadata.getTopicPartitions())
-                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
-                .reduce(
-                    (acc, { topic, partition }, index) => {
-                        const memberId = this.memberIds[index % this.memberIds.length];
-                        acc[memberId] ??= {};
-                        acc[memberId][topic] ??= [];
-                        acc[memberId][topic].push(partition);
-                        return acc;
-                    },
-                    {} as Record<string, Record<string, number[]>>,
-                );
-            assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({ memberId, assignment }));
-        }
-
-        const response = await cluster.sendRequest(API.SYNC_GROUP, {
-            groupId,
-            groupInstanceId,
-            memberId: this.memberId,
-            generationId: this.generationId,
-            protocolType: 'consumer',
-            protocolName: 'RoundRobinAssigner',
-            assignments,
-        });
-        metadata.setAssignment(JSON.parse(response.assignments || '{}') as Assignment);
-    }
-
-    private async offsetFetch() {
-        const { cluster, groupId, topics, metadata, offsetManager } = this.options;
-
-        const assignment = metadata.getAssignment();
-        const request = {
-            groups: [
-                {
-                    groupId,
-                    topics: topics
-                        .map((topic) => ({ name: topic, partitionIndexes: assignment[topic] ?? [] }))
-                        .filter(({ partitionIndexes }) => partitionIndexes.length),
-                },
-            ].filter(({ topics }) => topics.length),
-            requireStable: true,
-        };
-        if (!request.groups.length) return;
-
-        const response = await cluster.sendRequest(API.OFFSET_FETCH, request);
-
-        const topicPartitions: Record<string, Set<number>> = {};
-        response.groups.forEach((group) => {
-            group.topics.forEach((topic) => {
-                topicPartitions[topic.name] ??= new Set();
-                topic.partitions.forEach(({ partitionIndex, committedOffset }) => {
-                    if (committedOffset >= 0) {
-                        topicPartitions[topic.name].add(partitionIndex);
-                        offsetManager.resolve(topic.name, partitionIndex, committedOffset);
-                    }
-                });
-            });
-        });
-        offsetManager.flush(topicPartitions);
-    }
-
-    public async offsetCommit(topicPartitions: Record<string, Set<number>>) {
-        const { cluster, groupId, groupInstanceId, offsetManager } = this.options;
-        const request = {
-            groupId,
-            groupInstanceId,
-            memberId: this.memberId,
-            generationIdOrMemberEpoch: this.generationId,
-            topics: Object.entries(topicPartitions).map(([topic, partitions]) => ({
-                name: topic,
-                partitions: [...partitions].map((partitionIndex) => ({
-                    partitionIndex,
-                    committedOffset: offsetManager.pendingOffsets[topic][partitionIndex],
-                    committedLeaderEpoch: -1,
-                    committedMetadata: null,
-                })),
-            })),
-        };
-        if (!request.topics.length) {
-            return;
-        }
-        await cluster.sendRequest(API.OFFSET_COMMIT, request);
-        this.emit('offsetCommit');
-    }
-
-    public async heartbeat() {
-        const { cluster, groupId, groupInstanceId } = this.options;
-        await cluster.sendRequest(API.HEARTBEAT, {
-            groupId,
-            groupInstanceId,
-            memberId: this.memberId,
-            generationId: this.generationId,
-        });
-    }
-
-    public async leaveGroup() {
-        if (this.coordinatorId === -1) {
-            return;
-        }
-
-        const { cluster, groupId, groupInstanceId } = this.options;
-        this.stopHeartbeater();
-        try {
-            await cluster.sendRequest(API.LEAVE_GROUP, {
-                groupId,
-                members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
-            });
-        } catch (error) {
-            if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
-                return;
-            }
-            throw error;
-        }
-    }
-}
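When this member is elected group leader, `syncGroup` flattens every topic-partition and deals them out round-robin: partition `i` goes to member `i % memberIds.length`. The same logic extracted as a standalone function, for illustration only (the type and function names below are not part of the package):

```ts
type PartitionAssignment = Record<string, Record<string, number[]>>;

// Round-robin assignment, as in the leader branch of syncGroup().
const assignRoundRobin = (
    memberIds: string[],
    topicPartitions: Record<string, number[]>,
): PartitionAssignment =>
    Object.entries(topicPartitions)
        .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
        .reduce((acc, { topic, partition }, index) => {
            const memberId = memberIds[index % memberIds.length];
            acc[memberId] ??= {};
            acc[memberId][topic] ??= [];
            acc[memberId][topic].push(partition);
            return acc;
        }, {} as PartitionAssignment);

// assignRoundRobin(['a', 'b'], { orders: [0, 1, 2] })
// -> { a: { orders: [0, 2] }, b: { orders: [1] } }
```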
package/src/consumer/consumer-metadata.ts
DELETED
@@ -1,14 +0,0 @@
-import { Assignment } from '../api/sync-group';
-import { Metadata } from '../metadata';
-
-export class ConsumerMetadata extends Metadata {
-    private assignment: Assignment = {};
-
-    public getAssignment() {
-        return this.assignment;
-    }
-
-    public setAssignment(newAssignment: Assignment) {
-        this.assignment = newAssignment;
-    }
-}
package/src/consumer/consumer.ts
DELETED
@@ -1,252 +0,0 @@
-import EventEmitter from 'events';
-import { API, API_ERROR } from '../api';
-import { IsolationLevel } from '../api/fetch';
-import { Assignment } from '../api/sync-group';
-import { Cluster } from '../cluster';
-import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
-import { Message } from '../types';
-import { delay } from '../utils/delay';
-import { ConnectionError, KafkaTSApiError } from '../utils/error';
-import { log } from '../utils/logger';
-import { createTracer } from '../utils/tracer';
-import { ConsumerGroup } from './consumer-group';
-import { ConsumerMetadata } from './consumer-metadata';
-import { BatchGranularity, FetchManager } from './fetch-manager';
-import { OffsetManager } from './offset-manager';
-
-const trace = createTracer('Consumer');
-
-export type ConsumerOptions = {
-    topics: string[];
-    groupId?: string | null;
-    groupInstanceId?: string | null;
-    rackId?: string;
-    isolationLevel?: IsolationLevel;
-    sessionTimeoutMs?: number;
-    rebalanceTimeoutMs?: number;
-    maxWaitMs?: number;
-    minBytes?: number;
-    maxBytes?: number;
-    partitionMaxBytes?: number;
-    allowTopicAutoCreation?: boolean;
-    fromBeginning?: boolean;
-    batchGranularity?: BatchGranularity;
-    concurrency?: number;
-} & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
-
-export class Consumer extends EventEmitter<{ offsetCommit: [] }> {
-    private options: Required<ConsumerOptions>;
-    private metadata: ConsumerMetadata;
-    private consumerGroup: ConsumerGroup | undefined;
-    private offsetManager: OffsetManager;
-    private fetchManager?: FetchManager;
-    private stopHook: (() => void) | undefined;
-
-    constructor(
-        private cluster: Cluster,
-        options: ConsumerOptions,
-    ) {
-        super();
-
-        this.options = {
-            ...options,
-            groupId: options.groupId ?? null,
-            groupInstanceId: options.groupInstanceId ?? null,
-            rackId: options.rackId ?? '',
-            sessionTimeoutMs: options.sessionTimeoutMs ?? 30_000,
-            rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
-            maxWaitMs: options.maxWaitMs ?? 5000,
-            minBytes: options.minBytes ?? 1,
-            maxBytes: options.maxBytes ?? 1_048_576,
-            partitionMaxBytes: options.partitionMaxBytes ?? 1_048_576,
-            isolationLevel: options.isolationLevel ?? IsolationLevel.READ_UNCOMMITTED,
-            allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
-            fromBeginning: options.fromBeginning ?? false,
-            batchGranularity: options.batchGranularity ?? 'partition',
-            concurrency: options.concurrency ?? 1,
-        };
-
-        this.metadata = new ConsumerMetadata({ cluster: this.cluster });
-        this.offsetManager = new OffsetManager({
-            cluster: this.cluster,
-            metadata: this.metadata,
-            isolationLevel: this.options.isolationLevel,
-        });
-        this.consumerGroup = this.options.groupId
-            ? new ConsumerGroup({
-                  cluster: this.cluster,
-                  topics: this.options.topics,
-                  groupId: this.options.groupId,
-                  groupInstanceId: this.options.groupInstanceId,
-                  sessionTimeoutMs: this.options.sessionTimeoutMs,
-                  rebalanceTimeoutMs: this.options.rebalanceTimeoutMs,
-                  metadata: this.metadata,
-                  offsetManager: this.offsetManager,
-              })
-            : undefined;
-        this.consumerGroup?.on('offsetCommit', () => this.emit('offsetCommit'));
-    }
-
-    @trace()
-    public async start(): Promise<void> {
-        const { topics, allowTopicAutoCreation, fromBeginning } = this.options;
-
-        this.stopHook = undefined;
-
-        try {
-            await this.cluster.connect();
-            await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
-            this.metadata.setAssignment(this.metadata.getTopicPartitions());
-            await this.offsetManager.fetchOffsets({ fromBeginning });
-            await this.consumerGroup?.join();
-        } catch (error) {
-            log.warn('Failed to start consumer', error);
-            log.debug(`Restarting consumer in 1 second...`);
-            await delay(1000);
-
-            if (this.stopHook) return (this.stopHook as () => void)();
-            return this.close(true).then(() => this.start());
-        }
-        this.startFetchManager();
-    }
-
-    @trace()
-    public async close(force = false): Promise<void> {
-        if (!force) {
-            await new Promise<void>(async (resolve) => {
-                this.stopHook = resolve;
-                await this.fetchManager?.stop();
-            });
-        }
-        await this.consumerGroup?.leaveGroup().catch((error) => log.debug(`Failed to leave group: ${error.message}`));
-        await this.cluster.disconnect().catch((error) => log.debug(`Failed to disconnect: ${error.message}`));
-    }
-
-    private async startFetchManager() {
-        const { batchGranularity, concurrency } = this.options;
-
-        while (!this.stopHook) {
-            this.consumerGroup?.resetHeartbeat();
-
-            // TODO: If leader is not available, find another read replica
-            const nodeAssignments = Object.entries(
-                distributeMessagesToTopicPartitionLeaders(
-                    Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) =>
-                        partitions.map((partition) => ({ topic, partition })),
-                    ),
-                    this.metadata.getTopicPartitionLeaderIds(),
-                ),
-            ).map(([nodeId, assignment]) => ({
-                nodeId: parseInt(nodeId),
-                assignment: Object.fromEntries(
-                    Object.entries(assignment).map(([topic, partitions]) => [
-                        topic,
-                        Object.keys(partitions).map(Number),
-                    ]),
-                ),
-            }));
-
-            const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
-            const numProcessors = Math.min(concurrency, numPartitions);
-
-            this.fetchManager = new FetchManager({
-                fetch: this.fetch.bind(this),
-                process: this.process.bind(this),
-                metadata: this.metadata,
-                consumerGroup: this.consumerGroup,
-                nodeAssignments,
-                batchGranularity,
-                concurrency: numProcessors,
-            });
-
-            try {
-                await this.fetchManager.start();
-
-                if (!nodeAssignments.length) {
-                    log.debug('No partitions assigned. Waiting for reassignment...');
-                    await delay(this.options.maxWaitMs);
-                    this.consumerGroup?.handleLastHeartbeat();
-                }
-            } catch (error) {
-                await this.fetchManager.stop();
-
-                if ((error as KafkaTSApiError).errorCode === API_ERROR.REBALANCE_IN_PROGRESS) {
-                    log.debug('Rebalance in progress...');
-                    continue;
-                }
-                if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
-                    log.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
-                    this.close();
-                    break;
-                }
-                if (
-                    error instanceof ConnectionError ||
-                    (error instanceof KafkaTSApiError && error.errorCode === API_ERROR.NOT_COORDINATOR)
-                ) {
-                    log.debug(`${error.message}. Restarting consumer...`);
-                    this.close().then(() => this.start());
-                    break;
-                }
-                log.error((error as Error).message, error);
-                this.close();
-                break;
-            }
-        }
-        this.stopHook?.();
-    }
-
-    @trace((messages) => ({ count: messages.length }))
-    private async process(messages: Required<Message>[]) {
-        const { options } = this;
-
-        const topicPartitions: Record<string, Set<number>> = {};
-        for (const { topic, partition } of messages) {
-            topicPartitions[topic] ??= new Set();
-            topicPartitions[topic].add(partition);
-        }
-
-        if ('onBatch' in options) {
-            await options.onBatch(messages);
-
-            messages.forEach(({ topic, partition, offset }) =>
-                this.offsetManager.resolve(topic, partition, offset + 1n),
-            );
-        } else if ('onMessage' in options) {
-            for (const message of messages) {
-                await options.onMessage(message);
-
-                const { topic, partition, offset } = message;
-                this.offsetManager.resolve(topic, partition, offset + 1n);
-            }
-        }
-
-        await this.consumerGroup?.offsetCommit(topicPartitions);
-        this.offsetManager.flush(topicPartitions);
-    }
-
-    private fetch(nodeId: number, assignment: Assignment) {
-        const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
-
-        return this.cluster.sendRequestToNode(nodeId)(API.FETCH, {
-            maxWaitMs,
-            minBytes,
-            maxBytes,
-            isolationLevel,
-            sessionId: 0,
-            sessionEpoch: -1,
-            topics: Object.entries(assignment).map(([topic, partitions]) => ({
-                topicId: this.metadata.getTopicIdByName(topic),
-                partitions: partitions.map((partition) => ({
-                    partition,
-                    currentLeaderEpoch: -1,
-                    fetchOffset: this.offsetManager.getCurrentOffset(topic, partition),
-                    lastFetchedEpoch: -1,
-                    logStartOffset: 0n,
-                    partitionMaxBytes,
-                })),
-            })),
-            forgottenTopicsData: [],
-            rackId,
-        });
-    }
-}
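Note the delivery semantics in `process()`: an offset is resolved to `offset + 1n` (the next record to fetch) only after the user callback returns, and committed afterwards, so a handler that throws leaves its offset unresolved and the record is redelivered on restart, i.e. at-least-once delivery. A sketch of the two mutually exclusive handler shapes from `ConsumerOptions` (the `Msg` type below is a hypothetical stand-in for the `Message` type in the deleted src/types.ts):

```ts
// Hypothetical minimal message shape; the real `Message` lives in src/types.ts.
type Msg = { topic: string; partition: number; offset: bigint; value: Buffer | null };

// Mirrors the union on ConsumerOptions: exactly one of onBatch / onMessage.
type Handler =
    | { onBatch: (messages: Msg[]) => unknown }
    | { onMessage: (message: Msg) => unknown };

const handler: Handler = {
    onMessage: async (message) => {
        console.log(`${message.topic}[${message.partition}] @ ${message.offset}`);
        // If this throws, the offset is never resolved to `offset + 1n`,
        // so the message is delivered again after a restart (at-least-once).
    },
};
```

With `onBatch`, all offsets in the batch are resolved together after the callback succeeds; with `onMessage`, offsets advance message by message, but the group commit still happens once per processed batch.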