kafka-ts 0.0.3 → 0.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +36 -1
- package/dist/client.d.ts +1 -2
- package/dist/consumer/consumer.d.ts +2 -0
- package/dist/consumer/consumer.js +18 -7
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/utils/retrier.d.ts +3 -4
- package/dist/utils/retrier.js +19 -14
- package/package.json +1 -1
- package/.prettierrc +0 -8
- package/src/__snapshots__/cluster.test.ts.snap +0 -1281
- package/src/api/api-versions.ts +0 -21
- package/src/api/create-topics.ts +0 -78
- package/src/api/delete-topics.ts +0 -42
- package/src/api/fetch.ts +0 -198
- package/src/api/find-coordinator.ts +0 -39
- package/src/api/heartbeat.ts +0 -33
- package/src/api/index.ts +0 -166
- package/src/api/init-producer-id.ts +0 -35
- package/src/api/join-group.ts +0 -67
- package/src/api/leave-group.ts +0 -48
- package/src/api/list-offsets.ts +0 -65
- package/src/api/metadata.ts +0 -66
- package/src/api/offset-commit.ts +0 -67
- package/src/api/offset-fetch.ts +0 -70
- package/src/api/produce.ts +0 -170
- package/src/api/sasl-authenticate.ts +0 -21
- package/src/api/sasl-handshake.ts +0 -16
- package/src/api/sync-group.ts +0 -54
- package/src/auth/index.ts +0 -2
- package/src/auth/plain.ts +0 -10
- package/src/auth/scram.ts +0 -52
- package/src/broker.ts +0 -72
- package/src/client.ts +0 -47
- package/src/cluster.test.ts +0 -371
- package/src/cluster.ts +0 -85
- package/src/codecs/gzip.ts +0 -9
- package/src/codecs/index.ts +0 -16
- package/src/codecs/none.ts +0 -6
- package/src/codecs/types.ts +0 -4
- package/src/connection.ts +0 -157
- package/src/consumer/consumer-group.ts +0 -229
- package/src/consumer/consumer-metadata.ts +0 -14
- package/src/consumer/consumer.ts +0 -252
- package/src/consumer/fetch-manager.ts +0 -169
- package/src/consumer/fetcher.ts +0 -64
- package/src/consumer/offset-manager.ts +0 -104
- package/src/consumer/processor.ts +0 -53
- package/src/distributors/assignments-to-replicas.test.ts +0 -43
- package/src/distributors/assignments-to-replicas.ts +0 -83
- package/src/distributors/messages-to-topic-partition-leaders.test.ts +0 -32
- package/src/distributors/messages-to-topic-partition-leaders.ts +0 -19
- package/src/distributors/partitioner.ts +0 -27
- package/src/index.ts +0 -9
- package/src/metadata.ts +0 -126
- package/src/producer/producer.ts +0 -142
- package/src/types.ts +0 -11
- package/src/utils/api.ts +0 -11
- package/src/utils/crypto.ts +0 -15
- package/src/utils/decoder.ts +0 -174
- package/src/utils/delay.ts +0 -1
- package/src/utils/encoder.ts +0 -148
- package/src/utils/error.ts +0 -21
- package/src/utils/logger.ts +0 -37
- package/src/utils/memo.ts +0 -12
- package/src/utils/murmur2.ts +0 -44
- package/src/utils/retrier.ts +0 -39
- package/src/utils/tracer.ts +0 -49
- package/tsconfig.json +0 -17
package/src/broker.ts
DELETED

```diff
@@ -1,72 +0,0 @@
-import { TcpSocketConnectOpts } from 'net';
-import { TLSSocketOptions } from 'tls';
-import { API } from './api';
-import { Connection, SendRequest } from './connection';
-import { KafkaTSError } from './utils/error';
-import { memo } from './utils/memo';
-
-export type SASLProvider = {
-    mechanism: string;
-    authenticate: (context: { sendRequest: SendRequest }) => Promise<void>;
-};
-
-type BrokerOptions = {
-    clientId: string | null;
-    options: TcpSocketConnectOpts;
-    sasl: SASLProvider | null;
-    ssl: TLSSocketOptions | null;
-};
-
-export class Broker {
-    private connection: Connection;
-    public sendRequest: SendRequest;
-
-    constructor(private options: BrokerOptions) {
-        this.connection = new Connection({
-            clientId: this.options.clientId,
-            connection: this.options.options,
-            ssl: this.options.ssl,
-        });
-        this.sendRequest = this.connection.sendRequest.bind(this.connection);
-    }
-
-    public async connect() {
-        await this.connection.connect();
-        await this.validateApiVersions();
-        await this.saslHandshake();
-        await this.saslAuthenticate();
-        return this;
-    }
-
-    public ensureConnected = memo(() => this.connect());
-
-    public async disconnect() {
-        await this.connection.disconnect();
-    }
-
-    private async validateApiVersions() {
-        const { versions } = await this.sendRequest(API.API_VERSIONS, {});
-
-        const apiByKey = Object.fromEntries(Object.values(API).map((api) => [api.apiKey, api]));
-        versions.forEach(({ apiKey, minVersion, maxVersion }) => {
-            if (!apiByKey[apiKey]) {
-                return;
-            }
-            const { apiVersion } = apiByKey[apiKey];
-            if (apiVersion < minVersion || apiVersion > maxVersion) {
-                throw new KafkaTSError(`API ${apiKey} version ${apiVersion} is not supported by the broker (minVersion=${minVersion}, maxVersion=${maxVersion})`);
-            }
-        });
-    }
-
-    private async saslHandshake() {
-        if (!this.options.sasl) {
-            return;
-        }
-        await this.sendRequest(API.SASL_HANDSHAKE, { mechanism: this.options.sasl.mechanism });
-    }
-
-    private async saslAuthenticate() {
-        await this.options.sasl?.authenticate({ sendRequest: this.sendRequest });
-    }
-}
```
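The `SASLProvider` contract above is small: a mechanism name plus an `authenticate` callback that receives the broker's `sendRequest`. As a minimal sketch, a PLAIN provider compatible with that shape might look like the following; note that the `authBytes` payload field is an assumption, since `src/api/sasl-authenticate.ts` is only summarized, not rendered, in this diff:

```ts
import { API } from './api';
import type { SASLProvider } from './broker';

// Hypothetical PLAIN provider matching the deleted SASLProvider shape.
// RFC 4616 PLAIN encodes authzid, authcid, and password as NUL-separated fields.
export const saslPlainSketch = (opts: { username: string; password: string }): SASLProvider => ({
    mechanism: 'PLAIN',
    authenticate: async ({ sendRequest }) => {
        const authBytes = Buffer.from(`\u0000${opts.username}\u0000${opts.password}`);
        // `authBytes` is an assumed field name for the SASL_AUTHENTICATE request body.
        await sendRequest(API.SASL_AUTHENTICATE, { authBytes });
    },
});
```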
package/src/client.ts
DELETED

```diff
@@ -1,47 +0,0 @@
-import { TcpSocketConnectOpts } from 'net';
-import { TLSSocketOptions } from 'tls';
-import { SASLProvider } from './broker';
-import { Cluster } from './cluster';
-import { Consumer, ConsumerOptions } from './consumer/consumer';
-import { Producer, ProducerOptions } from './producer/producer';
-
-type ClientOptions = {
-    clientId?: string | null;
-    bootstrapServers: TcpSocketConnectOpts[];
-    sasl?: SASLProvider | null;
-    ssl?: TLSSocketOptions | null;
-};
-
-export class Client {
-    private options: Required<ClientOptions>;
-
-    constructor(options: ClientOptions) {
-        this.options = {
-            ...options,
-            clientId: options.clientId ?? null,
-            sasl: options.sasl ?? null,
-            ssl: options.ssl ?? null,
-        };
-    }
-
-    public async startConsumer(options: ConsumerOptions) {
-        const consumer = new Consumer(this.createCluster(), options);
-        await consumer.start();
-        return consumer;
-    }
-
-    public createProducer(options: ProducerOptions) {
-        return new Producer(this.createCluster(), options);
-    }
-
-    public createCluster() {
-        return new Cluster({
-            clientId: this.options.clientId,
-            bootstrapServers: this.options.bootstrapServers,
-            sasl: this.options.sasl,
-            ssl: this.options.ssl,
-        });
-    }
-}
-
-export const createKafkaClient = (options: ClientOptions) => new Client(options);
```
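For orientation, this deleted `Client` was the package's public entry point via `createKafkaClient`, with `startConsumer`, `createProducer`, and `createCluster` each building on a fresh `Cluster`. A minimal wiring sketch, using the same configuration that `cluster.test.ts` below uses:

```ts
import { readFileSync } from 'fs';
// Assumes createKafkaClient and saslPlain are re-exported from src/index.ts (only summarized above).
import { createKafkaClient, saslPlain } from 'kafka-ts';

const kafka = createKafkaClient({
    clientId: 'kafka-ts',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
    sasl: saslPlain({ username: 'admin', password: 'admin' }),
    ssl: { ca: readFileSync('./certs/ca.crt').toString() },
});

// The low-level path exercised by the test file below:
const cluster = kafka.createCluster();
await cluster.connect();
```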
package/src/cluster.test.ts
DELETED

```diff
@@ -1,371 +0,0 @@
-import { randomBytes } from 'crypto';
-import { readFileSync } from 'fs';
-import { afterAll, beforeAll, describe, expect, it } from 'vitest';
-import { API } from './api';
-import { KEY_TYPE } from './api/find-coordinator';
-import { saslPlain } from './auth';
-import { createKafkaClient } from './client';
-import { Cluster } from './cluster';
-import { KafkaTSApiError } from './utils/error';
-
-const kafka = createKafkaClient({
-    clientId: 'kafka-ts',
-    bootstrapServers: [{ host: 'localhost', port: 9092 }],
-    sasl: saslPlain({ username: 'admin', password: 'admin' }),
-    ssl: { ca: readFileSync('./certs/ca.crt').toString() },
-});
-
-describe.sequential('Low-level API', () => {
-    const groupId = randomBytes(16).toString('hex');
-
-    let cluster: Cluster;
-
-    beforeAll(async () => {
-        cluster = await kafka.createCluster();
-        await cluster.connect();
-
-        const metadataResult = await cluster.sendRequest(API.METADATA, {
-            topics: null,
-            allowTopicAutoCreation: false,
-            includeTopicAuthorizedOperations: false,
-        });
-        if (metadataResult.topics.some((topic) => topic.name === 'kafka-ts-test-topic')) {
-            await cluster.sendRequest(API.DELETE_TOPICS, {
-                topics: [{ name: 'kafka-ts-test-topic', topicId: null }],
-                timeoutMs: 10000,
-            });
-        }
-    });
-
-    afterAll(async () => {
-        await cluster.disconnect();
-    });
-
-    it('should request api versions', async () => {
-        const result = await cluster.sendRequest(API.API_VERSIONS, {});
-        expect(result).toMatchSnapshot();
-    });
-
-    let topicId: string = 'd6718d178e1b47c886441ad2d19faea5';
-
-    it('should create topics', async () => {
-        const result = await cluster.sendRequest(API.CREATE_TOPICS, {
-            topics: [
-                {
-                    name: 'kafka-ts-test-topic',
-                    numPartitions: 10,
-                    replicationFactor: 3,
-                    assignments: [],
-                    configs: [],
-                },
-            ],
-            timeoutMs: 10000,
-            validateOnly: false,
-        });
-        topicId = result.topics[0].topicId;
-        result.topics.forEach((topic) => {
-            topic.topicId = 'Any<UUID>';
-        });
-        expect(result).toMatchSnapshot();
-
-        await new Promise((resolve) => setTimeout(resolve, 1000));
-    });
-
-    it('should request metadata for all topics', async () => {
-        const result = await cluster.sendRequest(API.METADATA, {
-            topics: null,
-            allowTopicAutoCreation: false,
-            includeTopicAuthorizedOperations: false,
-        });
-        result.controllerId = 0;
-        result.topics = result.topics.filter((topic) => topic.name !== '__consumer_offsets');
-        result.topics.forEach((topic) => {
-            topic.topicId = 'Any<UUID>';
-            topic.partitions.forEach((partition) => {
-                partition.leaderId = 0;
-                partition.isrNodes = [0];
-                partition.replicaNodes = [0];
-            });
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    let partitionIndex = 0;
-    let leaderId = 0;
-
-    it('should request metadata for a topic', async () => {
-        const result = await cluster.sendRequest(API.METADATA, {
-            topics: [{ id: topicId, name: 'kafka-ts-test-topic' }],
-            allowTopicAutoCreation: false,
-            includeTopicAuthorizedOperations: false,
-        });
-        partitionIndex = result.topics[0].partitions[0].partitionIndex;
-        leaderId = result.topics[0].partitions[0].leaderId;
-        result.controllerId = 0;
-        result.topics.forEach((topic) => {
-            topic.topicId = 'Any<UUID>';
-            topic.partitions.forEach((partition) => {
-                partition.leaderId = 0;
-                partition.isrNodes = [0];
-                partition.replicaNodes = [0];
-            });
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    let producerId = 9n;
-
-    it('should init producer id', async () => {
-        const result = await cluster.sendRequest(API.INIT_PRODUCER_ID, {
-            transactionalId: null,
-            transactionTimeoutMs: 0,
-            producerId,
-            producerEpoch: 0,
-        });
-        result.producerId = 0n;
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should produce messages', async () => {
-        const now = Date.now();
-        const result = await cluster.sendRequestToNode(leaderId)(API.PRODUCE, {
-            transactionalId: null,
-            timeoutMs: 10000,
-            acks: 1,
-            topicData: [
-                {
-                    name: 'kafka-ts-test-topic',
-                    partitionData: [
-                        {
-                            index: partitionIndex,
-                            baseOffset: 0n,
-                            partitionLeaderEpoch: 0,
-                            attributes: 0,
-                            baseSequence: 0,
-                            baseTimestamp: BigInt(now),
-                            lastOffsetDelta: 0,
-                            maxTimestamp: BigInt(now),
-                            producerEpoch: 0,
-                            producerId,
-                            records: [
-                                {
-                                    attributes: 0,
-                                    offsetDelta: 0,
-                                    timestampDelta: 0n,
-                                    key: Buffer.from('key'),
-                                    value: Buffer.from('value'),
-                                    headers: [
-                                        {
-                                            key: Buffer.from('header-key'),
-                                            value: Buffer.from('header-value'),
-                                        },
-                                    ],
-                                },
-                            ],
-                        },
-                    ],
-                },
-            ],
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should fetch messages', async () => {
-        const result = await cluster.sendRequestToNode(leaderId)(API.FETCH, {
-            maxWaitMs: 100,
-            minBytes: 1,
-            maxBytes: 10485760,
-            isolationLevel: 1,
-            sessionId: 0,
-            sessionEpoch: -1,
-            topics: [
-                {
-                    topicId,
-                    partitions: [
-                        {
-                            partition: partitionIndex,
-                            currentLeaderEpoch: -1,
-                            fetchOffset: 0n,
-                            lastFetchedEpoch: 0,
-                            logStartOffset: -1n,
-                            partitionMaxBytes: 10485760,
-                        },
-                    ],
-                },
-            ],
-            forgottenTopicsData: [],
-            rackId: '',
-        });
-        result.responses.forEach((response) => {
-            response.topicId = 'Any<UUID>';
-            response.partitions.forEach((partition) => {
-                partition.records.forEach((record) => {
-                    expect(record.baseTimestamp).toBeGreaterThan(1721926744730n);
-                    expect(record.maxTimestamp).toBeGreaterThan(1721926744730n);
-                    expect(record.crc).toBeGreaterThan(0);
-
-                    record.baseTimestamp = 0n;
-                    record.maxTimestamp = 0n;
-                    record.crc = 0;
-                });
-            });
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    let coordinatorId = -1;
-
-    it('should find coordinator', async () => {
-        const result = await cluster.sendRequest(API.FIND_COORDINATOR, { keyType: KEY_TYPE.GROUP, keys: [groupId] });
-        result.coordinators.forEach((coordinator) => {
-            coordinator.key = 'Any<String>';
-        });
-        coordinatorId = result.coordinators[0].nodeId;
-        result.coordinators.forEach((coordinator) => {
-            coordinator.nodeId = 1;
-            coordinator.port = 9093;
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    let memberId = '';
-
-    it('should fail join group request with new memberId', async () => {
-        try {
-            const result = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
-                groupId,
-                sessionTimeoutMs: 30000,
-                rebalanceTimeoutMs: 60000,
-                memberId,
-                groupInstanceId: null,
-                protocolType: 'consumer',
-                protocols: [
-                    {
-                        name: 'RoundRobinAssigner',
-                        metadata: { version: 0, topics: ['kafka-ts-test-topic'] },
-                    },
-                ],
-                reason: null,
-            });
-            expect(false, 'Should throw an error').toBe(true);
-        } catch (error) {
-            const { response } = error as KafkaTSApiError;
-            memberId = response.memberId;
-            response.memberId = 'Any<UUID>';
-            expect(response).toMatchSnapshot();
-        }
-    });
-
-    it('should join group', async () => {
-        const result = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
-            groupId,
-            sessionTimeoutMs: 30000,
-            rebalanceTimeoutMs: 60000,
-            memberId,
-            groupInstanceId: null,
-            protocolType: 'consumer',
-            protocols: [
-                {
-                    name: 'RoundRobinAssigner',
-                    metadata: { version: 0, topics: ['kafka-ts-test-topic'] },
-                },
-            ],
-            reason: null,
-        });
-        result.memberId = 'Any<UUID>';
-        result.leader = 'Any<UUID>';
-        result.members.forEach((member) => {
-            member.memberId = 'Any<UUID>';
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should sync group', async () => {
-        const result = await cluster.sendRequestToNode(coordinatorId)(API.SYNC_GROUP, {
-            groupId,
-            generationId: 1,
-            memberId,
-            groupInstanceId: null,
-            protocolType: 'consumer',
-            protocolName: 'RoundRobinAssigner',
-            assignments: [
-                {
-                    memberId,
-                    assignment: { 'kafka-test-topic': [0] },
-                },
-            ],
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should commit offsets', async () => {
-        const result = await cluster.sendRequestToNode(coordinatorId)(API.OFFSET_COMMIT, {
-            groupId,
-            generationIdOrMemberEpoch: 1,
-            memberId,
-            groupInstanceId: null,
-            topics: [
-                {
-                    name: 'kafka-ts-test-topic',
-                    partitions: [
-                        { partitionIndex: 0, committedOffset: 1n, committedLeaderEpoch: 0, committedMetadata: null },
-                    ],
-                },
-            ],
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should fetch offsets', async () => {
-        const result = await cluster.sendRequestToNode(coordinatorId)(API.OFFSET_FETCH, {
-            groups: [
-                {
-                    groupId,
-                    topics: [
-                        {
-                            name: 'kafka-ts-test-topic',
-                            partitionIndexes: [0],
-                        },
-                    ],
-                },
-            ],
-            requireStable: false,
-        });
-        result.groups.forEach((group) => {
-            group.groupId = 'Any<String>';
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should heartbeat', async () => {
-        const result = await cluster.sendRequestToNode(coordinatorId)(API.HEARTBEAT, {
-            groupId,
-            generationId: 1,
-            memberId,
-            groupInstanceId: null,
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should leave group', async () => {
-        const result = await cluster.sendRequestToNode(coordinatorId)(API.LEAVE_GROUP, {
-            groupId,
-            members: [{ memberId, groupInstanceId: null, reason: null }],
-        });
-        result.members.forEach((member) => {
-            member.memberId = 'Any<UUID>';
-        });
-        expect(result).toMatchSnapshot();
-    });
-
-    it('should delete topics', async () => {
-        const result = await cluster.sendRequest(API.DELETE_TOPICS, {
-            topics: [{ name: 'kafka-ts-test-topic', topicId: null }],
-            timeoutMs: 10000,
-        });
-        result.responses.forEach((response) => {
-            response.topicId = 'Any<UUID>';
-        });
-        expect(result).toMatchSnapshot();
-    });
-});
```
package/src/cluster.ts
DELETED

```diff
@@ -1,85 +0,0 @@
-import { TcpSocketConnectOpts } from 'net';
-import { TLSSocketOptions } from 'tls';
-import { API } from './api';
-import { Metadata } from './api/metadata';
-import { Broker, SASLProvider } from './broker';
-import { SendRequest } from './connection';
-import { KafkaTSError } from './utils/error';
-import { log } from './utils/logger';
-
-type ClusterOptions = {
-    clientId: string | null;
-    bootstrapServers: TcpSocketConnectOpts[];
-    sasl: SASLProvider | null;
-    ssl: TLSSocketOptions | null;
-};
-
-export class Cluster {
-    private seedBroker = new Broker({ clientId: null, sasl: null, ssl: null, options: { port: 9092 } });
-    private brokerById: Record<number, Broker> = {};
-    private brokerMetadata: Record<number, Metadata['brokers'][number]> = {};
-
-    constructor(private options: ClusterOptions) {}
-
-    public async connect() {
-        this.seedBroker = await this.findSeedBroker();
-        this.brokerById = {};
-
-        const metadata = await this.sendRequest(API.METADATA, {
-            allowTopicAutoCreation: false,
-            includeTopicAuthorizedOperations: false,
-            topics: [],
-        });
-        this.brokerMetadata = Object.fromEntries(metadata.brokers.map((options) => [options.nodeId, options]));
-    }
-
-    public async disconnect() {
-        await Promise.all([this.seedBroker.disconnect(), ...Object.values(this.brokerById).map((x) => x.disconnect())]);
-    }
-
-    public setSeedBroker = async (nodeId: number) => {
-        await this.seedBroker.disconnect();
-        this.seedBroker = await this.acquireBroker(nodeId);
-    };
-
-    public sendRequest: SendRequest = (...args) => this.seedBroker.sendRequest(...args);
-
-    public sendRequestToNode =
-        (nodeId: number): SendRequest =>
-        async (...args) => {
-            if (!this.brokerById[nodeId]) {
-                this.brokerById[nodeId] = await this.acquireBroker(nodeId);
-            }
-            return this.brokerById[nodeId].sendRequest(...args);
-        };
-
-    public async acquireBroker(nodeId: number) {
-        const broker = new Broker({
-            clientId: this.options.clientId,
-            sasl: this.options.sasl,
-            ssl: this.options.ssl,
-            options: this.brokerMetadata[nodeId],
-        });
-        await broker.connect();
-        return broker;
-    }
-
-    private async findSeedBroker() {
-        const randomizedBrokers = this.options.bootstrapServers.toSorted(() => Math.random() - 0.5);
-        for (const options of randomizedBrokers) {
-            try {
-                const broker = await new Broker({
-                    clientId: this.options.clientId,
-                    sasl: this.options.sasl,
-                    ssl: this.options.ssl,
-                    options,
-                });
-                await broker.connect();
-                return broker;
-            } catch (error) {
-                log.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
-            }
-        }
-        throw new KafkaTSError('No seed brokers found');
-    }
-}
```
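One design note on the deleted `Cluster`: `sendRequestToNode` is curried, so the broker connection for a node is established and cached on first use, and the returned function is a `SendRequest` bound to that node. A minimal sketch of the pattern, mirroring how `cluster.test.ts` above routes produce/fetch calls to a partition leader (the connection options here are assumptions for illustration):

```ts
import { API } from './api';
import { Cluster } from './cluster';

const cluster = new Cluster({
    clientId: 'kafka-ts',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
    sasl: null,
    ssl: null,
});

await cluster.connect();

// First call for node 1 connects and caches the broker; later calls reuse it.
const sendToNode1 = cluster.sendRequestToNode(1);
const { versions } = await sendToNode1(API.API_VERSIONS, {});

await cluster.disconnect();
```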
package/src/codecs/gzip.ts
DELETED

```diff
@@ -1,9 +0,0 @@
-import { gzip, unzip } from 'zlib';
-import { Codec } from './types';
-
-export const GZIP: Codec = {
-    compress: async (data) =>
-        new Promise<Buffer>((resolve, reject) => gzip(data, (err, result) => (err ? reject(err) : resolve(result)))),
-    decompress: async (data) =>
-        new Promise<Buffer>((resolve, reject) => unzip(data, (err, result) => (err ? reject(err) : resolve(result)))),
-};
```
package/src/codecs/index.ts
DELETED

```diff
@@ -1,16 +0,0 @@
-import { GZIP } from './gzip';
-import { NONE } from './none';
-import { Codec } from './types';
-
-const codecs: Record<number, Codec> = {
-    0: NONE,
-    1: GZIP,
-};
-
-export const findCodec = (type: number) => {
-    const codec = codecs[type];
-    if (!codec) {
-        throw new Error(`Unsupported codec: ${type}`);
-    }
-    return codec;
-};
```
package/src/codecs/none.ts
DELETED
package/src/codecs/types.ts
DELETED
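The bodies of `none.ts` and `types.ts` are not rendered in this diff, but from `gzip.ts` and `index.ts` above the `Codec` contract is evidently a pair of `Buffer`-to-`Promise<Buffer>` functions, selected by numeric compression type (0 = none, 1 = gzip). As a hedged sketch, a pass-through codec compatible with that contract could look like this; the real `none.ts` (6 lines, not shown) may differ in detail:

```ts
import { Codec } from './types';

// Hypothetical pass-through codec matching the shape GZIP implements above:
// both directions resolve with the input buffer unchanged.
export const NONE: Codec = {
    compress: async (data) => data,
    decompress: async (data) => data,
};
```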