kafka-ts 0.0.2-beta → 0.0.3-beta
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/.github/workflows/release.yml +14 -14
- package/.prettierrc +3 -2
- package/README.md +43 -33
- package/docker-compose.yml +102 -102
- package/examples/package-lock.json +28 -28
- package/examples/package.json +12 -12
- package/examples/src/client.ts +6 -6
- package/examples/src/consumer.ts +9 -8
- package/examples/src/create-topic.ts +23 -16
- package/examples/src/producer.ts +7 -7
- package/examples/src/replicator.ts +4 -4
- package/examples/src/utils/delay.ts +1 -0
- package/examples/src/utils/json.ts +1 -1
- package/examples/tsconfig.json +2 -2
- package/package.json +21 -19
- package/src/api/api-versions.ts +2 -2
- package/src/api/create-topics.ts +2 -2
- package/src/api/delete-topics.ts +2 -2
- package/src/api/fetch.ts +3 -3
- package/src/api/find-coordinator.ts +2 -2
- package/src/api/heartbeat.ts +2 -2
- package/src/api/index.ts +18 -18
- package/src/api/init-producer-id.ts +2 -2
- package/src/api/join-group.ts +3 -3
- package/src/api/leave-group.ts +2 -2
- package/src/api/list-offsets.ts +3 -3
- package/src/api/metadata.ts +3 -3
- package/src/api/offset-commit.ts +2 -2
- package/src/api/offset-fetch.ts +2 -2
- package/src/api/produce.ts +3 -3
- package/src/api/sasl-authenticate.ts +2 -2
- package/src/api/sasl-handshake.ts +2 -2
- package/src/api/sync-group.ts +2 -2
- package/src/broker.ts +9 -9
- package/src/client.ts +6 -6
- package/src/cluster.test.ts +68 -68
- package/src/cluster.ts +7 -7
- package/src/connection.ts +17 -15
- package/src/consumer/consumer-group.ts +14 -14
- package/src/consumer/consumer-metadata.ts +2 -2
- package/src/consumer/consumer.ts +84 -82
- package/src/consumer/fetch-manager.ts +179 -0
- package/src/consumer/fetcher.ts +57 -0
- package/src/consumer/offset-manager.ts +6 -6
- package/src/consumer/processor.ts +47 -0
- package/src/distributors/assignments-to-replicas.test.ts +7 -7
- package/src/distributors/assignments-to-replicas.ts +1 -1
- package/src/distributors/messages-to-topic-partition-leaders.test.ts +6 -6
- package/src/index.ts +4 -3
- package/src/metadata.ts +4 -4
- package/src/producer/producer.ts +8 -8
- package/src/types.ts +2 -0
- package/src/utils/api.ts +4 -4
- package/src/utils/debug.ts +2 -2
- package/src/utils/decoder.ts +4 -4
- package/src/utils/encoder.ts +6 -6
- package/src/utils/error.ts +3 -3
- package/src/utils/retrier.ts +1 -1
- package/src/utils/tracer.ts +7 -4
- package/tsconfig.json +16 -16
package/src/cluster.test.ts
CHANGED
@@ -1,21 +1,21 @@
-import { randomBytes } from "crypto";
-import { readFileSync } from "fs";
-import { afterAll, beforeAll, describe, expect, it } from "vitest";
-import { API } from "./api";
-import { KEY_TYPE } from "./api/find-coordinator";
-import { createKafkaClient } from "./client";
-import { Cluster } from "./cluster";
-import { KafkaTSApiError } from "./utils/error";
+import { randomBytes } from 'crypto';
+import { readFileSync } from 'fs';
+import { afterAll, beforeAll, describe, expect, it } from 'vitest';
+import { API } from './api';
+import { KEY_TYPE } from './api/find-coordinator';
+import { createKafkaClient } from './client';
+import { Cluster } from './cluster';
+import { KafkaTSApiError } from './utils/error';
 
 export const kafka = createKafkaClient({
-    clientId: "kafka-ts",
-    bootstrapServers: [{ host: "localhost", port: 9092 }],
-    sasl: { mechanism: "PLAIN", username: "admin", password: "admin" },
-    ssl: { ca: readFileSync("./certs/ca.crt").toString() },
+    clientId: 'kafka-ts',
+    bootstrapServers: [{ host: 'localhost', port: 9092 }],
+    sasl: { mechanism: 'PLAIN', username: 'admin', password: 'admin' },
+    ssl: { ca: readFileSync('./certs/ca.crt').toString() },
 });
 
-describe.sequential("Request handler", () => {
-    const groupId = randomBytes(16).toString("hex");
+describe.sequential('Request handler', () => {
+    const groupId = randomBytes(16).toString('hex');
 
     let cluster: Cluster;
 
@@ -27,9 +27,9 @@ describe.sequential("Request handler", () => {
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
-        if (metadataResult.topics.some((topic) => topic.name === "kafka-ts-test-topic")) {
+        if (metadataResult.topics.some((topic) => topic.name === 'kafka-ts-test-topic')) {
             await cluster.sendRequest(API.DELETE_TOPICS, {
-                topics: [{ name: "kafka-ts-test-topic", topicId: null }],
+                topics: [{ name: 'kafka-ts-test-topic', topicId: null }],
                 timeoutMs: 10000,
             });
         }
@@ -39,18 +39,18 @@ describe.sequential("Request handler", () => {
         await cluster.disconnect();
     });
 
-    it("should request api versions", async () => {
+    it('should request api versions', async () => {
         const result = await cluster.sendRequest(API.API_VERSIONS, {});
         expect(result).toMatchSnapshot();
     });
 
-    let topicId: string = "d6718d178e1b47c886441ad2d19faea5";
+    let topicId: string = 'd6718d178e1b47c886441ad2d19faea5';
 
-    it("should create topics", async () => {
+    it('should create topics', async () => {
         const result = await cluster.sendRequest(API.CREATE_TOPICS, {
             topics: [
                 {
-                    name: "kafka-ts-test-topic",
+                    name: 'kafka-ts-test-topic',
                     numPartitions: 1,
                     replicationFactor: 1,
                     assignments: [],
@@ -62,23 +62,23 @@ describe.sequential("Request handler", () => {
         });
         topicId = result.topics[0].topicId;
         result.topics.forEach((topic) => {
-            topic.topicId = "Any<UUID>";
+            topic.topicId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
 
         await new Promise((resolve) => setTimeout(resolve, 1000));
     });
 
-    it("should request metadata for all topics", async () => {
+    it('should request metadata for all topics', async () => {
         const result = await cluster.sendRequest(API.METADATA, {
             topics: null,
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
         result.controllerId = 0;
-        result.topics = result.topics.filter((topic) => topic.name !== "__consumer_offsets");
+        result.topics = result.topics.filter((topic) => topic.name !== '__consumer_offsets');
         result.topics.forEach((topic) => {
-            topic.topicId = "Any<UUID>";
+            topic.topicId = 'Any<UUID>';
             topic.partitions.forEach((partition) => {
                 partition.leaderId = 0;
                 partition.isrNodes = [0];
@@ -90,16 +90,16 @@ describe.sequential("Request handler", () => {
 
     let leaderId = 0;
 
-    it("should request metadata for a topic", async () => {
+    it('should request metadata for a topic', async () => {
         const result = await cluster.sendRequest(API.METADATA, {
-            topics: [{ id: topicId, name: "kafka-ts-test-topic" }],
+            topics: [{ id: topicId, name: 'kafka-ts-test-topic' }],
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
         leaderId = result.topics[0].partitions[0].leaderId;
         result.controllerId = 0;
         result.topics.forEach((topic) => {
-            topic.topicId = "Any<UUID>";
+            topic.topicId = 'Any<UUID>';
             topic.partitions.forEach((partition) => {
                 partition.leaderId = 0;
                 partition.isrNodes = [0];
@@ -111,7 +111,7 @@ describe.sequential("Request handler", () => {
 
     let producerId = 9n;
 
-    it("should init producer id", async () => {
+    it('should init producer id', async () => {
         const result = await cluster.sendRequest(API.INIT_PRODUCER_ID, {
             transactionalId: null,
             transactionTimeoutMs: 0,
@@ -122,7 +122,7 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should produce messages", async () => {
+    it('should produce messages', async () => {
         const now = Date.now();
         const result = await cluster.sendRequestToNode(leaderId)(API.PRODUCE, {
             transactionalId: null,
@@ -130,7 +130,7 @@ describe.sequential("Request handler", () => {
             acks: 1,
             topicData: [
                 {
-                    name: "kafka-ts-test-topic",
+                    name: 'kafka-ts-test-topic',
                     partitionData: [
                         {
                             index: 0,
@@ -148,12 +148,12 @@ describe.sequential("Request handler", () => {
                                     attributes: 0,
                                     offsetDelta: 0,
                                     timestampDelta: 0n,
-                                    key: "key",
-                                    value: "value",
+                                    key: 'key',
+                                    value: 'value',
                                     headers: [
                                         {
-                                            key: "header-key",
-                                            value: "header-value",
+                                            key: 'header-key',
+                                            value: 'header-value',
                                         },
                                     ],
                                 },
@@ -166,7 +166,7 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should fetch messages", async () => {
+    it('should fetch messages', async () => {
         const result = await cluster.sendRequestToNode(leaderId)(API.FETCH, {
             maxWaitMs: 100,
             minBytes: 1,
@@ -190,10 +190,10 @@ describe.sequential("Request handler", () => {
                 },
             ],
             forgottenTopicsData: [],
-            rackId: "",
+            rackId: '',
         });
         result.responses.forEach((response) => {
-            response.topicId = "Any<UUID>";
+            response.topicId = 'Any<UUID>';
             response.partitions.forEach((partition) => {
                 partition.records.forEach((record) => {
                     expect(record.baseTimestamp).toBeGreaterThan(1721926744730n);
@@ -211,10 +211,10 @@ describe.sequential("Request handler", () => {
 
     let coordinatorId = -1;
 
-    it("should find coordinator", async () => {
+    it('should find coordinator', async () => {
         const result = await cluster.sendRequest(API.FIND_COORDINATOR, { keyType: KEY_TYPE.GROUP, keys: [groupId] });
         result.coordinators.forEach((coordinator) => {
-            coordinator.key = "Any<String>";
+            coordinator.key = 'Any<String>';
         });
         coordinatorId = result.coordinators[0].nodeId;
         result.coordinators.forEach((coordinator) => {
@@ -224,9 +224,9 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    let memberId = "";
+    let memberId = '';
 
-    it("should fail join group request with new memberId", async () => {
+    it('should fail join group request with new memberId', async () => {
         try {
             const result = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
                 groupId,
@@ -234,67 +234,67 @@ describe.sequential("Request handler", () => {
                 rebalanceTimeoutMs: 60000,
                 memberId,
                 groupInstanceId: null,
-                protocolType: "consumer",
+                protocolType: 'consumer',
                 protocols: [
                     {
-                        name: "RoundRobinAssigner",
-                        metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
+                        name: 'RoundRobinAssigner',
+                        metadata: { version: 0, topics: ['kafka-ts-test-topic'] },
                     },
                 ],
                 reason: null,
             });
-            expect(false, "Should throw an error").toBe(true);
+            expect(false, 'Should throw an error').toBe(true);
         } catch (error) {
             const { response } = error as KafkaTSApiError;
             memberId = response.memberId;
-            response.memberId = "Any<UUID>";
+            response.memberId = 'Any<UUID>';
             expect(response).toMatchSnapshot();
         }
     });
 
-    it("should join group", async () => {
+    it('should join group', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
             groupId,
             sessionTimeoutMs: 30000,
             rebalanceTimeoutMs: 60000,
             memberId,
             groupInstanceId: null,
-            protocolType: "consumer",
+            protocolType: 'consumer',
             protocols: [
                 {
-                    name: "RoundRobinAssigner",
-                    metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
+                    name: 'RoundRobinAssigner',
+                    metadata: { version: 0, topics: ['kafka-ts-test-topic'] },
                 },
             ],
             reason: null,
         });
-        result.memberId = "Any<UUID>";
-        result.leader = "Any<UUID>";
+        result.memberId = 'Any<UUID>';
+        result.leader = 'Any<UUID>';
         result.members.forEach((member) => {
-            member.memberId = "Any<UUID>";
+            member.memberId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should sync group", async () => {
+    it('should sync group', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.SYNC_GROUP, {
             groupId,
             generationId: 1,
             memberId,
             groupInstanceId: null,
-            protocolType: "consumer",
-            protocolName: "RoundRobinAssigner",
+            protocolType: 'consumer',
+            protocolName: 'RoundRobinAssigner',
             assignments: [
                 {
                     memberId,
-                    assignment: { "kafka-test-topic": [0] },
+                    assignment: { 'kafka-test-topic': [0] },
                 },
             ],
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should commit offsets", async () => {
+    it('should commit offsets', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.OFFSET_COMMIT, {
             groupId,
             generationIdOrMemberEpoch: 1,
@@ -302,7 +302,7 @@ describe.sequential("Request handler", () => {
             groupInstanceId: null,
             topics: [
                 {
-                    name: "kafka-ts-test-topic",
+                    name: 'kafka-ts-test-topic',
                     partitions: [
                         { partitionIndex: 0, committedOffset: 1n, committedLeaderEpoch: 0, committedMetadata: null },
                     ],
@@ -312,7 +312,7 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should fetch offsets", async () => {
+    it('should fetch offsets', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.OFFSET_FETCH, {
             groups: [
                 {
@@ -321,7 +321,7 @@ describe.sequential("Request handler", () => {
                     memberEpoch: 0,
                     topics: [
                         {
-                            name: "kafka-ts-test-topic",
+                            name: 'kafka-ts-test-topic',
                             partitionIndexes: [0],
                         },
                     ],
@@ -330,12 +330,12 @@ describe.sequential("Request handler", () => {
             requireStable: false,
         });
         result.groups.forEach((group) => {
-            group.groupId = "Any<String>";
+            group.groupId = 'Any<String>';
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should heartbeat", async () => {
+    it('should heartbeat', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.HEARTBEAT, {
             groupId,
             generationId: 1,
@@ -345,24 +345,24 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should leave group", async () => {
+    it('should leave group', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.LEAVE_GROUP, {
             groupId,
             members: [{ memberId, groupInstanceId: null, reason: null }],
         });
         result.members.forEach((member) => {
-            member.memberId = "Any<UUID>";
+            member.memberId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should delete topics", async () => {
+    it('should delete topics', async () => {
         const result = await cluster.sendRequest(API.DELETE_TOPICS, {
-            topics: [{ name: "kafka-ts-test-topic", topicId: null }],
+            topics: [{ name: 'kafka-ts-test-topic', topicId: null }],
             timeoutMs: 10000,
         });
         result.responses.forEach((response) => {
-            response.topicId = "Any<UUID>";
+            response.topicId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
     });
package/src/cluster.ts
CHANGED
@@ -1,9 +1,9 @@
-import { TcpSocketConnectOpts } from "net";
-import { TLSSocketOptions } from "tls";
-import { API } from "./api";
-import { Broker, SASLOptions } from "./broker";
-import { SendRequest } from "./connection";
-import { ConnectionError, KafkaTSError } from "./utils/error";
+import { TcpSocketConnectOpts } from 'net';
+import { TLSSocketOptions } from 'tls';
+import { API } from './api';
+import { Broker, SASLOptions } from './broker';
+import { SendRequest } from './connection';
+import { ConnectionError, KafkaTSError } from './utils/error';
 
 type ClusterOptions = {
     clientId: string | null;
@@ -82,6 +82,6 @@ export class Cluster {
                 console.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
             }
         }
-        throw new KafkaTSError("No seed brokers found");
+        throw new KafkaTSError('No seed brokers found');
     }
 }
package/src/connection.ts
CHANGED
@@ -1,12 +1,14 @@
-import assert from "assert";
-import net, { isIP, Socket, TcpSocketConnectOpts } from "net";
-import tls, { TLSSocketOptions } from "tls";
-import { getApiName } from "./api";
-import { Api } from "./utils/api";
-import { Decoder } from "./utils/decoder";
-import { Encoder } from "./utils/encoder";
-import { ConnectionError } from "./utils/error";
-import { createTracer } from "./utils/tracer";
+import assert from 'assert';
+import net, { isIP, Socket, TcpSocketConnectOpts } from 'net';
+import tls, { TLSSocketOptions } from 'tls';
+import { getApiName } from './api';
+import { Api } from './utils/api';
+import { Decoder } from './utils/decoder';
+import { Encoder } from './utils/encoder';
+import { ConnectionError } from './utils/error';
+import { createTracer } from './utils/tracer';
+
+const trace = createTracer('Connection');
 
 export type ConnectionOptions = {
     clientId: string | null;
@@ -44,14 +46,14 @@ export class Connection {
                       resolve,
                   )
                 : net.connect(connection, resolve);
-            this.socket.once("error", reject);
+            this.socket.once('error', reject);
         });
-        this.socket.removeAllListeners("error");
+        this.socket.removeAllListeners('error');
 
-        this.socket.on("data", (data) => this.handleData(data));
-        this.socket.once("close", async () => {
+        this.socket.on('data', (data) => this.handleData(data));
+        this.socket.once('close', async () => {
             Object.values(this.queue).forEach(({ reject }) => {
-                reject(new ConnectionError("Socket closed unexpectedly"));
+                reject(new ConnectionError('Socket closed unexpectedly'));
             });
             this.queue = {};
         });
@@ -100,7 +102,7 @@ export class Connection {
 
     private write(buffer: Buffer) {
         return new Promise<void>((resolve, reject) => {
-            const { stack } = new Error("Write error");
+            const { stack } = new Error('Write error');
             this.socket.write(buffer, (error) => {
                 if (error) {
                     const err = new ConnectionError(error.message);
package/src/consumer/consumer-group.ts
CHANGED
@@ -1,10 +1,10 @@
-import { API, API_ERROR } from "../api";
-import { KEY_TYPE } from "../api/find-coordinator";
-import { Assignment, MemberAssignment } from "../api/sync-group";
-import { Cluster } from "../cluster";
-import { KafkaTSApiError, KafkaTSError } from "../utils/error";
-import { ConsumerMetadata } from "./consumer-metadata";
-import { OffsetManager } from "./offset-manager";
+import { API, API_ERROR } from '../api';
+import { KEY_TYPE } from '../api/find-coordinator';
+import { Assignment, MemberAssignment } from '../api/sync-group';
+import { Cluster } from '../cluster';
+import { KafkaTSApiError, KafkaTSError } from '../utils/error';
+import { ConsumerMetadata } from './consumer-metadata';
+import { OffsetManager } from './offset-manager';
 
 type ConsumerGroupOptions = {
     cluster: Cluster;
@@ -19,9 +19,9 @@ type ConsumerGroupOptions = {
 
 export class ConsumerGroup {
     private coordinatorId = -1;
-    private memberId = "";
+    private memberId = '';
     private generationId = -1;
-    private leaderId = "";
+    private leaderId = '';
     private memberIds: string[] = [];
     private heartbeatInterval: NodeJS.Timeout | null = null;
     private heartbeatError: KafkaTSError | null = null;
@@ -76,8 +76,8 @@ export class ConsumerGroup {
             memberId: this.memberId,
             sessionTimeoutMs,
             rebalanceTimeoutMs,
-            protocolType: "consumer",
-            protocols: [{ name: "RoundRobinAssigner", metadata: { version: 0, topics } }],
+            protocolType: 'consumer',
+            protocols: [{ name: 'RoundRobinAssigner', metadata: { version: 0, topics } }],
             reason: null,
         });
         this.memberId = response.memberId;
@@ -118,11 +118,11 @@ export class ConsumerGroup {
             groupInstanceId,
             memberId: this.memberId,
             generationId: this.generationId,
-            protocolType: "consumer",
-            protocolName: "RoundRobinAssigner",
+            protocolType: 'consumer',
+            protocolName: 'RoundRobinAssigner',
             assignments,
         });
-        metadata.setAssignment(JSON.parse(response.assignments || "{}") as Assignment);
+        metadata.setAssignment(JSON.parse(response.assignments || '{}') as Assignment);
     }
 
     private async offsetFetch() {
package/src/consumer/consumer-metadata.ts
CHANGED
@@ -1,5 +1,5 @@
-import { Assignment } from "../api/sync-group";
-import { Metadata } from "../metadata";
+import { Assignment } from '../api/sync-group';
+import { Metadata } from '../metadata';
 
 export class ConsumerMetadata extends Metadata {
     private assignment: Assignment = {};