kafka-ts 1.1.8 → 1.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/dist/consumer/consumer-group.js +84 -99
  2. package/dist/consumer/consumer.js +16 -25
  3. package/dist/producer/producer.js +5 -12
  4. package/dist/utils/retry.d.ts +1 -0
  5. package/dist/utils/retry.js +19 -0
  6. package/package.json +1 -1
  7. package/dist/consumer/metadata.d.ts +0 -24
  8. package/dist/consumer/metadata.js +0 -64
  9. package/dist/examples/src/replicator.js +0 -34
  10. package/dist/examples/src/utils/json.js +0 -5
  11. package/dist/request-handler.d.ts +0 -16
  12. package/dist/request-handler.js +0 -67
  13. package/dist/request-handler.test.d.ts +0 -1
  14. package/dist/request-handler.test.js +0 -340
  15. package/dist/src/api/api-versions.js +0 -18
  16. package/dist/src/api/create-topics.js +0 -46
  17. package/dist/src/api/delete-topics.js +0 -26
  18. package/dist/src/api/fetch.js +0 -95
  19. package/dist/src/api/find-coordinator.js +0 -34
  20. package/dist/src/api/heartbeat.js +0 -22
  21. package/dist/src/api/index.js +0 -38
  22. package/dist/src/api/init-producer-id.js +0 -24
  23. package/dist/src/api/join-group.js +0 -48
  24. package/dist/src/api/leave-group.js +0 -30
  25. package/dist/src/api/list-offsets.js +0 -39
  26. package/dist/src/api/metadata.js +0 -47
  27. package/dist/src/api/offset-commit.js +0 -39
  28. package/dist/src/api/offset-fetch.js +0 -44
  29. package/dist/src/api/produce.js +0 -119
  30. package/dist/src/api/sync-group.js +0 -31
  31. package/dist/src/broker.js +0 -35
  32. package/dist/src/connection.js +0 -21
  33. package/dist/src/consumer/consumer-group.js +0 -131
  34. package/dist/src/consumer/consumer.js +0 -103
  35. package/dist/src/consumer/metadata.js +0 -52
  36. package/dist/src/consumer/offset-manager.js +0 -23
  37. package/dist/src/index.js +0 -19
  38. package/dist/src/producer/producer.js +0 -84
  39. package/dist/src/request-handler.js +0 -57
  40. package/dist/src/request-handler.test.js +0 -321
  41. package/dist/src/types.js +0 -2
  42. package/dist/src/utils/api.js +0 -5
  43. package/dist/src/utils/decoder.js +0 -161
  44. package/dist/src/utils/encoder.js +0 -137
  45. package/dist/src/utils/error.js +0 -10
  46. package/dist/utils/debug.d.ts +0 -2
  47. package/dist/utils/debug.js +0 -11
  48. package/dist/utils/memo.d.ts +0 -1
  49. package/dist/utils/memo.js +0 -16
  50. package/dist/utils/mutex.d.ts +0 -3
  51. package/dist/utils/mutex.js +0 -32

package/dist/request-handler.test.js
@@ -1,340 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.kafka = void 0;
- const crypto_1 = require("crypto");
- const fs_1 = require("fs");
- const vitest_1 = require("vitest");
- const api_1 = require("./api");
- const find_coordinator_1 = require("./api/find-coordinator");
- const client_1 = require("./client");
- exports.kafka = (0, client_1.createKafkaClient)({
- clientId: "kafka-ts",
- bootstrapServers: [{ host: "localhost", port: 9092 }],
- sasl: { mechanism: "PLAIN", username: "admin", password: "admin" },
- ssl: { ca: (0, fs_1.readFileSync)("./certs/ca.crt").toString() },
- });
- vitest_1.describe.sequential("Request handler", () => {
- const groupId = (0, crypto_1.randomBytes)(16).toString("hex");
- let cluster;
- (0, vitest_1.beforeAll)(async () => {
- cluster = await exports.kafka.createCluster().connect();
- const metadataResult = await cluster.sendRequest(api_1.API.METADATA, {
- topics: null,
- allowTopicAutoCreation: false,
- includeTopicAuthorizedOperations: false,
- });
- if (metadataResult.topics.some((topic) => topic.name === "kafka-ts-test-topic")) {
- await cluster.sendRequest(api_1.API.DELETE_TOPICS, {
- topics: [{ name: "kafka-ts-test-topic", topicId: null }],
- timeoutMs: 10000,
- });
- }
- });
- (0, vitest_1.afterAll)(async () => {
- await cluster.disconnect();
- });
- (0, vitest_1.it)("should request api versions", async () => {
- const result = await cluster.sendRequest(api_1.API.API_VERSIONS, {});
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- let topicId = "d6718d178e1b47c886441ad2d19faea5";
- (0, vitest_1.it)("should create topics", async () => {
- const result = await cluster.sendRequest(api_1.API.CREATE_TOPICS, {
- topics: [
- {
- name: "kafka-ts-test-topic",
- numPartitions: 1,
- replicationFactor: 1,
- assignments: [],
- configs: [],
- },
- ],
- timeoutMs: 10000,
- validateOnly: false,
- });
- topicId = result.topics[0].topicId;
- result.topics.forEach((topic) => {
- topic.topicId = "Any<UUID>";
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- await new Promise((resolve) => setTimeout(resolve, 1000));
- });
- (0, vitest_1.it)("should request metadata for all topics", async () => {
- const result = await cluster.sendRequest(api_1.API.METADATA, {
- topics: null,
- allowTopicAutoCreation: false,
- includeTopicAuthorizedOperations: false,
- });
- result.controllerId = 0;
- result.topics.forEach((topic) => {
- topic.topicId = "Any<UUID>";
- topic.partitions.forEach((partition) => {
- partition.leaderId = 0;
- partition.isrNodes = [0];
- partition.replicaNodes = [0];
- });
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- let leaderId = 0;
- (0, vitest_1.it)("should request metadata for a topic", async () => {
- const result = await cluster.sendRequest(api_1.API.METADATA, {
- topics: [{ id: topicId, name: "kafka-ts-test-topic" }],
- allowTopicAutoCreation: false,
- includeTopicAuthorizedOperations: false,
- });
- leaderId = result.topics[0].partitions[0].leaderId;
- result.controllerId = 0;
- result.topics.forEach((topic) => {
- topic.topicId = "Any<UUID>";
- topic.partitions.forEach((partition) => {
- partition.leaderId = 0;
- partition.isrNodes = [0];
- partition.replicaNodes = [0];
- });
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should init producer id", async () => {
- const result = await cluster.sendRequest(api_1.API.INIT_PRODUCER_ID, {
- transactionalId: null,
- transactionTimeoutMs: 0,
- producerId: 0n,
- producerEpoch: 0,
- });
- result.producerId = 0n;
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should produce messages", async () => {
- const now = Date.now();
- const result = await cluster.sendRequestToNode(leaderId)(api_1.API.PRODUCE, {
- transactionalId: null,
- timeoutMs: 10000,
- acks: 1,
- topicData: [
- {
- name: "kafka-ts-test-topic",
- partitionData: [
- {
- index: 0,
- baseOffset: 0n,
- partitionLeaderEpoch: 0,
- attributes: 0,
- baseSequence: 0,
- baseTimestamp: BigInt(now),
- lastOffsetDelta: 0,
- maxTimestamp: BigInt(now),
- producerEpoch: 0,
- producerId: 9n,
- records: [
- {
- attributes: 0,
- offsetDelta: 0,
- timestampDelta: 0n,
- key: "key",
- value: "value",
- headers: [
- {
- key: "header-key",
- value: "header-value",
- },
- ],
- },
- ],
- },
- ],
- },
- ],
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should fetch messages", async () => {
- const result = await cluster.sendRequestToNode(leaderId)(api_1.API.FETCH, {
- maxWaitMs: 100,
- minBytes: 1,
- maxBytes: 10485760,
- isolationLevel: 1,
- sessionId: 0,
- sessionEpoch: -1,
- topics: [
- {
- topicId,
- partitions: [
- {
- partition: 0,
- currentLeaderEpoch: -1,
- fetchOffset: 0n,
- lastFetchedEpoch: 0,
- logStartOffset: -1n,
- partitionMaxBytes: 10485760,
- },
- ],
- },
- ],
- forgottenTopicsData: [],
- rackId: "",
- });
- result.responses.forEach((response) => {
- response.topicId = "Any<UUID>";
- response.partitions.forEach((partition) => {
- partition.records.forEach((record) => {
- (0, vitest_1.expect)(record.baseTimestamp).toBeGreaterThan(1721926744730n);
- (0, vitest_1.expect)(record.maxTimestamp).toBeGreaterThan(1721926744730n);
- (0, vitest_1.expect)(record.crc).toBeGreaterThan(0);
- record.baseTimestamp = 0n;
- record.maxTimestamp = 0n;
- record.crc = 0;
- });
- });
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- let coordinatorId = -1;
- (0, vitest_1.it)("should find coordinator", async () => {
- const result = await cluster.sendRequest(api_1.API.FIND_COORDINATOR, { keyType: find_coordinator_1.KEY_TYPE.GROUP, keys: [groupId] });
- result.coordinators.forEach((coordinator) => {
- coordinator.key = "Any<String>";
- });
- coordinatorId = result.coordinators[0].nodeId;
- result.coordinators.forEach((coordinator) => {
- coordinator.nodeId = 1;
- coordinator.port = 9093;
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- let memberId = "";
- (0, vitest_1.it)("should fail join group request with new memberId", async () => {
- try {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.JOIN_GROUP, {
- groupId,
- sessionTimeoutMs: 30000,
- rebalanceTimeoutMs: 60000,
- memberId,
- groupInstanceId: null,
- protocolType: "consumer",
- protocols: [
- {
- name: "RoundRobinAssigner",
- metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
- },
- ],
- reason: null,
- });
- (0, vitest_1.expect)(false, "Should throw an error").toBe(true);
- }
- catch (error) {
- const { response } = error;
- memberId = response.memberId;
- response.memberId = "Any<UUID>";
- (0, vitest_1.expect)(response).toMatchSnapshot();
- }
- });
- (0, vitest_1.it)("should join group", async () => {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.JOIN_GROUP, {
- groupId,
- sessionTimeoutMs: 30000,
- rebalanceTimeoutMs: 60000,
- memberId,
- groupInstanceId: null,
- protocolType: "consumer",
- protocols: [
- {
- name: "RoundRobinAssigner",
- metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
- },
- ],
- reason: null,
- });
- result.memberId = "Any<UUID>";
- result.leader = "Any<UUID>";
- result.members.forEach((member) => {
- member.memberId = "Any<UUID>";
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should sync group", async () => {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.SYNC_GROUP, {
- groupId,
- generationId: 1,
- memberId,
- groupInstanceId: null,
- protocolType: "consumer",
- protocolName: "RoundRobinAssigner",
- assignments: [
- {
- memberId,
- assignment: { "kafka-test-topic": [0] },
- },
- ],
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should commit offsets", async () => {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.OFFSET_COMMIT, {
- groupId,
- generationIdOrMemberEpoch: 1,
- memberId,
- groupInstanceId: null,
- topics: [
- {
- name: "kafka-ts-test-topic",
- partitions: [
- { partitionIndex: 0, committedOffset: 1n, committedLeaderEpoch: 0, committedMetadata: null },
- ],
- },
- ],
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should fetch offsets", async () => {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.OFFSET_FETCH, {
- groups: [
- {
- groupId,
- memberId,
- memberEpoch: 0,
- topics: [
- {
- name: "kafka-ts-test-topic",
- partitionIndexes: [0],
- },
- ],
- },
- ],
- requireStable: false,
- });
- result.groups.forEach((group) => {
- group.groupId = "Any<String>";
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should heartbeat", async () => {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.HEARTBEAT, {
- groupId,
- generationId: 1,
- memberId,
- groupInstanceId: null,
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should leave group", async () => {
- const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.LEAVE_GROUP, {
- groupId,
- members: [{ memberId, groupInstanceId: null, reason: null }],
- });
- result.members.forEach((member) => {
- member.memberId = "Any<UUID>";
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- (0, vitest_1.it)("should delete topics", async () => {
- const result = await cluster.sendRequest(api_1.API.DELETE_TOPICS, {
- topics: [{ name: "kafka-ts-test-topic", topicId: null }],
- timeoutMs: 10000,
- });
- result.responses.forEach((response) => {
- response.topicId = "Any<UUID>";
- });
- (0, vitest_1.expect)(result).toMatchSnapshot();
- });
- });
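
For orientation, the removed test file above exercises the package's low-level request path end to end: build a client, connect a cluster, send typed requests looked up through the API map, and disconnect. The following condensed sketch is lifted from that test rather than being new API surface; the SASL credentials and TLS certificate path are the test's local fixtures, and the "./client" and "./api" import paths are the package-internal modules the test itself uses.

import { readFileSync } from "fs";
import { createKafkaClient } from "./client"; // package-internal module, as imported by the removed test
import { API } from "./api";

const kafka = createKafkaClient({
    clientId: "kafka-ts",
    bootstrapServers: [{ host: "localhost", port: 9092 }],
    sasl: { mechanism: "PLAIN", username: "admin", password: "admin" }, // test fixture
    ssl: { ca: readFileSync("./certs/ca.crt").toString() },             // test fixture
});

const cluster = await kafka.createCluster().connect();
try {
    // Each API.* entry pairs a request encoder with a response decoder (see the api/ hunks below).
    const versions = await cluster.sendRequest(API.API_VERSIONS, {});
    console.log(`broker supports ${versions.versions.length} API keys`);

    const metadata = await cluster.sendRequest(API.METADATA, {
        topics: null,
        allowTopicAutoCreation: false,
        includeTopicAuthorizedOperations: false,
    });
    console.log(metadata.topics.map((topic) => topic.name));
} finally {
    await cluster.disconnect();
}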

package/dist/src/api/api-versions.js
@@ -1,18 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.API_VERSIONS = void 0;
- const api_js_1 = require("../utils/api.js");
- exports.API_VERSIONS = (0, api_js_1.createApi)({
- apiKey: 18,
- apiVersion: 2,
- request: (encoder) => encoder.value(),
- response: (decoder) => ({
- errorCode: decoder.readInt16(),
- versions: decoder.readArray((version) => ({
- apiKey: version.readInt16(),
- minVersion: version.readInt16(),
- maxVersion: version.readInt16(),
- })),
- throttleTimeMs: decoder.readInt32(),
- }),
- });

package/dist/src/api/create-topics.js
@@ -1,46 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.CREATE_TOPICS = void 0;
- const api_1 = require("../utils/api");
- exports.CREATE_TOPICS = (0, api_1.createApi)({
- apiKey: 19,
- apiVersion: 7,
- request: (encoder, data) => encoder
- .writeUVarInt(0)
- .writeCompactArray(data.topics, (encoder, topic) => encoder
- .writeCompactString(topic.name)
- .writeInt32(topic.numPartitions)
- .writeInt16(topic.replicationFactor)
- .writeCompactArray(topic.assignments, (encoder, assignment) => encoder
- .writeInt32(assignment.partitionIndex)
- .writeCompactArray(assignment.brokerIds, (encoder, brokerId) => encoder.writeInt32(brokerId))
- .writeUVarInt(0))
- .writeCompactArray(topic.configs, (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
- .writeUVarInt(0))
- .writeInt32(data.timeoutMs)
- .writeBoolean(data.validateOnly)
- .writeUVarInt(0)
- .value(),
- response: (decoder) => ({
- _tag: decoder.readTagBuffer(),
- throttleTimeMs: decoder.readInt32(),
- topics: decoder.readCompactArray((topic) => ({
- name: topic.readCompactString(),
- topicId: topic.readUUID(),
- errorCode: topic.readInt16(),
- errorMessage: topic.readCompactString(),
- numPartitions: topic.readInt32(),
- replicationFactor: topic.readInt16(),
- configs: topic.readCompactArray((config) => ({
- name: config.readCompactString(),
- value: config.readCompactString(),
- readOnly: config.readBoolean(),
- configSource: config.readInt8(),
- isSensitive: config.readBoolean(),
- _tag: config.readTagBuffer(),
- })),
- _tag: topic.readTagBuffer(),
- })),
- _tag2: decoder.readTagBuffer(),
- }),
- });

package/dist/src/api/delete-topics.js
@@ -1,26 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.DELETE_TOPICS = void 0;
- const api_1 = require("../utils/api");
- exports.DELETE_TOPICS = (0, api_1.createApi)({
- apiKey: 20,
- apiVersion: 6,
- request: (encoder, data) => encoder
- .writeUVarInt(0)
- .writeCompactArray(data.topics, (encoder, topic) => encoder.writeCompactString(topic.name).writeUUID(topic.topicId).writeUVarInt(0))
- .writeInt32(data.timeoutMs)
- .writeUVarInt(0)
- .value(),
- response: (decoder) => ({
- _tag: decoder.readTagBuffer(),
- throttleTimeMs: decoder.readInt32(),
- responses: decoder.readCompactArray((decoder) => ({
- name: decoder.readCompactString(),
- topicId: decoder.readUUID(),
- errorCode: decoder.readInt16(),
- errorMessage: decoder.readCompactString(),
- _tag: decoder.readTagBuffer(),
- })),
- _tag2: decoder.readTagBuffer(),
- }),
- });

package/dist/src/api/fetch.js
@@ -1,95 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.FETCH = void 0;
- const api_1 = require("../utils/api");
- exports.FETCH = (0, api_1.createApi)({
- apiKey: 1,
- apiVersion: 16,
- request: (encoder, data) => encoder
- .writeUVarInt(0)
- .writeInt32(data.maxWaitMs)
- .writeInt32(data.minBytes)
- .writeInt32(data.maxBytes)
- .writeInt8(data.isolationLevel)
- .writeInt32(data.sessionId)
- .writeInt32(data.sessionEpoch)
- .writeCompactArray(data.topics, (encoder, topic) => encoder
- .writeUUID(topic.topicId)
- .writeCompactArray(topic.partitions, (encoder, partition) => encoder
- .writeInt32(partition.partition)
- .writeInt32(partition.currentLeaderEpoch)
- .writeInt64(partition.fetchOffset)
- .writeInt32(partition.lastFetchedEpoch)
- .writeInt64(partition.logStartOffset)
- .writeInt32(partition.partitionMaxBytes)
- .writeUVarInt(0))
- .writeUVarInt(0))
- .writeCompactArray(data.forgottenTopicsData, (encoder, forgottenTopic) => encoder
- .writeUUID(forgottenTopic.topicId)
- .writeCompactArray(forgottenTopic.partitions, (encoder, partition) => encoder.writeInt32(partition))
- .writeUVarInt(0))
- .writeCompactString(data.rackId)
- .writeUVarInt(0)
- .value(),
- response: (decoder) => ({
- _tag: decoder.readTagBuffer(),
- throttleTimeMs: decoder.readInt32(),
- errorCode: decoder.readInt16(),
- sessionId: decoder.readInt32(),
- responses: decoder.readCompactArray((response) => ({
- topicId: response.readUUID(),
- partitions: response.readCompactArray((partition) => ({
- partitionIndex: partition.readInt32(),
- errorCode: partition.readInt16(),
- highWatermark: partition.readInt64(),
- lastStableOffset: partition.readInt64(),
- logStartOffset: partition.readInt64(),
- abortedTransactions: partition.readCompactArray((abortedTransaction) => ({
- producerId: abortedTransaction.readInt64(),
- firstOffset: abortedTransaction.readInt64(),
- _tag: abortedTransaction.readTagBuffer(),
- })),
- preferredReadReplica: partition.readInt32(),
- records: decodeRecords(partition),
- _tag: partition.readTagBuffer(),
- })),
- _tag: response.readTagBuffer(),
- })),
- _tag2: decoder.readTagBuffer(),
- }),
- });
- const decodeRecords = (decoder) => {
- const size = decoder.readUVarInt() - 1;
- if (size <= 0) {
- return [];
- }
- const results = [];
- while (decoder.buffer.length > decoder.offset + 49) {
- results.push({
- baseOffset: decoder.readInt64(),
- batchLength: decoder.readInt32(),
- partitionLeaderEpoch: decoder.readInt32(),
- magic: decoder.readInt8(),
- crc: decoder.readUInt32(),
- attributes: decoder.readInt16(),
- lastOffsetDelta: decoder.readInt32(),
- baseTimestamp: decoder.readInt64(),
- maxTimestamp: decoder.readInt64(),
- producerId: decoder.readInt64(),
- producerEpoch: decoder.readInt16(),
- baseSequence: decoder.readInt32(),
- records: decoder.readRecords((record) => ({
- attributes: record.readInt8(),
- timestampDelta: record.readVarLong(),
- offsetDelta: record.readVarInt(),
- key: record.read(Math.ceil((record.readVarInt() - 1) / 2))?.toString() || null,
- value: record.read(Math.ceil((record.readVarInt() - 1) / 2))?.toString() || null,
- headers: record.readCompactArray((header) => ({
- key: header.read(Math.ceil((header.readVarInt() - 1) / 2))?.toString(),
- value: header.read(Math.ceil((header.readVarInt() - 1) / 2))?.toString(),
- })),
- })),
- });
- }
- return results;
- };
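
One detail in the removed decodeRecords helper above: record key, value, and header lengths in the Kafka record format are zigzag-encoded varints, and the Math.ceil((readVarInt() - 1) / 2) expression is a shortcut that recovers a non-negative length from the raw varint while letting the null marker (-1, encoded as 1) fall through to a zero-byte read and the || null fallback. A small sketch of the equivalence, assuming readVarInt() returns the raw, un-zigzagged varint value (the package's decoder implementation is not included in this diff):

// Standard zigzag decoding: 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...
const zigzagDecode = (raw: number): number => (raw >>> 1) ^ -(raw & 1);

// Shortcut used in decodeRecords above; matches zigzagDecode for lengths >= 0
// and maps the null marker (raw = 1) to a zero-length read.
const shortcutLength = (raw: number): number => Math.ceil((raw - 1) / 2);

for (const raw of [1, 2, 10]) {
    console.log(raw, zigzagDecode(raw), shortcutLength(raw));
    // 1  -> -1 (null key/value)  vs 0 (empty read, turned into null by `|| null`)
    // 2  ->  1                   vs 1
    // 10 ->  5                   vs 5
}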

package/dist/src/api/find-coordinator.js
@@ -1,34 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.FIND_COORDINATOR = exports.KEY_TYPE = void 0;
- const api_1 = require("../utils/api");
- exports.KEY_TYPE = {
- GROUP: 0,
- TRANSACTION: 1,
- };
- exports.FIND_COORDINATOR = (0, api_1.createApi)({
- apiKey: 10,
- apiVersion: 4,
- request: (encoder, data) => {
- return encoder
- .writeUVarInt(0)
- .writeInt8(data.keyType)
- .writeCompactArray(data.keys, (encoder, key) => encoder.writeCompactString(key))
- .writeUVarInt(0)
- .value();
- },
- response: (decoder) => ({
- _tag: decoder.readTagBuffer(),
- throttleTimeMs: decoder.readInt32(),
- coordinators: decoder.readCompactArray((decoder) => ({
- key: decoder.readCompactString(),
- nodeId: decoder.readInt32(),
- host: decoder.readCompactString(),
- port: decoder.readInt32(),
- errorCode: decoder.readInt16(),
- errorMessage: decoder.readCompactString(),
- _tag: decoder.readTagBuffer(),
- })),
- _tag2: decoder.readTagBuffer(),
- }),
- });

package/dist/src/api/heartbeat.js
@@ -1,22 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.HEARTBEAT = void 0;
- const api_1 = require("../utils/api");
- exports.HEARTBEAT = (0, api_1.createApi)({
- apiKey: 12,
- apiVersion: 4,
- request: (encoder, data) => encoder
- .writeUVarInt(0)
- .writeCompactString(data.groupId)
- .writeInt32(data.generationId)
- .writeCompactString(data.memberId)
- .writeCompactString(data.groupInstanceId)
- .writeUVarInt(0)
- .value(),
- response: (decoder) => ({
- _tag: decoder.readTagBuffer(),
- throttleTimeMs: decoder.readInt32(),
- errorCode: decoder.readInt16(),
- _tag2: decoder.readTagBuffer(),
- }),
- });

package/dist/src/api/index.js
@@ -1,38 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.API_ERROR = exports.API = void 0;
- const api_versions_1 = require("./api-versions");
- const create_topics_1 = require("./create-topics");
- const delete_topics_1 = require("./delete-topics");
- const fetch_1 = require("./fetch");
- const find_coordinator_1 = require("./find-coordinator");
- const heartbeat_1 = require("./heartbeat");
- const init_producer_id_1 = require("./init-producer-id");
- const join_group_1 = require("./join-group");
- const leave_group_1 = require("./leave-group");
- const list_offsets_1 = require("./list-offsets");
- const metadata_1 = require("./metadata");
- const offset_commit_1 = require("./offset-commit");
- const offset_fetch_1 = require("./offset-fetch");
- const produce_1 = require("./produce");
- const sync_group_1 = require("./sync-group");
- exports.API = {
- API_VERSIONS: api_versions_1.API_VERSIONS,
- CREATE_TOPICS: create_topics_1.CREATE_TOPICS,
- DELETE_TOPICS: delete_topics_1.DELETE_TOPICS,
- FETCH: fetch_1.FETCH,
- FIND_COORDINATOR: find_coordinator_1.FIND_COORDINATOR,
- HEARTBEAT: heartbeat_1.HEARTBEAT,
- INIT_PRODUCER_ID: init_producer_id_1.INIT_PRODUCER_ID,
- JOIN_GROUP: join_group_1.JOIN_GROUP,
- LEAVE_GROUP: leave_group_1.LEAVE_GROUP,
- LIST_OFFSETS: list_offsets_1.LIST_OFFSETS,
- METADATA: metadata_1.METADATA,
- OFFSET_COMMIT: offset_commit_1.OFFSET_COMMIT,
- OFFSET_FETCH: offset_fetch_1.OFFSET_FETCH,
- PRODUCE: produce_1.PRODUCE,
- SYNC_GROUP: sync_group_1.SYNC_GROUP
- };
- exports.API_ERROR = {
- MEMBER_ID_REQUIRED: 79
- };
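
The single error constant exported here, MEMBER_ID_REQUIRED (79), corresponds to the two-step join handshake that the removed request-handler.test.js exercises: the first JOIN_GROUP call with an empty memberId is rejected, the broker's error response carries an assigned member id, and the retry with that id succeeds. A sketch of that pattern, reusing the request body shown in the test; the check against error.response.errorCode is an assumption about the thrown error's shape (the test itself only reads error.response.memberId):

// `cluster`, `coordinatorId`, and `groupId` are as set up in the removed test above.
const joinBody = {
    groupId,
    sessionTimeoutMs: 30000,
    rebalanceTimeoutMs: 60000,
    memberId: "",
    groupInstanceId: null,
    protocolType: "consumer",
    protocols: [{ name: "RoundRobinAssigner", metadata: { version: 0, topics: ["kafka-ts-test-topic"] } }],
    reason: null,
};

let joined;
try {
    joined = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, joinBody);
} catch (error: any) {
    // The first attempt is expected to fail; the broker assigns a member id in the error response.
    if (error.response?.errorCode !== API_ERROR.MEMBER_ID_REQUIRED) throw error; // assumption: errorCode is exposed here
    joined = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
        ...joinBody,
        memberId: error.response.memberId,
    });
}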

package/dist/src/api/init-producer-id.js
@@ -1,24 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.INIT_PRODUCER_ID = void 0;
- const api_1 = require("../utils/api");
- exports.INIT_PRODUCER_ID = (0, api_1.createApi)({
- apiKey: 22,
- apiVersion: 4,
- request: (encoder, body) => encoder
- .writeUVarInt(0)
- .writeCompactString(body.transactionalId)
- .writeInt32(body.transactionTimeoutMs)
- .writeInt64(body.producerId)
- .writeInt16(body.producerEpoch)
- .writeUVarInt(0)
- .value(),
- response: (decoder) => ({
- _tag: decoder.readTagBuffer(),
- throttleTimeMs: decoder.readInt32(),
- errorCode: decoder.readInt16(),
- producerId: decoder.readInt64(),
- producerEpoch: decoder.readInt16(),
- _tag2: decoder.readTagBuffer(),
- })
- });