kafka-ts 1.1.7 → 1.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dist/cluster.js +1 -0
  2. package/dist/consumer/metadata.d.ts +24 -0
  3. package/dist/consumer/metadata.js +64 -0
  4. package/dist/examples/src/replicator.js +34 -0
  5. package/dist/examples/src/utils/json.js +5 -0
  6. package/dist/request-handler.d.ts +16 -0
  7. package/dist/request-handler.js +67 -0
  8. package/dist/request-handler.test.d.ts +1 -0
  9. package/dist/request-handler.test.js +340 -0
  10. package/dist/src/api/api-versions.js +18 -0
  11. package/dist/src/api/create-topics.js +46 -0
  12. package/dist/src/api/delete-topics.js +26 -0
  13. package/dist/src/api/fetch.js +95 -0
  14. package/dist/src/api/find-coordinator.js +34 -0
  15. package/dist/src/api/heartbeat.js +22 -0
  16. package/dist/src/api/index.js +38 -0
  17. package/dist/src/api/init-producer-id.js +24 -0
  18. package/dist/src/api/join-group.js +48 -0
  19. package/dist/src/api/leave-group.js +30 -0
  20. package/dist/src/api/list-offsets.js +39 -0
  21. package/dist/src/api/metadata.js +47 -0
  22. package/dist/src/api/offset-commit.js +39 -0
  23. package/dist/src/api/offset-fetch.js +44 -0
  24. package/dist/src/api/produce.js +119 -0
  25. package/dist/src/api/sync-group.js +31 -0
  26. package/dist/src/broker.js +35 -0
  27. package/dist/src/connection.js +21 -0
  28. package/dist/src/consumer/consumer-group.js +131 -0
  29. package/dist/src/consumer/consumer.js +103 -0
  30. package/dist/src/consumer/metadata.js +52 -0
  31. package/dist/src/consumer/offset-manager.js +23 -0
  32. package/dist/src/index.js +19 -0
  33. package/dist/src/producer/producer.js +84 -0
  34. package/dist/src/request-handler.js +57 -0
  35. package/dist/src/request-handler.test.js +321 -0
  36. package/dist/src/types.js +2 -0
  37. package/dist/src/utils/api.js +5 -0
  38. package/dist/src/utils/decoder.js +161 -0
  39. package/dist/src/utils/encoder.js +137 -0
  40. package/dist/src/utils/error.js +10 -0
  41. package/dist/utils/debug.d.ts +2 -0
  42. package/dist/utils/debug.js +11 -0
  43. package/dist/utils/memo.d.ts +1 -0
  44. package/dist/utils/memo.js +16 -0
  45. package/dist/utils/mutex.d.ts +3 -0
  46. package/dist/utils/mutex.js +32 -0
  47. package/package.json +1 -1
package/dist/cluster.js CHANGED
@@ -28,6 +28,7 @@ class Cluster {
     async connect() {
         this.seedBroker = await this.findSeedBroker();
         this.brokerById = {};
+        await this.refreshBrokerMetadata();
    }
    async disconnect() {
        await Promise.all([
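
This is the only change to a pre-existing file in this release: connect() now awaits refreshBrokerMetadata() before resolving, so broker metadata is presumably populated before any per-node routing such as the sendRequestToNode(leaderId) calls in the test suite below. A hypothetical before/after sketch (refreshBrokerMetadata's body is not shown in this diff, only its call site):

// 1.1.7 (sketch): brokerById was reset to {} and stayed empty until
// something else refreshed it, so per-node routing could race connect().
// 1.1.8: connect() itself awaits the refresh.
await cluster.connect();
cluster.sendRequestToNode(leaderId); // brokers already known at this point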
package/dist/consumer/metadata.d.ts ADDED
@@ -0,0 +1,24 @@
+ import { IsolationLevel } from "../api/fetch";
+ import { Assignment } from "../api/sync-group";
+ import { Cluster } from "../cluster";
+ import { OffsetManager } from "./offset-manager";
+ export type Metadata = ReturnType<typeof createMetadata>;
+ type MetadataOptions = {
+     cluster: Cluster;
+     topics?: string[];
+     isolationLevel?: IsolationLevel;
+     allowTopicAutoCreation?: boolean;
+     fromBeginning?: boolean;
+     offsetManager?: OffsetManager;
+ };
+ export declare const createMetadata: ({ cluster, topics, isolationLevel, allowTopicAutoCreation, fromBeginning, offsetManager, }: MetadataOptions) => {
+     init: () => Promise<void>;
+     getTopicPartitions: () => Record<string, number[]>;
+     getTopicIdByName: (name: string) => string;
+     getTopicNameById: (id: string) => string;
+     getAssignment: () => Assignment;
+     setAssignment: (newAssignment: Assignment) => void;
+     getLeaderIdByTopicPartition: (topic: string, partition: number) => number;
+     getIsrNodeIdsByTopicPartition: (topic: string, partition: number) => number[];
+ };
+ export {};
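
A hypothetical call site for the new helper, based on this declaration and the implementation below (the cluster value and the "orders" topic are placeholders):

const metadata = createMetadata({ cluster, topics: ["orders"], fromBeginning: true });
await metadata.init(); // one METADATA request, then LIST_OFFSETS for every assigned partition

metadata.getTopicPartitions();                     // e.g. { orders: [0, 1, 2] }
metadata.getLeaderIdByTopicPartition("orders", 0); // broker nodeId to fetch from
metadata.setAssignment({ orders: [0] });           // narrow the assignment after a rebalance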
package/dist/consumer/metadata.js ADDED
@@ -0,0 +1,64 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.createMetadata = void 0;
+ const api_1 = require("../api");
+ const createMetadata = ({ cluster, topics, isolationLevel = 0 /* IsolationLevel.READ_UNCOMMITTED */, allowTopicAutoCreation = true, fromBeginning = false, offsetManager, }) => {
+     let topicPartitions = {};
+     let topicNameById = {};
+     let topicIdByName = {};
+     let leaderIdByTopicPartition = {};
+     let isrNodesByTopicPartition;
+     let assignment = {};
+     const fetchMetadata = async () => {
+         const response = await cluster.sendRequest(api_1.API.METADATA, {
+             allowTopicAutoCreation,
+             includeTopicAuthorizedOperations: false,
+             topics: topics?.map((name) => ({ id: null, name })) ?? null,
+         });
+         topicPartitions = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.partitions.map((partition) => partition.partitionIndex)]));
+         topicNameById = Object.fromEntries(response.topics.map((topic) => [topic.topicId, topic.name]));
+         topicIdByName = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.topicId]));
+         leaderIdByTopicPartition = Object.fromEntries(response.topics.map((topic) => [
+             topic.name,
+             Object.fromEntries(topic.partitions.map((partition) => [partition.partitionIndex, partition.leaderId])),
+         ]));
+         isrNodesByTopicPartition = Object.fromEntries(response.topics.map((topic) => [
+             topic.name,
+             Object.fromEntries(topic.partitions.map((partition) => [partition.partitionIndex, partition.isrNodes])),
+         ]));
+         assignment = topicPartitions;
+     };
+     const listOffsets = async () => {
+         const offsets = await cluster.sendRequest(api_1.API.LIST_OFFSETS, {
+             replicaId: -1,
+             isolationLevel,
+             topics: Object.entries(assignment)
+                 .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
+                 .map(({ topic, partition }) => ({
+                     name: topic,
+                     partitions: [{ partitionIndex: partition, currentLeaderEpoch: -1, timestamp: -1n }],
+                 })),
+         });
+         offsets.topics.forEach(({ name, partitions }) => {
+             partitions.forEach(({ partitionIndex, offset }) => {
+                 offsetManager?.resolve(name, partitionIndex, fromBeginning ? 0n : offset);
+             });
+         });
+     };
+     return {
+         init: async () => {
+             await fetchMetadata();
+             await listOffsets();
+         },
+         getTopicPartitions: () => topicPartitions,
+         getTopicIdByName: (name) => topicIdByName[name],
+         getTopicNameById: (id) => topicNameById[id],
+         getAssignment: () => assignment,
+         setAssignment: (newAssignment) => {
+             assignment = newAssignment;
+         },
+         getLeaderIdByTopicPartition: (topic, partition) => leaderIdByTopicPartition[topic][partition],
+         getIsrNodeIdsByTopicPartition: (topic, partition) => isrNodesByTopicPartition[topic][partition],
+     };
+ };
+ exports.createMetadata = createMetadata;
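
Note the starting-offset rule buried in listOffsets: the broker is always asked for the latest offset (timestamp: -1n), and fromBeginning merely substitutes 0n rather than issuing an earliest-offset query. Distilled as a hedged sketch (resolveStartOffset is not a library function):

// fromBeginning short-circuits to offset 0n; otherwise the latest offset is used,
// i.e. the consumer starts at the tail and only sees new messages.
const resolveStartOffset = (latest: bigint, fromBeginning: boolean): bigint =>
    fromBeginning ? 0n : latest;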
package/dist/examples/src/replicator.js ADDED
@@ -0,0 +1,34 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const kafkats_1 = require("kafkats");
+ const json_1 = require("./utils/json");
+ (async () => {
+     const brokers = [{ host: "localhost", port: 9092 }];
+     const topic = "playground-topic";
+     // const producer = createProducer({ brokers });
+     // const producerInterval = setInterval(async () => {
+     //     await producer.send([
+     //         {
+     //             topic,
+     //             partition: 0,
+     //             offset: 1n,
+     //             timestamp: BigInt(Date.now()),
+     //             key: null,
+     //             value: `PING ${Math.random()}`,
+     //             headers: { timestamp: Date.now().toString() }
+     //         }
+     //     ])
+     // }, 5000);
+     const consumer = await (0, kafkats_1.startConsumer)({
+         topics: [topic],
+         brokers,
+         onBatch: (messages) => {
+             console.log(JSON.stringify(messages, json_1.serializer, 2));
+         },
+     });
+     process.on("SIGINT", async () => {
+         await consumer.close();
+         // clearInterval(producerInterval);
+         // await producer.close();
+     });
+ })();
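
The producer half of this example ships commented out. Re-enabled, it would look roughly like the following (assembled from the comments above; createProducer is assumed to be exported alongside startConsumer):

const producer = createProducer({ brokers });
const producerInterval = setInterval(async () => {
    await producer.send([{
        topic,
        partition: 0,
        offset: 1n,
        timestamp: BigInt(Date.now()),
        key: null,
        value: `PING ${Math.random()}`,
        headers: { timestamp: Date.now().toString() },
    }]);
}, 5000);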
package/dist/examples/src/utils/json.js ADDED
@@ -0,0 +1,5 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.serializer = void 0;
+ const serializer = (_, value) => (typeof value === "bigint" ? value.toString() : value);
+ exports.serializer = serializer;
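
This replacer exists because JSON.stringify throws on bigint values, which this library uses for offsets and timestamps throughout. For example:

JSON.stringify({ offset: 123n });             // TypeError: Do not know how to serialize a BigInt
JSON.stringify({ offset: 123n }, serializer); // '{"offset":"123"}'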
package/dist/request-handler.d.ts ADDED
@@ -0,0 +1,16 @@
+ import { Connection } from "./connection";
+ import { Api } from "./utils/api";
+ type RequestHandlerOptions = {
+     clientId: string | null;
+ };
+ export declare class RequestHandler {
+     private connection;
+     private options;
+     private queue;
+     private currentBuffer;
+     constructor(connection: Connection, options: RequestHandlerOptions);
+     private handleData;
+     sendRequest<Request, Response>(api: Api<Request, Response>, args: Request): Promise<Response>;
+ }
+ export type SendRequest = typeof RequestHandler.prototype.sendRequest;
+ export {};
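
The generic sendRequest signature lets the response type flow from the Api definition, so call sites need no casts. A hypothetical call site (handler is a placeholder instance; the argument shape is copied from the METADATA requests in the test file below):

const metadata = await handler.sendRequest(API.METADATA, {
    topics: null,
    allowTopicAutoCreation: false,
    includeTopicAuthorizedOperations: false,
});
// metadata is typed as METADATA's Response, so this is fully inferred:
metadata.topics.forEach((topic) => topic.partitions.map((p) => p.leaderId));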
package/dist/request-handler.js ADDED
@@ -0,0 +1,67 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.RequestHandler = void 0;
+ const node_assert_1 = __importDefault(require("node:assert"));
+ const decoder_1 = require("./utils/decoder");
+ const encoder_1 = require("./utils/encoder");
+ class RequestHandler {
+     connection;
+     options;
+     queue = {};
+     currentBuffer = null;
+     constructor(connection, options) {
+         this.connection = connection;
+         this.options = options;
+         this.connection.on("data", this.handleData);
+     }
+     handleData(buffer) {
+         this.currentBuffer = this.currentBuffer ? Buffer.concat([this.currentBuffer, buffer]) : buffer;
+         if (this.currentBuffer.length < 4) {
+             return;
+         }
+         const decoder = (0, decoder_1.createDecoder)({ buffer: this.currentBuffer });
+         const size = decoder.readInt32();
+         if (size > decoder.buffer.length) {
+             return;
+         }
+         const correlationId = decoder.readInt32();
+         const request = this.queue[correlationId];
+         delete this.queue[correlationId];
+         request.callback(decoder);
+         // debug(handleData.name, 'Response offsets', {
+         //     offset: decoder.offset,
+         //     length: decoder.buffer.length,
+         //     rest: decoder.buffer.subarray(decoder.offset, decoder.buffer.length)?.toString(),
+         // });
+         (0, node_assert_1.default)(decoder.offset - 4 === size, `Buffer not correctly consumed: ${decoder.offset - 4} !== ${buffer.length}`);
+         this.currentBuffer = null;
+     }
+     async sendRequest(api, args) {
+         const correlationId = Math.floor(Math.random() * 1000000);
+         const encoder = (0, encoder_1.createEncoder)()
+             .writeInt16(api.apiKey)
+             .writeInt16(api.apiVersion)
+             .writeInt32(correlationId)
+             .writeString(this.options.clientId);
+         const request = api.request(encoder, args).value();
+         const buffer = (0, encoder_1.createEncoder)().writeInt32(request.length).write(request).value();
+         return new Promise(async (resolve, reject) => {
+             await this.connection.write(buffer);
+             this.queue[correlationId] = {
+                 callback: (decoder) => {
+                     try {
+                         const response = api.response(decoder);
+                         resolve(response);
+                     }
+                     catch (error) {
+                         reject(error);
+                     }
+                 },
+             };
+         });
+     }
+ }
+ exports.RequestHandler = RequestHandler;
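
handleData implements Kafka's standard framing: a 4-byte big-endian size prefix, then a payload whose first 4 bytes are the correlation ID echoed from the request, which is how responses are matched to pending promises in the queue. A standalone sketch of that framing with plain Buffers (illustrative only, independent of the library's encoder/decoder helpers):

// Frame: int32 size, then payload = int32 correlationId + body.
const frame = (correlationId: number, body: Buffer): Buffer => {
    const header = Buffer.alloc(8);
    header.writeInt32BE(body.length + 4, 0); // size covers correlationId + body
    header.writeInt32BE(correlationId, 4);
    return Buffer.concat([header, body]);
};

const parse = (buffer: Buffer) => {
    if (buffer.length < 4) return null;        // not even a size prefix yet
    const size = buffer.readInt32BE(0);
    if (buffer.length < 4 + size) return null; // incomplete frame: buffer and wait, as handleData does
    return { correlationId: buffer.readInt32BE(4), body: buffer.subarray(8, 4 + size) };
};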
package/dist/request-handler.test.d.ts ADDED
@@ -0,0 +1 @@
+ export declare const kafka: import("./client").Client;
package/dist/request-handler.test.js ADDED
@@ -0,0 +1,340 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.kafka = void 0;
+ const crypto_1 = require("crypto");
+ const fs_1 = require("fs");
+ const vitest_1 = require("vitest");
+ const api_1 = require("./api");
+ const find_coordinator_1 = require("./api/find-coordinator");
+ const client_1 = require("./client");
+ exports.kafka = (0, client_1.createKafkaClient)({
+     clientId: "kafka-ts",
+     bootstrapServers: [{ host: "localhost", port: 9092 }],
+     sasl: { mechanism: "PLAIN", username: "admin", password: "admin" },
+     ssl: { ca: (0, fs_1.readFileSync)("./certs/ca.crt").toString() },
+ });
+ vitest_1.describe.sequential("Request handler", () => {
+     const groupId = (0, crypto_1.randomBytes)(16).toString("hex");
+     let cluster;
+     (0, vitest_1.beforeAll)(async () => {
+         cluster = await exports.kafka.createCluster().connect();
+         const metadataResult = await cluster.sendRequest(api_1.API.METADATA, {
+             topics: null,
+             allowTopicAutoCreation: false,
+             includeTopicAuthorizedOperations: false,
+         });
+         if (metadataResult.topics.some((topic) => topic.name === "kafka-ts-test-topic")) {
+             await cluster.sendRequest(api_1.API.DELETE_TOPICS, {
+                 topics: [{ name: "kafka-ts-test-topic", topicId: null }],
+                 timeoutMs: 10000,
+             });
+         }
+     });
+     (0, vitest_1.afterAll)(async () => {
+         await cluster.disconnect();
+     });
+     (0, vitest_1.it)("should request api versions", async () => {
+         const result = await cluster.sendRequest(api_1.API.API_VERSIONS, {});
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     let topicId = "d6718d178e1b47c886441ad2d19faea5";
+     (0, vitest_1.it)("should create topics", async () => {
+         const result = await cluster.sendRequest(api_1.API.CREATE_TOPICS, {
+             topics: [
+                 {
+                     name: "kafka-ts-test-topic",
+                     numPartitions: 1,
+                     replicationFactor: 1,
+                     assignments: [],
+                     configs: [],
+                 },
+             ],
+             timeoutMs: 10000,
+             validateOnly: false,
+         });
+         topicId = result.topics[0].topicId;
+         result.topics.forEach((topic) => {
+             topic.topicId = "Any<UUID>";
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+         await new Promise((resolve) => setTimeout(resolve, 1000));
+     });
+     (0, vitest_1.it)("should request metadata for all topics", async () => {
+         const result = await cluster.sendRequest(api_1.API.METADATA, {
+             topics: null,
+             allowTopicAutoCreation: false,
+             includeTopicAuthorizedOperations: false,
+         });
+         result.controllerId = 0;
+         result.topics.forEach((topic) => {
+             topic.topicId = "Any<UUID>";
+             topic.partitions.forEach((partition) => {
+                 partition.leaderId = 0;
+                 partition.isrNodes = [0];
+                 partition.replicaNodes = [0];
+             });
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     let leaderId = 0;
+     (0, vitest_1.it)("should request metadata for a topic", async () => {
+         const result = await cluster.sendRequest(api_1.API.METADATA, {
+             topics: [{ id: topicId, name: "kafka-ts-test-topic" }],
+             allowTopicAutoCreation: false,
+             includeTopicAuthorizedOperations: false,
+         });
+         leaderId = result.topics[0].partitions[0].leaderId;
+         result.controllerId = 0;
+         result.topics.forEach((topic) => {
+             topic.topicId = "Any<UUID>";
+             topic.partitions.forEach((partition) => {
+                 partition.leaderId = 0;
+                 partition.isrNodes = [0];
+                 partition.replicaNodes = [0];
+             });
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should init producer id", async () => {
+         const result = await cluster.sendRequest(api_1.API.INIT_PRODUCER_ID, {
+             transactionalId: null,
+             transactionTimeoutMs: 0,
+             producerId: 0n,
+             producerEpoch: 0,
+         });
+         result.producerId = 0n;
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should produce messages", async () => {
+         const now = Date.now();
+         const result = await cluster.sendRequestToNode(leaderId)(api_1.API.PRODUCE, {
+             transactionalId: null,
+             timeoutMs: 10000,
+             acks: 1,
+             topicData: [
+                 {
+                     name: "kafka-ts-test-topic",
+                     partitionData: [
+                         {
+                             index: 0,
+                             baseOffset: 0n,
+                             partitionLeaderEpoch: 0,
+                             attributes: 0,
+                             baseSequence: 0,
+                             baseTimestamp: BigInt(now),
+                             lastOffsetDelta: 0,
+                             maxTimestamp: BigInt(now),
+                             producerEpoch: 0,
+                             producerId: 9n,
+                             records: [
+                                 {
+                                     attributes: 0,
+                                     offsetDelta: 0,
+                                     timestampDelta: 0n,
+                                     key: "key",
+                                     value: "value",
+                                     headers: [
+                                         {
+                                             key: "header-key",
+                                             value: "header-value",
+                                         },
+                                     ],
+                                 },
+                             ],
+                         },
+                     ],
+                 },
+             ],
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should fetch messages", async () => {
+         const result = await cluster.sendRequestToNode(leaderId)(api_1.API.FETCH, {
+             maxWaitMs: 100,
+             minBytes: 1,
+             maxBytes: 10485760,
+             isolationLevel: 1,
+             sessionId: 0,
+             sessionEpoch: -1,
+             topics: [
+                 {
+                     topicId,
+                     partitions: [
+                         {
+                             partition: 0,
+                             currentLeaderEpoch: -1,
+                             fetchOffset: 0n,
+                             lastFetchedEpoch: 0,
+                             logStartOffset: -1n,
+                             partitionMaxBytes: 10485760,
+                         },
+                     ],
+                 },
+             ],
+             forgottenTopicsData: [],
+             rackId: "",
+         });
+         result.responses.forEach((response) => {
+             response.topicId = "Any<UUID>";
+             response.partitions.forEach((partition) => {
+                 partition.records.forEach((record) => {
+                     (0, vitest_1.expect)(record.baseTimestamp).toBeGreaterThan(1721926744730n);
+                     (0, vitest_1.expect)(record.maxTimestamp).toBeGreaterThan(1721926744730n);
+                     (0, vitest_1.expect)(record.crc).toBeGreaterThan(0);
+                     record.baseTimestamp = 0n;
+                     record.maxTimestamp = 0n;
+                     record.crc = 0;
+                 });
+             });
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     let coordinatorId = -1;
+     (0, vitest_1.it)("should find coordinator", async () => {
+         const result = await cluster.sendRequest(api_1.API.FIND_COORDINATOR, { keyType: find_coordinator_1.KEY_TYPE.GROUP, keys: [groupId] });
+         result.coordinators.forEach((coordinator) => {
+             coordinator.key = "Any<String>";
+         });
+         coordinatorId = result.coordinators[0].nodeId;
+         result.coordinators.forEach((coordinator) => {
+             coordinator.nodeId = 1;
+             coordinator.port = 9093;
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     let memberId = "";
+     (0, vitest_1.it)("should fail join group request with new memberId", async () => {
+         try {
+             const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.JOIN_GROUP, {
+                 groupId,
+                 sessionTimeoutMs: 30000,
+                 rebalanceTimeoutMs: 60000,
+                 memberId,
+                 groupInstanceId: null,
+                 protocolType: "consumer",
+                 protocols: [
+                     {
+                         name: "RoundRobinAssigner",
+                         metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
+                     },
+                 ],
+                 reason: null,
+             });
+             (0, vitest_1.expect)(false, "Should throw an error").toBe(true);
+         }
+         catch (error) {
+             const { response } = error;
+             memberId = response.memberId;
+             response.memberId = "Any<UUID>";
+             (0, vitest_1.expect)(response).toMatchSnapshot();
+         }
+     });
+     (0, vitest_1.it)("should join group", async () => {
+         const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.JOIN_GROUP, {
+             groupId,
+             sessionTimeoutMs: 30000,
+             rebalanceTimeoutMs: 60000,
+             memberId,
+             groupInstanceId: null,
+             protocolType: "consumer",
+             protocols: [
+                 {
+                     name: "RoundRobinAssigner",
+                     metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
+                 },
+             ],
+             reason: null,
+         });
+         result.memberId = "Any<UUID>";
+         result.leader = "Any<UUID>";
+         result.members.forEach((member) => {
+             member.memberId = "Any<UUID>";
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should sync group", async () => {
+         const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.SYNC_GROUP, {
+             groupId,
+             generationId: 1,
+             memberId,
+             groupInstanceId: null,
+             protocolType: "consumer",
+             protocolName: "RoundRobinAssigner",
+             assignments: [
+                 {
+                     memberId,
+                     assignment: { "kafka-test-topic": [0] },
+                 },
+             ],
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should commit offsets", async () => {
+         const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.OFFSET_COMMIT, {
+             groupId,
+             generationIdOrMemberEpoch: 1,
+             memberId,
+             groupInstanceId: null,
+             topics: [
+                 {
+                     name: "kafka-ts-test-topic",
+                     partitions: [
+                         { partitionIndex: 0, committedOffset: 1n, committedLeaderEpoch: 0, committedMetadata: null },
+                     ],
+                 },
+             ],
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should fetch offsets", async () => {
+         const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.OFFSET_FETCH, {
+             groups: [
+                 {
+                     groupId,
+                     memberId,
+                     memberEpoch: 0,
+                     topics: [
+                         {
+                             name: "kafka-ts-test-topic",
+                             partitionIndexes: [0],
+                         },
+                     ],
+                 },
+             ],
+             requireStable: false,
+         });
+         result.groups.forEach((group) => {
+             group.groupId = "Any<String>";
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should heartbeat", async () => {
+         const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.HEARTBEAT, {
+             groupId,
+             generationId: 1,
+             memberId,
+             groupInstanceId: null,
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should leave group", async () => {
+         const result = await cluster.sendRequestToNode(coordinatorId)(api_1.API.LEAVE_GROUP, {
+             groupId,
+             members: [{ memberId, groupInstanceId: null, reason: null }],
+         });
+         result.members.forEach((member) => {
+             member.memberId = "Any<UUID>";
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+     (0, vitest_1.it)("should delete topics", async () => {
+         const result = await cluster.sendRequest(api_1.API.DELETE_TOPICS, {
+             topics: [{ name: "kafka-ts-test-topic", topicId: null }],
+             timeoutMs: 10000,
+         });
+         result.responses.forEach((response) => {
+             response.topicId = "Any<UUID>";
+         });
+         (0, vitest_1.expect)(result).toMatchSnapshot();
+     });
+ });
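
The two JOIN_GROUP tests exercise Kafka's standard join dance: a first attempt with an empty memberId is rejected, but the error response carries the broker-assigned memberId, which the retry must echo back. The pattern, distilled (sendJoinGroup is a placeholder for the sendRequestToNode call above):

let memberId = "";
try {
    await sendJoinGroup({ groupId, memberId }); // first attempt: empty memberId is rejected
} catch (error: any) {
    memberId = error.response.memberId;         // broker assigns the id in the error response
}
await sendJoinGroup({ groupId, memberId });     // retry with the assigned id succeeds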
package/dist/src/api/api-versions.js ADDED
@@ -0,0 +1,18 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.API_VERSIONS = void 0;
+ const api_js_1 = require("../utils/api.js");
+ exports.API_VERSIONS = (0, api_js_1.createApi)({
+     apiKey: 18,
+     apiVersion: 2,
+     request: (encoder) => encoder.value(),
+     response: (decoder) => ({
+         errorCode: decoder.readInt16(),
+         versions: decoder.readArray((version) => ({
+             apiKey: version.readInt16(),
+             minVersion: version.readInt16(),
+             maxVersion: version.readInt16(),
+         })),
+         throttleTimeMs: decoder.readInt32(),
+     }),
+ });
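
Every file under src/api follows this createApi shape: a fixed apiKey/apiVersion, a request writer, and a response reader that consumes fields in wire order. The Api type itself is not shown in this diff; a rough sketch inferred from the call sites (the field names are assumptions, not the library's actual utils/api source):

// Encoder/Decoder stand in for the library's ./utils/encoder and ./utils/decoder.
interface Encoder { value(): Buffer /* plus chainable writeX methods */ }
interface Decoder { readInt16(): number; readInt32(): number /* plus further readX methods */ }

type Api<Request, Response> = {
    apiKey: number;     // Kafka API key: 18 = ApiVersions, 19 = CreateTopics, 20 = DeleteTopics
    apiVersion: number; // the single protocol version this client encodes per API
    request: (encoder: Encoder, data: Request) => unknown; // writes fields in wire order
    response: (decoder: Decoder) => Response;              // reads them back in the same order
};

// Hypothetical identity helper, consistent with how createApi is called in this diff:
const createApi = <Req, Res>(api: Api<Req, Res>): Api<Req, Res> => api;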
package/dist/src/api/create-topics.js ADDED
@@ -0,0 +1,46 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CREATE_TOPICS = void 0;
+ const api_1 = require("../utils/api");
+ exports.CREATE_TOPICS = (0, api_1.createApi)({
+     apiKey: 19,
+     apiVersion: 7,
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeCompactArray(data.topics, (encoder, topic) => encoder
+         .writeCompactString(topic.name)
+         .writeInt32(topic.numPartitions)
+         .writeInt16(topic.replicationFactor)
+         .writeCompactArray(topic.assignments, (encoder, assignment) => encoder
+         .writeInt32(assignment.partitionIndex)
+         .writeCompactArray(assignment.brokerIds, (encoder, brokerId) => encoder.writeInt32(brokerId))
+         .writeUVarInt(0))
+         .writeCompactArray(topic.configs, (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
+         .writeUVarInt(0))
+         .writeInt32(data.timeoutMs)
+         .writeBoolean(data.validateOnly)
+         .writeUVarInt(0)
+         .value(),
+     response: (decoder) => ({
+         _tag: decoder.readTagBuffer(),
+         throttleTimeMs: decoder.readInt32(),
+         topics: decoder.readCompactArray((topic) => ({
+             name: topic.readCompactString(),
+             topicId: topic.readUUID(),
+             errorCode: topic.readInt16(),
+             errorMessage: topic.readCompactString(),
+             numPartitions: topic.readInt32(),
+             replicationFactor: topic.readInt16(),
+             configs: topic.readCompactArray((config) => ({
+                 name: config.readCompactString(),
+                 value: config.readCompactString(),
+                 readOnly: config.readBoolean(),
+                 configSource: config.readInt8(),
+                 isSensitive: config.readBoolean(),
+                 _tag: config.readTagBuffer(),
+             })),
+             _tag: topic.readTagBuffer(),
+         })),
+         _tag2: decoder.readTagBuffer(),
+     }),
+ });
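
CreateTopics v7 is a flexible API version, which is why the encoder emits writeUVarInt(0) (empty tagged-field sections) after each structure and uses compact strings/arrays, whose lengths go on the wire as unsigned varints of length + 1 so that 0 can mean null. The length rule, distilled (a standalone sketch of the Kafka wire convention from KIP-482, not library code):

// Compact lengths: null -> 0, otherwise N + 1.
const compactLength = (n: number | null): number => (n === null ? 0 : n + 1);

compactLength(null); // 0 -> null string/array
compactLength(0);    // 1 -> empty
compactLength(8);    // 9 -> 8 bytes/elements follow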
package/dist/src/api/delete-topics.js ADDED
@@ -0,0 +1,26 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.DELETE_TOPICS = void 0;
+ const api_1 = require("../utils/api");
+ exports.DELETE_TOPICS = (0, api_1.createApi)({
+     apiKey: 20,
+     apiVersion: 6,
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeCompactArray(data.topics, (encoder, topic) => encoder.writeCompactString(topic.name).writeUUID(topic.topicId).writeUVarInt(0))
+         .writeInt32(data.timeoutMs)
+         .writeUVarInt(0)
+         .value(),
+     response: (decoder) => ({
+         _tag: decoder.readTagBuffer(),
+         throttleTimeMs: decoder.readInt32(),
+         responses: decoder.readCompactArray((decoder) => ({
+             name: decoder.readCompactString(),
+             topicId: decoder.readUUID(),
+             errorCode: decoder.readInt16(),
+             errorMessage: decoder.readCompactString(),
+             _tag: decoder.readTagBuffer(),
+         })),
+         _tag2: decoder.readTagBuffer(),
+     }),
+ });
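
Usage matches the test suite above: DeleteTopics v6 identifies each topic by name or by topicId, with the other left null. The call site from the tests, for reference:

await cluster.sendRequest(API.DELETE_TOPICS, {
    topics: [{ name: "kafka-ts-test-topic", topicId: null }], // name or topicId, the other null
    timeoutMs: 10000,
});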