kafka-ts 0.0.2-beta → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/.prettierrc +3 -2
  2. package/README.md +109 -39
  3. package/dist/api/api-versions.d.ts +9 -0
  4. package/dist/api/api-versions.js +24 -0
  5. package/dist/api/create-topics.d.ts +38 -0
  6. package/dist/api/create-topics.js +53 -0
  7. package/dist/api/delete-topics.d.ts +18 -0
  8. package/dist/api/delete-topics.js +33 -0
  9. package/dist/api/fetch.d.ts +84 -0
  10. package/dist/api/fetch.js +142 -0
  11. package/dist/api/find-coordinator.d.ts +21 -0
  12. package/dist/api/find-coordinator.js +39 -0
  13. package/dist/api/heartbeat.d.ts +11 -0
  14. package/dist/api/heartbeat.js +27 -0
  15. package/dist/api/index.d.ts +578 -0
  16. package/dist/api/index.js +165 -0
  17. package/dist/api/init-producer-id.d.ts +13 -0
  18. package/dist/api/init-producer-id.js +29 -0
  19. package/dist/api/join-group.d.ts +34 -0
  20. package/dist/api/join-group.js +51 -0
  21. package/dist/api/leave-group.d.ts +19 -0
  22. package/dist/api/leave-group.js +39 -0
  23. package/dist/api/list-offsets.d.ts +29 -0
  24. package/dist/api/list-offsets.js +48 -0
  25. package/dist/api/metadata.d.ts +40 -0
  26. package/dist/api/metadata.js +58 -0
  27. package/dist/api/offset-commit.d.ts +28 -0
  28. package/dist/api/offset-commit.js +48 -0
  29. package/dist/api/offset-fetch.d.ts +33 -0
  30. package/dist/api/offset-fetch.js +57 -0
  31. package/dist/api/produce.d.ts +54 -0
  32. package/dist/api/produce.js +126 -0
  33. package/dist/api/sasl-authenticate.d.ts +11 -0
  34. package/dist/api/sasl-authenticate.js +23 -0
  35. package/dist/api/sasl-handshake.d.ts +6 -0
  36. package/dist/api/sasl-handshake.js +19 -0
  37. package/dist/api/sync-group.d.ts +24 -0
  38. package/dist/api/sync-group.js +36 -0
  39. package/dist/auth/index.d.ts +2 -0
  40. package/dist/auth/index.js +8 -0
  41. package/dist/auth/plain.d.ts +5 -0
  42. package/dist/auth/plain.js +12 -0
  43. package/dist/auth/scram.d.ts +9 -0
  44. package/dist/auth/scram.js +40 -0
  45. package/dist/broker.d.ts +30 -0
  46. package/dist/broker.js +55 -0
  47. package/dist/client.d.ts +23 -0
  48. package/dist/client.js +36 -0
  49. package/dist/cluster.d.ts +27 -0
  50. package/dist/cluster.js +70 -0
  51. package/dist/cluster.test.d.ts +1 -0
  52. package/dist/cluster.test.js +345 -0
  53. package/dist/codecs/gzip.d.ts +2 -0
  54. package/dist/codecs/gzip.js +8 -0
  55. package/dist/codecs/index.d.ts +2 -0
  56. package/dist/codecs/index.js +17 -0
  57. package/dist/codecs/none.d.ts +2 -0
  58. package/dist/codecs/none.js +7 -0
  59. package/dist/codecs/types.d.ts +5 -0
  60. package/dist/codecs/types.js +2 -0
  61. package/dist/connection.d.ts +26 -0
  62. package/dist/connection.js +175 -0
  63. package/dist/consumer/consumer-group.d.ts +41 -0
  64. package/dist/consumer/consumer-group.js +217 -0
  65. package/dist/consumer/consumer-metadata.d.ts +7 -0
  66. package/dist/consumer/consumer-metadata.js +14 -0
  67. package/dist/consumer/consumer.d.ts +44 -0
  68. package/dist/consumer/consumer.js +225 -0
  69. package/dist/consumer/fetch-manager.d.ts +33 -0
  70. package/dist/consumer/fetch-manager.js +140 -0
  71. package/dist/consumer/fetcher.d.ts +25 -0
  72. package/dist/consumer/fetcher.js +64 -0
  73. package/dist/consumer/offset-manager.d.ts +22 -0
  74. package/dist/consumer/offset-manager.js +66 -0
  75. package/dist/consumer/processor.d.ts +19 -0
  76. package/dist/consumer/processor.js +59 -0
  77. package/dist/distributors/assignments-to-replicas.d.ts +16 -0
  78. package/dist/distributors/assignments-to-replicas.js +59 -0
  79. package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
  80. package/dist/distributors/assignments-to-replicas.test.js +40 -0
  81. package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
  82. package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
  83. package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
  84. package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
  85. package/dist/distributors/partitioner.d.ts +7 -0
  86. package/dist/distributors/partitioner.js +23 -0
  87. package/dist/index.d.ts +9 -0
  88. package/dist/index.js +26 -0
  89. package/dist/metadata.d.ts +24 -0
  90. package/dist/metadata.js +106 -0
  91. package/dist/producer/producer.d.ts +24 -0
  92. package/dist/producer/producer.js +131 -0
  93. package/dist/types.d.ts +11 -0
  94. package/dist/types.js +2 -0
  95. package/dist/utils/api.d.ts +9 -0
  96. package/dist/utils/api.js +5 -0
  97. package/dist/utils/crypto.d.ts +8 -0
  98. package/dist/utils/crypto.js +18 -0
  99. package/dist/utils/decoder.d.ts +30 -0
  100. package/dist/utils/decoder.js +152 -0
  101. package/dist/utils/delay.d.ts +1 -0
  102. package/dist/utils/delay.js +5 -0
  103. package/dist/utils/encoder.d.ts +28 -0
  104. package/dist/utils/encoder.js +125 -0
  105. package/dist/utils/error.d.ts +11 -0
  106. package/dist/utils/error.js +27 -0
  107. package/dist/utils/logger.d.ts +9 -0
  108. package/dist/utils/logger.js +32 -0
  109. package/dist/utils/memo.d.ts +1 -0
  110. package/dist/utils/memo.js +16 -0
  111. package/dist/utils/murmur2.d.ts +3 -0
  112. package/dist/utils/murmur2.js +40 -0
  113. package/dist/utils/retrier.d.ts +10 -0
  114. package/dist/utils/retrier.js +22 -0
  115. package/dist/utils/tracer.d.ts +5 -0
  116. package/dist/utils/tracer.js +39 -0
  117. package/package.json +30 -19
  118. package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap} +329 -26
  119. package/src/api/api-versions.ts +2 -2
  120. package/src/api/create-topics.ts +2 -2
  121. package/src/api/delete-topics.ts +2 -2
  122. package/src/api/fetch.ts +86 -31
  123. package/src/api/find-coordinator.ts +2 -2
  124. package/src/api/heartbeat.ts +2 -2
  125. package/src/api/index.ts +21 -19
  126. package/src/api/init-producer-id.ts +2 -2
  127. package/src/api/join-group.ts +3 -3
  128. package/src/api/leave-group.ts +2 -2
  129. package/src/api/list-offsets.ts +3 -3
  130. package/src/api/metadata.ts +3 -3
  131. package/src/api/offset-commit.ts +2 -2
  132. package/src/api/offset-fetch.ts +2 -2
  133. package/src/api/produce.ts +17 -20
  134. package/src/api/sasl-authenticate.ts +2 -2
  135. package/src/api/sasl-handshake.ts +2 -2
  136. package/src/api/sync-group.ts +2 -2
  137. package/src/auth/index.ts +2 -0
  138. package/src/auth/plain.ts +10 -0
  139. package/src/auth/scram.ts +52 -0
  140. package/src/broker.ts +12 -14
  141. package/src/client.ts +7 -7
  142. package/src/cluster.test.ts +78 -74
  143. package/src/cluster.ts +43 -45
  144. package/src/codecs/gzip.ts +9 -0
  145. package/src/codecs/index.ts +16 -0
  146. package/src/codecs/none.ts +6 -0
  147. package/src/codecs/types.ts +4 -0
  148. package/src/connection.ts +49 -33
  149. package/src/consumer/consumer-group.ts +57 -35
  150. package/src/consumer/consumer-metadata.ts +2 -2
  151. package/src/consumer/consumer.ts +115 -92
  152. package/src/consumer/fetch-manager.ts +169 -0
  153. package/src/consumer/fetcher.ts +64 -0
  154. package/src/consumer/offset-manager.ts +24 -13
  155. package/src/consumer/processor.ts +53 -0
  156. package/src/distributors/assignments-to-replicas.test.ts +7 -7
  157. package/src/distributors/assignments-to-replicas.ts +2 -4
  158. package/src/distributors/messages-to-topic-partition-leaders.test.ts +6 -6
  159. package/src/distributors/partitioner.ts +27 -0
  160. package/src/index.ts +9 -3
  161. package/src/metadata.ts +8 -4
  162. package/src/producer/producer.ts +30 -20
  163. package/src/types.ts +5 -3
  164. package/src/utils/api.ts +5 -5
  165. package/src/utils/crypto.ts +15 -0
  166. package/src/utils/decoder.ts +14 -8
  167. package/src/utils/encoder.ts +34 -27
  168. package/src/utils/error.ts +3 -3
  169. package/src/utils/logger.ts +37 -0
  170. package/src/utils/murmur2.ts +44 -0
  171. package/src/utils/retrier.ts +1 -1
  172. package/src/utils/tracer.ts +41 -20
  173. package/tsconfig.json +16 -16
  174. package/.github/workflows/release.yml +0 -17
  175. package/certs/ca.crt +0 -29
  176. package/certs/ca.key +0 -52
  177. package/certs/ca.srl +0 -1
  178. package/certs/kafka.crt +0 -29
  179. package/certs/kafka.csr +0 -26
  180. package/certs/kafka.key +0 -52
  181. package/certs/kafka.keystore.jks +0 -0
  182. package/certs/kafka.truststore.jks +0 -0
  183. package/docker-compose.yml +0 -104
  184. package/examples/package-lock.json +0 -31
  185. package/examples/package.json +0 -14
  186. package/examples/src/client.ts +0 -9
  187. package/examples/src/consumer.ts +0 -17
  188. package/examples/src/create-topic.ts +0 -37
  189. package/examples/src/producer.ts +0 -24
  190. package/examples/src/replicator.ts +0 -25
  191. package/examples/src/utils/json.ts +0 -1
  192. package/examples/tsconfig.json +0 -7
  193. package/log4j.properties +0 -95
  194. package/scripts/generate-certs.sh +0 -24
  195. package/src/utils/debug.ts +0 -9
package/src/cluster.test.ts CHANGED
@@ -1,35 +1,37 @@
-import { randomBytes } from "crypto";
-import { readFileSync } from "fs";
-import { afterAll, beforeAll, describe, expect, it } from "vitest";
-import { API } from "./api";
-import { KEY_TYPE } from "./api/find-coordinator";
-import { createKafkaClient } from "./client";
-import { Cluster } from "./cluster";
-import { KafkaTSApiError } from "./utils/error";
+import { randomBytes } from 'crypto';
+import { readFileSync } from 'fs';
+import { afterAll, beforeAll, describe, expect, it } from 'vitest';
+import { API } from './api';
+import { KEY_TYPE } from './api/find-coordinator';
+import { saslPlain } from './auth';
+import { createKafkaClient } from './client';
+import { Cluster } from './cluster';
+import { KafkaTSApiError } from './utils/error';
 
-export const kafka = createKafkaClient({
-    clientId: "kafka-ts",
-    bootstrapServers: [{ host: "localhost", port: 9092 }],
-    sasl: { mechanism: "PLAIN", username: "admin", password: "admin" },
-    ssl: { ca: readFileSync("./certs/ca.crt").toString() },
+const kafka = createKafkaClient({
+    clientId: 'kafka-ts',
+    bootstrapServers: [{ host: 'localhost', port: 9092 }],
+    sasl: saslPlain({ username: 'admin', password: 'admin' }),
+    ssl: { ca: readFileSync('./certs/ca.crt').toString() },
 });
 
-describe.sequential("Request handler", () => {
-    const groupId = randomBytes(16).toString("hex");
+describe.sequential('Low-level API', () => {
+    const groupId = randomBytes(16).toString('hex');
 
     let cluster: Cluster;
 
     beforeAll(async () => {
-        cluster = await kafka.createCluster().connect();
+        cluster = await kafka.createCluster();
+        await cluster.connect();
 
         const metadataResult = await cluster.sendRequest(API.METADATA, {
             topics: null,
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
-        if (metadataResult.topics.some((topic) => topic.name === "kafka-ts-test-topic")) {
+        if (metadataResult.topics.some((topic) => topic.name === 'kafka-ts-test-topic')) {
             await cluster.sendRequest(API.DELETE_TOPICS, {
-                topics: [{ name: "kafka-ts-test-topic", topicId: null }],
+                topics: [{ name: 'kafka-ts-test-topic', topicId: null }],
                 timeoutMs: 10000,
             });
         }
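The client setup above reflects the headline API change in 0.0.2: the `sasl` option now takes a provider created by `saslPlain(...)` from the new `src/auth` module (a SCRAM provider is added in `src/auth/scram.ts`) instead of a raw `{ mechanism, username, password }` object. A minimal sketch of the new configuration, assuming `createKafkaClient` and `saslPlain` are re-exported from the package root (the test imports them from relative paths, so the public export surface is an assumption here):

    import { readFileSync } from 'fs';
    import { createKafkaClient, saslPlain } from 'kafka-ts'; // assumed root re-exports

    const kafka = createKafkaClient({
        clientId: 'my-service',
        bootstrapServers: [{ host: 'localhost', port: 9092 }],
        sasl: saslPlain({ username: 'admin', password: 'admin' }), // replaces { mechanism: 'PLAIN', ... }
        ssl: { ca: readFileSync('./certs/ca.crt').toString() },
    });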
@@ -39,20 +41,20 @@ describe.sequential("Request handler", () => {
         await cluster.disconnect();
     });
 
-    it("should request api versions", async () => {
+    it('should request api versions', async () => {
         const result = await cluster.sendRequest(API.API_VERSIONS, {});
         expect(result).toMatchSnapshot();
     });
 
-    let topicId: string = "d6718d178e1b47c886441ad2d19faea5";
+    let topicId: string = 'd6718d178e1b47c886441ad2d19faea5';
 
-    it("should create topics", async () => {
+    it('should create topics', async () => {
         const result = await cluster.sendRequest(API.CREATE_TOPICS, {
             topics: [
                 {
-                    name: "kafka-ts-test-topic",
-                    numPartitions: 1,
-                    replicationFactor: 1,
+                    name: 'kafka-ts-test-topic',
+                    numPartitions: 10,
+                    replicationFactor: 3,
                     assignments: [],
                     configs: [],
                 },
@@ -62,23 +64,23 @@ describe.sequential("Request handler", () => {
         });
         topicId = result.topics[0].topicId;
         result.topics.forEach((topic) => {
-            topic.topicId = "Any<UUID>";
+            topic.topicId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
 
         await new Promise((resolve) => setTimeout(resolve, 1000));
     });
 
-    it("should request metadata for all topics", async () => {
+    it('should request metadata for all topics', async () => {
         const result = await cluster.sendRequest(API.METADATA, {
             topics: null,
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
         result.controllerId = 0;
-        result.topics = result.topics.filter((topic) => topic.name !== "__consumer_offsets");
+        result.topics = result.topics.filter((topic) => topic.name !== '__consumer_offsets');
         result.topics.forEach((topic) => {
-            topic.topicId = "Any<UUID>";
+            topic.topicId = 'Any<UUID>';
             topic.partitions.forEach((partition) => {
                 partition.leaderId = 0;
                 partition.isrNodes = [0];
@@ -88,18 +90,20 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
+    let partitionIndex = 0;
     let leaderId = 0;
 
-    it("should request metadata for a topic", async () => {
+    it('should request metadata for a topic', async () => {
         const result = await cluster.sendRequest(API.METADATA, {
-            topics: [{ id: topicId, name: "kafka-ts-test-topic" }],
+            topics: [{ id: topicId, name: 'kafka-ts-test-topic' }],
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
+        partitionIndex = result.topics[0].partitions[0].partitionIndex;
         leaderId = result.topics[0].partitions[0].leaderId;
         result.controllerId = 0;
         result.topics.forEach((topic) => {
-            topic.topicId = "Any<UUID>";
+            topic.topicId = 'Any<UUID>';
             topic.partitions.forEach((partition) => {
                 partition.leaderId = 0;
                 partition.isrNodes = [0];
@@ -111,7 +115,7 @@ describe.sequential("Request handler", () => {
 
     let producerId = 9n;
 
-    it("should init producer id", async () => {
+    it('should init producer id', async () => {
         const result = await cluster.sendRequest(API.INIT_PRODUCER_ID, {
             transactionalId: null,
             transactionTimeoutMs: 0,
@@ -122,7 +126,7 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should produce messages", async () => {
+    it('should produce messages', async () => {
         const now = Date.now();
         const result = await cluster.sendRequestToNode(leaderId)(API.PRODUCE, {
             transactionalId: null,
@@ -130,10 +134,10 @@ describe.sequential("Request handler", () => {
             acks: 1,
             topicData: [
                 {
-                    name: "kafka-ts-test-topic",
+                    name: 'kafka-ts-test-topic',
                     partitionData: [
                         {
-                            index: 0,
+                            index: partitionIndex,
                             baseOffset: 0n,
                             partitionLeaderEpoch: 0,
                             attributes: 0,
@@ -148,12 +152,12 @@ describe.sequential("Request handler", () => {
                                     attributes: 0,
                                     offsetDelta: 0,
                                     timestampDelta: 0n,
-                                    key: "key",
-                                    value: "value",
+                                    key: Buffer.from('key'),
+                                    value: Buffer.from('value'),
                                     headers: [
                                         {
-                                            key: "header-key",
-                                            value: "header-value",
+                                            key: Buffer.from('header-key'),
+                                            value: Buffer.from('header-value'),
                                         },
                                     ],
                                 },
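Record keys, values, and header values in the PRODUCE payload are now passed as `Buffer`s instead of strings. A hedged helper sketch for callers that still hold string data (the record fields match the test above; the helper itself is illustrative, not part of kafka-ts):

    // Illustrative only: build a 0.0.2-style record from string inputs.
    const toRecord = (key: string, value: string, headers: Record<string, string> = {}) => ({
        attributes: 0,
        offsetDelta: 0,
        timestampDelta: 0n,
        key: Buffer.from(key),
        value: Buffer.from(value),
        headers: Object.entries(headers).map(([k, v]) => ({ key: Buffer.from(k), value: Buffer.from(v) })),
    });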
@@ -166,7 +170,7 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should fetch messages", async () => {
+    it('should fetch messages', async () => {
         const result = await cluster.sendRequestToNode(leaderId)(API.FETCH, {
             maxWaitMs: 100,
             minBytes: 1,
@@ -179,7 +183,7 @@ describe.sequential("Request handler", () => {
                     topicId,
                     partitions: [
                         {
-                            partition: 0,
+                            partition: partitionIndex,
                             currentLeaderEpoch: -1,
                             fetchOffset: 0n,
                             lastFetchedEpoch: 0,
@@ -190,10 +194,10 @@ describe.sequential("Request handler", () => {
                 },
             ],
             forgottenTopicsData: [],
-            rackId: "",
+            rackId: '',
         });
         result.responses.forEach((response) => {
-            response.topicId = "Any<UUID>";
+            response.topicId = 'Any<UUID>';
             response.partitions.forEach((partition) => {
                 partition.records.forEach((record) => {
                     expect(record.baseTimestamp).toBeGreaterThan(1721926744730n);
@@ -211,10 +215,10 @@ describe.sequential("Request handler", () => {
 
     let coordinatorId = -1;
 
-    it("should find coordinator", async () => {
+    it('should find coordinator', async () => {
         const result = await cluster.sendRequest(API.FIND_COORDINATOR, { keyType: KEY_TYPE.GROUP, keys: [groupId] });
         result.coordinators.forEach((coordinator) => {
-            coordinator.key = "Any<String>";
+            coordinator.key = 'Any<String>';
         });
         coordinatorId = result.coordinators[0].nodeId;
         result.coordinators.forEach((coordinator) => {
@@ -224,9 +228,9 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    let memberId = "";
+    let memberId = '';
 
-    it("should fail join group request with new memberId", async () => {
+    it('should fail join group request with new memberId', async () => {
         try {
             const result = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
                 groupId,
@@ -234,67 +238,67 @@ describe.sequential("Request handler", () => {
                 rebalanceTimeoutMs: 60000,
                 memberId,
                 groupInstanceId: null,
-                protocolType: "consumer",
+                protocolType: 'consumer',
                 protocols: [
                     {
-                        name: "RoundRobinAssigner",
-                        metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
+                        name: 'RoundRobinAssigner',
+                        metadata: { version: 0, topics: ['kafka-ts-test-topic'] },
                     },
                 ],
                 reason: null,
             });
-            expect(false, "Should throw an error").toBe(true);
+            expect(false, 'Should throw an error').toBe(true);
         } catch (error) {
             const { response } = error as KafkaTSApiError;
             memberId = response.memberId;
-            response.memberId = "Any<UUID>";
+            response.memberId = 'Any<UUID>';
             expect(response).toMatchSnapshot();
         }
     });
 
-    it("should join group", async () => {
+    it('should join group', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.JOIN_GROUP, {
             groupId,
             sessionTimeoutMs: 30000,
             rebalanceTimeoutMs: 60000,
             memberId,
             groupInstanceId: null,
-            protocolType: "consumer",
+            protocolType: 'consumer',
             protocols: [
                 {
-                    name: "RoundRobinAssigner",
-                    metadata: { version: 0, topics: ["kafka-ts-test-topic"] },
+                    name: 'RoundRobinAssigner',
+                    metadata: { version: 0, topics: ['kafka-ts-test-topic'] },
                 },
             ],
             reason: null,
         });
-        result.memberId = "Any<UUID>";
-        result.leader = "Any<UUID>";
+        result.memberId = 'Any<UUID>';
+        result.leader = 'Any<UUID>';
         result.members.forEach((member) => {
-            member.memberId = "Any<UUID>";
+            member.memberId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should sync group", async () => {
+    it('should sync group', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.SYNC_GROUP, {
             groupId,
             generationId: 1,
             memberId,
             groupInstanceId: null,
-            protocolType: "consumer",
-            protocolName: "RoundRobinAssigner",
+            protocolType: 'consumer',
+            protocolName: 'RoundRobinAssigner',
             assignments: [
                 {
                     memberId,
-                    assignment: { "kafka-test-topic": [0] },
+                    assignment: { 'kafka-test-topic': [0] },
                 },
             ],
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should commit offsets", async () => {
+    it('should commit offsets', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.OFFSET_COMMIT, {
             groupId,
             generationIdOrMemberEpoch: 1,
@@ -302,7 +306,7 @@ describe.sequential("Request handler", () => {
             groupInstanceId: null,
             topics: [
                 {
-                    name: "kafka-ts-test-topic",
+                    name: 'kafka-ts-test-topic',
                     partitions: [
                         { partitionIndex: 0, committedOffset: 1n, committedLeaderEpoch: 0, committedMetadata: null },
                     ],
@@ -312,7 +316,7 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should fetch offsets", async () => {
+    it('should fetch offsets', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.OFFSET_FETCH, {
             groups: [
                 {
@@ -321,7 +325,7 @@ describe.sequential("Request handler", () => {
                     memberEpoch: 0,
                     topics: [
                         {
-                            name: "kafka-ts-test-topic",
+                            name: 'kafka-ts-test-topic',
                             partitionIndexes: [0],
                         },
                     ],
@@ -330,12 +334,12 @@ describe.sequential("Request handler", () => {
             requireStable: false,
         });
         result.groups.forEach((group) => {
-            group.groupId = "Any<String>";
+            group.groupId = 'Any<String>';
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should heartbeat", async () => {
+    it('should heartbeat', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.HEARTBEAT, {
             groupId,
             generationId: 1,
@@ -345,24 +349,24 @@ describe.sequential("Request handler", () => {
         expect(result).toMatchSnapshot();
     });
 
-    it("should leave group", async () => {
+    it('should leave group', async () => {
         const result = await cluster.sendRequestToNode(coordinatorId)(API.LEAVE_GROUP, {
             groupId,
             members: [{ memberId, groupInstanceId: null, reason: null }],
         });
         result.members.forEach((member) => {
-            member.memberId = "Any<UUID>";
+            member.memberId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
     });
 
-    it("should delete topics", async () => {
+    it('should delete topics', async () => {
         const result = await cluster.sendRequest(API.DELETE_TOPICS, {
-            topics: [{ name: "kafka-ts-test-topic", topicId: null }],
+            topics: [{ name: 'kafka-ts-test-topic', topicId: null }],
             timeoutMs: 10000,
         });
         result.responses.forEach((response) => {
-            response.topicId = "Any<UUID>";
+            response.topicId = 'Any<UUID>';
         });
         expect(result).toMatchSnapshot();
     });
package/src/cluster.ts CHANGED
@@ -1,87 +1,85 @@
-import { TcpSocketConnectOpts } from "net";
-import { TLSSocketOptions } from "tls";
-import { API } from "./api";
-import { Broker, SASLOptions } from "./broker";
-import { SendRequest } from "./connection";
-import { ConnectionError, KafkaTSError } from "./utils/error";
+import { TcpSocketConnectOpts } from 'net';
+import { TLSSocketOptions } from 'tls';
+import { API } from './api';
+import { Metadata } from './api/metadata';
+import { Broker, SASLProvider } from './broker';
+import { SendRequest } from './connection';
+import { KafkaTSError } from './utils/error';
+import { log } from './utils/logger';
 
 type ClusterOptions = {
     clientId: string | null;
     bootstrapServers: TcpSocketConnectOpts[];
-    sasl: SASLOptions | null;
+    sasl: SASLProvider | null;
     ssl: TLSSocketOptions | null;
 };
 
 export class Cluster {
-    private seedBroker: Broker;
+    private seedBroker = new Broker({ clientId: null, sasl: null, ssl: null, options: { port: 9092 } });
     private brokerById: Record<number, Broker> = {};
+    private brokerMetadata: Record<number, Metadata['brokers'][number]> = {};
 
-    constructor(private options: ClusterOptions) {
-        this.seedBroker = new Broker({
-            clientId: this.options.clientId,
-            sasl: this.options.sasl,
-            ssl: this.options.ssl,
-            options: this.options.bootstrapServers[0],
-        });
-    }
+    constructor(private options: ClusterOptions) {}
 
     public async connect() {
-        await this.connectSeedBroker();
+        this.seedBroker = await this.findSeedBroker();
+        this.brokerById = {};
+
         const metadata = await this.sendRequest(API.METADATA, {
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
             topics: [],
         });
-
-        this.brokerById = Object.fromEntries(
-            metadata.brokers.map(({ nodeId, ...options }) => [
-                nodeId,
-                new Broker({
-                    clientId: this.options.clientId,
-                    sasl: this.options.sasl,
-                    ssl: this.options.ssl,
-                    options,
-                }),
-            ]),
-        );
-        return this;
+        this.brokerMetadata = Object.fromEntries(metadata.brokers.map((options) => [options.nodeId, options]));
     }
 
     public async disconnect() {
-        await Promise.all([
-            this.seedBroker.disconnect(),
-            ...Object.values(this.brokerById).map((broker) => broker.disconnect()),
-        ]);
+        await Promise.all([this.seedBroker.disconnect(), ...Object.values(this.brokerById).map((x) => x.disconnect())]);
     }
 
+    public setSeedBroker = async (nodeId: number) => {
+        await this.seedBroker.disconnect();
+        this.seedBroker = await this.acquireBroker(nodeId);
+    };
+
     public sendRequest: SendRequest = (...args) => this.seedBroker.sendRequest(...args);
 
     public sendRequestToNode =
         (nodeId: number): SendRequest =>
         async (...args) => {
-            const broker = this.brokerById[nodeId];
-            if (!broker) {
-                throw new ConnectionError(`Broker ${nodeId} is not available`);
+            if (!this.brokerById[nodeId]) {
+                this.brokerById[nodeId] = await this.acquireBroker(nodeId);
             }
-            await broker.ensureConnected();
-            return broker.sendRequest(...args);
+            return this.brokerById[nodeId].sendRequest(...args);
         };
 
-    private async connectSeedBroker() {
+    public async acquireBroker(nodeId: number) {
+        const broker = new Broker({
+            clientId: this.options.clientId,
+            sasl: this.options.sasl,
+            ssl: this.options.ssl,
+            options: this.brokerMetadata[nodeId],
+        });
+        await broker.connect();
+        return broker;
+    }
+
+    private async findSeedBroker() {
         const randomizedBrokers = this.options.bootstrapServers.toSorted(() => Math.random() - 0.5);
         for (const options of randomizedBrokers) {
             try {
-                this.seedBroker = await new Broker({
+                const broker = await new Broker({
                     clientId: this.options.clientId,
                     sasl: this.options.sasl,
                     ssl: this.options.ssl,
                     options,
-                }).connect();
-                return;
+                });
+                await broker.connect();
+                return broker;
             } catch (error) {
-                console.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
+                log.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
             }
         }
-        throw new KafkaTSError("No seed brokers found");
+        throw new KafkaTSError('No seed brokers found');
     }
 }
package/src/codecs/gzip.ts ADDED
@@ -0,0 +1,9 @@
+import { gzip, unzip } from 'zlib';
+import { Codec } from './types';
+
+export const GZIP: Codec = {
+    compress: async (data) =>
+        new Promise<Buffer>((resolve, reject) => gzip(data, (err, result) => (err ? reject(err) : resolve(result)))),
+    decompress: async (data) =>
+        new Promise<Buffer>((resolve, reject) => unzip(data, (err, result) => (err ? reject(err) : resolve(result)))),
+};
package/src/codecs/index.ts ADDED
@@ -0,0 +1,16 @@
+import { GZIP } from './gzip';
+import { NONE } from './none';
+import { Codec } from './types';
+
+const codecs: Record<number, Codec> = {
+    0: NONE,
+    1: GZIP,
+};
+
+export const findCodec = (type: number) => {
+    const codec = codecs[type];
+    if (!codec) {
+        throw new Error(`Unsupported codec: ${type}`);
+    }
+    return codec;
+};
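The codec id passed to `findCodec` corresponds to the compression bits of the record batch `attributes` field (0 = none, 1 = gzip, matching the table above). A quick usage sketch, assuming package-internal imports since this diff does not show the codecs being re-exported publicly:

    import { findCodec } from './codecs';

    const codec = findCodec(1); // GZIP
    const compressed = await codec.compress(Buffer.from('hello'));
    const restored = await codec.decompress(compressed); // Buffer containing 'hello'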
package/src/codecs/none.ts ADDED
@@ -0,0 +1,6 @@
+import { Codec } from './types';
+
+export const NONE: Codec = {
+    compress: (data: Buffer) => Promise.resolve(data),
+    decompress: (data: Buffer) => Promise.resolve(data),
+};
package/src/codecs/types.ts ADDED
@@ -0,0 +1,4 @@
+export type Codec = {
+    compress: (data: Buffer) => Promise<Buffer>;
+    decompress: (data: Buffer) => Promise<Buffer>;
+};
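A `Codec` is just a pair of async `Buffer` transforms, so additional codecs follow the same shape as `GZIP` and `NONE`. A hedged sketch using Node's built-in brotli, purely as an illustration of the interface; it is not part of kafka-ts, and wiring it in would require extending the internal `codecs` table in `src/codecs/index.ts`:

    import { brotliCompress, brotliDecompress } from 'zlib';
    import { Codec } from './types';

    // Illustrative codec with the same shape as GZIP/NONE; not shipped with the package.
    export const BROTLI: Codec = {
        compress: (data) =>
            new Promise<Buffer>((resolve, reject) =>
                brotliCompress(data, (err, result) => (err ? reject(err) : resolve(result))),
            ),
        decompress: (data) =>
            new Promise<Buffer>((resolve, reject) =>
                brotliDecompress(data, (err, result) => (err ? reject(err) : resolve(result))),
            ),
    };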