kafka-ts 0.0.2-beta → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/.prettierrc +3 -2
  2. package/README.md +109 -39
  3. package/dist/api/api-versions.d.ts +9 -0
  4. package/dist/api/api-versions.js +24 -0
  5. package/dist/api/create-topics.d.ts +38 -0
  6. package/dist/api/create-topics.js +53 -0
  7. package/dist/api/delete-topics.d.ts +18 -0
  8. package/dist/api/delete-topics.js +33 -0
  9. package/dist/api/fetch.d.ts +84 -0
  10. package/dist/api/fetch.js +142 -0
  11. package/dist/api/find-coordinator.d.ts +21 -0
  12. package/dist/api/find-coordinator.js +39 -0
  13. package/dist/api/heartbeat.d.ts +11 -0
  14. package/dist/api/heartbeat.js +27 -0
  15. package/dist/api/index.d.ts +578 -0
  16. package/dist/api/index.js +165 -0
  17. package/dist/api/init-producer-id.d.ts +13 -0
  18. package/dist/api/init-producer-id.js +29 -0
  19. package/dist/api/join-group.d.ts +34 -0
  20. package/dist/api/join-group.js +51 -0
  21. package/dist/api/leave-group.d.ts +19 -0
  22. package/dist/api/leave-group.js +39 -0
  23. package/dist/api/list-offsets.d.ts +29 -0
  24. package/dist/api/list-offsets.js +48 -0
  25. package/dist/api/metadata.d.ts +40 -0
  26. package/dist/api/metadata.js +58 -0
  27. package/dist/api/offset-commit.d.ts +28 -0
  28. package/dist/api/offset-commit.js +48 -0
  29. package/dist/api/offset-fetch.d.ts +33 -0
  30. package/dist/api/offset-fetch.js +57 -0
  31. package/dist/api/produce.d.ts +54 -0
  32. package/dist/api/produce.js +126 -0
  33. package/dist/api/sasl-authenticate.d.ts +11 -0
  34. package/dist/api/sasl-authenticate.js +23 -0
  35. package/dist/api/sasl-handshake.d.ts +6 -0
  36. package/dist/api/sasl-handshake.js +19 -0
  37. package/dist/api/sync-group.d.ts +24 -0
  38. package/dist/api/sync-group.js +36 -0
  39. package/dist/auth/index.d.ts +2 -0
  40. package/dist/auth/index.js +8 -0
  41. package/dist/auth/plain.d.ts +5 -0
  42. package/dist/auth/plain.js +12 -0
  43. package/dist/auth/scram.d.ts +9 -0
  44. package/dist/auth/scram.js +40 -0
  45. package/dist/broker.d.ts +30 -0
  46. package/dist/broker.js +55 -0
  47. package/dist/client.d.ts +23 -0
  48. package/dist/client.js +36 -0
  49. package/dist/cluster.d.ts +27 -0
  50. package/dist/cluster.js +70 -0
  51. package/dist/cluster.test.d.ts +1 -0
  52. package/dist/cluster.test.js +345 -0
  53. package/dist/codecs/gzip.d.ts +2 -0
  54. package/dist/codecs/gzip.js +8 -0
  55. package/dist/codecs/index.d.ts +2 -0
  56. package/dist/codecs/index.js +17 -0
  57. package/dist/codecs/none.d.ts +2 -0
  58. package/dist/codecs/none.js +7 -0
  59. package/dist/codecs/types.d.ts +5 -0
  60. package/dist/codecs/types.js +2 -0
  61. package/dist/connection.d.ts +26 -0
  62. package/dist/connection.js +175 -0
  63. package/dist/consumer/consumer-group.d.ts +41 -0
  64. package/dist/consumer/consumer-group.js +217 -0
  65. package/dist/consumer/consumer-metadata.d.ts +7 -0
  66. package/dist/consumer/consumer-metadata.js +14 -0
  67. package/dist/consumer/consumer.d.ts +44 -0
  68. package/dist/consumer/consumer.js +225 -0
  69. package/dist/consumer/fetch-manager.d.ts +33 -0
  70. package/dist/consumer/fetch-manager.js +140 -0
  71. package/dist/consumer/fetcher.d.ts +25 -0
  72. package/dist/consumer/fetcher.js +64 -0
  73. package/dist/consumer/offset-manager.d.ts +22 -0
  74. package/dist/consumer/offset-manager.js +66 -0
  75. package/dist/consumer/processor.d.ts +19 -0
  76. package/dist/consumer/processor.js +59 -0
  77. package/dist/distributors/assignments-to-replicas.d.ts +16 -0
  78. package/dist/distributors/assignments-to-replicas.js +59 -0
  79. package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
  80. package/dist/distributors/assignments-to-replicas.test.js +40 -0
  81. package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
  82. package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
  83. package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
  84. package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
  85. package/dist/distributors/partitioner.d.ts +7 -0
  86. package/dist/distributors/partitioner.js +23 -0
  87. package/dist/index.d.ts +9 -0
  88. package/dist/index.js +26 -0
  89. package/dist/metadata.d.ts +24 -0
  90. package/dist/metadata.js +106 -0
  91. package/dist/producer/producer.d.ts +24 -0
  92. package/dist/producer/producer.js +131 -0
  93. package/dist/types.d.ts +11 -0
  94. package/dist/types.js +2 -0
  95. package/dist/utils/api.d.ts +9 -0
  96. package/dist/utils/api.js +5 -0
  97. package/dist/utils/crypto.d.ts +8 -0
  98. package/dist/utils/crypto.js +18 -0
  99. package/dist/utils/decoder.d.ts +30 -0
  100. package/dist/utils/decoder.js +152 -0
  101. package/dist/utils/delay.d.ts +1 -0
  102. package/dist/utils/delay.js +5 -0
  103. package/dist/utils/encoder.d.ts +28 -0
  104. package/dist/utils/encoder.js +125 -0
  105. package/dist/utils/error.d.ts +11 -0
  106. package/dist/utils/error.js +27 -0
  107. package/dist/utils/logger.d.ts +9 -0
  108. package/dist/utils/logger.js +32 -0
  109. package/dist/utils/memo.d.ts +1 -0
  110. package/dist/utils/memo.js +16 -0
  111. package/dist/utils/murmur2.d.ts +3 -0
  112. package/dist/utils/murmur2.js +40 -0
  113. package/dist/utils/retrier.d.ts +10 -0
  114. package/dist/utils/retrier.js +22 -0
  115. package/dist/utils/tracer.d.ts +5 -0
  116. package/dist/utils/tracer.js +39 -0
  117. package/package.json +30 -19
  118. package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap} +329 -26
  119. package/src/api/api-versions.ts +2 -2
  120. package/src/api/create-topics.ts +2 -2
  121. package/src/api/delete-topics.ts +2 -2
  122. package/src/api/fetch.ts +86 -31
  123. package/src/api/find-coordinator.ts +2 -2
  124. package/src/api/heartbeat.ts +2 -2
  125. package/src/api/index.ts +21 -19
  126. package/src/api/init-producer-id.ts +2 -2
  127. package/src/api/join-group.ts +3 -3
  128. package/src/api/leave-group.ts +2 -2
  129. package/src/api/list-offsets.ts +3 -3
  130. package/src/api/metadata.ts +3 -3
  131. package/src/api/offset-commit.ts +2 -2
  132. package/src/api/offset-fetch.ts +2 -2
  133. package/src/api/produce.ts +17 -20
  134. package/src/api/sasl-authenticate.ts +2 -2
  135. package/src/api/sasl-handshake.ts +2 -2
  136. package/src/api/sync-group.ts +2 -2
  137. package/src/auth/index.ts +2 -0
  138. package/src/auth/plain.ts +10 -0
  139. package/src/auth/scram.ts +52 -0
  140. package/src/broker.ts +12 -14
  141. package/src/client.ts +7 -7
  142. package/src/cluster.test.ts +78 -74
  143. package/src/cluster.ts +43 -45
  144. package/src/codecs/gzip.ts +9 -0
  145. package/src/codecs/index.ts +16 -0
  146. package/src/codecs/none.ts +6 -0
  147. package/src/codecs/types.ts +4 -0
  148. package/src/connection.ts +49 -33
  149. package/src/consumer/consumer-group.ts +57 -35
  150. package/src/consumer/consumer-metadata.ts +2 -2
  151. package/src/consumer/consumer.ts +115 -92
  152. package/src/consumer/fetch-manager.ts +169 -0
  153. package/src/consumer/fetcher.ts +64 -0
  154. package/src/consumer/offset-manager.ts +24 -13
  155. package/src/consumer/processor.ts +53 -0
  156. package/src/distributors/assignments-to-replicas.test.ts +7 -7
  157. package/src/distributors/assignments-to-replicas.ts +2 -4
  158. package/src/distributors/messages-to-topic-partition-leaders.test.ts +6 -6
  159. package/src/distributors/partitioner.ts +27 -0
  160. package/src/index.ts +9 -3
  161. package/src/metadata.ts +8 -4
  162. package/src/producer/producer.ts +30 -20
  163. package/src/types.ts +5 -3
  164. package/src/utils/api.ts +5 -5
  165. package/src/utils/crypto.ts +15 -0
  166. package/src/utils/decoder.ts +14 -8
  167. package/src/utils/encoder.ts +34 -27
  168. package/src/utils/error.ts +3 -3
  169. package/src/utils/logger.ts +37 -0
  170. package/src/utils/murmur2.ts +44 -0
  171. package/src/utils/retrier.ts +1 -1
  172. package/src/utils/tracer.ts +41 -20
  173. package/tsconfig.json +16 -16
  174. package/.github/workflows/release.yml +0 -17
  175. package/certs/ca.crt +0 -29
  176. package/certs/ca.key +0 -52
  177. package/certs/ca.srl +0 -1
  178. package/certs/kafka.crt +0 -29
  179. package/certs/kafka.csr +0 -26
  180. package/certs/kafka.key +0 -52
  181. package/certs/kafka.keystore.jks +0 -0
  182. package/certs/kafka.truststore.jks +0 -0
  183. package/docker-compose.yml +0 -104
  184. package/examples/package-lock.json +0 -31
  185. package/examples/package.json +0 -14
  186. package/examples/src/client.ts +0 -9
  187. package/examples/src/consumer.ts +0 -17
  188. package/examples/src/create-topic.ts +0 -37
  189. package/examples/src/producer.ts +0 -24
  190. package/examples/src/replicator.ts +0 -25
  191. package/examples/src/utils/json.ts +0 -1
  192. package/examples/tsconfig.json +0 -7
  193. package/log4j.properties +0 -95
  194. package/scripts/generate-certs.sh +0 -24
  195. package/src/utils/debug.ts +0 -9
package/.prettierrc CHANGED
@@ -3,5 +3,6 @@
     "tabWidth": 4,
     "endOfLine": "lf",
     "semi": true,
-    "trailingComma": "all"
-}
+    "trailingComma": "all",
+    "singleQuote": true
+}
package/README.md CHANGED
@@ -2,7 +2,7 @@
 
 **KafkaTS** is a Apache Kafka client library for Node.js. It provides both a low-level API for communicating directly with the Apache Kafka cluster and high-level APIs for publishing and subscribing to Kafka topics.
 
-**Please note that this project is still in early development and is not yet ready for production use. The interface before stable release is subject to change.**
+Supported Kafka versions: 3.7.0 and later
 
 ## Installation
 
@@ -16,8 +16,8 @@ npm install kafka-ts
 
 ```typescript
 export const kafka = createKafkaClient({
-    clientId: "my-app",
-    bootstrapServers: [{ host: "localhost", port: 9092 }],
+    clientId: 'my-app',
+    bootstrapServers: [{ host: 'localhost', port: 9092 }],
 });
 ```
 
@@ -26,7 +26,7 @@ export const kafka = createKafkaClient({
 ```typescript
 const consumer = await kafka.startConsumer({
     groupId: 'my-consumer-group'.
-    topics: ["my-topic"],
+    topics: ['my-topic'],
     onMessage: (message) => {
         console.log(message);
     },
@@ -36,36 +36,47 @@ const consumer = await kafka.startConsumer({
 #### Producing messages
 
 ```typescript
-export const producer = kafka.createProcucer();
+export const producer = kafka.createProducer();
 
-await producer.send([{ topic: "example-topic-f", partition: 0, key: null, value: line }]);
+await producer.send([{ topic: 'my-topic', partition: 0, key: 'key', value: 'value' }]);
 ```
 
 #### Low-level API
 
 ```typescript
 const cluster = kafka.createCluster();
-await cluster.connect();
-
-const { controllerId } = await cluster.sendRequest(API.METADATA, {
-    allowTopicAutoCreation: false,
-    includeTopicAuthorizedOperations: false,
-    topics: [],
-});
-
-await cluster.sendRequestToNode(controllerId)(API.CREATE_TOPICS, {
-    validateOnly: false,
-    timeoutMs: 10_000,
-    topics: [
-        {
-            name: "my-topic",
-            numPartitions: 10,
-            replicationFactor: 3,
-            assignments: [],
-            configs: [],
-        },
-    ],
-});
+await cluster.connect();
+
+const { controllerId } = await cluster.sendRequest(API.METADATA, {
+    allowTopicAutoCreation: false,
+    includeTopicAuthorizedOperations: false,
+    topics: [],
+});
+
+await cluster.sendRequestToNode(controllerId)(API.CREATE_TOPICS, {
+    validateOnly: false,
+    timeoutMs: 10_000,
+    topics: [
+        {
+            name: 'my-topic',
+            numPartitions: 10,
+            replicationFactor: 3,
+            assignments: [],
+            configs: [],
+        },
+    ],
+});
+
+await cluster.disconnect();
+```
+
+#### Graceful shutdown
+
+```typescript
+process.once('SIGTERM', async () => {
+    await consumer.close(); // waits for the consumer to finish processing the last batch and disconnects
+    await producer.close();
+});
 ```
 
 See the [examples](./examples) for more detailed examples.
@@ -77,15 +88,74 @@ The existing high-level libraries (e.g. kafkajs) are missing a few crucial featu
 
 ### New features compared to kafkajs
 
-* **Static consumer membership** - Rebalancing during rolling deployments causes delays. Using `groupInstanceId` in addition to `groupId` can avoid rebalancing and continue consuming partitions in the existing assignment.
-* **Consuming messages without consumer groups** - When you don't need the consumer to track the partition offsets, you can simply create a consumer without groupId and always either start consuming messages from the beginning or from the latest partition offset.
-* **Low-level API requests** - It's possible to communicate directly with the Kafka cluster using the kafka api protocol.
-
-## Backlog
-
-Minimal set of features required before a stable release:
-
-- Consumer concurrency control (Currently each relevant broker is polled in sequence)
-- Partitioner (Currently have to specify the partition on producer.send())
-- API versioning (Currently only tested against Kafka 3.7+)
-- SASL SCRAM support (+ pluggable authentication providers)
+- **Static consumer membership** - Rebalancing during rolling deployments causes delays. Using `groupInstanceId` in addition to `groupId` can avoid rebalancing and continue consuming partitions in the existing assignment.
+- **Consuming messages without consumer groups** - When you don't need the consumer to track the partition offsets, you can simply create a consumer without groupId and always either start consuming messages from the beginning or from the latest partition offset.
+- **Low-level API requests** - It's possible to communicate directly with the Kafka cluster using the kafka api protocol.
+
+## Configuration
+
+### `createKafkaClient()`
+
+| Name | Type | Required | Default | Description |
+| --- | --- | --- | --- | --- |
+| clientId | string | false | _null_ | The client id used for all requests. |
+| bootstrapServers | TcpSocketConnectOpts[] | true | | List of kafka brokers for initial cluster discovery. |
+| sasl | SASLProvider | false | | SASL provider |
+| ssl | TLSSocketOptions | false | | SSL configuration. |
+
+#### Supported SASL mechanisms
+
+- PLAIN: `saslPlain({ username, password })`
+- SCRAM-SHA-256: `saslScramSha256({ username, password })`
+- SCRAM-SHA-512: `saslScramSha512({ username, password })`
+
+Custom SASL mechanisms can be implemented following the `SASLProvider` interface. See [src/auth](./src/auth) for examples.
+
+### `kafka.startConsumer()`
+
+| Name | Type | Required | Default | Description |
+| --- | --- | --- | --- | --- |
+| topics | string[] | true | | List of topics to subscribe to |
+| groupId | string | false | _null_ | Consumer group id |
+| groupInstanceId | string | false | _null_ | Consumer group instance id |
+| rackId | string | false | _null_ | Rack id |
+| isolationLevel | IsolationLevel | false | IsolationLevel.READ_UNCOMMITTED | Isolation level |
+| sessionTimeoutMs | number | false | 30000 | Session timeout in milliseconds |
+| rebalanceTimeoutMs | number | false | 60000 | Rebalance timeout in milliseconds |
+| maxWaitMs | number | false | 5000 | Fetch long poll timeout in milliseconds |
+| minBytes | number | false | 1 | Minimum number of bytes to wait for before returning a fetch response |
+| maxBytes | number | false | 1_048_576 | Maximum number of bytes to return in the fetch response |
+| partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response |
+| allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+| fromBeginning | boolean | false | false | Start consuming from the beginning of the topic |
+| batchGranularity | BatchGranularity | false | partition | Controls messages split from fetch response. Also controls how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single batch<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assignned topics and partitions |
+| concurrency | number | false | 1 | How many batches to process concurrently |
+| onMessage | (message: Message) => Promise<unknown> | true | | Callback executed on every message |
+| onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed on every batch of messages (based on **batchGranuality**) |
+
+### `kafka.createProducer()`
+
+| Name | Type | Required | Default | Description |
+| --- | --- | --- | --- | --- |
+| allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+| partitioner | Partitioner | false | defaultPartitioner | Custom partitioner function. By default, it uses a default java-compatible partitioner. |
+
+### `producer.send(messages: Message[])`
+
+<!-- export type Message = {
+    topic: string;
+    partition?: number;
+    timestamp?: bigint;
+    key?: Buffer | null;
+    value: Buffer | null;
+    headers?: Record<string, string>;
+}; -->
+
+| Name | Type | Required | Default | Description |
+| --- | --- | --- | --- | --- |
+| topic | string | true | | Topic to send the message to |
+| partition | number | false | _null_ | Partition to send the message to. By default partitioned by key. If key is also missing, partition is assigned round-robin |
+| timestamp | bigint | false | _null_ | Message timestamp in milliseconds |
+| key | Buffer \| null | false | _null_ | Message key |
+| value | Buffer \| null | true | | Message value |
+| headers | Record<string, string> | false | _null_ | Message headers |
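Taken together, the new README sections document SASL, consumer, and producer configuration. As a quick illustration of how the documented options compose (a sketch based only on the tables above; the broker address and credentials are placeholders):

```typescript
import { createKafkaClient, saslScramSha512 } from 'kafka-ts';

const kafka = createKafkaClient({
    clientId: 'my-app',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
    sasl: saslScramSha512({ username: 'admin', password: 'secret' }), // placeholder credentials
});

// `groupInstanceId` opts into the static membership feature described above;
// `fromBeginning` applies when the group has no committed offsets yet.
const consumer = await kafka.startConsumer({
    groupId: 'my-consumer-group',
    groupInstanceId: 'my-app-instance-0',
    topics: ['my-topic'],
    fromBeginning: true,
    onMessage: async (message) => {
        console.log(message.value?.toString());
    },
});
```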
package/dist/api/api-versions.d.ts ADDED
@@ -0,0 +1,9 @@
+export declare const API_VERSIONS: import("../utils/api.js").Api<unknown, {
+    errorCode: number;
+    versions: {
+        apiKey: number;
+        minVersion: number;
+        maxVersion: number;
+    }[];
+    throttleTimeMs: number;
+}>;
package/dist/api/api-versions.js ADDED
@@ -0,0 +1,24 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.API_VERSIONS = void 0;
+const api_js_1 = require("../utils/api.js");
+const error_js_1 = require("../utils/error.js");
+exports.API_VERSIONS = (0, api_js_1.createApi)({
+    apiKey: 18,
+    apiVersion: 2,
+    request: (encoder) => encoder,
+    response: (decoder) => {
+        const result = {
+            errorCode: decoder.readInt16(),
+            versions: decoder.readArray((version) => ({
+                apiKey: version.readInt16(),
+                minVersion: version.readInt16(),
+                maxVersion: version.readInt16(),
+            })),
+            throttleTimeMs: decoder.readInt32(),
+        };
+        if (result.errorCode)
+            throw new error_js_1.KafkaTSApiError(result.errorCode, null, result);
+        return result;
+    },
+});
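api-versions.js above shows the `createApi` pattern used by every module in dist/api: a request encoder paired with a response decoder, pinned to a fixed apiKey/apiVersion (here ApiVersions, apiKey 18, v2). A hypothetical usage sketch over the documented low-level API, assuming `API_VERSIONS` is exposed on the exported `API` map like the other APIs:

```typescript
const cluster = kafka.createCluster();
await cluster.connect();

// ApiVersions takes no request fields (the request type is `unknown`).
const { versions } = await cluster.sendRequest(API.API_VERSIONS, {});
const fetchRange = versions.find((v) => v.apiKey === 1); // apiKey 1 = FETCH
console.log(`FETCH supported from v${fetchRange?.minVersion} to v${fetchRange?.maxVersion}`);

await cluster.disconnect();
```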
package/dist/api/create-topics.d.ts ADDED
@@ -0,0 +1,38 @@
+export declare const CREATE_TOPICS: import("../utils/api").Api<{
+    topics: {
+        name: string;
+        numPartitions: number;
+        replicationFactor: number;
+        assignments: {
+            partitionIndex: number;
+            brokerIds: number[];
+        }[];
+        configs: {
+            name: string;
+            value: string | null;
+        }[];
+    }[];
+    timeoutMs: number;
+    validateOnly: boolean;
+}, {
+    _tag: void;
+    throttleTimeMs: number;
+    topics: {
+        name: string | null;
+        topicId: string;
+        errorCode: number;
+        errorMessage: string | null;
+        numPartitions: number;
+        replicationFactor: number;
+        configs: {
+            name: string | null;
+            value: string | null;
+            readOnly: boolean;
+            configSource: number;
+            isSensitive: boolean;
+            _tag: void;
+        }[];
+        _tag: void;
+    }[];
+    _tag2: void;
+}>;
package/dist/api/create-topics.js ADDED
@@ -0,0 +1,53 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CREATE_TOPICS = void 0;
+const api_1 = require("../utils/api");
+const error_1 = require("../utils/error");
+exports.CREATE_TOPICS = (0, api_1.createApi)({
+    apiKey: 19,
+    apiVersion: 7,
+    request: (encoder, data) => encoder
+        .writeUVarInt(0)
+        .writeCompactArray(data.topics, (encoder, topic) => encoder
+            .writeCompactString(topic.name)
+            .writeInt32(topic.numPartitions)
+            .writeInt16(topic.replicationFactor)
+            .writeCompactArray(topic.assignments, (encoder, assignment) => encoder
+                .writeInt32(assignment.partitionIndex)
+                .writeCompactArray(assignment.brokerIds, (encoder, brokerId) => encoder.writeInt32(brokerId))
+                .writeUVarInt(0))
+            .writeCompactArray(topic.configs, (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
+            .writeUVarInt(0))
+        .writeInt32(data.timeoutMs)
+        .writeBoolean(data.validateOnly)
+        .writeUVarInt(0),
+    response: (decoder) => {
+        const result = {
+            _tag: decoder.readTagBuffer(),
+            throttleTimeMs: decoder.readInt32(),
+            topics: decoder.readCompactArray((topic) => ({
+                name: topic.readCompactString(),
+                topicId: topic.readUUID(),
+                errorCode: topic.readInt16(),
+                errorMessage: topic.readCompactString(),
+                numPartitions: topic.readInt32(),
+                replicationFactor: topic.readInt16(),
+                configs: topic.readCompactArray((config) => ({
+                    name: config.readCompactString(),
+                    value: config.readCompactString(),
+                    readOnly: config.readBoolean(),
+                    configSource: config.readInt8(),
+                    isSensitive: config.readBoolean(),
+                    _tag: config.readTagBuffer(),
+                })),
+                _tag: topic.readTagBuffer(),
+            })),
+            _tag2: decoder.readTagBuffer(),
+        };
+        result.topics.forEach((topic) => {
+            if (topic.errorCode)
+                throw new error_1.KafkaTSApiError(topic.errorCode, topic.errorMessage, result);
+        });
+        return result;
+    },
+});
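create-topics.js implements the request sent by the README's low-level example. One field worth calling out from the request shape: `validateOnly`, which asks the controller to validate the request without creating anything. A hypothetical dry-run sketch, reusing `cluster` and `controllerId` from the README example:

```typescript
// With validateOnly set, the controller checks partition counts, replication
// factor, and configs and reports errors, but no topic is actually created.
await cluster.sendRequestToNode(controllerId)(API.CREATE_TOPICS, {
    validateOnly: true,
    timeoutMs: 10_000,
    topics: [{ name: 'my-topic', numPartitions: 10, replicationFactor: 3, assignments: [], configs: [] }],
});
```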
package/dist/api/delete-topics.d.ts ADDED
@@ -0,0 +1,18 @@
+export declare const DELETE_TOPICS: import("../utils/api").Api<{
+    topics: {
+        name: string | null;
+        topicId: string | null;
+    }[];
+    timeoutMs: number;
+}, {
+    _tag: void;
+    throttleTimeMs: number;
+    responses: {
+        name: string | null;
+        topicId: string;
+        errorCode: number;
+        errorMessage: string | null;
+        _tag: void;
+    }[];
+    _tag2: void;
+}>;
package/dist/api/delete-topics.js ADDED
@@ -0,0 +1,33 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DELETE_TOPICS = void 0;
+const api_1 = require("../utils/api");
+const error_1 = require("../utils/error");
+exports.DELETE_TOPICS = (0, api_1.createApi)({
+    apiKey: 20,
+    apiVersion: 6,
+    request: (encoder, data) => encoder
+        .writeUVarInt(0)
+        .writeCompactArray(data.topics, (encoder, topic) => encoder.writeCompactString(topic.name).writeUUID(topic.topicId).writeUVarInt(0))
+        .writeInt32(data.timeoutMs)
+        .writeUVarInt(0),
+    response: (decoder) => {
+        const result = {
+            _tag: decoder.readTagBuffer(),
+            throttleTimeMs: decoder.readInt32(),
+            responses: decoder.readCompactArray((decoder) => ({
+                name: decoder.readCompactString(),
+                topicId: decoder.readUUID(),
+                errorCode: decoder.readInt16(),
+                errorMessage: decoder.readCompactString(),
+                _tag: decoder.readTagBuffer(),
+            })),
+            _tag2: decoder.readTagBuffer(),
+        };
+        result.responses.forEach((response) => {
+            if (response.errorCode)
+                throw new error_1.KafkaTSApiError(response.errorCode, response.errorMessage, result);
+        });
+        return result;
+    },
+});
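delete-topics.d.ts declares that each topic can be addressed by name or by topicId (both typed `string | null`). A hypothetical deletion-by-name sketch; whether a null topicId is accepted on this code path is an assumption drawn from the declared types:

```typescript
// Assumption: passing topicId as null is allowed when deleting by name.
await cluster.sendRequestToNode(controllerId)(API.DELETE_TOPICS, {
    topics: [{ name: 'my-topic', topicId: null }],
    timeoutMs: 10_000,
});
```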
package/dist/api/fetch.d.ts ADDED
@@ -0,0 +1,84 @@
+/// <reference types="node" />
+export declare const enum IsolationLevel {
+    READ_UNCOMMITTED = 0,
+    READ_COMMITTED = 1
+}
+export type FetchResponse = Awaited<ReturnType<(typeof FETCH)['response']>>;
+export declare const FETCH: import("../utils/api").Api<{
+    maxWaitMs: number;
+    minBytes: number;
+    maxBytes: number;
+    isolationLevel: IsolationLevel;
+    sessionId: number;
+    sessionEpoch: number;
+    topics: {
+        topicId: string;
+        partitions: {
+            partition: number;
+            currentLeaderEpoch: number;
+            fetchOffset: bigint;
+            lastFetchedEpoch: number;
+            logStartOffset: bigint;
+            partitionMaxBytes: number;
+        }[];
+    }[];
+    forgottenTopicsData: {
+        topicId: string;
+        partitions: number[];
+    }[];
+    rackId: string;
+}, {
+    responses: {
+        partitions: {
+            records: {
+                records: {
+                    attributes: number;
+                    timestampDelta: bigint;
+                    offsetDelta: number;
+                    key: Buffer | null;
+                    value: Buffer | null;
+                    headers: {
+                        key: Buffer | null;
+                        value: Buffer | null;
+                    }[];
+                }[];
+                compression: number;
+                timestampType: string;
+                isTransactional: boolean;
+                isControlBatch: boolean;
+                hasDeleteHorizonMs: boolean;
+                baseOffset: bigint;
+                batchLength: number;
+                partitionLeaderEpoch: number;
+                magic: number;
+                crc: number;
+                attributes: number;
+                lastOffsetDelta: number;
+                baseTimestamp: bigint;
+                maxTimestamp: bigint;
+                producerId: bigint;
+                producerEpoch: number;
+                baseSequence: number;
+            }[];
+            partitionIndex: number;
+            errorCode: number;
+            highWatermark: bigint;
+            lastStableOffset: bigint;
+            logStartOffset: bigint;
+            abortedTransactions: {
+                producerId: bigint;
+                firstOffset: bigint;
+                _tag: void;
+            }[];
+            preferredReadReplica: number;
+            _tag: void;
+        }[];
+        topicId: string;
+        _tag: void;
+    }[];
+    _tag: void;
+    throttleTimeMs: number;
+    errorCode: number;
+    sessionId: number;
+    _tag2: void;
+}>;
package/dist/api/fetch.js ADDED
@@ -0,0 +1,142 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FETCH = void 0;
+const codecs_1 = require("../codecs");
+const api_1 = require("../utils/api");
+const decoder_1 = require("../utils/decoder");
+const error_1 = require("../utils/error");
+exports.FETCH = (0, api_1.createApi)({
+    apiKey: 1,
+    apiVersion: 16,
+    request: (encoder, data) => encoder
+        .writeUVarInt(0)
+        .writeInt32(data.maxWaitMs)
+        .writeInt32(data.minBytes)
+        .writeInt32(data.maxBytes)
+        .writeInt8(data.isolationLevel)
+        .writeInt32(data.sessionId)
+        .writeInt32(data.sessionEpoch)
+        .writeCompactArray(data.topics, (encoder, topic) => encoder
+            .writeUUID(topic.topicId)
+            .writeCompactArray(topic.partitions, (encoder, partition) => encoder
+                .writeInt32(partition.partition)
+                .writeInt32(partition.currentLeaderEpoch)
+                .writeInt64(partition.fetchOffset)
+                .writeInt32(partition.lastFetchedEpoch)
+                .writeInt64(partition.logStartOffset)
+                .writeInt32(partition.partitionMaxBytes)
+                .writeUVarInt(0))
+            .writeUVarInt(0))
+        .writeCompactArray(data.forgottenTopicsData, (encoder, forgottenTopic) => encoder
+            .writeUUID(forgottenTopic.topicId)
+            .writeCompactArray(forgottenTopic.partitions, (encoder, partition) => encoder.writeInt32(partition))
+            .writeUVarInt(0))
+        .writeCompactString(data.rackId)
+        .writeUVarInt(0),
+    response: async (decoder) => {
+        const result = {
+            _tag: decoder.readTagBuffer(),
+            throttleTimeMs: decoder.readInt32(),
+            errorCode: decoder.readInt16(),
+            sessionId: decoder.readInt32(),
+            responses: decoder.readCompactArray((response) => ({
+                topicId: response.readUUID(),
+                partitions: response.readCompactArray((partition) => ({
+                    partitionIndex: partition.readInt32(),
+                    errorCode: partition.readInt16(),
+                    highWatermark: partition.readInt64(),
+                    lastStableOffset: partition.readInt64(),
+                    logStartOffset: partition.readInt64(),
+                    abortedTransactions: partition.readCompactArray((abortedTransaction) => ({
+                        producerId: abortedTransaction.readInt64(),
+                        firstOffset: abortedTransaction.readInt64(),
+                        _tag: abortedTransaction.readTagBuffer(),
+                    })),
+                    preferredReadReplica: partition.readInt32(),
+                    records: decodeRecordBatch(partition),
+                    _tag: partition.readTagBuffer(),
+                })),
+                _tag: response.readTagBuffer(),
+            })),
+            _tag2: decoder.readTagBuffer(),
+        };
+        if (result.errorCode)
+            throw new error_1.KafkaTSApiError(result.errorCode, null, result);
+        result.responses.forEach((response) => {
+            response.partitions.forEach((partition) => {
+                if (partition.errorCode)
+                    throw new error_1.KafkaTSApiError(partition.errorCode, null, result);
+            });
+        });
+        const decompressedResponses = await Promise.all(result.responses.map(async (response) => ({
+            ...response,
+            partitions: await Promise.all(response.partitions.map(async (partition) => ({
+                ...partition,
+                records: await Promise.all(partition.records.map(async ({ recordsLength, compressedRecords, ...record }) => {
+                    const { decompress } = (0, codecs_1.findCodec)(record.compression);
+                    const decompressedRecords = await decompress(compressedRecords);
+                    const decompressedDecoder = new decoder_1.Decoder(Buffer.concat([recordsLength, decompressedRecords]));
+                    return { ...record, records: decodeRecord(decompressedDecoder) };
+                })),
+            }))),
+        })));
+        return { ...result, responses: decompressedResponses };
+    },
+});
+const decodeRecordBatch = (decoder) => {
+    const size = decoder.readUVarInt() - 1;
+    if (size <= 0) {
+        return [];
+    }
+    const recordBatchDecoder = new decoder_1.Decoder(decoder.read(size));
+    const results = [];
+    while (recordBatchDecoder.getBufferLength() > recordBatchDecoder.getOffset() + 12) {
+        const baseOffset = recordBatchDecoder.readInt64();
+        const batchLength = recordBatchDecoder.readInt32();
+        if (!batchLength) {
+            continue;
+        }
+        const batchDecoder = new decoder_1.Decoder(recordBatchDecoder.read(batchLength));
+        const result = {
+            baseOffset,
+            batchLength,
+            partitionLeaderEpoch: batchDecoder.readInt32(),
+            magic: batchDecoder.readInt8(),
+            crc: batchDecoder.readUInt32(),
+            attributes: batchDecoder.readInt16(),
+            lastOffsetDelta: batchDecoder.readInt32(),
+            baseTimestamp: batchDecoder.readInt64(),
+            maxTimestamp: batchDecoder.readInt64(),
+            producerId: batchDecoder.readInt64(),
+            producerEpoch: batchDecoder.readInt16(),
+            baseSequence: batchDecoder.readInt32(),
+            recordsLength: batchDecoder.read(4),
+            compressedRecords: batchDecoder.read(),
+        };
+        const compression = result.attributes & 0x07;
+        const timestampType = (result.attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';
+        const isTransactional = !!((result.attributes & 0x10) >> 4);
+        const isControlBatch = !!((result.attributes & 0x20) >> 5);
+        const hasDeleteHorizonMs = !!((result.attributes & 0x40) >> 6);
+        results.push({
+            ...result,
+            compression,
+            timestampType,
+            isTransactional,
+            isControlBatch,
+            hasDeleteHorizonMs,
+        });
+    }
+    return results;
+};
+const decodeRecord = (decoder) => decoder.readRecords((record) => ({
+    attributes: record.readInt8(),
+    timestampDelta: record.readVarLong(),
+    offsetDelta: record.readVarInt(),
+    key: record.readVarIntBuffer(),
+    value: record.readVarIntBuffer(),
+    headers: record.readVarIntArray((header) => ({
+        key: header.readVarIntBuffer(),
+        value: header.readVarIntBuffer(),
+    })),
+}));
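The bit masks at the end of decodeRecordBatch unpack the `attributes` field of the Kafka record batch format (v2). Spelled out as a standalone sketch (codec numbering per the Kafka spec; this package currently ships only the none and gzip codecs):

```typescript
// attributes bit layout (record batch v2):
//   bits 0-2: compression codec (0 = none, 1 = gzip, 2 = snappy, 3 = lz4, 4 = zstd)
//   bit 3:    timestamp type (0 = CreateTime, 1 = LogAppendTime)
//   bit 4:    isTransactional
//   bit 5:    isControlBatch
//   bit 6:    hasDeleteHorizonMs
const attributes = 0b0101001; // example: gzip, LogAppendTime, control batch

console.log(attributes & 0x07);            // 1     -> gzip
console.log((attributes & 0x08) >> 3);     // 1     -> LogAppendTime
console.log(!!((attributes & 0x10) >> 4)); // false -> not transactional
console.log(!!((attributes & 0x20) >> 5)); // true  -> control batch
```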
package/dist/api/find-coordinator.d.ts ADDED
@@ -0,0 +1,21 @@
+export declare const KEY_TYPE: {
+    GROUP: number;
+    TRANSACTION: number;
+};
+export declare const FIND_COORDINATOR: import("../utils/api").Api<{
+    keyType: number;
+    keys: string[];
+}, {
+    _tag: void;
+    throttleTimeMs: number;
+    coordinators: {
+        key: string | null;
+        nodeId: number;
+        host: string;
+        port: number;
+        errorCode: number;
+        errorMessage: string | null;
+        _tag: void;
+    }[];
+    _tag2: void;
+}>;
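find-coordinator.d.ts shows the request takes a key type (consumer group or transaction) plus a batch of keys, and the response maps each key to the broker coordinating it. A hypothetical lookup sketch for a consumer group, reusing `cluster` from the earlier examples (keyType 0 is the group key type in the Kafka protocol, which `KEY_TYPE.GROUP` presumably resolves to):

```typescript
const { coordinators } = await cluster.sendRequest(API.FIND_COORDINATOR, {
    keyType: 0, // KEY_TYPE.GROUP
    keys: ['my-consumer-group'],
});
const [{ nodeId, host, port }] = coordinators;
console.log(`group coordinator is node ${nodeId} at ${host}:${port}`);
```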