kafka-ts 0.0.3-beta → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. package/README.md +72 -8
  2. package/dist/api/api-versions.d.ts +9 -0
  3. package/{src/api/api-versions.ts → dist/api/api-versions.js} +8 -5
  4. package/dist/api/create-topics.d.ts +38 -0
  5. package/dist/api/create-topics.js +53 -0
  6. package/dist/api/delete-topics.d.ts +18 -0
  7. package/dist/api/delete-topics.js +33 -0
  8. package/dist/api/fetch.d.ts +84 -0
  9. package/dist/api/fetch.js +142 -0
  10. package/dist/api/find-coordinator.d.ts +21 -0
  11. package/{src/api/find-coordinator.ts → dist/api/find-coordinator.js} +14 -14
  12. package/dist/api/heartbeat.d.ts +11 -0
  13. package/dist/api/heartbeat.js +27 -0
  14. package/dist/api/index.d.ts +576 -0
  15. package/{src/api/index.ts → dist/api/index.js} +42 -41
  16. package/dist/api/init-producer-id.d.ts +13 -0
  17. package/dist/api/init-producer-id.js +29 -0
  18. package/dist/api/join-group.d.ts +34 -0
  19. package/dist/api/join-group.js +51 -0
  20. package/dist/api/leave-group.d.ts +19 -0
  21. package/dist/api/leave-group.js +39 -0
  22. package/dist/api/list-offsets.d.ts +29 -0
  23. package/dist/api/list-offsets.js +48 -0
  24. package/dist/api/metadata.d.ts +40 -0
  25. package/{src/api/metadata.ts → dist/api/metadata.js} +18 -26
  26. package/dist/api/offset-commit.d.ts +28 -0
  27. package/dist/api/offset-commit.js +48 -0
  28. package/dist/api/offset-fetch.d.ts +31 -0
  29. package/dist/api/offset-fetch.js +55 -0
  30. package/dist/api/produce.d.ts +54 -0
  31. package/{src/api/produce.ts → dist/api/produce.js} +55 -102
  32. package/dist/api/sasl-authenticate.d.ts +11 -0
  33. package/dist/api/sasl-authenticate.js +23 -0
  34. package/dist/api/sasl-handshake.d.ts +6 -0
  35. package/dist/api/sasl-handshake.js +19 -0
  36. package/dist/api/sync-group.d.ts +24 -0
  37. package/dist/api/sync-group.js +36 -0
  38. package/dist/auth/index.d.ts +2 -0
  39. package/dist/auth/index.js +8 -0
  40. package/dist/auth/plain.d.ts +5 -0
  41. package/dist/auth/plain.js +12 -0
  42. package/dist/auth/scram.d.ts +9 -0
  43. package/dist/auth/scram.js +40 -0
  44. package/dist/broker.d.ts +30 -0
  45. package/dist/broker.js +55 -0
  46. package/dist/client.d.ts +22 -0
  47. package/dist/client.js +36 -0
  48. package/dist/cluster.d.ts +27 -0
  49. package/dist/cluster.js +70 -0
  50. package/dist/cluster.test.d.ts +1 -0
  51. package/{src/cluster.test.ts → dist/cluster.test.js} +87 -113
  52. package/dist/codecs/gzip.d.ts +2 -0
  53. package/dist/codecs/gzip.js +8 -0
  54. package/dist/codecs/index.d.ts +2 -0
  55. package/dist/codecs/index.js +17 -0
  56. package/dist/codecs/none.d.ts +2 -0
  57. package/dist/codecs/none.js +7 -0
  58. package/dist/codecs/types.d.ts +5 -0
  59. package/dist/codecs/types.js +2 -0
  60. package/dist/connection.d.ts +26 -0
  61. package/dist/connection.js +175 -0
  62. package/dist/consumer/consumer-group.d.ts +41 -0
  63. package/dist/consumer/consumer-group.js +215 -0
  64. package/dist/consumer/consumer-metadata.d.ts +7 -0
  65. package/dist/consumer/consumer-metadata.js +14 -0
  66. package/dist/consumer/consumer.d.ts +44 -0
  67. package/dist/consumer/consumer.js +225 -0
  68. package/dist/consumer/fetch-manager.d.ts +33 -0
  69. package/dist/consumer/fetch-manager.js +140 -0
  70. package/dist/consumer/fetcher.d.ts +25 -0
  71. package/dist/consumer/fetcher.js +64 -0
  72. package/dist/consumer/offset-manager.d.ts +22 -0
  73. package/dist/consumer/offset-manager.js +66 -0
  74. package/dist/consumer/processor.d.ts +19 -0
  75. package/dist/consumer/processor.js +59 -0
  76. package/dist/distributors/assignments-to-replicas.d.ts +16 -0
  77. package/{src/distributors/assignments-to-replicas.ts → dist/distributors/assignments-to-replicas.js} +15 -41
  78. package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
  79. package/dist/distributors/assignments-to-replicas.test.js +40 -0
  80. package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
  81. package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
  82. package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
  83. package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
  84. package/dist/distributors/partitioner.d.ts +7 -0
  85. package/dist/distributors/partitioner.js +23 -0
  86. package/dist/index.d.ts +9 -0
  87. package/dist/index.js +26 -0
  88. package/dist/metadata.d.ts +24 -0
  89. package/dist/metadata.js +106 -0
  90. package/dist/producer/producer.d.ts +24 -0
  91. package/dist/producer/producer.js +131 -0
  92. package/{src/types.ts → dist/types.d.ts} +4 -4
  93. package/dist/types.js +2 -0
  94. package/{src/utils/api.ts → dist/utils/api.d.ts} +2 -4
  95. package/dist/utils/api.js +5 -0
  96. package/dist/utils/crypto.d.ts +8 -0
  97. package/dist/utils/crypto.js +18 -0
  98. package/dist/utils/decoder.d.ts +30 -0
  99. package/{src/utils/decoder.ts → dist/utils/decoder.js} +41 -57
  100. package/dist/utils/delay.d.ts +1 -0
  101. package/dist/utils/delay.js +5 -0
  102. package/dist/utils/encoder.d.ts +28 -0
  103. package/{src/utils/encoder.ts → dist/utils/encoder.js} +50 -66
  104. package/dist/utils/error.d.ts +11 -0
  105. package/dist/utils/error.js +27 -0
  106. package/dist/utils/logger.d.ts +9 -0
  107. package/dist/utils/logger.js +32 -0
  108. package/dist/utils/memo.d.ts +1 -0
  109. package/{src/utils/memo.ts → dist/utils/memo.js} +7 -3
  110. package/dist/utils/murmur2.d.ts +3 -0
  111. package/dist/utils/murmur2.js +40 -0
  112. package/dist/utils/retrier.d.ts +10 -0
  113. package/dist/utils/retrier.js +22 -0
  114. package/dist/utils/tracer.d.ts +5 -0
  115. package/dist/utils/tracer.js +39 -0
  116. package/package.json +11 -2
  117. package/.github/workflows/release.yml +0 -17
  118. package/.prettierrc +0 -8
  119. package/certs/ca.crt +0 -29
  120. package/certs/ca.key +0 -52
  121. package/certs/ca.srl +0 -1
  122. package/certs/kafka.crt +0 -29
  123. package/certs/kafka.csr +0 -26
  124. package/certs/kafka.key +0 -52
  125. package/certs/kafka.keystore.jks +0 -0
  126. package/certs/kafka.truststore.jks +0 -0
  127. package/docker-compose.yml +0 -104
  128. package/examples/package-lock.json +0 -31
  129. package/examples/package.json +0 -14
  130. package/examples/src/client.ts +0 -9
  131. package/examples/src/consumer.ts +0 -18
  132. package/examples/src/create-topic.ts +0 -44
  133. package/examples/src/producer.ts +0 -24
  134. package/examples/src/replicator.ts +0 -25
  135. package/examples/src/utils/delay.ts +0 -1
  136. package/examples/src/utils/json.ts +0 -1
  137. package/examples/tsconfig.json +0 -7
  138. package/log4j.properties +0 -95
  139. package/scripts/generate-certs.sh +0 -24
  140. package/src/__snapshots__/request-handler.test.ts.snap +0 -978
  141. package/src/api/create-topics.ts +0 -78
  142. package/src/api/delete-topics.ts +0 -42
  143. package/src/api/fetch.ts +0 -143
  144. package/src/api/heartbeat.ts +0 -33
  145. package/src/api/init-producer-id.ts +0 -35
  146. package/src/api/join-group.ts +0 -67
  147. package/src/api/leave-group.ts +0 -48
  148. package/src/api/list-offsets.ts +0 -65
  149. package/src/api/offset-commit.ts +0 -67
  150. package/src/api/offset-fetch.ts +0 -74
  151. package/src/api/sasl-authenticate.ts +0 -21
  152. package/src/api/sasl-handshake.ts +0 -16
  153. package/src/api/sync-group.ts +0 -54
  154. package/src/broker.ts +0 -74
  155. package/src/client.ts +0 -47
  156. package/src/cluster.ts +0 -87
  157. package/src/connection.ts +0 -143
  158. package/src/consumer/consumer-group.ts +0 -209
  159. package/src/consumer/consumer-metadata.ts +0 -14
  160. package/src/consumer/consumer.ts +0 -231
  161. package/src/consumer/fetch-manager.ts +0 -179
  162. package/src/consumer/fetcher.ts +0 -57
  163. package/src/consumer/offset-manager.ts +0 -93
  164. package/src/consumer/processor.ts +0 -47
  165. package/src/distributors/assignments-to-replicas.test.ts +0 -43
  166. package/src/distributors/messages-to-topic-partition-leaders.test.ts +0 -32
  167. package/src/distributors/messages-to-topic-partition-leaders.ts +0 -19
  168. package/src/index.ts +0 -4
  169. package/src/metadata.ts +0 -122
  170. package/src/producer/producer.ts +0 -132
  171. package/src/utils/debug.ts +0 -9
  172. package/src/utils/delay.ts +0 -1
  173. package/src/utils/error.ts +0 -21
  174. package/src/utils/retrier.ts +0 -39
  175. package/src/utils/tracer.ts +0 -31
  176. package/tsconfig.json +0 -17
package/README.md CHANGED
@@ -2,7 +2,7 @@
 
  **KafkaTS** is a Apache Kafka client library for Node.js. It provides both a low-level API for communicating directly with the Apache Kafka cluster and high-level APIs for publishing and subscribing to Kafka topics.
 
- **Please note that this project is still in early development and is not yet ready for production use. The interface before stable release is subject to change.**
+ **Supported Kafka versions:** 3.6 and later
 
  ## Installation
 
@@ -81,6 +81,10 @@ process.once('SIGTERM', async () => {
 
  See the [examples](./examples) for more detailed examples.
 
+ #### Logging
+
+ By default, KafkaTS logs using a JSON logger. This can be replaced globally by calling the setLogger method (see [src/utils/logger.ts](./src/utils/logger.ts)).
+
  ## Motivation
 
  The existing low-level libraries (e.g. node-rdkafka) are bindings on librdkafka, which doesn't give enough control over the consumer logic.
@@ -92,10 +96,70 @@ The existing high-level libraries (e.g. kafkajs) are missing a few crucial featu
  - **Consuming messages without consumer groups** - When you don't need the consumer to track the partition offsets, you can simply create a consumer without groupId and always either start consuming messages from the beginning or from the latest partition offset.
  - **Low-level API requests** - It's possible to communicate directly with the Kafka cluster using the kafka api protocol.
 
- ## Backlog
-
- Minimal set of features required before a stable release:
-
- - Partitioner (Currently have to specify the partition on producer.send())
- - API versioning (Currently only tested against Kafka 3.7+)
- - SASL SCRAM support (+ pluggable authentication providers)
+ ## Configuration
+
+ ### `createKafkaClient()`
+
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | clientId | string | false | _null_ | The client id used for all requests. |
+ | bootstrapServers | TcpSocketConnectOpts[] | true | | List of kafka brokers for initial cluster discovery. |
+ | sasl | SASLProvider | false | | SASL provider |
+ | ssl | TLSSocketOptions | false | | SSL configuration. |
+
+ #### Supported SASL mechanisms
+
+ - PLAIN: `saslPlain({ username, password })`
+ - SCRAM-SHA-256: `saslScramSha256({ username, password })`
+ - SCRAM-SHA-512: `saslScramSha512({ username, password })`
+
+ Custom SASL mechanisms can be implemented following the `SASLProvider` interface. See [src/auth](./src/auth) for examples.
+
+ ### `kafka.startConsumer()`
+
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | topics | string[] | true | | List of topics to subscribe to |
+ | groupId | string | false | _null_ | Consumer group id |
+ | groupInstanceId | string | false | _null_ | Consumer group instance id |
+ | rackId | string | false | _null_ | Rack id |
+ | isolationLevel | IsolationLevel | false | IsolationLevel.READ_UNCOMMITTED | Isolation level |
+ | sessionTimeoutMs | number | false | 30000 | Session timeout in milliseconds |
+ | rebalanceTimeoutMs | number | false | 60000 | Rebalance timeout in milliseconds |
+ | maxWaitMs | number | false | 5000 | Fetch long poll timeout in milliseconds |
+ | minBytes | number | false | 1 | Minimum number of bytes to wait for before returning a fetch response |
+ | maxBytes | number | false | 1_048_576 | Maximum number of bytes to return in the fetch response |
+ | partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response |
+ | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+ | fromBeginning | boolean | false | false | Start consuming from the beginning of the topic |
+ | batchGranularity | BatchGranularity | false | partition | Controls how messages from the fetch response are split into batches and how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single batch<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assigned topics and partitions |
+ | concurrency | number | false | 1 | How many batches to process concurrently |
+ | onMessage | (message: Message) => Promise<unknown> | true | | Callback executed on every message |
+ | onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed on every batch of messages (based on **batchGranularity**) |
+
+ ### `kafka.createProducer()`
+
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+ | partitioner | Partitioner | false | defaultPartitioner | Custom partitioner function. By default, a Java-compatible partitioner is used. |
+
+ ### `producer.send(messages: Message[])`
+
+ <!-- export type Message = {
+     topic: string;
+     partition?: number;
+     timestamp?: bigint;
+     key?: Buffer | null;
+     value: Buffer | null;
+     headers?: Record<string, string>;
+ }; -->
+
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | topic | string | true | | Topic to send the message to |
+ | partition | number | false | _null_ | Partition to send the message to. By default, the partition is derived from the message key. If the key is also missing, the partition is assigned round-robin |
+ | timestamp | bigint | false | _null_ | Message timestamp in milliseconds |
+ | key | Buffer \| null | false | _null_ | Message key |
+ | value | Buffer \| null | true | | Message value |
+ | headers | Record<string, string> | false | _null_ | Message headers |
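
The configuration tables added in this release describe the new public surface. The sketch below shows how they fit together end to end; it is illustrative only — the `kafka-ts` import path, broker address, topic names and credentials are placeholders, and the exact return values of `startConsumer`/`createProducer` are not part of this diff.

```ts
import { createKafkaClient, saslScramSha256 } from 'kafka-ts';

// Cluster bootstrap + SASL, per the createKafkaClient() table above.
const kafka = createKafkaClient({
    clientId: 'example-app',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
    sasl: saslScramSha256({ username: 'user', password: 'secret' }),
});

// Consumer: one callback per message; offsets are committed per batchGranularity (default: partition).
await kafka.startConsumer({
    topics: ['my-topic'],
    groupId: 'my-group',
    onMessage: async (message) => {
        console.log(message.key?.toString(), message.value?.toString());
    },
});

// Producer: key/value are Buffers; partition falls back to the key hash, then round-robin.
const producer = kafka.createProducer({ allowTopicAutoCreation: true });
await producer.send([{ topic: 'my-topic', key: Buffer.from('k'), value: Buffer.from('v') }]);
```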
package/dist/api/api-versions.d.ts ADDED
@@ -0,0 +1,9 @@
+ export declare const API_VERSIONS: import("../utils/api.js").Api<unknown, {
+     errorCode: number;
+     versions: {
+         apiKey: number;
+         minVersion: number;
+         maxVersion: number;
+     }[];
+     throttleTimeMs: number;
+ }>;
package/{src/api/api-versions.ts → dist/api/api-versions.js} RENAMED
@@ -1,7 +1,9 @@
- import { createApi } from '../utils/api.js';
- import { KafkaTSApiError } from '../utils/error.js';
-
- export const API_VERSIONS = createApi({
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.API_VERSIONS = void 0;
+ const api_js_1 = require("../utils/api.js");
+ const error_js_1 = require("../utils/error.js");
+ exports.API_VERSIONS = (0, api_js_1.createApi)({
      apiKey: 18,
      apiVersion: 2,
      request: (encoder) => encoder,
@@ -15,7 +17,8 @@ export const API_VERSIONS = createApi({
              })),
              throttleTimeMs: decoder.readInt32(),
          };
-         if (result.errorCode) throw new KafkaTSApiError(result.errorCode, null, result);
+         if (result.errorCode)
+             throw new error_js_1.KafkaTSApiError(result.errorCode, null, result);
          return result;
      },
  });
package/dist/api/create-topics.d.ts ADDED
@@ -0,0 +1,38 @@
+ export declare const CREATE_TOPICS: import("../utils/api").Api<{
+     topics: {
+         name: string;
+         numPartitions: number;
+         replicationFactor: number;
+         assignments: {
+             partitionIndex: number;
+             brokerIds: number[];
+         }[];
+         configs: {
+             name: string;
+             value: string | null;
+         }[];
+     }[];
+     timeoutMs: number;
+     validateOnly: boolean;
+ }, {
+     _tag: void;
+     throttleTimeMs: number;
+     topics: {
+         name: string | null;
+         topicId: string;
+         errorCode: number;
+         errorMessage: string | null;
+         numPartitions: number;
+         replicationFactor: number;
+         configs: {
+             name: string | null;
+             value: string | null;
+             readOnly: boolean;
+             configSource: number;
+             isSensitive: boolean;
+             _tag: void;
+         }[];
+         _tag: void;
+     }[];
+     _tag2: void;
+ }>;
package/dist/api/create-topics.js ADDED
@@ -0,0 +1,53 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CREATE_TOPICS = void 0;
+ const api_1 = require("../utils/api");
+ const error_1 = require("../utils/error");
+ exports.CREATE_TOPICS = (0, api_1.createApi)({
+     apiKey: 19,
+     apiVersion: 7,
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeCompactArray(data.topics, (encoder, topic) => encoder
+         .writeCompactString(topic.name)
+         .writeInt32(topic.numPartitions)
+         .writeInt16(topic.replicationFactor)
+         .writeCompactArray(topic.assignments, (encoder, assignment) => encoder
+         .writeInt32(assignment.partitionIndex)
+         .writeCompactArray(assignment.brokerIds, (encoder, brokerId) => encoder.writeInt32(brokerId))
+         .writeUVarInt(0))
+         .writeCompactArray(topic.configs, (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
+         .writeUVarInt(0))
+         .writeInt32(data.timeoutMs)
+         .writeBoolean(data.validateOnly)
+         .writeUVarInt(0),
+     response: (decoder) => {
+         const result = {
+             _tag: decoder.readTagBuffer(),
+             throttleTimeMs: decoder.readInt32(),
+             topics: decoder.readCompactArray((topic) => ({
+                 name: topic.readCompactString(),
+                 topicId: topic.readUUID(),
+                 errorCode: topic.readInt16(),
+                 errorMessage: topic.readCompactString(),
+                 numPartitions: topic.readInt32(),
+                 replicationFactor: topic.readInt16(),
+                 configs: topic.readCompactArray((config) => ({
+                     name: config.readCompactString(),
+                     value: config.readCompactString(),
+                     readOnly: config.readBoolean(),
+                     configSource: config.readInt8(),
+                     isSensitive: config.readBoolean(),
+                     _tag: config.readTagBuffer(),
+                 })),
+                 _tag: topic.readTagBuffer(),
+             })),
+             _tag2: decoder.readTagBuffer(),
+         };
+         result.topics.forEach((topic) => {
+             if (topic.errorCode)
+                 throw new error_1.KafkaTSApiError(topic.errorCode, topic.errorMessage, result);
+         });
+         return result;
+     },
+ });
package/dist/api/delete-topics.d.ts ADDED
@@ -0,0 +1,18 @@
+ export declare const DELETE_TOPICS: import("../utils/api").Api<{
+     topics: {
+         name: string | null;
+         topicId: string | null;
+     }[];
+     timeoutMs: number;
+ }, {
+     _tag: void;
+     throttleTimeMs: number;
+     responses: {
+         name: string | null;
+         topicId: string;
+         errorCode: number;
+         errorMessage: string | null;
+         _tag: void;
+     }[];
+     _tag2: void;
+ }>;
package/dist/api/delete-topics.js ADDED
@@ -0,0 +1,33 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.DELETE_TOPICS = void 0;
+ const api_1 = require("../utils/api");
+ const error_1 = require("../utils/error");
+ exports.DELETE_TOPICS = (0, api_1.createApi)({
+     apiKey: 20,
+     apiVersion: 6,
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeCompactArray(data.topics, (encoder, topic) => encoder.writeCompactString(topic.name).writeUUID(topic.topicId).writeUVarInt(0))
+         .writeInt32(data.timeoutMs)
+         .writeUVarInt(0),
+     response: (decoder) => {
+         const result = {
+             _tag: decoder.readTagBuffer(),
+             throttleTimeMs: decoder.readInt32(),
+             responses: decoder.readCompactArray((decoder) => ({
+                 name: decoder.readCompactString(),
+                 topicId: decoder.readUUID(),
+                 errorCode: decoder.readInt16(),
+                 errorMessage: decoder.readCompactString(),
+                 _tag: decoder.readTagBuffer(),
+             })),
+             _tag2: decoder.readTagBuffer(),
+         };
+         result.responses.forEach((response) => {
+             if (response.errorCode)
+                 throw new error_1.KafkaTSApiError(response.errorCode, response.errorMessage, result);
+         });
+         return result;
+     },
+ });
package/dist/api/fetch.d.ts ADDED
@@ -0,0 +1,84 @@
+ /// <reference types="node" />
+ export declare const enum IsolationLevel {
+     READ_UNCOMMITTED = 0,
+     READ_COMMITTED = 1
+ }
+ export type FetchResponse = Awaited<ReturnType<(typeof FETCH)['response']>>;
+ export declare const FETCH: import("../utils/api").Api<{
+     maxWaitMs: number;
+     minBytes: number;
+     maxBytes: number;
+     isolationLevel: IsolationLevel;
+     sessionId: number;
+     sessionEpoch: number;
+     topics: {
+         topicId: string;
+         partitions: {
+             partition: number;
+             currentLeaderEpoch: number;
+             fetchOffset: bigint;
+             lastFetchedEpoch: number;
+             logStartOffset: bigint;
+             partitionMaxBytes: number;
+         }[];
+     }[];
+     forgottenTopicsData: {
+         topicId: string;
+         partitions: number[];
+     }[];
+     rackId: string;
+ }, {
+     responses: {
+         partitions: {
+             records: {
+                 records: {
+                     attributes: number;
+                     timestampDelta: bigint;
+                     offsetDelta: number;
+                     key: Buffer | null;
+                     value: Buffer | null;
+                     headers: {
+                         key: Buffer | null;
+                         value: Buffer | null;
+                     }[];
+                 }[];
+                 compression: number;
+                 timestampType: string;
+                 isTransactional: boolean;
+                 isControlBatch: boolean;
+                 hasDeleteHorizonMs: boolean;
+                 baseOffset: bigint;
+                 batchLength: number;
+                 partitionLeaderEpoch: number;
+                 magic: number;
+                 crc: number;
+                 attributes: number;
+                 lastOffsetDelta: number;
+                 baseTimestamp: bigint;
+                 maxTimestamp: bigint;
+                 producerId: bigint;
+                 producerEpoch: number;
+                 baseSequence: number;
+             }[];
+             partitionIndex: number;
+             errorCode: number;
+             highWatermark: bigint;
+             lastStableOffset: bigint;
+             logStartOffset: bigint;
+             abortedTransactions: {
+                 producerId: bigint;
+                 firstOffset: bigint;
+                 _tag: void;
+             }[];
+             preferredReadReplica: number;
+             _tag: void;
+         }[];
+         topicId: string;
+         _tag: void;
+     }[];
+     _tag: void;
+     throttleTimeMs: number;
+     errorCode: number;
+     sessionId: number;
+     _tag2: void;
+ }>;
package/dist/api/fetch.js ADDED
@@ -0,0 +1,142 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FETCH = void 0;
+ const codecs_1 = require("../codecs");
+ const api_1 = require("../utils/api");
+ const decoder_1 = require("../utils/decoder");
+ const error_1 = require("../utils/error");
+ exports.FETCH = (0, api_1.createApi)({
+     apiKey: 1,
+     apiVersion: 15,
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeInt32(data.maxWaitMs)
+         .writeInt32(data.minBytes)
+         .writeInt32(data.maxBytes)
+         .writeInt8(data.isolationLevel)
+         .writeInt32(data.sessionId)
+         .writeInt32(data.sessionEpoch)
+         .writeCompactArray(data.topics, (encoder, topic) => encoder
+         .writeUUID(topic.topicId)
+         .writeCompactArray(topic.partitions, (encoder, partition) => encoder
+         .writeInt32(partition.partition)
+         .writeInt32(partition.currentLeaderEpoch)
+         .writeInt64(partition.fetchOffset)
+         .writeInt32(partition.lastFetchedEpoch)
+         .writeInt64(partition.logStartOffset)
+         .writeInt32(partition.partitionMaxBytes)
+         .writeUVarInt(0))
+         .writeUVarInt(0))
+         .writeCompactArray(data.forgottenTopicsData, (encoder, forgottenTopic) => encoder
+         .writeUUID(forgottenTopic.topicId)
+         .writeCompactArray(forgottenTopic.partitions, (encoder, partition) => encoder.writeInt32(partition))
+         .writeUVarInt(0))
+         .writeCompactString(data.rackId)
+         .writeUVarInt(0),
+     response: async (decoder) => {
+         const result = {
+             _tag: decoder.readTagBuffer(),
+             throttleTimeMs: decoder.readInt32(),
+             errorCode: decoder.readInt16(),
+             sessionId: decoder.readInt32(),
+             responses: decoder.readCompactArray((response) => ({
+                 topicId: response.readUUID(),
+                 partitions: response.readCompactArray((partition) => ({
+                     partitionIndex: partition.readInt32(),
+                     errorCode: partition.readInt16(),
+                     highWatermark: partition.readInt64(),
+                     lastStableOffset: partition.readInt64(),
+                     logStartOffset: partition.readInt64(),
+                     abortedTransactions: partition.readCompactArray((abortedTransaction) => ({
+                         producerId: abortedTransaction.readInt64(),
+                         firstOffset: abortedTransaction.readInt64(),
+                         _tag: abortedTransaction.readTagBuffer(),
+                     })),
+                     preferredReadReplica: partition.readInt32(),
+                     records: decodeRecordBatch(partition),
+                     _tag: partition.readTagBuffer(),
+                 })),
+                 _tag: response.readTagBuffer(),
+             })),
+             _tag2: decoder.readTagBuffer(),
+         };
+         if (result.errorCode)
+             throw new error_1.KafkaTSApiError(result.errorCode, null, result);
+         result.responses.forEach((response) => {
+             response.partitions.forEach((partition) => {
+                 if (partition.errorCode)
+                     throw new error_1.KafkaTSApiError(partition.errorCode, null, result);
+             });
+         });
+         const decompressedResponses = await Promise.all(result.responses.map(async (response) => ({
+             ...response,
+             partitions: await Promise.all(response.partitions.map(async (partition) => ({
+                 ...partition,
+                 records: await Promise.all(partition.records.map(async ({ recordsLength, compressedRecords, ...record }) => {
+                     const { decompress } = (0, codecs_1.findCodec)(record.compression);
+                     const decompressedRecords = await decompress(compressedRecords);
+                     const decompressedDecoder = new decoder_1.Decoder(Buffer.concat([recordsLength, decompressedRecords]));
+                     return { ...record, records: decodeRecord(decompressedDecoder) };
+                 })),
+             }))),
+         })));
+         return { ...result, responses: decompressedResponses };
+     },
+ });
+ const decodeRecordBatch = (decoder) => {
+     const size = decoder.readUVarInt() - 1;
+     if (size <= 0) {
+         return [];
+     }
+     const recordBatchDecoder = new decoder_1.Decoder(decoder.read(size));
+     const results = [];
+     while (recordBatchDecoder.getBufferLength() > recordBatchDecoder.getOffset() + 12) {
+         const baseOffset = recordBatchDecoder.readInt64();
+         const batchLength = recordBatchDecoder.readInt32();
+         if (!batchLength) {
+             continue;
+         }
+         const batchDecoder = new decoder_1.Decoder(recordBatchDecoder.read(batchLength));
+         const result = {
+             baseOffset,
+             batchLength,
+             partitionLeaderEpoch: batchDecoder.readInt32(),
+             magic: batchDecoder.readInt8(),
+             crc: batchDecoder.readUInt32(),
+             attributes: batchDecoder.readInt16(),
+             lastOffsetDelta: batchDecoder.readInt32(),
+             baseTimestamp: batchDecoder.readInt64(),
+             maxTimestamp: batchDecoder.readInt64(),
+             producerId: batchDecoder.readInt64(),
+             producerEpoch: batchDecoder.readInt16(),
+             baseSequence: batchDecoder.readInt32(),
+             recordsLength: batchDecoder.read(4),
+             compressedRecords: batchDecoder.read(),
+         };
+         const compression = result.attributes & 0x07;
+         const timestampType = (result.attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';
+         const isTransactional = !!((result.attributes & 0x10) >> 4);
+         const isControlBatch = !!((result.attributes & 0x20) >> 5);
+         const hasDeleteHorizonMs = !!((result.attributes & 0x40) >> 6);
+         results.push({
+             ...result,
+             compression,
+             timestampType,
+             isTransactional,
+             isControlBatch,
+             hasDeleteHorizonMs,
+         });
+     }
+     return results;
+ };
+ const decodeRecord = (decoder) => decoder.readRecords((record) => ({
+     attributes: record.readInt8(),
+     timestampDelta: record.readVarLong(),
+     offsetDelta: record.readVarInt(),
+     key: record.readVarIntBuffer(),
+     value: record.readVarIntBuffer(),
+     headers: record.readVarIntArray((header) => ({
+         key: header.readVarIntBuffer(),
+         value: header.readVarIntBuffer(),
+     })),
+ }));
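
The `decodeRecordBatch` helper in `dist/api/fetch.js` above unpacks several flags from the 16-bit record-batch `attributes` field. A standalone sketch of the same bit layout, with a made-up sample value, is shown below.

```ts
// Mirrors the flag extraction in dist/api/fetch.js; the sample value is illustrative.
const attributes = 0x11; // binary 0001 0001

const compression = attributes & 0x07;                                            // bits 0-2: compression codec id -> 1
const timestampType = (attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';  // bit 3 -> 'CreateTime'
const isTransactional = !!((attributes & 0x10) >> 4);                             // bit 4 -> true
const isControlBatch = !!((attributes & 0x20) >> 5);                              // bit 5 -> false
const hasDeleteHorizonMs = !!((attributes & 0x40) >> 6);                          // bit 6 -> false

console.log({ compression, timestampType, isTransactional, isControlBatch, hasDeleteHorizonMs });
```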
package/dist/api/find-coordinator.d.ts ADDED
@@ -0,0 +1,21 @@
+ export declare const KEY_TYPE: {
+     GROUP: number;
+     TRANSACTION: number;
+ };
+ export declare const FIND_COORDINATOR: import("../utils/api").Api<{
+     keyType: number;
+     keys: string[];
+ }, {
+     _tag: void;
+     throttleTimeMs: number;
+     coordinators: {
+         key: string | null;
+         nodeId: number;
+         host: string;
+         port: number;
+         errorCode: number;
+         errorMessage: string | null;
+         _tag: void;
+     }[];
+     _tag2: void;
+ }>;
package/{src/api/find-coordinator.ts → dist/api/find-coordinator.js} RENAMED
@@ -1,20 +1,20 @@
- import { createApi } from '../utils/api';
- import { KafkaTSApiError } from '../utils/error';
-
- export const KEY_TYPE = {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FIND_COORDINATOR = exports.KEY_TYPE = void 0;
+ const api_1 = require("../utils/api");
+ const error_1 = require("../utils/error");
+ exports.KEY_TYPE = {
      GROUP: 0,
      TRANSACTION: 1,
  };
-
- export const FIND_COORDINATOR = createApi({
+ exports.FIND_COORDINATOR = (0, api_1.createApi)({
      apiKey: 10,
      apiVersion: 4,
-     request: (encoder, data: { keyType: number; keys: string[] }) =>
-         encoder
-             .writeUVarInt(0)
-             .writeInt8(data.keyType)
-             .writeCompactArray(data.keys, (encoder, key) => encoder.writeCompactString(key))
-             .writeUVarInt(0),
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeInt8(data.keyType)
+         .writeCompactArray(data.keys, (encoder, key) => encoder.writeCompactString(key))
+         .writeUVarInt(0),
      response: (decoder) => {
          const result = {
              _tag: decoder.readTagBuffer(),
@@ -22,7 +22,7 @@ export const FIND_COORDINATOR = createApi({
              coordinators: decoder.readCompactArray((decoder) => ({
                  key: decoder.readCompactString(),
                  nodeId: decoder.readInt32(),
-                 host: decoder.readCompactString()!,
+                 host: decoder.readCompactString(),
                  port: decoder.readInt32(),
                  errorCode: decoder.readInt16(),
                  errorMessage: decoder.readCompactString(),
@@ -32,7 +32,7 @@ export const FIND_COORDINATOR = createApi({
          };
          result.coordinators.forEach((coordinator) => {
              if (coordinator.errorCode)
-                 throw new KafkaTSApiError(coordinator.errorCode, coordinator.errorMessage, result);
+                 throw new error_1.KafkaTSApiError(coordinator.errorCode, coordinator.errorMessage, result);
          });
          return result;
      },
package/dist/api/heartbeat.d.ts ADDED
@@ -0,0 +1,11 @@
+ export declare const HEARTBEAT: import("../utils/api").Api<{
+     groupId: string;
+     generationId: number;
+     memberId: string;
+     groupInstanceId: string | null;
+ }, {
+     _tag: void;
+     throttleTimeMs: number;
+     errorCode: number;
+     _tag2: void;
+ }>;
package/dist/api/heartbeat.js ADDED
@@ -0,0 +1,27 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.HEARTBEAT = void 0;
+ const api_1 = require("../utils/api");
+ const error_1 = require("../utils/error");
+ exports.HEARTBEAT = (0, api_1.createApi)({
+     apiKey: 12,
+     apiVersion: 4,
+     request: (encoder, data) => encoder
+         .writeUVarInt(0)
+         .writeCompactString(data.groupId)
+         .writeInt32(data.generationId)
+         .writeCompactString(data.memberId)
+         .writeCompactString(data.groupInstanceId)
+         .writeUVarInt(0),
+     response: (decoder) => {
+         const result = {
+             _tag: decoder.readTagBuffer(),
+             throttleTimeMs: decoder.readInt32(),
+             errorCode: decoder.readInt16(),
+             _tag2: decoder.readTagBuffer(),
+         };
+         if (result.errorCode)
+             throw new error_1.KafkaTSApiError(result.errorCode, null, result);
+         return result;
+     },
+ });