kafka-ts 0.0.3-beta → 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +68 -8
- package/dist/api/api-versions.d.ts +9 -0
- package/dist/api/api-versions.js +24 -0
- package/dist/api/create-topics.d.ts +38 -0
- package/dist/api/create-topics.js +53 -0
- package/dist/api/delete-topics.d.ts +18 -0
- package/dist/api/delete-topics.js +33 -0
- package/dist/api/fetch.d.ts +84 -0
- package/dist/api/fetch.js +142 -0
- package/dist/api/find-coordinator.d.ts +21 -0
- package/dist/api/find-coordinator.js +39 -0
- package/dist/api/heartbeat.d.ts +11 -0
- package/dist/api/heartbeat.js +27 -0
- package/dist/api/index.d.ts +576 -0
- package/dist/api/index.js +165 -0
- package/dist/api/init-producer-id.d.ts +13 -0
- package/dist/api/init-producer-id.js +29 -0
- package/dist/api/join-group.d.ts +34 -0
- package/dist/api/join-group.js +51 -0
- package/dist/api/leave-group.d.ts +19 -0
- package/dist/api/leave-group.js +39 -0
- package/dist/api/list-offsets.d.ts +29 -0
- package/dist/api/list-offsets.js +48 -0
- package/dist/api/metadata.d.ts +40 -0
- package/dist/api/metadata.js +58 -0
- package/dist/api/offset-commit.d.ts +28 -0
- package/dist/api/offset-commit.js +48 -0
- package/dist/api/offset-fetch.d.ts +31 -0
- package/dist/api/offset-fetch.js +55 -0
- package/dist/api/produce.d.ts +54 -0
- package/dist/api/produce.js +126 -0
- package/dist/api/sasl-authenticate.d.ts +11 -0
- package/dist/api/sasl-authenticate.js +23 -0
- package/dist/api/sasl-handshake.d.ts +6 -0
- package/dist/api/sasl-handshake.js +19 -0
- package/dist/api/sync-group.d.ts +24 -0
- package/dist/api/sync-group.js +36 -0
- package/dist/auth/index.d.ts +2 -0
- package/dist/auth/index.js +8 -0
- package/dist/auth/plain.d.ts +5 -0
- package/dist/auth/plain.js +12 -0
- package/dist/auth/scram.d.ts +9 -0
- package/dist/auth/scram.js +40 -0
- package/dist/broker.d.ts +30 -0
- package/dist/broker.js +55 -0
- package/dist/client.d.ts +23 -0
- package/dist/client.js +36 -0
- package/dist/cluster.d.ts +27 -0
- package/dist/cluster.js +70 -0
- package/dist/cluster.test.d.ts +1 -0
- package/dist/cluster.test.js +343 -0
- package/dist/codecs/gzip.d.ts +2 -0
- package/dist/codecs/gzip.js +8 -0
- package/dist/codecs/index.d.ts +2 -0
- package/dist/codecs/index.js +17 -0
- package/dist/codecs/none.d.ts +2 -0
- package/dist/codecs/none.js +7 -0
- package/dist/codecs/types.d.ts +5 -0
- package/dist/codecs/types.js +2 -0
- package/dist/connection.d.ts +26 -0
- package/dist/connection.js +175 -0
- package/dist/consumer/consumer-group.d.ts +41 -0
- package/dist/consumer/consumer-group.js +215 -0
- package/dist/consumer/consumer-metadata.d.ts +7 -0
- package/dist/consumer/consumer-metadata.js +14 -0
- package/dist/consumer/consumer.d.ts +44 -0
- package/dist/consumer/consumer.js +225 -0
- package/dist/consumer/fetch-manager.d.ts +33 -0
- package/dist/consumer/fetch-manager.js +140 -0
- package/dist/consumer/fetcher.d.ts +25 -0
- package/dist/consumer/fetcher.js +64 -0
- package/dist/consumer/offset-manager.d.ts +22 -0
- package/dist/consumer/offset-manager.js +66 -0
- package/dist/consumer/processor.d.ts +19 -0
- package/dist/consumer/processor.js +59 -0
- package/dist/distributors/assignments-to-replicas.d.ts +16 -0
- package/dist/distributors/assignments-to-replicas.js +59 -0
- package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
- package/dist/distributors/assignments-to-replicas.test.js +40 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/distributors/partitioner.d.ts +7 -0
- package/dist/distributors/partitioner.js +23 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.js +26 -0
- package/dist/metadata.d.ts +24 -0
- package/dist/metadata.js +106 -0
- package/dist/producer/producer.d.ts +24 -0
- package/dist/producer/producer.js +131 -0
- package/dist/types.d.ts +11 -0
- package/dist/types.js +2 -0
- package/dist/utils/api.d.ts +9 -0
- package/dist/utils/api.js +5 -0
- package/dist/utils/crypto.d.ts +8 -0
- package/dist/utils/crypto.js +18 -0
- package/dist/utils/decoder.d.ts +30 -0
- package/dist/utils/decoder.js +152 -0
- package/dist/utils/delay.d.ts +1 -0
- package/dist/utils/delay.js +5 -0
- package/dist/utils/encoder.d.ts +28 -0
- package/dist/utils/encoder.js +125 -0
- package/dist/utils/error.d.ts +11 -0
- package/dist/utils/error.js +27 -0
- package/dist/utils/logger.d.ts +9 -0
- package/dist/utils/logger.js +32 -0
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/murmur2.d.ts +3 -0
- package/dist/utils/murmur2.js +40 -0
- package/dist/utils/retrier.d.ts +10 -0
- package/dist/utils/retrier.js +22 -0
- package/dist/utils/tracer.d.ts +5 -0
- package/dist/utils/tracer.js +39 -0
- package/package.json +11 -2
- package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap} +329 -26
- package/src/api/fetch.ts +84 -29
- package/src/api/index.ts +3 -1
- package/src/api/metadata.ts +1 -1
- package/src/api/offset-commit.ts +1 -1
- package/src/api/offset-fetch.ts +1 -5
- package/src/api/produce.ts +15 -18
- package/src/auth/index.ts +2 -0
- package/src/auth/plain.ts +10 -0
- package/src/auth/scram.ts +52 -0
- package/src/broker.ts +7 -9
- package/src/client.ts +2 -2
- package/src/cluster.test.ts +16 -14
- package/src/cluster.ts +38 -40
- package/src/codecs/gzip.ts +9 -0
- package/src/codecs/index.ts +16 -0
- package/src/codecs/none.ts +6 -0
- package/src/codecs/types.ts +4 -0
- package/src/connection.ts +31 -17
- package/src/consumer/consumer-group.ts +43 -23
- package/src/consumer/consumer.ts +64 -43
- package/src/consumer/fetch-manager.ts +43 -53
- package/src/consumer/fetcher.ts +20 -13
- package/src/consumer/offset-manager.ts +18 -7
- package/src/consumer/processor.ts +14 -8
- package/src/distributors/assignments-to-replicas.ts +1 -3
- package/src/distributors/partitioner.ts +27 -0
- package/src/index.ts +7 -2
- package/src/metadata.ts +4 -0
- package/src/producer/producer.ts +22 -12
- package/src/types.ts +3 -3
- package/src/utils/api.ts +1 -1
- package/src/utils/crypto.ts +15 -0
- package/src/utils/decoder.ts +11 -5
- package/src/utils/encoder.ts +29 -22
- package/src/utils/logger.ts +37 -0
- package/src/utils/murmur2.ts +44 -0
- package/src/utils/tracer.ts +40 -22
- package/.github/workflows/release.yml +0 -17
- package/certs/ca.crt +0 -29
- package/certs/ca.key +0 -52
- package/certs/ca.srl +0 -1
- package/certs/kafka.crt +0 -29
- package/certs/kafka.csr +0 -26
- package/certs/kafka.key +0 -52
- package/certs/kafka.keystore.jks +0 -0
- package/certs/kafka.truststore.jks +0 -0
- package/docker-compose.yml +0 -104
- package/examples/package-lock.json +0 -31
- package/examples/package.json +0 -14
- package/examples/src/client.ts +0 -9
- package/examples/src/consumer.ts +0 -18
- package/examples/src/create-topic.ts +0 -44
- package/examples/src/producer.ts +0 -24
- package/examples/src/replicator.ts +0 -25
- package/examples/src/utils/delay.ts +0 -1
- package/examples/src/utils/json.ts +0 -1
- package/examples/tsconfig.json +0 -7
- package/log4j.properties +0 -95
- package/scripts/generate-certs.sh +0 -24
- package/src/utils/debug.ts +0 -9
package/README.md
CHANGED
```diff
@@ -2,7 +2,7 @@
 
 **KafkaTS** is an Apache Kafka client library for Node.js. It provides both a low-level API for communicating directly with the Apache Kafka cluster and high-level APIs for publishing and subscribing to Kafka topics.
 
-**
+**Supported Kafka versions:** 3.6 and later
 
 ## Installation
 
@@ -92,10 +92,70 @@ The existing high-level libraries (e.g. kafkajs) are missing a few crucial features
 - **Consuming messages without consumer groups** - When you don't need the consumer to track the partition offsets, you can simply create a consumer without a groupId and always start consuming messages either from the beginning or from the latest partition offset.
 - **Low-level API requests** - It's possible to communicate directly with the Kafka cluster using the Kafka API protocol.
 
-##
-
-
-
-
-
-
+## Configuration
+
+### `createKafkaClient()`
+
+| Name             | Type                   | Required | Default | Description                                          |
+| ---------------- | ---------------------- | -------- | ------- | ---------------------------------------------------- |
+| clientId         | string                 | false    | _null_  | The client id used for all requests.                 |
+| bootstrapServers | TcpSocketConnectOpts[] | true     |         | List of kafka brokers for initial cluster discovery. |
+| sasl             | SASLProvider           | false    |         | SASL provider.                                       |
+| ssl              | TLSSocketOptions       | false    |         | SSL configuration.                                   |
+
+#### Supported SASL mechanisms
+
+- PLAIN: `saslPlain({ username, password })`
+- SCRAM-SHA-256: `saslScramSha256({ username, password })`
+- SCRAM-SHA-512: `saslScramSha512({ username, password })`
+
+Custom SASL mechanisms can be implemented following the `SASLProvider` interface. See [src/auth](./src/auth) for examples.
+
+### `kafka.startConsumer()`
+
+| Name                   | Type                                   | Required | Default                         | Description |
+| ---------------------- | -------------------------------------- | -------- | ------------------------------- | ----------- |
+| topics                 | string[]                               | true     |                                 | List of topics to subscribe to |
+| groupId                | string                                 | false    | _null_                          | Consumer group id |
+| groupInstanceId        | string                                 | false    | _null_                          | Consumer group instance id |
+| rackId                 | string                                 | false    | _null_                          | Rack id |
+| isolationLevel         | IsolationLevel                         | false    | IsolationLevel.READ_UNCOMMITTED | Isolation level |
+| sessionTimeoutMs       | number                                 | false    | 30000                           | Session timeout in milliseconds |
+| rebalanceTimeoutMs     | number                                 | false    | 60000                           | Rebalance timeout in milliseconds |
+| maxWaitMs              | number                                 | false    | 5000                            | Fetch long-poll timeout in milliseconds |
+| minBytes               | number                                 | false    | 1                               | Minimum number of bytes to wait for before returning a fetch response |
+| maxBytes               | number                                 | false    | 1_048_576                       | Maximum number of bytes to return in the fetch response |
+| partitionMaxBytes      | number                                 | false    | 1_048_576                       | Maximum number of bytes to return per partition in the fetch response |
+| allowTopicAutoCreation | boolean                                | false    | false                           | Allow kafka to auto-create the topic when it doesn't exist |
+| fromBeginning          | boolean                                | false    | false                           | Start consuming from the beginning of the topic |
+| batchGranularity       | BatchGranularity                       | false    | partition                       | Controls how messages from a fetch response are split into batches, and how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single batch<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assigned topics and partitions |
+| concurrency            | number                                 | false    | 1                               | How many batches to process concurrently |
+| onMessage              | (message: Message) => Promise<unknown> | true     |                                 | Callback executed on every message |
+| onBatch                | (batch: Message[]) => Promise<unknown> | true     |                                 | Callback executed on every batch of messages (based on **batchGranularity**) |
+
+### `kafka.createProducer()`
+
+| Name                   | Type        | Required | Default            | Description |
+| ---------------------- | ----------- | -------- | ------------------ | ----------- |
+| allowTopicAutoCreation | boolean     | false    | false              | Allow kafka to auto-create the topic when it doesn't exist |
+| partitioner            | Partitioner | false    | defaultPartitioner | Custom partitioner function. By default, a Java-compatible partitioner is used. |
+
+### `producer.send(messages: Message[])`
+
+<!-- export type Message = {
+    topic: string;
+    partition?: number;
+    timestamp?: bigint;
+    key?: Buffer | null;
+    value: Buffer | null;
+    headers?: Record<string, string>;
+}; -->
+
+| Name      | Type                   | Required | Default | Description |
+| --------- | ---------------------- | -------- | ------- | ----------- |
+| topic     | string                 | true     |         | Topic to send the message to |
+| partition | number                 | false    | _null_  | Partition to send the message to. By default, partitioned by key; if the key is also missing, the partition is assigned round-robin |
+| timestamp | bigint                 | false    | _null_  | Message timestamp in milliseconds |
+| key       | Buffer \| null         | false    | _null_  | Message key |
+| value     | Buffer \| null         | true     |         | Message value |
+| headers   | Record<string, string> | false    | _null_  | Message headers |
```
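The configuration tables above describe the surface added to the README in this release. Below is a minimal usage sketch based only on the names in those tables (`createKafkaClient`, `kafka.startConsumer`, `kafka.createProducer`, `producer.send`, `saslScramSha512`); the import path, the exact exports, and whether the calls are async are assumptions, since they are not visible in this diff. The tables mark both `onMessage` and `onBatch` as required, but this sketch only wires `onMessage`.

```ts
import { createKafkaClient, saslScramSha512 } from 'kafka-ts'; // import path/exports assumed

const kafka = createKafkaClient({
    clientId: 'my-service',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
    sasl: saslScramSha512({ username: 'admin', password: 'secret' }),
});

// Consume with a consumer group, starting from the beginning of the topic.
await kafka.startConsumer({
    topics: ['my-topic'],
    groupId: 'my-group',
    fromBeginning: true,
    onMessage: async (message) => {
        console.log(message.key?.toString(), message.value?.toString());
    },
});

// Produce a keyed message; with no partition set it is partitioned by key.
const producer = kafka.createProducer({ allowTopicAutoCreation: true });
await producer.send([
    { topic: 'my-topic', key: Buffer.from('user-1'), value: Buffer.from('hello'), headers: { source: 'example' } },
]);
```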
package/dist/api/api-versions.js
ADDED

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.API_VERSIONS = void 0;
const api_js_1 = require("../utils/api.js");
const error_js_1 = require("../utils/error.js");
exports.API_VERSIONS = (0, api_js_1.createApi)({
    apiKey: 18,
    apiVersion: 2,
    request: (encoder) => encoder,
    response: (decoder) => {
        const result = {
            errorCode: decoder.readInt16(),
            versions: decoder.readArray((version) => ({
                apiKey: version.readInt16(),
                minVersion: version.readInt16(),
                maxVersion: version.readInt16(),
            })),
            throttleTimeMs: decoder.readInt32(),
        };
        if (result.errorCode)
            throw new error_js_1.KafkaTSApiError(result.errorCode, null, result);
        return result;
    },
});
```
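API_VERSIONS (apiKey 18) returns the broker's supported version range for every API key, which is how a client can tell whether the versions it pins are available. The helper below is hypothetical and not part of kafka-ts; it only illustrates how the decoded `versions` array above could be checked against a pinned version such as FETCH v15 (see fetch.js later in this diff).

```ts
// Hypothetical helper (not part of the package): check a pinned API version
// against the ranges decoded by the API_VERSIONS response above.
type ApiVersionRange = { apiKey: number; minVersion: number; maxVersion: number };

const supportsVersion = (versions: ApiVersionRange[], apiKey: number, version: number): boolean => {
    const range = versions.find((v) => v.apiKey === apiKey);
    return !!range && version >= range.minVersion && version <= range.maxVersion;
};

// Example: FETCH is apiKey 1 and this release pins it to apiVersion 15,
// so supportsVersion(response.versions, 1, 15) must be true for the broker.
```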
package/dist/api/create-topics.d.ts
ADDED

```ts
export declare const CREATE_TOPICS: import("../utils/api").Api<{
    topics: {
        name: string;
        numPartitions: number;
        replicationFactor: number;
        assignments: {
            partitionIndex: number;
            brokerIds: number[];
        }[];
        configs: {
            name: string;
            value: string | null;
        }[];
    }[];
    timeoutMs: number;
    validateOnly: boolean;
}, {
    _tag: void;
    throttleTimeMs: number;
    topics: {
        name: string | null;
        topicId: string;
        errorCode: number;
        errorMessage: string | null;
        numPartitions: number;
        replicationFactor: number;
        configs: {
            name: string | null;
            value: string | null;
            readOnly: boolean;
            configSource: number;
            isSensitive: boolean;
            _tag: void;
        }[];
        _tag: void;
    }[];
    _tag2: void;
}>;
```
package/dist/api/create-topics.js
ADDED

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.CREATE_TOPICS = void 0;
const api_1 = require("../utils/api");
const error_1 = require("../utils/error");
exports.CREATE_TOPICS = (0, api_1.createApi)({
    apiKey: 19,
    apiVersion: 7,
    request: (encoder, data) => encoder
        .writeUVarInt(0)
        .writeCompactArray(data.topics, (encoder, topic) => encoder
            .writeCompactString(topic.name)
            .writeInt32(topic.numPartitions)
            .writeInt16(topic.replicationFactor)
            .writeCompactArray(topic.assignments, (encoder, assignment) => encoder
                .writeInt32(assignment.partitionIndex)
                .writeCompactArray(assignment.brokerIds, (encoder, brokerId) => encoder.writeInt32(brokerId))
                .writeUVarInt(0))
            .writeCompactArray(topic.configs, (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
            .writeUVarInt(0))
        .writeInt32(data.timeoutMs)
        .writeBoolean(data.validateOnly)
        .writeUVarInt(0),
    response: (decoder) => {
        const result = {
            _tag: decoder.readTagBuffer(),
            throttleTimeMs: decoder.readInt32(),
            topics: decoder.readCompactArray((topic) => ({
                name: topic.readCompactString(),
                topicId: topic.readUUID(),
                errorCode: topic.readInt16(),
                errorMessage: topic.readCompactString(),
                numPartitions: topic.readInt32(),
                replicationFactor: topic.readInt16(),
                configs: topic.readCompactArray((config) => ({
                    name: config.readCompactString(),
                    value: config.readCompactString(),
                    readOnly: config.readBoolean(),
                    configSource: config.readInt8(),
                    isSensitive: config.readBoolean(),
                    _tag: config.readTagBuffer(),
                })),
                _tag: topic.readTagBuffer(),
            })),
            _tag2: decoder.readTagBuffer(),
        };
        result.topics.forEach((topic) => {
            if (topic.errorCode)
                throw new error_1.KafkaTSApiError(topic.errorCode, topic.errorMessage, result);
        });
        return result;
    },
});
```
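For reference, a request payload shaped like the `CREATE_TOPICS` request type above; the values are illustrative, and how the payload is dispatched to the controller broker is handled elsewhere in the package.

```ts
// Illustrative payload for CREATE_TOPICS (apiKey 19, v7); field names follow
// the request type in create-topics.d.ts above.
const createTopicsRequest = {
    topics: [
        {
            name: 'my-topic',
            numPartitions: 3,
            replicationFactor: 2,
            assignments: [],                                         // empty: let Kafka place replicas
            configs: [{ name: 'retention.ms', value: '604800000' }], // 7 days
        },
    ],
    timeoutMs: 30000,
    validateOnly: false,
};
```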
package/dist/api/delete-topics.d.ts
ADDED

```ts
export declare const DELETE_TOPICS: import("../utils/api").Api<{
    topics: {
        name: string | null;
        topicId: string | null;
    }[];
    timeoutMs: number;
}, {
    _tag: void;
    throttleTimeMs: number;
    responses: {
        name: string | null;
        topicId: string;
        errorCode: number;
        errorMessage: string | null;
        _tag: void;
    }[];
    _tag2: void;
}>;
```
package/dist/api/delete-topics.js
ADDED

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.DELETE_TOPICS = void 0;
const api_1 = require("../utils/api");
const error_1 = require("../utils/error");
exports.DELETE_TOPICS = (0, api_1.createApi)({
    apiKey: 20,
    apiVersion: 6,
    request: (encoder, data) => encoder
        .writeUVarInt(0)
        .writeCompactArray(data.topics, (encoder, topic) => encoder.writeCompactString(topic.name).writeUUID(topic.topicId).writeUVarInt(0))
        .writeInt32(data.timeoutMs)
        .writeUVarInt(0),
    response: (decoder) => {
        const result = {
            _tag: decoder.readTagBuffer(),
            throttleTimeMs: decoder.readInt32(),
            responses: decoder.readCompactArray((decoder) => ({
                name: decoder.readCompactString(),
                topicId: decoder.readUUID(),
                errorCode: decoder.readInt16(),
                errorMessage: decoder.readCompactString(),
                _tag: decoder.readTagBuffer(),
            })),
            _tag2: decoder.readTagBuffer(),
        };
        result.responses.forEach((response) => {
            if (response.errorCode)
                throw new error_1.KafkaTSApiError(response.errorCode, response.errorMessage, result);
        });
        return result;
    },
});
```
package/dist/api/fetch.d.ts
ADDED

```ts
/// <reference types="node" />
export declare const enum IsolationLevel {
    READ_UNCOMMITTED = 0,
    READ_COMMITTED = 1
}
export type FetchResponse = Awaited<ReturnType<(typeof FETCH)['response']>>;
export declare const FETCH: import("../utils/api").Api<{
    maxWaitMs: number;
    minBytes: number;
    maxBytes: number;
    isolationLevel: IsolationLevel;
    sessionId: number;
    sessionEpoch: number;
    topics: {
        topicId: string;
        partitions: {
            partition: number;
            currentLeaderEpoch: number;
            fetchOffset: bigint;
            lastFetchedEpoch: number;
            logStartOffset: bigint;
            partitionMaxBytes: number;
        }[];
    }[];
    forgottenTopicsData: {
        topicId: string;
        partitions: number[];
    }[];
    rackId: string;
}, {
    responses: {
        partitions: {
            records: {
                records: {
                    attributes: number;
                    timestampDelta: bigint;
                    offsetDelta: number;
                    key: Buffer | null;
                    value: Buffer | null;
                    headers: {
                        key: Buffer | null;
                        value: Buffer | null;
                    }[];
                }[];
                compression: number;
                timestampType: string;
                isTransactional: boolean;
                isControlBatch: boolean;
                hasDeleteHorizonMs: boolean;
                baseOffset: bigint;
                batchLength: number;
                partitionLeaderEpoch: number;
                magic: number;
                crc: number;
                attributes: number;
                lastOffsetDelta: number;
                baseTimestamp: bigint;
                maxTimestamp: bigint;
                producerId: bigint;
                producerEpoch: number;
                baseSequence: number;
            }[];
            partitionIndex: number;
            errorCode: number;
            highWatermark: bigint;
            lastStableOffset: bigint;
            logStartOffset: bigint;
            abortedTransactions: {
                producerId: bigint;
                firstOffset: bigint;
                _tag: void;
            }[];
            preferredReadReplica: number;
            _tag: void;
        }[];
        topicId: string;
        _tag: void;
    }[];
    _tag: void;
    throttleTimeMs: number;
    errorCode: number;
    sessionId: number;
    _tag2: void;
}>;
```
package/dist/api/fetch.js
ADDED

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.FETCH = void 0;
const codecs_1 = require("../codecs");
const api_1 = require("../utils/api");
const decoder_1 = require("../utils/decoder");
const error_1 = require("../utils/error");
exports.FETCH = (0, api_1.createApi)({
    apiKey: 1,
    apiVersion: 15,
    request: (encoder, data) => encoder
        .writeUVarInt(0)
        .writeInt32(data.maxWaitMs)
        .writeInt32(data.minBytes)
        .writeInt32(data.maxBytes)
        .writeInt8(data.isolationLevel)
        .writeInt32(data.sessionId)
        .writeInt32(data.sessionEpoch)
        .writeCompactArray(data.topics, (encoder, topic) => encoder
            .writeUUID(topic.topicId)
            .writeCompactArray(topic.partitions, (encoder, partition) => encoder
                .writeInt32(partition.partition)
                .writeInt32(partition.currentLeaderEpoch)
                .writeInt64(partition.fetchOffset)
                .writeInt32(partition.lastFetchedEpoch)
                .writeInt64(partition.logStartOffset)
                .writeInt32(partition.partitionMaxBytes)
                .writeUVarInt(0))
            .writeUVarInt(0))
        .writeCompactArray(data.forgottenTopicsData, (encoder, forgottenTopic) => encoder
            .writeUUID(forgottenTopic.topicId)
            .writeCompactArray(forgottenTopic.partitions, (encoder, partition) => encoder.writeInt32(partition))
            .writeUVarInt(0))
        .writeCompactString(data.rackId)
        .writeUVarInt(0),
    response: async (decoder) => {
        const result = {
            _tag: decoder.readTagBuffer(),
            throttleTimeMs: decoder.readInt32(),
            errorCode: decoder.readInt16(),
            sessionId: decoder.readInt32(),
            responses: decoder.readCompactArray((response) => ({
                topicId: response.readUUID(),
                partitions: response.readCompactArray((partition) => ({
                    partitionIndex: partition.readInt32(),
                    errorCode: partition.readInt16(),
                    highWatermark: partition.readInt64(),
                    lastStableOffset: partition.readInt64(),
                    logStartOffset: partition.readInt64(),
                    abortedTransactions: partition.readCompactArray((abortedTransaction) => ({
                        producerId: abortedTransaction.readInt64(),
                        firstOffset: abortedTransaction.readInt64(),
                        _tag: abortedTransaction.readTagBuffer(),
                    })),
                    preferredReadReplica: partition.readInt32(),
                    records: decodeRecordBatch(partition),
                    _tag: partition.readTagBuffer(),
                })),
                _tag: response.readTagBuffer(),
            })),
            _tag2: decoder.readTagBuffer(),
        };
        if (result.errorCode)
            throw new error_1.KafkaTSApiError(result.errorCode, null, result);
        result.responses.forEach((response) => {
            response.partitions.forEach((partition) => {
                if (partition.errorCode)
                    throw new error_1.KafkaTSApiError(partition.errorCode, null, result);
            });
        });
        const decompressedResponses = await Promise.all(result.responses.map(async (response) => ({
            ...response,
            partitions: await Promise.all(response.partitions.map(async (partition) => ({
                ...partition,
                records: await Promise.all(partition.records.map(async ({ recordsLength, compressedRecords, ...record }) => {
                    const { decompress } = (0, codecs_1.findCodec)(record.compression);
                    const decompressedRecords = await decompress(compressedRecords);
                    const decompressedDecoder = new decoder_1.Decoder(Buffer.concat([recordsLength, decompressedRecords]));
                    return { ...record, records: decodeRecord(decompressedDecoder) };
                })),
            }))),
        })));
        return { ...result, responses: decompressedResponses };
    },
});
const decodeRecordBatch = (decoder) => {
    const size = decoder.readUVarInt() - 1;
    if (size <= 0) {
        return [];
    }
    const recordBatchDecoder = new decoder_1.Decoder(decoder.read(size));
    const results = [];
    while (recordBatchDecoder.getBufferLength() > recordBatchDecoder.getOffset() + 12) {
        const baseOffset = recordBatchDecoder.readInt64();
        const batchLength = recordBatchDecoder.readInt32();
        if (!batchLength) {
            continue;
        }
        const batchDecoder = new decoder_1.Decoder(recordBatchDecoder.read(batchLength));
        const result = {
            baseOffset,
            batchLength,
            partitionLeaderEpoch: batchDecoder.readInt32(),
            magic: batchDecoder.readInt8(),
            crc: batchDecoder.readUInt32(),
            attributes: batchDecoder.readInt16(),
            lastOffsetDelta: batchDecoder.readInt32(),
            baseTimestamp: batchDecoder.readInt64(),
            maxTimestamp: batchDecoder.readInt64(),
            producerId: batchDecoder.readInt64(),
            producerEpoch: batchDecoder.readInt16(),
            baseSequence: batchDecoder.readInt32(),
            recordsLength: batchDecoder.read(4),
            compressedRecords: batchDecoder.read(),
        };
        const compression = result.attributes & 0x07;
        const timestampType = (result.attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';
        const isTransactional = !!((result.attributes & 0x10) >> 4);
        const isControlBatch = !!((result.attributes & 0x20) >> 5);
        const hasDeleteHorizonMs = !!((result.attributes & 0x40) >> 6);
        results.push({
            ...result,
            compression,
            timestampType,
            isTransactional,
            isControlBatch,
            hasDeleteHorizonMs,
        });
    }
    return results;
};
const decodeRecord = (decoder) => decoder.readRecords((record) => ({
    attributes: record.readInt8(),
    timestampDelta: record.readVarLong(),
    offsetDelta: record.readVarInt(),
    key: record.readVarIntBuffer(),
    value: record.readVarIntBuffer(),
    headers: record.readVarIntArray((header) => ({
        key: header.readVarIntBuffer(),
        value: header.readVarIntBuffer(),
    })),
}));
```
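The `decodeRecordBatch` helper above unpacks the record-batch `attributes` bit field: the low three bits select the compression codec and single bits flag the timestamp type, transactional batches, control batches, and the delete-horizon marker. A worked example using the same masks, assuming an attributes value of 0x19:

```ts
// attributes = 0x19 = 0b0001_1001, decoded with the same masks as above:
const attributes = 0x19;
const compression = attributes & 0x07;                                            // 1 (codec id handed to findCodec; 0 = none)
const timestampType = (attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';  // 'LogAppendTime'
const isTransactional = !!((attributes & 0x10) >> 4);                             // true
const isControlBatch = !!((attributes & 0x20) >> 5);                              // false
const hasDeleteHorizonMs = !!((attributes & 0x40) >> 6);                          // false
```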
package/dist/api/find-coordinator.d.ts
ADDED

```ts
export declare const KEY_TYPE: {
    GROUP: number;
    TRANSACTION: number;
};
export declare const FIND_COORDINATOR: import("../utils/api").Api<{
    keyType: number;
    keys: string[];
}, {
    _tag: void;
    throttleTimeMs: number;
    coordinators: {
        key: string | null;
        nodeId: number;
        host: string;
        port: number;
        errorCode: number;
        errorMessage: string | null;
        _tag: void;
    }[];
    _tag2: void;
}>;
```
package/dist/api/find-coordinator.js
ADDED

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.FIND_COORDINATOR = exports.KEY_TYPE = void 0;
const api_1 = require("../utils/api");
const error_1 = require("../utils/error");
exports.KEY_TYPE = {
    GROUP: 0,
    TRANSACTION: 1,
};
exports.FIND_COORDINATOR = (0, api_1.createApi)({
    apiKey: 10,
    apiVersion: 4,
    request: (encoder, data) => encoder
        .writeUVarInt(0)
        .writeInt8(data.keyType)
        .writeCompactArray(data.keys, (encoder, key) => encoder.writeCompactString(key))
        .writeUVarInt(0),
    response: (decoder) => {
        const result = {
            _tag: decoder.readTagBuffer(),
            throttleTimeMs: decoder.readInt32(),
            coordinators: decoder.readCompactArray((decoder) => ({
                key: decoder.readCompactString(),
                nodeId: decoder.readInt32(),
                host: decoder.readCompactString(),
                port: decoder.readInt32(),
                errorCode: decoder.readInt16(),
                errorMessage: decoder.readCompactString(),
                _tag: decoder.readTagBuffer(),
            })),
            _tag2: decoder.readTagBuffer(),
        };
        result.coordinators.forEach((coordinator) => {
            if (coordinator.errorCode)
                throw new error_1.KafkaTSApiError(coordinator.errorCode, coordinator.errorMessage, result);
        });
        return result;
    },
});
```
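A request payload matching the `FIND_COORDINATOR` request type above, for example when locating the group coordinator for a consumer group (the values are illustrative):

```ts
// Illustrative payload for FIND_COORDINATOR (apiKey 10, v4).
const findCoordinatorRequest = {
    keyType: 0,                    // KEY_TYPE.GROUP
    keys: ['my-consumer-group'],
};
// The response carries one coordinator entry per key, with the nodeId/host/port
// of the broker that owns the group, or an errorCode if the lookup failed.
```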
package/dist/api/heartbeat.js
ADDED

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.HEARTBEAT = void 0;
const api_1 = require("../utils/api");
const error_1 = require("../utils/error");
exports.HEARTBEAT = (0, api_1.createApi)({
    apiKey: 12,
    apiVersion: 4,
    request: (encoder, data) => encoder
        .writeUVarInt(0)
        .writeCompactString(data.groupId)
        .writeInt32(data.generationId)
        .writeCompactString(data.memberId)
        .writeCompactString(data.groupInstanceId)
        .writeUVarInt(0),
    response: (decoder) => {
        const result = {
            _tag: decoder.readTagBuffer(),
            throttleTimeMs: decoder.readInt32(),
            errorCode: decoder.readInt16(),
            _tag2: decoder.readTagBuffer(),
        };
        if (result.errorCode)
            throw new error_1.KafkaTSApiError(result.errorCode, null, result);
        return result;
    },
});
```
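The HEARTBEAT request above carries the consumer-group membership fields that the coordinator uses to keep a member's partition assignment alive. An illustrative payload (field names taken from the encoder above; the values are placeholders):

```ts
// Illustrative payload for HEARTBEAT (apiKey 12, v4), sent periodically by a
// group member between rebalances.
const heartbeatRequest = {
    groupId: 'my-consumer-group',
    generationId: 5,
    memberId: 'kafka-ts-member-1',
    groupInstanceId: null,       // only set when using static membership
};
```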