kafka-ts 0.0.16 → 0.0.17-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -1
- package/dist/api/create-topics.d.ts +6 -6
- package/dist/api/create-topics.js +6 -6
- package/dist/api/delete-topics.d.ts +1 -1
- package/dist/api/delete-topics.js +1 -1
- package/dist/api/index.d.ts +13 -13
- package/dist/api/metadata.d.ts +4 -4
- package/dist/api/metadata.js +3 -3
- package/dist/cluster.js +9 -6
- package/dist/connection.js +1 -1
- package/dist/consumer/consumer.js +2 -2
- package/dist/metadata.d.ts +4 -1
- package/dist/metadata.js +6 -1
- package/dist/producer/producer.js +8 -2
- package/package.json +1 -1
package/README.md
CHANGED
````diff
@@ -38,7 +38,7 @@ const consumer = await kafka.startConsumer({
 ```typescript
 export const producer = kafka.createProducer();
 
-await producer.send([{ topic: 'my-topic',
+await producer.send([{ topic: 'my-topic', key: 'key', value: 'value' }]);
 ```
 
 #### Low-level API
@@ -116,6 +116,26 @@ After each batch is processed, the consumer will commit offsets for the processed
 
 `concurrency` controls how many aforementioned batches are processed concurrently.
 
+#### Partitioning
+
+By default, messages are partitioned by message key, or round-robin if the key is null or undefined. The partition can be overridden with the `partition` property on a message. You can also override the default partitioner per producer instance: `kafka.createProducer({ partitioner: customPartitioner })`.
+
+A simple example of how to partition messages by the value of the `x-partition-key` message header:
+
+```typescript
+import type { Partitioner } from 'kafka-ts';
+import { defaultPartitioner } from 'kafka-ts';
+
+const myPartitioner: Partitioner = (context) => {
+    const partition = defaultPartitioner(context);
+    return (message) => partition({ ...message, key: message.headers?.['x-partition-key'] });
+};
+
+const producer = kafka.createProducer({ partitioner: myPartitioner });
+
+await producer.send([{ topic: 'my-topic', value: 'value', headers: { 'x-partition-key': '123' } }]);
+```
+
 ## Motivation
 
 The existing low-level libraries (e.g. node-rdkafka) are bindings on librdkafka, which doesn't give enough control over the consumer logic.
````
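The new Partitioning section also mentions overriding the partition per message. A minimal sketch of that variant, reusing the `producer` from the README example above:

```typescript
// Pin the message to partition 0 instead of going through the partitioner.
await producer.send([{ topic: 'my-topic', partition: 0, value: 'value' }]);
```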
package/dist/api/create-topics.d.ts
CHANGED

```diff
@@ -1,19 +1,19 @@
 export declare const CREATE_TOPICS: import("../utils/api").Api<{
     topics: {
         name: string;
-        numPartitions: number;
-        replicationFactor: number;
-        assignments: {
+        numPartitions?: number;
+        replicationFactor?: number;
+        assignments?: {
             partitionIndex: number;
             brokerIds: number[];
         }[];
-        configs: {
+        configs?: {
             name: string;
             value: string | null;
         }[];
     }[];
-    timeoutMs: number;
-    validateOnly: boolean;
+    timeoutMs?: number | undefined;
+    validateOnly?: boolean | undefined;
 }, {
     _tag: void;
     throttleTimeMs: number;
```
package/dist/api/create-topics.js
CHANGED

```diff
@@ -10,16 +10,16 @@ exports.CREATE_TOPICS = (0, api_1.createApi)({
         .writeUVarInt(0)
         .writeCompactArray(data.topics, (encoder, topic) => encoder
         .writeCompactString(topic.name)
-        .writeInt32(topic.numPartitions)
-        .writeInt16(topic.replicationFactor)
-        .writeCompactArray(topic.assignments, (encoder, assignment) => encoder
+        .writeInt32(topic.numPartitions ?? -1)
+        .writeInt16(topic.replicationFactor ?? -1)
+        .writeCompactArray(topic.assignments ?? [], (encoder, assignment) => encoder
         .writeInt32(assignment.partitionIndex)
         .writeCompactArray(assignment.brokerIds, (encoder, brokerId) => encoder.writeInt32(brokerId))
         .writeUVarInt(0))
-        .writeCompactArray(topic.configs, (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
+        .writeCompactArray(topic.configs ?? [], (encoder, config) => encoder.writeCompactString(config.name).writeCompactString(config.value).writeUVarInt(0))
         .writeUVarInt(0))
-        .writeInt32(data.timeoutMs)
-        .writeBoolean(data.validateOnly)
+        .writeInt32(data.timeoutMs ?? 10_000)
+        .writeBoolean(data.validateOnly ?? false)
         .writeUVarInt(0),
     response: (decoder) => {
         const result = {
```
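With these fallbacks, a low-level create-topics request only needs topic names. A minimal sketch, assuming `API` is importable from the package root and `cluster` is a connected cluster instance exposing the `sendRequest` method seen in `cluster.js` below (both assumptions, not shown in this diff):

```typescript
import { API } from 'kafka-ts'; // assumption: API is re-exported from the root

// Omitted fields now fall back to the defaults encoded above:
// numPartitions/replicationFactor -1 (let the broker decide),
// assignments/configs [], timeoutMs 10_000, validateOnly false.
await cluster.sendRequest(API.CREATE_TOPICS, {
    topics: [{ name: 'my-topic' }],
});
```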
package/dist/api/delete-topics.js
CHANGED

```diff
@@ -9,7 +9,7 @@ exports.DELETE_TOPICS = (0, api_1.createApi)({
     request: (encoder, data) => encoder
         .writeUVarInt(0)
         .writeCompactArray(data.topics, (encoder, topic) => encoder.writeCompactString(topic.name).writeUUID(topic.topicId).writeUVarInt(0))
-        .writeInt32(data.timeoutMs)
+        .writeInt32(data.timeoutMs ?? 10_000)
         .writeUVarInt(0),
     response: (decoder) => {
         const result = {
```
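The same pattern applies to delete-topics: `timeoutMs` may now be omitted. A hedged sketch under the same `cluster`/`API` assumptions as above (per the `index.d.ts` types below, `name` and `topicId` are nullable but still required keys):

```typescript
// timeoutMs falls back to 10_000 ms when omitted.
await cluster.sendRequest(API.DELETE_TOPICS, {
    topics: [{ name: 'my-topic', topicId: null }],
});
```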
package/dist/api/index.d.ts
CHANGED
```diff
@@ -13,19 +13,19 @@ export declare const API: {
     CREATE_TOPICS: Api<{
         topics: {
             name: string;
-            numPartitions: number;
-            replicationFactor: number;
-            assignments: {
+            numPartitions?: number | undefined;
+            replicationFactor?: number | undefined;
+            assignments?: {
                 partitionIndex: number;
                 brokerIds: number[];
-            }[];
-            configs: {
+            }[] | undefined;
+            configs?: {
                 name: string;
                 value: string | null;
-            }[];
+            }[] | undefined;
         }[];
-        timeoutMs: number;
-        validateOnly: boolean;
+        timeoutMs?: number | undefined;
+        validateOnly?: boolean | undefined;
     }, {
         _tag: void;
         throttleTimeMs: number;
@@ -53,7 +53,7 @@ export declare const API: {
             name: string | null;
             topicId: string | null;
         }[];
-        timeoutMs: number;
+        timeoutMs?: number | undefined;
     }, {
         _tag: void;
         throttleTimeMs: number;
@@ -266,12 +266,12 @@ export declare const API: {
         _tag2: void;
     }>;
     METADATA: Api<{
-        topics: {
+        topics?: {
             id: string | null;
             name: string;
-        }[] | null;
-        allowTopicAutoCreation: boolean;
-        includeTopicAuthorizedOperations: boolean;
+        }[] | null | undefined;
+        allowTopicAutoCreation?: boolean | undefined;
+        includeTopicAuthorizedOperations?: boolean | undefined;
     }, {
         _tag: void;
         throttleTimeMs: number;
```
package/dist/api/metadata.d.ts
CHANGED
```diff
@@ -1,11 +1,11 @@
 export type Metadata = Awaited<ReturnType<(typeof METADATA)['response']>>;
 export declare const METADATA: import("../utils/api").Api<{
-    topics: {
+    topics?: {
         id: string | null;
         name: string;
-    }[] | null;
-    allowTopicAutoCreation: boolean;
-    includeTopicAuthorizedOperations: boolean;
+    }[] | null | undefined;
+    allowTopicAutoCreation?: boolean | undefined;
+    includeTopicAuthorizedOperations?: boolean | undefined;
 }, {
     _tag: void;
     throttleTimeMs: number;
```
package/dist/api/metadata.js
CHANGED
```diff
@@ -8,9 +8,9 @@ exports.METADATA = (0, api_1.createApi)({
     apiVersion: 12,
     request: (encoder, data) => encoder
         .writeUVarInt(0)
-        .writeCompactArray(data.topics, (encoder, topic) => encoder.writeUUID(topic.id).writeCompactString(topic.name).writeUVarInt(0))
-        .writeBoolean(data.allowTopicAutoCreation)
-        .writeBoolean(data.includeTopicAuthorizedOperations)
+        .writeCompactArray(data.topics ?? null, (encoder, topic) => encoder.writeUUID(topic.id).writeCompactString(topic.name).writeUVarInt(0))
+        .writeBoolean(data.allowTopicAutoCreation ?? false)
+        .writeBoolean(data.includeTopicAuthorizedOperations ?? false)
         .writeUVarInt(0),
     response: (decoder) => {
         const result = {
```
package/dist/cluster.js
CHANGED
```diff
@@ -16,18 +16,21 @@ class Cluster {
     async connect() {
         this.seedBroker = await this.findSeedBroker();
         this.brokerById = {};
-        const metadata = await this.sendRequest(api_1.API.METADATA, {
-            allowTopicAutoCreation: false,
-            includeTopicAuthorizedOperations: false,
-            topics: [],
-        });
+        const metadata = await this.sendRequest(api_1.API.METADATA, { topics: [] });
         this.brokerMetadata = Object.fromEntries(metadata.brokers.map((options) => [options.nodeId, options]));
     }
     async ensureConnected() {
         if (!this.seedBroker) {
             return this.connect();
         }
-
+        try {
+            await Promise.all([this.seedBroker, ...Object.values(this.brokerById)].map((x) => x.ensureConnected()));
+        }
+        catch {
+            logger_1.log.warn('Failed to connect to known brokers, reconnecting...');
+            await this.disconnect();
+            return this.connect();
+        }
     }
     async disconnect() {
         await Promise.all([
```
package/dist/consumer/consumer.js
CHANGED

```diff
@@ -80,7 +80,7 @@ class Consumer extends events_1.default {
         this.stopHook = undefined;
         try {
             await this.cluster.connect();
-            await this.metadata.
+            await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
             this.metadata.setAssignment(this.metadata.getTopicPartitions());
             await this.offsetManager.fetchOffsets({ fromBeginning });
             await this.consumerGroup?.init();
@@ -137,7 +137,7 @@ class Consumer extends events_1.default {
         catch (error) {
             await this.fetchManager?.stop();
             if (error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
-                logger_1.log.debug('Rebalance in progress...', { apiName: error.apiName });
+                logger_1.log.debug('Rebalance in progress...', { apiName: error.apiName, groupId });
                 continue;
             }
             if (error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
```
package/dist/metadata.d.ts
CHANGED
```diff
@@ -19,6 +19,9 @@ export declare class Metadata {
         topics: string[];
         allowTopicAutoCreation: boolean;
     }): Promise<void>;
-
+    fetchMetadata({ topics, allowTopicAutoCreation, }: {
+        topics: string[] | null;
+        allowTopicAutoCreation: boolean;
+    }): Promise<void>;
 }
 export {};
```
package/dist/metadata.js
CHANGED
```diff
@@ -63,7 +63,6 @@ class Metadata {
         const { cluster } = this.options;
         const response = await cluster.sendRequest(api_1.API.METADATA, {
             allowTopicAutoCreation,
-            includeTopicAuthorizedOperations: false,
             topics: topics?.map((name) => ({ id: null, name })) ?? null,
         });
         this.topicPartitions = {
@@ -104,3 +103,9 @@ __decorate([
     __metadata("design:paramtypes", [Object]),
     __metadata("design:returntype", Promise)
 ], Metadata.prototype, "fetchMetadataIfNecessary", null);
+__decorate([
+    trace(),
+    __metadata("design:type", Function),
+    __metadata("design:paramtypes", [Object]),
+    __metadata("design:returntype", Promise)
+], Metadata.prototype, "fetchMetadata", null);
```
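The appended `__decorate` block is compiled decorator output; it implies the TypeScript source now applies the same `trace()` decorator to the newly exposed `fetchMetadata` that was already applied to `fetchMetadataIfNecessary`. A reconstructed sketch (the shape of `trace` is an assumption; only the compiled output above is authoritative):

```typescript
// Reconstruction of the decorated source implied by the __decorate block.
declare function trace(): MethodDecorator; // stand-in for the library's tracer

class Metadata {
    @trace()
    async fetchMetadata({ topics, allowTopicAutoCreation }: {
        topics: string[] | null;
        allowTopicAutoCreation: boolean;
    }): Promise<void> {
        // ... sends API.METADATA and caches topic partitions, as in metadata.js above
    }
}
```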
package/dist/producer/producer.js
CHANGED

```diff
@@ -76,7 +76,10 @@ class Producer {
                         offsetDelta: index,
                         key: message.key ?? null,
                         value: message.value,
-                        headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
+                        headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
+                            key,
+                            value,
+                        })),
                     })),
                 };
             }),
@@ -95,7 +98,10 @@ class Producer {
             }));
         }
         catch (error) {
-            if (
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_LEADER_OR_FOLLOWER) {
+                await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
+            }
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.OUT_OF_ORDER_SEQUENCE_NUMBER) {
                 await this.initProducerId();
             }
             throw error;
```
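Because a `NOT_LEADER_OR_FOLLOWER` error now refreshes the producer's cached metadata before the error is rethrown, a single caller-side retry can reach the new partition leader. An illustrative sketch (the retry wrapper is not part of kafka-ts; the message shape follows the README example):

```typescript
// One retry is usually enough: the failed send() has already refreshed
// the metadata cache, so the next attempt targets the new leader.
async function sendWithRetry<T>(send: () => Promise<T>, attempts = 2): Promise<T> {
    for (let attempt = 1; ; attempt++) {
        try {
            return await send();
        } catch (error) {
            if (attempt >= attempts) throw error;
        }
    }
}

// e.g. await sendWithRetry(() => producer.send([{ topic: 'my-topic', value: 'value' }]));
```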