kafka-ts 0.0.1-beta.1 → 0.0.1-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +45 -7
- package/examples/package.json +2 -1
- package/examples/src/consumer.ts +1 -1
- package/examples/src/producer.ts +1 -1
- package/package.json +1 -1
- package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap} +200 -4
- package/src/api/fetch.ts +4 -4
- package/src/api/produce.ts +7 -7
- package/src/cluster.test.ts +4 -4
- package/src/consumer/consumer.ts +7 -7
- package/src/consumer/fetch-manager.ts +7 -7
- package/src/distributors/partitioner.ts +27 -0
- package/src/index.ts +1 -0
- package/src/producer/producer.ts +8 -3
- package/src/types.ts +2 -2
- package/src/utils/decoder.ts +2 -2
- package/src/utils/encoder.ts +3 -3
- package/src/utils/murmur2.ts +44 -0
package/README.md
CHANGED
@@ -92,12 +92,51 @@ The existing high-level libraries (e.g. kafkajs) are missing a few crucial featu
 - **Consuming messages without consumer groups** - When you don't need the consumer to track the partition offsets, you can simply create a consumer without groupId and always either start consuming messages from the beginning or from the latest partition offset.
 - **Low-level API requests** - It's possible to communicate directly with the Kafka cluster using the kafka api protocol.
 
-
-
-
-
-
-
+## Configuration
+
+### `createKafkaClient()`
+
+| Name             | Type                   | Required | Default | Description                                            |
+| ---------------- | ---------------------- | -------- | ------- | ------------------------------------------------------ |
+| clientId         | string                 | false    | _null_  | The client id used for all requests.                   |
+| bootstrapServers | TcpSocketConnectOpts[] | true     |         | List of kafka brokers for initial cluster discovery.   |
+| sasl             | SASLProvider           | false    |         | SASL provider (see "Supported SASL mechanisms" below). |
+| ssl              | TLSSocketOptions       | false    |         | SSL configuration.                                     |
+
+### `kafka.startConsumer()`
+
+| Name                   | Type                                   | Required | Default   | Description |
+| ---------------------- | -------------------------------------- | -------- | --------- | ----------- |
+| topics                 | string[]                               | true     |           | List of topics to subscribe to. |
+| groupId                | string                                 | false    | _null_    | Consumer group id. |
+| groupInstanceId        | string                                 | false    | _null_    | Consumer group instance id. |
+| rackId                 | string                                 | false    | _null_    | Rack id. |
+| isolationLevel         | IsolationLevel                         | false    |           | Isolation level. |
+| sessionTimeoutMs       | number                                 | false    | 30000     | Session timeout in milliseconds. |
+| rebalanceTimeoutMs     | number                                 | false    | 60000     | Rebalance timeout in milliseconds. |
+| maxWaitMs              | number                                 | false    | 5000      | Fetch long-poll timeout in milliseconds. |
+| minBytes               | number                                 | false    | 1         | Minimum number of bytes to wait for before returning a fetch response. |
+| maxBytes               | number                                 | false    | 1_048_576 | Maximum number of bytes to return in the fetch response. |
+| partitionMaxBytes      | number                                 | false    | 1_048_576 | Maximum number of bytes to return per partition in the fetch response. |
+| allowTopicAutoCreation | boolean                                | false    | false     | Allow kafka to auto-create the topic when it doesn't exist. |
+| fromBeginning          | boolean                                | false    | false     | Start consuming from the beginning of the topic. |
+| batchGranularity       | BatchGranularity                       | false    | partition | Controls how messages from the fetch response are split into batches, and how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single partition<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assigned topics and partitions |
+| concurrency            | number                                 | false    | 1         | How many batches to process concurrently. |
+| onMessage              | (message: Message) => Promise<unknown> | true     |           | Callback executed on every message. |
+| onBatch                | (batch: Message[]) => Promise<unknown> | true     |           | Callback executed on every batch of messages (based on **batchGranularity**). |
+
+### `kafka.createProducer()`
+
+| Name                   | Type        | Required | Default | Description |
+| ---------------------- | ----------- | -------- | ------- | ----------- |
+| allowTopicAutoCreation | boolean     | false    | false   | Allow kafka to auto-create the topic when it doesn't exist. |
+| partitioner            | Partitioner | false    |         | Custom partitioner function. By default, a Java-compatible partitioner is used. |
+
+### Supported SASL mechanisms
+
+- PLAIN: `saslPlain({ username, password })`
+- SCRAM-SHA-256: `saslScramSha256({ username, password })`
+- SCRAM-SHA-512: `saslScramSha512({ username, password })`
 
 Custom SASL mechanisms can be implemented following the `SASLProvider` interface. See [src/auth](./src/auth) for examples.
 
@@ -105,5 +144,4 @@ Custom SASL mechanisms can be implemented following the `SASLProvider` interface
 
 Minimal set of features left to implement before a stable release:
 
-- Partitioner (Currently have to specify the partition on producer.send())
 - API versioning (Currently only tested against Kafka 3.7+)
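For orientation, here is a rough usage sketch pieced together from the README tables above. The `createKafkaClient`, `kafka.startConsumer()`, `kafka.createProducer()`, and `saslScramSha512` names come from this README; the exact export shape and return values are not shown in this diff, so treat it as illustrative only.

```ts
import { createKafkaClient, saslScramSha512 } from 'kafka-ts';

const kafka = createKafkaClient({
    clientId: 'example-app',                                        // optional
    bootstrapServers: [{ host: 'localhost', port: 9092 }],          // TcpSocketConnectOpts[]
    sasl: saslScramSha512({ username: 'user', password: 'pass' }),  // optional
});

// batchGranularity and concurrency are the consumer options added in this release.
await kafka.startConsumer({
    topics: ['my-topic'],
    groupId: 'my-group',
    batchGranularity: 'topic', // 'partition' (default) | 'topic' | 'broker'
    concurrency: 4,
    onBatch: async (batch) => {
        console.log(`received ${batch.length} messages`);
    },
});

// The partition no longer has to be set explicitly on send(); the default
// java-compatible partitioner derives it from the message key.
const producer = kafka.createProducer({ allowTopicAutoCreation: false });
await producer.send([{ topic: 'my-topic', key: Buffer.from('key'), value: Buffer.from('value') }]);
```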
package/examples/package.json
CHANGED
package/examples/src/consumer.ts
CHANGED
package/examples/src/producer.ts
CHANGED
package/package.json
CHANGED
package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap}
RENAMED
@@ -360,14 +360,60 @@ exports[`Request handler > should fetch messages 1`] = `
           "attributes": 0,
           "headers": [
             {
-              "key":
-
+              "key": {
+                "data": [
+                  104,
+                  101,
+                  97,
+                  100,
+                  101,
+                  114,
+                  45,
+                  107,
+                  101,
+                  121,
+                ],
+                "type": "Buffer",
+              },
+              "value": {
+                "data": [
+                  104,
+                  101,
+                  97,
+                  100,
+                  101,
+                  114,
+                  45,
+                  118,
+                  97,
+                  108,
+                  117,
+                  101,
+                ],
+                "type": "Buffer",
+              },
             },
           ],
-          "key":
+          "key": {
+            "data": [
+              107,
+              101,
+              121,
+            ],
+            "type": "Buffer",
+          },
           "offsetDelta": 0,
           "timestampDelta": 0n,
-          "value":
+          "value": {
+            "data": [
+              118,
+              97,
+              108,
+              117,
+              101,
+            ],
+            "type": "Buffer",
+          },
         },
       ],
     },

@@ -937,6 +983,156 @@ exports[`Request handler > should request metadata for all topics 1`] = `
     "controllerId": 0,
     "throttleTimeMs": 0,
     "topics": [
+      {
+        "_tag": undefined,
+        "errorCode": 0,
+        "isInternal": false,
+        "name": "my-topic",
+        "partitions": [
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 5,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 6,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 0,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 2,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 7,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 1,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 8,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 9,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 3,
+            "replicaNodes": [
+              0,
+            ],
+          },
+          {
+            "_tag": undefined,
+            "errorCode": 0,
+            "isrNodes": [
+              0,
+            ],
+            "leaderEpoch": 0,
+            "leaderId": 0,
+            "offlineReplicas": [],
+            "partitionIndex": 4,
+            "replicaNodes": [
+              0,
+            ],
+          },
+        ],
+        "topicAuthorizedOperations": -2147483648,
+        "topicId": "Any<UUID>",
+      },
       {
         "_tag": undefined,
         "errorCode": 0,
package/src/api/fetch.ts
CHANGED
@@ -130,11 +130,11 @@ const decodeRecords = (decoder: Decoder) => {
             attributes: record.readInt8(),
             timestampDelta: record.readVarLong(),
             offsetDelta: record.readVarInt(),
-            key: record.
-            value: record.
+            key: record.readVarIntBuffer(),
+            value: record.readVarIntBuffer(),
             headers: record.readCompactArray((header) => ({
-                key: header.
-                value: header.
+                key: header.readVarIntBuffer(),
+                value: header.readVarIntBuffer(),
             })),
         })),
     });
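With this change, decoded record keys, values, and header values are raw `Buffer`s (or `null` when absent) instead of strings, so consuming code now has to decode them explicitly. A minimal sketch, assuming the consumer-facing `Message` fields mirror the decoder output above (the exact `Message` type is not shown in this diff):

```ts
// Hypothetical callback shape for illustration only.
const onMessage = async (message: { key: Buffer | null; value: Buffer | null }) => {
    const key = message.key?.toString('utf-8');
    const value = message.value?.toString('utf-8');
    console.log(`key=${key} value=${value}`);
};
```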
package/src/api/produce.ts
CHANGED
@@ -28,11 +28,11 @@ export const PRODUCE = createApi({
             attributes: number;
             timestampDelta: bigint;
             offsetDelta: number;
-            key:
-            value:
+            key: Buffer | null;
+            value: Buffer | null;
             headers: {
-                key:
-                value:
+                key: Buffer;
+                value: Buffer;
             }[];
         }[];
     }[];

@@ -61,10 +61,10 @@ export const PRODUCE = createApi({
                 .writeInt8(record.attributes)
                 .writeVarLong(record.timestampDelta)
                 .writeVarInt(record.offsetDelta)
-                .
-                .
+                .writeVarIntBuffer(record.key)
+                .writeVarIntBuffer(record.value)
                 .writeVarIntArray(record.headers, (encoder, header) =>
-                    encoder.
+                    encoder.writeVarIntBuffer(header.key).writeVarIntBuffer(header.value),
                 )
                 .value();
 
package/src/cluster.test.ts
CHANGED
@@ -149,12 +149,12 @@ describe.sequential('Request handler', () => {
                     attributes: 0,
                     offsetDelta: 0,
                     timestampDelta: 0n,
-                    key: 'key',
-                    value: 'value',
+                    key: Buffer.from('key'),
+                    value: Buffer.from('value'),
                     headers: [
                         {
-                            key: 'header-key',
-                            value: 'header-value',
+                            key: Buffer.from('header-key'),
+                            value: Buffer.from('header-value'),
                         },
                     ],
                 },
package/src/consumer/consumer.ts
CHANGED
@@ -9,7 +9,7 @@ import { ConnectionError, KafkaTSApiError } from '../utils/error';
 import { defaultRetrier, Retrier } from '../utils/retrier';
 import { ConsumerGroup } from './consumer-group';
 import { ConsumerMetadata } from './consumer-metadata';
-import { FetchManager,
+import { FetchManager, BatchGranularity } from './fetch-manager';
 import { OffsetManager } from './offset-manager';
 
 export type ConsumerOptions = {

@@ -27,7 +27,7 @@ export type ConsumerOptions = {
     allowTopicAutoCreation?: boolean;
     fromBeginning?: boolean;
     retrier?: Retrier;
-
+    batchGranularity?: BatchGranularity;
     concurrency?: number;
 } & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
 

@@ -52,13 +52,13 @@ export class Consumer {
             rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
             maxWaitMs: options.maxWaitMs ?? 5000,
             minBytes: options.minBytes ?? 1,
-            maxBytes: options.maxBytes ??
-            partitionMaxBytes: options.partitionMaxBytes ??
+            maxBytes: options.maxBytes ?? 1_048_576,
+            partitionMaxBytes: options.partitionMaxBytes ?? 1_048_576,
             isolationLevel: options.isolationLevel ?? IsolationLevel.READ_UNCOMMITTED,
             allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
             fromBeginning: options.fromBeginning ?? false,
             retrier: options.retrier ?? defaultRetrier,
-
+            batchGranularity: options.batchGranularity ?? 'partition',
             concurrency: options.concurrency ?? 1,
         };
 

@@ -118,7 +118,7 @@ export class Consumer {
     }
 
     private startFetchManager = async () => {
-        const {
+        const { batchGranularity, concurrency } = this.options;
 
         while (!this.stopHook) {
            const nodeAssignments = Object.entries(

@@ -137,7 +137,7 @@ export class Consumer {
                 metadata: this.metadata,
                 consumerGroup: this.consumerGroup,
                 nodeAssignments,
-
+                batchGranularity,
                 concurrency: numProcessors,
             });
 

package/src/consumer/fetch-manager.ts
CHANGED
@@ -11,7 +11,7 @@ import { Processor } from './processor';
 
 const trace = createTracer('FetchManager');
 
-export type
+export type BatchGranularity = 'partition' | 'topic' | 'broker';
 
 type FetchManagerOptions = {
     fetch: (nodeId: number, assignment: Assignment) => Promise<ReturnType<(typeof API.FETCH)['response']>>;

@@ -19,7 +19,7 @@ type FetchManagerOptions = {
     metadata: Metadata;
     consumerGroup?: ConsumerGroup;
     nodeAssignments: { nodeId: number; assignment: Assignment }[];
-
+    batchGranularity: BatchGranularity;
     concurrency: number;
 };
 

@@ -110,9 +110,9 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
     }
 
     private async onResponse(fetcherId: number, response: ReturnType<(typeof API.FETCH)['response']>) {
-        const { metadata,
+        const { metadata, batchGranularity } = this.options;
 
-        const batches = fetchResponseToBatches(response,
+        const batches = fetchResponseToBatches(response, batchGranularity, metadata);
         if (batches.length) {
             this.queue.push(...batches);
             this.queue.push({ kind: 'checkpoint', fetcherId });

@@ -138,7 +138,7 @@ export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number];
 
 const fetchResponseToBatches = (
     batch: ReturnType<typeof API.FETCH.response>,
-
+    batchGranularity: BatchGranularity,
     metadata: Metadata,
 ): Batch[] => {
     const brokerTopics = batch.responses.map(({ topicId, partitions }) =>

@@ -159,7 +159,7 @@ const fetchResponseToBatches = (
         ),
     );
 
-    switch (
+    switch (batchGranularity) {
         case 'broker':
             const messages = brokerTopics.flatMap((topicPartition) =>
                 topicPartition.flatMap((partitionMessages) => partitionMessages),

@@ -174,6 +174,6 @@
                 topicPartition.map((partitionMessages) => partitionMessages),
             );
         default:
-            throw new KafkaTSError(`Unhandled batch granularity: ${
+            throw new KafkaTSError(`Unhandled batch granularity: ${batchGranularity}`);
     }
 };

package/src/distributors/partitioner.ts
ADDED
@@ -0,0 +1,27 @@
+import { Metadata } from '../metadata';
+import { Message } from '../types';
+import { murmur2, toPositive } from '../utils/murmur2';
+
+export type Partition = (message: Message) => number;
+export type Partitioner = (context: { metadata: Metadata }) => Partition;
+
+export const defaultPartitioner: Partitioner = ({ metadata }) => {
+    const topicCounterMap: Record<string, number> = {};
+
+    const getNextValue = (topic: string) => {
+        topicCounterMap[topic] ??= 0;
+        return topicCounterMap[topic]++;
+    };
+
+    return ({ topic, partition, key }: Message) => {
+        if (partition !== null && partition !== undefined) {
+            return partition;
+        }
+        const partitions = metadata.getTopicPartitions()[topic];
+        const numPartitions = partitions.length;
+        if (key) {
+            return toPositive(murmur2(key)) % numPartitions;
+        }
+        return toPositive(getNextValue(topic)) % numPartitions;
+    };
+};
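Since a `Partitioner` is just a factory returning a per-message `Partition` function, a custom strategy can be plugged into the producer. A hedged sketch: the `partitioner` option name comes from the README table, the re-export of `Partitioner` from the package root is suggested by the index.ts change but not shown here, and the first-byte strategy below is purely illustrative.

```ts
import { createKafkaClient, Partitioner } from 'kafka-ts';

// Illustrative only: keep an explicitly set partition, otherwise route by the
// first byte of the key, and send keyless messages to partition 0.
const firstBytePartitioner: Partitioner = ({ metadata }) => (message) => {
    if (message.partition !== null && message.partition !== undefined) return message.partition;
    const numPartitions = metadata.getTopicPartitions()[message.topic].length;
    return message.key ? message.key[0] % numPartitions : 0;
};

const kafka = createKafkaClient({ bootstrapServers: [{ host: 'localhost', port: 9092 }] });
const producer = kafka.createProducer({ partitioner: firstBytePartitioner });
```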
package/src/index.ts
CHANGED
package/src/producer/producer.ts
CHANGED
@@ -1,6 +1,7 @@
 import { API, API_ERROR } from '../api';
 import { Cluster } from '../cluster';
 import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
+import { defaultPartitioner, Partition, Partitioner } from '../distributors/partitioner';
 import { Metadata } from '../metadata';
 import { Message } from '../types';
 import { delay } from '../utils/delay';

@@ -9,6 +10,7 @@ import { memo } from '../utils/memo';
 
 export type ProducerOptions = {
     allowTopicAutoCreation?: boolean;
+    partitioner?: Partitioner;
 };
 
 export class Producer {

@@ -17,6 +19,7 @@ export class Producer {
     private producerId = 0n;
     private producerEpoch = 0;
     private sequences: Record<string, Record<number, number>> = {};
+    private partition: Partition;
 
     constructor(
         private cluster: Cluster,

@@ -25,8 +28,10 @@ export class Producer {
         this.options = {
             ...options,
             allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
+            partitioner: options.partitioner ?? defaultPartitioner,
         };
         this.metadata = new Metadata({ cluster });
+        this.partition = this.options.partitioner({ metadata: this.metadata });
     }
 
     public async send(messages: Message[]) {

@@ -39,7 +44,7 @@
         await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
 
         const nodeTopicPartitionMessages = distributeMessagesToTopicPartitionLeaders(
-            messages,
+            messages.map(message => ({ ...message, partition: this.partition(message) })),
             this.metadata.getTopicPartitionLeaderIds(),
         );
 

@@ -83,8 +88,8 @@
                 key: message.key,
                 value: message.value,
                 headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
-                    key,
-                    value,
+                    key: Buffer.from(key),
+                    value: Buffer.from(value),
                 })),
             })),
         };
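The net effect on `send()`: every message gets a partition before the batch is grouped by partition leader. An explicit `partition` is kept as-is, otherwise the configured partitioner decides (key hash, or per-topic round-robin for keyless messages), and string headers are converted to `Buffer`s on the way out. Roughly, and with placeholder topic, key, and header names:

```ts
await producer.send([
    { topic: 'my-topic', partition: 3, value: Buffer.from('pinned') },              // explicit partition wins
    { topic: 'my-topic', key: Buffer.from('user-42'), value: Buffer.from('a') },    // murmur2(key) % numPartitions
    { topic: 'my-topic', value: Buffer.from('b'), headers: { 'trace-id': 'abc' } }, // round-robin; headers become Buffers
]);
```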
package/src/types.ts
CHANGED
package/src/utils/decoder.ts
CHANGED
@@ -97,13 +97,13 @@ export class Decoder {
         return value;
     }
 
-    public
+    public readVarIntBuffer() {
         const length = this.readVarInt();
         if (length < 0) {
             return null;
         }
 
-        const value = this.buffer.
+        const value = this.buffer.subarray(this.offset, this.offset + length);
         this.offset += length;
         return value;
     }
package/src/utils/encoder.ts
CHANGED
@@ -91,11 +91,11 @@ export class Encoder {
         return this.writeUVarInt(byteLength + 1).write(buffer);
     }
 
-    public
-    if (
+    public writeVarIntBuffer(buffer: Buffer | null) {
+        if (buffer === null) {
             return this.writeVarInt(-1);
         }
-        return this.writeVarInt(
+        return this.writeVarInt(buffer.byteLength).write(buffer);
     }
 
     public writeUUID(value: string | null) {
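`writeVarIntBuffer` and its `readVarIntBuffer` counterpart in the decoder implement the length-prefixed framing used inside record batches: a varint byte length followed by the raw bytes, with a length of -1 standing in for `null`. A round-trip sketch; how `Encoder` and `Decoder` instances are constructed is not shown in this diff, so the import paths and constructors below are assumptions:

```ts
import { Encoder } from './utils/encoder'; // assumed import paths
import { Decoder } from './utils/decoder';

const bytes = new Encoder()                  // assumed: no constructor arguments
    .writeVarIntBuffer(Buffer.from('value')) // varint length 5, then the 5 bytes
    .writeVarIntBuffer(null)                 // encoded as varint -1
    .value();

const decoder = new Decoder(bytes);          // assumed: wraps the encoded buffer
decoder.readVarIntBuffer();                  // => Buffer containing "value"
decoder.readVarIntBuffer();                  // => null
```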
package/src/utils/murmur2.ts
ADDED
@@ -0,0 +1,44 @@
+/* https://github.com/apache/kafka/blob/0.10.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L364 */
+
+export const murmur2 = (data: Buffer): number => {
+    const length = data.length;
+    const seed = 0x9747b28c;
+
+    const m = 0x5bd1e995;
+    const r = 24;
+
+    let h = seed ^ length;
+    let length4 = Math.floor(length / 4);
+
+    for (let i = 0; i < length4; i++) {
+        const i4 = i * 4;
+        let k =
+            (data[i4 + 0] & 0xff) +
+            ((data[i4 + 1] & 0xff) << 8) +
+            ((data[i4 + 2] & 0xff) << 16) +
+            ((data[i4 + 3] & 0xff) << 24);
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+        h *= m;
+        h ^= k;
+    }
+
+    switch (length % 4) {
+        case 3:
+            h = h ^ ((data[(length & ~3) + 2] & 0xff) << 16);
+        case 2:
+            h = h ^ ((data[(length & ~3) + 1] & 0xff) << 8);
+        case 1:
+            h = h ^ (data[length & ~3] & 0xff);
+            h *= m;
+    }
+
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+
+    return h;
+};
+
+export const toPositive = (input: number) => input & 0x7fffffff;