kafka-ts 0.0.1-beta
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.prettierrc +7 -0
- package/LICENSE +24 -0
- package/README.md +88 -0
- package/certs/ca.crt +29 -0
- package/certs/ca.key +52 -0
- package/certs/ca.srl +1 -0
- package/certs/kafka.crt +29 -0
- package/certs/kafka.csr +26 -0
- package/certs/kafka.key +52 -0
- package/certs/kafka.keystore.jks +0 -0
- package/certs/kafka.truststore.jks +0 -0
- package/dist/api/api-versions.d.ts +9 -0
- package/dist/api/api-versions.js +24 -0
- package/dist/api/create-topics.d.ts +38 -0
- package/dist/api/create-topics.js +53 -0
- package/dist/api/delete-topics.d.ts +18 -0
- package/dist/api/delete-topics.js +33 -0
- package/dist/api/fetch.d.ts +77 -0
- package/dist/api/fetch.js +106 -0
- package/dist/api/find-coordinator.d.ts +21 -0
- package/dist/api/find-coordinator.js +39 -0
- package/dist/api/heartbeat.d.ts +11 -0
- package/dist/api/heartbeat.js +27 -0
- package/dist/api/index.d.ts +573 -0
- package/dist/api/index.js +164 -0
- package/dist/api/init-producer-id.d.ts +13 -0
- package/dist/api/init-producer-id.js +29 -0
- package/dist/api/join-group.d.ts +34 -0
- package/dist/api/join-group.js +51 -0
- package/dist/api/leave-group.d.ts +19 -0
- package/dist/api/leave-group.js +39 -0
- package/dist/api/list-offsets.d.ts +29 -0
- package/dist/api/list-offsets.js +48 -0
- package/dist/api/metadata.d.ts +40 -0
- package/dist/api/metadata.js +58 -0
- package/dist/api/offset-commit.d.ts +28 -0
- package/dist/api/offset-commit.js +48 -0
- package/dist/api/offset-fetch.d.ts +33 -0
- package/dist/api/offset-fetch.js +57 -0
- package/dist/api/produce.d.ts +53 -0
- package/dist/api/produce.js +129 -0
- package/dist/api/sasl-authenticate.d.ts +11 -0
- package/dist/api/sasl-authenticate.js +23 -0
- package/dist/api/sasl-handshake.d.ts +6 -0
- package/dist/api/sasl-handshake.js +19 -0
- package/dist/api/sync-group.d.ts +24 -0
- package/dist/api/sync-group.js +36 -0
- package/dist/broker.d.ts +29 -0
- package/dist/broker.js +60 -0
- package/dist/client.d.ts +23 -0
- package/dist/client.js +36 -0
- package/dist/cluster.d.ts +24 -0
- package/dist/cluster.js +72 -0
- package/dist/connection.d.ts +25 -0
- package/dist/connection.js +155 -0
- package/dist/consumer/consumer-group.d.ts +36 -0
- package/dist/consumer/consumer-group.js +182 -0
- package/dist/consumer/consumer-metadata.d.ts +7 -0
- package/dist/consumer/consumer-metadata.js +14 -0
- package/dist/consumer/consumer.d.ts +37 -0
- package/dist/consumer/consumer.js +178 -0
- package/dist/consumer/metadata.d.ts +24 -0
- package/dist/consumer/metadata.js +64 -0
- package/dist/consumer/offset-manager.d.ts +22 -0
- package/dist/consumer/offset-manager.js +56 -0
- package/dist/distributors/assignments-to-replicas.d.ts +17 -0
- package/dist/distributors/assignments-to-replicas.js +60 -0
- package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
- package/dist/distributors/assignments-to-replicas.test.js +40 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/examples/src/replicator.js +34 -0
- package/dist/examples/src/utils/json.js +5 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +19 -0
- package/dist/metadata.d.ts +24 -0
- package/dist/metadata.js +89 -0
- package/dist/producer/producer.d.ts +19 -0
- package/dist/producer/producer.js +111 -0
- package/dist/request-handler.d.ts +16 -0
- package/dist/request-handler.js +67 -0
- package/dist/request-handler.test.d.ts +1 -0
- package/dist/request-handler.test.js +340 -0
- package/dist/src/api/api-versions.js +18 -0
- package/dist/src/api/create-topics.js +46 -0
- package/dist/src/api/delete-topics.js +26 -0
- package/dist/src/api/fetch.js +95 -0
- package/dist/src/api/find-coordinator.js +34 -0
- package/dist/src/api/heartbeat.js +22 -0
- package/dist/src/api/index.js +38 -0
- package/dist/src/api/init-producer-id.js +24 -0
- package/dist/src/api/join-group.js +48 -0
- package/dist/src/api/leave-group.js +30 -0
- package/dist/src/api/list-offsets.js +39 -0
- package/dist/src/api/metadata.js +47 -0
- package/dist/src/api/offset-commit.js +39 -0
- package/dist/src/api/offset-fetch.js +44 -0
- package/dist/src/api/produce.js +119 -0
- package/dist/src/api/sync-group.js +31 -0
- package/dist/src/broker.js +35 -0
- package/dist/src/connection.js +21 -0
- package/dist/src/consumer/consumer-group.js +131 -0
- package/dist/src/consumer/consumer.js +103 -0
- package/dist/src/consumer/metadata.js +52 -0
- package/dist/src/consumer/offset-manager.js +23 -0
- package/dist/src/index.js +19 -0
- package/dist/src/producer/producer.js +84 -0
- package/dist/src/request-handler.js +57 -0
- package/dist/src/request-handler.test.js +321 -0
- package/dist/src/types.js +2 -0
- package/dist/src/utils/api.js +5 -0
- package/dist/src/utils/decoder.js +161 -0
- package/dist/src/utils/encoder.js +137 -0
- package/dist/src/utils/error.js +10 -0
- package/dist/types.d.ts +9 -0
- package/dist/types.js +2 -0
- package/dist/utils/api.d.ts +9 -0
- package/dist/utils/api.js +5 -0
- package/dist/utils/debug.d.ts +2 -0
- package/dist/utils/debug.js +11 -0
- package/dist/utils/decoder.d.ts +29 -0
- package/dist/utils/decoder.js +147 -0
- package/dist/utils/delay.d.ts +1 -0
- package/dist/utils/delay.js +5 -0
- package/dist/utils/encoder.d.ts +28 -0
- package/dist/utils/encoder.js +122 -0
- package/dist/utils/error.d.ts +11 -0
- package/dist/utils/error.js +27 -0
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/retrier.d.ts +10 -0
- package/dist/utils/retrier.js +22 -0
- package/dist/utils/tracer.d.ts +1 -0
- package/dist/utils/tracer.js +26 -0
- package/docker-compose.yml +104 -0
- package/examples/node_modules/.package-lock.json +22 -0
- package/examples/package-lock.json +30 -0
- package/examples/package.json +14 -0
- package/examples/src/client.ts +9 -0
- package/examples/src/consumer.ts +17 -0
- package/examples/src/create-topic.ts +37 -0
- package/examples/src/producer.ts +24 -0
- package/examples/src/replicator.ts +25 -0
- package/examples/src/utils/json.ts +1 -0
- package/examples/tsconfig.json +7 -0
- package/log4j.properties +95 -0
- package/package.json +17 -0
- package/scripts/generate-certs.sh +24 -0
- package/src/__snapshots__/request-handler.test.ts.snap +1687 -0
- package/src/api/api-versions.ts +21 -0
- package/src/api/create-topics.ts +78 -0
- package/src/api/delete-topics.ts +42 -0
- package/src/api/fetch.ts +143 -0
- package/src/api/find-coordinator.ts +39 -0
- package/src/api/heartbeat.ts +33 -0
- package/src/api/index.ts +164 -0
- package/src/api/init-producer-id.ts +35 -0
- package/src/api/join-group.ts +67 -0
- package/src/api/leave-group.ts +48 -0
- package/src/api/list-offsets.ts +65 -0
- package/src/api/metadata.ts +66 -0
- package/src/api/offset-commit.ts +67 -0
- package/src/api/offset-fetch.ts +74 -0
- package/src/api/produce.ts +173 -0
- package/src/api/sasl-authenticate.ts +21 -0
- package/src/api/sasl-handshake.ts +16 -0
- package/src/api/sync-group.ts +54 -0
- package/src/broker.ts +74 -0
- package/src/client.ts +47 -0
- package/src/cluster.ts +87 -0
- package/src/connection.ts +141 -0
- package/src/consumer/consumer-group.ts +209 -0
- package/src/consumer/consumer-metadata.ts +14 -0
- package/src/consumer/consumer.ts +229 -0
- package/src/consumer/offset-manager.ts +93 -0
- package/src/distributors/assignments-to-replicas.test.ts +43 -0
- package/src/distributors/assignments-to-replicas.ts +85 -0
- package/src/distributors/messages-to-topic-partition-leaders.test.ts +32 -0
- package/src/distributors/messages-to-topic-partition-leaders.ts +19 -0
- package/src/index.ts +3 -0
- package/src/metadata.ts +122 -0
- package/src/producer/producer.ts +132 -0
- package/src/request-handler.test.ts +366 -0
- package/src/types.ts +9 -0
- package/src/utils/api.ts +11 -0
- package/src/utils/debug.ts +9 -0
- package/src/utils/decoder.ts +168 -0
- package/src/utils/delay.ts +1 -0
- package/src/utils/encoder.ts +141 -0
- package/src/utils/error.ts +21 -0
- package/src/utils/memo.ts +12 -0
- package/src/utils/retrier.ts +39 -0
- package/src/utils/tracer.ts +28 -0
- package/tsconfig.json +17 -0
package/src/cluster.ts
ADDED
@@ -0,0 +1,87 @@
+import { TcpSocketConnectOpts } from "net";
+import { TLSSocketOptions } from "tls";
+import { API } from "./api";
+import { Broker, SASLOptions } from "./broker";
+import { SendRequest } from "./connection";
+import { ConnectionError, KafkaTSError } from "./utils/error";
+
+type ClusterOptions = {
+    clientId: string | null;
+    bootstrapServers: TcpSocketConnectOpts[];
+    sasl: SASLOptions | null;
+    ssl: TLSSocketOptions | null;
+};
+
+export class Cluster {
+    private seedBroker: Broker;
+    private brokerById: Record<number, Broker> = {};
+
+    constructor(private options: ClusterOptions) {
+        this.seedBroker = new Broker({
+            clientId: this.options.clientId,
+            sasl: this.options.sasl,
+            ssl: this.options.ssl,
+            options: this.options.bootstrapServers[0],
+        });
+    }
+
+    public async connect() {
+        await this.connectSeedBroker();
+        const metadata = await this.sendRequest(API.METADATA, {
+            allowTopicAutoCreation: false,
+            includeTopicAuthorizedOperations: false,
+            topics: [],
+        });
+
+        this.brokerById = Object.fromEntries(
+            metadata.brokers.map(({ nodeId, ...options }) => [
+                nodeId,
+                new Broker({
+                    clientId: this.options.clientId,
+                    sasl: this.options.sasl,
+                    ssl: this.options.ssl,
+                    options,
+                }),
+            ]),
+        );
+        return this;
+    }
+
+    public async disconnect() {
+        await Promise.all([
+            this.seedBroker.disconnect(),
+            ...Object.values(this.brokerById).map((broker) => broker.disconnect()),
+        ]);
+    }
+
+    public sendRequest: SendRequest = (...args) => this.seedBroker.sendRequest(...args);
+
+    public sendRequestToNode =
+        (nodeId: number): SendRequest =>
+        async (...args) => {
+            const broker = this.brokerById[nodeId];
+            if (!broker) {
+                throw new ConnectionError(`Broker ${nodeId} is not available`);
+            }
+            await broker.ensureConnected();
+            return broker.sendRequest(...args);
+        };
+
+    private async connectSeedBroker() {
+        const randomizedBrokers = this.options.bootstrapServers.toSorted(() => Math.random() - 0.5);
+        for (const options of randomizedBrokers) {
+            try {
+                this.seedBroker = await new Broker({
+                    clientId: this.options.clientId,
+                    sasl: this.options.sasl,
+                    ssl: this.options.ssl,
+                    options,
+                }).connect();
+                return;
+            } catch (error) {
+                console.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
+            }
+        }
+        throw new KafkaTSError("No seed brokers found");
+    }
+}
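The 87-line hunk above matches src/cluster.ts in the file list: Cluster connects to one of the bootstrap servers in random order, issues a METADATA request, and builds a per-node Broker map so later requests can be routed with sendRequestToNode. Below is a minimal sketch of driving the class directly, using only the names visible in this hunk; the import paths and the localhost broker address are assumptions, since the package's public entry point is not part of this excerpt.

// Illustrative sketch only; import paths are assumed, not taken from this diff.
import { Cluster } from "kafka-ts/dist/cluster";
import { API } from "kafka-ts/dist/api";

async function main() {
    const cluster = new Cluster({
        clientId: "example-client",
        bootstrapServers: [{ host: "localhost", port: 9092 }],
        sasl: null,
        ssl: null,
    });

    await cluster.connect(); // tries bootstrap servers in random order, then fetches cluster metadata
    const metadata = await cluster.sendRequest(API.METADATA, {
        allowTopicAutoCreation: false,
        includeTopicAuthorizedOperations: false,
        topics: [], // same request connect() itself issues; only broker metadata is needed here
    });
    console.log(metadata.brokers.map((broker) => broker.nodeId)); // node ids routable via sendRequestToNode
    await cluster.disconnect();
}

main().catch(console.error);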
@@ -0,0 +1,141 @@
+import assert from "assert";
+import net, { isIP, Socket, TcpSocketConnectOpts } from "net";
+import tls, { TLSSocketOptions } from "tls";
+import { getApiName } from "./api";
+import { Api } from "./utils/api";
+import { Decoder } from "./utils/decoder";
+import { Encoder } from "./utils/encoder";
+import { ConnectionError } from "./utils/error";
+import { trace } from "./utils/tracer";
+
+export type ConnectionOptions = {
+    clientId: string | null;
+    connection: TcpSocketConnectOpts;
+    ssl: TLSSocketOptions | null;
+};
+
+type RawResonse = { responseDecoder: Decoder; responseSize: number };
+
+export class Connection {
+    private socket = new Socket();
+    private queue: {
+        [correlationId: number]: { resolve: (response: RawResonse) => void; reject: (error: Error) => void };
+    } = {};
+    private lastCorrelationId = 0;
+    private buffer: Buffer | null = null;
+
+    constructor(private options: ConnectionOptions) {}
+
+    @trace()
+    public async connect() {
+        this.queue = {};
+        this.buffer = null;
+
+        await new Promise<void>((resolve, reject) => {
+            const { ssl, connection } = this.options;
+
+            this.socket = ssl
+                ? tls.connect(
+                      {
+                          ...connection,
+                          ...ssl,
+                          ...(connection.host && !isIP(connection.host) && { servername: connection.host }),
+                      },
+                      resolve,
+                  )
+                : net.connect(connection, resolve);
+            this.socket.once("error", reject);
+        });
+        this.socket.removeAllListeners("error");
+
+        this.socket.on("data", (data) => this.handleData(data));
+        this.socket.once("close", async () => {
+            Object.values(this.queue).forEach(({ reject }) => {
+                reject(new ConnectionError("Socket closed unexpectedly"));
+            });
+            this.queue = {};
+        });
+    }
+
+    public disconnect() {
+        this.socket.removeAllListeners();
+        return new Promise<void>((resolve) => {
+            if (this.socket.pending) {
+                return resolve();
+            }
+            this.socket.end(resolve);
+        });
+    }
+
+    @trace((api, body) => ({ apiName: getApiName(api), body }))
+    public async sendRequest<Request, Response>(api: Api<Request, Response>, body: Request): Promise<Response> {
+        const correlationId = this.nextCorrelationId();
+
+        const encoder = new Encoder()
+            .writeInt16(api.apiKey)
+            .writeInt16(api.apiVersion)
+            .writeInt32(correlationId)
+            .writeString(this.options.clientId);
+
+        const request = api.request(encoder, body).value();
+        const requestEncoder = new Encoder().writeInt32(request.length).write(request);
+
+        const { responseDecoder, responseSize } = await new Promise<RawResonse>(async (resolve, reject) => {
+            try {
+                await this.write(requestEncoder.value());
+                this.queue[correlationId] = { resolve, reject };
+            } catch (error) {
+                reject(error);
+            }
+        });
+        const response = api.response(responseDecoder);
+
+        assert(
+            responseDecoder.getOffset() - 4 === responseSize,
+            `Buffer not correctly consumed: ${responseDecoder.getOffset() - 4} !== ${responseSize}`,
+        );
+
+        return response;
+    }
+
+    private write(buffer: Buffer) {
+        return new Promise<void>((resolve, reject) => {
+            const { stack } = new Error("Write error");
+            this.socket.write(buffer, (error) => {
+                if (error) {
+                    const err = new ConnectionError(error.message);
+                    err.stack += `\n${stack}`;
+                    return reject(err);
+                }
+                resolve();
+            });
+        });
+    }
+
+    private handleData(buffer: Buffer) {
+        this.buffer = this.buffer ? Buffer.concat([this.buffer, buffer]) : buffer;
+        if (this.buffer.length < 4) {
+            return;
+        }
+
+        const decoder = new Decoder(this.buffer);
+        const size = decoder.readInt32();
+        if (size !== decoder.getBufferLength() - 4) {
+            return;
+        }
+
+        const correlationId = decoder.readInt32();
+
+        const { resolve } = this.queue[correlationId];
+        delete this.queue[correlationId];
+
+        resolve({ responseDecoder: decoder, responseSize: size });
+        this.buffer = null;
+    }
+
+    private nextCorrelationId() {
+        return (this.lastCorrelationId = (this.lastCorrelationId + 1) % 2 ** 31);
+    }
+}
+
+export type SendRequest = typeof Connection.prototype.sendRequest;
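The 141-line hunk above matches src/connection.ts: every request is framed as an int32 size prefix followed by apiKey, apiVersion, correlationId, clientId and the encoded payload, and responses are matched back to pending promises by the correlation id they echo. Note that handleData only decodes once the buffered bytes hold exactly one complete frame (size === buffer length - 4). A self-contained sketch of that length-prefixed framing check follows; the helper name tryDecodeFrame is illustrative and not part of the package.

// Sketch of the length-prefixed framing used by Connection.handleData (illustrative, not package code).
function tryDecodeFrame(buffer: Buffer): { correlationId: number; payload: Buffer } | null {
    if (buffer.length < 4) return null; // the int32 size prefix has not fully arrived yet
    const size = buffer.readInt32BE(0); // number of bytes that follow the prefix
    if (size !== buffer.length - 4) return null; // like handleData: wait until exactly one full frame is buffered
    const correlationId = buffer.readInt32BE(4); // responses begin with the correlation id of the request
    return { correlationId, payload: buffer.subarray(8) };
}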
@@ -0,0 +1,209 @@
+import { API, API_ERROR } from "../api";
+import { KEY_TYPE } from "../api/find-coordinator";
+import { Assignment, MemberAssignment } from "../api/sync-group";
+import { Cluster } from "../cluster";
+import { KafkaTSApiError, KafkaTSError } from "../utils/error";
+import { ConsumerMetadata } from "./consumer-metadata";
+import { OffsetManager } from "./offset-manager";
+
+type ConsumerGroupOptions = {
+    cluster: Cluster;
+    topics: string[];
+    groupId: string;
+    groupInstanceId: string | null;
+    sessionTimeoutMs: number;
+    rebalanceTimeoutMs: number;
+    metadata: ConsumerMetadata;
+    offsetManager: OffsetManager;
+};
+
+export class ConsumerGroup {
+    private coordinatorId = -1;
+    private memberId = "";
+    private generationId = -1;
+    private leaderId = "";
+    private memberIds: string[] = [];
+    private heartbeatInterval: NodeJS.Timeout | null = null;
+    private heartbeatError: KafkaTSError | null = null;
+
+    constructor(private options: ConsumerGroupOptions) {}
+
+    public async join() {
+        await this.findCoordinator();
+        await this.joinGroup();
+        await this.syncGroup();
+        await this.offsetFetch();
+        this.startHeartbeater();
+    }
+
+    private async startHeartbeater() {
+        this.heartbeatInterval = setInterval(async () => {
+            try {
+                await this.heartbeat();
+            } catch (error) {
+                this.heartbeatError = error as KafkaTSError;
+            }
+        }, 5000);
+    }
+
+    private async stopHeartbeater() {
+        if (this.heartbeatInterval) {
+            clearInterval(this.heartbeatInterval);
+            this.heartbeatInterval = null;
+        }
+    }
+
+    public async handleLastHeartbeat() {
+        if (this.heartbeatError) {
+            throw this.heartbeatError;
+        }
+    }
+
+    private async findCoordinator() {
+        const { coordinators } = await this.options.cluster.sendRequest(API.FIND_COORDINATOR, {
+            keyType: KEY_TYPE.GROUP,
+            keys: [this.options.groupId],
+        });
+        this.coordinatorId = coordinators[0].nodeId;
+    }
+
+    private async joinGroup(): Promise<void> {
+        const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
+        try {
+            const response = await cluster.sendRequestToNode(this.coordinatorId)(API.JOIN_GROUP, {
+                groupId,
+                groupInstanceId,
+                memberId: this.memberId,
+                sessionTimeoutMs,
+                rebalanceTimeoutMs,
+                protocolType: "consumer",
+                protocols: [{ name: "RoundRobinAssigner", metadata: { version: 0, topics } }],
+                reason: null,
+            });
+            this.memberId = response.memberId;
+            this.generationId = response.generationId;
+            this.leaderId = response.leader;
+            this.memberIds = response.members.map((member) => member.memberId);
+        } catch (error) {
+            if ((error as KafkaTSApiError).errorCode === API_ERROR.MEMBER_ID_REQUIRED) {
+                this.memberId = (error as KafkaTSApiError).response.memberId;
+                return this.joinGroup();
+            }
+            throw error;
+        }
+    }
+
+    private async syncGroup() {
+        const { cluster, metadata, groupId, groupInstanceId } = this.options;
+
+        let assignments: MemberAssignment[] = [];
+        if (this.memberId === this.leaderId) {
+            const memberAssignments = Object.entries(metadata.getTopicPartitions())
+                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
+                .reduce(
+                    (acc, { topic, partition }, index) => {
+                        const memberId = this.memberIds[index % this.memberIds.length];
+                        acc[memberId] ??= {};
+                        acc[memberId][topic] ??= [];
+                        acc[memberId][topic].push(partition);
+                        return acc;
+                    },
+                    {} as Record<string, Record<string, number[]>>,
+                );
+            assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({ memberId, assignment }));
+        }
+
+        const response = await cluster.sendRequestToNode(this.coordinatorId)(API.SYNC_GROUP, {
+            groupId,
+            groupInstanceId,
+            memberId: this.memberId,
+            generationId: this.generationId,
+            protocolType: "consumer",
+            protocolName: "RoundRobinAssigner",
+            assignments,
+        });
+        metadata.setAssignment(JSON.parse(response.assignments || "{}") as Assignment);
+    }
+
+    private async offsetFetch() {
+        const { cluster, groupId, topics, metadata, offsetManager } = this.options;
+
+        const assignment = metadata.getAssignment();
+        const request = {
+            groups: [
+                {
+                    groupId,
+                    memberId: this.memberId,
+                    memberEpoch: -1,
+                    topics: topics
+                        .map((topic) => ({ name: topic, partitionIndexes: assignment[topic] ?? [] }))
+                        .filter(({ partitionIndexes }) => partitionIndexes.length),
+                },
+            ].filter(({ topics }) => topics.length),
+            requireStable: true,
+        };
+        if (!request.groups.length) return;
+
+        const response = await cluster.sendRequestToNode(this.coordinatorId)(API.OFFSET_FETCH, request);
+        response.groups.forEach((group) => {
+            group.topics.forEach((topic) => {
+                topic.partitions
+                    .filter(({ committedOffset }) => committedOffset >= 0)
+                    .forEach(({ partitionIndex, committedOffset }) =>
+                        offsetManager.resolve(topic.name, partitionIndex, committedOffset),
+                    );
+            });
+        });
+        offsetManager.flush();
+    }
+
+    public async offsetCommit() {
+        const { cluster, groupId, groupInstanceId, offsetManager } = this.options;
+        const request = {
+            groupId,
+            groupInstanceId,
+            memberId: this.memberId,
+            generationIdOrMemberEpoch: this.generationId,
+            topics: Object.entries(offsetManager.pendingOffsets).map(([topic, partitions]) => ({
+                name: topic,
+                partitions: Object.entries(partitions).map(([partition, offset]) => ({
+                    partitionIndex: parseInt(partition),
+                    committedOffset: offset,
+                    committedLeaderEpoch: -1,
+                    committedMetadata: null,
+                })),
+            })),
+        };
+        if (!request.topics.length) {
+            return;
+        }
+        await cluster.sendRequestToNode(this.coordinatorId)(API.OFFSET_COMMIT, request);
+        offsetManager.flush();
+    }
+
+    public async heartbeat() {
+        const { cluster, groupId, groupInstanceId } = this.options;
+        await cluster.sendRequestToNode(this.coordinatorId)(API.HEARTBEAT, {
+            groupId,
+            groupInstanceId,
+            memberId: this.memberId,
+            generationId: this.generationId,
+        });
+    }
+
+    public async leaveGroup() {
+        const { cluster, groupId, groupInstanceId } = this.options;
+        this.stopHeartbeater();
+        try {
+            await cluster.sendRequestToNode(this.coordinatorId)(API.LEAVE_GROUP, {
+                groupId,
+                members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
+            });
+        } catch (error) {
+            if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
+                return;
+            }
+            throw error;
+        }
+    }
+}
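The 209-line hunk above matches src/consumer/consumer-group.ts and walks the standard group-membership handshake: FIND_COORDINATOR, JOIN_GROUP (retried once when the coordinator answers MEMBER_ID_REQUIRED with a fresh member id), SYNC_GROUP where the elected leader computes a round-robin assignment, OFFSET_FETCH, and finally a heartbeat every 5 seconds whose failures surface later via handleLastHeartbeat. The leader-side assignment reads well in isolation; the sketch below mirrors it as a standalone function (roundRobinAssign is an illustrative name, not part of the package).

// Sketch of the leader-side round-robin assignment computed in ConsumerGroup.syncGroup.
type TopicPartitions = Record<string, number[]>;

function roundRobinAssign(topicPartitions: TopicPartitions, memberIds: string[]): Record<string, TopicPartitions> {
    const pairs = Object.entries(topicPartitions).flatMap(([topic, partitions]) =>
        partitions.map((partition) => ({ topic, partition })),
    );
    return pairs.reduce(
        (acc, { topic, partition }, index) => {
            const memberId = memberIds[index % memberIds.length]; // deal partitions out like cards
            acc[memberId] ??= {};
            acc[memberId][topic] ??= [];
            acc[memberId][topic].push(partition);
            return acc;
        },
        {} as Record<string, TopicPartitions>,
    );
}

// roundRobinAssign({ orders: [0, 1, 2] }, ["member-a", "member-b"])
// => { "member-a": { orders: [0, 2] }, "member-b": { orders: [1] } }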
@@ -0,0 +1,14 @@
+import { Assignment } from "../api/sync-group";
+import { Metadata } from "../metadata";
+
+export class ConsumerMetadata extends Metadata {
+    private assignment: Assignment = {};
+
+    public getAssignment() {
+        return this.assignment;
+    }
+
+    public setAssignment(newAssignment: Assignment) {
+        this.assignment = newAssignment;
+    }
+}
@@ -0,0 +1,229 @@
+import { API, API_ERROR } from "../api";
+import { IsolationLevel } from "../api/fetch";
+import { Assignment } from "../api/sync-group";
+import { Cluster } from "../cluster";
+import { distributeAssignmentsToNodes } from "../distributors/assignments-to-replicas";
+import { Message } from "../types";
+import { delay } from "../utils/delay";
+import { ConnectionError, KafkaTSApiError } from "../utils/error";
+import { defaultRetrier, Retrier } from "../utils/retrier";
+import { ConsumerGroup } from "./consumer-group";
+import { ConsumerMetadata } from "./consumer-metadata";
+import { OffsetManager } from "./offset-manager";
+
+export type ConsumerOptions = {
+    topics: string[];
+    groupId?: string | null;
+    groupInstanceId?: string | null;
+    rackId?: string;
+    isolationLevel?: IsolationLevel;
+    sessionTimeoutMs?: number;
+    rebalanceTimeoutMs?: number;
+    maxWaitMs?: number;
+    minBytes?: number;
+    maxBytes?: number;
+    partitionMaxBytes?: number;
+    allowTopicAutoCreation?: boolean;
+    fromBeginning?: boolean;
+    retrier?: Retrier;
+} & ({ onMessage: (message: Message) => unknown } | { onBatch: (messages: Message[]) => unknown });
+
+export class Consumer {
+    private options: Required<ConsumerOptions>;
+    private metadata: ConsumerMetadata;
+    private consumerGroup: ConsumerGroup | undefined;
+    private offsetManager: OffsetManager;
+    private stopHook: (() => void) | undefined;
+
+    constructor(
+        private cluster: Cluster,
+        options: ConsumerOptions,
+    ) {
+        this.options = {
+            ...options,
+            groupId: options.groupId ?? null,
+            groupInstanceId: options.groupInstanceId ?? null,
+            rackId: options.rackId ?? "",
+            sessionTimeoutMs: options.sessionTimeoutMs ?? 30_000,
+            rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
+            maxWaitMs: options.maxWaitMs ?? 5000,
+            minBytes: options.minBytes ?? 1,
+            maxBytes: options.maxBytes ?? 1_000_000,
+            partitionMaxBytes: options.partitionMaxBytes ?? 1_000_000,
+            isolationLevel: options.isolationLevel ?? IsolationLevel.READ_UNCOMMITTED,
+            allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
+            fromBeginning: options.fromBeginning ?? false,
+            retrier: options.retrier ?? defaultRetrier,
+        };
+
+        this.metadata = new ConsumerMetadata({ cluster: this.cluster });
+        this.offsetManager = new OffsetManager({
+            cluster: this.cluster,
+            metadata: this.metadata,
+            isolationLevel: this.options.isolationLevel,
+        });
+        this.consumerGroup = this.options.groupId
+            ? new ConsumerGroup({
+                  cluster: this.cluster,
+                  topics: this.options.topics,
+                  groupId: this.options.groupId,
+                  groupInstanceId: this.options.groupInstanceId,
+                  sessionTimeoutMs: this.options.sessionTimeoutMs,
+                  rebalanceTimeoutMs: this.options.rebalanceTimeoutMs,
+                  metadata: this.metadata,
+                  offsetManager: this.offsetManager,
+              })
+            : undefined;
+    }
+
+    public async start(): Promise<void> {
+        const { topics, allowTopicAutoCreation, fromBeginning } = this.options;
+
+        this.stopHook = undefined;
+
+        try {
+            await this.cluster.connect();
+            await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
+            this.metadata.setAssignment(this.metadata.getTopicPartitions());
+            await this.offsetManager.fetchOffsets({ fromBeginning });
+            await this.consumerGroup?.join();
+        } catch (error) {
+            console.error(error);
+            console.debug(`Restarting consumer in 1 second...`);
+            await delay(1000);
+
+            if (this.stopHook) return (this.stopHook as () => void)();
+            return this.close(true).then(() => this.start());
+        }
+        this.fetchLoop();
+    }
+
+    private fetchLoop = async () => {
+        const { options } = this;
+        const { retrier } = options;
+
+        let nodeAssignments: { nodeId: number; assignment: Assignment }[] = [];
+        let shouldReassign = true;
+
+        while (!this.stopHook) {
+            if (shouldReassign || !nodeAssignments) {
+                nodeAssignments = Object.entries(
+                    distributeAssignmentsToNodes(
+                        this.metadata.getAssignment(),
+                        this.metadata.getTopicPartitionReplicaIds(),
+                    ),
+                ).map(([nodeId, assignment]) => ({ nodeId: parseInt(nodeId), assignment }));
+                shouldReassign = false;
+            }
+
+            try {
+                for (const { nodeId, assignment } of nodeAssignments) {
+                    const batch = await this.fetch(nodeId, assignment);
+                    const messages = batch.responses.flatMap(({ topicId, partitions }) =>
+                        partitions.flatMap(({ partitionIndex, records }) =>
+                            records.flatMap(({ baseTimestamp, baseOffset, records }) =>
+                                records.map(
+                                    (message): Required<Message> => ({
+                                        topic: this.metadata.getTopicNameById(topicId),
+                                        partition: partitionIndex,
+                                        key: message.key ?? null,
+                                        value: message.value ?? null,
+                                        headers: Object.fromEntries(
+                                            message.headers.map(({ key, value }) => [key, value]),
+                                        ),
+                                        timestamp: baseTimestamp + BigInt(message.timestampDelta),
+                                        offset: baseOffset + BigInt(message.offsetDelta),
+                                    }),
+                                ),
+                            ),
+                        ),
+                    );
+
+                    if ("onBatch" in options) {
+                        await retrier(() => options.onBatch(messages));
+
+                        messages.forEach(({ topic, partition, offset }) =>
+                            this.offsetManager.resolve(topic, partition, offset + 1n),
+                        );
+                    } else if ("onMessage" in options) {
+                        for (const message of messages) {
+                            await retrier(() => options.onMessage(message));
+
+                            const { topic, partition, offset } = message;
+                            this.offsetManager.resolve(topic, partition, offset + 1n);
+                        }
+                    }
+                    await this.consumerGroup?.offsetCommit();
+                    await this.consumerGroup?.handleLastHeartbeat();
+                }
+
+                if (!nodeAssignments.length) {
+                    console.debug("No partitions assigned. Waiting for reassignment...");
+                    await delay(this.options.maxWaitMs);
+                    await this.consumerGroup?.handleLastHeartbeat();
+                }
+            } catch (error) {
+                if ((error as KafkaTSApiError).errorCode === API_ERROR.REBALANCE_IN_PROGRESS) {
+                    console.debug("Rebalance in progress...");
+                    shouldReassign = true;
+                    continue;
+                }
+                if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
+                    console.debug("New consumer with the same groupInstanceId joined. Exiting the consumer...");
+                    this.close();
+                    break;
+                }
+                if (
+                    error instanceof ConnectionError ||
+                    (error instanceof KafkaTSApiError && error.errorCode === API_ERROR.NOT_COORDINATOR)
+                ) {
+                    console.debug(`${error.message}. Restarting consumer...`);
+                    this.close().then(() => this.start());
+                    break;
+                }
+                console.error(error);
+                await this.consumerGroup?.offsetCommit();
+                break;
+            }
+        }
+        this.stopHook?.();
+    };
+
+    public async close(force = false): Promise<void> {
+        if (!force) {
+            await new Promise<void>((resolve) => {
+                this.stopHook = resolve;
+            });
+        }
+        await this.consumerGroup
+            ?.leaveGroup()
+            .catch((error) => console.warn(`Failed to leave group: ${error.message}`));
+        await this.cluster.disconnect().catch((error) => console.warn(`Failed to disconnect: ${error.message}`));
+    }
+
+    private fetch(nodeId: number, assignment: Assignment) {
+        const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
+
+        return this.cluster.sendRequestToNode(nodeId)(API.FETCH, {
+            maxWaitMs,
+            minBytes,
+            maxBytes,
+            isolationLevel,
+            sessionId: 0,
+            sessionEpoch: -1,
+            topics: Object.entries(assignment).map(([topic, partitions]) => ({
+                topicId: this.metadata.getTopicIdByName(topic),
+                partitions: partitions.map((partition) => ({
+                    partition,
+                    currentLeaderEpoch: -1,
+                    fetchOffset: this.offsetManager.getCurrentOffset(topic, partition),
+                    lastFetchedEpoch: -1,
+                    logStartOffset: 0n,
+                    partitionMaxBytes,
+                })),
+            })),
+            forgottenTopicsData: [],
+            rackId,
+        });
+    }
+}
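The final 229-line hunk matches src/consumer/consumer.ts, which ties the pieces together: it fills in option defaults, optionally joins a ConsumerGroup, maps the assignment onto broker nodes with distributeAssignmentsToNodes, and then loops fetching batches, dispatching to onMessage or onBatch, advancing offsets, and committing. Below is a sketch of constructing a consumer directly from the classes added in this diff; the import paths and the direct construction are assumptions, since the examples and the package's public client factory are not shown in this excerpt.

// Illustrative sketch only; a real application would normally go through the package's client
// factory (not part of this diff), and the import paths below are assumed.
import { Cluster } from "kafka-ts/dist/cluster";
import { Consumer } from "kafka-ts/dist/consumer/consumer";

async function main() {
    const cluster = new Cluster({
        clientId: "example-consumer",
        bootstrapServers: [{ host: "localhost", port: 9092 }],
        sasl: null,
        ssl: null,
    });

    const consumer = new Consumer(cluster, {
        topics: ["my-topic"],
        groupId: "my-group", // omit groupId to consume every partition directly, without group coordination
        fromBeginning: true,
        onMessage: (message) => {
            // each message carries topic, partition, offset, key, value, headers and timestamp
            console.log(message.topic, message.partition, message.offset, message.value);
        },
    });

    await consumer.start(); // connects, joins the group, then runs the fetch loop until close()
    // later: await consumer.close();
}

main().catch(console.error);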