kafka-ts 0.0.3-beta → 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +68 -8
- package/dist/api/api-versions.d.ts +9 -0
- package/dist/api/api-versions.js +24 -0
- package/dist/api/create-topics.d.ts +38 -0
- package/dist/api/create-topics.js +53 -0
- package/dist/api/delete-topics.d.ts +18 -0
- package/dist/api/delete-topics.js +33 -0
- package/dist/api/fetch.d.ts +84 -0
- package/dist/api/fetch.js +142 -0
- package/dist/api/find-coordinator.d.ts +21 -0
- package/dist/api/find-coordinator.js +39 -0
- package/dist/api/heartbeat.d.ts +11 -0
- package/dist/api/heartbeat.js +27 -0
- package/dist/api/index.d.ts +576 -0
- package/dist/api/index.js +165 -0
- package/dist/api/init-producer-id.d.ts +13 -0
- package/dist/api/init-producer-id.js +29 -0
- package/dist/api/join-group.d.ts +34 -0
- package/dist/api/join-group.js +51 -0
- package/dist/api/leave-group.d.ts +19 -0
- package/dist/api/leave-group.js +39 -0
- package/dist/api/list-offsets.d.ts +29 -0
- package/dist/api/list-offsets.js +48 -0
- package/dist/api/metadata.d.ts +40 -0
- package/dist/api/metadata.js +58 -0
- package/dist/api/offset-commit.d.ts +28 -0
- package/dist/api/offset-commit.js +48 -0
- package/dist/api/offset-fetch.d.ts +31 -0
- package/dist/api/offset-fetch.js +55 -0
- package/dist/api/produce.d.ts +54 -0
- package/dist/api/produce.js +126 -0
- package/dist/api/sasl-authenticate.d.ts +11 -0
- package/dist/api/sasl-authenticate.js +23 -0
- package/dist/api/sasl-handshake.d.ts +6 -0
- package/dist/api/sasl-handshake.js +19 -0
- package/dist/api/sync-group.d.ts +24 -0
- package/dist/api/sync-group.js +36 -0
- package/dist/auth/index.d.ts +2 -0
- package/dist/auth/index.js +8 -0
- package/dist/auth/plain.d.ts +5 -0
- package/dist/auth/plain.js +12 -0
- package/dist/auth/scram.d.ts +9 -0
- package/dist/auth/scram.js +40 -0
- package/dist/broker.d.ts +30 -0
- package/dist/broker.js +55 -0
- package/dist/client.d.ts +23 -0
- package/dist/client.js +36 -0
- package/dist/cluster.d.ts +27 -0
- package/dist/cluster.js +70 -0
- package/dist/cluster.test.d.ts +1 -0
- package/dist/cluster.test.js +343 -0
- package/dist/codecs/gzip.d.ts +2 -0
- package/dist/codecs/gzip.js +8 -0
- package/dist/codecs/index.d.ts +2 -0
- package/dist/codecs/index.js +17 -0
- package/dist/codecs/none.d.ts +2 -0
- package/dist/codecs/none.js +7 -0
- package/dist/codecs/types.d.ts +5 -0
- package/dist/codecs/types.js +2 -0
- package/dist/connection.d.ts +26 -0
- package/dist/connection.js +175 -0
- package/dist/consumer/consumer-group.d.ts +41 -0
- package/dist/consumer/consumer-group.js +215 -0
- package/dist/consumer/consumer-metadata.d.ts +7 -0
- package/dist/consumer/consumer-metadata.js +14 -0
- package/dist/consumer/consumer.d.ts +44 -0
- package/dist/consumer/consumer.js +225 -0
- package/dist/consumer/fetch-manager.d.ts +33 -0
- package/dist/consumer/fetch-manager.js +140 -0
- package/dist/consumer/fetcher.d.ts +25 -0
- package/dist/consumer/fetcher.js +64 -0
- package/dist/consumer/offset-manager.d.ts +22 -0
- package/dist/consumer/offset-manager.js +66 -0
- package/dist/consumer/processor.d.ts +19 -0
- package/dist/consumer/processor.js +59 -0
- package/dist/distributors/assignments-to-replicas.d.ts +16 -0
- package/dist/distributors/assignments-to-replicas.js +59 -0
- package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
- package/dist/distributors/assignments-to-replicas.test.js +40 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/distributors/partitioner.d.ts +7 -0
- package/dist/distributors/partitioner.js +23 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.js +26 -0
- package/dist/metadata.d.ts +24 -0
- package/dist/metadata.js +106 -0
- package/dist/producer/producer.d.ts +24 -0
- package/dist/producer/producer.js +131 -0
- package/dist/types.d.ts +11 -0
- package/dist/types.js +2 -0
- package/dist/utils/api.d.ts +9 -0
- package/dist/utils/api.js +5 -0
- package/dist/utils/crypto.d.ts +8 -0
- package/dist/utils/crypto.js +18 -0
- package/dist/utils/decoder.d.ts +30 -0
- package/dist/utils/decoder.js +152 -0
- package/dist/utils/delay.d.ts +1 -0
- package/dist/utils/delay.js +5 -0
- package/dist/utils/encoder.d.ts +28 -0
- package/dist/utils/encoder.js +125 -0
- package/dist/utils/error.d.ts +11 -0
- package/dist/utils/error.js +27 -0
- package/dist/utils/logger.d.ts +9 -0
- package/dist/utils/logger.js +32 -0
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/murmur2.d.ts +3 -0
- package/dist/utils/murmur2.js +40 -0
- package/dist/utils/retrier.d.ts +10 -0
- package/dist/utils/retrier.js +22 -0
- package/dist/utils/tracer.d.ts +5 -0
- package/dist/utils/tracer.js +39 -0
- package/package.json +11 -2
- package/src/__snapshots__/{request-handler.test.ts.snap → cluster.test.ts.snap} +329 -26
- package/src/api/fetch.ts +84 -29
- package/src/api/index.ts +3 -1
- package/src/api/metadata.ts +1 -1
- package/src/api/offset-commit.ts +1 -1
- package/src/api/offset-fetch.ts +1 -5
- package/src/api/produce.ts +15 -18
- package/src/auth/index.ts +2 -0
- package/src/auth/plain.ts +10 -0
- package/src/auth/scram.ts +52 -0
- package/src/broker.ts +7 -9
- package/src/client.ts +2 -2
- package/src/cluster.test.ts +16 -14
- package/src/cluster.ts +38 -40
- package/src/codecs/gzip.ts +9 -0
- package/src/codecs/index.ts +16 -0
- package/src/codecs/none.ts +6 -0
- package/src/codecs/types.ts +4 -0
- package/src/connection.ts +31 -17
- package/src/consumer/consumer-group.ts +43 -23
- package/src/consumer/consumer.ts +64 -43
- package/src/consumer/fetch-manager.ts +43 -53
- package/src/consumer/fetcher.ts +20 -13
- package/src/consumer/offset-manager.ts +18 -7
- package/src/consumer/processor.ts +14 -8
- package/src/distributors/assignments-to-replicas.ts +1 -3
- package/src/distributors/partitioner.ts +27 -0
- package/src/index.ts +7 -2
- package/src/metadata.ts +4 -0
- package/src/producer/producer.ts +22 -12
- package/src/types.ts +3 -3
- package/src/utils/api.ts +1 -1
- package/src/utils/crypto.ts +15 -0
- package/src/utils/decoder.ts +11 -5
- package/src/utils/encoder.ts +29 -22
- package/src/utils/logger.ts +37 -0
- package/src/utils/murmur2.ts +44 -0
- package/src/utils/tracer.ts +40 -22
- package/.github/workflows/release.yml +0 -17
- package/certs/ca.crt +0 -29
- package/certs/ca.key +0 -52
- package/certs/ca.srl +0 -1
- package/certs/kafka.crt +0 -29
- package/certs/kafka.csr +0 -26
- package/certs/kafka.key +0 -52
- package/certs/kafka.keystore.jks +0 -0
- package/certs/kafka.truststore.jks +0 -0
- package/docker-compose.yml +0 -104
- package/examples/package-lock.json +0 -31
- package/examples/package.json +0 -14
- package/examples/src/client.ts +0 -9
- package/examples/src/consumer.ts +0 -18
- package/examples/src/create-topic.ts +0 -44
- package/examples/src/producer.ts +0 -24
- package/examples/src/replicator.ts +0 -25
- package/examples/src/utils/delay.ts +0 -1
- package/examples/src/utils/json.ts +0 -1
- package/examples/tsconfig.json +0 -7
- package/log4j.properties +0 -95
- package/scripts/generate-certs.sh +0 -24
- package/src/utils/debug.ts +0 -9
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/// <reference types="node" />
import EventEmitter from 'events';
import { Cluster } from '../cluster';
import { ConsumerMetadata } from './consumer-metadata';
import { OffsetManager } from './offset-manager';
/** Construction options for {@link ConsumerGroup}. */
type ConsumerGroupOptions = {
    cluster: Cluster;
    topics: string[];
    groupId: string;
    /** Stable id for static group membership; null for dynamic membership. */
    groupInstanceId: string | null;
    sessionTimeoutMs: number;
    rebalanceTimeoutMs: number;
    metadata: ConsumerMetadata;
    offsetManager: OffsetManager;
};
/**
 * Consumer-group membership lifecycle: coordinator discovery, join/sync,
 * initial offset fetch, periodic heartbeating, offset commits and leave.
 * Emits `offsetCommit` after a commit request succeeds.
 */
export declare class ConsumerGroup extends EventEmitter<{
    offsetCommit: [];
}> {
    private options;
    private coordinatorId;
    private memberId;
    private generationId;
    private leaderId;
    private memberIds;
    private heartbeatInterval;
    private heartbeatError;
    constructor(options: ConsumerGroupOptions);
    /** Runs the full join flow and starts the background heartbeater. */
    join(): Promise<void>;
    private startHeartbeater;
    private stopHeartbeater;
    /** Rethrows the last error captured by a background heartbeat, if any. */
    handleLastHeartbeat(): void;
    /** Clears any stored background heartbeat error. */
    resetHeartbeat(): void;
    private findCoordinator;
    private joinGroup;
    private syncGroup;
    private offsetFetch;
    /** Commits the pending offsets for the given topic → partition sets. */
    offsetCommit(topicPartitions: Record<string, Set<number>>): Promise<void>;
    heartbeat(): Promise<void>;
    leaveGroup(): Promise<void>;
}
export {};
|
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
"use strict";
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
    if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ConsumerGroup = void 0;
const events_1 = __importDefault(require("events"));
const api_1 = require("../api");
const find_coordinator_1 = require("../api/find-coordinator");
const tracer_1 = require("../utils/tracer");
const trace = (0, tracer_1.createTracer)('ConsumerGroup');
/**
 * Manages this member's participation in a Kafka consumer group:
 * coordinator discovery, join/sync, initial offset fetch, background
 * heartbeating, offset commits and leaving the group.
 * Emits 'offsetCommit' after a commit request succeeds.
 */
class ConsumerGroup extends events_1.default {
    options;
    coordinatorId = -1; // node id of the group coordinator; -1 until discovered
    memberId = ''; // assigned by the coordinator during join
    generationId = -1;
    leaderId = '';
    memberIds = []; // all members of the current generation
    heartbeatInterval = null; // handle of the background heartbeat timer
    heartbeatError = null; // last error thrown by a background heartbeat
    constructor(options) {
        super();
        this.options = options;
    }
    /**
     * Full join flow: find the coordinator, point the cluster's seed broker
     * at it, join + sync the group, fetch committed offsets, then start the
     * background heartbeater.
     */
    async join() {
        await this.findCoordinator();
        await this.options.cluster.setSeedBroker(this.coordinatorId);
        this.memberId = '';
        await this.joinGroup();
        await this.syncGroup();
        await this.offsetFetch();
        this.startHeartbeater();
    }
    async startHeartbeater() {
        // Fix: clear any previously installed timer first, so repeated join()
        // calls (e.g. a rejoin after a rebalance) cannot leak intervals and
        // send duplicate heartbeats.
        this.stopHeartbeater();
        this.heartbeatInterval = setInterval(async () => {
            try {
                await this.heartbeat();
            }
            catch (error) {
                // Errors cannot propagate out of a timer callback; stash the
                // error so handleLastHeartbeat() can rethrow it on the consumer's
                // main loop.
                this.heartbeatError = error;
            }
        }, 5000); // NOTE(review): fixed 5s cadence; presumably should stay well below sessionTimeoutMs — confirm
    }
    async stopHeartbeater() {
        if (this.heartbeatInterval) {
            clearInterval(this.heartbeatInterval);
            this.heartbeatInterval = null;
        }
    }
    /** Rethrows the last background heartbeat error, if one was captured. */
    handleLastHeartbeat() {
        if (this.heartbeatError) {
            throw this.heartbeatError;
        }
    }
    /** Clears any stored background heartbeat error. */
    resetHeartbeat() {
        this.heartbeatError = null;
    }
    async findCoordinator() {
        const { coordinators } = await this.options.cluster.sendRequest(api_1.API.FIND_COORDINATOR, {
            keyType: find_coordinator_1.KEY_TYPE.GROUP,
            keys: [this.options.groupId],
        });
        this.coordinatorId = coordinators[0].nodeId;
    }
    async joinGroup() {
        const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
        try {
            const response = await cluster.sendRequest(api_1.API.JOIN_GROUP, {
                groupId,
                groupInstanceId,
                memberId: this.memberId,
                sessionTimeoutMs,
                rebalanceTimeoutMs,
                protocolType: 'consumer',
                protocols: [{ name: 'RoundRobinAssigner', metadata: { version: 0, topics } }],
                reason: null,
            });
            this.memberId = response.memberId;
            this.generationId = response.generationId;
            this.leaderId = response.leader;
            this.memberIds = response.members.map((member) => member.memberId);
        }
        catch (error) {
            // The broker may reject the first join and hand back a member id;
            // retry once with the id it assigned.
            if (error.errorCode === api_1.API_ERROR.MEMBER_ID_REQUIRED) {
                this.memberId = error.response.memberId;
                return this.joinGroup();
            }
            throw error;
        }
    }
    async syncGroup() {
        const { cluster, metadata, groupId, groupInstanceId } = this.options;
        let assignments = [];
        // Only the elected leader computes assignments; followers send an
        // empty list and receive their assignment in the response.
        if (this.memberId === this.leaderId) {
            // Round-robin: deal topic-partitions out to members in order.
            const memberAssignments = Object.entries(metadata.getTopicPartitions())
                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
                .reduce((acc, { topic, partition }, index) => {
                    const memberId = this.memberIds[index % this.memberIds.length];
                    acc[memberId] ??= {};
                    acc[memberId][topic] ??= [];
                    acc[memberId][topic].push(partition);
                    return acc;
                }, {});
            assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({ memberId, assignment }));
        }
        const response = await cluster.sendRequest(api_1.API.SYNC_GROUP, {
            groupId,
            groupInstanceId,
            memberId: this.memberId,
            generationId: this.generationId,
            protocolType: 'consumer',
            protocolName: 'RoundRobinAssigner',
            assignments,
        });
        metadata.setAssignment(JSON.parse(response.assignments || '{}'));
    }
    async offsetFetch() {
        const { cluster, groupId, topics, metadata, offsetManager } = this.options;
        const assignment = metadata.getAssignment();
        const request = {
            groups: [
                {
                    groupId,
                    topics: topics
                        .map((topic) => ({ name: topic, partitionIndexes: assignment[topic] ?? [] }))
                        .filter(({ partitionIndexes }) => partitionIndexes.length),
                },
            ].filter(({ topics }) => topics.length),
            requireStable: true,
        };
        // Nothing assigned to this member — nothing to fetch.
        if (!request.groups.length)
            return;
        const response = await cluster.sendRequest(api_1.API.OFFSET_FETCH, request);
        const topicPartitions = {};
        response.groups.forEach((group) => {
            group.topics.forEach((topic) => {
                topicPartitions[topic.name] ??= new Set();
                topic.partitions.forEach(({ partitionIndex, committedOffset }) => {
                    // A negative committed offset means "no committed offset";
                    // leave the offset manager's default in place.
                    if (committedOffset >= 0) {
                        topicPartitions[topic.name].add(partitionIndex);
                        offsetManager.resolve(topic.name, partitionIndex, committedOffset);
                    }
                });
            });
        });
        offsetManager.flush(topicPartitions);
    }
    /**
     * Commits the offset manager's pending offsets for the given
     * topic → partition sets, then emits 'offsetCommit'.
     */
    async offsetCommit(topicPartitions) {
        const { cluster, groupId, groupInstanceId, offsetManager } = this.options;
        const request = {
            groupId,
            groupInstanceId,
            memberId: this.memberId,
            generationIdOrMemberEpoch: this.generationId,
            topics: Object.entries(topicPartitions).map(([topic, partitions]) => ({
                name: topic,
                partitions: [...partitions].map((partitionIndex) => ({
                    partitionIndex,
                    committedOffset: offsetManager.pendingOffsets[topic][partitionIndex],
                    committedLeaderEpoch: -1,
                    committedMetadata: null,
                })),
            })),
        };
        if (!request.topics.length) {
            return;
        }
        await cluster.sendRequest(api_1.API.OFFSET_COMMIT, request);
        this.emit('offsetCommit');
    }
    async heartbeat() {
        const { cluster, groupId, groupInstanceId } = this.options;
        await cluster.sendRequest(api_1.API.HEARTBEAT, {
            groupId,
            groupInstanceId,
            memberId: this.memberId,
            generationId: this.generationId,
        });
    }
    async leaveGroup() {
        // Never joined — nothing to leave.
        if (this.coordinatorId === -1) {
            return;
        }
        const { cluster, groupId, groupInstanceId } = this.options;
        this.stopHeartbeater();
        try {
            await cluster.sendRequest(api_1.API.LEAVE_GROUP, {
                groupId,
                members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
            });
        }
        catch (error) {
            // Another member already took over this static instance id; our
            // membership is gone either way.
            if (error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
                return;
            }
            throw error;
        }
    }
}
exports.ConsumerGroup = ConsumerGroup;
__decorate([
    trace(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", []),
    __metadata("design:returntype", Promise)
], ConsumerGroup.prototype, "join", null);
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ConsumerMetadata = void 0;
const metadata_1 = require("../metadata");
/**
 * Cluster metadata specialized for consumers: additionally tracks the
 * topic → partition-list assignment handed to this group member
 * (written during group sync, read when fetching).
 */
class ConsumerMetadata extends metadata_1.Metadata {
    assignment = {};
    /** Returns the member's current topic → partitions assignment. */
    getAssignment() {
        return this.assignment;
    }
    /** Replaces the member's assignment wholesale (e.g. after a group sync). */
    setAssignment(nextAssignment) {
        this.assignment = nextAssignment;
    }
}
exports.ConsumerMetadata = ConsumerMetadata;
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
/// <reference types="node" />
import EventEmitter from 'events';
import { IsolationLevel } from '../api/fetch';
import { Cluster } from '../cluster';
import { Message } from '../types';
import { BatchGranularity } from './fetch-manager';
/**
 * Consumer configuration. Exactly one of `onBatch` / `onMessage` is required
 * (enforced by the union below). Defaults shown are those applied by the
 * Consumer constructor.
 */
export type ConsumerOptions = {
    topics: string[];
    /** Consumer-group id; when omitted/null the consumer runs without a group. */
    groupId?: string | null;
    /** Stable id for static group membership. Default: null (dynamic). */
    groupInstanceId?: string | null;
    /** Default: ''. */
    rackId?: string;
    /** Default: READ_UNCOMMITTED. */
    isolationLevel?: IsolationLevel;
    /** Default: 30_000. */
    sessionTimeoutMs?: number;
    /** Default: 60_000. */
    rebalanceTimeoutMs?: number;
    /** Default: 5000. */
    maxWaitMs?: number;
    /** Default: 1. */
    minBytes?: number;
    /** Default: 1_048_576. */
    maxBytes?: number;
    /** Default: 1_048_576. */
    partitionMaxBytes?: number;
    /** Default: false. */
    allowTopicAutoCreation?: boolean;
    /** Default: false. */
    fromBeginning?: boolean;
    /** Default: 'partition'. */
    batchGranularity?: BatchGranularity;
    /** Number of concurrent processors. Default: 1. */
    concurrency?: number;
} & ({
    onBatch: (messages: Required<Message>[]) => unknown;
} | {
    onMessage: (message: Required<Message>) => unknown;
});
/**
 * High-level consumer: wires together metadata, offset management,
 * optional group membership and the fetch manager.
 * Re-emits the group's `offsetCommit` event.
 */
export declare class Consumer extends EventEmitter<{
    offsetCommit: [];
}> {
    private cluster;
    private options;
    private metadata;
    private consumerGroup;
    private offsetManager;
    private fetchManager?;
    private stopHook;
    constructor(cluster: Cluster, options: ConsumerOptions);
    /** Connects, joins the group (if any) and starts the fetch loop. */
    start(): Promise<void>;
    /** Stops fetching, leaves the group and disconnects. `force` skips the graceful drain. */
    close(force?: boolean): Promise<void>;
    private startFetchManager;
    private process;
    private fetch;
}
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
"use strict";
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
    if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Consumer = void 0;
const events_1 = __importDefault(require("events"));
const api_1 = require("../api");
const messages_to_topic_partition_leaders_1 = require("../distributors/messages-to-topic-partition-leaders");
const delay_1 = require("../utils/delay");
const error_1 = require("../utils/error");
const logger_1 = require("../utils/logger");
const tracer_1 = require("../utils/tracer");
const consumer_group_1 = require("./consumer-group");
const consumer_metadata_1 = require("./consumer-metadata");
const fetch_manager_1 = require("./fetch-manager");
const offset_manager_1 = require("./offset-manager");
const trace = (0, tracer_1.createTracer)('Consumer');
/**
 * High-level consumer. Wires together consumer metadata, the offset
 * manager, optional group membership (when groupId is set) and the fetch
 * manager, and drives the fetch/process/restart loop.
 * Re-emits the group's 'offsetCommit' event.
 */
class Consumer extends events_1.default {
    cluster;
    options; // ConsumerOptions with all defaults applied
    metadata;
    consumerGroup; // undefined when no groupId was configured
    offsetManager;
    fetchManager;
    stopHook; // resolve() of a pending close(); also acts as the "stopping" flag
    constructor(cluster, options) {
        super();
        this.cluster = cluster;
        // Fill in every optional field so the rest of the class can assume
        // fully-populated options.
        this.options = {
            ...options,
            groupId: options.groupId ?? null,
            groupInstanceId: options.groupInstanceId ?? null,
            rackId: options.rackId ?? '',
            sessionTimeoutMs: options.sessionTimeoutMs ?? 30_000,
            rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
            maxWaitMs: options.maxWaitMs ?? 5000,
            minBytes: options.minBytes ?? 1,
            maxBytes: options.maxBytes ?? 1_048_576,
            partitionMaxBytes: options.partitionMaxBytes ?? 1_048_576,
            isolationLevel: options.isolationLevel ?? 0 /* IsolationLevel.READ_UNCOMMITTED */,
            allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
            fromBeginning: options.fromBeginning ?? false,
            batchGranularity: options.batchGranularity ?? 'partition',
            concurrency: options.concurrency ?? 1,
        };
        this.metadata = new consumer_metadata_1.ConsumerMetadata({ cluster: this.cluster });
        this.offsetManager = new offset_manager_1.OffsetManager({
            cluster: this.cluster,
            metadata: this.metadata,
            isolationLevel: this.options.isolationLevel,
        });
        // Group membership is optional: without a groupId the consumer reads
        // standalone and owns every partition itself.
        this.consumerGroup = this.options.groupId
            ? new consumer_group_1.ConsumerGroup({
                cluster: this.cluster,
                topics: this.options.topics,
                groupId: this.options.groupId,
                groupInstanceId: this.options.groupInstanceId,
                sessionTimeoutMs: this.options.sessionTimeoutMs,
                rebalanceTimeoutMs: this.options.rebalanceTimeoutMs,
                metadata: this.metadata,
                offsetManager: this.offsetManager,
            })
            : undefined;
        this.consumerGroup?.on('offsetCommit', () => this.emit('offsetCommit'));
    }
    /**
     * Connects to the cluster, fetches metadata, resolves starting offsets,
     * joins the group (if configured) and starts the fetch loop. On any
     * startup failure it retries after 1 second (unless a close() is pending).
     */
    async start() {
        const { topics, allowTopicAutoCreation, fromBeginning } = this.options;
        this.stopHook = undefined;
        try {
            await this.cluster.connect();
            await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
            // Standalone default: assign every known partition to ourselves.
            // A group sync will overwrite this with the real assignment.
            this.metadata.setAssignment(this.metadata.getTopicPartitions());
            await this.offsetManager.fetchOffsets({ fromBeginning });
            await this.consumerGroup?.join();
        }
        catch (error) {
            logger_1.log.warn('Failed to start consumer', error);
            logger_1.log.debug(`Restarting consumer in 1 second...`);
            await (0, delay_1.delay)(1000);
            // A close() arrived while we were failing to start — honor it.
            if (this.stopHook)
                return this.stopHook();
            // Force-close to tear down any half-open state, then retry.
            return this.close(true).then(() => this.start());
        }
        this.startFetchManager();
    }
    /**
     * Stops the consumer. Without `force`, waits for the fetch loop to drain
     * (stopHook is resolved by startFetchManager when it exits); then leaves
     * the group and disconnects, logging (not throwing) cleanup failures.
     */
    async close(force = false) {
        if (!force) {
            await new Promise(async (resolve) => {
                this.stopHook = resolve;
                await this.fetchManager?.stop();
            });
        }
        await this.consumerGroup?.leaveGroup().catch((error) => logger_1.log.debug(`Failed to leave group: ${error.message}`));
        await this.cluster.disconnect().catch((error) => logger_1.log.debug(`Failed to disconnect: ${error.message}`));
    }
    /**
     * Main fetch loop: computes per-broker assignments, runs a FetchManager,
     * and reacts to errors (rebalance → loop again, fencing → exit,
     * connection/coordinator issues → full restart).
     */
    async startFetchManager() {
        const { batchGranularity, concurrency } = this.options;
        while (!this.stopHook) {
            this.consumerGroup?.resetHeartbeat();
            // TODO: If leader is not available, find another read replica
            // Group assigned partitions by their leader broker, producing one
            // { nodeId, assignment } entry per broker to fetch from.
            const nodeAssignments = Object.entries((0, messages_to_topic_partition_leaders_1.distributeMessagesToTopicPartitionLeaders)(Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition }))), this.metadata.getTopicPartitionLeaderIds())).map(([nodeId, assignment]) => ({
                nodeId: parseInt(nodeId),
                assignment: Object.fromEntries(Object.entries(assignment).map(([topic, partitions]) => [
                    topic,
                    Object.keys(partitions).map(Number),
                ])),
            }));
            // Never run more processors than there are partitions to process.
            const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
            const numProcessors = Math.min(concurrency, numPartitions);
            this.fetchManager = new fetch_manager_1.FetchManager({
                fetch: this.fetch.bind(this),
                process: this.process.bind(this),
                metadata: this.metadata,
                consumerGroup: this.consumerGroup,
                nodeAssignments,
                batchGranularity,
                concurrency: numProcessors,
            });
            try {
                await this.fetchManager.start();
                if (!nodeAssignments.length) {
                    // Idle member: wait one fetch interval, then surface any
                    // heartbeat error (e.g. a pending rebalance) before looping.
                    logger_1.log.debug('No partitions assigned. Waiting for reassignment...');
                    await (0, delay_1.delay)(this.options.maxWaitMs);
                    this.consumerGroup?.handleLastHeartbeat();
                }
            }
            catch (error) {
                await this.fetchManager.stop();
                if (error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
                    logger_1.log.debug('Rebalance in progress...');
                    continue;
                }
                if (error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
                    // Another consumer claimed our static instance id; we must exit.
                    logger_1.log.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
                    this.close();
                    break;
                }
                if (error instanceof error_1.ConnectionError ||
                    (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_COORDINATOR)) {
                    // Broker connection lost or coordinator moved: full restart.
                    logger_1.log.debug(`${error.message}. Restarting consumer...`);
                    this.close().then(() => this.start());
                    break;
                }
                // Unknown error: log and shut down.
                logger_1.log.error(error.message, error);
                this.close();
                break;
            }
        }
        // Signal a pending close() that the loop has drained.
        this.stopHook?.();
    }
    /**
     * Delivers messages to the user callback (onBatch or onMessage), then
     * advances resolved offsets (+1 past each consumed message), commits them
     * to the group (if any) and flushes the offset manager.
     */
    async process(messages) {
        const { options } = this;
        const topicPartitions = {};
        for (const { topic, partition } of messages) {
            topicPartitions[topic] ??= new Set();
            topicPartitions[topic].add(partition);
        }
        if ('onBatch' in options) {
            await options.onBatch(messages);
            messages.forEach(({ topic, partition, offset }) => this.offsetManager.resolve(topic, partition, offset + 1n));
        }
        else if ('onMessage' in options) {
            // Per-message delivery: resolve after each message so a failure
            // mid-batch does not lose already-processed offsets.
            for (const message of messages) {
                await options.onMessage(message);
                const { topic, partition, offset } = message;
                this.offsetManager.resolve(topic, partition, offset + 1n);
            }
        }
        await this.consumerGroup?.offsetCommit(topicPartitions);
        this.offsetManager.flush(topicPartitions);
    }
    /** Issues a Fetch request to one broker for the given topic→partitions assignment. */
    fetch(nodeId, assignment) {
        const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
        return this.cluster.sendRequestToNode(nodeId)(api_1.API.FETCH, {
            maxWaitMs,
            minBytes,
            maxBytes,
            isolationLevel,
            // No incremental fetch sessions: full request every time.
            sessionId: 0,
            sessionEpoch: -1,
            topics: Object.entries(assignment).map(([topic, partitions]) => ({
                topicId: this.metadata.getTopicIdByName(topic),
                partitions: partitions.map((partition) => ({
                    partition,
                    currentLeaderEpoch: -1,
                    fetchOffset: this.offsetManager.getCurrentOffset(topic, partition),
                    lastFetchedEpoch: -1,
                    logStartOffset: 0n,
                    partitionMaxBytes,
                })),
            })),
            forgottenTopicsData: [],
            rackId,
        });
    }
}
exports.Consumer = Consumer;
__decorate([
    trace(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", []),
    __metadata("design:returntype", Promise)
], Consumer.prototype, "start", null);
__decorate([
    trace(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [Object]),
    __metadata("design:returntype", Promise)
], Consumer.prototype, "close", null);
__decorate([
    trace((messages) => ({ count: messages.length })),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [Array]),
    __metadata("design:returntype", Promise)
], Consumer.prototype, "process", null);
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import { FetchResponse } from '../api/fetch';
import { Assignment } from '../api/sync-group';
import { Metadata } from '../metadata';
import { Batch } from '../types';
import { ConsumerGroup } from './consumer-group';
/** Granularity at which fetched messages are grouped into processing batches. */
export type BatchGranularity = 'partition' | 'topic' | 'broker';
/** Construction options for {@link FetchManager}. */
type FetchManagerOptions = {
    /** Issues a Fetch request to one broker for its assigned partitions. */
    fetch: (nodeId: number, assignment: Assignment) => Promise<FetchResponse>;
    /** User-facing processing callback invoked per batch. */
    process: (batch: Batch) => Promise<void>;
    metadata: Metadata;
    /** Present only when the consumer runs with a group id. */
    consumerGroup?: ConsumerGroup;
    /** One entry per broker: which topic-partitions to fetch from it. */
    nodeAssignments: {
        nodeId: number;
        assignment: Assignment;
    }[];
    batchGranularity: BatchGranularity;
    /** Number of concurrent processors. */
    concurrency: number;
};
/**
 * Coordinates fetchers (one per broker) and processors (bounded by
 * `concurrency`) over a shared batch queue.
 */
export declare class FetchManager {
    private options;
    private queue;
    private isRunning;
    private fetchers;
    private processors;
    private pollQueue;
    private fetcherCallbacks;
    constructor(options: FetchManagerOptions);
    start(): Promise<void>;
    stop(): Promise<void>;
    /** Returns the next batch from the queue (awaiting fetchers if empty). */
    poll(): Promise<Batch>;
    private onResponse;
}
export {};
|