kafka-ts 1.1.8 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/consumer/consumer-group.js +84 -99
- package/dist/consumer/consumer.js +23 -32
- package/dist/consumer/offset-manager.js +6 -5
- package/dist/distributors/group-by-leader-id.d.ts +10 -0
- package/dist/distributors/group-by-leader-id.js +13 -0
- package/dist/distributors/group-partitions-by-topic.d.ts +6 -0
- package/dist/distributors/group-partitions-by-topic.js +12 -0
- package/dist/metadata.d.ts +1 -1
- package/dist/metadata.js +1 -1
- package/dist/producer/producer-buffer.d.ts +20 -0
- package/dist/producer/producer-buffer.js +118 -0
- package/dist/producer/producer-state.d.ts +15 -0
- package/dist/producer/producer-state.js +33 -0
- package/dist/producer/producer.d.ts +6 -11
- package/dist/producer/producer.js +32 -108
- package/dist/utils/decoder.js +13 -8
- package/dist/utils/encoder.d.ts +10 -8
- package/dist/utils/encoder.js +95 -58
- package/dist/utils/promise-chain.d.ts +5 -0
- package/dist/utils/promise-chain.js +39 -0
- package/dist/utils/retry.d.ts +1 -0
- package/dist/utils/retry.js +19 -0
- package/dist/utils/shared.d.ts +1 -1
- package/dist/utils/shared.js +8 -7
- package/package.json +1 -1
- package/dist/consumer/metadata.d.ts +0 -24
- package/dist/consumer/metadata.js +0 -64
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +0 -17
- package/dist/distributors/messages-to-topic-partition-leaders.js +0 -15
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +0 -1
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +0 -30
- package/dist/examples/src/replicator.js +0 -34
- package/dist/examples/src/utils/json.js +0 -5
- package/dist/request-handler.d.ts +0 -16
- package/dist/request-handler.js +0 -67
- package/dist/request-handler.test.d.ts +0 -1
- package/dist/request-handler.test.js +0 -340
- package/dist/src/api/api-versions.js +0 -18
- package/dist/src/api/create-topics.js +0 -46
- package/dist/src/api/delete-topics.js +0 -26
- package/dist/src/api/fetch.js +0 -95
- package/dist/src/api/find-coordinator.js +0 -34
- package/dist/src/api/heartbeat.js +0 -22
- package/dist/src/api/index.js +0 -38
- package/dist/src/api/init-producer-id.js +0 -24
- package/dist/src/api/join-group.js +0 -48
- package/dist/src/api/leave-group.js +0 -30
- package/dist/src/api/list-offsets.js +0 -39
- package/dist/src/api/metadata.js +0 -47
- package/dist/src/api/offset-commit.js +0 -39
- package/dist/src/api/offset-fetch.js +0 -44
- package/dist/src/api/produce.js +0 -119
- package/dist/src/api/sync-group.js +0 -31
- package/dist/src/broker.js +0 -35
- package/dist/src/connection.js +0 -21
- package/dist/src/consumer/consumer-group.js +0 -131
- package/dist/src/consumer/consumer.js +0 -103
- package/dist/src/consumer/metadata.js +0 -52
- package/dist/src/consumer/offset-manager.js +0 -23
- package/dist/src/index.js +0 -19
- package/dist/src/producer/producer.js +0 -84
- package/dist/src/request-handler.js +0 -57
- package/dist/src/request-handler.test.js +0 -321
- package/dist/src/types.js +0 -2
- package/dist/src/utils/api.js +0 -5
- package/dist/src/utils/decoder.js +0 -161
- package/dist/src/utils/encoder.js +0 -137
- package/dist/src/utils/error.js +0 -10
- package/dist/utils/debug.d.ts +0 -2
- package/dist/utils/debug.js +0 -11
- package/dist/utils/lock.d.ts +0 -8
- package/dist/utils/lock.js +0 -44
- package/dist/utils/memo.d.ts +0 -1
- package/dist/utils/memo.js +0 -16
- package/dist/utils/mutex.d.ts +0 -3
- package/dist/utils/mutex.js +0 -32
package/README.md
CHANGED
@@ -177,7 +177,7 @@ Custom SASL mechanisms can be implemented following the `SASLProvider` interface
 | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
 | partitioner | Partitioner | false | defaultPartitioner | Custom partitioner function. By default, it uses a default java-compatible partitioner. |
 
-### `producer.send(messages: Message[]
+### `producer.send(messages: Message[])`
 
 <!-- export type Message = {
     topic: string;
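For reference, a call matching the corrected heading would look like the sketch below. The `Message` field names (`topic`, `partition`, `key`, `value`, `headers`, `timestamp`) are inferred from how `producer-buffer.js` consumes them later in this diff; the exact field types and which fields are optional are not confirmed here, and `partition` is presumably assignable by the documented `partitioner` when omitted.

```ts
// Sketch only — Message fields inferred from producer-buffer.js below;
// key/value types and field optionality are assumptions, not confirmed by this diff.
await producer.send([
    {
        topic: 'events',
        partition: 0,
        key: 'user-1',
        value: 'signed-up',
        headers: { source: 'web' },
    },
]);
```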
package/dist/consumer/consumer-group.js
CHANGED

@@ -14,6 +14,7 @@ const api_1 = require("../api");
 const find_coordinator_1 = require("../api/find-coordinator");
 const error_1 = require("../utils/error");
 const logger_1 = require("../utils/logger");
+const retry_1 = require("../utils/retry");
 const tracer_1 = require("../utils/tracer");
 const trace = (0, tracer_1.createTracer)('ConsumerGroup');
 class ConsumerGroup {
@@ -62,7 +63,7 @@ class ConsumerGroup {
         }
     }
     async findCoordinator() {
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
             const { coordinators } = await this.options.cluster.sendRequest(api_1.API.FIND_COORDINATOR, {
                 keyType: find_coordinator_1.KEY_TYPE.GROUP,
                 keys: [this.options.groupId],
@@ -70,15 +71,11 @@ class ConsumerGroup {
             this.coordinatorId = coordinators[0].nodeId;
             await this.options.cluster.setSeedBroker(this.coordinatorId);
             this.heartbeatError = null;
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.findCoordinator();
-        }
+        });
     }
     async joinGroup() {
-        const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
             const response = await cluster.sendRequest(api_1.API.JOIN_GROUP, {
                 groupId,
                 groupInstanceId,
@@ -93,28 +90,27 @@ class ConsumerGroup {
             this.generationId = response.generationId;
             this.leaderId = response.leader;
             this.memberIds = response.members.map((member) => member.memberId);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.joinGroup();
-        }
+        });
     }
     async syncGroup() {
-        /* … 15 lines of the previous implementation not captured in this view … */
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, metadata, groupId, groupInstanceId } = this.options;
+            let assignments = [];
+            if (this.memberId === this.leaderId) {
+                const memberAssignments = Object.entries(metadata.getTopicPartitions())
+                    .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
+                    .reduce((acc, { topic, partition }, index) => {
+                    const memberId = this.memberIds[index % this.memberIds.length];
+                    acc[memberId] ??= {};
+                    acc[memberId][topic] ??= [];
+                    acc[memberId][topic].push(partition);
+                    return acc;
+                }, {});
+                assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({
+                    memberId,
+                    assignment,
+                }));
+            }
             const response = await cluster.sendRequest(api_1.API.SYNC_GROUP, {
                 groupId,
                 groupInstanceId,
@@ -125,29 +121,25 @@ class ConsumerGroup {
                 assignments,
             });
             metadata.setAssignment(response.assignments);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.syncGroup();
-        }
+        });
     }
     async offsetFetch() {
-        /* … 16 lines of the previous implementation not captured in this view … */
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, groupId, topics, metadata, offsetManager } = this.options;
+            const assignment = metadata.getAssignment();
+            const request = {
+                groups: [
+                    {
+                        groupId,
+                        topics: topics
+                            .map((topic) => ({ name: topic, partitionIndexes: assignment[topic] ?? [] }))
+                            .filter(({ partitionIndexes }) => partitionIndexes.length),
+                    },
+                ].filter(({ topics }) => topics.length),
+                requireStable: true,
+            };
+            if (!request.groups.length)
+                return;
             const response = await cluster.sendRequest(api_1.API.OFFSET_FETCH, request);
             const topicPartitions = {};
             response.groups.forEach((group) => {
@@ -162,44 +154,36 @@ class ConsumerGroup {
                 });
             });
             offsetManager.flush(topicPartitions);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.offsetFetch();
-        }
+        });
     }
     async offsetCommit(topicPartitions) {
-        /* … 17 lines of the previous implementation not captured in this view … */
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, groupId, groupInstanceId, offsetManager, consumer } = this.options;
+            const request = {
+                groupId,
+                groupInstanceId,
+                memberId: this.memberId,
+                generationIdOrMemberEpoch: this.generationId,
+                topics: Object.entries(topicPartitions)
+                    .filter(([topic]) => topic in offsetManager.pendingOffsets)
+                    .map(([topic, partitions]) => ({
+                    name: topic,
+                    partitions: [...partitions]
+                        .filter((partition) => partition in offsetManager.pendingOffsets[topic])
+                        .map((partitionIndex) => ({
+                        partitionIndex,
+                        committedOffset: offsetManager.pendingOffsets[topic][partitionIndex],
+                        committedLeaderEpoch: -1,
+                        committedMetadata: null,
+                    })),
                 })),
-            }
-            /* … 3 lines not captured in this view … */
-            }
-        try {
+            };
+            if (!request.topics.length) {
+                return;
+            }
             await cluster.sendRequest(api_1.API.OFFSET_COMMIT, request);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.offsetCommit(topicPartitions);
-        }
-        consumer.emit('offsetCommit');
+            consumer.emit('offsetCommit');
+        });
     }
     async heartbeat() {
         const { cluster, groupId, groupInstanceId, consumer } = this.options;
@@ -212,24 +196,25 @@ class ConsumerGroup {
         consumer.emit('heartbeat');
     }
     async leaveGroup() {
-        /* … 2 lines not captured in this view … */
-        }
-        const { cluster, groupId, groupInstanceId } = this.options;
-        this.stopHeartbeater();
-        try {
-            await cluster.sendRequest(api_1.API.LEAVE_GROUP, {
-                groupId,
-                members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
-            });
-        }
-        catch (error) {
-            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            if (this.coordinatorId === -1) {
                 return;
             }
-            /* … 3 lines not captured in this view … */
+            const { cluster, groupId, groupInstanceId } = this.options;
+            this.stopHeartbeater();
+            try {
+                await cluster.sendRequest(api_1.API.LEAVE_GROUP, {
+                    groupId,
+                    members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
+                });
+            }
+            catch (error) {
+                if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
+                    return;
+                }
+                throw error;
+            }
+        });
     }
     async handleError(error) {
         await (0, api_1.handleApiError)(error).catch(async (error) => {
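Every rebalance-protocol method above drops its own copy of the `try { … } catch (error) { await this.handleError(error); return this.<method>(); }` boilerplate in favor of a shared `withRetry` helper from the new `utils/retry` module (+19 lines in the file list; its source is not shown in this diff). Judging purely from the call sites, a compatible sketch might look like this — a hypothetical reconstruction, not the package's actual code:

```ts
// Hypothetical sketch of utils/retry, inferred from call sites such as
// (0, retry_1.withRetry)(this.handleError.bind(this))(async () => { ... });
// the real 19-line implementation is not shown in this diff.
type ErrorHandler = (error: unknown) => Promise<void>;

export const withRetry =
    (handleError: ErrorHandler) =>
    async <T>(func: () => Promise<T>): Promise<T> => {
        // Loop rather than recurse, unlike the old catch-and-call-again blocks.
        while (true) {
            try {
                return await func();
            }
            catch (error) {
                // handleError is expected to rethrow unrecoverable errors;
                // returning normally here means "try again".
                await handleError(error);
            }
        }
    };
```

Whatever the real implementation, the visible effect is the same: retries no longer grow the call stack through self-recursion, and the retry policy lives in one place.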
package/dist/consumer/consumer.js
CHANGED

@@ -15,11 +15,13 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.Consumer = void 0;
 const events_1 = __importDefault(require("events"));
 const api_1 = require("../api");
-const /* … rest of line not captured in this view … */
+const group_by_leader_id_1 = require("../distributors/group-by-leader-id");
+const group_partitions_by_topic_1 = require("../distributors/group-partitions-by-topic");
 const delay_1 = require("../utils/delay");
 const error_1 = require("../utils/error");
 const logger_1 = require("../utils/logger");
 const retrier_1 = require("../utils/retrier");
+const retry_1 = require("../utils/retry");
 const tracer_1 = require("../utils/tracer");
 const consumer_group_1 = require("./consumer-group");
 const consumer_metadata_1 = require("./consumer-metadata");
@@ -100,7 +102,9 @@ class Consumer extends events_1.default {
                 await this.fetchManager?.stop();
             });
         }
-        await this.consumerGroup /* … rest of line not captured in this view … */
+        await this.consumerGroup
+            ?.leaveGroup()
+            .catch((error) => logger_1.log.debug('Failed to leave group', { reason: error.message }));
         await this.cluster.disconnect().catch(() => { });
     }
     async startFetchManager() {
@@ -109,12 +113,11 @@ class Consumer extends events_1.default {
         try {
             await this.consumerGroup?.join();
             // TODO: If leader is not available, find another read replica
-            const /* … rest of line not captured in this view … */
-            /* … 3 lines not captured in this view … */
-            ])),
+            const topicPartitions = Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })));
+            const topicPartitionsByLeaderId = (0, group_by_leader_id_1.groupByLeaderId)(topicPartitions, this.metadata.getTopicPartitionLeaderIds());
+            const nodeAssignments = Object.entries(topicPartitionsByLeaderId).map(([leaderId, topicPartitions]) => ({
+                nodeId: parseInt(leaderId),
+                assignment: (0, group_partitions_by_topic_1.groupPartitionsByTopic)(topicPartitions),
             }));
             this.fetchManager = new fetch_manager_1.FetchManager({
                 fetch: this.fetch.bind(this),
@@ -140,7 +143,7 @@ class Consumer extends events_1.default {
             if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_COORDINATOR) {
                 logger_1.log.debug('Not coordinator. Searching for new coordinator...');
                 await this.consumerGroup?.findCoordinator();
-                /* … line not captured in this view … */
+                continue;
             }
             if (error instanceof error_1.ConnectionError) {
                 logger_1.log.debug(`${error.message}. Restarting consumer...`, { stack: error.stack });
@@ -210,10 +213,10 @@ class Consumer extends events_1.default {
         this.offsetManager.flush(topicPartitions);
     }
     async fetch(nodeId, assignment) {
-        const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
-        this.consumerGroup?.handleLastHeartbeat();
-        try {
-            return /* … rest of line not captured in this view … */
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
+            this.consumerGroup?.handleLastHeartbeat();
+            return this.cluster.sendRequestToNode(nodeId)(api_1.API.FETCH, {
                 maxWaitMs,
                 minBytes,
                 maxBytes,
@@ -234,31 +237,19 @@ class Consumer extends events_1.default {
                 forgottenTopicsData: [],
                 rackId,
             });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetch(nodeId, assignment);
-        }
+        });
     }
     async fetchMetadata() {
-        const { topics, allowTopicAutoCreation } = this.options;
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { topics, allowTopicAutoCreation } = this.options;
             await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetchMetadata();
-        }
+        });
     }
     async fetchOffsets() {
-        const { fromTimestamp } = this.options;
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { fromTimestamp } = this.options;
             await this.offsetManager.fetchOffsets({ fromTimestamp });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetchOffsets();
-        }
+        });
     }
     async handleError(error) {
         await (0, api_1.handleApiError)(error).catch(async (error) => {
package/dist/consumer/offset-manager.js
CHANGED

@@ -2,7 +2,8 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OffsetManager = void 0;
 const api_1 = require("../api");
-const /* … rest of line not captured in this view … */
+const group_by_leader_id_1 = require("../distributors/group-by-leader-id");
+const group_partitions_by_topic_1 = require("../distributors/group-partitions-by-topic");
 const tracer_1 = require("../utils/tracer");
 const trace = (0, tracer_1.createTracer)('OffsetManager');
 class OffsetManager {
@@ -40,11 +41,11 @@ class OffsetManager {
     async fetchOffsets(options) {
         const { metadata } = this.options;
         const topicPartitions = Object.entries(metadata.getAssignment()).flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })));
-        const /* … rest of line not captured in this view … */
-        await Promise.all(Object.entries( /* … rest of line not captured in this view … */
+        const topicPartitionsByLeaderId = (0, group_by_leader_id_1.groupByLeaderId)(topicPartitions, metadata.getTopicPartitionLeaderIds());
+        await Promise.all(Object.entries(topicPartitionsByLeaderId).map(([leaderId, topicPartitions]) => this.listOffsets({
             ...options,
-            nodeId: parseInt( /* … rest of line not captured in this view … */
-            nodeAssignment: /* … rest of line not captured in this view … */
+            nodeId: parseInt(leaderId),
+            nodeAssignment: (0, group_partitions_by_topic_1.groupPartitionsByTopic)(topicPartitions),
         })));
     }
     async listOffsets({ nodeId, nodeAssignment, fromTimestamp, }) {
package/dist/distributors/group-by-leader-id.js
ADDED

@@ -0,0 +1,13 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.groupByLeaderId = void 0;
+const groupByLeaderId = (items, leaderIdByTopicPartition) => {
+    const result = {};
+    items.forEach((item) => {
+        const leaderId = leaderIdByTopicPartition[item.topic][item.partition];
+        result[leaderId] ??= [];
+        result[leaderId].push(item);
+    });
+    return result;
+};
+exports.groupByLeaderId = groupByLeaderId;
package/dist/distributors/group-partitions-by-topic.js
ADDED

@@ -0,0 +1,12 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.groupPartitionsByTopic = void 0;
+const groupPartitionsByTopic = (items) => {
+    const result = {};
+    items.forEach((item) => {
+        result[item.topic] ??= [];
+        result[item.topic].push(item.partition);
+    });
+    return result;
+};
+exports.groupPartitionsByTopic = groupPartitionsByTopic;
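Together these two helpers replace the removed `messages-to-topic-partition-leaders` distributor: the consumer first buckets its assigned topic-partitions by leader broker, then regroups each bucket by topic to build the per-node assignment (as seen in `consumer.js` and `offset-manager.js` above). A worked example with hypothetical topics and leader ids, assuming the two functions above are in scope (the exact import specifier is not shown in this diff):

```ts
// Hypothetical data; groupByLeaderId/groupPartitionsByTopic as defined above.
const assigned = [
    { topic: 'orders', partition: 0 },
    { topic: 'orders', partition: 1 },
    { topic: 'payments', partition: 0 },
];
// Shape returned by metadata.getTopicPartitionLeaderIds(): topic -> partition -> leader id.
const leaderIds = { orders: { 0: 1, 1: 2 }, payments: { 0: 1 } };

const byLeader = groupByLeaderId(assigned, leaderIds);
// { 1: [{ topic: 'orders', partition: 0 }, { topic: 'payments', partition: 0 }],
//   2: [{ topic: 'orders', partition: 1 }] }

const nodeAssignments = Object.entries(byLeader).map(([leaderId, topicPartitions]) => ({
    nodeId: parseInt(leaderId),
    assignment: groupPartitionsByTopic(topicPartitions),
}));
// [ { nodeId: 1, assignment: { orders: [0], payments: [0] } },
//   { nodeId: 2, assignment: { orders: [1] } } ]
```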
package/dist/metadata.d.ts
CHANGED
@@ -16,7 +16,7 @@ export declare class Metadata {
     getTopicIdByName(name: string): string;
     getTopicNameById(id: string): string;
     fetchMetadataIfNecessary({ topics, allowTopicAutoCreation, }: {
-        topics: string[]
+        topics: string[];
         allowTopicAutoCreation: boolean;
     }): Promise<void>;
     fetchMetadata({ topics, allowTopicAutoCreation, }: {
package/dist/metadata.js
CHANGED
@@ -41,7 +41,7 @@ class Metadata {
         return this.topicNameById[id];
     }
     async fetchMetadataIfNecessary({ topics, allowTopicAutoCreation, }) {
-        const missingTopics = /* … rest of line not captured in this view … */
+        const missingTopics = topics.filter((topic) => !this.topicPartitions[topic]);
         if (!missingTopics.length) {
             return;
         }
package/dist/producer/producer-buffer.d.ts
ADDED

@@ -0,0 +1,20 @@
+import { Cluster } from '../cluster';
+import { Message } from '../types';
+import { ProducerState } from './producer-state';
+type ProducerBufferOptions = {
+    nodeId: number;
+    maxBatchSize: number;
+    cluster: Cluster;
+    state: ProducerState;
+};
+export declare class ProducerBuffer {
+    private options;
+    private buffer;
+    private head;
+    private isFlushing;
+    constructor(options: ProducerBufferOptions);
+    enqueue(messages: Message[]): Promise<void>;
+    private flush;
+    private compactBuffer;
+}
+export {};
package/dist/producer/producer-buffer.js
ADDED

@@ -0,0 +1,118 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ProducerBuffer = void 0;
+const api_1 = require("../api");
+class ProducerBuffer {
+    options;
+    buffer = [];
+    head = 0;
+    isFlushing = false;
+    constructor(options) {
+        this.options = options;
+    }
+    enqueue(messages) {
+        return new Promise((resolve, reject) => {
+            this.buffer.push({ messages, resolve, reject });
+            this.flush();
+        });
+    }
+    async flush() {
+        if (this.isFlushing)
+            return;
+        this.isFlushing = true;
+        const { cluster, state, nodeId, maxBatchSize } = this.options;
+        while (true) {
+            const batch = [];
+            const resolvers = [];
+            const rejecters = [];
+            while (this.head < this.buffer.length) {
+                const entry = this.buffer[this.head++];
+                batch.push(...entry.messages);
+                resolvers.push(entry.resolve);
+                rejecters.push(entry.reject);
+                const nextLength = this.buffer[this.head]?.messages.length ?? 0;
+                if (batch.length + nextLength > maxBatchSize) {
+                    break;
+                }
+            }
+            if (!batch.length)
+                break;
+            this.compactBuffer();
+            const topicPartitionMessages = {};
+            batch.forEach((message) => {
+                topicPartitionMessages[message.topic] ??= {};
+                topicPartitionMessages[message.topic][message.partition] ??= [];
+                topicPartitionMessages[message.topic][message.partition].push(message);
+            });
+            const defaultTimestamp = BigInt(Date.now());
+            const topicData = Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
+                name: topic,
+                partitionData: Object.entries(partitionMessages).map(([partition, messages]) => {
+                    const partitionIndex = parseInt(partition);
+                    let baseTimestamp;
+                    let maxTimestamp;
+                    messages.forEach(({ timestamp = defaultTimestamp }) => {
+                        if (!baseTimestamp || timestamp < baseTimestamp) {
+                            baseTimestamp = timestamp;
+                        }
+                        if (!maxTimestamp || timestamp > maxTimestamp) {
+                            maxTimestamp = timestamp;
+                        }
+                    });
+                    return {
+                        index: partitionIndex,
+                        baseOffset: 0n,
+                        partitionLeaderEpoch: -1,
+                        attributes: 0,
+                        lastOffsetDelta: messages.length - 1,
+                        baseTimestamp: baseTimestamp ?? 0n,
+                        maxTimestamp: maxTimestamp ?? 0n,
+                        producerId: state.producerId,
+                        producerEpoch: 0,
+                        baseSequence: state.getSequence(topic, partitionIndex),
+                        records: messages.map((message, index) => ({
+                            attributes: 0,
+                            timestampDelta: (message.timestamp ?? defaultTimestamp) - (baseTimestamp ?? 0n),
+                            offsetDelta: index,
+                            key: message.key ?? null,
+                            value: message.value,
+                            headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
+                                key,
+                                value,
+                            })),
+                        })),
+                    };
+                }),
+            }));
+            try {
+                await cluster.sendRequestToNode(nodeId)(api_1.API.PRODUCE, {
+                    transactionalId: null,
+                    acks: -1,
+                    timeoutMs: 30000,
+                    topicData,
+                });
+                topicData.forEach(({ name, partitionData }) => {
+                    partitionData.forEach(({ index, records }) => {
+                        state.updateSequence(name, index, records.length);
+                    });
+                });
+                resolvers.forEach((resolve) => resolve());
+            }
+            catch (error) {
+                rejecters.forEach((reject) => reject(error));
+            }
+        }
+        this.isFlushing = false;
+    }
+    compactBuffer() {
+        if (this.head >= this.buffer.length) {
+            this.buffer = [];
+            this.head = 0;
+        }
+        else if (this.head > 1000 && this.head > this.buffer.length / 2) {
+            this.buffer = this.buffer.slice(this.head);
+            this.head = 0;
+        }
+    }
+}
+exports.ProducerBuffer = ProducerBuffer;
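`ProducerBuffer` gives each broker node its own queue: every `enqueue` call returns a promise, a single `flush` loop drains queued entries into PRODUCE requests capped at `maxBatchSize` messages, and `compactBuffer` advances a `head` index instead of `shift()`-ing the array, only reallocating once more than 1000 consumed entries occupy over half of it. The coalescing pattern in isolation looks like the sketch below; all names are illustrative, and it simplifies compaction to a full reset, so it mirrors the shape of the logic rather than the exact class.

```ts
// Minimal sketch of the enqueue/flush coalescing pattern used by ProducerBuffer.
// Illustrative names; compaction simplified to clearing the drained buffer.
type Entry<T> = { items: T[]; resolve: () => void; reject: (error: unknown) => void };

class CoalescingQueue<T> {
    private buffer: Entry<T>[] = [];
    private head = 0;
    private isFlushing = false;

    constructor(
        private send: (batch: T[]) => Promise<void>,
        private maxBatchSize: number,
    ) {}

    enqueue(items: T[]): Promise<void> {
        return new Promise((resolve, reject) => {
            this.buffer.push({ items, resolve, reject });
            void this.flush(); // single flusher; concurrent calls return immediately
        });
    }

    private async flush(): Promise<void> {
        if (this.isFlushing) return;
        this.isFlushing = true;
        while (this.head < this.buffer.length) {
            const batch: T[] = [];
            const entries: Entry<T>[] = [];
            // Greedily take entries until adding the next one would exceed the cap.
            while (this.head < this.buffer.length) {
                const entry = this.buffer[this.head++];
                batch.push(...entry.items);
                entries.push(entry);
                const nextLength = this.buffer[this.head]?.items.length ?? 0;
                if (batch.length + nextLength > this.maxBatchSize) break;
            }
            try {
                // Entries enqueued while this await is pending are picked up
                // by the next iteration of the outer loop.
                await this.send(batch);
                entries.forEach((entry) => entry.resolve());
            }
            catch (error) {
                entries.forEach((entry) => entry.reject(error));
            }
        }
        this.buffer = [];
        this.head = 0;
        this.isFlushing = false;
    }
}
```

One consequence of this design, visible in the original as well: a failed PRODUCE rejects every `enqueue` promise that was folded into that batch, not only the one that triggered the flush.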
package/dist/producer/producer-state.d.ts
ADDED

@@ -0,0 +1,15 @@
+import { Cluster } from "../cluster";
+type ProducerStateOptions = {
+    cluster: Cluster;
+};
+export declare class ProducerState {
+    private options;
+    producerId: bigint;
+    private producerEpoch;
+    private sequences;
+    constructor(options: ProducerStateOptions);
+    initProducerId(): Promise<void>;
+    getSequence(topic: string, partition: number): number;
+    updateSequence(topic: string, partition: number, messagesCount: number): void;
+}
+export {};
package/dist/producer/producer-state.js
ADDED

@@ -0,0 +1,33 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ProducerState = void 0;
+const api_1 = require("../api");
+class ProducerState {
+    options;
+    producerId = 0n;
+    producerEpoch = 0;
+    sequences = {};
+    constructor(options) {
+        this.options = options;
+    }
+    async initProducerId() {
+        const result = await this.options.cluster.sendRequest(api_1.API.INIT_PRODUCER_ID, {
+            transactionalId: null,
+            transactionTimeoutMs: 0,
+            producerId: this.producerId,
+            producerEpoch: this.producerEpoch,
+        });
+        this.producerId = result.producerId;
+        this.producerEpoch = result.producerEpoch;
+        this.sequences = {};
+    }
+    getSequence(topic, partition) {
+        return this.sequences[topic]?.[partition] ?? 0;
+    }
+    updateSequence(topic, partition, messagesCount) {
+        this.sequences[topic] ??= {};
+        this.sequences[topic][partition] ??= 0;
+        this.sequences[topic][partition] += messagesCount;
+    }
+}
+exports.ProducerState = ProducerState;
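`ProducerState` holds the idempotent-producer bookkeeping: `initProducerId` obtains `producerId`/`producerEpoch` via INIT_PRODUCER_ID and resets all sequences, and the per topic-partition sequence counters feed each batch's `baseSequence` in `producer-buffer.js` above, advancing by the record count only after the PRODUCE request succeeds. Expected behavior as a short sketch (`cluster` is assumed to be an already-connected `Cluster` instance; the import path is not shown in this diff):

```ts
// Hedged usage sketch; `cluster` is an assumed, already-connected Cluster.
const state = new ProducerState({ cluster });
await state.initProducerId();          // fetches producerId/producerEpoch, clears sequences

state.getSequence('orders', 0);        // 0 — nothing produced yet
state.updateSequence('orders', 0, 3);  // a 3-record batch was acknowledged
state.getSequence('orders', 0);        // 3 — becomes baseSequence of the next batch
```

Re-running `initProducerId` wipes the sequence map, which matches the broker's expectation that sequences restart at 0 under a fresh producer id.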