kafka-ts 1.1.8 → 1.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/consumer/consumer-group.js +84 -99
- package/dist/consumer/consumer.js +16 -25
- package/dist/producer/producer.js +5 -12
- package/dist/utils/retry.d.ts +1 -0
- package/dist/utils/retry.js +19 -0
- package/package.json +1 -1
- package/dist/consumer/metadata.d.ts +0 -24
- package/dist/consumer/metadata.js +0 -64
- package/dist/examples/src/replicator.js +0 -34
- package/dist/examples/src/utils/json.js +0 -5
- package/dist/request-handler.d.ts +0 -16
- package/dist/request-handler.js +0 -67
- package/dist/request-handler.test.d.ts +0 -1
- package/dist/request-handler.test.js +0 -340
- package/dist/src/api/api-versions.js +0 -18
- package/dist/src/api/create-topics.js +0 -46
- package/dist/src/api/delete-topics.js +0 -26
- package/dist/src/api/fetch.js +0 -95
- package/dist/src/api/find-coordinator.js +0 -34
- package/dist/src/api/heartbeat.js +0 -22
- package/dist/src/api/index.js +0 -38
- package/dist/src/api/init-producer-id.js +0 -24
- package/dist/src/api/join-group.js +0 -48
- package/dist/src/api/leave-group.js +0 -30
- package/dist/src/api/list-offsets.js +0 -39
- package/dist/src/api/metadata.js +0 -47
- package/dist/src/api/offset-commit.js +0 -39
- package/dist/src/api/offset-fetch.js +0 -44
- package/dist/src/api/produce.js +0 -119
- package/dist/src/api/sync-group.js +0 -31
- package/dist/src/broker.js +0 -35
- package/dist/src/connection.js +0 -21
- package/dist/src/consumer/consumer-group.js +0 -131
- package/dist/src/consumer/consumer.js +0 -103
- package/dist/src/consumer/metadata.js +0 -52
- package/dist/src/consumer/offset-manager.js +0 -23
- package/dist/src/index.js +0 -19
- package/dist/src/producer/producer.js +0 -84
- package/dist/src/request-handler.js +0 -57
- package/dist/src/request-handler.test.js +0 -321
- package/dist/src/types.js +0 -2
- package/dist/src/utils/api.js +0 -5
- package/dist/src/utils/decoder.js +0 -161
- package/dist/src/utils/encoder.js +0 -137
- package/dist/src/utils/error.js +0 -10
- package/dist/utils/debug.d.ts +0 -2
- package/dist/utils/debug.js +0 -11
- package/dist/utils/memo.d.ts +0 -1
- package/dist/utils/memo.js +0 -16
- package/dist/utils/mutex.d.ts +0 -3
- package/dist/utils/mutex.js +0 -32

package/dist/consumer/consumer-group.js
CHANGED

@@ -14,6 +14,7 @@ const api_1 = require("../api");
 const find_coordinator_1 = require("../api/find-coordinator");
 const error_1 = require("../utils/error");
 const logger_1 = require("../utils/logger");
+const retry_1 = require("../utils/retry");
 const tracer_1 = require("../utils/tracer");
 const trace = (0, tracer_1.createTracer)('ConsumerGroup');
 class ConsumerGroup {
@@ -62,7 +63,7 @@ class ConsumerGroup {
         }
     }
     async findCoordinator() {
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
             const { coordinators } = await this.options.cluster.sendRequest(api_1.API.FIND_COORDINATOR, {
                 keyType: find_coordinator_1.KEY_TYPE.GROUP,
                 keys: [this.options.groupId],
@@ -70,15 +71,11 @@ class ConsumerGroup {
             this.coordinatorId = coordinators[0].nodeId;
             await this.options.cluster.setSeedBroker(this.coordinatorId);
             this.heartbeatError = null;
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.findCoordinator();
-        }
+        });
     }
     async joinGroup() {
-        try {
-            …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
             const response = await cluster.sendRequest(api_1.API.JOIN_GROUP, {
                 groupId,
                 groupInstanceId,
@@ -93,28 +90,27 @@
             this.generationId = response.generationId;
             this.leaderId = response.leader;
             this.memberIds = response.members.map((member) => member.memberId);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.joinGroup();
-        }
+        });
     }
     async syncGroup() {
-        try {
-            …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, metadata, groupId, groupInstanceId } = this.options;
+            let assignments = [];
+            if (this.memberId === this.leaderId) {
+                const memberAssignments = Object.entries(metadata.getTopicPartitions())
+                    .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
+                    .reduce((acc, { topic, partition }, index) => {
+                    const memberId = this.memberIds[index % this.memberIds.length];
+                    acc[memberId] ??= {};
+                    acc[memberId][topic] ??= [];
+                    acc[memberId][topic].push(partition);
+                    return acc;
+                }, {});
+                assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({
+                    memberId,
+                    assignment,
+                }));
+            }
             const response = await cluster.sendRequest(api_1.API.SYNC_GROUP, {
                 groupId,
                 groupInstanceId,
@@ -125,29 +121,25 @@
                 assignments,
             });
             metadata.setAssignment(response.assignments);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.syncGroup();
-        }
+        });
     }
     async offsetFetch() {
-        try {
-            …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, groupId, topics, metadata, offsetManager } = this.options;
+            const assignment = metadata.getAssignment();
+            const request = {
+                groups: [
+                    {
+                        groupId,
+                        topics: topics
+                            .map((topic) => ({ name: topic, partitionIndexes: assignment[topic] ?? [] }))
+                            .filter(({ partitionIndexes }) => partitionIndexes.length),
+                    },
+                ].filter(({ topics }) => topics.length),
+                requireStable: true,
+            };
+            if (!request.groups.length)
+                return;
             const response = await cluster.sendRequest(api_1.API.OFFSET_FETCH, request);
             const topicPartitions = {};
             response.groups.forEach((group) => {
@@ -162,44 +154,36 @@
                 });
             });
             offsetManager.flush(topicPartitions);
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.offsetFetch();
-        }
+        });
     }
     async offsetCommit(topicPartitions) {
-        try {
-            …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { cluster, groupId, groupInstanceId, offsetManager, consumer } = this.options;
+            const request = {
+                groupId,
+                groupInstanceId,
+                memberId: this.memberId,
+                generationIdOrMemberEpoch: this.generationId,
+                topics: Object.entries(topicPartitions)
+                    .filter(([topic]) => topic in offsetManager.pendingOffsets)
+                    .map(([topic, partitions]) => ({
+                    name: topic,
+                    partitions: [...partitions]
+                        .filter((partition) => partition in offsetManager.pendingOffsets[topic])
+                        .map((partitionIndex) => ({
+                        partitionIndex,
+                        committedOffset: offsetManager.pendingOffsets[topic][partitionIndex],
+                        committedLeaderEpoch: -1,
+                        committedMetadata: null,
+                    })),
                 })),
-            }
-            …
-            }
-            try {
+            };
+            if (!request.topics.length) {
+                return;
+            }
             await cluster.sendRequest(api_1.API.OFFSET_COMMIT, request);
-            …
-            await this.handleError(error);
-            return this.offsetCommit(topicPartitions);
-            }
-            consumer.emit('offsetCommit');
+            consumer.emit('offsetCommit');
+        });
     }
     async heartbeat() {
         const { cluster, groupId, groupInstanceId, consumer } = this.options;
@@ -212,24 +196,25 @@
         consumer.emit('heartbeat');
     }
     async leaveGroup() {
-        …
-        }
-        const { cluster, groupId, groupInstanceId } = this.options;
-        this.stopHeartbeater();
-        try {
-            await cluster.sendRequest(api_1.API.LEAVE_GROUP, {
-                groupId,
-                members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
-            });
-        }
-        catch (error) {
-            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            if (this.coordinatorId === -1) {
                 return;
             }
-        …
+            const { cluster, groupId, groupInstanceId } = this.options;
+            this.stopHeartbeater();
+            try {
+                await cluster.sendRequest(api_1.API.LEAVE_GROUP, {
+                    groupId,
+                    members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
+                });
+            }
+            catch (error) {
+                if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
+                    return;
+                }
+                throw error;
+            }
+        });
     }
     async handleError(error) {
         await (0, api_1.handleApiError)(error).catch(async (error) => {
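The pattern above repeats for every ConsumerGroup operation, and again in consumer.js and producer.js below: 1.1.8 retried by self-recursion (catch the error, run handleError, call the method again), which never gives up if the error persists and keeps growing a chain of pending promises; 1.1.9 wraps each body in the new withRetry helper instead. A minimal TypeScript sketch of the before/after shape; the standalone withRetry and the findCoordinator names here are illustrative, not the shipped kafka-ts code:

```ts
// Illustrative sketch of the refactor; not the shipped kafka-ts source.
type ErrorHandler = (error: unknown) => Promise<void>;

// Shape of the new helper (see dist/utils/retry.js below): a bounded loop.
const withRetry =
    (handleError: ErrorHandler) =>
    async <T>(func: () => Promise<T>): Promise<T> => {
        let lastError: unknown;
        for (let i = 0; i < 15; i++) {
            try {
                return await func();
            } catch (error) {
                await handleError(error); // recovery side effects between attempts
                lastError = error;
            }
        }
        throw lastError; // attempts exhausted
    };

// 1.1.8 style: retry by recursion, with no attempt limit if the error persists.
async function findCoordinatorOld(handleError: ErrorHandler): Promise<void> {
    try {
        // ...send the FIND_COORDINATOR request...
    } catch (error) {
        await handleError(error);
        return findCoordinatorOld(handleError);
    }
}

// 1.1.9 style: the same body wrapped once, capped at 15 attempts.
const findCoordinatorNew = (handleError: ErrorHandler) =>
    withRetry(handleError)(async () => {
        // ...send the FIND_COORDINATOR request...
    });
```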
package/dist/consumer/consumer.js
CHANGED

@@ -20,6 +20,7 @@ const delay_1 = require("../utils/delay");
 const error_1 = require("../utils/error");
 const logger_1 = require("../utils/logger");
 const retrier_1 = require("../utils/retrier");
+const retry_1 = require("../utils/retry");
 const tracer_1 = require("../utils/tracer");
 const consumer_group_1 = require("./consumer-group");
 const consumer_metadata_1 = require("./consumer-metadata");
@@ -100,7 +101,9 @@ class Consumer extends events_1.default {
             await this.fetchManager?.stop();
         });
     }
-        await this.consumerGroup …
+        await this.consumerGroup
+            ?.leaveGroup()
+            .catch((error) => logger_1.log.debug('Failed to leave group', { reason: error.message }));
         await this.cluster.disconnect().catch(() => { });
     }
     async startFetchManager() {
@@ -140,7 +143,7 @@
             if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_COORDINATOR) {
                 logger_1.log.debug('Not coordinator. Searching for new coordinator...');
                 await this.consumerGroup?.findCoordinator();
-                …
+                continue;
             }
             if (error instanceof error_1.ConnectionError) {
                 logger_1.log.debug(`${error.message}. Restarting consumer...`, { stack: error.stack });
@@ -210,10 +213,10 @@
         this.offsetManager.flush(topicPartitions);
     }
     async fetch(nodeId, assignment) {
-        try {
-            …
-            return …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
+            this.consumerGroup?.handleLastHeartbeat();
+            return this.cluster.sendRequestToNode(nodeId)(api_1.API.FETCH, {
                 maxWaitMs,
                 minBytes,
                 maxBytes,
@@ -234,31 +237,19 @@
                 forgottenTopicsData: [],
                 rackId,
             });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetch(nodeId, assignment);
-        }
+        });
     }
     async fetchMetadata() {
-        try {
-            …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { topics, allowTopicAutoCreation } = this.options;
             await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetchMetadata();
-        }
+        });
     }
     async fetchOffsets() {
-        try {
-            …
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
+            const { fromTimestamp } = this.options;
             await this.offsetManager.fetchOffsets({ fromTimestamp });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetchOffsets();
-        }
+        });
     }
     async handleError(error) {
         await (0, api_1.handleApiError)(error).catch(async (error) => {
package/dist/producer/producer.js
CHANGED

@@ -17,6 +17,7 @@ const metadata_1 = require("../metadata");
 const error_1 = require("../utils/error");
 const lock_1 = require("../utils/lock");
 const logger_1 = require("../utils/logger");
+const retry_1 = require("../utils/retry");
 const shared_1 = require("../utils/shared");
 const tracer_1 = require("../utils/tracer");
 const trace = (0, tracer_1.createTracer)('Producer');
@@ -124,7 +125,7 @@ class Producer {
         }
     });
     async initProducerId() {
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
             const result = await this.cluster.sendRequest(api_1.API.INIT_PRODUCER_ID, {
                 transactionalId: null,
                 transactionTimeoutMs: 0,
@@ -134,11 +135,7 @@
             this.producerId = result.producerId;
             this.producerEpoch = result.producerEpoch;
             this.sequences = {};
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.initProducerId();
-        }
+        });
     }
     getSequence(topic, partition) {
         return this.sequences[topic]?.[partition] ?? 0;
@@ -149,13 +146,9 @@
         this.sequences[topic][partition] += messagesCount;
     }
     async fetchMetadata(topics, allowTopicAutoCreation) {
-        try {
+        return (0, retry_1.withRetry)(this.handleError.bind(this))(async () => {
             await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
-        }
-        catch (error) {
-            await this.handleError(error);
-            return this.fetchMetadata(topics, allowTopicAutoCreation);
-        }
+        });
     }
     async handleError(error) {
         await (0, api_1.handleApiError)(error).catch(async (error) => {
package/dist/utils/retry.d.ts
ADDED

@@ -0,0 +1 @@
+export declare const withRetry: (handleError: (error: unknown) => Promise<void>) => <T>(func: () => Promise<T>) => Promise<T>;
package/dist/utils/retry.js
ADDED

@@ -0,0 +1,19 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.withRetry = void 0;
+const logger_1 = require("./logger");
+const withRetry = (handleError) => async (func) => {
+    let lastError;
+    for (let i = 0; i < 15; i++) {
+        try {
+            return await func();
+        }
+        catch (error) {
+            await handleError(error);
+            lastError = error;
+        }
+    }
+    logger_1.log.warn('Retries exhausted', { lastError });
+    throw lastError;
+};
+exports.withRetry = withRetry;
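Read as a whole, withRetry(handleError) makes up to 15 attempts, awaiting handleError between attempts so the caller can run its recovery (re-finding the coordinator, refreshing metadata) before the next try, then logs and rethrows the last error once attempts are exhausted. If handleError itself throws, as the handleError methods above do for non-retriable errors, that error propagates out of the loop immediately. A small usage sketch, assuming the withRetry defined above; the flaky operation and the recover handler are made up for illustration:

```ts
// Assumes the withRetry defined above; all other names here are made up.
let attempts = 0;

const flakyFetch = async (): Promise<string> => {
    attempts += 1;
    if (attempts < 3) throw new Error(`transient failure #${attempts}`);
    return 'ok';
};

const recover = async (error: unknown) => {
    // kafka-ts callers bind their handleError here; rethrowing from this
    // handler ends the retry loop early with that error.
    console.warn('attempt failed:', (error as Error).message);
};

withRetry(recover)(flakyFetch).then((result) => {
    console.log(result, 'after', attempts, 'attempts'); // "ok after 3 attempts"
});
```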
package/package.json
CHANGED

package/dist/consumer/metadata.d.ts
DELETED

@@ -1,24 +0,0 @@
-import { IsolationLevel } from "../api/fetch";
-import { Assignment } from "../api/sync-group";
-import { Cluster } from "../cluster";
-import { OffsetManager } from "./offset-manager";
-export type Metadata = ReturnType<typeof createMetadata>;
-type MetadataOptions = {
-    cluster: Cluster;
-    topics?: string[];
-    isolationLevel?: IsolationLevel;
-    allowTopicAutoCreation?: boolean;
-    fromBeginning?: boolean;
-    offsetManager?: OffsetManager;
-};
-export declare const createMetadata: ({ cluster, topics, isolationLevel, allowTopicAutoCreation, fromBeginning, offsetManager, }: MetadataOptions) => {
-    init: () => Promise<void>;
-    getTopicPartitions: () => Record<string, number[]>;
-    getTopicIdByName: (name: string) => string;
-    getTopicNameById: (id: string) => string;
-    getAssignment: () => Assignment;
-    setAssignment: (newAssignment: Assignment) => void;
-    getLeaderIdByTopicPartition: (topic: string, partition: number) => number;
-    getIsrNodeIdsByTopicPartition: (topic: string, partition: number) => number[];
-};
-export {};
package/dist/consumer/metadata.js
DELETED

@@ -1,64 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.createMetadata = void 0;
-const api_1 = require("../api");
-const createMetadata = ({ cluster, topics, isolationLevel = 0 /* IsolationLevel.READ_UNCOMMITTED */, allowTopicAutoCreation = true, fromBeginning = false, offsetManager, }) => {
-    let topicPartitions = {};
-    let topicNameById = {};
-    let topicIdByName = {};
-    let leaderIdByTopicPartition = {};
-    let isrNodesByTopicPartition;
-    let assignment = {};
-    const fetchMetadata = async () => {
-        const response = await cluster.sendRequest(api_1.API.METADATA, {
-            allowTopicAutoCreation,
-            includeTopicAuthorizedOperations: false,
-            topics: topics?.map((name) => ({ id: null, name })) ?? null,
-        });
-        topicPartitions = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.partitions.map((partition) => partition.partitionIndex)]));
-        topicNameById = Object.fromEntries(response.topics.map((topic) => [topic.topicId, topic.name]));
-        topicIdByName = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.topicId]));
-        leaderIdByTopicPartition = Object.fromEntries(response.topics.map((topic) => [
-            topic.name,
-            Object.fromEntries(topic.partitions.map((partition) => [partition.partitionIndex, partition.leaderId])),
-        ]));
-        isrNodesByTopicPartition = Object.fromEntries(response.topics.map((topic) => [
-            topic.name,
-            Object.fromEntries(topic.partitions.map((partition) => [partition.partitionIndex, partition.isrNodes])),
-        ]));
-        assignment = topicPartitions;
-    };
-    const listOffsets = async () => {
-        const offsets = await cluster.sendRequest(api_1.API.LIST_OFFSETS, {
-            replicaId: -1,
-            isolationLevel,
-            topics: Object.entries(assignment)
-                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
-                .map(({ topic, partition }) => ({
-                name: topic,
-                partitions: [{ partitionIndex: partition, currentLeaderEpoch: -1, timestamp: -1n }],
-            })),
-        });
-        offsets.topics.forEach(({ name, partitions }) => {
-            partitions.forEach(({ partitionIndex, offset }) => {
-                offsetManager?.resolve(name, partitionIndex, fromBeginning ? 0n : offset);
-            });
-        });
-    };
-    return {
-        init: async () => {
-            await fetchMetadata();
-            await listOffsets();
-        },
-        getTopicPartitions: () => topicPartitions,
-        getTopicIdByName: (name) => topicIdByName[name],
-        getTopicNameById: (id) => topicNameById[id],
-        getAssignment: () => assignment,
-        setAssignment: (newAssignment) => {
-            assignment = newAssignment;
-        },
-        getLeaderIdByTopicPartition: (topic, partition) => leaderIdByTopicPartition[topic][partition],
-        getIsrNodeIdsByTopicPartition: (topic, partition) => isrNodesByTopicPartition[topic][partition],
-    };
-};
-exports.createMetadata = createMetadata;
package/dist/examples/src/replicator.js
DELETED

@@ -1,34 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const kafkats_1 = require("kafkats");
-const json_1 = require("./utils/json");
-(async () => {
-    const brokers = [{ host: "localhost", port: 9092 }];
-    const topic = "playground-topic";
-    // const producer = createProducer({ brokers });
-    // const producerInterval = setInterval(async () => {
-    //     await producer.send([
-    //         {
-    //             topic,
-    //             partition: 0,
-    //             offset: 1n,
-    //             timestamp: BigInt(Date.now()),
-    //             key: null,
-    //             value: `PING ${Math.random()}`,
-    //             headers: { timestamp: Date.now().toString() }
-    //         }
-    //     ])
-    // }, 5000);
-    const consumer = await (0, kafkats_1.startConsumer)({
-        topics: [topic],
-        brokers,
-        onBatch: (messages) => {
-            console.log(JSON.stringify(messages, json_1.serializer, 2));
-        },
-    });
-    process.on("SIGINT", async () => {
-        await consumer.close();
-        // clearInterval(producerInterval);
-        // await producer.close();
-    });
-})();
package/dist/request-handler.d.ts
DELETED

@@ -1,16 +0,0 @@
-import { Connection } from "./connection";
-import { Api } from "./utils/api";
-type RequestHandlerOptions = {
-    clientId: string | null;
-};
-export declare class RequestHandler {
-    private connection;
-    private options;
-    private queue;
-    private currentBuffer;
-    constructor(connection: Connection, options: RequestHandlerOptions);
-    private handleData;
-    sendRequest<Request, Response>(api: Api<Request, Response>, args: Request): Promise<Response>;
-}
-export type SendRequest = typeof RequestHandler.prototype.sendRequest;
-export {};
package/dist/request-handler.js
DELETED

@@ -1,67 +0,0 @@
-"use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.RequestHandler = void 0;
-const node_assert_1 = __importDefault(require("node:assert"));
-const decoder_1 = require("./utils/decoder");
-const encoder_1 = require("./utils/encoder");
-class RequestHandler {
-    connection;
-    options;
-    queue = {};
-    currentBuffer = null;
-    constructor(connection, options) {
-        this.connection = connection;
-        this.options = options;
-        this.connection.on("data", this.handleData);
-    }
-    handleData(buffer) {
-        this.currentBuffer = this.currentBuffer ? Buffer.concat([this.currentBuffer, buffer]) : buffer;
-        if (this.currentBuffer.length < 4) {
-            return;
-        }
-        const decoder = (0, decoder_1.createDecoder)({ buffer: this.currentBuffer });
-        const size = decoder.readInt32();
-        if (size > decoder.buffer.length) {
-            return;
-        }
-        const correlationId = decoder.readInt32();
-        const request = this.queue[correlationId];
-        delete this.queue[correlationId];
-        request.callback(decoder);
-        // debug(handleData.name, 'Response offsets', {
-        //     offset: decoder.offset,
-        //     length: decoder.buffer.length,
-        //     rest: decoder.buffer.subarray(decoder.offset, decoder.buffer.length)?.toString(),
-        // });
-        (0, node_assert_1.default)(decoder.offset - 4 === size, `Buffer not correctly consumed: ${decoder.offset - 4} !== ${buffer.length}`);
-        this.currentBuffer = null;
-    }
-    async sendRequest(api, args) {
-        const correlationId = Math.floor(Math.random() * 1000000);
-        const encoder = (0, encoder_1.createEncoder)()
-            .writeInt16(api.apiKey)
-            .writeInt16(api.apiVersion)
-            .writeInt32(correlationId)
-            .writeString(this.options.clientId);
-        const request = api.request(encoder, args).value();
-        const buffer = (0, encoder_1.createEncoder)().writeInt32(request.length).write(request).value();
-        return new Promise(async (resolve, reject) => {
-            await this.connection.write(buffer);
-            this.queue[correlationId] = {
-                callback: (decoder) => {
-                    try {
-                        const response = api.response(decoder);
-                        resolve(response);
-                    }
-                    catch (error) {
-                        reject(error);
-                    }
-                },
-            };
-        });
-    }
-}
-exports.RequestHandler = RequestHandler;
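For context on the deleted handler: it implements Kafka's wire framing, where each response is a 4-byte big-endian size prefix followed by a payload that begins with the request's correlation id, and that correlation id pairs the response with the pending request in queue. A simplified TypeScript sketch of the accumulate-and-frame logic; this is not the kafka-ts implementation (note the code above asserts that exactly one frame is consumed per buffer, while this sketch loops over multiple frames):

```ts
// Simplified sketch of length-prefixed framing with correlation-id matching.
// Not the kafka-ts implementation; names here are illustrative only.
type Resolver = (payload: Buffer) => void;

class FrameReader {
    private pending = new Map<number, Resolver>();
    private buffered = Buffer.alloc(0);

    // Register a resolver before writing the request with this correlation id.
    expect(correlationId: number, resolve: Resolver) {
        this.pending.set(correlationId, resolve);
    }

    // Call for every TCP chunk; a frame may span chunks or share one chunk.
    onData(chunk: Buffer) {
        this.buffered = Buffer.concat([this.buffered, chunk]);
        // Frame layout: [int32 size][int32 correlationId][size - 4 bytes of body]
        while (this.buffered.length >= 4) {
            const size = this.buffered.readInt32BE(0);
            if (this.buffered.length < 4 + size) return; // frame incomplete
            const correlationId = this.buffered.readInt32BE(4);
            const payload = this.buffered.subarray(8, 4 + size);
            this.buffered = this.buffered.subarray(4 + size);
            this.pending.get(correlationId)?.(payload);
            this.pending.delete(correlationId);
        }
    }
}
```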
package/dist/request-handler.test.d.ts
DELETED

@@ -1 +0,0 @@
-export declare const kafka: import("./client").Client;