kafka-ts 1.1.6 → 1.1.8
This diff covers publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/dist/api/index.d.ts +122 -121
- package/dist/api/index.js +24 -1
- package/dist/broker.d.ts +0 -1
- package/dist/broker.js +5 -8
- package/dist/cluster.d.ts +2 -2
- package/dist/cluster.js +48 -23
- package/dist/connection.js +9 -13
- package/dist/consumer/consumer-group.d.ts +2 -1
- package/dist/consumer/consumer-group.js +75 -36
- package/dist/consumer/consumer.d.ts +3 -0
- package/dist/consumer/consumer.js +47 -13
- package/dist/consumer/metadata.d.ts +24 -0
- package/dist/consumer/metadata.js +64 -0
- package/dist/examples/src/replicator.js +34 -0
- package/dist/examples/src/utils/json.js +5 -0
- package/dist/producer/producer.d.ts +3 -1
- package/dist/producer/producer.js +85 -86
- package/dist/request-handler.d.ts +16 -0
- package/dist/request-handler.js +67 -0
- package/dist/request-handler.test.d.ts +1 -0
- package/dist/request-handler.test.js +340 -0
- package/dist/src/api/api-versions.js +18 -0
- package/dist/src/api/create-topics.js +46 -0
- package/dist/src/api/delete-topics.js +26 -0
- package/dist/src/api/fetch.js +95 -0
- package/dist/src/api/find-coordinator.js +34 -0
- package/dist/src/api/heartbeat.js +22 -0
- package/dist/src/api/index.js +38 -0
- package/dist/src/api/init-producer-id.js +24 -0
- package/dist/src/api/join-group.js +48 -0
- package/dist/src/api/leave-group.js +30 -0
- package/dist/src/api/list-offsets.js +39 -0
- package/dist/src/api/metadata.js +47 -0
- package/dist/src/api/offset-commit.js +39 -0
- package/dist/src/api/offset-fetch.js +44 -0
- package/dist/src/api/produce.js +119 -0
- package/dist/src/api/sync-group.js +31 -0
- package/dist/src/broker.js +35 -0
- package/dist/src/connection.js +21 -0
- package/dist/src/consumer/consumer-group.js +131 -0
- package/dist/src/consumer/consumer.js +103 -0
- package/dist/src/consumer/metadata.js +52 -0
- package/dist/src/consumer/offset-manager.js +23 -0
- package/dist/src/index.js +19 -0
- package/dist/src/producer/producer.js +84 -0
- package/dist/src/request-handler.js +57 -0
- package/dist/src/request-handler.test.js +321 -0
- package/dist/src/types.js +2 -0
- package/dist/src/utils/api.js +5 -0
- package/dist/src/utils/decoder.js +161 -0
- package/dist/src/utils/encoder.js +137 -0
- package/dist/src/utils/error.js +10 -0
- package/dist/utils/debug.d.ts +2 -0
- package/dist/utils/debug.js +11 -0
- package/dist/utils/error.d.ts +1 -4
- package/dist/utils/error.js +5 -9
- package/dist/utils/logger.d.ts +7 -0
- package/dist/utils/logger.js +20 -5
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/mutex.d.ts +3 -0
- package/dist/utils/mutex.js +32 -0
- package/package.json +1 -1

package/dist/consumer/consumer-group.js

```diff
@@ -12,6 +12,8 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.ConsumerGroup = void 0;
 const api_1 = require("../api");
 const find_coordinator_1 = require("../api/find-coordinator");
+const error_1 = require("../utils/error");
+const logger_1 = require("../utils/logger");
 const tracer_1 = require("../utils/tracer");
 const trace = (0, tracer_1.createTracer)('ConsumerGroup');
 class ConsumerGroup {
@@ -28,7 +30,6 @@ class ConsumerGroup {
     }
     async init() {
         await this.findCoordinator();
-        await this.options.cluster.setSeedBroker(this.coordinatorId);
         this.memberId = '';
     }
     async join() {
@@ -61,11 +62,19 @@
         }
     }
     async findCoordinator() {
-
-
-
-
-
+        try {
+            const { coordinators } = await this.options.cluster.sendRequest(api_1.API.FIND_COORDINATOR, {
+                keyType: find_coordinator_1.KEY_TYPE.GROUP,
+                keys: [this.options.groupId],
+            });
+            this.coordinatorId = coordinators[0].nodeId;
+            await this.options.cluster.setSeedBroker(this.coordinatorId);
+            this.heartbeatError = null;
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.findCoordinator();
+        }
     }
     async joinGroup() {
         const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
@@ -86,11 +95,8 @@
             this.memberIds = response.members.map((member) => member.memberId);
         }
         catch (error) {
-
-
-                return this.joinGroup();
-            }
-            throw error;
+            await this.handleError(error);
+            return this.joinGroup();
         }
     }
     async syncGroup() {
@@ -108,16 +114,22 @@
             }, {});
             assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({ memberId, assignment }));
         }
-
-
-
-
-
-
-
-
-
-
+        try {
+            const response = await cluster.sendRequest(api_1.API.SYNC_GROUP, {
+                groupId,
+                groupInstanceId,
+                memberId: this.memberId,
+                generationId: this.generationId,
+                protocolType: 'consumer',
+                protocolName: 'RoundRobinAssigner',
+                assignments,
+            });
+            metadata.setAssignment(response.assignments);
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.syncGroup();
+        }
     }
     async offsetFetch() {
         const { cluster, groupId, topics, metadata, offsetManager } = this.options;
@@ -135,20 +147,26 @@
         };
         if (!request.groups.length)
             return;
-
-
-
-
-
-
-
-
-
-
+        try {
+            const response = await cluster.sendRequest(api_1.API.OFFSET_FETCH, request);
+            const topicPartitions = {};
+            response.groups.forEach((group) => {
+                group.topics.forEach((topic) => {
+                    topicPartitions[topic.name] ??= new Set();
+                    topic.partitions.forEach(({ partitionIndex, committedOffset }) => {
+                        if (committedOffset >= 0) {
+                            topicPartitions[topic.name].add(partitionIndex);
+                            offsetManager.resolve(topic.name, partitionIndex, committedOffset);
+                        }
+                    });
                 });
             });
-
-
+            offsetManager.flush(topicPartitions);
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.offsetFetch();
+        }
     }
     async offsetCommit(topicPartitions) {
         const { cluster, groupId, groupInstanceId, offsetManager, consumer } = this.options;
@@ -174,7 +192,13 @@
         if (!request.topics.length) {
            return;
        }
-
+        try {
+            await cluster.sendRequest(api_1.API.OFFSET_COMMIT, request);
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.offsetCommit(topicPartitions);
+        }
         consumer.emit('offsetCommit');
     }
     async heartbeat() {
@@ -200,12 +224,27 @@
             });
         }
         catch (error) {
-            if (error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
                 return;
             }
-
+            await this.handleError(error);
+            return this.leaveGroup();
         }
     }
+    async handleError(error) {
+        await (0, api_1.handleApiError)(error).catch(async (error) => {
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_COORDINATOR) {
+                logger_1.log.debug('Not coordinator. Searching for new coordinator...');
+                await this.findCoordinator();
+                return;
+            }
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.MEMBER_ID_REQUIRED) {
+                this.memberId = error.response.memberId;
+                return;
+            }
+            throw error;
+        });
+    }
 }
 exports.ConsumerGroup = ConsumerGroup;
 __decorate([
```
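
The pattern introduced throughout `ConsumerGroup` is the same in every method: wrap the coordinator-facing request in try/catch, pass the error to a shared `handleError` that lets `handleApiError` classify it, recover from `NOT_COORDINATOR` (re-discover the coordinator) or `MEMBER_ID_REQUIRED` (adopt the broker-assigned member id), and then retry the original call. Below is a minimal stand-alone sketch of that control flow; `isRecoverable`, `withRecovery`, and the placeholder functions are illustrative and not part of the kafka-ts API.

```ts
// Illustrative sketch of the retry-on-recoverable-error flow used above.
// The real code relies on kafka-ts's handleApiError/KafkaTSApiError; here a
// plain classifier stands in for it.
type Recovery = () => Promise<void>;

// Hypothetical classifier: returns true if a corrective action was taken and
// the failed request may be retried (e.g. NOT_COORDINATOR -> findCoordinator()).
async function isRecoverable(error: unknown, recover: Recovery): Promise<boolean> {
    if (error instanceof Error && error.message.includes('NOT_COORDINATOR')) {
        await recover();
        return true;
    }
    return false;
}

// Keep retrying `action` while errors are classified as recoverable,
// mirroring `await this.handleError(error); return this.joinGroup();`.
async function withRecovery<T>(action: () => Promise<T>, recover: Recovery): Promise<T> {
    for (;;) {
        try {
            return await action();
        } catch (error) {
            if (!(await isRecoverable(error, recover))) throw error;
        }
    }
}

// Example: retry a join until the coordinator is stable. joinGroup and
// findCoordinator are placeholders for the ConsumerGroup methods in the diff.
declare function joinGroup(): Promise<void>;
declare function findCoordinator(): Promise<void>;
void withRecovery(joinGroup, findCoordinator);
```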

package/dist/consumer/consumer.js

```diff
@@ -75,13 +75,12 @@ class Consumer extends events_1.default {
             : undefined;
     }
     async start() {
-        const { topics, allowTopicAutoCreation, fromTimestamp } = this.options;
         this.stopHook = undefined;
         try {
             await this.cluster.connect();
-            await this.
+            await this.fetchMetadata();
             this.metadata.setAssignment(this.metadata.getTopicPartitions());
-            await this.
+            await this.fetchOffsets();
             await this.consumerGroup?.init();
         }
         catch (error) {
@@ -101,8 +100,8 @@ class Consumer extends events_1.default {
                 await this.fetchManager?.stop();
            });
        }
-        await this.consumerGroup?.leaveGroup().catch((error) => logger_1.log.debug(
-        await this.cluster.disconnect().catch((
+        await this.consumerGroup?.leaveGroup().catch((error) => logger_1.log.debug('Failed to leave group', { reason: error.message }));
+        await this.cluster.disconnect().catch(() => { });
     }
     async startFetchManager() {
         const { groupId } = this.options;
@@ -129,18 +128,22 @@
             }
             catch (error) {
                 await this.fetchManager?.stop();
-                if (error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
+                if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
                     logger_1.log.debug('Rebalance in progress...', { apiName: error.apiName, groupId });
                     continue;
                 }
-                if (error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
+                if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
                     logger_1.log.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
                     this.close();
                     break;
                 }
-                if (error instanceof error_1.
-                (
-
+                if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_COORDINATOR) {
+                    logger_1.log.debug('Not coordinator. Searching for new coordinator...');
+                    await this.consumerGroup?.findCoordinator();
+                    return;
+                }
+                if (error instanceof error_1.ConnectionError) {
+                    logger_1.log.debug(`${error.message}. Restarting consumer...`, { stack: error.stack });
                     this.close().then(() => this.start());
                     break;
                 }
@@ -233,13 +236,44 @@
             });
         }
         catch (error) {
+            await this.handleError(error);
+            return this.fetch(nodeId, assignment);
+        }
+    }
+    async fetchMetadata() {
+        const { topics, allowTopicAutoCreation } = this.options;
+        try {
+            await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.fetchMetadata();
+        }
+    }
+    async fetchOffsets() {
+        const { fromTimestamp } = this.options;
+        try {
+            await this.offsetManager.fetchOffsets({ fromTimestamp });
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.fetchOffsets();
+        }
+    }
+    async handleError(error) {
+        await (0, api_1.handleApiError)(error).catch(async (error) => {
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_LEADER_OR_FOLLOWER) {
+                logger_1.log.debug('Refreshing metadata', { reason: error.message });
+                await this.fetchMetadata();
+                return;
+            }
             if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.OFFSET_OUT_OF_RANGE) {
                 logger_1.log.warn('Offset out of range. Resetting offsets.');
-                await this.
-                return
+                await this.fetchOffsets();
+                return;
             }
             throw error;
-        }
+        });
     }
 }
 exports.Consumer = Consumer;
```
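
Condensed, the consumer loop's new error branching reduces to a small decision table. The sketch below uses plain string codes in place of kafka-ts's `API_ERROR` values and the `KafkaTSApiError`/`ConnectionError` instance checks.

```ts
// Sketch of the fetch-loop branching added above. The string codes stand in
// for the API_ERROR constants checked via `error instanceof KafkaTSApiError`.
export type LoopAction = 'continue' | 'close' | 'find-coordinator' | 'restart' | 'rethrow';

export function decide(errorCode: string | undefined, isConnectionError: boolean): LoopAction {
    if (errorCode === 'REBALANCE_IN_PROGRESS') return 'continue';   // rejoin on the next iteration
    if (errorCode === 'FENCED_INSTANCE_ID') return 'close';         // another instance with the same groupInstanceId took over
    if (errorCode === 'NOT_COORDINATOR') return 'find-coordinator'; // coordinator moved; rediscover it
    if (isConnectionError) return 'restart';                        // close().then(() => start())
    return 'rethrow';
}
```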

package/dist/consumer/metadata.d.ts

```diff
@@ -0,0 +1,24 @@
+import { IsolationLevel } from "../api/fetch";
+import { Assignment } from "../api/sync-group";
+import { Cluster } from "../cluster";
+import { OffsetManager } from "./offset-manager";
+export type Metadata = ReturnType<typeof createMetadata>;
+type MetadataOptions = {
+    cluster: Cluster;
+    topics?: string[];
+    isolationLevel?: IsolationLevel;
+    allowTopicAutoCreation?: boolean;
+    fromBeginning?: boolean;
+    offsetManager?: OffsetManager;
+};
+export declare const createMetadata: ({ cluster, topics, isolationLevel, allowTopicAutoCreation, fromBeginning, offsetManager, }: MetadataOptions) => {
+    init: () => Promise<void>;
+    getTopicPartitions: () => Record<string, number[]>;
+    getTopicIdByName: (name: string) => string;
+    getTopicNameById: (id: string) => string;
+    getAssignment: () => Assignment;
+    setAssignment: (newAssignment: Assignment) => void;
+    getLeaderIdByTopicPartition: (topic: string, partition: number) => number;
+    getIsrNodeIdsByTopicPartition: (topic: string, partition: number) => number[];
+};
+export {};
```

package/dist/consumer/metadata.js

```diff
@@ -0,0 +1,64 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createMetadata = void 0;
+const api_1 = require("../api");
+const createMetadata = ({ cluster, topics, isolationLevel = 0 /* IsolationLevel.READ_UNCOMMITTED */, allowTopicAutoCreation = true, fromBeginning = false, offsetManager, }) => {
+    let topicPartitions = {};
+    let topicNameById = {};
+    let topicIdByName = {};
+    let leaderIdByTopicPartition = {};
+    let isrNodesByTopicPartition;
+    let assignment = {};
+    const fetchMetadata = async () => {
+        const response = await cluster.sendRequest(api_1.API.METADATA, {
+            allowTopicAutoCreation,
+            includeTopicAuthorizedOperations: false,
+            topics: topics?.map((name) => ({ id: null, name })) ?? null,
+        });
+        topicPartitions = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.partitions.map((partition) => partition.partitionIndex)]));
+        topicNameById = Object.fromEntries(response.topics.map((topic) => [topic.topicId, topic.name]));
+        topicIdByName = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.topicId]));
+        leaderIdByTopicPartition = Object.fromEntries(response.topics.map((topic) => [
+            topic.name,
+            Object.fromEntries(topic.partitions.map((partition) => [partition.partitionIndex, partition.leaderId])),
+        ]));
+        isrNodesByTopicPartition = Object.fromEntries(response.topics.map((topic) => [
+            topic.name,
+            Object.fromEntries(topic.partitions.map((partition) => [partition.partitionIndex, partition.isrNodes])),
+        ]));
+        assignment = topicPartitions;
+    };
+    const listOffsets = async () => {
+        const offsets = await cluster.sendRequest(api_1.API.LIST_OFFSETS, {
+            replicaId: -1,
+            isolationLevel,
+            topics: Object.entries(assignment)
+                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
+                .map(({ topic, partition }) => ({
+                    name: topic,
+                    partitions: [{ partitionIndex: partition, currentLeaderEpoch: -1, timestamp: -1n }],
+                })),
+        });
+        offsets.topics.forEach(({ name, partitions }) => {
+            partitions.forEach(({ partitionIndex, offset }) => {
+                offsetManager?.resolve(name, partitionIndex, fromBeginning ? 0n : offset);
+            });
+        });
+    };
+    return {
+        init: async () => {
+            await fetchMetadata();
+            await listOffsets();
+        },
+        getTopicPartitions: () => topicPartitions,
+        getTopicIdByName: (name) => topicIdByName[name],
+        getTopicNameById: (id) => topicNameById[id],
+        getAssignment: () => assignment,
+        setAssignment: (newAssignment) => {
+            assignment = newAssignment;
+        },
+        getLeaderIdByTopicPartition: (topic, partition) => leaderIdByTopicPartition[topic][partition],
+        getIsrNodeIdsByTopicPartition: (topic, partition) => isrNodesByTopicPartition[topic][partition],
+    };
+};
+exports.createMetadata = createMetadata;
```
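
For orientation, here is a hedged usage sketch of the new metadata helper. The deep import paths are inferred from the dist layout above and are not part of the documented public API; in practice the consumer wires this up internally with its own `Cluster` and `OffsetManager`.

```ts
// Assumed usage of createMetadata() (normally constructed inside the consumer).
// The import paths below are inferred from the dist/ layout, not from docs.
import { createMetadata } from 'kafka-ts/dist/consumer/metadata';
import type { Cluster } from 'kafka-ts/dist/cluster';

export async function inspectTopic(cluster: Cluster, topic: string): Promise<void> {
    const metadata = createMetadata({ cluster, topics: [topic] });
    await metadata.init(); // one METADATA request, then LIST_OFFSETS for the assignment
    for (const partition of metadata.getTopicPartitions()[topic] ?? []) {
        // Leader and ISR lookups read from the cached metadata response.
        console.log(partition, metadata.getLeaderIdByTopicPartition(topic, partition));
    }
}
```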

package/dist/examples/src/replicator.js

```diff
@@ -0,0 +1,34 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const kafkats_1 = require("kafkats");
+const json_1 = require("./utils/json");
+(async () => {
+    const brokers = [{ host: "localhost", port: 9092 }];
+    const topic = "playground-topic";
+    // const producer = createProducer({ brokers });
+    // const producerInterval = setInterval(async () => {
+    //     await producer.send([
+    //         {
+    //             topic,
+    //             partition: 0,
+    //             offset: 1n,
+    //             timestamp: BigInt(Date.now()),
+    //             key: null,
+    //             value: `PING ${Math.random()}`,
+    //             headers: { timestamp: Date.now().toString() }
+    //         }
+    //     ])
+    // }, 5000);
+    const consumer = await (0, kafkats_1.startConsumer)({
+        topics: [topic],
+        brokers,
+        onBatch: (messages) => {
+            console.log(JSON.stringify(messages, json_1.serializer, 2));
+        },
+    });
+    process.on("SIGINT", async () => {
+        await consumer.close();
+        // clearInterval(producerInterval);
+        // await producer.close();
+    });
+})();
```

package/dist/producer/producer.d.ts

```diff
@@ -19,8 +19,10 @@ export declare class Producer {
         acks?: -1 | 1;
     }): Promise<void>;
     close(): Promise<void>;
-    private
+    private ensureProducerInitialized;
     private initProducerId;
     private getSequence;
     private updateSequence;
+    private fetchMetadata;
+    private handleError;
 }
```
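
Going by the commented-out block in the replicator example above and the updated `Producer` typings, producing a message looks roughly like the sketch below. `createProducer` and the exact message shape are assumptions taken from that comment rather than from verified documentation.

```ts
// Hedged producer sketch based on the commented-out replicator example and
// the acks typing above; createProducer's signature is assumed, not verified.
import { createProducer } from 'kafka-ts';

export async function pingOnce(): Promise<void> {
    const producer = createProducer({ brokers: [{ host: 'localhost', port: 9092 }] });
    await producer.send(
        [
            {
                topic: 'playground-topic',
                partition: 0,
                timestamp: BigInt(Date.now()),
                key: null,
                value: `PING ${Math.random()}`,
                headers: { timestamp: Date.now().toString() },
            },
        ],
        { acks: -1 }, // per producer.d.ts: acks?: -1 | 1
    );
    await producer.close();
}
```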

package/dist/producer/producer.js

```diff
@@ -14,7 +14,6 @@ const api_1 = require("../api");
 const messages_to_topic_partition_leaders_1 = require("../distributors/messages-to-topic-partition-leaders");
 const partitioner_1 = require("../distributors/partitioner");
 const metadata_1 = require("../metadata");
-const delay_1 = require("../utils/delay");
 const error_1 = require("../utils/error");
 const lock_1 = require("../utils/lock");
 const logger_1 = require("../utils/logger");
@@ -41,7 +40,7 @@ class Producer {
         this.partition = this.options.partitioner({ metadata: this.metadata });
     }
     async send(messages, { acks = -1 } = {}) {
-        await this.
+        await this.ensureProducerInitialized();
         const { allowTopicAutoCreation } = this.options;
         const defaultTimestamp = BigInt(Date.now());
         const topics = new Set(messages.map((message) => message.topic));
@@ -51,96 +50,74 @@
             return message;
         });
         const nodeTopicPartitionMessages = (0, messages_to_topic_partition_leaders_1.distributeMessagesToTopicPartitionLeaders)(partitionedMessages, this.metadata.getTopicPartitionLeaderIds());
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        await Promise.all(Object.entries(nodeTopicPartitionMessages).map(async ([nodeId, topicPartitionMessages]) => {
+            try {
+                await this.lock.acquire([`node:${nodeId}`], async () => {
+                    const topicData = Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
+                        name: topic,
+                        partitionData: Object.entries(partitionMessages).map(([partition, messages]) => {
+                            const partitionIndex = parseInt(partition);
+                            let baseTimestamp;
+                            let maxTimestamp;
+                            messages.forEach(({ timestamp = defaultTimestamp }) => {
+                                if (!baseTimestamp || timestamp < baseTimestamp) {
+                                    baseTimestamp = timestamp;
+                                }
+                                if (!maxTimestamp || timestamp > maxTimestamp) {
+                                    maxTimestamp = timestamp;
+                                }
+                            });
+                            return {
+                                index: partitionIndex,
+                                baseOffset: 0n,
+                                partitionLeaderEpoch: -1,
+                                attributes: 0,
+                                lastOffsetDelta: messages.length - 1,
+                                baseTimestamp: baseTimestamp ?? 0n,
+                                maxTimestamp: maxTimestamp ?? 0n,
+                                producerId: this.producerId,
+                                producerEpoch: 0,
+                                baseSequence: this.getSequence(topic, partitionIndex),
+                                records: messages.map((message, index) => ({
                                     attributes: 0,
-
-
-
-
-
-
-
-                                        attributes: 0,
-                                        timestampDelta: (message.timestamp ?? defaultTimestamp) - (baseTimestamp ?? 0n),
-                                        offsetDelta: index,
-                                        key: message.key ?? null,
-                                        value: message.value,
-                                        headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
-                                            key,
-                                            value,
+                                    timestampDelta: (message.timestamp ?? defaultTimestamp) - (baseTimestamp ?? 0n),
+                                    offsetDelta: index,
+                                    key: message.key ?? null,
+                                    value: message.value,
+                                    headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
+                                        key,
+                                        value,
                                     })),
-                                    }
-                                }
-                            })
-
-
-
-
-
-
-
-
-
-
+                                })),
+                            };
+                        }),
+                    }));
+                    await this.cluster.sendRequestToNode(parseInt(nodeId))(api_1.API.PRODUCE, {
+                        transactionalId: null,
+                        acks,
+                        timeoutMs: 30000,
+                        topicData,
+                    });
+                    topicData.forEach(({ name, partitionData }) => {
+                        partitionData.forEach(({ index, records }) => {
+                            this.updateSequence(name, index, records.length);
                        });
                    });
-                }
-                catch (error) {
-                    if (error instanceof error_1.BrokerNotAvailableError || (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_LEADER_OR_FOLLOWER)) {
-                        logger_1.log.debug('Refreshing broker metadata', { reason: error.message, nodeId });
-                        await this.cluster.refreshBrokerMetadata();
-                        await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
-                        const messages = Object.values(topicPartitionMessages).flatMap(partitionMessages => Object.values(partitionMessages).flat()).map(({ partition, ...message }) => message);
-                        return this.send(messages, { acks });
-                    }
-                    throw error;
-                }
-            }));
-        }
-        catch (error) {
-            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_LEADER_OR_FOLLOWER) {
-                await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
-            }
-            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.OUT_OF_ORDER_SEQUENCE_NUMBER) {
-                await this.initProducerId();
-            }
-            logger_1.log.warn('Reconnecting producer due to an unhandled error', { error });
-            try {
-                await this.cluster.disconnect();
-                await this.cluster.connect();
+                });
            }
            catch (error) {
-
+                await this.handleError(error);
+                const messages = Object.values(topicPartitionMessages)
+                    .flatMap((partitionMessages) => Object.values(partitionMessages).flat())
+                    .map(({ partition, ...message }) => message);
+                return this.send(messages, { acks });
            }
-
-            }
+        }));
     }
     async close() {
         await this.cluster.disconnect();
     }
-
+    ensureProducerInitialized = (0, shared_1.shared)(async () => {
         await this.cluster.ensureConnected();
         if (!this.producerId) {
             await this.initProducerId();
@@ -159,11 +136,8 @@
             this.sequences = {};
         }
         catch (error) {
-
-
-            return this.initProducerId();
-            }
-            throw error;
+            await this.handleError(error);
+            return this.initProducerId();
         }
     }
     getSequence(topic, partition) {
@@ -174,6 +148,31 @@
         this.sequences[topic][partition] ??= 0;
         this.sequences[topic][partition] += messagesCount;
     }
+    async fetchMetadata(topics, allowTopicAutoCreation) {
+        try {
+            await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
+        }
+        catch (error) {
+            await this.handleError(error);
+            return this.fetchMetadata(topics, allowTopicAutoCreation);
+        }
+    }
+    async handleError(error) {
+        await (0, api_1.handleApiError)(error).catch(async (error) => {
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_LEADER_OR_FOLLOWER) {
+                logger_1.log.debug('Refreshing metadata', { reason: error.message });
+                const topics = Object.keys(this.metadata.getTopicPartitions());
+                await this.fetchMetadata(topics, false);
+                return;
+            }
+            if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.OUT_OF_ORDER_SEQUENCE_NUMBER) {
+                logger_1.log.debug('Out of order sequence number. Reinitializing producer ID');
+                await this.initProducerId();
+                return;
+            }
+            throw error;
+        });
+    }
 }
 exports.Producer = Producer;
 __decorate([
```