kafka-ts 0.0.13 → 0.0.14
- package/dist/consumer/consumer.js +21 -21
- package/package.json +1 -1
@@ -108,27 +108,27 @@ class Consumer extends events_1.default {
     async startFetchManager() {
         const { groupId, batchGranularity, concurrency } = this.options;
         while (!this.stopHook) {
-            await this.consumerGroup?.join();
-            // TODO: If leader is not available, find another read replica
-            const nodeAssignments = Object.entries((0, messages_to_topic_partition_leaders_1.distributeMessagesToTopicPartitionLeaders)(Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition }))), this.metadata.getTopicPartitionLeaderIds())).map(([nodeId, assignment]) => ({
-                nodeId: parseInt(nodeId),
-                assignment: Object.fromEntries(Object.entries(assignment).map(([topic, partitions]) => [
-                    topic,
-                    Object.keys(partitions).map(Number),
-                ])),
-            }));
-            const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
-            const numProcessors = Math.min(concurrency, numPartitions);
-            this.fetchManager = new fetch_manager_1.FetchManager({
-                fetch: this.fetch.bind(this),
-                process: this.process.bind(this),
-                metadata: this.metadata,
-                consumerGroup: this.consumerGroup,
-                nodeAssignments,
-                batchGranularity,
-                concurrency: numProcessors,
-            });
             try {
+                await this.consumerGroup?.join();
+                // TODO: If leader is not available, find another read replica
+                const nodeAssignments = Object.entries((0, messages_to_topic_partition_leaders_1.distributeMessagesToTopicPartitionLeaders)(Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition }))), this.metadata.getTopicPartitionLeaderIds())).map(([nodeId, assignment]) => ({
+                    nodeId: parseInt(nodeId),
+                    assignment: Object.fromEntries(Object.entries(assignment).map(([topic, partitions]) => [
+                        topic,
+                        Object.keys(partitions).map(Number),
+                    ])),
+                }));
+                const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
+                const numProcessors = Math.min(concurrency, numPartitions);
+                this.fetchManager = new fetch_manager_1.FetchManager({
+                    fetch: this.fetch.bind(this),
+                    process: this.process.bind(this),
+                    metadata: this.metadata,
+                    consumerGroup: this.consumerGroup,
+                    nodeAssignments,
+                    batchGranularity,
+                    concurrency: numProcessors,
+                });
                 await this.fetchManager.start();
                 if (!nodeAssignments.length) {
                     logger_1.log.debug('No partitions assigned. Waiting for reassignment...', { groupId });
@@ -137,7 +137,7 @@ class Consumer extends events_1.default {
                 }
             }
             catch (error) {
-                await this.fetchManager.stop();
+                await this.fetchManager?.stop();
                 if (error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
                     logger_1.log.debug('Rebalance in progress...');
                     continue;
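
In short: 0.0.14 moves the consumer-group join and the FetchManager construction inside the try block, so an error thrown during join() (such as REBALANCE_IN_PROGRESS) reaches the same catch that already retries rebalances mid-fetch. Because this.fetchManager may now be unset when that catch runs, the cleanup call gains optional chaining. Below is a minimal TypeScript sketch of the resulting control flow; FetchManager, joinGroup, shouldStop, and the REBALANCE_IN_PROGRESS value are illustrative stand-ins, not the kafka-ts internals.

// Sketch of the 0.0.14 retry loop, under assumed stand-in names.

const REBALANCE_IN_PROGRESS = 27; // assumed stand-in for api_1.API_ERROR.REBALANCE_IN_PROGRESS

class FetchManager {
    async start(): Promise<void> {
        // the real manager fetches and processes batches here
    }
    async stop(): Promise<void> {
        // the real manager cancels in-flight work here
    }
}

async function joinGroup(): Promise<void> {
    // stand-in for consumerGroup.join(); may throw { errorCode: REBALANCE_IN_PROGRESS }
}

export async function startFetchManager(shouldStop: () => boolean): Promise<void> {
    let fetchManager: FetchManager | undefined;
    while (!shouldStop()) {
        try {
            // As of 0.0.14, the join and the manager construction both happen
            // inside the try, so a rebalance thrown during join() is retried too.
            await joinGroup();
            fetchManager = new FetchManager();
            await fetchManager.start();
        } catch (error) {
            // fetchManager is still undefined if join() threw before it was
            // constructed; hence the `?.` that 0.0.14 adds to the stop() call.
            await fetchManager?.stop();
            if ((error as { errorCode?: number }).errorCode === REBALANCE_IN_PROGRESS) {
                continue; // rebalance in progress: loop around and re-join
            }
            throw error;
        }
    }
}

A side effect of the optional chaining worth noting: without it, a join() failure on the first iteration would raise a TypeError from the undefined fetchManager and mask the original error.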