kafka-ts 0.0.6-beta.1 → 0.0.6-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/connection.js
CHANGED
@@ -43,11 +43,8 @@ class Producer {
         const topics = Array.from(new Set(messages.map((message) => message.topic)));
         await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
         const nodeTopicPartitionMessages = (0, messages_to_topic_partition_leaders_1.distributeMessagesToTopicPartitionLeaders)(messages.map((message) => ({ ...message, partition: this.partition(message) })), this.metadata.getTopicPartitionLeaderIds());
-        await Promise.all(Object.entries(nodeTopicPartitionMessages).map(([nodeId, topicPartitionMessages]) =>
-
-            acks,
-            timeoutMs: 5000,
-            topicData: Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
+        await Promise.all(Object.entries(nodeTopicPartitionMessages).map(async ([nodeId, topicPartitionMessages]) => {
+            const topicData = Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
                 name: topic,
                 partitionData: Object.entries(partitionMessages).map(([partition, messages]) => {
                     const partitionIndex = parseInt(partition);
@@ -61,7 +58,7 @@ class Producer {
                             maxTimestamp = timestamp;
                         }
                     });
-                    const baseSequence = this.
+                    const baseSequence = this.nextBaseSequence(topic, partitionIndex, messages.length);
                     return {
                         index: partitionIndex,
                         baseOffset: 0n,
@@ -86,8 +83,24 @@ class Producer {
                         })),
                     };
                 }),
-            }))
-
+            }));
+            try {
+                return await this.cluster.sendRequestToNode(parseInt(nodeId))(api_1.API.PRODUCE, {
+                    transactionalId: null,
+                    acks,
+                    timeoutMs: 5000,
+                    topicData,
+                });
+            }
+            catch (error) {
+                topicData.forEach(({ name, partitionData }) => {
+                    partitionData.forEach(({ index, records }) => {
+                        this.revertBaseSequence(name, index, records.length);
+                    });
+                });
+                throw error;
+            }
+        }));
     }
     async close() {
         await this.cluster.disconnect();
@@ -116,13 +129,16 @@ class Producer {
             throw error;
         }
     }
-
+    nextBaseSequence(topic, partition, messagesCount) {
         this.sequences[topic] ??= {};
         this.sequences[topic][partition] ??= 0;
         const baseSequence = this.sequences[topic][partition];
         this.sequences[topic][partition] += messagesCount;
         return baseSequence;
     }
+    revertBaseSequence(topic, partition, messagesCount) {
+        this.sequences[topic][partition] -= messagesCount;
+    }
 }
 exports.Producer = Producer;
 __decorate([
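Note on the producer change above: the PRODUCE request now sits inside a try/catch, and if the request to a broker throws, the base sequences that were reserved for each partition are rolled back via revertBaseSequence, keeping the producer's sequence counters aligned with what the broker actually accepted. A minimal TypeScript sketch of that reserve-and-revert pattern follows; the SequenceTracker class and the produceWithRollback/sendProduce names are illustrative stand-ins, not part of kafka-ts, and only the nextBaseSequence/revertBaseSequence logic mirrors the compiled code in the diff.

// Minimal sketch of the sequence bookkeeping introduced above (names are
// illustrative, not kafka-ts exports).
class SequenceTracker {
    private sequences: Record<string, Record<number, number>> = {};

    nextBaseSequence(topic: string, partition: number, messagesCount: number): number {
        // Reserve a contiguous block of sequence numbers for this batch.
        this.sequences[topic] ??= {};
        this.sequences[topic][partition] ??= 0;
        const baseSequence = this.sequences[topic][partition];
        this.sequences[topic][partition] += messagesCount;
        return baseSequence;
    }

    revertBaseSequence(topic: string, partition: number, messagesCount: number): void {
        // Give the reserved block back after a failed produce request.
        this.sequences[topic][partition] -= messagesCount;
    }
}

// Usage pattern matching the new try/catch: reserve while building the
// batch, revert and rethrow if the request to the broker fails.
async function produceWithRollback(
    tracker: SequenceTracker,
    topic: string,
    partition: number,
    messageCount: number,
    sendProduce: (baseSequence: number) => Promise<void>, // hypothetical sender
): Promise<void> {
    const baseSequence = tracker.nextBaseSequence(topic, partition, messageCount);
    try {
        await sendProduce(baseSequence);
    } catch (error) {
        tracker.revertBaseSequence(topic, partition, messageCount);
        throw error;
    }
}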
package/dist/utils/error.d.ts
CHANGED
@@ -5,7 +5,7 @@ export declare class KafkaTSApiError<T = any> extends KafkaTSError {
     errorCode: number;
     errorMessage: string | null;
     response: T;
-    request:
+    request: unknown | undefined;
     constructor(errorCode: number, errorMessage: string | null, response: T);
 }
 export declare class ConnectionError extends KafkaTSError {
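The request field on KafkaTSApiError is now declared as unknown | undefined, so consumers have to narrow it before reading anything from it. A small, hedged sketch of that narrowing follows; ApiErrorLike is a local stand-in that mirrors the fields declared above, not an export of kafka-ts.

// Local stand-in mirroring the declaration above; in real code you would
// use the error class that kafka-ts actually exports.
interface ApiErrorLike {
    errorCode: number;
    errorMessage: string | null;
    request: unknown | undefined;
}

function formatApiError(error: ApiErrorLike): string {
    // `request` is unknown, so serialize it defensively instead of assuming a shape.
    const request = error.request === undefined ? '<none>' : JSON.stringify(error.request);
    return `Kafka API error ${error.errorCode}: ${error.errorMessage ?? 'no message'} (request: ${request})`;
}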
package/dist/utils/retrier.js
CHANGED
@@ -2,6 +2,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.defaultRetrier = exports.createExponentialBackoffRetrier = void 0;
 const delay_1 = require("./delay");
+const logger_1 = require("./logger");
 const createExponentialBackoffRetrier = ({ retries = 5, initialDelayMs = 100, maxDelayMs = 3000, multiplier = 2, onFailure = (error) => {
     throw error;
 }, } = {}) => async (func) => {
@@ -16,6 +17,7 @@ const createExponentialBackoffRetrier = ({ retries = 5, initialDelayMs = 100, ma
         catch (error) {
             lastError = error;
         }
+        logger_1.log.debug(`Failed to process batch (retriesLeft: ${retriesLeft})`);
         if (--retriesLeft < 1)
             break;
         await (0, delay_1.delay)(delayMs);
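The retrier change adds a debug log line for every failed attempt before backing off. Below is a standalone TypeScript sketch of the backoff shape for context: the option names and defaults (retries, initialDelayMs, maxDelayMs, multiplier, onFailure) come from the lines above, while the loop skeleton, the final onFailure call, and the console.debug/setTimeout stand-ins for the package's ./logger and ./delay helpers are assumptions.

// Sketch only: assumed loop structure around the lines shown in the diff.
const delay = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

type RetrierOptions = {
    retries?: number;
    initialDelayMs?: number;
    maxDelayMs?: number;
    multiplier?: number;
    onFailure?: (error: unknown) => void;
};

const createExponentialBackoffRetrier =
    ({
        retries = 5,
        initialDelayMs = 100,
        maxDelayMs = 3000,
        multiplier = 2,
        onFailure = (error) => {
            throw error;
        },
    }: RetrierOptions = {}) =>
    async <T>(func: () => Promise<T>): Promise<T | void> => {
        let retriesLeft = retries;
        let delayMs = initialDelayMs;
        let lastError: unknown = undefined;
        while (true) {
            try {
                return await func();
            } catch (error) {
                lastError = error;
            }
            // Added in this diff: a debug log line for every failed attempt.
            console.debug(`Failed to process batch (retriesLeft: ${retriesLeft})`);
            if (--retriesLeft < 1) break;
            await delay(delayMs);
            delayMs = Math.min(delayMs * multiplier, maxDelayMs);
        }
        return onFailure(lastError);
    };

// Usage: wrap any flaky async operation and retry it with exponential backoff.
const retry = createExponentialBackoffRetrier({ retries: 3, initialDelayMs: 200 });
// await retry(() => doFlakyWork()); // doFlakyWork is any async operation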