ioredis-om 5.10.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +1571 -0
- package/built/Command.d.ts +166 -0
- package/built/Command.js +450 -0
- package/built/DataHandler.d.ts +37 -0
- package/built/DataHandler.js +224 -0
- package/built/Pipeline.d.ts +31 -0
- package/built/Pipeline.js +342 -0
- package/built/Redis.d.ts +243 -0
- package/built/Redis.js +800 -0
- package/built/ScanStream.d.ts +23 -0
- package/built/ScanStream.js +51 -0
- package/built/Script.d.ts +11 -0
- package/built/Script.js +62 -0
- package/built/SubscriptionSet.d.ts +14 -0
- package/built/SubscriptionSet.js +41 -0
- package/built/autoPipelining.d.ts +8 -0
- package/built/autoPipelining.js +167 -0
- package/built/cluster/ClusterOptions.d.ts +172 -0
- package/built/cluster/ClusterOptions.js +22 -0
- package/built/cluster/ClusterSubscriber.d.ts +29 -0
- package/built/cluster/ClusterSubscriber.js +223 -0
- package/built/cluster/ClusterSubscriberGroup.d.ts +108 -0
- package/built/cluster/ClusterSubscriberGroup.js +373 -0
- package/built/cluster/ConnectionPool.d.ts +37 -0
- package/built/cluster/ConnectionPool.js +154 -0
- package/built/cluster/DelayQueue.d.ts +20 -0
- package/built/cluster/DelayQueue.js +53 -0
- package/built/cluster/ShardedSubscriber.d.ts +36 -0
- package/built/cluster/ShardedSubscriber.js +147 -0
- package/built/cluster/index.d.ts +163 -0
- package/built/cluster/index.js +937 -0
- package/built/cluster/util.d.ts +25 -0
- package/built/cluster/util.js +100 -0
- package/built/connectors/AbstractConnector.d.ts +12 -0
- package/built/connectors/AbstractConnector.js +26 -0
- package/built/connectors/ConnectorConstructor.d.ts +5 -0
- package/built/connectors/ConnectorConstructor.js +2 -0
- package/built/connectors/SentinelConnector/FailoverDetector.d.ts +11 -0
- package/built/connectors/SentinelConnector/FailoverDetector.js +45 -0
- package/built/connectors/SentinelConnector/SentinelIterator.d.ts +13 -0
- package/built/connectors/SentinelConnector/SentinelIterator.js +37 -0
- package/built/connectors/SentinelConnector/index.d.ts +72 -0
- package/built/connectors/SentinelConnector/index.js +305 -0
- package/built/connectors/SentinelConnector/types.d.ts +21 -0
- package/built/connectors/SentinelConnector/types.js +2 -0
- package/built/connectors/StandaloneConnector.d.ts +17 -0
- package/built/connectors/StandaloneConnector.js +69 -0
- package/built/connectors/index.d.ts +3 -0
- package/built/connectors/index.js +7 -0
- package/built/constants/TLSProfiles.d.ts +9 -0
- package/built/constants/TLSProfiles.js +149 -0
- package/built/errors/ClusterAllFailedError.d.ts +7 -0
- package/built/errors/ClusterAllFailedError.js +15 -0
- package/built/errors/MaxRetriesPerRequestError.d.ts +5 -0
- package/built/errors/MaxRetriesPerRequestError.js +14 -0
- package/built/errors/index.d.ts +2 -0
- package/built/errors/index.js +5 -0
- package/built/index.d.ts +44 -0
- package/built/index.js +62 -0
- package/built/redis/RedisOptions.d.ts +197 -0
- package/built/redis/RedisOptions.js +58 -0
- package/built/redis/event_handler.d.ts +4 -0
- package/built/redis/event_handler.js +315 -0
- package/built/tracing.d.ts +26 -0
- package/built/tracing.js +96 -0
- package/built/transaction.d.ts +13 -0
- package/built/transaction.js +100 -0
- package/built/types.d.ts +33 -0
- package/built/types.js +2 -0
- package/built/utils/Commander.d.ts +50 -0
- package/built/utils/Commander.js +117 -0
- package/built/utils/RedisCommander.d.ts +8950 -0
- package/built/utils/RedisCommander.js +7 -0
- package/built/utils/applyMixin.d.ts +3 -0
- package/built/utils/applyMixin.js +8 -0
- package/built/utils/argumentParsers.d.ts +14 -0
- package/built/utils/argumentParsers.js +74 -0
- package/built/utils/debug.d.ts +16 -0
- package/built/utils/debug.js +95 -0
- package/built/utils/index.d.ts +124 -0
- package/built/utils/index.js +332 -0
- package/built/utils/lodash.d.ts +4 -0
- package/built/utils/lodash.js +9 -0
- package/package.json +103 -0
package/built/cluster/ClusterSubscriberGroup.js
@@ -0,0 +1,373 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const utils_1 = require("../utils");
+const util_1 = require("./util");
+const calculateSlot = require("cluster-key-slot");
+const ShardedSubscriber_1 = require("./ShardedSubscriber");
+const debug = (0, utils_1.Debug)("cluster:subscriberGroup");
+/**
+ * Redis distinguishes between "normal" and sharded PubSub. When using the normal PubSub feature,
+ * exactly one subscriber exists per cluster instance because the Redis cluster bus forwards
+ * messages between shards. Sharded PubSub removes this limitation by making each shard
+ * responsible for its own messages.
+ *
+ * This class coordinates one ShardedSubscriber per master node in the cluster, providing
+ * sharded PubSub support while keeping the public API backward compatible.
+ */
+class ClusterSubscriberGroup {
+    /**
+     * Register callbacks
+     *
+     * @param cluster
+     */
+    constructor(subscriberGroupEmitter, options) {
+        this.subscriberGroupEmitter = subscriberGroupEmitter;
+        this.options = options;
+        this.shardedSubscribers = new Map();
+        this.clusterSlots = [];
+        // Simple [min, max] slot ranges aren't enough because you can migrate single slots
+        this.subscriberToSlotsIndex = new Map();
+        this.channels = new Map();
+        this.failedAttemptsByNode = new Map();
+        // Only latest pending reset kept; throttled by refreshSlotsCache's isRefreshing + backoff delay
+        this.isResetting = false;
+        this.pendingReset = null;
+        /**
+         * Handles failed subscriber connections by emitting an event to refresh the slots cache
+         * after a backoff period.
+         *
+         * @param error
+         * @param nodeKey
+         */
+        this.handleSubscriberConnectFailed = (error, nodeKey) => {
+            const currentAttempts = this.failedAttemptsByNode.get(nodeKey) || 0;
+            const failedAttempts = currentAttempts + 1;
+            this.failedAttemptsByNode.set(nodeKey, failedAttempts);
+            const attempts = Math.min(failedAttempts, ClusterSubscriberGroup.MAX_RETRY_ATTEMPTS);
+            const backoff = Math.min(ClusterSubscriberGroup.BASE_BACKOFF_MS * 2 ** attempts, ClusterSubscriberGroup.MAX_BACKOFF_MS);
+            const jitter = Math.floor((Math.random() - 0.5) * (backoff * 0.5));
+            const delay = Math.max(0, backoff + jitter);
+            debug("Failed to connect subscriber for %s. Refreshing slots in %dms", nodeKey, delay);
+            this.subscriberGroupEmitter.emit("subscriberConnectFailed", {
+                delay,
+                error,
+            });
+        };
+        /**
+         * Handles successful subscriber connections by resetting the failed attempts counter.
+         *
+         * @param nodeKey
+         */
+        this.handleSubscriberConnectSucceeded = (nodeKey) => {
+            this.failedAttemptsByNode.delete(nodeKey);
+        };
+    }
+    /**
+     * Get the responsible subscriber.
+     *
+     * @param slot
+     */
+    getResponsibleSubscriber(slot) {
+        const nodeKey = this.clusterSlots[slot][0];
+        const sub = this.shardedSubscribers.get(nodeKey);
+        if (sub && sub.subscriberStatus === "idle") {
+            sub
+                .start()
+                .then(() => {
+                    this.handleSubscriberConnectSucceeded(sub.getNodeKey());
+                })
+                .catch((err) => {
+                    this.handleSubscriberConnectFailed(err, sub.getNodeKey());
+                });
+        }
+        return sub;
+    }
+    /**
+     * Adds a channel for which this subscriber group is responsible
+     *
+     * @param channels
+     */
+    addChannels(channels) {
+        const slot = calculateSlot(channels[0]);
+        // Check if the all channels belong to the same slot and otherwise reject the operation
+        for (const c of channels) {
+            if (calculateSlot(c) !== slot) {
+                return -1;
+            }
+        }
+        const currChannels = this.channels.get(slot);
+        if (!currChannels) {
+            this.channels.set(slot, channels);
+        }
+        else {
+            this.channels.set(slot, currChannels.concat(channels));
+        }
+        return Array.from(this.channels.values()).reduce((sum, array) => sum + array.length, 0);
+    }
+    /**
+     * Removes channels for which the subscriber group is responsible by optionally unsubscribing
+     * @param channels
+     */
+    removeChannels(channels) {
+        const slot = calculateSlot(channels[0]);
+        // Check if the all channels belong to the same slot and otherwise reject the operation
+        for (const c of channels) {
+            if (calculateSlot(c) !== slot) {
+                return -1;
+            }
+        }
+        const slotChannels = this.channels.get(slot);
+        if (slotChannels) {
+            const updatedChannels = slotChannels.filter((c) => !channels.includes(c));
+            this.channels.set(slot, updatedChannels);
+        }
+        return Array.from(this.channels.values()).reduce((sum, array) => sum + array.length, 0);
+    }
+    /**
+     * Disconnect all subscribers and clear some of the internal state.
+     */
+    stop() {
+        for (const s of this.shardedSubscribers.values()) {
+            s.stop();
+        }
+        // Clear subscriber instances and pending operations.
+        // Channels are preserved for resubscription on reconnect.
+        this.pendingReset = null;
+        this.shardedSubscribers.clear();
+        this.subscriberToSlotsIndex.clear();
+    }
+    /**
+     * Start all not yet started subscribers
+     */
+    start() {
+        const startPromises = [];
+        for (const s of this.shardedSubscribers.values()) {
+            if (this.shouldStartSubscriber(s)) {
+                startPromises.push(s
+                    .start()
+                    .then(() => {
+                        this.handleSubscriberConnectSucceeded(s.getNodeKey());
+                    })
+                    .catch((err) => {
+                        this.handleSubscriberConnectFailed(err, s.getNodeKey());
+                    }));
+                this.subscriberGroupEmitter.emit("+subscriber");
+            }
+        }
+        return Promise.all(startPromises);
+    }
+    /**
+     * Resets the subscriber group by disconnecting all subscribers that are no longer needed and connecting new ones.
+     */
+    async reset(clusterSlots, clusterNodes) {
+        if (this.isResetting) {
+            this.pendingReset = { slots: clusterSlots, nodes: clusterNodes };
+            return;
+        }
+        this.isResetting = true;
+        try {
+            const hasTopologyChanged = this._refreshSlots(clusterSlots);
+            const hasFailedSubscribers = this.hasUnhealthySubscribers();
+            if (!hasTopologyChanged && !hasFailedSubscribers) {
+                debug("No topology change detected or failed subscribers. Skipping reset.");
+                return;
+            }
+            // For each of the sharded subscribers
+            for (const [nodeKey, shardedSubscriber] of this.shardedSubscribers) {
+                if (
+                    // If the subscriber is still responsible for a slot range and is healthy then keep it
+                    this.subscriberToSlotsIndex.has(nodeKey) &&
+                    shardedSubscriber.isHealthy()) {
+                    debug("Skipping deleting subscriber for %s", nodeKey);
+                    continue;
+                }
+                debug("Removing subscriber for %s", nodeKey);
+                // Otherwise stop the subscriber and remove it
+                shardedSubscriber.stop();
+                this.shardedSubscribers.delete(nodeKey);
+                this.subscriberGroupEmitter.emit("-subscriber");
+            }
+            const startPromises = [];
+            // For each node in slots cache
+            for (const [nodeKey, _] of this.subscriberToSlotsIndex) {
+                const existingSubscriber = this.shardedSubscribers.get(nodeKey);
+                // If we already have a subscriber for this node, only ensure it is healthy
+                // when it now owns slots with active channel subscriptions.
+                if (existingSubscriber && existingSubscriber.isHealthy()) {
+                    debug("Skipping creating new subscriber for %s", nodeKey);
+                    if (!existingSubscriber.isStarted() &&
+                        this.shouldStartSubscriber(existingSubscriber)) {
+                        startPromises.push(existingSubscriber
+                            .start()
+                            .then(() => {
+                                this.handleSubscriberConnectSucceeded(nodeKey);
+                            })
+                            .catch((error) => {
+                                this.handleSubscriberConnectFailed(error, nodeKey);
+                            }));
+                    }
+                    continue;
+                }
+                // If we have an existing subscriber but it is not healthy, stop it
+                if (existingSubscriber && !existingSubscriber.isHealthy()) {
+                    debug("Replacing subscriber for %s", nodeKey);
+                    existingSubscriber.stop();
+                    this.shardedSubscribers.delete(nodeKey);
+                    this.subscriberGroupEmitter.emit("-subscriber");
+                }
+                debug("Creating new subscriber for %s", nodeKey);
+                // Otherwise create a new subscriber
+                const redis = clusterNodes.find((node) => {
+                    return (0, util_1.getNodeKey)(node.options) === nodeKey;
+                });
+                if (!redis) {
+                    debug("Failed to find node for key %s", nodeKey);
+                    continue;
+                }
+                const sub = new ShardedSubscriber_1.default(this.subscriberGroupEmitter, redis.options, this.options.redisOptions);
+                this.shardedSubscribers.set(nodeKey, sub);
+                if (this.shouldStartSubscriber(sub)) {
+                    startPromises.push(sub
+                        .start()
+                        .then(() => {
+                            this.handleSubscriberConnectSucceeded(nodeKey);
+                        })
+                        .catch((error) => {
+                            this.handleSubscriberConnectFailed(error, nodeKey);
+                        }));
+                }
+                this.subscriberGroupEmitter.emit("+subscriber");
+            }
+            // It's vital to await the start promises before resubscribing
+            // Otherwise we might try to resubscribe to a subscriber that is not yet connected
+            // This can cause a race condition
+            await Promise.all(startPromises);
+            this._resubscribe();
+            this.subscriberGroupEmitter.emit("subscribersReady");
+        }
+        finally {
+            this.isResetting = false;
+            if (this.pendingReset) {
+                const { slots, nodes } = this.pendingReset;
+                this.pendingReset = null;
+                await this.reset(slots, nodes);
+            }
+        }
+    }
+    /**
+     * Refreshes the subscriber-related slot ranges
+     *
+     * Returns false if no refresh was needed
+     *
+     * @param targetSlots
+     */
+    _refreshSlots(targetSlots) {
+        //If there was an actual change, then reassign the slot ranges
+        // Also rebuild if subscriberToSlotsIndex is empty (e.g., after stop() was called)
+        if (this._slotsAreEqual(targetSlots) && this.subscriberToSlotsIndex.size > 0) {
+            debug("Nothing to refresh because the new cluster map is equal to the previous one.");
+            return false;
+        }
+        debug("Refreshing the slots of the subscriber group.");
+        //Rebuild the slots index
+        this.subscriberToSlotsIndex = new Map();
+        for (let slot = 0; slot < targetSlots.length; slot++) {
+            const node = targetSlots[slot][0];
+            if (!this.subscriberToSlotsIndex.has(node)) {
+                this.subscriberToSlotsIndex.set(node, []);
+            }
+            this.subscriberToSlotsIndex.get(node).push(Number(slot));
+        }
+        //Update the cached slots map
+        this.clusterSlots = JSON.parse(JSON.stringify(targetSlots));
+        return true;
+    }
+    /**
+     * Resubscribes to the previous channels
+     *
+     * @private
+     */
+    _resubscribe() {
+        if (this.shardedSubscribers) {
+            this.shardedSubscribers.forEach((s, nodeKey) => {
+                const subscriberSlots = this.subscriberToSlotsIndex.get(nodeKey);
+                if (subscriberSlots) {
+                    //Resubscribe on the underlying connection
+                    subscriberSlots.forEach((ss) => {
+                        //Might return null if being disconnected
+                        const redis = s.getInstance();
+                        const channels = this.channels.get(ss);
+                        if (channels && channels.length > 0) {
+                            if (!redis || redis.status === "end") {
+                                return;
+                            }
+                            if (redis.status === "ready") {
+                                redis.ssubscribe(...channels).catch((err) => {
+                                    // TODO: Should we emit an error event here?
+                                    debug("Failed to ssubscribe on node %s: %s", nodeKey, err);
+                                });
+                            }
+                            else {
+                                redis.once("ready", () => {
+                                    redis.ssubscribe(...channels).catch((err) => {
+                                        // TODO: Should we emit an error event here?
+                                        debug("Failed to ssubscribe on node %s: %s", nodeKey, err);
+                                    });
+                                });
+                            }
+                        }
+                    });
+                }
+            });
+        }
+    }
+    /**
+     * Deep equality of the cluster slots objects
+     *
+     * @param other
+     * @private
+     */
+    _slotsAreEqual(other) {
+        if (this.clusterSlots === undefined) {
+            return false;
+        }
+        else {
+            return JSON.stringify(this.clusterSlots) === JSON.stringify(other);
+        }
+    }
+    /**
+     * Checks if any subscribers are in an unhealthy state.
+     *
+     * A subscriber is considered unhealthy if:
+     * - It exists but is not started (failed/disconnected)
+     * - It's missing entirely for a node that should have one
+     *
+     * @returns true if any subscribers need to be recreated
+     */
+    hasUnhealthySubscribers() {
+        const hasFailedSubscribers = Array.from(this.shardedSubscribers.values()).some((sub) => !sub.isHealthy());
+        const hasMissingSubscribers = Array.from(this.subscriberToSlotsIndex.keys()).some((nodeKey) => !this.shardedSubscribers.has(nodeKey));
+        return hasFailedSubscribers || hasMissingSubscribers;
+    }
+    shouldStartSubscriber(sub) {
+        if (sub.isStarted()) {
+            return false;
+        }
+        if (!sub.isLazyConnect()) {
+            return true;
+        }
+        const subscriberSlots = this.subscriberToSlotsIndex.get(sub.getNodeKey());
+        if (!subscriberSlots) {
+            return false;
+        }
+        return subscriberSlots.some((slot) => {
+            const channels = this.channels.get(slot);
+            return Boolean(channels && channels.length > 0);
+        });
+    }
+}
+exports.default = ClusterSubscriberGroup;
+// Retry strategy
+ClusterSubscriberGroup.MAX_RETRY_ATTEMPTS = 10;
+ClusterSubscriberGroup.MAX_BACKOFF_MS = 2000;
+ClusterSubscriberGroup.BASE_BACKOFF_MS = 100;
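The class above gives each master shard its own subscriber connection, which is what makes SSUBSCRIBE and SPUBLISH usable against a cluster. For orientation, a minimal consumer-side sketch of sharded PubSub, assuming the upstream ioredis Cluster API (ssubscribe/spublish commands and an "smessage" event) and placeholder endpoints:

    import { Cluster } from "ioredis";

    // Placeholder seed node; any reachable cluster node works for topology discovery.
    const subscriber = new Cluster([{ host: "127.0.0.1", port: 7000 }]);
    const publisher = new Cluster([{ host: "127.0.0.1", port: 7000 }]);

    // Sharded messages arrive as "smessage"; routing the subscription to the shard
    // that owns the channel's slot is the job of the subscriber group shown above.
    subscriber.on("smessage", (channel: string, message: string) => {
      console.log(`received "${message}" on ${channel}`);
    });

    async function main() {
      await subscriber.ssubscribe("orders:events");
      await publisher.spublish("orders:events", "order-created");
    }

    main().catch(console.error);

Whether a subscriber connection is opened eagerly or deferred until its slots have active channel subscriptions is governed by shouldStartSubscriber and the lazyConnect handling shown in the diff above.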
package/built/cluster/ConnectionPool.d.ts
@@ -0,0 +1,37 @@
+/// <reference types="node" />
+import { EventEmitter } from "events";
+import { RedisOptions, NodeKey, NodeRole } from "./util";
+import Redis from "../Redis";
+export default class ConnectionPool extends EventEmitter {
+    private redisOptions;
+    private nodes;
+    private specifiedOptions;
+    constructor(redisOptions: any);
+    getNodes(role?: NodeRole): Redis[];
+    getInstanceByKey(key: NodeKey): Redis;
+    getSampleInstance(role: NodeRole): Redis;
+    /**
+     * Add a master node to the pool
+     * @param node
+     */
+    addMasterNode(node: RedisOptions): boolean;
+    /**
+     * Creates a Redis connection instance from the node options
+     * @param node
+     * @param readOnly
+     */
+    createRedisFromOptions(node: RedisOptions, readOnly: boolean): Redis;
+    /**
+     * Find or create a connection to the node
+     */
+    findOrCreate(node: RedisOptions, readOnly?: boolean): Redis;
+    /**
+     * Reset the pool with a set of nodes.
+     * The old node will be removed.
+     */
+    reset(nodes: RedisOptions[]): void;
+    /**
+     * Remove a node from the pool.
+     */
+    private removeNode;
+}
package/built/cluster/ConnectionPool.js
@@ -0,0 +1,154 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const events_1 = require("events");
+const utils_1 = require("../utils");
+const util_1 = require("./util");
+const Redis_1 = require("../Redis");
+const debug = (0, utils_1.Debug)("cluster:connectionPool");
+class ConnectionPool extends events_1.EventEmitter {
+    constructor(redisOptions) {
+        super();
+        this.redisOptions = redisOptions;
+        // master + slave = all
+        this.nodes = {
+            all: {},
+            master: {},
+            slave: {},
+        };
+        this.specifiedOptions = {};
+    }
+    getNodes(role = "all") {
+        const nodes = this.nodes[role];
+        return Object.keys(nodes).map((key) => nodes[key]);
+    }
+    getInstanceByKey(key) {
+        return this.nodes.all[key];
+    }
+    getSampleInstance(role) {
+        const keys = Object.keys(this.nodes[role]);
+        const sampleKey = (0, utils_1.sample)(keys);
+        return this.nodes[role][sampleKey];
+    }
+    /**
+     * Add a master node to the pool
+     * @param node
+     */
+    addMasterNode(node) {
+        const key = (0, util_1.getNodeKey)(node.options);
+        const redis = this.createRedisFromOptions(node, node.options.readOnly);
+        //Master nodes aren't read-only
+        if (!node.options.readOnly) {
+            this.nodes.all[key] = redis;
+            this.nodes.master[key] = redis;
+            return true;
+        }
+        return false;
+    }
+    /**
+     * Creates a Redis connection instance from the node options
+     * @param node
+     * @param readOnly
+     */
+    createRedisFromOptions(node, readOnly) {
+        const redis = new Redis_1.default((0, utils_1.defaults)({
+            // Never try to reconnect when a node is lose,
+            // instead, waiting for a `MOVED` error and
+            // fetch the slots again.
+            retryStrategy: null,
+            // Offline queue should be enabled so that
+            // we don't need to wait for the `ready` event
+            // before sending commands to the node.
+            enableOfflineQueue: true,
+            readOnly: readOnly,
+        }, node, this.redisOptions, { lazyConnect: true }));
+        return redis;
+    }
+    /**
+     * Find or create a connection to the node
+     */
+    findOrCreate(node, readOnly = false) {
+        const key = (0, util_1.getNodeKey)(node);
+        readOnly = Boolean(readOnly);
+        if (this.specifiedOptions[key]) {
+            Object.assign(node, this.specifiedOptions[key]);
+        }
+        else {
+            this.specifiedOptions[key] = node;
+        }
+        let redis;
+        if (this.nodes.all[key]) {
+            redis = this.nodes.all[key];
+            if (redis.options.readOnly !== readOnly) {
+                redis.options.readOnly = readOnly;
+                debug("Change role of %s to %s", key, readOnly ? "slave" : "master");
+                redis[readOnly ? "readonly" : "readwrite"]().catch(utils_1.noop);
+                if (readOnly) {
+                    delete this.nodes.master[key];
+                    this.nodes.slave[key] = redis;
+                }
+                else {
+                    delete this.nodes.slave[key];
+                    this.nodes.master[key] = redis;
+                }
+            }
+        }
+        else {
+            debug("Connecting to %s as %s", key, readOnly ? "slave" : "master");
+            redis = this.createRedisFromOptions(node, readOnly);
+            this.nodes.all[key] = redis;
+            this.nodes[readOnly ? "slave" : "master"][key] = redis;
+            redis.once("end", () => {
+                this.removeNode(key);
+                this.emit("-node", redis, key);
+                if (!Object.keys(this.nodes.all).length) {
+                    this.emit("drain");
+                }
+            });
+            this.emit("+node", redis, key);
+            redis.on("error", function (error) {
+                this.emit("nodeError", error, key);
+            });
+        }
+        return redis;
+    }
+    /**
+     * Reset the pool with a set of nodes.
+     * The old node will be removed.
+     */
+    reset(nodes) {
+        debug("Reset with %O", nodes);
+        const newNodes = {};
+        nodes.forEach((node) => {
+            const key = (0, util_1.getNodeKey)(node);
+            // Don't override the existing (master) node
+            // when the current one is slave.
+            if (!(node.readOnly && newNodes[key])) {
+                newNodes[key] = node;
+            }
+        });
+        Object.keys(this.nodes.all).forEach((key) => {
+            if (!newNodes[key]) {
+                debug("Disconnect %s because the node does not hold any slot", key);
+                this.nodes.all[key].disconnect();
+                this.removeNode(key);
+            }
+        });
+        Object.keys(newNodes).forEach((key) => {
+            const node = newNodes[key];
+            this.findOrCreate(node, node.readOnly);
+        });
+    }
+    /**
+     * Remove a node from the pool.
+     */
+    removeNode(key) {
+        const { nodes } = this;
+        if (nodes.all[key]) {
+            debug("Remove %s from the pool", key);
+            delete nodes.all[key];
+        }
+        delete nodes.master[key];
+        delete nodes.slave[key];
+    }
+}
+exports.default = ConnectionPool;
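ConnectionPool keys every connection by node (via getNodeKey) and emits "+node", "-node", "drain", and "nodeError" as connections are created and removed. A short observational sketch, assuming the node lifecycle events that the upstream ioredis Cluster class re-emits ("+node", "-node", "node error") and placeholder endpoints:

    import { Cluster } from "ioredis";

    const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }]);

    // Fired when the pool creates a connection for a newly discovered node.
    cluster.on("+node", (redis) => {
      console.log("node added:", redis.options.host, redis.options.port);
    });

    // Fired when a node's connection ends and it is removed from the pool.
    cluster.on("-node", (redis) => {
      console.log("node removed:", redis.options.host, redis.options.port);
    });

    // Per-node errors are surfaced without tearing down the whole cluster client.
    cluster.on("node error", (error: Error, nodeKey: string) => {
      console.error(`error on ${nodeKey}:`, error.message);
    });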
package/built/cluster/DelayQueue.d.ts
@@ -0,0 +1,20 @@
+export interface DelayQueueOptions {
+    callback?: Function;
+    timeout: number;
+}
+/**
+ * Queue that runs items after specified duration
+ */
+export default class DelayQueue {
+    private queues;
+    private timeouts;
+    /**
+     * Add a new item to the queue
+     *
+     * @param bucket bucket name
+     * @param item function that will run later
+     * @param options
+     */
+    push(bucket: string, item: Function, options: DelayQueueOptions): void;
+    private execute;
+}
package/built/cluster/DelayQueue.js
@@ -0,0 +1,53 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const utils_1 = require("../utils");
+const Deque = require("denque");
+const debug = (0, utils_1.Debug)("delayqueue");
+/**
+ * Queue that runs items after specified duration
+ */
+class DelayQueue {
+    constructor() {
+        this.queues = {};
+        this.timeouts = {};
+    }
+    /**
+     * Add a new item to the queue
+     *
+     * @param bucket bucket name
+     * @param item function that will run later
+     * @param options
+     */
+    push(bucket, item, options) {
+        const callback = options.callback || process.nextTick;
+        if (!this.queues[bucket]) {
+            this.queues[bucket] = new Deque();
+        }
+        const queue = this.queues[bucket];
+        queue.push(item);
+        if (!this.timeouts[bucket]) {
+            this.timeouts[bucket] = setTimeout(() => {
+                callback(() => {
+                    this.timeouts[bucket] = null;
+                    this.execute(bucket);
+                });
+            }, options.timeout);
+        }
+    }
+    execute(bucket) {
+        const queue = this.queues[bucket];
+        if (!queue) {
+            return;
+        }
+        const { length } = queue;
+        if (!length) {
+            return;
+        }
+        debug("send %d commands in %s queue", length, bucket);
+        this.queues[bucket] = null;
+        while (queue.length > 0) {
+            queue.shift()();
+        }
+    }
+}
+exports.default = DelayQueue;
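DelayQueue coalesces deferred work per bucket: the first push arms a single timer for that bucket, later pushes only append, and when the timer fires the whole bucket drains in one batch; the cluster client uses it to delay and batch command retries. A standalone usage sketch, assuming the built file can be deep-imported at the path listed above (a hypothetical import, since DelayQueue is not re-exported from the package entry point):

    // Hypothetical deep import mirroring the file listing above; requires esModuleInterop.
    import DelayQueue from "ioredis-om/built/cluster/DelayQueue";

    const queue = new DelayQueue();

    // All three items land in the same "failover" bucket and share one 50 ms timer,
    // so their callbacks run together when the bucket is drained.
    for (let i = 0; i < 3; i++) {
      queue.push("failover", () => console.log(`retrying command ${i}`), { timeout: 50 });
    }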
package/built/cluster/ShardedSubscriber.d.ts
@@ -0,0 +1,36 @@
+/// <reference types="node" />
+import EventEmitter = require("events");
+import { RedisOptions } from "./util";
+import Redis from "../Redis";
+import { ClusterOptions } from "./ClusterOptions";
+declare const SubscriberStatus: {
+    readonly IDLE: "idle";
+    readonly STARTING: "starting";
+    readonly CONNECTED: "connected";
+    readonly STOPPING: "stopping";
+    readonly ENDED: "ended";
+};
+declare type SubscriberStatus = typeof SubscriberStatus[keyof typeof SubscriberStatus];
+export default class ShardedSubscriber {
+    private readonly emitter;
+    private readonly nodeKey;
+    private status;
+    private instance;
+    private connectPromise;
+    private lazyConnect;
+    private readonly messageListeners;
+    constructor(emitter: EventEmitter, options: RedisOptions, redisOptions?: ClusterOptions["redisOptions"]);
+    start(): Promise<void>;
+    stop(): void;
+    isStarted(): boolean;
+    get subscriberStatus(): SubscriberStatus;
+    isHealthy(): boolean;
+    getInstance(): Redis | null;
+    getNodeKey(): string;
+    isLazyConnect(): boolean;
+    private onEnd;
+    private onError;
+    private onMoved;
+    private updateStatus;
+}
+export {};