ioredis 5.8.2 → 5.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +16 -2
- package/built/Command.d.ts +42 -0
- package/built/Command.js +112 -14
- package/built/Pipeline.js +2 -1
- package/built/Redis.d.ts +2 -0
- package/built/Redis.js +44 -4
- package/built/cluster/ClusterSubscriberGroup.d.ts +45 -26
- package/built/cluster/ClusterSubscriberGroup.js +188 -99
- package/built/cluster/ShardedSubscriber.d.ts +20 -0
- package/built/cluster/ShardedSubscriber.js +89 -0
- package/built/cluster/index.d.ts +2 -0
- package/built/cluster/index.js +71 -7
- package/built/redis/RedisOptions.d.ts +10 -0
- package/built/redis/RedisOptions.js +1 -0
- package/built/utils/argumentParsers.d.ts +14 -0
- package/built/utils/argumentParsers.js +74 -0
- package/package.json +2 -2
package/README.md
CHANGED

@@ -21,7 +21,7 @@ ioredis is a stable project and maintenance is done on a best-effort basis for r
 ioredis is a robust, full-featured Redis client that is
 used in the world's biggest online commerce company [Alibaba](http://www.alibaba.com/) and many other awesome companies.

-0. Full-featured. It supports [Cluster](http://redis.io/topics/cluster-tutorial), [Sentinel](https://redis.io/docs/
+0. Full-featured. It supports [Cluster](http://redis.io/topics/cluster-tutorial), [Sentinel](https://redis.io/docs/latest/operate/oss_and_stack/management/sentinel/), [Streams](https://redis.io/topics/streams-intro), [Pipelining](http://redis.io/topics/pipelining), and of course [Lua scripting](http://redis.io/commands/eval), [Redis Functions](https://redis.io/topics/functions-intro), [Pub/Sub](http://redis.io/topics/pubsub) (with the support of binary messages).
 1. High performance 🚀.
 2. Delightful API 👏. It works with Node callbacks and Native promises.
 3. Transformation of command arguments and replies.

@@ -798,6 +798,20 @@ const redis = new Redis({

 Set maxRetriesPerRequest to `null` to disable this behavior, and every command will wait forever until the connection is alive again (which is the default behavior before ioredis v4).

+### Blocking Command Timeout
+
+ioredis can apply a client-side timeout to blocking commands (such as `blpop`, `brpop`, `bzpopmin`, `bzmpop`, `blmpop`, `xread`, `xreadgroup`, etc.). This protects against scenarios where the TCP connection becomes a zombie (e.g., due to a silent network failure like a Docker network disconnect) and Redis never replies.
+
+For commands with a finite timeout (e.g., `blpop("key", 5)`), ioredis automatically sets a client-side deadline based on the command's timeout plus a small grace period. If no reply arrives before the deadline, the command resolves with `null`, the same value Redis returns when a blocking command times out normally.
+
+For commands that intentionally block forever (e.g., `timeout = 0` or `BLOCK 0`), you can provide a safety net via the optional `blockingTimeout` option (milliseconds):
+
+```javascript
+const redis = new Redis({
+  blockingTimeout: 30000, // Resolve with null after 30 seconds when timeout=0/BLOCK 0
+});
+```
+
 ### Reconnect on Error

 Besides auto-reconnect when the connection is closed, ioredis supports reconnecting on certain Redis errors using the `reconnectOnError` option. Here's an example that will reconnect when receiving `READONLY` error:

@@ -1213,7 +1227,7 @@ cluster.on("smessage", (channel, message) => {
   console.log(message);
 });

-
+
 //Subscribe to the channels on the same slot
 cluster.ssubscribe("channel{my}:1", "channel{my}:2").then( ( count: number ) => {
   console.log(count);
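Editor's note: to make the two README cases above concrete, here is a hedged sketch of the finite-timeout path (values are illustrative; per the RedisOptions change later in this diff, `blockingTimeoutGrace` defaults to 100 ms):

```javascript
const Redis = require("ioredis");

const redis = new Redis({ blockingTimeoutGrace: 100 });

async function main() {
  // Finite timeout: Redis is asked to block for up to 5 seconds; the client also
  // arms its own deadline at roughly 5s + 100ms grace. If the connection silently
  // dies, the promise still settles with null instead of hanging forever.
  const reply = await redis.blpop("jobs", 5);
  if (reply === null) {
    console.log("timed out (server-side timeout or client-side deadline)");
  }
}

main().catch(console.error);
```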
package/built/Command.d.ts
CHANGED

@@ -32,6 +32,28 @@ export interface CommandNameFlags {
     WILL_DISCONNECT: ["quit"];
     HANDSHAKE_COMMANDS: ["auth", "select", "client", "readonly", "info"];
     IGNORE_RECONNECT_ON_ERROR: ["client"];
+    BLOCKING_COMMANDS: [
+        "blpop",
+        "brpop",
+        "brpoplpush",
+        "blmove",
+        "bzpopmin",
+        "bzpopmax",
+        "bzmpop",
+        "blmpop",
+        "xread",
+        "xreadgroup"
+    ];
+    LAST_ARG_TIMEOUT_COMMANDS: [
+        "blpop",
+        "brpop",
+        "brpoplpush",
+        "blmove",
+        "bzpopmin",
+        "bzpopmax"
+    ];
+    FIRST_ARG_TIMEOUT_COMMANDS: ["bzmpop", "blmpop"];
+    BLOCK_OPTION_COMMANDS: ["xread", "xreadgroup"];
 }
 /**
  * Command instance

@@ -82,6 +104,8 @@ export default class Command implements Respondable {
     private callback;
     private transformed;
     private _commandTimeoutTimer?;
+    private _blockingTimeoutTimer?;
+    private _blockingDeadline?;
     private slot?;
     private keys?;
     /**

@@ -110,6 +134,24 @@ export default class Command implements Respondable {
      * and generating an error.
      */
     setTimeout(ms: number): void;
+    /**
+     * Set a timeout for blocking commands.
+     * When the timeout expires, the command resolves with null (matching Redis behavior).
+     * This handles the case of undetectable network failures (e.g., docker network disconnect)
+     * where the TCP connection becomes a zombie and no close event fires.
+     */
+    setBlockingTimeout(ms: number): void;
+    /**
+     * Extract the blocking timeout from the command arguments.
+     *
+     * @returns The timeout in seconds, null for indefinite blocking (timeout of 0),
+     * or undefined if this is not a blocking command
+     */
+    extractBlockingTimeout(): number | null | undefined;
+    /**
+     * Clear the command and blocking timers
+     */
+    private _clearTimers;
     private initPromise;
     /**
      * Iterate through the command arguments that are considered keys.
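Editor's note: the three timeout-extraction buckets declared above correspond to where each blocking command carries its timeout in its argument list. A hedged illustration using plain ioredis calls (argument values are made up; only the argument positions matter):

```javascript
// LAST_ARG_TIMEOUT_COMMANDS: the timeout is the last argument, in seconds.
await redis.blpop("queue", 5);

// FIRST_ARG_TIMEOUT_COMMANDS: the timeout is the first argument, in seconds.
await redis.blmpop(5, 2, "list{t}:a", "list{t}:b", "LEFT");

// BLOCK_OPTION_COMMANDS: the timeout follows the BLOCK token, in milliseconds.
await redis.xread("BLOCK", 5000, "STREAMS", "mystream", "$");
```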
package/built/Command.js
CHANGED

@@ -4,6 +4,7 @@ const commands_1 = require("@ioredis/commands");
 const calculateSlot = require("cluster-key-slot");
 const standard_as_callback_1 = require("standard-as-callback");
 const utils_1 = require("./utils");
+const argumentParsers_1 = require("./utils/argumentParsers");
 /**
  * Command instance
  *

@@ -72,6 +73,7 @@ class Command {
      * Check whether the command has the flag
      */
     static checkFlag(flagName, commandName) {
+        commandName = commandName.toLowerCase();
         return !!this.getFlagMap()[flagName][commandName];
     }
     static setArgumentTransformer(name, func) {

@@ -194,6 +196,81 @@ class Command {
             }, ms);
         }
     }
+    /**
+     * Set a timeout for blocking commands.
+     * When the timeout expires, the command resolves with null (matching Redis behavior).
+     * This handles the case of undetectable network failures (e.g., docker network disconnect)
+     * where the TCP connection becomes a zombie and no close event fires.
+     */
+    setBlockingTimeout(ms) {
+        if (ms <= 0) {
+            return;
+        }
+        // Clear existing timer if any (can happen when command moves from offline to command queue)
+        if (this._blockingTimeoutTimer) {
+            clearTimeout(this._blockingTimeoutTimer);
+            this._blockingTimeoutTimer = undefined;
+        }
+        const now = Date.now();
+        // First call: establish absolute deadline
+        if (this._blockingDeadline === undefined) {
+            this._blockingDeadline = now + ms;
+        }
+        // Check if we've already exceeded the deadline
+        const remaining = this._blockingDeadline - now;
+        if (remaining <= 0) {
+            // Resolve with null to indicate timeout (same as Redis behavior)
+            this.resolve(null);
+            return;
+        }
+        this._blockingTimeoutTimer = setTimeout(() => {
+            if (this.isResolved) {
+                this._blockingTimeoutTimer = undefined;
+                return;
+            }
+            this._blockingTimeoutTimer = undefined;
+            // Timeout expired - resolve with null (same as Redis behavior when blocking command times out)
+            this.resolve(null);
+        }, remaining);
+    }
+    /**
+     * Extract the blocking timeout from the command arguments.
+     *
+     * @returns The timeout in seconds, null for indefinite blocking (timeout of 0),
+     * or undefined if this is not a blocking command
+     */
+    extractBlockingTimeout() {
+        const args = this.args;
+        if (!args || args.length === 0) {
+            return undefined;
+        }
+        const name = this.name.toLowerCase();
+        if (Command.checkFlag("LAST_ARG_TIMEOUT_COMMANDS", name)) {
+            return (0, argumentParsers_1.parseSecondsArgument)(args[args.length - 1]);
+        }
+        if (Command.checkFlag("FIRST_ARG_TIMEOUT_COMMANDS", name)) {
+            return (0, argumentParsers_1.parseSecondsArgument)(args[0]);
+        }
+        if (Command.checkFlag("BLOCK_OPTION_COMMANDS", name)) {
+            return (0, argumentParsers_1.parseBlockOption)(args);
+        }
+        return undefined;
+    }
+    /**
+     * Clear the command and blocking timers
+     */
+    _clearTimers() {
+        const existingTimer = this._commandTimeoutTimer;
+        if (existingTimer) {
+            clearTimeout(existingTimer);
+            delete this._commandTimeoutTimer;
+        }
+        const blockingTimer = this._blockingTimeoutTimer;
+        if (blockingTimer) {
+            clearTimeout(blockingTimer);
+            delete this._blockingTimeoutTimer;
+        }
+    }
     initPromise() {
         const promise = new Promise((resolve, reject) => {
             if (!this.transformed) {

@@ -205,14 +282,15 @@ class Command {
                 this.stringifyArguments();
             }
             this.resolve = this._convertValue(resolve);
-
-            this.
+            this.reject = (err) => {
+                this._clearTimers();
+                if (this.errorStack) {
                     reject((0, utils_1.optimizeErrorStack)(err, this.errorStack.stack, __dirname));
-            }
-
-
-
-            }
+                }
+                else {
+                    reject(err);
+                }
+            };
         });
         this.promise = (0, standard_as_callback_1.default)(promise, this.callback);
     }

@@ -222,9 +300,11 @@ class Command {
     _iterateKeys(transform = (key) => key) {
         if (typeof this.keys === "undefined") {
             this.keys = [];
-            if ((0, commands_1.exists)(this.name)) {
+            if ((0, commands_1.exists)(this.name, { caseInsensitive: true })) {
                 // @ts-expect-error
-                const keyIndexes = (0, commands_1.getKeyIndexes)(this.name, this.args
+                const keyIndexes = (0, commands_1.getKeyIndexes)(this.name, this.args, {
+                    nameCaseInsensitive: true,
+                });
                 for (const index of keyIndexes) {
                     this.args[index] = transform(this.args[index]);
                     this.keys.push(this.args[index]);

@@ -239,11 +319,7 @@ class Command {
     _convertValue(resolve) {
         return (value) => {
             try {
-
-                if (existingTimer) {
-                    clearTimeout(existingTimer);
-                    delete this._commandTimeoutTimer;
-                }
+                this._clearTimers();
                 resolve(this.transformReply(value));
                 this.isResolved = true;
             }

@@ -272,6 +348,28 @@ Command.FLAGS = {
     WILL_DISCONNECT: ["quit"],
     HANDSHAKE_COMMANDS: ["auth", "select", "client", "readonly", "info"],
     IGNORE_RECONNECT_ON_ERROR: ["client"],
+    BLOCKING_COMMANDS: [
+        "blpop",
+        "brpop",
+        "brpoplpush",
+        "blmove",
+        "bzpopmin",
+        "bzpopmax",
+        "bzmpop",
+        "blmpop",
+        "xread",
+        "xreadgroup",
+    ],
+    LAST_ARG_TIMEOUT_COMMANDS: [
+        "blpop",
+        "brpop",
+        "brpoplpush",
+        "blmove",
+        "bzpopmin",
+        "bzpopmax",
+    ],
+    FIRST_ARG_TIMEOUT_COMMANDS: ["bzmpop", "blmpop"],
+    BLOCK_OPTION_COMMANDS: ["xread", "xreadgroup"],
 };
 Command._transformer = {
     argument: {},
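Editor's note: the key property of `setBlockingTimeout` above is that re-arming the timer (which happens when a command moves from the offline queue to the command queue) never extends the original deadline. A minimal standalone sketch of that timer logic, not the ioredis class itself:

```javascript
// Absolute-deadline timer: the first arm() fixes the deadline; later calls only
// re-schedule the callback for whatever time remains.
class DeadlineTimer {
  constructor() {
    this.deadline = undefined;
    this.timer = undefined;
  }
  arm(ms, onExpire) {
    if (ms <= 0) return;
    if (this.timer) clearTimeout(this.timer); // re-arm without extending
    const now = Date.now();
    if (this.deadline === undefined) this.deadline = now + ms; // first call fixes the deadline
    const remaining = this.deadline - now;
    if (remaining <= 0) return onExpire(); // already past the deadline
    this.timer = setTimeout(onExpire, remaining);
  }
}

// Usage sketch: arming twice 1s apart still fires ~5s after the first arm().
const t = new DeadlineTimer();
t.arm(5000, () => console.log("expired"));
setTimeout(() => t.arm(5000, () => console.log("expired")), 1000);
```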
package/built/Pipeline.js
CHANGED

@@ -101,7 +101,8 @@ class Pipeline extends Commander_1.default {
             }
         }
         else if (!command.inTransaction) {
-            const isReadOnly = (0, commands_1.exists)(command.name
+            const isReadOnly = (0, commands_1.exists)(command.name, { caseInsensitive: true }) &&
+                (0, commands_1.hasFlag)(command.name, "readonly", { nameCaseInsensitive: true });
             if (!isReadOnly) {
                 retriable = false;
                 break;
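Editor's note: the `caseInsensitive` / `nameCaseInsensitive` options also appear in the Command.js and Redis.js hunks and come with the `@ioredis/commands` 1.5.0 bump shown in package.json at the end of this diff. A hedged sketch of the lookup they enable, assuming only the `exists`/`hasFlag` signatures visible in this diff:

```javascript
const { exists, hasFlag } = require("@ioredis/commands");

// Mixed-case command names now resolve like their lowercase forms
// (illustrative; exact results depend on the bundled command table).
const name = "GET";
const isKnown = exists(name, { caseInsensitive: true });
const isReadOnly = isKnown && hasFlag(name, "readonly", { nameCaseInsensitive: true });
console.log(isKnown, isReadOnly);
```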
package/built/Redis.d.ts
CHANGED

@@ -161,6 +161,8 @@ declare class Redis extends Commander implements DataHandledable {
      * @ignore
      */
     sendCommand(command: Command, stream?: WriteableStream): unknown;
+    private getBlockingTimeoutInMs;
+    private getConfiguredBlockingTimeout;
     private setSocketTimeout;
     scanStream(options?: ScanStreamOptions): ScanStream;
     scanBufferStream(options?: ScanStreamOptions): ScanStream;
package/built/Redis.js
CHANGED

@@ -325,7 +325,7 @@ class Redis extends Commander_1.default {
      * @ignore
      */
     sendCommand(command, stream) {
-        var _a, _b;
+        var _a, _b, _c;
         if (this.status === "wait") {
             this.connect().catch(lodash_1.noop);
         }

@@ -341,11 +341,12 @@ class Redis extends Commander_1.default {
         if (typeof this.options.commandTimeout === "number") {
             command.setTimeout(this.options.commandTimeout);
         }
+        const blockingTimeout = this.getBlockingTimeoutInMs(command);
         let writable = this.status === "ready" ||
             (!stream &&
                 this.status === "connect" &&
-                (0, commands_1.exists)(command.name) &&
-                ((0, commands_1.hasFlag)(command.name, "loading") ||
+                (0, commands_1.exists)(command.name, { caseInsensitive: true }) &&
+                ((0, commands_1.hasFlag)(command.name, "loading", { nameCaseInsensitive: true }) ||
                     Command_1.default.checkFlag("HANDSHAKE_COMMANDS", command.name)));
         if (!this.stream) {
             writable = false;

@@ -378,11 +379,20 @@ class Redis extends Commander_1.default {
                 stream: stream,
                 select: this.condition.select,
             });
+            // For blocking commands, set a timeout while queued to ensure they don't wait forever
+            // if connection never becomes ready (e.g., docker network disconnect scenario)
+            // Use blockingTimeout if configured, otherwise fall back to the command's own timeout
+            if (Command_1.default.checkFlag("BLOCKING_COMMANDS", command.name)) {
+                const offlineTimeout = (_b = this.getConfiguredBlockingTimeout()) !== null && _b !== void 0 ? _b : blockingTimeout;
+                if (offlineTimeout !== undefined) {
+                    command.setBlockingTimeout(offlineTimeout);
+                }
+            }
         }
         else {
             // @ts-expect-error
             if (debug.enabled) {
-                debug("write command[%s]: %d -> %s(%o)", this._getDescription(), (
+                debug("write command[%s]: %d -> %s(%o)", this._getDescription(), (_c = this.condition) === null || _c === void 0 ? void 0 : _c.select, command.name, command.args);
             }
             if (stream) {
                 if ("isPipeline" in stream && stream.isPipeline) {

@@ -400,6 +410,9 @@ class Redis extends Commander_1.default {
                 stream: stream,
                 select: this.condition.select,
             });
+            if (blockingTimeout !== undefined) {
+                command.setBlockingTimeout(blockingTimeout);
+            }
             if (Command_1.default.checkFlag("WILL_DISCONNECT", command.name)) {
                 this.manuallyClosing = true;
             }

@@ -417,6 +430,33 @@ class Redis extends Commander_1.default {
         }
         return command.promise;
     }
+    getBlockingTimeoutInMs(command) {
+        var _a;
+        if (!Command_1.default.checkFlag("BLOCKING_COMMANDS", command.name)) {
+            return undefined;
+        }
+        const timeout = command.extractBlockingTimeout();
+        if (typeof timeout === "number") {
+            if (timeout > 0) {
+                // Finite timeout from command args - add grace period
+                return timeout + ((_a = this.options.blockingTimeoutGrace) !== null && _a !== void 0 ? _a : RedisOptions_1.DEFAULT_REDIS_OPTIONS.blockingTimeoutGrace);
+            }
+            // Command has timeout=0 (block forever), use blockingTimeout option as safety net
+            return this.getConfiguredBlockingTimeout();
+        }
+        if (timeout === null) {
+            // No BLOCK option found (e.g., XREAD without BLOCK), use blockingTimeout as safety net
+            return this.getConfiguredBlockingTimeout();
+        }
+        return undefined;
+    }
+    getConfiguredBlockingTimeout() {
+        if (typeof this.options.blockingTimeout === "number" &&
+            this.options.blockingTimeout > 0) {
+            return this.options.blockingTimeout;
+        }
+        return undefined;
+    }
     setSocketTimeout() {
         this.socketTimeoutTimer = setTimeout(() => {
             this.stream.destroy(new Error(`Socket timeout. Expecting data, but didn't receive any in ${this.options.socketTimeout}ms.`));

package/built/cluster/ClusterSubscriberGroup.d.ts
CHANGED

@@ -1,37 +1,39 @@
 /// <reference types="node" />
-import
-import
+import * as EventEmitter from "events";
+import ShardedSubscriber from "./ShardedSubscriber";
 /**
- * Redis
- *
- * messages between shards.
- *
- * Given that, we need at least one ClusterSubscriber per master endpoint/node.
+ * Redis distinguishes between "normal" and sharded PubSub. When using the normal PubSub feature,
+ * exactly one subscriber exists per cluster instance because the Redis cluster bus forwards
+ * messages between shards. Sharded PubSub removes this limitation by making each shard
+ * responsible for its own messages.
  *
- * This class
- *
- * to support this feature.
+ * This class coordinates one ShardedSubscriber per master node in the cluster, providing
+ * sharded PubSub support while keeping the public API backward compatible.
  */
 export default class ClusterSubscriberGroup {
-    private
+    private readonly subscriberGroupEmitter;
     private shardedSubscribers;
     private clusterSlots;
     private subscriberToSlotsIndex;
     private channels;
+    private failedAttemptsByNode;
+    private isResetting;
+    private pendingReset;
+    private static readonly MAX_RETRY_ATTEMPTS;
+    private static readonly MAX_BACKOFF_MS;
+    private static readonly BASE_BACKOFF_MS;
     /**
      * Register callbacks
      *
      * @param cluster
      */
-    constructor(
+    constructor(subscriberGroupEmitter: EventEmitter);
    /**
     * Get the responsible subscriber.
     *
-     * Returns null if no subscriber was found
-     *
     * @param slot
     */
-    getResponsibleSubscriber(slot: number):
+    getResponsibleSubscriber(slot: number): ShardedSubscriber | undefined;
    /**
     * Adds a channel for which this subscriber group is responsible
     *

@@ -50,24 +52,17 @@ export default class ClusterSubscriberGroup {
     /**
      * Start all not yet started subscribers
      */
-    start():
-    /**
-     * Add a subscriber to the group of subscribers
-     *
-     * @param redis
-     */
-    private _addSubscriber;
+    start(): Promise<any[]>;
     /**
-     *
-     * @param redis
+     * Resets the subscriber group by disconnecting all subscribers that are no longer needed and connecting new ones.
      */
-
+    reset(clusterSlots: string[][], clusterNodes: any[]): Promise<void>;
     /**
      * Refreshes the subscriber-related slot ranges
      *
      * Returns false if no refresh was needed
      *
-     * @param
+     * @param targetSlots
      */
     private _refreshSlots;
     /**

@@ -83,4 +78,28 @@ export default class ClusterSubscriberGroup {
      * @private
      */
     private _slotsAreEqual;
+    /**
+     * Checks if any subscribers are in an unhealthy state.
+     *
+     * A subscriber is considered unhealthy if:
+     * - It exists but is not started (failed/disconnected)
+     * - It's missing entirely for a node that should have one
+     *
+     * @returns true if any subscribers need to be recreated
+     */
+    private hasUnhealthySubscribers;
+    /**
+     * Handles failed subscriber connections by emitting an event to refresh the slots cache
+     * after a backoff period.
+     *
+     * @param error
+     * @param nodeKey
+     */
+    private handleSubscriberConnectFailed;
+    /**
+     * Handles successful subscriber connections by resetting the failed attempts counter.
+     *
+     * @param nodeKey
+     */
+    private handleSubscriberConnectSucceeded;
 }

package/built/cluster/ClusterSubscriberGroup.js
CHANGED

@@ -1,21 +1,18 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 const utils_1 = require("../utils");
-const ClusterSubscriber_1 = require("./ClusterSubscriber");
-const ConnectionPool_1 = require("./ConnectionPool");
 const util_1 = require("./util");
 const calculateSlot = require("cluster-key-slot");
+const ShardedSubscriber_1 = require("./ShardedSubscriber");
 const debug = (0, utils_1.Debug)("cluster:subscriberGroup");
 /**
- * Redis
- *
- * messages between shards.
- *
- * Given that, we need at least one ClusterSubscriber per master endpoint/node.
+ * Redis distinguishes between "normal" and sharded PubSub. When using the normal PubSub feature,
+ * exactly one subscriber exists per cluster instance because the Redis cluster bus forwards
+ * messages between shards. Sharded PubSub removes this limitation by making each shard
+ * responsible for its own messages.
  *
- * This class
- *
- * to support this feature.
+ * This class coordinates one ShardedSubscriber per master node in the cluster, providing
+ * sharded PubSub support while keeping the public API backward compatible.
  */
 class ClusterSubscriberGroup {
     /**

@@ -23,31 +20,50 @@ class ClusterSubscriberGroup {
      *
      * @param cluster
      */
-    constructor(
-        this.
+    constructor(subscriberGroupEmitter) {
+        this.subscriberGroupEmitter = subscriberGroupEmitter;
         this.shardedSubscribers = new Map();
         this.clusterSlots = [];
-        //Simple [min, max] slot ranges aren't enough because you can migrate single slots
+        // Simple [min, max] slot ranges aren't enough because you can migrate single slots
         this.subscriberToSlotsIndex = new Map();
         this.channels = new Map();
-
-
-
-
-
-
-
-
-
-
-
-
+        this.failedAttemptsByNode = new Map();
+        // Only latest pending reset kept; throttled by refreshSlotsCache's isRefreshing + backoff delay
+        this.isResetting = false;
+        this.pendingReset = null;
+        /**
+         * Handles failed subscriber connections by emitting an event to refresh the slots cache
+         * after a backoff period.
+         *
+         * @param error
+         * @param nodeKey
+         */
+        this.handleSubscriberConnectFailed = (error, nodeKey) => {
+            const currentAttempts = this.failedAttemptsByNode.get(nodeKey) || 0;
+            const failedAttempts = currentAttempts + 1;
+            this.failedAttemptsByNode.set(nodeKey, failedAttempts);
+            const attempts = Math.min(failedAttempts, ClusterSubscriberGroup.MAX_RETRY_ATTEMPTS);
+            const backoff = Math.min(ClusterSubscriberGroup.BASE_BACKOFF_MS * 2 ** attempts, ClusterSubscriberGroup.MAX_BACKOFF_MS);
+            const jitter = Math.floor((Math.random() - 0.5) * (backoff * 0.5));
+            const delay = Math.max(0, backoff + jitter);
+            debug("Failed to connect subscriber for %s. Refreshing slots in %dms", nodeKey, delay);
+            this.subscriberGroupEmitter.emit("subscriberConnectFailed", {
+                delay,
+                error,
+            });
+        };
+        /**
+         * Handles successful subscriber connections by resetting the failed attempts counter.
+         *
+         * @param nodeKey
+         */
+        this.handleSubscriberConnectSucceeded = (nodeKey) => {
+            this.failedAttemptsByNode.delete(nodeKey);
+        };
     }
     /**
      * Get the responsible subscriber.
      *
-     * Returns null if no subscriber was found
-     *
      * @param slot
      */
     getResponsibleSubscriber(slot) {

@@ -61,11 +77,12 @@ class ClusterSubscriberGroup {
      */
     addChannels(channels) {
         const slot = calculateSlot(channels[0]);
-        //Check if the all channels belong to the same slot and otherwise reject the operation
-
-            if (calculateSlot(c)
+        // Check if the all channels belong to the same slot and otherwise reject the operation
+        for (const c of channels) {
+            if (calculateSlot(c) !== slot) {
                 return -1;
-
+            }
+        }
         const currChannels = this.channels.get(slot);
         if (!currChannels) {
             this.channels.set(slot, channels);

@@ -73,7 +90,7 @@ class ClusterSubscriberGroup {
         else {
             this.channels.set(slot, currChannels.concat(channels));
         }
-        return
+        return Array.from(this.channels.values()).reduce((sum, array) => sum + array.length, 0);
     }
     /**
      * Removes channels for which the subscriber group is responsible by optionally unsubscribing

@@ -81,17 +98,18 @@ class ClusterSubscriberGroup {
      */
     removeChannels(channels) {
         const slot = calculateSlot(channels[0]);
-        //Check if the all channels belong to the same slot and otherwise reject the operation
-
-            if (calculateSlot(c)
+        // Check if the all channels belong to the same slot and otherwise reject the operation
+        for (const c of channels) {
+            if (calculateSlot(c) !== slot) {
                 return -1;
-
+            }
+        }
         const slotChannels = this.channels.get(slot);
         if (slotChannels) {
-            const updatedChannels = slotChannels.filter(c => !channels.includes(c));
+            const updatedChannels = slotChannels.filter((c) => !channels.includes(c));
             this.channels.set(slot, updatedChannels);
         }
-        return
+        return Array.from(this.channels.values()).reduce((sum, array) => sum + array.length, 0);
     }
     /**
     * Disconnect all subscribers

@@ -105,79 +123,123 @@ class ClusterSubscriberGroup {
      * Start all not yet started subscribers
      */
     start() {
+        const startPromises = [];
         for (const s of this.shardedSubscribers.values()) {
             if (!s.isStarted()) {
-
+                startPromises.push(s
+                    .start()
+                    .then(() => {
+                    this.handleSubscriberConnectSucceeded(s.getNodeKey());
+                })
+                    .catch((err) => {
+                    this.handleSubscriberConnectFailed(err, s.getNodeKey());
+                }));
             }
         }
+        return Promise.all(startPromises);
     }
     /**
-     *
-     *
-     * @param redis
+     * Resets the subscriber group by disconnecting all subscribers that are no longer needed and connecting new ones.
      */
-
-
-
-
-    const nodeKey = (0, util_1.getNodeKey)(redis.options);
-    this.shardedSubscribers.set(nodeKey, sub);
-    sub.start();
-    // We need to attempt to resubscribe them in case the new node serves their slot
-    this._resubscribe();
-    this.cluster.emit("+subscriber");
-    return sub;
+    async reset(clusterSlots, clusterNodes) {
+        if (this.isResetting) {
+            this.pendingReset = { slots: clusterSlots, nodes: clusterNodes };
+            return;
         }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        this.isResetting = true;
+        try {
+            const hasTopologyChanged = this._refreshSlots(clusterSlots);
+            const hasFailedSubscribers = this.hasUnhealthySubscribers();
+            if (!hasTopologyChanged && !hasFailedSubscribers) {
+                debug("No topology change detected or failed subscribers. Skipping reset.");
+                return;
+            }
+            // For each of the sharded subscribers
+            for (const [nodeKey, shardedSubscriber] of this.shardedSubscribers) {
+                if (
+                // If the subscriber is still responsible for a slot range and is running then keep it
+                this.subscriberToSlotsIndex.has(nodeKey) &&
+                    shardedSubscriber.isStarted()) {
+                    debug("Skipping deleting subscriber for %s", nodeKey);
+                    continue;
+                }
+                debug("Removing subscriber for %s", nodeKey);
+                // Otherwise stop the subscriber and remove it
+                shardedSubscriber.stop();
+                this.shardedSubscribers.delete(nodeKey);
+                this.subscriberGroupEmitter.emit("-subscriber");
+            }
+            const startPromises = [];
+            // For each node in slots cache
+            for (const [nodeKey, _] of this.subscriberToSlotsIndex) {
+                // If we already have a subscriber for this node then keep it
+                if (this.shardedSubscribers.has(nodeKey)) {
+                    debug("Skipping creating new subscriber for %s", nodeKey);
+                    continue;
+                }
+                debug("Creating new subscriber for %s", nodeKey);
+                // Otherwise create a new subscriber
+                const redis = clusterNodes.find((node) => {
+                    return (0, util_1.getNodeKey)(node.options) === nodeKey;
+                });
+                if (!redis) {
+                    debug("Failed to find node for key %s", nodeKey);
+                    continue;
+                }
+                const sub = new ShardedSubscriber_1.default(this.subscriberGroupEmitter, redis.options);
+                this.shardedSubscribers.set(nodeKey, sub);
+                startPromises.push(sub
+                    .start()
+                    .then(() => {
+                    this.handleSubscriberConnectSucceeded(nodeKey);
+                })
+                    .catch((error) => {
+                    this.handleSubscriberConnectFailed(error, nodeKey);
+                }));
+                this.subscriberGroupEmitter.emit("+subscriber");
+            }
+            // It's vital to await the start promises before resubscribing
+            // Otherwise we might try to resubscribe to a subscriber that is not yet connected
+            // This can cause a race condition
+            await Promise.all(startPromises);
             this._resubscribe();
-            this.
+            this.subscriberGroupEmitter.emit("subscribersReady");
+        }
+        finally {
+            this.isResetting = false;
+            if (this.pendingReset) {
+                const { slots, nodes } = this.pendingReset;
+                this.pendingReset = null;
+                await this.reset(slots, nodes);
+            }
         }
-        return this.shardedSubscribers;
     }
     /**
      * Refreshes the subscriber-related slot ranges
     *
      * Returns false if no refresh was needed
     *
-     * @param
+     * @param targetSlots
     */
-    _refreshSlots(
+    _refreshSlots(targetSlots) {
         //If there was an actual change, then reassign the slot ranges
-        if (this._slotsAreEqual(
+        if (this._slotsAreEqual(targetSlots)) {
             debug("Nothing to refresh because the new cluster map is equal to the previous one.");
+            return false;
         }
-
-
-
-
-
-
-
-            this.subscriberToSlotsIndex.set(node, []);
-        }
-        this.subscriberToSlotsIndex.get(node).push(Number(slot));
+        debug("Refreshing the slots of the subscriber group.");
+        //Rebuild the slots index
+        this.subscriberToSlotsIndex = new Map();
+        for (let slot = 0; slot < targetSlots.length; slot++) {
+            const node = targetSlots[slot][0];
+            if (!this.subscriberToSlotsIndex.has(node)) {
+                this.subscriberToSlotsIndex.set(node, []);
            }
-
-        this._resubscribe();
-        //Update the cached slots map
-        this.clusterSlots = JSON.parse(JSON.stringify(cluster.slots));
-        this.cluster.emit("subscribersReady");
-        return true;
+            this.subscriberToSlotsIndex.get(node).push(Number(slot));
         }
-
+        //Update the cached slots map
+        this.clusterSlots = JSON.parse(JSON.stringify(targetSlots));
+        return true;
     }
     /**
      * Resubscribes to the previous channels

@@ -189,20 +251,27 @@ class ClusterSubscriberGroup {
         this.shardedSubscribers.forEach((s, nodeKey) => {
             const subscriberSlots = this.subscriberToSlotsIndex.get(nodeKey);
             if (subscriberSlots) {
-                //More for debugging purposes
-                s.associateSlotRange(subscriberSlots);
                 //Resubscribe on the underlying connection
                 subscriberSlots.forEach((ss) => {
                     //Might return null if being disconnected
                     const redis = s.getInstance();
                     const channels = this.channels.get(ss);
                     if (channels && channels.length > 0) {
-
-
-
-
-                        redis.
-
+                        if (redis.status === "end") {
+                            return;
+                        }
+                        if (redis.status === "ready") {
+                            redis.ssubscribe(...channels).catch((err) => {
+                                // TODO: Should we emit an error event here?
+                                debug("Failed to ssubscribe on node %s: %s", nodeKey, err);
+                            });
+                        }
+                        else {
+                            redis.once("ready", () => {
+                                redis.ssubscribe(...channels).catch((err) => {
+                                    // TODO: Should we emit an error event here?
+                                    debug("Failed to ssubscribe on node %s: %s", nodeKey, err);
+                                });
                 });
             }

@@ -218,10 +287,30 @@ class ClusterSubscriberGroup {
      * @private
      */
     _slotsAreEqual(other) {
-        if (this.clusterSlots === undefined)
+        if (this.clusterSlots === undefined) {
             return false;
-
+        }
+        else {
             return JSON.stringify(this.clusterSlots) === JSON.stringify(other);
+        }
+    }
+    /**
+     * Checks if any subscribers are in an unhealthy state.
+     *
+     * A subscriber is considered unhealthy if:
+     * - It exists but is not started (failed/disconnected)
+     * - It's missing entirely for a node that should have one
+     *
+     * @returns true if any subscribers need to be recreated
+     */
+    hasUnhealthySubscribers() {
+        const hasFailedSubscribers = Array.from(this.shardedSubscribers.values()).some((sub) => !sub.isStarted());
+        const hasMissingSubscribers = Array.from(this.subscriberToSlotsIndex.keys()).some((nodeKey) => !this.shardedSubscribers.has(nodeKey));
+        return hasFailedSubscribers || hasMissingSubscribers;
     }
 }
 exports.default = ClusterSubscriberGroup;
+// Retry strategy
+ClusterSubscriberGroup.MAX_RETRY_ATTEMPTS = 10;
+ClusterSubscriberGroup.MAX_BACKOFF_MS = 2000;
+ClusterSubscriberGroup.BASE_BACKOFF_MS = 100;

package/built/cluster/ShardedSubscriber.d.ts
ADDED

@@ -0,0 +1,20 @@
+/// <reference types="node" />
+import EventEmitter = require("events");
+import { RedisOptions } from "./util";
+import Redis from "../Redis";
+export default class ShardedSubscriber {
+    private readonly emitter;
+    private readonly nodeKey;
+    private started;
+    private instance;
+    private readonly messageListeners;
+    constructor(emitter: EventEmitter, options: RedisOptions);
+    private onEnd;
+    private onError;
+    private onMoved;
+    start(): Promise<void>;
+    stop(): void;
+    isStarted(): boolean;
+    getInstance(): Redis | null;
+    getNodeKey(): string;
+}

package/built/cluster/ShardedSubscriber.js
ADDED

@@ -0,0 +1,89 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const util_1 = require("./util");
+const utils_1 = require("../utils");
+const Redis_1 = require("../Redis");
+const debug = (0, utils_1.Debug)("cluster:subscriberGroup:shardedSubscriber");
+class ShardedSubscriber {
+    constructor(emitter, options) {
+        this.emitter = emitter;
+        this.started = false;
+        this.instance = null;
+        // Store listener references for cleanup
+        this.messageListeners = new Map();
+        this.onEnd = () => {
+            this.started = false;
+            this.emitter.emit("-node", this.instance, this.nodeKey);
+        };
+        this.onError = (error) => {
+            this.emitter.emit("nodeError", error, this.nodeKey);
+        };
+        this.onMoved = () => {
+            this.emitter.emit("moved");
+        };
+        this.instance = new Redis_1.default({
+            port: options.port,
+            host: options.host,
+            username: options.username,
+            password: options.password,
+            enableReadyCheck: false,
+            offlineQueue: true,
+            connectionName: (0, util_1.getConnectionName)("ssubscriber", options.connectionName),
+            lazyConnect: true,
+            tls: options.tls,
+            /**
+             * Disable auto reconnection for subscribers.
+             * The ClusterSubscriberGroup will handle the reconnection.
+             */
+            retryStrategy: null,
+        });
+        this.nodeKey = (0, util_1.getNodeKey)(options);
+        // Register listeners
+        this.instance.once("end", this.onEnd);
+        this.instance.on("error", this.onError);
+        this.instance.on("moved", this.onMoved);
+        for (const event of ["smessage", "smessageBuffer"]) {
+            const listener = (...args) => {
+                this.emitter.emit(event, ...args);
+            };
+            this.messageListeners.set(event, listener);
+            this.instance.on(event, listener);
+        }
+    }
+    async start() {
+        if (this.started) {
+            debug("already started %s", this.nodeKey);
+            return;
+        }
+        try {
+            await this.instance.connect();
+            debug("started %s", this.nodeKey);
+            this.started = true;
+        }
+        catch (err) {
+            debug("failed to start %s: %s", this.nodeKey, err);
+            this.started = false;
+            throw err; // Re-throw so caller knows it failed
+        }
+    }
+    stop() {
+        this.started = false;
+        if (this.instance) {
+            this.instance.disconnect();
+            this.instance.removeAllListeners();
+            this.messageListeners.clear();
+            this.instance = null;
+        }
+        debug("stopped %s", this.nodeKey);
+    }
+    isStarted() {
+        return this.started;
+    }
+    getInstance() {
+        return this.instance;
+    }
+    getNodeKey() {
+        return this.nodeKey;
+    }
+}
+exports.default = ShardedSubscriber;
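Editor's note: the reconnect delay computed by `handleSubscriberConnectFailed` above is exponential with jitter and capped at `MAX_BACKOFF_MS`. A small self-contained sketch of that delay calculation (same formula and constants as the built code; the helper name is mine):

```javascript
const MAX_RETRY_ATTEMPTS = 10;
const MAX_BACKOFF_MS = 2000;
const BASE_BACKOFF_MS = 100;

// Delay before asking the cluster to refresh its slots cache after a failed
// subscriber connect: exponential in the attempt count, capped, with +/-25% jitter.
function subscriberRetryDelay(failedAttempts) {
  const attempts = Math.min(failedAttempts, MAX_RETRY_ATTEMPTS);
  const backoff = Math.min(BASE_BACKOFF_MS * 2 ** attempts, MAX_BACKOFF_MS);
  const jitter = Math.floor((Math.random() - 0.5) * (backoff * 0.5));
  return Math.max(0, backoff + jitter);
}

// e.g. attempt 1 -> ~200ms, attempt 3 -> ~800ms, attempt 5 and beyond -> capped near 2000ms
console.log([1, 3, 5, 12].map(subscriberRetryDelay));
```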
package/built/cluster/index.d.ts
CHANGED

@@ -49,6 +49,7 @@ declare class Cluster extends Commander {
     private _autoPipelines;
     private _runningAutoPipelines;
     private _readyDelayedCallbacks;
+    private subscriberGroupEmitter;
     /**
      * Every time Cluster#connect() is called, this value will be
      * auto-incrementing. The purpose of this value is used for

@@ -153,6 +154,7 @@ declare class Cluster extends Commander {
      */
     private resolveStartupNodeHostnames;
     private createScanStream;
+    private createShardedSubscriberGroup;
 }
 interface Cluster extends EventEmitter {
 }
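Editor's note: for context on the `createShardedSubscriberGroup` wiring in the next file, here is a hedged usage sketch of sharded Pub/Sub as the README excerpt earlier in this diff presents it (the `shardedSubscribers` option and the `ssubscribe`/`smessage` API appear in this diff; host/port values are placeholders):

```javascript
const { Cluster } = require("ioredis");

// Sharded Pub/Sub: one subscriber connection per master shard.
const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }], {
  shardedSubscribers: true,
});

cluster.on("smessage", (channel, message) => {
  console.log(channel, message);
});

// Channels passed to a single ssubscribe call must hash to the same slot,
// hence the {my} hash tag (otherwise the command is rejected with a
// CROSSSLOT-style AbortError, as in the index.js hunk below).
cluster.ssubscribe("channel{my}:1", "channel{my}:2").then((count) => {
  console.log("subscribed channels:", count);
});
```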
package/built/cluster/index.js
CHANGED

@@ -62,8 +62,9 @@ class Cluster extends Commander_1.default {
     events_1.EventEmitter.call(this);
     this.startupNodes = startupNodes;
     this.options = (0, utils_1.defaults)({}, options, ClusterOptions_1.DEFAULT_CLUSTER_OPTIONS, this.options);
-    if (this.options.shardedSubscribers
-        this.
+    if (this.options.shardedSubscribers) {
+        this.createShardedSubscriberGroup();
+    }
     if (this.options.redisOptions &&
         this.options.redisOptions.keyPrefix &&
         !this.options.keyPrefix) {

@@ -130,6 +131,14 @@ class Cluster extends Commander_1.default {
             return;
         }
         this.connectionPool.reset(nodes);
+        if (this.options.shardedSubscribers) {
+            this.shardedSubscribers
+                .reset(this.slots, this.connectionPool.getNodes("all"))
+                .catch((err) => {
+                // TODO should we emit an error event here?
+                debug("Error while starting subscribers: %s", err);
+            });
+        }
         const readyHandler = () => {
             this.setStatus("ready");
             this.retryAttempts = 0;

@@ -177,7 +186,10 @@ class Cluster extends Commander_1.default {
             });
             this.subscriber.start();
             if (this.options.shardedSubscribers) {
-                this.shardedSubscribers.start()
+                this.shardedSubscribers.start().catch((err) => {
+                    // TODO should we emit an error event here?
+                    debug("Error while starting subscribers: %s", err);
+                });
             }
         })
         .catch((err) => {

@@ -422,19 +434,25 @@ class Cluster extends Commander_1.default {
         }
         else if (Command_1.default.checkFlag("ENTER_SUBSCRIBER_MODE", command.name) ||
             Command_1.default.checkFlag("EXIT_SUBSCRIBER_MODE", command.name)) {
-            if (_this.options.shardedSubscribers
+            if (_this.options.shardedSubscribers &&
                 (command.name == "ssubscribe" || command.name == "sunsubscribe")) {
                 const sub = _this.shardedSubscribers.getResponsibleSubscriber(targetSlot);
+                if (!sub) {
+                    command.reject(new redis_errors_1.AbortError(`No sharded subscriber for slot: ${targetSlot}`));
+                    return;
+                }
                 let status = -1;
-                if (command.name == "ssubscribe")
+                if (command.name == "ssubscribe") {
                     status = _this.shardedSubscribers.addChannels(command.getKeys());
-
+                }
+                if (command.name == "sunsubscribe") {
                     status = _this.shardedSubscribers.removeChannels(command.getKeys());
+                }
                 if (status !== -1) {
                     redis = sub.getInstance();
                 }
                 else {
-                    command.reject(new redis_errors_1.AbortError("
+                    command.reject(new redis_errors_1.AbortError("Possible CROSSSLOT error: All channels must hash to the same slot"));
                 }
             }
             else {

@@ -614,6 +632,7 @@ class Cluster extends Commander_1.default {
      * Called when closed to check whether a reconnection should be made
      */
     handleCloseEvent(reason) {
+        var _a;
         if (reason) {
             debug("closed because %s", reason);
         }

@@ -633,6 +652,9 @@ class Cluster extends Commander_1.default {
             }, retryDelay);
         }
         else {
+            if (this.options.shardedSubscribers) {
+                (_a = this.subscriberGroupEmitter) === null || _a === void 0 ? void 0 : _a.removeAllListeners();
+            }
             this.setStatus("end");
             this.flushQueue(new Error("None of startup nodes is available"));
         }

@@ -744,6 +766,14 @@ class Cluster extends Commander_1.default {
             this._groupsBySlot[i] = this._groupsIds[target];
         }
         this.connectionPool.reset(nodes);
+        if (this.options.shardedSubscribers) {
+            this.shardedSubscribers
+                .reset(this.slots, this.connectionPool.getNodes("all"))
+                .catch((err) => {
+                // TODO should we emit an error event here?
+                debug("Error while starting subscribers: %s", err);
+            });
+        }
         callback();
     }, this.options.slotsRefreshTimeout));
 }

@@ -857,6 +887,40 @@ class Cluster extends Commander_1.default {
             ...options,
         });
     }
+    createShardedSubscriberGroup() {
+        this.subscriberGroupEmitter = new events_1.EventEmitter();
+        this.shardedSubscribers = new ClusterSubscriberGroup_1.default(this.subscriberGroupEmitter);
+        this.subscriberGroupEmitter.on("-node", (redis, nodeKey) => {
+            this.emit("-node", redis, nodeKey);
+            this.refreshSlotsCache();
+        });
+        this.subscriberGroupEmitter.on("subscriberConnectFailed", ({ delay, error }) => {
+            this.emit("error", error);
+            setTimeout(() => {
+                this.refreshSlotsCache();
+            }, delay);
+        });
+        this.subscriberGroupEmitter.on("moved", () => {
+            this.refreshSlotsCache();
+        });
+        this.subscriberGroupEmitter.on("-subscriber", () => {
+            this.emit("-subscriber");
+        });
+        this.subscriberGroupEmitter.on("+subscriber", () => {
+            this.emit("+subscriber");
+        });
+        this.subscriberGroupEmitter.on("nodeError", (error, nodeKey) => {
+            this.emit("nodeError", error, nodeKey);
+        });
+        this.subscriberGroupEmitter.on("subscribersReady", () => {
+            this.emit("subscribersReady");
+        });
+        for (const event of ["smessage", "smessageBuffer"]) {
+            this.subscriberGroupEmitter.on(event, (arg1, arg2, arg3) => {
+                this.emit(event, arg1, arg2, arg3);
+            });
+        }
+    }
 }
 (0, applyMixin_1.default)(Cluster, events_1.EventEmitter);
 (0, transaction_1.addTransactionSupport)(Cluster.prototype);

package/built/redis/RedisOptions.d.ts
CHANGED

@@ -11,6 +11,16 @@ export interface CommonRedisOptions extends CommanderOptions {
      * a "Command timed out" error will be thrown.
      */
     commandTimeout?: number;
+    /**
+     * Timeout (ms) for blocking commands with timeout=0 / BLOCK 0.
+     * When exceeded, the command resolves with null.
+     */
+    blockingTimeout?: number;
+    /**
+     * Grace period (ms) added to blocking command timeouts to account for network latency.
+     * @default 100
+     */
+    blockingTimeoutGrace?: number;
     /**
      * If the socket does not receive data within a set number of milliseconds:
      * 1. the socket is considered "dead" and will be destroyed

package/built/utils/argumentParsers.d.ts
ADDED

@@ -0,0 +1,14 @@
+import { CommandParameter } from "../types";
+/**
+ * Parses a command parameter as seconds and converts to milliseconds.
+ * @param arg - The command parameter representing seconds
+ * @returns The value in milliseconds, 0 if value is <= 0, or undefined if parsing fails
+ */
+export declare const parseSecondsArgument: (arg: CommandParameter | undefined) => number | undefined;
+/**
+ * Parses the BLOCK option from Redis command arguments (e.g., XREAD, XREADGROUP).
+ * @param args - Array of command parameters to search for the BLOCK option
+ * @returns The block duration in milliseconds, 0 if duration is <= 0,
+ * null if BLOCK option is not found, or undefined if BLOCK is found but duration is invalid
+ */
+export declare const parseBlockOption: (args: CommandParameter[]) => number | null | undefined;

package/built/utils/argumentParsers.js
ADDED

@@ -0,0 +1,74 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.parseBlockOption = exports.parseSecondsArgument = void 0;
+/**
+ * Parses a command parameter to a number.
+ * @param arg - The command parameter to parse (number, string, or Buffer)
+ * @returns The parsed number, or undefined if parsing fails or arg is undefined
+ */
+const parseNumberArgument = (arg) => {
+    if (typeof arg === "number") {
+        return arg;
+    }
+    if (Buffer.isBuffer(arg)) {
+        return parseNumberArgument(arg.toString());
+    }
+    if (typeof arg === "string") {
+        const value = Number(arg);
+        return Number.isFinite(value) ? value : undefined;
+    }
+    return undefined;
+};
+/**
+ * Parses a command parameter to a string.
+ * @param arg - The command parameter to parse (string or Buffer)
+ * @returns The parsed string, or undefined if arg is not a string/Buffer or is undefined
+ */
+const parseStringArgument = (arg) => {
+    if (typeof arg === "string") {
+        return arg;
+    }
+    if (Buffer.isBuffer(arg)) {
+        return arg.toString();
+    }
+    return undefined;
+};
+/**
+ * Parses a command parameter as seconds and converts to milliseconds.
+ * @param arg - The command parameter representing seconds
+ * @returns The value in milliseconds, 0 if value is <= 0, or undefined if parsing fails
+ */
+const parseSecondsArgument = (arg) => {
+    const value = parseNumberArgument(arg);
+    if (value === undefined) {
+        return undefined;
+    }
+    if (value <= 0) {
+        return 0;
+    }
+    return value * 1000;
+};
+exports.parseSecondsArgument = parseSecondsArgument;
+/**
+ * Parses the BLOCK option from Redis command arguments (e.g., XREAD, XREADGROUP).
+ * @param args - Array of command parameters to search for the BLOCK option
+ * @returns The block duration in milliseconds, 0 if duration is <= 0,
+ * null if BLOCK option is not found, or undefined if BLOCK is found but duration is invalid
+ */
+const parseBlockOption = (args) => {
+    for (let i = 0; i < args.length; i++) {
+        const token = parseStringArgument(args[i]);
+        if (token && token.toLowerCase() === "block") {
+            const duration = parseNumberArgument(args[i + 1]);
+            if (duration === undefined) {
+                return undefined;
+            }
+            if (duration <= 0) {
+                return 0;
+            }
+            return duration;
+        }
+    }
+    return null;
+};
+exports.parseBlockOption = parseBlockOption;
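Editor's note: a few hedged examples of what the parsers above return, useful when reading the `extractBlockingTimeout` logic in Command.js earlier in this diff. The deep import path points at an internal module of the built package (no public export is implied); the expected values follow directly from the code above:

```javascript
const {
  parseSecondsArgument,
  parseBlockOption,
} = require("ioredis/built/utils/argumentParsers");

// Seconds-style timeouts (BLPOP, BLMOVE, ...): converted to milliseconds.
console.log(parseSecondsArgument(5));      // 5000
console.log(parseSecondsArgument("0.1"));  // 100
console.log(parseSecondsArgument(0));      // 0  (block forever)
console.log(parseSecondsArgument("oops")); // undefined

// BLOCK option (XREAD/XREADGROUP): already expressed in milliseconds.
console.log(parseBlockOption(["BLOCK", 1500, "STREAMS", "s", "$"])); // 1500
console.log(parseBlockOption(["COUNT", 10, "STREAMS", "s", "$"]));   // null (no BLOCK given)
```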
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "ioredis",
-  "version": "5.
+  "version": "5.9.0",
   "description": "A robust, performance-focused and full-featured Redis client for Node.js.",
   "main": "./built/index.js",
   "types": "./built/index.d.ts",

@@ -43,7 +43,7 @@
     "url": "https://opencollective.com/ioredis"
   },
   "dependencies": {
-    "@ioredis/commands": "1.
+    "@ioredis/commands": "1.5.0",
     "cluster-key-slot": "^1.1.0",
     "debug": "^4.3.4",
     "denque": "^2.1.0",
|