ioredis 5.4.2 → 5.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +66 -2
- package/built/ScanStream.d.ts +1 -0
- package/built/ScanStream.js +3 -0
- package/built/cluster/ClusterOptions.d.ts +15 -2
- package/built/cluster/ClusterOptions.js +1 -0
- package/built/cluster/ClusterSubscriber.d.ts +14 -1
- package/built/cluster/ClusterSubscriber.js +37 -5
- package/built/cluster/ClusterSubscriberGroup.d.ts +86 -0
- package/built/cluster/ClusterSubscriberGroup.js +224 -0
- package/built/cluster/ConnectionPool.d.ts +11 -0
- package/built/cluster/ConnectionPool.js +35 -11
- package/built/cluster/index.d.ts +1 -0
- package/built/cluster/index.js +44 -10
- package/built/connectors/SentinelConnector/index.js +9 -1
- package/built/types.d.ts +1 -0
- package/package.json +1 -1
package/README.md
CHANGED
@@ -14,6 +14,8 @@ A robust, performance-focused and full-featured [Redis](http://redis.io) client
 
 Supports Redis >= 2.6.12. Completely compatible with Redis 7.x.
 
+ioredis is a stable project and maintenance is done on a best-effort basis for relevant issues (contributions to ioredis will still be evaluated, reviewed, and merged when they benefit the project). For new projects, node-redis is the recommended client library. [node-redis](https://github.com/redis/node-redis) is the open-source (MIT license) Redis JavaScript client library redesigned from the ground up and actively maintained. [node-redis](https://github.com/redis/node-redis) supports new (hash-field expiration) and future commands and the capabilities available in Redis Stack and Redis 8 (search, JSON, time-series, probabilistic data structures).
+
 # Features
 
 ioredis is a robust, full-featured Redis client that is
@@ -44,7 +46,7 @@ used in the world's biggest online commerce company [Alibaba](http://www.alibaba
 | Version        | Branch | Node.js Version | Redis Version   |
 | -------------- | ------ | --------------- | --------------- |
 | 5.x.x (latest) | main   | >= 12           | 2.6.12 ~ latest |
-| 4.x.x          | v4     | >=
+| 4.x.x          | v4     | >= 8            | 2.6.12 ~ 7      |
 
 Refer to [CHANGELOG.md](CHANGELOG.md) for features and bug fixes introduced in v5.
 
@@ -722,12 +724,18 @@ the key names are not utf8 strings.
 There are also `hscanStream`, `zscanStream` and `sscanStream` to iterate through elements in a hash, zset and set. The interface of each is
 similar to `scanStream` except the first argument is the key name:
 
+```javascript
+const stream = redis.zscanStream("myhash", {
+  match: "age:??",
+});
+```
+The `hscanStream` also accepts the `noValues` option to specify whether Redis should return only the keys in the hash table without their corresponding values.
 ```javascript
 const stream = redis.hscanStream("myhash", {
   match: "age:??",
+  noValues: true,
 });
 ```
-
 You can learn more from the [Redis documentation](http://redis.io/commands/scan).
 
 **Useful Tips**
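The `noValues` option added in the hunk above can be consumed like any other scan stream. A minimal sketch (assumes a locally running Redis server new enough to support `HSCAN` with `NOVALUES`, i.e. Redis 7.4+; the key and pattern are placeholders):

```javascript
const Redis = require("ioredis");
const redis = new Redis();

// Stream only the field names of the hash, without their values.
const stream = redis.hscanStream("myhash", { match: "age:??", noValues: true });

const fields = [];
stream.on("data", (page) => {
  // Each "data" event carries an array of field names for one SCAN page.
  fields.push(...page);
});
stream.on("end", () => {
  console.log("scanned fields:", fields);
});
```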
@@ -1130,7 +1138,31 @@ const cluster = new Redis.Cluster(
 );
 ```
 
+Or you can specify this parameter through function:
+```javascript
+const cluster = new Redis.Cluster(
+  [
+    {
+      host: "203.0.113.73",
+      port: 30001,
+    },
+  ],
+  {
+    natMap: (key) => {
+      if(key.indexOf('30001')) {
+        return { host: "203.0.113.73", port: 30001 };
+      }
+
+      return null;
+    },
+  }
+);
+```
+
 This option is also useful when the cluster is running inside a Docker container.
+Also it works for Clusters in cloud infrastructure where cluster nodes connected through dedicated subnet.
+
+Specifying through may be useful if you don't know concrete internal host and know only node port.
 
 ### Transaction and Pipeline in Cluster Mode
 
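Illustrating the port-only case mentioned in the hunk above, a hypothetical mapping function that ignores the internal host reported by the cluster and routes purely by port (the 203.0.113.x address and the port range are placeholders, not part of the diff):

```javascript
const cluster = new Redis.Cluster([{ host: "203.0.113.73", port: 30001 }], {
  natMap: (internalKey) => {
    // internalKey looks like "10.0.0.5:30001"; only the port is assumed to be known.
    const port = Number(internalKey.split(":")[1]);
    if (port >= 30001 && port <= 30006) {
      return { host: "203.0.113.73", port };
    }
    return null; // fall back to the address announced by the cluster
  },
});
```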
@@ -1164,6 +1196,38 @@ sub.subscribe("news", () => {
 });
 ```
 
+### Sharded Pub/Sub
+
+For sharded Pub/Sub, use the `spublish` and `ssubscribe` commands instead of the traditional `publish` and `subscribe`. With the old commands, the Redis cluster handles message propagation behind the scenes, allowing you to publish or subscribe to any node without considering sharding. However, this approach has scalability limitations that are addressed with sharded Pub/Sub. Here’s what you need to know:
+
+1. Instead of a single subscriber connection, there is now one subscriber connection per shard. Because of the potential overhead, you can enable or disable the use of the cluster subscriber group with the `shardedSubscribers` option. By default, this option is set to `false`, meaning sharded subscriptions are disabled. You should enable this option when establishing your cluster connection before using `ssubscribe`.
+2. All channel names that you pass to a single `ssubscribe` need to map to the same hash slot. You can call `ssubscribe` multiple times on the same cluster client instance to subscribe to channels across slots. The cluster's subscriber group takes care of forwarding the `ssubscribe` command to the shard that is responsible for the channels.
+
+The following basic example shows you how to use sharded Pub/Sub:
+
+```javascript
+const cluster: Cluster = new Cluster([{host: host, port: port}], {shardedSubscribers: true});
+
+//Register the callback
+cluster.on("smessage", (channel, message) => {
+  console.log(message);
+});
+
+
+//Subscribe to the channels on the same slot
+cluster.ssubscribe("channel{my}:1", "channel{my}:2").then( ( count: number ) => {
+  console.log(count);
+}).catch( (err) => {
+  console.log(err);
+});
+
+//Publish a message
+cluster.spublish("channel{my}:1", "This is a test message to my first channel.").then((value: number) => {
+  console.log("Published a message to channel{my}:1");
+});
+```
+
+
 ### Events
 
 | Event | Description |
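Continuing from the README example above, a sketch of tearing the subscription down again; `sunsubscribe` is the sharded counterpart of `unsubscribe`, and, like `ssubscribe`, all channels passed in one call are assumed to hash to the same slot:

```javascript
cluster.sunsubscribe("channel{my}:1", "channel{my}:2").then((count) => {
  console.log(`Remaining sharded subscriptions: ${count}`);
  return cluster.quit();
});
```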
package/built/ScanStream.d.ts
CHANGED
package/built/ScanStream.js
CHANGED
@@ -29,6 +29,9 @@ class ScanStream extends stream_1.Readable {
         if (this.opt.count) {
             args.push("COUNT", String(this.opt.count));
         }
+        if (this.opt.noValues) {
+            args.push("NOVALUES");
+        }
         this.opt.redis[this.opt.command](args, (err, res) => {
             if (err) {
                 this.emit("error", err);
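For reference, the arguments assembled in the hunk above translate into a raw command roughly like the following (a sketch using `redis.call`; key, pattern and count are placeholders):

```javascript
const Redis = require("ioredis");
const redis = new Redis();

// Roughly what one page of the stream sends when noValues is set:
// HSCAN myhash 0 MATCH age:?? COUNT 100 NOVALUES
redis
  .call("HSCAN", "myhash", "0", "MATCH", "age:??", "COUNT", "100", "NOVALUES")
  .then(([cursor, fields]) => console.log(cursor, fields));
```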
package/built/cluster/ClusterOptions.d.ts
CHANGED

@@ -5,12 +5,16 @@ import { CommanderOptions } from "../utils/Commander";
 import { NodeRole } from "./util";
 export declare type DNSResolveSrvFunction = (hostname: string, callback: (err: NodeJS.ErrnoException | null | undefined, records?: SrvRecord[]) => void) => void;
 export declare type DNSLookupFunction = (hostname: string, callback: (err: NodeJS.ErrnoException | null | undefined, address: string, family?: number) => void) => void;
-export
+export declare type NatMapFunction = (key: string) => {
+    host: string;
+    port: number;
+} | null;
+export declare type NatMap = {
     [key: string]: {
         host: string;
         port: number;
     };
-}
+} | NatMapFunction;
 /**
  * Options for Cluster constructor
  */
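With the widened `NatMap` type, both of the following option shapes are accepted (a sketch; the addresses are placeholders):

```javascript
const Redis = require("ioredis");

// Static table form: explicit internal -> external address mapping.
const byTable = new Redis.Cluster([{ host: "203.0.113.73", port: 30001 }], {
  natMap: {
    "10.0.1.1:30001": { host: "203.0.113.73", port: 30001 },
    "10.0.1.2:30002": { host: "203.0.113.73", port: 30002 },
  },
});

// Function form: compute the mapping on demand; returning null keeps the original address.
const byFunction = new Redis.Cluster([{ host: "203.0.113.73", port: 30001 }], {
  natMap: (key) =>
    key.endsWith(":30001") ? { host: "203.0.113.73", port: 30001 } : null,
});
```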
@@ -93,6 +97,15 @@ export interface ClusterOptions extends CommanderOptions {
      * @default 5000
      */
     slotsRefreshInterval?: number;
+    /**
+     * Use sharded subscribers instead of a single subscriber.
+     *
+     * If sharded subscribers are used, then one additional subscriber connection per master node
+     * is established. If you don't plan to use SPUBLISH/SSUBSCRIBE, then this should be disabled.
+     *
+     * @default false
+     */
+    shardedSubscribers?: boolean;
     /**
      * Passed to the constructor of `Redis`
      *
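A sketch of what enabling the new option looks like when constructing a cluster client (host and port are placeholders):

```javascript
const Redis = require("ioredis");

const cluster = new Redis.Cluster([{ host: "127.0.0.1", port: 7000 }], {
  // One additional subscriber connection per master node; only worth it
  // when SPUBLISH/SSUBSCRIBE are actually used.
  shardedSubscribers: true,
});
```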
package/built/cluster/ClusterSubscriber.d.ts
CHANGED

@@ -4,13 +4,26 @@ import ConnectionPool from "./ConnectionPool";
 export default class ClusterSubscriber {
     private connectionPool;
     private emitter;
+    private isSharded;
     private started;
     private subscriber;
     private lastActiveSubscriber;
-
+    private slotRange;
+    constructor(connectionPool: ConnectionPool, emitter: EventEmitter, isSharded?: boolean);
     getInstance(): any;
+    /**
+     * Associate this subscriber to a specific slot range.
+     *
+     * Returns the range or an empty array if the slot range couldn't be associated.
+     *
+     * BTW: This is more for debugging and testing purposes.
+     *
+     * @param range
+     */
+    associateSlotRange(range: number[]): number[];
     start(): void;
     stop(): void;
+    isStarted(): boolean;
     private onSubscriberEnd;
     private selectSubscriber;
 }
package/built/cluster/ClusterSubscriber.js
CHANGED

@@ -5,11 +5,15 @@ const utils_1 = require("../utils");
 const Redis_1 = require("../Redis");
 const debug = (0, utils_1.Debug)("cluster:subscriber");
 class ClusterSubscriber {
-    constructor(connectionPool, emitter) {
+    constructor(connectionPool, emitter, isSharded = false) {
         this.connectionPool = connectionPool;
         this.emitter = emitter;
+        this.isSharded = isSharded;
         this.started = false;
+        //There is only one connection for the entire pool
         this.subscriber = null;
+        //The slot range for which this subscriber is responsible
+        this.slotRange = [];
         this.onSubscriberEnd = () => {
             if (!this.started) {
                 debug("subscriber has disconnected, but ClusterSubscriber is not started, so not reconnecting.");

@@ -49,6 +53,21 @@ class ClusterSubscriber {
     getInstance() {
         return this.subscriber;
     }
+    /**
+     * Associate this subscriber to a specific slot range.
+     *
+     * Returns the range or an empty array if the slot range couldn't be associated.
+     *
+     * BTW: This is more for debugging and testing purposes.
+     *
+     * @param range
+     */
+    associateSlotRange(range) {
+        if (this.isSharded) {
+            this.slotRange = range;
+        }
+        return this.slotRange;
+    }
     start() {
         this.started = true;
         this.selectSubscriber();
@@ -60,7 +79,9 @@ class ClusterSubscriber {
             this.subscriber.disconnect();
             this.subscriber = null;
         }
-
+    }
+    isStarted() {
+        return this.started;
     }
     selectSubscriber() {
         const lastActiveSubscriber = this.lastActiveSubscriber;

@@ -91,13 +112,16 @@ class ClusterSubscriber {
          * provided for the subscriber is correct, and if not, the current subscriber
          * will be disconnected and a new subscriber will be selected.
          */
+        let connectionPrefix = "subscriber";
+        if (this.isSharded)
+            connectionPrefix = "ssubscriber";
         this.subscriber = new Redis_1.default({
             port: options.port,
             host: options.host,
             username: options.username,
             password: options.password,
             enableReadyCheck: true,
-            connectionName: (0, util_1.getConnectionName)(
+            connectionName: (0, util_1.getConnectionName)(connectionPrefix, options.connectionName),
             lazyConnect: true,
             tls: options.tls,
             // Don't try to reconnect the subscriber connection. If the connection fails

@@ -153,8 +177,6 @@ class ClusterSubscriber {
         for (const event of [
             "message",
             "messageBuffer",
-            "smessage",
-            "smessageBuffer",
         ]) {
             this.subscriber.on(event, (arg1, arg2) => {
                 this.emitter.emit(event, arg1, arg2);

@@ -165,6 +187,16 @@ class ClusterSubscriber {
                 this.emitter.emit(event, arg1, arg2, arg3);
             });
         }
+        if (this.isSharded == true) {
+            for (const event of [
+                "smessage",
+                "smessageBuffer",
+            ]) {
+                this.subscriber.on(event, (arg1, arg2) => {
+                    this.emitter.emit(event, arg1, arg2);
+                });
+            }
+        }
     }
 }
 exports.default = ClusterSubscriber;
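Because the `smessage`/`smessageBuffer` events are only forwarded when the subscriber was created as sharded, applications listen for them on the cluster instance itself, for example (a sketch; host and port are placeholders and `shardedSubscribers: true` is assumed):

```javascript
const Redis = require("ioredis");
const cluster = new Redis.Cluster([{ host: "127.0.0.1", port: 7000 }], { shardedSubscribers: true });

cluster.on("smessageBuffer", (channel, message) => {
  // Both arguments arrive as Buffers, mirroring the existing "messageBuffer" event.
  console.log(channel.toString(), message.length);
});
```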
package/built/cluster/ClusterSubscriberGroup.d.ts
ADDED

@@ -0,0 +1,86 @@
+/// <reference types="node" />
+import ClusterSubscriber from "./ClusterSubscriber";
+import Cluster from "./index";
+/**
+ * Redis differs between "normal" and sharded PubSub. If using the "normal" PubSub feature, exactly one
+ * ClusterSubscriber exists per cluster instance. This works because the Redis cluster bus forwards m
+ * messages between shards. However, this has scalability limitations, which is the reason why the sharded
+ * PubSub feature was added to Redis. With sharded PubSub, each shard is responsible for its own messages.
+ * Given that, we need at least one ClusterSubscriber per master endpoint/node.
+ *
+ * This class leverages the previously exising ClusterSubscriber by adding support for multiple such subscribers
+ * in alignment to the master nodes of the cluster. The ClusterSubscriber class was extended in a non-breaking way
+ * to support this feature.
+ */
+export default class ClusterSubscriberGroup {
+    private cluster;
+    private shardedSubscribers;
+    private clusterSlots;
+    private subscriberToSlotsIndex;
+    private channels;
+    /**
+     * Register callbacks
+     *
+     * @param cluster
+     */
+    constructor(cluster: Cluster);
+    /**
+     * Get the responsible subscriber.
+     *
+     * Returns null if no subscriber was found
+     *
+     * @param slot
+     */
+    getResponsibleSubscriber(slot: number): ClusterSubscriber;
+    /**
+     * Adds a channel for which this subscriber group is responsible
+     *
+     * @param channels
+     */
+    addChannels(channels: (string | Buffer)[]): number;
+    /**
+     * Removes channels for which the subscriber group is responsible by optionally unsubscribing
+     * @param channels
+     */
+    removeChannels(channels: (string | Buffer)[]): number;
+    /**
+     * Disconnect all subscribers
+     */
+    stop(): void;
+    /**
+     * Start all not yet started subscribers
+     */
+    start(): void;
+    /**
+     * Add a subscriber to the group of subscribers
+     *
+     * @param redis
+     */
+    private _addSubscriber;
+    /**
+     * Removes a subscriber from the group
+     * @param redis
+     */
+    private _removeSubscriber;
+    /**
+     * Refreshes the subscriber-related slot ranges
+     *
+     * Returns false if no refresh was needed
+     *
+     * @param cluster
+     */
+    private _refreshSlots;
+    /**
+     * Resubscribes to the previous channels
+     *
+     * @private
+     */
+    private _resubscribe;
+    /**
+     * Deep equality of the cluster slots objects
+     *
+     * @param other
+     * @private
+     */
+    private _slotsAreEqual;
+}
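The subscriber group also emits a few lifecycle events on the cluster instance (see the implementation below); a sketch of observing them, assuming the same `cluster` instance as in the earlier sketch:

```javascript
const Redis = require("ioredis");
const cluster = new Redis.Cluster([{ host: "127.0.0.1", port: 7000 }], { shardedSubscribers: true });

cluster.on("+subscriber", () => console.log("sharded subscriber added"));
cluster.on("-subscriber", () => console.log("sharded subscriber removed"));
cluster.on("subscribersReady", () => console.log("sharded subscribers re-synced to the slot map"));
```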
package/built/cluster/ClusterSubscriberGroup.js
ADDED

@@ -0,0 +1,224 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const utils_1 = require("../utils");
+const ClusterSubscriber_1 = require("./ClusterSubscriber");
+const ConnectionPool_1 = require("./ConnectionPool");
+const util_1 = require("./util");
+const calculateSlot = require("cluster-key-slot");
+const debug = (0, utils_1.Debug)("cluster:subscriberGroup");
+/**
+ * Redis differs between "normal" and sharded PubSub. If using the "normal" PubSub feature, exactly one
+ * ClusterSubscriber exists per cluster instance. This works because the Redis cluster bus forwards m
+ * messages between shards. However, this has scalability limitations, which is the reason why the sharded
+ * PubSub feature was added to Redis. With sharded PubSub, each shard is responsible for its own messages.
+ * Given that, we need at least one ClusterSubscriber per master endpoint/node.
+ *
+ * This class leverages the previously exising ClusterSubscriber by adding support for multiple such subscribers
+ * in alignment to the master nodes of the cluster. The ClusterSubscriber class was extended in a non-breaking way
+ * to support this feature.
+ */
+class ClusterSubscriberGroup {
+    /**
+     * Register callbacks
+     *
+     * @param cluster
+     */
+    constructor(cluster) {
+        this.cluster = cluster;
+        this.shardedSubscribers = new Map();
+        this.clusterSlots = [];
+        //Simple [min, max] slot ranges aren't enough because you can migrate single slots
+        this.subscriberToSlotsIndex = new Map();
+        this.channels = new Map();
+        cluster.on("+node", (redis) => {
+            this._addSubscriber(redis);
+        });
+        cluster.on("-node", (redis) => {
+            this._removeSubscriber(redis);
+        });
+        cluster.on("refresh", () => {
+            this._refreshSlots(cluster);
+        });
+    }
+    /**
+     * Get the responsible subscriber.
+     *
+     * Returns null if no subscriber was found
+     *
+     * @param slot
+     */
+    getResponsibleSubscriber(slot) {
+        const nodeKey = this.clusterSlots[slot][0];
+        return this.shardedSubscribers.get(nodeKey);
+    }
+    /**
+     * Adds a channel for which this subscriber group is responsible
+     *
+     * @param channels
+     */
+    addChannels(channels) {
+        const slot = calculateSlot(channels[0]);
+        //Check if the all channels belong to the same slot and otherwise reject the operation
+        channels.forEach((c) => {
+            if (calculateSlot(c) != slot)
+                return -1;
+        });
+        const currChannels = this.channels.get(slot);
+        if (!currChannels) {
+            this.channels.set(slot, channels);
+        }
+        else {
+            this.channels.set(slot, currChannels.concat(channels));
+        }
+        return [...this.channels.values()].flatMap(v => v).length;
+    }
+    /**
+     * Removes channels for which the subscriber group is responsible by optionally unsubscribing
+     * @param channels
+     */
+    removeChannels(channels) {
+        const slot = calculateSlot(channels[0]);
+        //Check if the all channels belong to the same slot and otherwise reject the operation
+        channels.forEach((c) => {
+            if (calculateSlot(c) != slot)
+                return -1;
+        });
+        const slotChannels = this.channels.get(slot);
+        if (slotChannels) {
+            const updatedChannels = slotChannels.filter(c => !channels.includes(c));
+            this.channels.set(slot, updatedChannels);
+        }
+        return [...this.channels.values()].flatMap(v => v).length;
+    }
+    /**
+     * Disconnect all subscribers
+     */
+    stop() {
+        for (const s of this.shardedSubscribers.values()) {
+            s.stop();
+        }
+    }
+    /**
+     * Start all not yet started subscribers
+     */
+    start() {
+        for (const s of this.shardedSubscribers.values()) {
+            if (!s.isStarted()) {
+                s.start();
+            }
+        }
+    }
+    /**
+     * Add a subscriber to the group of subscribers
+     *
+     * @param redis
+     */
+    _addSubscriber(redis) {
+        const pool = new ConnectionPool_1.default(redis.options);
+        if (pool.addMasterNode(redis)) {
+            const sub = new ClusterSubscriber_1.default(pool, this.cluster, true);
+            const nodeKey = (0, util_1.getNodeKey)(redis.options);
+            this.shardedSubscribers.set(nodeKey, sub);
+            sub.start();
+            // We need to attempt to resubscribe them in case the new node serves their slot
+            this._resubscribe();
+            this.cluster.emit("+subscriber");
+            return sub;
+        }
+        return null;
+    }
+    /**
+     * Removes a subscriber from the group
+     * @param redis
+     */
+    _removeSubscriber(redis) {
+        const nodeKey = (0, util_1.getNodeKey)(redis.options);
+        const sub = this.shardedSubscribers.get(nodeKey);
+        if (sub) {
+            sub.stop();
+            this.shardedSubscribers.delete(nodeKey);
+            // Even though the subscriber to this node is going down, we might have another subscriber
+            // handling the same slots, so we need to attempt to subscribe the orphaned channels
+            this._resubscribe();
+            this.cluster.emit("-subscriber");
+        }
+        return this.shardedSubscribers;
+    }
+    /**
+     * Refreshes the subscriber-related slot ranges
+     *
+     * Returns false if no refresh was needed
+     *
+     * @param cluster
+     */
+    _refreshSlots(cluster) {
+        //If there was an actual change, then reassign the slot ranges
+        if (this._slotsAreEqual(cluster.slots)) {
+            debug("Nothing to refresh because the new cluster map is equal to the previous one.");
+        }
+        else {
+            debug("Refreshing the slots of the subscriber group.");
+            //Rebuild the slots index
+            this.subscriberToSlotsIndex = new Map();
+            for (let slot = 0; slot < cluster.slots.length; slot++) {
+                const node = cluster.slots[slot][0];
+                if (!this.subscriberToSlotsIndex.has(node)) {
+                    this.subscriberToSlotsIndex.set(node, []);
+                }
+                this.subscriberToSlotsIndex.get(node).push(Number(slot));
+            }
+            //Update the subscribers from the index
+            this._resubscribe();
+            //Update the cached slots map
+            this.clusterSlots = JSON.parse(JSON.stringify(cluster.slots));
+            this.cluster.emit("subscribersReady");
+            return true;
+        }
+        return false;
+    }
+    /**
+     * Resubscribes to the previous channels
+     *
+     * @private
+     */
+    _resubscribe() {
+        if (this.shardedSubscribers) {
+            this.shardedSubscribers.forEach((s, nodeKey) => {
+                const subscriberSlots = this.subscriberToSlotsIndex.get(nodeKey);
+                if (subscriberSlots) {
+                    //More for debugging purposes
+                    s.associateSlotRange(subscriberSlots);
+                    //Resubscribe on the underlying connection
+                    subscriberSlots.forEach((ss) => {
+                        //Might return null if being disconnected
+                        const redis = s.getInstance();
+                        const channels = this.channels.get(ss);
+                        if (channels && channels.length > 0) {
+                            //Try to subscribe now
+                            if (redis) {
+                                redis.ssubscribe(channels);
+                                //If the instance isn't ready yet, then register the re-subscription for later
+                                redis.on("ready", () => {
+                                    redis.ssubscribe(channels);
+                                });
+                            }
+                        }
+                    });
+                }
+            });
+        }
+    }
+    /**
+     * Deep equality of the cluster slots objects
+     *
+     * @param other
+     * @private
+     */
+    _slotsAreEqual(other) {
+        if (this.clusterSlots === undefined)
+            return false;
+        else
+            return JSON.stringify(this.clusterSlots) === JSON.stringify(other);
+    }
+}
+exports.default = ClusterSubscriberGroup;
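The same-slot requirement enforced around `addChannels`/`removeChannels` above comes from `cluster-key-slot`; hash tags are the usual way to satisfy it (a small sketch, channel names are placeholders):

```javascript
const calculateSlot = require("cluster-key-slot");

// The {my} hash tag makes both names hash on "my", so one ssubscribe call can cover them.
console.log(calculateSlot("channel{my}:1") === calculateSlot("channel{my}:2")); // true
console.log(calculateSlot("channel:1") === calculateSlot("channel:2")); // usually false
```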
package/built/cluster/ConnectionPool.d.ts
CHANGED

@@ -10,6 +10,17 @@ export default class ConnectionPool extends EventEmitter {
     getNodes(role?: NodeRole): Redis[];
     getInstanceByKey(key: NodeKey): Redis;
     getSampleInstance(role: NodeRole): Redis;
+    /**
+     * Add a master node to the pool
+     * @param node
+     */
+    addMasterNode(node: RedisOptions): boolean;
+    /**
+     * Creates a Redis connection instance from the node options
+     * @param node
+     * @param readOnly
+     */
+    createRedisFromOptions(node: RedisOptions, readOnly: boolean): Redis;
     /**
      * Find or create a connection to the node
      */
package/built/cluster/ConnectionPool.js
CHANGED

@@ -29,6 +29,40 @@ class ConnectionPool extends events_1.EventEmitter {
         const sampleKey = (0, utils_1.sample)(keys);
         return this.nodes[role][sampleKey];
     }
+    /**
+     * Add a master node to the pool
+     * @param node
+     */
+    addMasterNode(node) {
+        const key = (0, util_1.getNodeKey)(node.options);
+        const redis = this.createRedisFromOptions(node, node.options.readOnly);
+        //Master nodes aren't read-only
+        if (!node.options.readOnly) {
+            this.nodes.all[key] = redis;
+            this.nodes.master[key] = redis;
+            return true;
+        }
+        return false;
+    }
+    /**
+     * Creates a Redis connection instance from the node options
+     * @param node
+     * @param readOnly
+     */
+    createRedisFromOptions(node, readOnly) {
+        const redis = new Redis_1.default((0, utils_1.defaults)({
+            // Never try to reconnect when a node is lose,
+            // instead, waiting for a `MOVED` error and
+            // fetch the slots again.
+            retryStrategy: null,
+            // Offline queue should be enabled so that
+            // we don't need to wait for the `ready` event
+            // before sending commands to the node.
+            enableOfflineQueue: true,
+            readOnly: readOnly,
+        }, node, this.redisOptions, { lazyConnect: true }));
+        return redis;
+    }
     /**
      * Find or create a connection to the node
      */

@@ -60,17 +94,7 @@ class ConnectionPool extends events_1.EventEmitter {
         }
         else {
             debug("Connecting to %s as %s", key, readOnly ? "slave" : "master");
-            redis =
-                // Never try to reconnect when a node is lose,
-                // instead, waiting for a `MOVED` error and
-                // fetch the slots again.
-                retryStrategy: null,
-                // Offline queue should be enabled so that
-                // we don't need to wait for the `ready` event
-                // before sending commands to the node.
-                enableOfflineQueue: true,
-                readOnly: readOnly,
-            }, node, this.redisOptions, { lazyConnect: true }));
+            redis = this.createRedisFromOptions(node, readOnly);
             this.nodes.all[key] = redis;
             this.nodes[readOnly ? "slave" : "master"][key] = redis;
             redis.once("end", () => {
package/built/cluster/index.d.ts
CHANGED
package/built/cluster/index.js
CHANGED
@@ -18,6 +18,7 @@ const ConnectionPool_1 = require("./ConnectionPool");
 const DelayQueue_1 = require("./DelayQueue");
 const util_1 = require("./util");
 const Deque = require("denque");
+const ClusterSubscriberGroup_1 = require("./ClusterSubscriberGroup");
 const debug = (0, utils_1.Debug)("cluster");
 const REJECT_OVERWRITTEN_COMMANDS = new WeakSet();
 /**

@@ -27,6 +28,7 @@ class Cluster extends Commander_1.default {
     /**
      * Creates an instance of Cluster.
      */
+    //TODO: Add an option that enables or disables sharded PubSub
     constructor(startupNodes, options = {}) {
         super();
         this.slots = [];

@@ -60,6 +62,8 @@ class Cluster extends Commander_1.default {
         events_1.EventEmitter.call(this);
         this.startupNodes = startupNodes;
         this.options = (0, utils_1.defaults)({}, options, ClusterOptions_1.DEFAULT_CLUSTER_OPTIONS, this.options);
+        if (this.options.shardedSubscribers == true)
+            this.shardedSubscribers = new ClusterSubscriberGroup_1.default(this);
         if (this.options.redisOptions &&
             this.options.redisOptions.keyPrefix &&
             !this.options.keyPrefix) {
@@ -172,6 +176,9 @@ class Cluster extends Commander_1.default {
             }
         });
         this.subscriber.start();
+        if (this.options.shardedSubscribers) {
+            this.shardedSubscribers.start();
+        }
     })
         .catch((err) => {
         this.setStatus("close");

@@ -197,6 +204,9 @@ class Cluster extends Commander_1.default {
         }
         this.clearNodesRefreshInterval();
         this.subscriber.stop();
+        if (this.options.shardedSubscribers) {
+            this.shardedSubscribers.stop();
+        }
         if (status === "wait") {
             this.setStatus("close");
             this.handleCloseEvent();

@@ -218,6 +228,9 @@ class Cluster extends Commander_1.default {
         }
         this.clearNodesRefreshInterval();
         this.subscriber.stop();
+        if (this.options.shardedSubscribers) {
+            this.shardedSubscribers.stop();
+        }
         if (status === "wait") {
             const ret = (0, standard_as_callback_1.default)(Promise.resolve("OK"), callback);
             // use setImmediate to make sure "close" event
@@ -409,7 +422,24 @@ class Cluster extends Commander_1.default {
         }
         else if (Command_1.default.checkFlag("ENTER_SUBSCRIBER_MODE", command.name) ||
             Command_1.default.checkFlag("EXIT_SUBSCRIBER_MODE", command.name)) {
-
+            if (_this.options.shardedSubscribers == true &&
+                (command.name == "ssubscribe" || command.name == "sunsubscribe")) {
+                const sub = _this.shardedSubscribers.getResponsibleSubscriber(targetSlot);
+                let status = -1;
+                if (command.name == "ssubscribe")
+                    status = _this.shardedSubscribers.addChannels(command.getKeys());
+                if (command.name == "sunsubscribe")
+                    status = _this.shardedSubscribers.removeChannels(command.getKeys());
+                if (status !== -1) {
+                    redis = sub.getInstance();
+                }
+                else {
+                    command.reject(new redis_errors_1.AbortError("Can't add or remove the given channels. Are they in the same slot?"));
+                }
+            }
+            else {
+                redis = _this.subscriber.getInstance();
+            }
             if (!redis) {
                 command.reject(new redis_errors_1.AbortError("No subscriber for the cluster"));
                 return;
@@ -628,15 +658,19 @@ class Cluster extends Commander_1.default {
         }
     }
     natMapper(nodeKey) {
-
-
-
-
-
-
-
-
-
+        const key = typeof nodeKey === "string"
+            ? nodeKey
+            : `${nodeKey.host}:${nodeKey.port}`;
+        let mapped = null;
+        if (this.options.natMap && typeof this.options.natMap === "function") {
+            mapped = this.options.natMap(key);
+        }
+        else if (this.options.natMap && typeof this.options.natMap === "object") {
+            mapped = this.options.natMap[key];
+        }
+        if (mapped) {
+            debug("NAT mapping %s -> %O", key, mapped);
+            return Object.assign({}, mapped);
         }
         return typeof nodeKey === "string"
             ? (0, util_1.nodeKeyToRedisOptions)(nodeKey)
package/built/connectors/SentinelConnector/index.js
CHANGED

@@ -162,7 +162,15 @@ class SentinelConnector extends AbstractConnector_1.default {
     sentinelNatResolve(item) {
         if (!item || !this.options.natMap)
             return item;
-
+        const key = `${item.host}:${item.port}`;
+        let result = item;
+        if (typeof this.options.natMap === "function") {
+            result = this.options.natMap(key) || item;
+        }
+        else if (typeof this.options.natMap === "object") {
+            result = this.options.natMap[key] || item;
+        }
+        return result;
     }
     connectToSentinel(endpoint, options) {
         const redis = new Redis_1.default({
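With this change the function form of `natMap` also applies to Sentinel connections, for example (a sketch; the addresses and master name are placeholders):

```javascript
const Redis = require("ioredis");

const redis = new Redis({
  sentinels: [{ host: "203.0.113.10", port: 26379 }],
  name: "mymaster",
  natMap: (key) => {
    // key is "internalHost:port" as reported by the sentinel
    const port = Number(key.split(":")[1]);
    return { host: "203.0.113.10", port };
  },
});
```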
package/built/types.d.ts
CHANGED