redis-5.2.1-py3-none-any.whl → redis-5.3.0-py3-none-any.whl
This diff covers publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- redis/asyncio/client.py +49 -12
- redis/asyncio/cluster.py +101 -12
- redis/asyncio/connection.py +78 -11
- redis/auth/__init__.py +0 -0
- redis/auth/err.py +31 -0
- redis/auth/idp.py +28 -0
- redis/auth/token.py +126 -0
- redis/auth/token_manager.py +370 -0
- redis/backoff.py +15 -0
- redis/client.py +116 -56
- redis/cluster.py +157 -33
- redis/connection.py +103 -11
- redis/credentials.py +40 -1
- redis/event.py +394 -0
- redis/typing.py +1 -1
- redis/utils.py +65 -0
- {redis-5.2.1.dist-info → redis-5.3.0.dist-info}/METADATA +2 -1
- {redis-5.2.1.dist-info → redis-5.3.0.dist-info}/RECORD +21 -15
- {redis-5.2.1.dist-info → redis-5.3.0.dist-info}/LICENSE +0 -0
- {redis-5.2.1.dist-info → redis-5.3.0.dist-info}/WHEEL +0 -0
- {redis-5.2.1.dist-info → redis-5.3.0.dist-info}/top_level.txt +0 -0
redis/cluster.py
CHANGED
@@ -4,6 +4,7 @@ import sys
 import threading
 import time
 from collections import OrderedDict
+from enum import Enum
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union

 from redis._parsers import CommandsParser, Encoder
@@ -15,6 +16,12 @@ from redis.commands import READ_COMMANDS, RedisClusterCommands
 from redis.commands.helpers import list_or_args
 from redis.connection import ConnectionPool, DefaultParser, parse_url
 from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
+from redis.event import (
+    AfterPooledConnectionsInstantiationEvent,
+    AfterPubSubConnectionInstantiationEvent,
+    ClientType,
+    EventDispatcher,
+)
 from redis.exceptions import (
     AskError,
     AuthenticationError,
@@ -36,6 +43,7 @@ from redis.lock import Lock
 from redis.retry import Retry
 from redis.utils import (
     HIREDIS_AVAILABLE,
+    deprecated_args,
     dict_merge,
     list_keys_to_dict,
     merge_result,
@@ -48,10 +56,13 @@ def get_node_name(host: str, port: Union[str, int]) -> str:
     return f"{host}:{port}"


+@deprecated_args(
+    allowed_args=["redis_node"],
+    reason="Use get_connection(redis_node) instead",
+    version="5.3.0",
+)
 def get_connection(redis_node, *args, **options):
-    return redis_node.connection or redis_node.connection_pool.get_connection(
-        args[0], **options
-    )
+    return redis_node.connection or redis_node.connection_pool.get_connection()


 def parse_scan_result(command, res, **options):
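The hunk above changes two things: ConnectionPool.get_connection() is now called without the command name, and passing extra positional arguments to the module-level get_connection() helper is deprecated as of 5.3.0. A minimal sketch of the new call pattern against a standalone client (host/port are placeholders; releasing the connection in a finally block is just good hygiene, not something this diff mandates):

    from redis import Redis

    r = Redis(host="localhost", port=6379)

    # 5.2.x required a command name, e.g. pool.get_connection("GET", **options);
    # in 5.3.0 the connection is checked out without arguments.
    conn = r.connection_pool.get_connection()
    try:
        conn.send_command("PING")
        print(conn.read_response())
    finally:
        r.connection_pool.release(conn)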
@@ -418,7 +429,12 @@ class AbstractRedisCluster:
         list_keys_to_dict(["SCRIPT FLUSH"], lambda command, res: all(res.values())),
     )

-    ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, ClusterDownError)
+    ERRORS_ALLOW_RETRY = (
+        ConnectionError,
+        TimeoutError,
+        ClusterDownError,
+        SlotNotCoveredError,
+    )

     def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
         """Replace the default cluster node.
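Adding SlotNotCoveredError to ERRORS_ALLOW_RETRY means the cluster client now treats an uncovered slot as a transient condition and retries the command (bounded by cluster_error_retry_attempts) instead of failing on the first attempt. Callers can still handle it explicitly; a small sketch with placeholder connection details:

    from redis.cluster import RedisCluster
    from redis.exceptions import SlotNotCoveredError

    # cluster_error_retry_attempts bounds the retries for the exception types
    # listed in ERRORS_ALLOW_RETRY, which now includes SlotNotCoveredError.
    rc = RedisCluster(host="localhost", port=7000, cluster_error_retry_attempts=3)

    try:
        value = rc.get("some-key")
    except SlotNotCoveredError:
        # surfaces only once the retry budget is exhausted
        value = None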
@@ -490,6 +506,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         """
         return cls(url=url, **kwargs)

+    @deprecated_args(
+        args_to_warn=["read_from_replicas"],
+        reason="Please configure the 'load_balancing_strategy' instead",
+        version="5.3.0",
+    )
     def __init__(
         self,
         host: Optional[str] = None,
@@ -500,11 +521,13 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         require_full_coverage: bool = False,
         reinitialize_steps: int = 5,
         read_from_replicas: bool = False,
+        load_balancing_strategy: Optional["LoadBalancingStrategy"] = None,
         dynamic_startup_nodes: bool = True,
         url: Optional[str] = None,
         address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
         cache: Optional[CacheInterface] = None,
         cache_config: Optional[CacheConfig] = None,
+        event_dispatcher: Optional[EventDispatcher] = None,
         **kwargs,
     ):
         """
@@ -527,11 +550,17 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             cluster client. If not all slots are covered, RedisClusterException
             will be thrown.
         :param read_from_replicas:
+            @deprecated - please use load_balancing_strategy instead
             Enable read from replicas in READONLY mode. You can read possibly
             stale data.
             When set to true, read commands will be assigned between the
             primary and its replications in a Round-Robin manner.
-        :param dynamic_startup_nodes:
+        :param load_balancing_strategy:
+            Enable read from replicas in READONLY mode and defines the load balancing
+            strategy that will be used for cluster node selection.
+            The data read from replicas is eventually consistent
+            with the data in primary nodes.
+        :param dynamic_startup_nodes:
             Set the RedisCluster's startup nodes to all of the discovered nodes.
             If true (default value), the cluster's discovered nodes will be used to
             determine the cluster nodes-slots mapping in the next topology refresh.
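The docstring above introduces load_balancing_strategy as the replacement for the deprecated read_from_replicas flag. A minimal sketch of opting into replica reads with the new parameter (host and port are placeholders):

    from redis.cluster import LoadBalancingStrategy, RedisCluster

    # Read commands are spread across the primary and its replicas (ROUND_ROBIN),
    # across replicas only (ROUND_ROBIN_REPLICAS), or sent to a random replica
    # (RANDOM_REPLICA). Replica reads are eventually consistent.
    rc = RedisCluster(
        host="localhost",
        port=7000,
        load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN_REPLICAS,
    )

    rc.set("foo", "bar")  # writes always go to the slot's primary
    print(rc.get("foo"))  # this read may be served by a replica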
@@ -636,8 +665,13 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         self.command_flags = self.__class__.COMMAND_FLAGS.copy()
         self.node_flags = self.__class__.NODE_FLAGS.copy()
         self.read_from_replicas = read_from_replicas
+        self.load_balancing_strategy = load_balancing_strategy
         self.reinitialize_counter = 0
         self.reinitialize_steps = reinitialize_steps
+        if event_dispatcher is None:
+            self._event_dispatcher = EventDispatcher()
+        else:
+            self._event_dispatcher = event_dispatcher
         self.nodes_manager = NodesManager(
             startup_nodes=startup_nodes,
             from_url=from_url,
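The constructor now accepts an event_dispatcher and creates a default EventDispatcher when none is given; the same instance is handed to the NodesManager (next hunk) so connection-related events such as AfterPooledConnectionsInstantiationEvent flow through a single object. A hedged sketch of supplying a dispatcher explicitly, which is equivalent to the default behaviour; only the constructor and the keyword shown in this diff are used:

    from redis.cluster import RedisCluster
    from redis.event import EventDispatcher

    dispatcher = EventDispatcher()  # default listeners

    rc = RedisCluster(
        host="localhost",
        port=7000,
        event_dispatcher=dispatcher,  # omit it and RedisCluster builds its own
    )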
@@ -646,6 +680,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             address_remap=address_remap,
             cache=cache,
             cache_config=cache_config,
+            event_dispatcher=self._event_dispatcher,
             **kwargs,
         )

@@ -683,7 +718,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         connection.set_parser(ClusterParser)
         connection.on_connect()

-        if self.read_from_replicas:
+        if self.read_from_replicas or self.load_balancing_strategy:
             # Sending READONLY command to server to configure connection as
             # readonly. Since each cluster node may change its server type due
             # to a failover, we should establish a READONLY connection
@@ -810,6 +845,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             cluster_response_callbacks=self.cluster_response_callbacks,
             cluster_error_retry_attempts=self.cluster_error_retry_attempts,
             read_from_replicas=self.read_from_replicas,
+            load_balancing_strategy=self.load_balancing_strategy,
             reinitialize_steps=self.reinitialize_steps,
             lock=self._lock,
         )
@@ -927,7 +963,9 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             # get the node that holds the key's slot
             slot = self.determine_slot(*args)
             node = self.nodes_manager.get_node_from_slot(
-                slot, self.read_from_replicas and command in READ_COMMANDS
+                slot,
+                self.read_from_replicas and command in READ_COMMANDS,
+                self.load_balancing_strategy if command in READ_COMMANDS else None,
             )
             return [node]

@@ -1151,12 +1189,18 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                     # refresh the target node
                     slot = self.determine_slot(*args)
                     target_node = self.nodes_manager.get_node_from_slot(
-                        slot, self.read_from_replicas and command in READ_COMMANDS
+                        slot,
+                        self.read_from_replicas and command in READ_COMMANDS,
+                        (
+                            self.load_balancing_strategy
+                            if command in READ_COMMANDS
+                            else None
+                        ),
                     )
                     moved = False

                 redis_node = self.get_redis_connection(target_node)
-                connection = get_connection(redis_node
+                connection = get_connection(redis_node)
                 if asking:
                     connection.send_command("ASKING")
                     redis_node.parse_response(connection, "ASKING", **kwargs)
@@ -1213,13 +1257,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             except AskError as e:
                 redirect_addr = get_node_name(host=e.host, port=e.port)
                 asking = True
-            except ClusterDownError as e:
+            except (ClusterDownError, SlotNotCoveredError):
                 # ClusterDownError can occur during a failover and to get
                 # self-healed, we will try to reinitialize the cluster layout
                 # and retry executing the command
+
+                # SlotNotCoveredError can occur when the cluster is not fully
+                # initialized or can be temporary issue.
+                # We will try to reinitialize the cluster topology
+                # and retry executing the command
+
                 time.sleep(0.25)
                 self.nodes_manager.initialize()
-                raise e
+                raise
             except ResponseError:
                 raise
             except Exception as e:
@@ -1232,7 +1282,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):

         raise ClusterError("TTL exhausted.")

-    def close(self):
+    def close(self) -> None:
         try:
             with self._lock:
                 if self.nodes_manager:
@@ -1300,6 +1350,12 @@ class ClusterNode:
             self.redis_connection.close()


+class LoadBalancingStrategy(Enum):
+    ROUND_ROBIN = "round_robin"
+    ROUND_ROBIN_REPLICAS = "round_robin_replicas"
+    RANDOM_REPLICA = "random_replica"
+
+
 class LoadBalancer:
     """
     Round-Robin Load Balancing
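The new LoadBalancingStrategy enum names the three node-selection policies that LoadBalancer.get_server_index implements in the next hunk. A quick sketch of what each value means, based only on the logic visible in this diff:

    from redis.cluster import LoadBalancingStrategy

    # ROUND_ROBIN          -> cycle over the primary and its replicas for a slot
    # ROUND_ROBIN_REPLICAS -> cycle over replicas only (index 0, the primary, is skipped)
    # RANDOM_REPLICA       -> pick a random replica index in [1, list_size - 1]
    for strategy in LoadBalancingStrategy:
        print(strategy.name, "=", strategy.value)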
@@ -1309,15 +1365,38 @@ class LoadBalancer:
         self.primary_to_idx = {}
         self.start_index = start_index

-    def get_server_index(
-
-
-
-
+    def get_server_index(
+        self,
+        primary: str,
+        list_size: int,
+        load_balancing_strategy: LoadBalancingStrategy = LoadBalancingStrategy.ROUND_ROBIN,  # noqa: line too long ignored
+    ) -> int:
+        if load_balancing_strategy == LoadBalancingStrategy.RANDOM_REPLICA:
+            return self._get_random_replica_index(list_size)
+        else:
+            return self._get_round_robin_index(
+                primary,
+                list_size,
+                load_balancing_strategy == LoadBalancingStrategy.ROUND_ROBIN_REPLICAS,
+            )

     def reset(self) -> None:
         self.primary_to_idx.clear()

+    def _get_random_replica_index(self, list_size: int) -> int:
+        return random.randint(1, list_size - 1)
+
+    def _get_round_robin_index(
+        self, primary: str, list_size: int, replicas_only: bool
+    ) -> int:
+        server_index = self.primary_to_idx.setdefault(primary, self.start_index)
+        if replicas_only and server_index == 0:
+            # skip the primary node index
+            server_index = 1
+        # Update the index for the next round
+        self.primary_to_idx[primary] = (server_index + 1) % list_size
+        return server_index
+

 class NodesManager:
     def __init__(
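The helpers above split index selection into a round-robin path (optionally skipping index 0, the primary) and a random-replica path, with the rotating index tracked per primary name. A small sketch of the resulting sequences for a slot served by one primary and two replicas (list_size == 3), assuming the default start_index of 0:

    from redis.cluster import LoadBalancer, LoadBalancingStrategy

    lb = LoadBalancer()

    # Default strategy (ROUND_ROBIN) cycles 0, 1, 2, 0, ... where 0 is the primary.
    print([lb.get_server_index("10.0.0.1:7000", 3) for _ in range(4)])

    lb.reset()

    # ROUND_ROBIN_REPLICAS never returns index 0, so it cycles 1, 2, 1, 2, ...
    print([
        lb.get_server_index(
            "10.0.0.1:7000", 3, LoadBalancingStrategy.ROUND_ROBIN_REPLICAS
        )
        for _ in range(4)
    ])

    # RANDOM_REPLICA returns a random index in [1, 2].
    print(lb.get_server_index("10.0.0.1:7000", 3, LoadBalancingStrategy.RANDOM_REPLICA))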
@@ -1332,6 +1411,7 @@ class NodesManager:
         cache: Optional[CacheInterface] = None,
         cache_config: Optional[CacheConfig] = None,
         cache_factory: Optional[CacheFactoryInterface] = None,
+        event_dispatcher: Optional[EventDispatcher] = None,
         **kwargs,
     ):
         self.nodes_cache = {}
@@ -1353,6 +1433,13 @@ class NodesManager:
         if lock is None:
             lock = threading.Lock()
         self._lock = lock
+        if event_dispatcher is None:
+            self._event_dispatcher = EventDispatcher()
+        else:
+            self._event_dispatcher = event_dispatcher
+        self._credential_provider = self.connection_kwargs.get(
+            "credential_provider", None
+        )
         self.initialize()

     def get_node(self, host=None, port=None, node_name=None):
@@ -1413,7 +1500,21 @@ class NodesManager:
         # Reset moved_exception
         self._moved_exception = None

-    def get_node_from_slot(self, slot, read_from_replicas=False, server_type=None):
+    @deprecated_args(
+        args_to_warn=["server_type"],
+        reason=(
+            "In case you need select some load balancing strategy "
+            "that will use replicas, please set it through 'load_balancing_strategy'"
+        ),
+        version="5.3.0",
+    )
+    def get_node_from_slot(
+        self,
+        slot,
+        read_from_replicas=False,
+        load_balancing_strategy=None,
+        server_type=None,
+    ):
         """
         Gets a node that servers this hash slot
         """
@@ -1428,11 +1529,14 @@ class NodesManager:
                 f'"require_full_coverage={self._require_full_coverage}"'
             )

-        if read_from_replicas is True:
-            # get the server index in a Round-Robin manner
+        if read_from_replicas is True and load_balancing_strategy is None:
+            load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
+        if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+            # get the server index using the strategy defined in load_balancing_strategy
             primary_name = self.slots_cache[slot][0].name
             node_idx = self.read_load_balancer.get_server_index(
-                primary_name, len(self.slots_cache[slot])
+                primary_name, len(self.slots_cache[slot]), load_balancing_strategy
             )
         elif (
             server_type is None
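Taken together, the two hunks above deprecate the server_type argument of NodesManager.get_node_from_slot, add a load_balancing_strategy keyword, and map the legacy read_from_replicas=True flag onto LoadBalancingStrategy.ROUND_ROBIN. A hedged sketch of the new call shape; this goes through the (mostly internal) nodes manager, and the connection details are placeholders:

    from redis.cluster import LoadBalancingStrategy, RedisCluster

    rc = RedisCluster(host="localhost", port=7000)
    slot = rc.keyslot("foo")

    # Passing server_type= now emits a deprecation warning via deprecated_args;
    # the strategy keyword is the supported way to reach replicas.
    node = rc.nodes_manager.get_node_from_slot(
        slot,
        read_from_replicas=False,
        load_balancing_strategy=LoadBalancingStrategy.RANDOM_REPLICA,
    )
    print(node.name, node.server_type)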
@@ -1479,11 +1583,19 @@ class NodesManager:
         """
         This function will create a redis connection to all nodes in :nodes:
         """
+        connection_pools = []
         for node in nodes:
             if node.redis_connection is None:
                 node.redis_connection = self.create_redis_node(
                     host=node.host, port=node.port, **self.connection_kwargs
                 )
+            connection_pools.append(node.redis_connection.connection_pool)
+
+        self._event_dispatcher.dispatch(
+            AfterPooledConnectionsInstantiationEvent(
+                connection_pools, ClientType.SYNC, self._credential_provider
+            )
+        )

     def create_redis_node(self, host, port, **kwargs):
         if self.from_url:
@@ -1613,7 +1725,7 @@ class NodesManager:
                             if len(disagreements) > 5:
                                 raise RedisClusterException(
                                     f"startup_nodes could not agree on a valid "
-                                    f'slots cache: {", ".join(disagreements)}'
+                                    f"slots cache: {', '.join(disagreements)}"
                                 )

             fully_covered = self.check_slots_coverage(tmp_slots)
@@ -1658,7 +1770,7 @@ class NodesManager:
         # If initialize was called after a MovedError, clear it
         self._moved_exception = None

-    def close(self):
+    def close(self) -> None:
         self.default_node = None
         for node in self.nodes_cache.values():
             if node.redis_connection:
@@ -1698,6 +1810,7 @@ class ClusterPubSub(PubSub):
         host=None,
         port=None,
         push_handler_func=None,
+        event_dispatcher: Optional["EventDispatcher"] = None,
         **kwargs,
     ):
         """
@@ -1706,7 +1819,7 @@ class ClusterPubSub(PubSub):
         first command execution. The node will be determined by:
          1. Hashing the channel name in the request to find its keyslot
          2. Selecting a node that handles the keyslot: If read_from_replicas is
-            set to true, a replica can be selected.
+            set to true or load_balancing_strategy is set, a replica can be selected.

         :type redis_cluster: RedisCluster
         :type node: ClusterNode
@@ -1723,10 +1836,15 @@ class ClusterPubSub(PubSub):
         self.cluster = redis_cluster
         self.node_pubsub_mapping = {}
         self._pubsubs_generator = self._pubsubs_generator()
+        if event_dispatcher is None:
+            self._event_dispatcher = EventDispatcher()
+        else:
+            self._event_dispatcher = event_dispatcher
         super().__init__(
             connection_pool=connection_pool,
             encoder=redis_cluster.encoder,
             push_handler_func=push_handler_func,
+            event_dispatcher=self._event_dispatcher,
             **kwargs,
         )

@@ -1797,7 +1915,9 @@ class ClusterPubSub(PubSub):
                 channel = args[1]
                 slot = self.cluster.keyslot(channel)
                 node = self.cluster.nodes_manager.get_node_from_slot(
-                    slot, self.cluster.read_from_replicas
+                    slot,
+                    self.cluster.read_from_replicas,
+                    self.cluster.load_balancing_strategy,
                 )
             else:
                 # Get a random node
@@ -1805,14 +1925,17 @@ class ClusterPubSub(PubSub):
             self.node = node
             redis_connection = self.cluster.get_redis_connection(node)
             self.connection_pool = redis_connection.connection_pool
-            self.connection = self.connection_pool.get_connection(
-                "pubsub", self.shard_hint
-            )
+            self.connection = self.connection_pool.get_connection()
             # register a callback that re-subscribes to any channels we
             # were listening to when we were disconnected
             self.connection.register_connect_callback(self.on_connect)
             if self.push_handler_func is not None and not HIREDIS_AVAILABLE:
                 self.connection._parser.set_pubsub_push_handler(self.push_handler_func)
+            self._event_dispatcher.dispatch(
+                AfterPubSubConnectionInstantiationEvent(
+                    self.connection, self.connection_pool, ClientType.SYNC, self._lock
+                )
+            )
         connection = self.connection
         self._execute(connection, connection.send_command, *args)

@@ -1937,6 +2060,7 @@ class ClusterPipeline(RedisCluster):
         cluster_response_callbacks: Optional[Dict[str, Callable]] = None,
         startup_nodes: Optional[List["ClusterNode"]] = None,
         read_from_replicas: bool = False,
+        load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
         cluster_error_retry_attempts: int = 3,
         reinitialize_steps: int = 5,
         lock=None,
@@ -1952,6 +2076,7 @@ class ClusterPipeline(RedisCluster):
         )
         self.startup_nodes = startup_nodes if startup_nodes else []
         self.read_from_replicas = read_from_replicas
+        self.load_balancing_strategy = load_balancing_strategy
         self.command_flags = self.__class__.COMMAND_FLAGS.copy()
         self.cluster_response_callbacks = cluster_response_callbacks
         self.cluster_error_retry_attempts = cluster_error_retry_attempts
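ClusterPipeline now stores the same load_balancing_strategy as the client that created it (see the pipeline() hunk earlier, which forwards self.load_balancing_strategy). A small sketch; whether a queued command can actually be served by a replica still depends on it being a read command, as in the routing hunks above:

    from redis.cluster import LoadBalancingStrategy, RedisCluster

    rc = RedisCluster(
        host="localhost",
        port=7000,
        load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN,
    )

    # The pipeline created by the client reuses the client's strategy.
    pipe = rc.pipeline()
    pipe.set("k1", "v1")
    pipe.get("k1")
    print(pipe.execute())  # e.g. [True, b'v1']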
@@ -2023,12 +2148,11 @@ class ClusterPipeline(RedisCluster):
         """
         cmd = " ".join(map(safe_str, command))
         msg = (
-            f"Command # {number} ({cmd}) of pipeline "
-            f"caused error: {exception.args[0]}"
+            f"Command # {number} ({cmd}) of pipeline caused error: {exception.args[0]}"
         )
         exception.args = (msg,) + exception.args[1:]

-    def execute(self, raise_on_error=True):
+    def execute(self, raise_on_error: bool = True) -> List[Any]:
         """
         Execute all the commands in the current pipeline
         """
@@ -2162,8 +2286,8 @@ class ClusterPipeline(RedisCluster):
             if node_name not in nodes:
                 redis_node = self.get_redis_connection(node)
                 try:
-                    connection = get_connection(redis_node
-                except ConnectionError:
+                    connection = get_connection(redis_node)
+                except (ConnectionError, TimeoutError):
                     for n in nodes.values():
                         n.connection_pool.release(n.connection)
                         # Connection retries are being handled in the node's