redis 5.3.0b5__py3-none-any.whl → 6.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. redis/__init__.py +2 -11
  2. redis/_parsers/base.py +14 -2
  3. redis/_parsers/resp3.py +2 -2
  4. redis/asyncio/client.py +102 -82
  5. redis/asyncio/cluster.py +147 -102
  6. redis/asyncio/connection.py +77 -24
  7. redis/asyncio/lock.py +26 -5
  8. redis/asyncio/retry.py +12 -0
  9. redis/asyncio/sentinel.py +11 -1
  10. redis/asyncio/utils.py +1 -1
  11. redis/auth/token.py +6 -2
  12. redis/backoff.py +15 -0
  13. redis/client.py +106 -98
  14. redis/cluster.py +208 -79
  15. redis/commands/cluster.py +1 -11
  16. redis/commands/core.py +219 -207
  17. redis/commands/helpers.py +19 -76
  18. redis/commands/json/__init__.py +1 -1
  19. redis/commands/redismodules.py +5 -17
  20. redis/commands/search/aggregation.py +3 -1
  21. redis/commands/search/commands.py +43 -16
  22. redis/commands/search/dialect.py +3 -0
  23. redis/commands/search/profile_information.py +14 -0
  24. redis/commands/search/query.py +5 -1
  25. redis/commands/timeseries/__init__.py +1 -1
  26. redis/commands/vectorset/__init__.py +46 -0
  27. redis/commands/vectorset/commands.py +367 -0
  28. redis/commands/vectorset/utils.py +94 -0
  29. redis/connection.py +78 -29
  30. redis/exceptions.py +4 -1
  31. redis/lock.py +24 -4
  32. redis/ocsp.py +2 -1
  33. redis/retry.py +12 -0
  34. redis/sentinel.py +3 -1
  35. redis/utils.py +114 -1
  36. {redis-5.3.0b5.dist-info → redis-6.0.0.dist-info}/METADATA +57 -23
  37. redis-6.0.0.dist-info/RECORD +78 -0
  38. {redis-5.3.0b5.dist-info → redis-6.0.0.dist-info}/WHEEL +1 -2
  39. redis/commands/graph/__init__.py +0 -263
  40. redis/commands/graph/commands.py +0 -313
  41. redis/commands/graph/edge.py +0 -91
  42. redis/commands/graph/exceptions.py +0 -3
  43. redis/commands/graph/execution_plan.py +0 -211
  44. redis/commands/graph/node.py +0 -88
  45. redis/commands/graph/path.py +0 -78
  46. redis/commands/graph/query_result.py +0 -588
  47. redis-5.3.0b5.dist-info/RECORD +0 -82
  48. redis-5.3.0b5.dist-info/top_level.txt +0 -1
  49. /redis/commands/search/{indexDefinition.py → index_definition.py} +0 -0
  50. {redis-5.3.0b5.dist-info → redis-6.0.0.dist-info/licenses}/LICENSE +0 -0
redis/cluster.py CHANGED
@@ -4,16 +4,17 @@ import sys
  import threading
  import time
  from collections import OrderedDict
+ from enum import Enum
  from typing import Any, Callable, Dict, List, Optional, Tuple, Union

  from redis._parsers import CommandsParser, Encoder
  from redis._parsers.helpers import parse_scan
- from redis.backoff import default_backoff
+ from redis.backoff import ExponentialWithJitterBackoff, NoBackoff
  from redis.cache import CacheConfig, CacheFactory, CacheFactoryInterface, CacheInterface
  from redis.client import CaseInsensitiveDict, PubSub, Redis
  from redis.commands import READ_COMMANDS, RedisClusterCommands
  from redis.commands.helpers import list_or_args
- from redis.connection import ConnectionPool, DefaultParser, parse_url
+ from redis.connection import ConnectionPool, parse_url
  from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
  from redis.event import (
      AfterPooledConnectionsInstantiationEvent,
@@ -24,12 +25,10 @@ from redis.event import (
  from redis.exceptions import (
      AskError,
      AuthenticationError,
-     ClusterCrossSlotError,
      ClusterDownError,
      ClusterError,
      ConnectionError,
      DataError,
-     MasterDownError,
      MovedError,
      RedisClusterException,
      RedisError,
@@ -42,11 +41,13 @@ from redis.lock import Lock
  from redis.retry import Retry
  from redis.utils import (
      HIREDIS_AVAILABLE,
+     deprecated_args,
      dict_merge,
      list_keys_to_dict,
      merge_result,
      safe_str,
      str_if_bytes,
+     truncate_text,
  )


@@ -54,10 +55,13 @@ def get_node_name(host: str, port: Union[str, int]) -> str:
      return f"{host}:{port}"


+ @deprecated_args(
+     allowed_args=["redis_node"],
+     reason="Use get_connection(redis_node) instead",
+     version="5.3.0",
+ )
  def get_connection(redis_node, *args, **options):
-     return redis_node.connection or redis_node.connection_pool.get_connection(
-         args[0], **options
-     )
+     return redis_node.connection or redis_node.connection_pool.get_connection()


  def parse_scan_result(command, res, **options):
@@ -138,7 +142,6 @@ REPLICA = "replica"
  SLOT_ID = "slot-id"

  REDIS_ALLOWED_KEYS = (
-     "charset",
      "connection_class",
      "connection_pool",
      "connection_pool_class",
@@ -148,7 +151,6 @@ REDIS_ALLOWED_KEYS = (
      "decode_responses",
      "encoding",
      "encoding_errors",
-     "errors",
      "host",
      "lib_name",
      "lib_version",
@@ -177,7 +179,7 @@ REDIS_ALLOWED_KEYS = (
      "cache",
      "cache_config",
  )
- KWARGS_DISABLED_KEYS = ("host", "port")
+ KWARGS_DISABLED_KEYS = ("host", "port", "retry")


  def cleanup_kwargs(**kwargs):
@@ -193,20 +195,6 @@ def cleanup_kwargs(**kwargs):
      return connection_kwargs


- class ClusterParser(DefaultParser):
-     EXCEPTION_CLASSES = dict_merge(
-         DefaultParser.EXCEPTION_CLASSES,
-         {
-             "ASK": AskError,
-             "TRYAGAIN": TryAgainError,
-             "MOVED": MovedError,
-             "CLUSTERDOWN": ClusterDownError,
-             "CROSSSLOT": ClusterCrossSlotError,
-             "MASTERDOWN": MasterDownError,
-         },
-     )
-
-
  class AbstractRedisCluster:
      RedisClusterRequestTTL = 16

@@ -300,7 +288,6 @@ class AbstractRedisCluster:
                  "TFUNCTION LIST",
                  "TFCALL",
                  "TFCALLASYNC",
-                 "GRAPH.CONFIG",
                  "LATENCY HISTORY",
                  "LATENCY LATEST",
                  "LATENCY RESET",
@@ -320,7 +307,6 @@ class AbstractRedisCluster:
                  "FUNCTION LIST",
                  "FUNCTION LOAD",
                  "FUNCTION RESTORE",
-                 "REDISGEARS_2.REFRESHCLUSTER",
                  "SCAN",
                  "SCRIPT EXISTS",
                  "SCRIPT FLUSH",
@@ -424,7 +410,12 @@ class AbstractRedisCluster:
      list_keys_to_dict(["SCRIPT FLUSH"], lambda command, res: all(res.values())),
  )

-     ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, ClusterDownError)
+     ERRORS_ALLOW_RETRY = (
+         ConnectionError,
+         TimeoutError,
+         ClusterDownError,
+         SlotNotCoveredError,
+     )

      def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
          """Replace the default cluster node.
@@ -445,7 +436,7 @@ class AbstractRedisCluster:
                  # Choose a primary if the cluster contains different primaries
                  self.nodes_manager.default_node = random.choice(primaries)
              else:
-                 # Otherwise, hoose a primary if the cluster contains different primaries
+                 # Otherwise, choose a primary if the cluster contains different primaries
                  replicas = [node for node in self.get_replicas() if node != curr_node]
                  if replicas:
                      self.nodes_manager.default_node = random.choice(replicas)
@@ -496,6 +487,18 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          """
          return cls(url=url, **kwargs)

+     @deprecated_args(
+         args_to_warn=["read_from_replicas"],
+         reason="Please configure the 'load_balancing_strategy' instead",
+         version="5.3.0",
+     )
+     @deprecated_args(
+         args_to_warn=[
+             "cluster_error_retry_attempts",
+         ],
+         reason="Please configure the 'retry' object instead",
+         version="6.0.0",
+     )
      def __init__(
          self,
          host: Optional[str] = None,
@@ -503,9 +506,10 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          startup_nodes: Optional[List["ClusterNode"]] = None,
          cluster_error_retry_attempts: int = 3,
          retry: Optional["Retry"] = None,
-         require_full_coverage: bool = False,
+         require_full_coverage: bool = True,
          reinitialize_steps: int = 5,
          read_from_replicas: bool = False,
+         load_balancing_strategy: Optional["LoadBalancingStrategy"] = None,
          dynamic_startup_nodes: bool = True,
          url: Optional[str] = None,
          address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
@@ -534,11 +538,16 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              cluster client. If not all slots are covered, RedisClusterException
              will be thrown.
          :param read_from_replicas:
+             @deprecated - please use load_balancing_strategy instead
              Enable read from replicas in READONLY mode. You can read possibly
              stale data.
              When set to true, read commands will be assigned between the
              primary and its replications in a Round-Robin manner.
-         :param dynamic_startup_nodes:
+         :param load_balancing_strategy:
+             Enable read from replicas in READONLY mode and defines the load balancing
+             strategy that will be used for cluster node selection.
+             The data read from replicas is eventually consistent with the data in primary nodes.
+         :param dynamic_startup_nodes:
              Set the RedisCluster's startup nodes to all of the discovered nodes.
              If true (default value), the cluster's discovered nodes will be used to
              determine the cluster nodes-slots mapping in the next topology refresh.
@@ -547,9 +556,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              If you use dynamic DNS endpoints for startup nodes but CLUSTER SLOTS lists
              specific IP addresses, it is best to set it to false.
          :param cluster_error_retry_attempts:
+             @deprecated - Please configure the 'retry' object instead
+             In case 'retry' object is set - this argument is ignored!
+
              Number of times to retry before raising an error when
-             :class:`~.TimeoutError` or :class:`~.ConnectionError` or
+             :class:`~.TimeoutError` or :class:`~.ConnectionError`, :class:`~.SlotNotCoveredError` or
              :class:`~.ClusterDownError` are encountered
+         :param retry:
+             A retry object that defines the retry strategy and the number of
+             retries for the cluster client.
+             In current implementation for the cluster client (starting form redis-py version 6.0.0)
+             the retry object is not yet fully utilized, instead it is used just to determine
+             the number of retries for the cluster client.
+             In the future releases the retry object will be used to handle the cluster client retries!
          :param reinitialize_steps:
              Specifies the number of MOVED errors that need to occur before
              reinitializing the whole cluster topology. If a MOVED error occurs
@@ -569,7 +588,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):

          :**kwargs:
              Extra arguments that will be sent into Redis instance when created
-             (See Official redis-py doc for supported kwargs
+             (See Official redis-py doc for supported kwargs - the only limitation
+             is that you can't provide 'retry' object as part of kwargs.
              [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
              Some kwargs are not supported and will raise a
              RedisClusterException:
@@ -584,6 +604,15 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                  "Argument 'db' is not possible to use in cluster mode"
              )

+         if "retry" in kwargs:
+             # Argument 'retry' is not possible to be used in kwargs when in cluster mode
+             # the kwargs are set to the lower level connections to the cluster nodes
+             # and there we provide retry configuration without retries allowed.
+             # The retries should be handled on cluster client level.
+             raise RedisClusterException(
+                 "The 'retry' argument cannot be used in kwargs when running in cluster mode."
+             )
+
          # Get the startup node/s
          from_url = False
          if url is not None:
@@ -626,9 +655,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          kwargs = cleanup_kwargs(**kwargs)
          if retry:
              self.retry = retry
-             kwargs.update({"retry": self.retry})
          else:
-             kwargs.update({"retry": Retry(default_backoff(), 0)})
+             self.retry = Retry(
+                 backoff=ExponentialWithJitterBackoff(base=1, cap=10),
+                 retries=cluster_error_retry_attempts,
+             )

          self.encoder = Encoder(
              kwargs.get("encoding", "utf-8"),
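The hunk above shows cluster_error_retry_attempts giving way to a client-level retry object. A minimal usage sketch, assuming only the public names visible in this diff (the host and port values are placeholders):

    from redis.backoff import ExponentialWithJitterBackoff
    from redis.cluster import RedisCluster
    from redis.retry import Retry

    # Instead of the deprecated cluster_error_retry_attempts=3, pass a Retry object;
    # per the docstring above, 6.0.0 currently only consumes its retry count.
    retry = Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3)
    rc = RedisCluster(host="localhost", port=7000, retry=retry)

Note that passing 'retry' inside **kwargs now raises RedisClusterException; it must be given as the top-level argument shown here.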
@@ -639,10 +670,10 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          if (cache_config or cache) and protocol not in [3, "3"]:
              raise RedisError("Client caching is only supported with RESP version 3")

-         self.cluster_error_retry_attempts = cluster_error_retry_attempts
          self.command_flags = self.__class__.COMMAND_FLAGS.copy()
          self.node_flags = self.__class__.NODE_FLAGS.copy()
          self.read_from_replicas = read_from_replicas
+         self.load_balancing_strategy = load_balancing_strategy
          self.reinitialize_counter = 0
          self.reinitialize_steps = reinitialize_steps
          if event_dispatcher is None:
@@ -676,7 +707,10 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          self.close()

      def __del__(self):
-         self.close()
+         try:
+             self.close()
+         except Exception:
+             pass

      def disconnect_connection_pools(self):
          for node in self.get_nodes():
@@ -692,10 +726,9 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          Initialize the connection, authenticate and select a database and send
          READONLY if it is set during object initialization.
          """
-         connection.set_parser(ClusterParser)
          connection.on_connect()

-         if self.read_from_replicas:
+         if self.read_from_replicas or self.load_balancing_strategy:
              # Sending READONLY command to server to configure connection as
              # readonly. Since each cluster node may change its server type due
              # to a failover, we should establish a READONLY connection
@@ -767,13 +800,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          self.nodes_manager.default_node = node
          return True

-     def get_retry(self) -> Optional["Retry"]:
-         return self.retry
-
-     def set_retry(self, retry: "Retry") -> None:
+     def set_retry(self, retry: Retry) -> None:
          self.retry = retry
-         for node in self.get_nodes():
-             node.redis_connection.set_retry(retry)

      def monitor(self, target_node=None):
          """
@@ -820,9 +848,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              startup_nodes=self.nodes_manager.startup_nodes,
              result_callbacks=self.result_callbacks,
              cluster_response_callbacks=self.cluster_response_callbacks,
-             cluster_error_retry_attempts=self.cluster_error_retry_attempts,
+             cluster_error_retry_attempts=self.retry.get_retries(),
              read_from_replicas=self.read_from_replicas,
+             load_balancing_strategy=self.load_balancing_strategy,
              reinitialize_steps=self.reinitialize_steps,
+             retry=self.retry,
              lock=self._lock,
          )

@@ -835,6 +865,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          blocking_timeout=None,
          lock_class=None,
          thread_local=True,
+         raise_on_release_error: bool = True,
      ):
          """
          Return a new Lock object using key ``name`` that mimics
@@ -881,6 +912,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          thread-1 would see the token value as "xyz" and would be
          able to successfully release the thread-2's lock.

+         ``raise_on_release_error`` indicates whether to raise an exception when
+         the lock is no longer owned when exiting the context manager. By default,
+         this is True, meaning an exception will be raised. If False, the warning
+         will be logged and the exception will be suppressed.
+
          In some use cases it's necessary to disable thread local storage. For
          example, if you have code where one thread acquires a lock and passes
          that lock instance to a worker thread to release later. If thread
@@ -898,6 +934,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              blocking=blocking,
              blocking_timeout=blocking_timeout,
              thread_local=thread_local,
+             raise_on_release_error=raise_on_release_error,
          )

      def set_response_callback(self, command, callback):
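A brief sketch of the new ``raise_on_release_error`` flag on cluster locks, based on the lock() signature and docstring above (illustrative only; ``rc`` is a connected RedisCluster client and ``do_work()`` is a placeholder):

    # If the lock is lost (e.g. its timeout expires) before the block exits,
    # the release error is logged as a warning instead of being raised.
    with rc.lock("resource-key", timeout=5, raise_on_release_error=False):
        do_work()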
@@ -939,7 +976,9 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              # get the node that holds the key's slot
              slot = self.determine_slot(*args)
              node = self.nodes_manager.get_node_from_slot(
-                 slot, self.read_from_replicas and command in READ_COMMANDS
+                 slot,
+                 self.read_from_replicas and command in READ_COMMANDS,
+                 self.load_balancing_strategy if command in READ_COMMANDS else None,
              )
              return [node]

@@ -1075,8 +1114,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          """
          Wrapper for ERRORS_ALLOW_RETRY error handling.

-         It will try the number of times specified by the config option
-         "self.cluster_error_retry_attempts" which defaults to 3 unless manually
+         It will try the number of times specified by the retries property from
+         config option "self.retry" which defaults to 3 unless manually
          configured.

          If it reaches the number of times, the command will raise the exception
@@ -1102,9 +1141,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
          # execution since the nodes may not be valid anymore after the tables
          # were reinitialized. So in case of passed target nodes,
          # retry_attempts will be set to 0.
-         retry_attempts = (
-             0 if target_nodes_specified else self.cluster_error_retry_attempts
-         )
+         retry_attempts = 0 if target_nodes_specified else self.retry.get_retries()
          # Add one for the first execution
          execute_attempts = 1 + retry_attempts
          for _ in range(execute_attempts):
@@ -1163,12 +1200,16 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                      # refresh the target node
                      slot = self.determine_slot(*args)
                      target_node = self.nodes_manager.get_node_from_slot(
-                         slot, self.read_from_replicas and command in READ_COMMANDS
+                         slot,
+                         self.read_from_replicas and command in READ_COMMANDS,
+                         self.load_balancing_strategy
+                         if command in READ_COMMANDS
+                         else None,
                      )
                      moved = False

                  redis_node = self.get_redis_connection(target_node)
-                 connection = get_connection(redis_node, *args, **kwargs)
+                 connection = get_connection(redis_node)
                  if asking:
                      connection.send_command("ASKING")
                      redis_node.parse_response(connection, "ASKING", **kwargs)
@@ -1225,13 +1266,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              except AskError as e:
                  redirect_addr = get_node_name(host=e.host, port=e.port)
                  asking = True
-             except ClusterDownError as e:
+             except (ClusterDownError, SlotNotCoveredError):
                  # ClusterDownError can occur during a failover and to get
                  # self-healed, we will try to reinitialize the cluster layout
                  # and retry executing the command
+
+                 # SlotNotCoveredError can occur when the cluster is not fully
+                 # initialized or can be temporary issue.
+                 # We will try to reinitialize the cluster topology
+                 # and retry executing the command
+
                  time.sleep(0.25)
                  self.nodes_manager.initialize()
-                 raise e
+                 raise
              except ResponseError:
                  raise
              except Exception as e:
@@ -1308,8 +1355,18 @@ class ClusterNode:
          return isinstance(obj, ClusterNode) and obj.name == self.name

      def __del__(self):
-         if self.redis_connection is not None:
-             self.redis_connection.close()
+         try:
+             if self.redis_connection is not None:
+                 self.redis_connection.close()
+         except Exception:
+             # Ignore errors when closing the connection
+             pass
+
+
+ class LoadBalancingStrategy(Enum):
+     ROUND_ROBIN = "round_robin"
+     ROUND_ROBIN_REPLICAS = "round_robin_replicas"
+     RANDOM_REPLICA = "random_replica"


  class LoadBalancer:
@@ -1321,15 +1378,38 @@ class LoadBalancer:
          self.primary_to_idx = {}
          self.start_index = start_index

-     def get_server_index(self, primary: str, list_size: int) -> int:
-         server_index = self.primary_to_idx.setdefault(primary, self.start_index)
-         # Update the index
-         self.primary_to_idx[primary] = (server_index + 1) % list_size
-         return server_index
+     def get_server_index(
+         self,
+         primary: str,
+         list_size: int,
+         load_balancing_strategy: LoadBalancingStrategy = LoadBalancingStrategy.ROUND_ROBIN,
+     ) -> int:
+         if load_balancing_strategy == LoadBalancingStrategy.RANDOM_REPLICA:
+             return self._get_random_replica_index(list_size)
+         else:
+             return self._get_round_robin_index(
+                 primary,
+                 list_size,
+                 load_balancing_strategy == LoadBalancingStrategy.ROUND_ROBIN_REPLICAS,
+             )

      def reset(self) -> None:
          self.primary_to_idx.clear()

+     def _get_random_replica_index(self, list_size: int) -> int:
+         return random.randint(1, list_size - 1)
+
+     def _get_round_robin_index(
+         self, primary: str, list_size: int, replicas_only: bool
+     ) -> int:
+         server_index = self.primary_to_idx.setdefault(primary, self.start_index)
+         if replicas_only and server_index == 0:
+             # skip the primary node index
+             server_index = 1
+         # Update the index for the next round
+         self.primary_to_idx[primary] = (server_index + 1) % list_size
+         return server_index
+


  class NodesManager:
@@ -1433,7 +1513,21 @@ class NodesManager:
          # Reset moved_exception
          self._moved_exception = None

-     def get_node_from_slot(self, slot, read_from_replicas=False, server_type=None):
+     @deprecated_args(
+         args_to_warn=["server_type"],
+         reason=(
+             "In case you need select some load balancing strategy "
+             "that will use replicas, please set it through 'load_balancing_strategy'"
+         ),
+         version="5.3.0",
+     )
+     def get_node_from_slot(
+         self,
+         slot,
+         read_from_replicas=False,
+         load_balancing_strategy=None,
+         server_type=None,
+     ):
          """
          Gets a node that servers this hash slot
          """
@@ -1448,11 +1542,14 @@ class NodesManager:
                  f'"require_full_coverage={self._require_full_coverage}"'
              )

-         if read_from_replicas is True:
-             # get the server index in a Round-Robin manner
+         if read_from_replicas is True and load_balancing_strategy is None:
+             load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
+         if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+             # get the server index using the strategy defined in load_balancing_strategy
              primary_name = self.slots_cache[slot][0].name
              node_idx = self.read_load_balancer.get_server_index(
-                 primary_name, len(self.slots_cache[slot])
+                 primary_name, len(self.slots_cache[slot]), load_balancing_strategy
              )
          elif (
              server_type is None
@@ -1514,17 +1611,32 @@ class NodesManager:
          )

      def create_redis_node(self, host, port, **kwargs):
+         # We are configuring the connection pool not to retry
+         # connections on lower level clients to avoid retrying
+         # connections to nodes that are not reachable
+         # and to avoid blocking the connection pool.
+         # The only error that will have some handling in the lower
+         # level clients is ConnectionError which will trigger disconnection
+         # of the socket.
+         # The retries will be handled on cluster client level
+         # where we will have proper handling of the cluster topology
+         node_retry_config = Retry(
+             backoff=NoBackoff(), retries=0, supported_errors=(ConnectionError,)
+         )
+
          if self.from_url:
              # Create a redis node with a costumed connection pool
              kwargs.update({"host": host})
              kwargs.update({"port": port})
              kwargs.update({"cache": self._cache})
+             kwargs.update({"retry": node_retry_config})
              r = Redis(connection_pool=self.connection_pool_class(**kwargs))
          else:
              r = Redis(
                  host=host,
                  port=port,
                  cache=self._cache,
+                 retry=node_retry_config,
                  **kwargs,
              )
          return r
@@ -1641,7 +1753,7 @@ class NodesManager:
                              if len(disagreements) > 5:
                                  raise RedisClusterException(
                                      f"startup_nodes could not agree on a valid "
-                                     f'slots cache: {", ".join(disagreements)}'
+                                     f"slots cache: {', '.join(disagreements)}"
                                  )

              fully_covered = self.check_slots_coverage(tmp_slots)
@@ -1735,7 +1847,7 @@ class ClusterPubSub(PubSub):
          first command execution. The node will be determined by:
           1. Hashing the channel name in the request to find its keyslot
           2. Selecting a node that handles the keyslot: If read_from_replicas is
-             set to true, a replica can be selected.
+             set to true or load_balancing_strategy is set, a replica can be selected.

          :type redis_cluster: RedisCluster
          :type node: ClusterNode
@@ -1831,7 +1943,9 @@ class ClusterPubSub(PubSub):
                  channel = args[1]
                  slot = self.cluster.keyslot(channel)
                  node = self.cluster.nodes_manager.get_node_from_slot(
-                     slot, self.cluster.read_from_replicas
+                     slot,
+                     self.cluster.read_from_replicas,
+                     self.cluster.load_balancing_strategy,
                  )
              else:
                  # Get a random node
@@ -1839,9 +1953,7 @@ class ClusterPubSub(PubSub):
              self.node = node
              redis_connection = self.cluster.get_redis_connection(node)
              self.connection_pool = redis_connection.connection_pool
-             self.connection = self.connection_pool.get_connection(
-                 "pubsub", self.shard_hint
-             )
+             self.connection = self.connection_pool.get_connection()
              # register a callback that re-subscribes to any channels we
              # were listening to when we were disconnected
              self.connection.register_connect_callback(self.on_connect)
@@ -1968,6 +2080,13 @@ class ClusterPipeline(RedisCluster):
          TryAgainError,
      )

+     @deprecated_args(
+         args_to_warn=[
+             "cluster_error_retry_attempts",
+         ],
+         reason="Please configure the 'retry' object instead",
+         version="6.0.0",
+     )
      def __init__(
          self,
          nodes_manager: "NodesManager",
@@ -1976,8 +2095,10 @@ class ClusterPipeline(RedisCluster):
          cluster_response_callbacks: Optional[Dict[str, Callable]] = None,
          startup_nodes: Optional[List["ClusterNode"]] = None,
          read_from_replicas: bool = False,
+         load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
          cluster_error_retry_attempts: int = 3,
          reinitialize_steps: int = 5,
+         retry: Optional[Retry] = None,
          lock=None,
          **kwargs,
      ):
@@ -1991,11 +2112,19 @@ class ClusterPipeline(RedisCluster):
          )
          self.startup_nodes = startup_nodes if startup_nodes else []
          self.read_from_replicas = read_from_replicas
+         self.load_balancing_strategy = load_balancing_strategy
          self.command_flags = self.__class__.COMMAND_FLAGS.copy()
          self.cluster_response_callbacks = cluster_response_callbacks
-         self.cluster_error_retry_attempts = cluster_error_retry_attempts
          self.reinitialize_counter = 0
          self.reinitialize_steps = reinitialize_steps
+         if retry is not None:
+             self.retry = retry
+         else:
+             self.retry = Retry(
+                 backoff=ExponentialWithJitterBackoff(base=1, cap=10),
+                 retries=self.cluster_error_retry_attempts,
+             )
+
          self.encoder = Encoder(
              kwargs.get("encoding", "utf-8"),
              kwargs.get("encoding_errors", "strict"),
@@ -2062,7 +2191,7 @@ class ClusterPipeline(RedisCluster):
          """
          cmd = " ".join(map(safe_str, command))
          msg = (
-             f"Command # {number} ({cmd}) of pipeline "
+             f"Command # {number} ({truncate_text(cmd)}) of pipeline "
              f"caused error: {exception.args[0]}"
          )
          exception.args = (msg,) + exception.args[1:]
@@ -2121,7 +2250,7 @@ class ClusterPipeline(RedisCluster):
          - refereh_table_asap set to True

          It will try the number of times specified by
-         the config option "self.cluster_error_retry_attempts"
+         the retries in config option "self.retry"
          which defaults to 3 unless manually configured.

          If it reaches the number of times, the command will
@@ -2129,7 +2258,7 @@ class ClusterPipeline(RedisCluster):
          """
          if not stack:
              return []
-         retry_attempts = self.cluster_error_retry_attempts
+         retry_attempts = self.retry.get_retries()
          while True:
              try:
                  return self._send_cluster_commands(
@@ -2137,7 +2266,7 @@ class ClusterPipeline(RedisCluster):
                      raise_on_error=raise_on_error,
                      allow_redirections=allow_redirections,
                  )
-             except (ClusterDownError, ConnectionError) as e:
+             except RedisCluster.ERRORS_ALLOW_RETRY as e:
                  if retry_attempts > 0:
                      # Try again with the new cluster setup. All other errors
                      # should be raised.
@@ -2201,8 +2330,8 @@ class ClusterPipeline(RedisCluster):
              if node_name not in nodes:
                  redis_node = self.get_redis_connection(node)
                  try:
-                     connection = get_connection(redis_node, c.args)
-                 except ConnectionError:
+                     connection = get_connection(redis_node)
+                 except (ConnectionError, TimeoutError):
                      for n in nodes.values():
                          n.connection_pool.release(n.connection)
                      # Connection retries are being handled in the node's