redis-5.3.0b5-py3-none-any.whl → redis-6.0.0b2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. redis/__init__.py +2 -11
  2. redis/_parsers/base.py +14 -2
  3. redis/asyncio/client.py +27 -14
  4. redis/asyncio/cluster.py +85 -59
  5. redis/asyncio/connection.py +76 -23
  6. redis/asyncio/lock.py +26 -5
  7. redis/asyncio/sentinel.py +11 -1
  8. redis/asyncio/utils.py +1 -1
  9. redis/auth/token.py +6 -2
  10. redis/backoff.py +15 -0
  11. redis/client.py +23 -14
  12. redis/cluster.py +112 -48
  13. redis/commands/cluster.py +1 -11
  14. redis/commands/core.py +219 -207
  15. redis/commands/helpers.py +0 -70
  16. redis/commands/redismodules.py +5 -17
  17. redis/commands/search/aggregation.py +3 -1
  18. redis/commands/search/commands.py +41 -14
  19. redis/commands/search/dialect.py +3 -0
  20. redis/commands/search/profile_information.py +14 -0
  21. redis/commands/search/query.py +5 -1
  22. redis/commands/vectorset/__init__.py +46 -0
  23. redis/commands/vectorset/commands.py +367 -0
  24. redis/commands/vectorset/utils.py +94 -0
  25. redis/connection.py +76 -27
  26. redis/exceptions.py +4 -1
  27. redis/lock.py +24 -4
  28. redis/ocsp.py +2 -1
  29. redis/sentinel.py +3 -1
  30. redis/utils.py +114 -1
  31. {redis-5.3.0b5.dist-info → redis-6.0.0b2.dist-info}/METADATA +57 -23
  32. {redis-5.3.0b5.dist-info → redis-6.0.0b2.dist-info}/RECORD +35 -39
  33. {redis-5.3.0b5.dist-info → redis-6.0.0b2.dist-info}/WHEEL +1 -2
  34. redis/commands/graph/__init__.py +0 -263
  35. redis/commands/graph/commands.py +0 -313
  36. redis/commands/graph/edge.py +0 -91
  37. redis/commands/graph/exceptions.py +0 -3
  38. redis/commands/graph/execution_plan.py +0 -211
  39. redis/commands/graph/node.py +0 -88
  40. redis/commands/graph/path.py +0 -78
  41. redis/commands/graph/query_result.py +0 -588
  42. redis-5.3.0b5.dist-info/top_level.txt +0 -1
  43. /redis/commands/search/{indexDefinition.py → index_definition.py} +0 -0
  44. {redis-5.3.0b5.dist-info → redis-6.0.0b2.dist-info/licenses}/LICENSE +0 -0
redis/__init__.py CHANGED
@@ -1,5 +1,3 @@
-from importlib import metadata
-
 from redis import asyncio  # noqa
 from redis.backoff import default_backoff
 from redis.client import Redis, StrictRedis
@@ -44,16 +42,9 @@ def int_or_str(value):
     return value


-try:
-    __version__ = metadata.version("redis")
-except metadata.PackageNotFoundError:
-    __version__ = "99.99.99"
-
+__version__ = "6.0.0b2"
+VERSION = tuple(map(int_or_str, __version__.split(".")))

-try:
-    VERSION = tuple(map(int_or_str, __version__.split(".")))
-except AttributeError:
-    VERSION = tuple([99, 99, 99])

 __all__ = [
     "AuthenticationError",
redis/_parsers/base.py CHANGED
@@ -9,26 +9,32 @@ else:
     from async_timeout import timeout as async_timeout

 from ..exceptions import (
+    AskError,
     AuthenticationError,
     AuthenticationWrongNumberOfArgsError,
     BusyLoadingError,
+    ClusterCrossSlotError,
+    ClusterDownError,
     ConnectionError,
     ExecAbortError,
+    MasterDownError,
     ModuleError,
+    MovedError,
     NoPermissionError,
     NoScriptError,
     OutOfMemoryError,
     ReadOnlyError,
     RedisError,
     ResponseError,
+    TryAgainError,
 )
 from ..typing import EncodableT
 from .encoders import Encoder
 from .socket import SERVER_CLOSED_CONNECTION_ERROR, SocketBuffer

-MODULE_LOAD_ERROR = "Error loading the extension. " "Please check the server logs."
+MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
 NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
-MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not " "possible."
+MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
 MODULE_EXPORTS_DATA_TYPES_ERROR = (
     "Error unloading module: the module "
     "exports one or more module-side data "
@@ -72,6 +78,12 @@ class BaseParser(ABC):
         "READONLY": ReadOnlyError,
         "NOAUTH": AuthenticationError,
         "NOPERM": NoPermissionError,
+        "ASK": AskError,
+        "TRYAGAIN": TryAgainError,
+        "MOVED": MovedError,
+        "CLUSTERDOWN": ClusterDownError,
+        "CROSSSLOT": ClusterCrossSlotError,
+        "MASTERDOWN": MasterDownError,
     }

     @classmethod
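Folding the cluster redirect prefixes into BaseParser.EXCEPTION_CLASSES means the base parser can map replies such as MOVED or ASK straight to their exception types, which is what lets redis/asyncio/cluster.py drop its ClusterParser subclass further down in this diff. A rough illustration of that prefix lookup (the parse_error helper below is illustrative only, not the library's implementation):

from redis.exceptions import AskError, MovedError

# subset of the mapping added in the hunk above, for illustration only
EXCEPTION_CLASSES = {"MOVED": MovedError, "ASK": AskError}

def parse_error(response: str) -> Exception:
    # split the error prefix ("MOVED", "ASK", ...) from the payload
    prefix, _, rest = response.partition(" ")
    exception_class = EXCEPTION_CLASSES.get(prefix)
    return exception_class(rest) if exception_class else Exception(response)

err = parse_error("MOVED 3999 127.0.0.1:6381")
print(type(err).__name__)  # MovedError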
redis/asyncio/client.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import copy
 import inspect
 import re
-import ssl
 import warnings
 from typing import (
     TYPE_CHECKING,
@@ -72,13 +71,21 @@ from redis.exceptions import (
 from redis.typing import ChannelT, EncodableT, KeyT
 from redis.utils import (
     HIREDIS_AVAILABLE,
+    SSL_AVAILABLE,
     _set_info_logger,
     deprecated_function,
     get_lib_version,
     safe_str,
     str_if_bytes,
+    truncate_text,
 )

+if TYPE_CHECKING and SSL_AVAILABLE:
+    from ssl import TLSVersion, VerifyMode
+else:
+    TLSVersion = None
+    VerifyMode = None
+
 PubSubHandler = Callable[[Dict[str, str]], Awaitable[None]]
 _KeyT = TypeVar("_KeyT", bound=KeyT)
 _ArgT = TypeVar("_ArgT", KeyT, EncodableT)
@@ -222,11 +229,11 @@ class Redis(
         ssl: bool = False,
         ssl_keyfile: Optional[str] = None,
         ssl_certfile: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, VerifyMode] = "required",
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
         ssl_check_hostname: bool = False,
-        ssl_min_version: Optional[ssl.TLSVersion] = None,
+        ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
         max_connections: Optional[int] = None,
         single_connection_client: bool = False,
@@ -375,7 +382,7 @@ class Redis(
         if self.single_connection_client:
             async with self._single_conn_lock:
                 if self.connection is None:
-                    self.connection = await self.connection_pool.get_connection("_")
+                    self.connection = await self.connection_pool.get_connection()

                     self._event_dispatcher.dispatch(
                         AfterSingleConnectionInstantiationEvent(
@@ -478,6 +485,7 @@ class Redis(
         blocking_timeout: Optional[float] = None,
         lock_class: Optional[Type[Lock]] = None,
         thread_local: bool = True,
+        raise_on_release_error: bool = True,
     ) -> Lock:
         """
         Return a new Lock object using key ``name`` that mimics
@@ -524,6 +532,11 @@ class Redis(
         thread-1 would see the token value as "xyz" and would be
         able to successfully release the thread-2's lock.

+        ``raise_on_release_error`` indicates whether to raise an exception when
+        the lock is no longer owned when exiting the context manager. By default,
+        this is True, meaning an exception will be raised. If False, the warning
+        will be logged and the exception will be suppressed.
+
         In some use cases it's necessary to disable thread local storage. For
         example, if you have code where one thread acquires a lock and passes
         that lock instance to a worker thread to release later. If thread
@@ -541,6 +554,7 @@ class Redis(
             blocking=blocking,
             blocking_timeout=blocking_timeout,
             thread_local=thread_local,
+            raise_on_release_error=raise_on_release_error,
         )

     def pubsub(self, **kwargs) -> "PubSub":
@@ -638,7 +652,7 @@ class Redis(
         await self.initialize()
         pool = self.connection_pool
         command_name = args[0]
-        conn = self.connection or await pool.get_connection(command_name, **options)
+        conn = self.connection or await pool.get_connection()

         if self.single_connection_client:
             await self._single_conn_lock.acquire()
@@ -712,7 +726,7 @@ class Monitor:

     async def connect(self):
         if self.connection is None:
-            self.connection = await self.connection_pool.get_connection("MONITOR")
+            self.connection = await self.connection_pool.get_connection()

     async def __aenter__(self):
         await self.connect()
@@ -900,9 +914,7 @@ class PubSub:
         Ensure that the PubSub is connected
         """
         if self.connection is None:
-            self.connection = await self.connection_pool.get_connection(
-                "pubsub", self.shard_hint
-            )
+            self.connection = await self.connection_pool.get_connection()
             # register a callback that re-subscribes to any channels we
             # were listening to when we were disconnected
             self.connection.register_connect_callback(self.on_connect)
@@ -1370,9 +1382,7 @@ class Pipeline(Redis):  # lgtm [py/init-calls-subclass]
         conn = self.connection
         # if this is the first call, we need a connection
         if not conn:
-            conn = await self.connection_pool.get_connection(
-                command_name, self.shard_hint
-            )
+            conn = await self.connection_pool.get_connection()
             self.connection = conn

         return await conn.retry.call_with_retry(
@@ -1505,7 +1515,10 @@ class Pipeline(Redis):  # lgtm [py/init-calls-subclass]
         self, exception: Exception, number: int, command: Iterable[object]
     ) -> None:
         cmd = " ".join(map(safe_str, command))
-        msg = f"Command # {number} ({cmd}) of pipeline caused error: {exception.args}"
+        msg = (
+            f"Command # {number} ({truncate_text(cmd)}) "
+            "of pipeline caused error: {exception.args}"
+        )
         exception.args = (msg,) + exception.args[1:]

     async def parse_response(
@@ -1568,7 +1581,7 @@ class Pipeline(Redis):  # lgtm [py/init-calls-subclass]

         conn = self.connection
         if not conn:
-            conn = await self.connection_pool.get_connection("MULTI", self.shard_hint)
+            conn = await self.connection_pool.get_connection()
             # assign to self.connection so reset() releases the connection
             # back to the pool after we're done
             self.connection = conn
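Two themes of this file's changes are visible above: the connection pool's get_connection() is now called without a command name, and Redis.lock() gains a raise_on_release_error flag. A hedged usage sketch of the new flag (host, port, and key name are placeholders, and a reachable Redis server is assumed):

import asyncio
import redis.asyncio as redis

async def main():
    client = redis.Redis(host="localhost", port=6379)
    # With raise_on_release_error=False, a lock that expired or changed owners
    # before release is logged on context-manager exit instead of raising.
    async with client.lock("resource:1", timeout=5, raise_on_release_error=False):
        ...  # critical section
    await client.aclose()

asyncio.run(main())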
redis/asyncio/cluster.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import collections
 import random
 import socket
-import ssl
 import warnings
 from typing import (
     Any,
@@ -26,7 +25,7 @@ from redis._parsers.helpers import (
     _RedisCallbacksRESP3,
 )
 from redis.asyncio.client import ResponseCallbackT
-from redis.asyncio.connection import Connection, DefaultParser, SSLConnection, parse_url
+from redis.asyncio.connection import Connection, SSLConnection, parse_url
 from redis.asyncio.lock import Lock
 from redis.asyncio.retry import Retry
 from redis.auth.token import TokenInterface
@@ -39,6 +38,7 @@ from redis.cluster import (
     SLOT_ID,
     AbstractRedisCluster,
     LoadBalancer,
+    LoadBalancingStrategy,
     block_pipeline_command,
     get_node_name,
     parse_cluster_slots,
@@ -50,12 +50,10 @@ from redis.event import AfterAsyncClusterInstantiationEvent, EventDispatcher
 from redis.exceptions import (
     AskError,
     BusyLoadingError,
-    ClusterCrossSlotError,
     ClusterDownError,
     ClusterError,
     ConnectionError,
     DataError,
-    MasterDownError,
     MaxConnectionsError,
     MovedError,
     RedisClusterException,
@@ -67,32 +65,26 @@ from redis.exceptions import (
 )
 from redis.typing import AnyKeyT, EncodableT, KeyT
 from redis.utils import (
+    SSL_AVAILABLE,
+    deprecated_args,
     deprecated_function,
-    dict_merge,
     get_lib_version,
     safe_str,
     str_if_bytes,
+    truncate_text,
 )

+if SSL_AVAILABLE:
+    from ssl import TLSVersion, VerifyMode
+else:
+    TLSVersion = None
+    VerifyMode = None
+
 TargetNodesT = TypeVar(
     "TargetNodesT", str, "ClusterNode", List["ClusterNode"], Dict[Any, "ClusterNode"]
 )


-class ClusterParser(DefaultParser):
-    EXCEPTION_CLASSES = dict_merge(
-        DefaultParser.EXCEPTION_CLASSES,
-        {
-            "ASK": AskError,
-            "CLUSTERDOWN": ClusterDownError,
-            "CROSSSLOT": ClusterCrossSlotError,
-            "MASTERDOWN": MasterDownError,
-            "MOVED": MovedError,
-            "TRYAGAIN": TryAgainError,
-        },
-    )
-
-
 class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands):
     """
     Create a new RedisCluster client.
@@ -133,9 +125,15 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         | See:
           https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters
     :param read_from_replicas:
-        | Enable read from replicas in READONLY mode. You can read possibly stale data.
+        | @deprecated - please use load_balancing_strategy instead
+        | Enable read from replicas in READONLY mode.
           When set to true, read commands will be assigned between the primary and
           its replications in a Round-Robin manner.
+          The data read from replicas is eventually consistent with the data in primary nodes.
+    :param load_balancing_strategy:
+        | Enable read from replicas in READONLY mode and defines the load balancing
+          strategy that will be used for cluster node selection.
+          The data read from replicas is eventually consistent with the data in primary nodes.
     :param reinitialize_steps:
         | Specifies the number of MOVED errors that need to occur before reinitializing
           the whole cluster topology. If a MOVED error occurs and the cluster does not
@@ -228,6 +226,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         "result_callbacks",
     )

+    @deprecated_args(
+        args_to_warn=["read_from_replicas"],
+        reason="Please configure the 'load_balancing_strategy' instead",
+        version="5.0.3",
+    )
     def __init__(
         self,
         host: Optional[str] = None,
@@ -236,6 +239,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         startup_nodes: Optional[List["ClusterNode"]] = None,
         require_full_coverage: bool = True,
         read_from_replicas: bool = False,
+        load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
         reinitialize_steps: int = 5,
         cluster_error_retry_attempts: int = 3,
         connection_error_retry_attempts: int = 3,
@@ -265,11 +269,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         ssl: bool = False,
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, VerifyMode] = "required",
         ssl_certfile: Optional[str] = None,
         ssl_check_hostname: bool = False,
         ssl_keyfile: Optional[str] = None,
-        ssl_min_version: Optional[ssl.TLSVersion] = None,
+        ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
         protocol: Optional[int] = 2,
         address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
@@ -297,7 +301,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         kwargs: Dict[str, Any] = {
             "max_connections": max_connections,
             "connection_class": Connection,
-            "parser_class": ClusterParser,
             # Client related kwargs
             "credential_provider": credential_provider,
             "username": username,
@@ -335,7 +338,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
             }
         )

-        if read_from_replicas:
+        if read_from_replicas or load_balancing_strategy:
             # Call our on_connect function to configure READONLY mode
             kwargs["redis_connect_func"] = self.on_connect

@@ -384,6 +387,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         )
         self.encoder = Encoder(encoding, encoding_errors, decode_responses)
         self.read_from_replicas = read_from_replicas
+        self.load_balancing_strategy = load_balancing_strategy
         self.reinitialize_steps = reinitialize_steps
         self.cluster_error_retry_attempts = cluster_error_retry_attempts
         self.connection_error_retry_attempts = connection_error_retry_attempts
@@ -602,6 +606,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
             self.nodes_manager.get_node_from_slot(
                 await self._determine_slot(command, *args),
                 self.read_from_replicas and command in READ_COMMANDS,
+                self.load_balancing_strategy if command in READ_COMMANDS else None,
             )
         ]

@@ -782,7 +787,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
                 # refresh the target node
                 slot = await self._determine_slot(*args)
                 target_node = self.nodes_manager.get_node_from_slot(
-                    slot, self.read_from_replicas and args[0] in READ_COMMANDS
+                    slot,
+                    self.read_from_replicas and args[0] in READ_COMMANDS,
+                    self.load_balancing_strategy
+                    if args[0] in READ_COMMANDS
+                    else None,
                 )
                 moved = False

@@ -862,6 +871,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         blocking_timeout: Optional[float] = None,
         lock_class: Optional[Type[Lock]] = None,
         thread_local: bool = True,
+        raise_on_release_error: bool = True,
     ) -> Lock:
         """
         Return a new Lock object using key ``name`` that mimics
@@ -908,6 +918,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         thread-1 would see the token value as "xyz" and would be
         able to successfully release the thread-2's lock.

+        ``raise_on_release_error`` indicates whether to raise an exception when
+        the lock is no longer owned when exiting the context manager. By default,
+        this is True, meaning an exception will be raised. If False, the warning
+        will be logged and the exception will be suppressed.
+
         In some use cases it's necessary to disable thread local storage. For
         example, if you have code where one thread acquires a lock and passes
         that lock instance to a worker thread to release later. If thread
@@ -925,6 +940,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
             blocking=blocking,
             blocking_timeout=blocking_timeout,
             thread_local=thread_local,
+            raise_on_release_error=raise_on_release_error,
         )


@@ -1177,9 +1193,7 @@ class NodesManager:
             return self.nodes_cache.get(node_name)
         else:
             raise DataError(
-                "get_node requires one of the following: "
-                "1. node name "
-                "2. host and port"
+                "get_node requires one of the following: 1. node name 2. host and port"
             )

     def set_nodes(
@@ -1239,17 +1253,23 @@ class NodesManager:
         self._moved_exception = None

     def get_node_from_slot(
-        self, slot: int, read_from_replicas: bool = False
+        self,
+        slot: int,
+        read_from_replicas: bool = False,
+        load_balancing_strategy=None,
     ) -> "ClusterNode":
         if self._moved_exception:
             self._update_moved_slots()

+        if read_from_replicas is True and load_balancing_strategy is None:
+            load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
         try:
-            if read_from_replicas:
-                # get the server index in a Round-Robin manner
+            if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+                # get the server index using the strategy defined in load_balancing_strategy
                 primary_name = self.slots_cache[slot][0].name
                 node_idx = self.read_load_balancer.get_server_index(
-                    primary_name, len(self.slots_cache[slot])
+                    primary_name, len(self.slots_cache[slot]), load_balancing_strategy
                 )
                 return self.slots_cache[slot][node_idx]
             return self.slots_cache[slot][0]
@@ -1361,7 +1381,7 @@ class NodesManager:
                 if len(disagreements) > 5:
                     raise RedisClusterException(
                         f"startup_nodes could not agree on a valid "
-                        f'slots cache: {", ".join(disagreements)}'
+                        f"slots cache: {', '.join(disagreements)}"
                     )

             # Validate if all slots are covered or if we should try next startup node
@@ -1534,29 +1554,28 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
             return []

         try:
-            for _ in range(self._client.cluster_error_retry_attempts):
-                if self._client._initialize:
-                    await self._client.initialize()
-
+            retry_attempts = self._client.cluster_error_retry_attempts
+            while True:
                 try:
+                    if self._client._initialize:
+                        await self._client.initialize()
                     return await self._execute(
                         self._client,
                         self._command_stack,
                         raise_on_error=raise_on_error,
                         allow_redirections=allow_redirections,
                     )
-                except BaseException as e:
-                    if type(e) in self.__class__.ERRORS_ALLOW_RETRY:
-                        # Try again with the new cluster setup.
-                        exception = e
+
+                except self.__class__.ERRORS_ALLOW_RETRY as e:
+                    if retry_attempts > 0:
+                        # Try again with the new cluster setup. All other errors
+                        # should be raised.
+                        retry_attempts -= 1
                         await self._client.aclose()
                         await asyncio.sleep(0.25)
                     else:
                         # All other errors should be raised.
-                        raise
-
-            # If it fails the configured number of times then raise an exception
-            raise exception
+                        raise e
         finally:
             self._command_stack = []

@@ -1616,24 +1635,31 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
                 if isinstance(result, Exception):
                     command = " ".join(map(safe_str, cmd.args))
                     msg = (
-                        f"Command # {cmd.position + 1} ({command}) of pipeline "
-                        f"caused error: {result.args}"
+                        f"Command # {cmd.position + 1} "
+                        f"({truncate_text(command)}) "
+                        f"of pipeline caused error: {result.args}"
                     )
                     result.args = (msg,) + result.args[1:]
                     raise result

-        default_node = nodes.get(client.get_default_node().name)
-        if default_node is not None:
-            # This pipeline execution used the default node, check if we need
-            # to replace it.
-            # Note: when the error is raised we'll reset the default node in the
-            # caller function.
-            for cmd in default_node[1]:
-                # Check if it has a command that failed with a relevant
-                # exception
-                if type(cmd.result) in self.__class__.ERRORS_ALLOW_RETRY:
-                    client.replace_default_node()
-                    break
+        default_cluster_node = client.get_default_node()
+
+        # Check whether the default node was used. In some cases,
+        # 'client.get_default_node()' may return None. The check below
+        # prevents a potential AttributeError.
+        if default_cluster_node is not None:
+            default_node = nodes.get(default_cluster_node.name)
+            if default_node is not None:
+                # This pipeline execution used the default node, check if we need
+                # to replace it.
+                # Note: when the error is raised we'll reset the default node in the
+                # caller function.
+                for cmd in default_node[1]:
+                    # Check if it has a command that failed with a relevant
+                    # exception
+                    if type(cmd.result) in self.__class__.ERRORS_ALLOW_RETRY:
+                        client.replace_default_node()
+                        break

         return [cmd.result for cmd in stack]
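The constructor changes above deprecate read_from_replicas in favour of load_balancing_strategy, which is also threaded through get_node_from_slot(). A hedged usage sketch (host and port are placeholders for a reachable cluster; LoadBalancingStrategy.ROUND_ROBIN is the strategy named in the diff):

import asyncio
from redis.asyncio.cluster import RedisCluster
from redis.cluster import LoadBalancingStrategy

async def main():
    # Instead of read_from_replicas=True (now deprecated), pick a strategy.
    rc = RedisCluster(
        host="localhost",
        port=16379,
        load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN,
    )
    await rc.set("greeting", "hello")
    # Read commands such as GET may now be routed across the slot's nodes.
    print(await rc.get("greeting"))
    await rc.aclose()

asyncio.run(main())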