redis 5.3.0b4__py3-none-any.whl → 6.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. redis/__init__.py +2 -11
  2. redis/_parsers/base.py +14 -2
  3. redis/asyncio/client.py +21 -13
  4. redis/asyncio/cluster.py +79 -56
  5. redis/asyncio/connection.py +40 -11
  6. redis/asyncio/lock.py +26 -5
  7. redis/asyncio/sentinel.py +9 -1
  8. redis/asyncio/utils.py +1 -1
  9. redis/auth/token.py +6 -2
  10. redis/backoff.py +15 -0
  11. redis/client.py +80 -59
  12. redis/cluster.py +114 -52
  13. redis/commands/cluster.py +1 -11
  14. redis/commands/core.py +218 -206
  15. redis/commands/helpers.py +0 -70
  16. redis/commands/redismodules.py +0 -20
  17. redis/commands/search/aggregation.py +3 -1
  18. redis/commands/search/commands.py +41 -14
  19. redis/commands/search/dialect.py +3 -0
  20. redis/commands/search/profile_information.py +14 -0
  21. redis/commands/search/query.py +5 -1
  22. redis/connection.py +48 -23
  23. redis/exceptions.py +4 -1
  24. redis/lock.py +24 -4
  25. redis/ocsp.py +2 -1
  26. redis/sentinel.py +1 -1
  27. redis/typing.py +1 -1
  28. redis/utils.py +107 -1
  29. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info}/METADATA +57 -23
  30. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info}/RECORD +33 -40
  31. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info}/WHEEL +1 -2
  32. redis/commands/graph/__init__.py +0 -263
  33. redis/commands/graph/commands.py +0 -313
  34. redis/commands/graph/edge.py +0 -91
  35. redis/commands/graph/exceptions.py +0 -3
  36. redis/commands/graph/execution_plan.py +0 -211
  37. redis/commands/graph/node.py +0 -88
  38. redis/commands/graph/path.py +0 -78
  39. redis/commands/graph/query_result.py +0 -588
  40. redis-5.3.0b4.dist-info/top_level.txt +0 -1
  41. /redis/commands/search/{indexDefinition.py → index_definition.py} +0 -0
  42. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
redis/__init__.py CHANGED
@@ -1,5 +1,3 @@
- from importlib import metadata
-
  from redis import asyncio # noqa
  from redis.backoff import default_backoff
  from redis.client import Redis, StrictRedis
@@ -44,16 +42,9 @@ def int_or_str(value):
  return value


- try:
- __version__ = metadata.version("redis")
- except metadata.PackageNotFoundError:
- __version__ = "99.99.99"
-
+ __version__ = "6.0.0b1"
+ VERSION = tuple(map(int_or_str, __version__.split(".")))

- try:
- VERSION = tuple(map(int_or_str, __version__.split(".")))
- except AttributeError:
- VERSION = tuple([99, 99, 99])

  __all__ = [
  "AuthenticationError",
redis/_parsers/base.py CHANGED
@@ -9,26 +9,32 @@ else:
  from async_timeout import timeout as async_timeout

  from ..exceptions import (
+ AskError,
  AuthenticationError,
  AuthenticationWrongNumberOfArgsError,
  BusyLoadingError,
+ ClusterCrossSlotError,
+ ClusterDownError,
  ConnectionError,
  ExecAbortError,
+ MasterDownError,
  ModuleError,
+ MovedError,
  NoPermissionError,
  NoScriptError,
  OutOfMemoryError,
  ReadOnlyError,
  RedisError,
  ResponseError,
+ TryAgainError,
  )
  from ..typing import EncodableT
  from .encoders import Encoder
  from .socket import SERVER_CLOSED_CONNECTION_ERROR, SocketBuffer

- MODULE_LOAD_ERROR = "Error loading the extension. " "Please check the server logs."
+ MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
  NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
- MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not " "possible."
+ MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
  MODULE_EXPORTS_DATA_TYPES_ERROR = (
  "Error unloading module: the module "
  "exports one or more module-side data "
@@ -72,6 +78,12 @@ class BaseParser(ABC):
  "READONLY": ReadOnlyError,
  "NOAUTH": AuthenticationError,
  "NOPERM": NoPermissionError,
+ "ASK": AskError,
+ "TRYAGAIN": TryAgainError,
+ "MOVED": MovedError,
+ "CLUSTERDOWN": ClusterDownError,
+ "CROSSSLOT": ClusterCrossSlotError,
+ "MASTERDOWN": MasterDownError,
  }

  @classmethod
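
Note: with the cluster redirection errors (ASK, MOVED, TRYAGAIN, CLUSTERDOWN, CROSSSLOT, MASTERDOWN) registered directly on the base parser, the cluster-only ClusterParser becomes unnecessary (it is removed from redis/asyncio/cluster.py further down). Conceptually, the parser maps the first token of an error reply to an exception class; a simplified sketch of that lookup (illustrative, not the actual BaseParser code):

from redis.exceptions import AskError, MovedError, ResponseError

# trimmed-down mapping; the real table also carries the auth/module/cluster
# entries shown in the hunk above
EXCEPTION_CLASSES = {
    "ASK": AskError,
    "MOVED": MovedError,
}

def parse_error(response: str) -> Exception:
    # match the leading token of the error reply against the registered names
    error_code = response.split(" ")[0]
    if error_code in EXCEPTION_CLASSES:
        return EXCEPTION_CLASSES[error_code](response[len(error_code) + 1:])
    return ResponseError(response)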
redis/asyncio/client.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
  import copy
  import inspect
  import re
- import ssl
  import warnings
  from typing import (
  TYPE_CHECKING,
@@ -72,6 +71,7 @@ from redis.exceptions import (
  from redis.typing import ChannelT, EncodableT, KeyT
  from redis.utils import (
  HIREDIS_AVAILABLE,
+ SSL_AVAILABLE,
  _set_info_logger,
  deprecated_function,
  get_lib_version,
@@ -79,6 +79,11 @@ from redis.utils import (
  str_if_bytes,
  )

+ if TYPE_CHECKING and SSL_AVAILABLE:
+ from ssl import TLSVersion
+ else:
+ TLSVersion = None
+
  PubSubHandler = Callable[[Dict[str, str]], Awaitable[None]]
  _KeyT = TypeVar("_KeyT", bound=KeyT)
  _ArgT = TypeVar("_ArgT", KeyT, EncodableT)
@@ -226,7 +231,7 @@ class Redis(
  ssl_ca_certs: Optional[str] = None,
  ssl_ca_data: Optional[str] = None,
  ssl_check_hostname: bool = False,
- ssl_min_version: Optional[ssl.TLSVersion] = None,
+ ssl_min_version: Optional[TLSVersion] = None,
  ssl_ciphers: Optional[str] = None,
  max_connections: Optional[int] = None,
  single_connection_client: bool = False,
@@ -375,7 +380,7 @@ class Redis(
  if self.single_connection_client:
  async with self._single_conn_lock:
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection("_")
+ self.connection = await self.connection_pool.get_connection()

  self._event_dispatcher.dispatch(
  AfterSingleConnectionInstantiationEvent(
@@ -478,6 +483,7 @@ class Redis(
  blocking_timeout: Optional[float] = None,
  lock_class: Optional[Type[Lock]] = None,
  thread_local: bool = True,
+ raise_on_release_error: bool = True,
  ) -> Lock:
  """
  Return a new Lock object using key ``name`` that mimics
@@ -524,6 +530,11 @@ class Redis(
  thread-1 would see the token value as "xyz" and would be
  able to successfully release the thread-2's lock.

+ ``raise_on_release_error`` indicates whether to raise an exception when
+ the lock is no longer owned when exiting the context manager. By default,
+ this is True, meaning an exception will be raised. If False, the warning
+ will be logged and the exception will be suppressed.
+
  In some use cases it's necessary to disable thread local storage. For
  example, if you have code where one thread acquires a lock and passes
  that lock instance to a worker thread to release later. If thread
@@ -541,6 +552,7 @@ class Redis(
  blocking=blocking,
  blocking_timeout=blocking_timeout,
  thread_local=thread_local,
+ raise_on_release_error=raise_on_release_error,
  )

  def pubsub(self, **kwargs) -> "PubSub":
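
Note: the new raise_on_release_error flag (documented in the hunk above) controls what happens when the lock has already expired or changed owner by the time the context manager exits. A minimal usage sketch, assuming a Redis server on localhost:

import asyncio
import redis.asyncio as redis

async def main():
    r = redis.Redis()
    # with raise_on_release_error=False, a lock that expired mid-section is
    # logged on release instead of raising
    async with r.lock("resource-key", timeout=5, raise_on_release_error=False):
        await asyncio.sleep(1)  # critical section
    await r.aclose()

asyncio.run(main())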
@@ -638,7 +650,7 @@ class Redis(
  await self.initialize()
  pool = self.connection_pool
  command_name = args[0]
- conn = self.connection or await pool.get_connection(command_name, **options)
+ conn = self.connection or await pool.get_connection()

  if self.single_connection_client:
  await self._single_conn_lock.acquire()
@@ -712,7 +724,7 @@ class Monitor:

  async def connect(self):
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection("MONITOR")
+ self.connection = await self.connection_pool.get_connection()

  async def __aenter__(self):
  await self.connect()
@@ -900,9 +912,7 @@ class PubSub:
  Ensure that the PubSub is connected
  """
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection(
- "pubsub", self.shard_hint
- )
+ self.connection = await self.connection_pool.get_connection()
  # register a callback that re-subscribes to any channels we
  # were listening to when we were disconnected
  self.connection.register_connect_callback(self.on_connect)
@@ -1370,9 +1380,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  conn = self.connection
  # if this is the first call, we need a connection
  if not conn:
- conn = await self.connection_pool.get_connection(
- command_name, self.shard_hint
- )
+ conn = await self.connection_pool.get_connection()
  self.connection = conn

  return await conn.retry.call_with_retry(
@@ -1554,7 +1562,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  await self.reset()
  raise

- async def execute(self, raise_on_error: bool = True):
+ async def execute(self, raise_on_error: bool = True) -> List[Any]:
  """Execute all the commands in the current pipeline"""
  stack = self.command_stack
  if not stack and not self.watching:
@@ -1568,7 +1576,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]

  conn = self.connection
  if not conn:
- conn = await self.connection_pool.get_connection("MULTI", self.shard_hint)
+ conn = await self.connection_pool.get_connection()
  # assign to self.connection so reset() releases the connection
  # back to the pool after we're done
  self.connection = conn
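
Note: beyond the argument-less connection-pool calls, Pipeline.execute is now annotated to return List[Any] (one entry per queued command). A short usage sketch, assuming a local server:

import asyncio
import redis.asyncio as redis

async def main():
    r = redis.Redis()
    async with r.pipeline(transaction=True) as pipe:
        pipe.set("counter", 1)
        pipe.incr("counter")
        results = await pipe.execute()  # e.g. [True, 2]
    print(results)
    await r.aclose()

asyncio.run(main())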
redis/asyncio/cluster.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
  import collections
  import random
  import socket
- import ssl
  import warnings
  from typing import (
  Any,
@@ -26,7 +25,7 @@ from redis._parsers.helpers import (
  _RedisCallbacksRESP3,
  )
  from redis.asyncio.client import ResponseCallbackT
- from redis.asyncio.connection import Connection, DefaultParser, SSLConnection, parse_url
+ from redis.asyncio.connection import Connection, SSLConnection, parse_url
  from redis.asyncio.lock import Lock
  from redis.asyncio.retry import Retry
  from redis.auth.token import TokenInterface
@@ -39,6 +38,7 @@ from redis.cluster import (
  SLOT_ID,
  AbstractRedisCluster,
  LoadBalancer,
+ LoadBalancingStrategy,
  block_pipeline_command,
  get_node_name,
  parse_cluster_slots,
@@ -50,12 +50,10 @@ from redis.event import AfterAsyncClusterInstantiationEvent, EventDispatcher
  from redis.exceptions import (
  AskError,
  BusyLoadingError,
- ClusterCrossSlotError,
  ClusterDownError,
  ClusterError,
  ConnectionError,
  DataError,
- MasterDownError,
  MaxConnectionsError,
  MovedError,
  RedisClusterException,
@@ -67,32 +65,24 @@ from redis.exceptions import (
  )
  from redis.typing import AnyKeyT, EncodableT, KeyT
  from redis.utils import (
+ SSL_AVAILABLE,
+ deprecated_args,
  deprecated_function,
- dict_merge,
  get_lib_version,
  safe_str,
  str_if_bytes,
  )

+ if SSL_AVAILABLE:
+ from ssl import TLSVersion
+ else:
+ TLSVersion = None
+
  TargetNodesT = TypeVar(
  "TargetNodesT", str, "ClusterNode", List["ClusterNode"], Dict[Any, "ClusterNode"]
  )


- class ClusterParser(DefaultParser):
- EXCEPTION_CLASSES = dict_merge(
- DefaultParser.EXCEPTION_CLASSES,
- {
- "ASK": AskError,
- "CLUSTERDOWN": ClusterDownError,
- "CROSSSLOT": ClusterCrossSlotError,
- "MASTERDOWN": MasterDownError,
- "MOVED": MovedError,
- "TRYAGAIN": TryAgainError,
- },
- )
-
-
  class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands):
  """
  Create a new RedisCluster client.
@@ -133,9 +123,15 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  | See:
  https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters
  :param read_from_replicas:
- | Enable read from replicas in READONLY mode. You can read possibly stale data.
+ | @deprecated - please use load_balancing_strategy instead
+ | Enable read from replicas in READONLY mode.
  When set to true, read commands will be assigned between the primary and
  its replications in a Round-Robin manner.
+ The data read from replicas is eventually consistent with the data in primary nodes.
+ :param load_balancing_strategy:
+ | Enable read from replicas in READONLY mode and defines the load balancing
+ strategy that will be used for cluster node selection.
+ The data read from replicas is eventually consistent with the data in primary nodes.
  :param reinitialize_steps:
  | Specifies the number of MOVED errors that need to occur before reinitializing
  the whole cluster topology. If a MOVED error occurs and the cluster does not
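
Note: load_balancing_strategy replaces the boolean read_from_replicas, which is now deprecated (see the decorator in the next hunk). A minimal sketch of the new option, assuming a cluster is reachable on localhost:16379:

import asyncio
from redis.asyncio.cluster import RedisCluster
from redis.cluster import LoadBalancingStrategy

async def main():
    # spread read commands across a primary and its replicas
    rc = RedisCluster(
        host="localhost",
        port=16379,
        load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN,
    )
    await rc.initialize()
    print(await rc.get("foo"))
    await rc.aclose()

asyncio.run(main())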
@@ -228,6 +224,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  "result_callbacks",
  )

+ @deprecated_args(
+ args_to_warn=["read_from_replicas"],
+ reason="Please configure the 'load_balancing_strategy' instead",
+ version="5.0.3",
+ )
  def __init__(
  self,
  host: Optional[str] = None,
@@ -236,6 +237,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  startup_nodes: Optional[List["ClusterNode"]] = None,
  require_full_coverage: bool = True,
  read_from_replicas: bool = False,
+ load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
  reinitialize_steps: int = 5,
  cluster_error_retry_attempts: int = 3,
  connection_error_retry_attempts: int = 3,
@@ -269,7 +271,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  ssl_certfile: Optional[str] = None,
  ssl_check_hostname: bool = False,
  ssl_keyfile: Optional[str] = None,
- ssl_min_version: Optional[ssl.TLSVersion] = None,
+ ssl_min_version: Optional[TLSVersion] = None,
  ssl_ciphers: Optional[str] = None,
  protocol: Optional[int] = 2,
  address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
@@ -297,7 +299,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  kwargs: Dict[str, Any] = {
  "max_connections": max_connections,
  "connection_class": Connection,
- "parser_class": ClusterParser,
  # Client related kwargs
  "credential_provider": credential_provider,
  "username": username,
@@ -335,7 +336,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  }
  )

- if read_from_replicas:
+ if read_from_replicas or load_balancing_strategy:
  # Call our on_connect function to configure READONLY mode
  kwargs["redis_connect_func"] = self.on_connect

@@ -384,6 +385,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  )
  self.encoder = Encoder(encoding, encoding_errors, decode_responses)
  self.read_from_replicas = read_from_replicas
+ self.load_balancing_strategy = load_balancing_strategy
  self.reinitialize_steps = reinitialize_steps
  self.cluster_error_retry_attempts = cluster_error_retry_attempts
  self.connection_error_retry_attempts = connection_error_retry_attempts
@@ -602,6 +604,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  self.nodes_manager.get_node_from_slot(
  await self._determine_slot(command, *args),
  self.read_from_replicas and command in READ_COMMANDS,
+ self.load_balancing_strategy if command in READ_COMMANDS else None,
  )
  ]

@@ -782,7 +785,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # refresh the target node
  slot = await self._determine_slot(*args)
  target_node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and args[0] in READ_COMMANDS
+ slot,
+ self.read_from_replicas and args[0] in READ_COMMANDS,
+ self.load_balancing_strategy
+ if args[0] in READ_COMMANDS
+ else None,
  )
  moved = False

@@ -862,6 +869,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  blocking_timeout: Optional[float] = None,
  lock_class: Optional[Type[Lock]] = None,
  thread_local: bool = True,
+ raise_on_release_error: bool = True,
  ) -> Lock:
  """
  Return a new Lock object using key ``name`` that mimics
@@ -908,6 +916,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  thread-1 would see the token value as "xyz" and would be
  able to successfully release the thread-2's lock.

+ ``raise_on_release_error`` indicates whether to raise an exception when
+ the lock is no longer owned when exiting the context manager. By default,
+ this is True, meaning an exception will be raised. If False, the warning
+ will be logged and the exception will be suppressed.
+
  In some use cases it's necessary to disable thread local storage. For
  example, if you have code where one thread acquires a lock and passes
  that lock instance to a worker thread to release later. If thread
@@ -925,6 +938,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  blocking=blocking,
  blocking_timeout=blocking_timeout,
  thread_local=thread_local,
+ raise_on_release_error=raise_on_release_error,
  )


@@ -1177,9 +1191,7 @@ class NodesManager:
  return self.nodes_cache.get(node_name)
  else:
  raise DataError(
- "get_node requires one of the following: "
- "1. node name "
- "2. host and port"
+ "get_node requires one of the following: 1. node name 2. host and port"
  )

  def set_nodes(
@@ -1239,17 +1251,23 @@ class NodesManager:
  self._moved_exception = None

  def get_node_from_slot(
- self, slot: int, read_from_replicas: bool = False
+ self,
+ slot: int,
+ read_from_replicas: bool = False,
+ load_balancing_strategy=None,
  ) -> "ClusterNode":
  if self._moved_exception:
  self._update_moved_slots()

+ if read_from_replicas is True and load_balancing_strategy is None:
+ load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
  try:
- if read_from_replicas:
- # get the server index in a Round-Robin manner
+ if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+ # get the server index using the strategy defined in load_balancing_strategy
  primary_name = self.slots_cache[slot][0].name
  node_idx = self.read_load_balancer.get_server_index(
- primary_name, len(self.slots_cache[slot])
+ primary_name, len(self.slots_cache[slot]), load_balancing_strategy
  )
  return self.slots_cache[slot][node_idx]
  return self.slots_cache[slot][0]
@@ -1361,7 +1379,7 @@ class NodesManager:
  if len(disagreements) > 5:
  raise RedisClusterException(
  f"startup_nodes could not agree on a valid "
- f'slots cache: {", ".join(disagreements)}'
+ f"slots cache: {', '.join(disagreements)}"
  )

  # Validate if all slots are covered or if we should try next startup node
@@ -1534,29 +1552,28 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
  return []

  try:
- for _ in range(self._client.cluster_error_retry_attempts):
- if self._client._initialize:
- await self._client.initialize()
-
+ retry_attempts = self._client.cluster_error_retry_attempts
+ while True:
  try:
+ if self._client._initialize:
+ await self._client.initialize()
  return await self._execute(
  self._client,
  self._command_stack,
  raise_on_error=raise_on_error,
  allow_redirections=allow_redirections,
  )
- except BaseException as e:
- if type(e) in self.__class__.ERRORS_ALLOW_RETRY:
- # Try again with the new cluster setup.
- exception = e
+
+ except self.__class__.ERRORS_ALLOW_RETRY as e:
+ if retry_attempts > 0:
+ # Try again with the new cluster setup. All other errors
+ # should be raised.
+ retry_attempts -= 1
  await self._client.aclose()
  await asyncio.sleep(0.25)
  else:
  # All other errors should be raised.
- raise
-
- # If it fails the configured number of times then raise an exception
- raise exception
+ raise e
  finally:
  self._command_stack = []

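Note: the rewritten pipeline execution above is a standard bounded-retry loop — a retry budget is decremented on each retryable cluster error, the client is reinitialized, and the error is re-raised once the budget runs out. A generic sketch of the pattern (illustrative only, not the redis-py code):

import asyncio

async def run_with_retry(operation, retry_attempts: int, retryable=(ConnectionError,)):
    # 'operation' is any coroutine function; retryable errors consume the budget
    while True:
        try:
            return await operation()
        except retryable:
            if retry_attempts > 0:
                retry_attempts -= 1
                await asyncio.sleep(0.25)  # brief pause before the next attempt
            else:
                raise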
@@ -1622,18 +1639,24 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
  result.args = (msg,) + result.args[1:]
  raise result

- default_node = nodes.get(client.get_default_node().name)
- if default_node is not None:
- # This pipeline execution used the default node, check if we need
- # to replace it.
- # Note: when the error is raised we'll reset the default node in the
- # caller function.
- for cmd in default_node[1]:
- # Check if it has a command that failed with a relevant
- # exception
- if type(cmd.result) in self.__class__.ERRORS_ALLOW_RETRY:
- client.replace_default_node()
- break
+ default_cluster_node = client.get_default_node()
+
+ # Check whether the default node was used. In some cases,
+ # 'client.get_default_node()' may return None. The check below
+ # prevents a potential AttributeError.
+ if default_cluster_node is not None:
+ default_node = nodes.get(default_cluster_node.name)
+ if default_node is not None:
+ # This pipeline execution used the default node, check if we need
+ # to replace it.
+ # Note: when the error is raised we'll reset the default node in the
+ # caller function.
+ for cmd in default_node[1]:
+ # Check if it has a command that failed with a relevant
+ # exception
+ if type(cmd.result) in self.__class__.ERRORS_ALLOW_RETRY:
+ client.replace_default_node()
+ break

  return [cmd.result for cmd in stack]

redis/asyncio/connection.py CHANGED
@@ -3,7 +3,6 @@ import copy
  import enum
  import inspect
  import socket
- import ssl
  import sys
  import warnings
  import weakref
@@ -27,9 +26,19 @@ from typing import (
  )
  from urllib.parse import ParseResult, parse_qs, unquote, urlparse

+ from ..utils import SSL_AVAILABLE
+
+ if SSL_AVAILABLE:
+ import ssl
+ from ssl import SSLContext, TLSVersion
+ else:
+ ssl = None
+ TLSVersion = None
+ SSLContext = None
+
  from ..auth.token import TokenInterface
  from ..event import AsyncAfterConnectionReleasedEvent, EventDispatcher
- from ..utils import format_error_message
+ from ..utils import deprecated_args, format_error_message

  # the functionality is available in 3.11.x but has a major issue before
  # 3.11.3. See https://github.com/redis/redis-py/issues/2633
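
Note: SSL_AVAILABLE comes from redis/utils.py (also touched in this release), so the client can degrade gracefully on Python builds without the ssl module. The flag is presumably a simple import guard along these lines (an assumption, not the actual redis.utils code):

# hypothetical shape of the flag; the real definition lives in redis/utils.py
try:
    import ssl  # noqa: F401
    SSL_AVAILABLE = True
except ImportError:
    SSL_AVAILABLE = False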
@@ -363,7 +372,11 @@ class AbstractConnection:
  self._parser.on_connect(self)
  if len(auth_args) == 1:
  auth_args = ["default", auth_args[0]]
- await self.send_command("HELLO", self.protocol, "AUTH", *auth_args)
+ # avoid checking health here -- PING will fail if we try
+ # to check the health prior to the AUTH
+ await self.send_command(
+ "HELLO", self.protocol, "AUTH", *auth_args, check_health=False
+ )
  response = await self.read_response()
  if response.get(b"proto") != int(self.protocol) and response.get(
  "proto"
@@ -759,10 +772,13 @@ class SSLConnection(Connection):
  ssl_ca_certs: Optional[str] = None,
  ssl_ca_data: Optional[str] = None,
  ssl_check_hostname: bool = False,
- ssl_min_version: Optional[ssl.TLSVersion] = None,
+ ssl_min_version: Optional[TLSVersion] = None,
  ssl_ciphers: Optional[str] = None,
  **kwargs,
  ):
+ if not SSL_AVAILABLE:
+ raise RedisError("Python wasn't built with SSL support")
+
  self.ssl_context: RedisSSLContext = RedisSSLContext(
  keyfile=ssl_keyfile,
  certfile=ssl_certfile,
@@ -830,15 +846,18 @@ class RedisSSLContext:
  ca_certs: Optional[str] = None,
  ca_data: Optional[str] = None,
  check_hostname: bool = False,
- min_version: Optional[ssl.TLSVersion] = None,
+ min_version: Optional[TLSVersion] = None,
  ciphers: Optional[str] = None,
  ):
+ if not SSL_AVAILABLE:
+ raise RedisError("Python wasn't built with SSL support")
+
  self.keyfile = keyfile
  self.certfile = certfile
  if cert_reqs is None:
  self.cert_reqs = ssl.CERT_NONE
  elif isinstance(cert_reqs, str):
- CERT_REQS = {
+ CERT_REQS = { # noqa: N806
  "none": ssl.CERT_NONE,
  "optional": ssl.CERT_OPTIONAL,
  "required": ssl.CERT_REQUIRED,
@@ -853,9 +872,9 @@ class RedisSSLContext:
  self.check_hostname = check_hostname
  self.min_version = min_version
  self.ciphers = ciphers
- self.context: Optional[ssl.SSLContext] = None
+ self.context: Optional[SSLContext] = None

- def get(self) -> ssl.SSLContext:
+ def get(self) -> SSLContext:
  if not self.context:
  context = ssl.create_default_context()
  context.check_hostname = self.check_hostname
@@ -1087,7 +1106,12 @@ class ConnectionPool:
  or len(self._in_use_connections) < self.max_connections
  )

- async def get_connection(self, command_name, *keys, **options):
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.0.3",
+ )
+ async def get_connection(self, command_name=None, *keys, **options):
  async with self._lock:
  """Get a connected connection from the pool"""
  connection = self.get_available_connection()
@@ -1133,7 +1157,7 @@ class ConnectionPool:
  try:
  if await connection.can_read_destructive():
  raise ConnectionError("Connection has data") from None
- except (ConnectionError, OSError):
+ except (ConnectionError, TimeoutError, OSError):
  await connection.disconnect()
  await connection.connect()
  if await connection.can_read_destructive():
@@ -1255,7 +1279,12 @@ class BlockingConnectionPool(ConnectionPool):
  self._condition = asyncio.Condition()
  self.timeout = timeout

- async def get_connection(self, command_name, *keys, **options):
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.0.3",
+ )
+ async def get_connection(self, command_name=None, *keys, **options):
  """Gets a connection from the pool, blocking until one is available"""
  try:
  async with self._condition:
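
Note: get_connection() no longer needs a command name or keys; passing the old positional arguments only triggers a deprecation warning. A minimal sketch of working with the async pool directly, assuming a local server:

import asyncio
from redis.asyncio import ConnectionPool

async def main():
    pool = ConnectionPool(host="localhost", port=6379)
    conn = await pool.get_connection()  # no command name needed anymore
    try:
        await conn.send_command("PING")
        print(await conn.read_response())  # b'PONG'
    finally:
        await pool.release(conn)
        await pool.disconnect()

asyncio.run(main())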