redis 6.0.0b2__py3-none-any.whl → 6.1.0__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
redis/__init__.py CHANGED
@@ -16,11 +16,14 @@ from redis.exceptions import (
  BusyLoadingError,
  ChildDeadlockedError,
  ConnectionError,
+ CrossSlotTransactionError,
  DataError,
+ InvalidPipelineStack,
  InvalidResponse,
  OutOfMemoryError,
  PubSubError,
  ReadOnlyError,
+ RedisClusterException,
  RedisError,
  ResponseError,
  TimeoutError,
@@ -42,7 +45,9 @@ def int_or_str(value):
  return value


- __version__ = "6.0.0b2"
+ # This is the version of redis-py that is being used
+ # for building and installing the lib.
+ __version__ = "6.1.0"
  VERSION = tuple(map(int_or_str, __version__.split(".")))


@@ -56,15 +61,18 @@ __all__ = [
  "ConnectionError",
  "ConnectionPool",
  "CredentialProvider",
+ "CrossSlotTransactionError",
  "DataError",
  "from_url",
  "default_backoff",
+ "InvalidPipelineStack",
  "InvalidResponse",
  "OutOfMemoryError",
  "PubSubError",
  "ReadOnlyError",
  "Redis",
  "RedisCluster",
+ "RedisClusterException",
  "RedisError",
  "ResponseError",
  "Sentinel",
redis/_parsers/resp3.py CHANGED
@@ -19,7 +19,7 @@ class _RESP3Parser(_RESPBase):

  def handle_pubsub_push_response(self, response):
  logger = getLogger("push_response")
- logger.info("Push response: " + str(response))
+ logger.debug("Push response: " + str(response))
  return response

  def read_response(self, disable_decoding=False, push_request=False):
@@ -150,7 +150,7 @@ class _AsyncRESP3Parser(_AsyncRESPBase):

  async def handle_pubsub_push_response(self, response):
  logger = getLogger("push_response")
- logger.info("Push response: " + str(response))
+ logger.debug("Push response: " + str(response))
  return response

  async def read_response(
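Both parsers now emit push messages at DEBUG instead of INFO, so they no longer show up under a default logging setup. A small sketch for turning them back on while troubleshooting pub/sub:

    import logging

    # The parser logs through a logger literally named "push_response"
    # (see getLogger("push_response") above).
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("push_response").setLevel(logging.DEBUG)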
redis/asyncio/client.py CHANGED
@@ -39,6 +39,7 @@ from redis.asyncio.connection import (
  )
  from redis.asyncio.lock import Lock
  from redis.asyncio.retry import Retry
+ from redis.backoff import ExponentialWithJitterBackoff
  from redis.client import (
  EMPTY_RESPONSE,
  NEVER_DECODE,
@@ -65,7 +66,6 @@ from redis.exceptions import (
  PubSubError,
  RedisError,
  ResponseError,
- TimeoutError,
  WatchError,
  )
  from redis.typing import ChannelT, EncodableT, KeyT
@@ -73,6 +73,7 @@ from redis.utils import (
  HIREDIS_AVAILABLE,
  SSL_AVAILABLE,
  _set_info_logger,
+ deprecated_args,
  deprecated_function,
  get_lib_version,
  safe_str,
@@ -208,6 +209,11 @@ class Redis(
  client.auto_close_connection_pool = True
  return client

+ @deprecated_args(
+ args_to_warn=["retry_on_timeout"],
+ reason="TimeoutError is included by default.",
+ version="6.0.0",
+ )
  def __init__(
  self,
  *,
@@ -225,6 +231,9 @@ class Redis(
  encoding_errors: str = "strict",
  decode_responses: bool = False,
  retry_on_timeout: bool = False,
+ retry: Retry = Retry(
+ backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
+ ),
  retry_on_error: Optional[list] = None,
  ssl: bool = False,
  ssl_keyfile: Optional[str] = None,
@@ -232,7 +241,7 @@ class Redis(
  ssl_cert_reqs: Union[str, VerifyMode] = "required",
  ssl_ca_certs: Optional[str] = None,
  ssl_ca_data: Optional[str] = None,
- ssl_check_hostname: bool = False,
+ ssl_check_hostname: bool = True,
  ssl_min_version: Optional[TLSVersion] = None,
  ssl_ciphers: Optional[str] = None,
  max_connections: Optional[int] = None,
@@ -242,7 +251,6 @@ class Redis(
  lib_name: Optional[str] = "redis-py",
  lib_version: Optional[str] = get_lib_version(),
  username: Optional[str] = None,
- retry: Optional[Retry] = None,
  auto_close_connection_pool: Optional[bool] = None,
  redis_connect_func=None,
  credential_provider: Optional[CredentialProvider] = None,
@@ -251,10 +259,24 @@ class Redis(
  ):
  """
  Initialize a new Redis client.
- To specify a retry policy for specific errors, first set
- `retry_on_error` to a list of the error/s to retry on, then set
- `retry` to a valid `Retry` object.
- To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
+
+ To specify a retry policy for specific errors, you have two options:
+
+ 1. Set the `retry_on_error` to a list of the error/s to retry on, and
+ you can also set `retry` to a valid `Retry` object(in case the default
+ one is not appropriate) - with this approach the retries will be triggered
+ on the default errors specified in the Retry object enriched with the
+ errors specified in `retry_on_error`.
+
+ 2. Define a `Retry` object with configured 'supported_errors' and set
+ it to the `retry` parameter - with this approach you completely redefine
+ the errors on which retries will happen.
+
+ `retry_on_timeout` is deprecated - please include the TimeoutError
+ either in the Retry object or in the `retry_on_error` list.
+
+ When 'connection_pool' is provided - the retry configuration of the
+ provided pool will be used.
  """
  kwargs: Dict[str, Any]
  if event_dispatcher is None:
@@ -280,8 +302,6 @@ class Redis(
  # Create internal connection pool, expected to be closed by Redis instance
  if not retry_on_error:
  retry_on_error = []
- if retry_on_timeout is True:
- retry_on_error.append(TimeoutError)
  kwargs = {
  "db": db,
  "username": username,
@@ -291,7 +311,6 @@ class Redis(
  "encoding": encoding,
  "encoding_errors": encoding_errors,
  "decode_responses": decode_responses,
- "retry_on_timeout": retry_on_timeout,
  "retry_on_error": retry_on_error,
  "retry": copy.deepcopy(retry),
  "max_connections": max_connections,
@@ -403,10 +422,10 @@ class Redis(
  """Get the connection's key-word arguments"""
  return self.connection_pool.connection_kwargs

- def get_retry(self) -> Optional["Retry"]:
+ def get_retry(self) -> Optional[Retry]:
  return self.get_connection_kwargs().get("retry")

- def set_retry(self, retry: "Retry") -> None:
+ def set_retry(self, retry: Retry) -> None:
  self.get_connection_kwargs().update({"retry": retry})
  self.connection_pool.set_retry(retry)

@@ -633,18 +652,17 @@ class Redis(
  await conn.send_command(*args)
  return await self.parse_response(conn, command_name, **options)

- async def _disconnect_raise(self, conn: Connection, error: Exception):
+ async def _close_connection(self, conn: Connection):
  """
- Close the connection and raise an exception
- if retry_on_error is not set or the error
- is not one of the specified error types
+ Close the connection before retrying.
+
+ The supported exceptions are already checked in the
+ retry object so we don't need to do it here.
+
+ After we disconnect the connection, it will try to reconnect and
+ do a health check as part of the send_command logic(on connection level).
  """
  await conn.disconnect()
- if (
- conn.retry_on_error is None
- or isinstance(error, tuple(conn.retry_on_error)) is False
- ):
- raise error

  # COMMAND EXECUTION AND PROTOCOL PARSING
  async def execute_command(self, *args, **options):
@@ -661,7 +679,7 @@ class Redis(
  lambda: self._send_command_parse_response(
  conn, command_name, *args, **options
  ),
- lambda error: self._disconnect_raise(conn, error),
+ lambda _: self._close_connection(conn),
  )
  finally:
  if self.single_connection_client:
@@ -929,19 +947,11 @@ class PubSub:
  )
  )

- async def _disconnect_raise_connect(self, conn, error):
+ async def _reconnect(self, conn):
  """
- Close the connection and raise an exception
- if retry_on_error is not set or the error is not one
- of the specified error types. Otherwise, try to
- reconnect
+ Try to reconnect
  """
  await conn.disconnect()
- if (
- conn.retry_on_error is None
- or isinstance(error, tuple(conn.retry_on_error)) is False
- ):
- raise error
  await conn.connect()

  async def _execute(self, conn, command, *args, **kwargs):
@@ -954,7 +964,7 @@ class PubSub:
  """
  return await conn.retry.call_with_retry(
  lambda: command(*args, **kwargs),
- lambda error: self._disconnect_raise_connect(conn, error),
+ lambda _: self._reconnect(conn),
  )

  async def parse_response(self, block: bool = True, timeout: float = 0):
@@ -1245,7 +1255,8 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  in one transmission. This is convenient for batch processing, such as
  saving all the values in a list to Redis.

- All commands executed within a pipeline are wrapped with MULTI and EXEC
+ All commands executed within a pipeline(when running in transactional mode,
+ which is the default behavior) are wrapped with MULTI and EXEC
  calls. This guarantees all commands executed in the pipeline will be
  executed atomically.

@@ -1274,7 +1285,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  self.shard_hint = shard_hint
  self.watching = False
  self.command_stack: CommandStackT = []
- self.scripts: Set["Script"] = set()
+ self.scripts: Set[Script] = set()
  self.explicit_transaction = False

  async def __aenter__(self: _RedisT) -> _RedisT:
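The clarified Pipeline docstring above distinguishes transactional batches (the default, wrapped in MULTI/EXEC) from plain command batching. A hedged sketch of both modes, assuming the client's long-standing `transaction` flag on `pipeline()` and a reachable local server; the keys are arbitrary examples:

    import asyncio
    from redis.asyncio import Redis

    async def main():
        client = Redis()

        # Default: queued commands are wrapped in MULTI/EXEC and applied atomically.
        async with client.pipeline() as pipe:
            pipe.set("greeting", "hello")
            pipe.incr("visits")
            await pipe.execute()

        # transaction=False batches the same commands without the MULTI/EXEC wrapper.
        async with client.pipeline(transaction=False) as pipe:
            pipe.get("greeting")
            pipe.get("visits")
            print(await pipe.execute())

        await client.aclose()

    asyncio.run(main())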
@@ -1346,36 +1357,36 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  return self.immediate_execute_command(*args, **kwargs)
  return self.pipeline_execute_command(*args, **kwargs)

- async def _disconnect_reset_raise(self, conn, error):
+ async def _disconnect_reset_raise_on_watching(
+ self,
+ conn: Connection,
+ error: Exception,
+ ):
  """
- Close the connection, reset watching state and
- raise an exception if we were watching,
- if retry_on_error is not set or the error is not one
- of the specified error types.
+ Close the connection reset watching state and
+ raise an exception if we were watching.
+
+ The supported exceptions are already checked in the
+ retry object so we don't need to do it here.
+
+ After we disconnect the connection, it will try to reconnect and
+ do a health check as part of the send_command logic(on connection level).
  """
  await conn.disconnect()
  # if we were already watching a variable, the watch is no longer
  # valid since this connection has died. raise a WatchError, which
  # indicates the user should retry this transaction.
  if self.watching:
- await self.aclose()
+ await self.reset()
  raise WatchError(
- "A ConnectionError occurred on while watching one or more keys"
+ f"A {type(error).__name__} occurred while watching one or more keys"
  )
- # if retry_on_error is not set or the error is not one
- # of the specified error types, raise it
- if (
- conn.retry_on_error is None
- or isinstance(error, tuple(conn.retry_on_error)) is False
- ):
- await self.aclose()
- raise

  async def immediate_execute_command(self, *args, **options):
  """
- Execute a command immediately, but don't auto-retry on a
- ConnectionError if we're already WATCHing a variable. Used when
- issuing WATCH or subsequent commands retrieving their values but before
+ Execute a command immediately, but don't auto-retry on the supported
+ errors for retry if we're already WATCHing a variable.
+ Used when issuing WATCH or subsequent commands retrieving their values but before
  MULTI is called.
  """
  command_name = args[0]
@@ -1389,7 +1400,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  lambda: self._send_command_parse_response(
  conn, command_name, *args, **options
  ),
- lambda error: self._disconnect_reset_raise(conn, error),
+ lambda error: self._disconnect_reset_raise_on_watching(conn, error),
  )

  def pipeline_execute_command(self, *args, **options):
@@ -1544,11 +1555,15 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  if not exist:
  s.sha = await immediate("SCRIPT LOAD", s.script)

- async def _disconnect_raise_reset(self, conn: Connection, error: Exception):
+ async def _disconnect_raise_on_watching(self, conn: Connection, error: Exception):
  """
- Close the connection, raise an exception if we were watching,
- and raise an exception if retry_on_error is not set or the
- error is not one of the specified error types.
+ Close the connection, raise an exception if we were watching.
+
+ The supported exceptions are already checked in the
+ retry object so we don't need to do it here.
+
+ After we disconnect the connection, it will try to reconnect and
+ do a health check as part of the send_command logic(on connection level).
  """
  await conn.disconnect()
  # if we were watching a variable, the watch is no longer valid
@@ -1556,16 +1571,8 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  # indicates the user should retry this transaction.
  if self.watching:
  raise WatchError(
- "A ConnectionError occurred on while watching one or more keys"
+ f"A {type(error).__name__} occurred while watching one or more keys"
  )
- # if retry_on_error is not set or the error is not one
- # of the specified error types, raise it
- if (
- conn.retry_on_error is None
- or isinstance(error, tuple(conn.retry_on_error)) is False
- ):
- await self.reset()
- raise

  async def execute(self, raise_on_error: bool = True) -> List[Any]:
  """Execute all the commands in the current pipeline"""
@@ -1590,7 +1597,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  try:
  return await conn.retry.call_with_retry(
  lambda: execute(conn, stack, raise_on_error),
- lambda error: self._disconnect_raise_reset(conn, error),
+ lambda error: self._disconnect_raise_on_watching(conn, error),
  )
  finally:
  await self.reset()
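Taken together, the changes in this file replace the old retry_on_timeout/_disconnect_raise plumbing with a Retry object the connection consults directly (defaulting to three retries with exponential backoff and jitter). A sketch of configuring it explicitly, using only names that appear in the diff above and assuming a reachable local server:

    import asyncio
    from redis.asyncio import Redis
    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialWithJitterBackoff
    from redis.exceptions import ConnectionError, TimeoutError

    async def main():
        # Option 1 from the docstring: keep the default supported errors and
        # widen them via retry_on_error (retry_on_timeout is deprecated).
        client = Redis(
            retry=Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3),
            retry_on_error=[ConnectionError, TimeoutError],
        )
        print(await client.ping())
        await client.aclose()

    asyncio.run(main())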
redis/asyncio/cluster.py CHANGED
@@ -29,7 +29,7 @@ from redis.asyncio.connection import Connection, SSLConnection, parse_url
  from redis.asyncio.lock import Lock
  from redis.asyncio.retry import Retry
  from redis.auth.token import TokenInterface
- from redis.backoff import default_backoff
+ from redis.backoff import ExponentialWithJitterBackoff, NoBackoff
  from redis.client import EMPTY_RESPONSE, NEVER_DECODE, AbstractRedis
  from redis.cluster import (
  PIPELINE_BLOCKED_COMMANDS,
@@ -143,19 +143,23 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  To avoid reinitializing the cluster on moved errors, set reinitialize_steps to
  0.
  :param cluster_error_retry_attempts:
- | Number of times to retry before raising an error when :class:`~.TimeoutError`
- or :class:`~.ConnectionError` or :class:`~.ClusterDownError` are encountered
- :param connection_error_retry_attempts:
- | Number of times to retry before reinitializing when :class:`~.TimeoutError`
- or :class:`~.ConnectionError` are encountered.
- The default backoff strategy will be set if Retry object is not passed (see
- default_backoff in backoff.py). To change it, pass a custom Retry object
- using the "retry" keyword.
+ | @deprecated - Please configure the 'retry' object instead
+ In case 'retry' object is set - this argument is ignored!
+
+ Number of times to retry before raising an error when :class:`~.TimeoutError`,
+ :class:`~.ConnectionError`, :class:`~.SlotNotCoveredError`
+ or :class:`~.ClusterDownError` are encountered
+ :param retry:
+ | A retry object that defines the retry strategy and the number of
+ retries for the cluster client.
+ In current implementation for the cluster client (starting form redis-py version 6.0.0)
+ the retry object is not yet fully utilized, instead it is used just to determine
+ the number of retries for the cluster client.
+ In the future releases the retry object will be used to handle the cluster client retries!
  :param max_connections:
  | Maximum number of connections per node. If there are no free connections & the
  maximum number of connections are already created, a
- :class:`~.MaxConnectionsError` is raised. This error may be retried as defined
- by :attr:`connection_error_retry_attempts`
+ :class:`~.MaxConnectionsError` is raised.
  :param address_remap:
  | An optional callable which, when provided with an internal network
  address of a node, e.g. a `(host, port)` tuple, will return the address
@@ -211,10 +215,9 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  __slots__ = (
  "_initialize",
  "_lock",
- "cluster_error_retry_attempts",
+ "retry",
  "command_flags",
  "commands_parser",
- "connection_error_retry_attempts",
  "connection_kwargs",
  "encoder",
  "node_flags",
@@ -229,7 +232,14 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  @deprecated_args(
  args_to_warn=["read_from_replicas"],
  reason="Please configure the 'load_balancing_strategy' instead",
- version="5.0.3",
+ version="5.3.0",
+ )
+ @deprecated_args(
+ args_to_warn=[
+ "cluster_error_retry_attempts",
+ ],
+ reason="Please configure the 'retry' object instead",
+ version="6.0.0",
  )
  def __init__(
  self,
@@ -242,8 +252,9 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
  reinitialize_steps: int = 5,
  cluster_error_retry_attempts: int = 3,
- connection_error_retry_attempts: int = 3,
  max_connections: int = 2**31,
+ retry: Optional["Retry"] = None,
+ retry_on_error: Optional[List[Type[Exception]]] = None,
  # Client related kwargs
  db: Union[str, int] = 0,
  path: Optional[str] = None,
@@ -263,15 +274,13 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  socket_keepalive: bool = False,
  socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
  socket_timeout: Optional[float] = None,
- retry: Optional["Retry"] = None,
- retry_on_error: Optional[List[Type[Exception]]] = None,
  # SSL related kwargs
  ssl: bool = False,
  ssl_ca_certs: Optional[str] = None,
  ssl_ca_data: Optional[str] = None,
  ssl_cert_reqs: Union[str, VerifyMode] = "required",
  ssl_certfile: Optional[str] = None,
- ssl_check_hostname: bool = False,
+ ssl_check_hostname: bool = True,
  ssl_keyfile: Optional[str] = None,
  ssl_min_version: Optional[TLSVersion] = None,
  ssl_ciphers: Optional[str] = None,
@@ -318,7 +327,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  "socket_keepalive": socket_keepalive,
  "socket_keepalive_options": socket_keepalive_options,
  "socket_timeout": socket_timeout,
- "retry": retry,
  "protocol": protocol,
  }

@@ -342,17 +350,15 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # Call our on_connect function to configure READONLY mode
  kwargs["redis_connect_func"] = self.on_connect

- self.retry = retry
- if retry or retry_on_error or connection_error_retry_attempts > 0:
- # Set a retry object for all cluster nodes
- self.retry = retry or Retry(
- default_backoff(), connection_error_retry_attempts
+ if retry:
+ self.retry = retry
+ else:
+ self.retry = Retry(
+ backoff=ExponentialWithJitterBackoff(base=1, cap=10),
+ retries=cluster_error_retry_attempts,
  )
- if not retry_on_error:
- # Default errors for retrying
- retry_on_error = [ConnectionError, TimeoutError]
+ if retry_on_error:
  self.retry.update_supported_errors(retry_on_error)
- kwargs.update({"retry": self.retry})

  kwargs["response_callbacks"] = _RedisCallbacks.copy()
  if kwargs.get("protocol") in ["3", 3]:
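With the block above, cluster_error_retry_attempts only feeds the default Retry object, and a retry passed by the caller takes precedence. A sketch of the preferred spelling, with a placeholder startup node; as the docstring notes, the cluster client currently reads mainly the retry count from this object:

    import asyncio
    from redis.asyncio.cluster import RedisCluster
    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialWithJitterBackoff

    async def main():
        rc = RedisCluster(
            host="localhost",  # placeholder startup node
            port=7000,
            retry=Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3),
        )
        print(await rc.ping())
        await rc.aclose()

    asyncio.run(main())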
@@ -389,8 +395,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  self.read_from_replicas = read_from_replicas
  self.load_balancing_strategy = load_balancing_strategy
  self.reinitialize_steps = reinitialize_steps
- self.cluster_error_retry_attempts = cluster_error_retry_attempts
- self.connection_error_retry_attempts = connection_error_retry_attempts
  self.reinitialize_counter = 0
  self.commands_parser = AsyncCommandsParser()
  self.node_flags = self.__class__.NODE_FLAGS.copy()
@@ -561,15 +565,8 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  """Get the kwargs passed to :class:`~redis.asyncio.connection.Connection`."""
  return self.connection_kwargs

- def get_retry(self) -> Optional["Retry"]:
- return self.retry
-
- def set_retry(self, retry: "Retry") -> None:
+ def set_retry(self, retry: Retry) -> None:
  self.retry = retry
- for node in self.get_nodes():
- node.connection_kwargs.update({"retry": retry})
- for conn in node._connections:
- conn.retry = retry

  def set_response_callback(self, command: str, callback: ResponseCallbackT) -> None:
  """Set a custom response callback."""
@@ -688,8 +685,8 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  """
  Execute a raw command on the appropriate cluster node or target_nodes.

- It will retry the command as specified by :attr:`cluster_error_retry_attempts` &
- then raise an exception.
+ It will retry the command as specified by the retries property of
+ the :attr:`retry` & then raise an exception.

  :param args:
  | Raw command args
@@ -705,7 +702,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  command = args[0]
  target_nodes = []
  target_nodes_specified = False
- retry_attempts = self.cluster_error_retry_attempts
+ retry_attempts = self.retry.get_retries()

  passed_targets = kwargs.pop("target_nodes", None)
  if passed_targets and not self._is_node_flag(passed_targets):
@@ -808,10 +805,16 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # and try again with the new setup
  await self.aclose()
  raise
- except ClusterDownError:
+ except (ClusterDownError, SlotNotCoveredError):
  # ClusterDownError can occur during a failover and to get
  # self-healed, we will try to reinitialize the cluster layout
  # and retry executing the command
+
+ # SlotNotCoveredError can occur when the cluster is not fully
+ # initialized or can be temporary issue.
+ # We will try to reinitialize the cluster topology
+ # and retry executing the command
+
  await self.aclose()
  await asyncio.sleep(0.25)
  raise
@@ -1042,7 +1045,23 @@ class ClusterNode:
  return self._free.popleft()
  except IndexError:
  if len(self._connections) < self.max_connections:
- connection = self.connection_class(**self.connection_kwargs)
+ # We are configuring the connection pool not to retry
+ # connections on lower level clients to avoid retrying
+ # connections to nodes that are not reachable
+ # and to avoid blocking the connection pool.
+ # The only error that will have some handling in the lower
+ # level clients is ConnectionError which will trigger disconnection
+ # of the socket.
+ # The retries will be handled on cluster client level
+ # where we will have proper handling of the cluster topology
+ retry = Retry(
+ backoff=NoBackoff(),
+ retries=0,
+ supported_errors=(ConnectionError,),
+ )
+ connection_kwargs = self.connection_kwargs.copy()
+ connection_kwargs["retry"] = retry
+ connection = self.connection_class(**connection_kwargs)
  self._connections.append(connection)
  return connection

@@ -1294,7 +1313,9 @@ class NodesManager:
  startup_nodes_reachable = False
  fully_covered = False
  exception = None
- for startup_node in self.startup_nodes.values():
+ # Convert to tuple to prevent RuntimeError if self.startup_nodes
+ # is modified during iteration
+ for startup_node in tuple(self.startup_nodes.values()):
  try:
  # Make sure cluster mode is enabled on this node
  try:
@@ -1538,7 +1559,7 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
  """
  Execute the pipeline.

- It will retry the commands as specified by :attr:`cluster_error_retry_attempts`
+ It will retry the commands as specified by retries specified in :attr:`retry`
  & then raise an exception.

  :param raise_on_error:
@@ -1554,7 +1575,7 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
  return []

  try:
- retry_attempts = self._client.cluster_error_retry_attempts
+ retry_attempts = self._client.retry.get_retries()
  while True:
  try:
  if self._client._initialize:
redis/asyncio/connection.py CHANGED
@@ -794,7 +794,7 @@ class SSLConnection(Connection):
  ssl_cert_reqs: Union[str, ssl.VerifyMode] = "required",
  ssl_ca_certs: Optional[str] = None,
  ssl_ca_data: Optional[str] = None,
- ssl_check_hostname: bool = False,
+ ssl_check_hostname: bool = True,
  ssl_min_version: Optional[TLSVersion] = None,
  ssl_ciphers: Optional[str] = None,
  **kwargs,
@@ -868,7 +868,7 @@ class RedisSSLContext:
  cert_reqs: Optional[Union[str, ssl.VerifyMode]] = None,
  ca_certs: Optional[str] = None,
  ca_data: Optional[str] = None,
- check_hostname: bool = False,
+ check_hostname: bool = True,
  min_version: Optional[TLSVersion] = None,
  ciphers: Optional[str] = None,
  ):
@@ -893,7 +893,9 @@ class RedisSSLContext:
  self.cert_reqs = cert_reqs
  self.ca_certs = ca_certs
  self.ca_data = ca_data
- self.check_hostname = check_hostname
+ self.check_hostname = (
+ check_hostname if self.cert_reqs != ssl.CERT_NONE else False
+ )
  self.min_version = min_version
  self.ciphers = ciphers
  self.context: Optional[SSLContext] = None
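These hunks flip the TLS default: hostname verification is now on unless certificate verification is disabled entirely (ssl_cert_reqs="none"), in which case RedisSSLContext forces it off because Python's ssl module refuses check_hostname=True without verification. A sketch of opting back out explicitly; host, port, and CA path are placeholders:

    from redis.asyncio import Redis

    # Verify the certificate chain but skip hostname matching (the previous default).
    client = Redis(
        host="redis.internal.example",
        port=6380,
        ssl=True,
        ssl_ca_certs="/etc/ssl/certs/internal-ca.pem",
        ssl_check_hostname=False,
    )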
@@ -1133,7 +1135,7 @@ class ConnectionPool:
  @deprecated_args(
  args_to_warn=["*"],
  reason="Use get_connection() without args instead",
- version="5.0.3",
+ version="5.3.0",
  )
  async def get_connection(self, command_name=None, *keys, **options):
  async with self._lock:
@@ -1306,7 +1308,7 @@ class BlockingConnectionPool(ConnectionPool):
  @deprecated_args(
  args_to_warn=["*"],
  reason="Use get_connection() without args instead",
- version="5.0.3",
+ version="5.3.0",
  )
  async def get_connection(self, command_name=None, *keys, **options):
  """Gets a connection from the pool, blocking until one is available"""