redis 6.0.0b2__py3-none-any.whl → 6.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
redis/asyncio/retry.py CHANGED
@@ -43,6 +43,18 @@ class Retry:
43
43
  set(self._supported_errors + tuple(specified_errors))
44
44
  )
45
45
 
46
+ def get_retries(self) -> int:
47
+ """
48
+ Get the number of retries.
49
+ """
50
+ return self._retries
51
+
52
+ def update_retries(self, value: int) -> None:
53
+ """
54
+ Set the number of retries.
55
+ """
56
+ self._retries = value
57
+
46
58
  async def call_with_retry(
47
59
  self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
48
60
  ) -> T:
redis/backoff.py CHANGED
@@ -31,6 +31,15 @@ class ConstantBackoff(AbstractBackoff):
31
31
  """`backoff`: backoff time in seconds"""
32
32
  self._backoff = backoff
33
33
 
34
+ def __hash__(self) -> int:
35
+ return hash((self._backoff,))
36
+
37
+ def __eq__(self, other) -> bool:
38
+ if not isinstance(other, ConstantBackoff):
39
+ return NotImplemented
40
+
41
+ return self._backoff == other._backoff
42
+
34
43
  def compute(self, failures: int) -> float:
35
44
  return self._backoff
36
45
 
@@ -53,6 +62,15 @@ class ExponentialBackoff(AbstractBackoff):
53
62
  self._cap = cap
54
63
  self._base = base
55
64
 
65
+ def __hash__(self) -> int:
66
+ return hash((self._base, self._cap))
67
+
68
+ def __eq__(self, other) -> bool:
69
+ if not isinstance(other, ExponentialBackoff):
70
+ return NotImplemented
71
+
72
+ return self._base == other._base and self._cap == other._cap
73
+
56
74
  def compute(self, failures: int) -> float:
57
75
  return min(self._cap, self._base * 2**failures)
58
76
 
@@ -68,6 +86,15 @@ class FullJitterBackoff(AbstractBackoff):
68
86
  self._cap = cap
69
87
  self._base = base
70
88
 
89
+ def __hash__(self) -> int:
90
+ return hash((self._base, self._cap))
91
+
92
+ def __eq__(self, other) -> bool:
93
+ if not isinstance(other, FullJitterBackoff):
94
+ return NotImplemented
95
+
96
+ return self._base == other._base and self._cap == other._cap
97
+
71
98
  def compute(self, failures: int) -> float:
72
99
  return random.uniform(0, min(self._cap, self._base * 2**failures))
73
100
 
@@ -83,6 +110,15 @@ class EqualJitterBackoff(AbstractBackoff):
83
110
  self._cap = cap
84
111
  self._base = base
85
112
 
113
+ def __hash__(self) -> int:
114
+ return hash((self._base, self._cap))
115
+
116
+ def __eq__(self, other) -> bool:
117
+ if not isinstance(other, EqualJitterBackoff):
118
+ return NotImplemented
119
+
120
+ return self._base == other._base and self._cap == other._cap
121
+
86
122
  def compute(self, failures: int) -> float:
87
123
  temp = min(self._cap, self._base * 2**failures) / 2
88
124
  return temp + random.uniform(0, temp)
@@ -100,6 +136,15 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
100
136
  self._base = base
101
137
  self._previous_backoff = 0
102
138
 
139
+ def __hash__(self) -> int:
140
+ return hash((self._base, self._cap))
141
+
142
+ def __eq__(self, other) -> bool:
143
+ if not isinstance(other, DecorrelatedJitterBackoff):
144
+ return NotImplemented
145
+
146
+ return self._base == other._base and self._cap == other._cap
147
+
103
148
  def reset(self) -> None:
104
149
  self._previous_backoff = 0
105
150
 
@@ -121,6 +166,15 @@ class ExponentialWithJitterBackoff(AbstractBackoff):
121
166
  self._cap = cap
122
167
  self._base = base
123
168
 
169
+ def __hash__(self) -> int:
170
+ return hash((self._base, self._cap))
171
+
172
+ def __eq__(self, other) -> bool:
173
+ if not isinstance(other, ExponentialWithJitterBackoff):
174
+ return NotImplemented
175
+
176
+ return self._base == other._base and self._cap == other._cap
177
+
124
178
  def compute(self, failures: int) -> float:
125
179
  return min(self._cap, random.random() * self._base * 2**failures)
126
180
 
redis/client.py CHANGED
@@ -2,7 +2,6 @@ import copy
2
2
  import re
3
3
  import threading
4
4
  import time
5
- import warnings
6
5
  from itertools import chain
7
6
  from typing import (
8
7
  TYPE_CHECKING,
@@ -12,6 +11,7 @@ from typing import (
12
11
  List,
13
12
  Mapping,
14
13
  Optional,
14
+ Set,
15
15
  Type,
16
16
  Union,
17
17
  )
@@ -23,6 +23,7 @@ from redis._parsers.helpers import (
23
23
  _RedisCallbacksRESP3,
24
24
  bool_ok,
25
25
  )
26
+ from redis.backoff import ExponentialWithJitterBackoff
26
27
  from redis.cache import CacheConfig, CacheInterface
27
28
  from redis.commands import (
28
29
  CoreCommands,
@@ -30,8 +31,10 @@ from redis.commands import (
30
31
  SentinelCommands,
31
32
  list_or_args,
32
33
  )
34
+ from redis.commands.core import Script
33
35
  from redis.connection import (
34
36
  AbstractConnection,
37
+ Connection,
35
38
  ConnectionPool,
36
39
  SSLConnection,
37
40
  UnixDomainSocketConnection,
@@ -50,7 +53,6 @@ from redis.exceptions import (
50
53
  PubSubError,
51
54
  RedisError,
52
55
  ResponseError,
53
- TimeoutError,
54
56
  WatchError,
55
57
  )
56
58
  from redis.lock import Lock
@@ -58,6 +60,7 @@ from redis.retry import Retry
58
60
  from redis.utils import (
59
61
  HIREDIS_AVAILABLE,
60
62
  _set_info_logger,
63
+ deprecated_args,
61
64
  get_lib_version,
62
65
  safe_str,
63
66
  str_if_bytes,
@@ -189,6 +192,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
189
192
  client.auto_close_connection_pool = True
190
193
  return client
191
194
 
195
+ @deprecated_args(
196
+ args_to_warn=["retry_on_timeout"],
197
+ reason="TimeoutError is included by default.",
198
+ version="6.0.0",
199
+ )
192
200
  def __init__(
193
201
  self,
194
202
  host: str = "localhost",
@@ -203,10 +211,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
203
211
  unix_socket_path: Optional[str] = None,
204
212
  encoding: str = "utf-8",
205
213
  encoding_errors: str = "strict",
206
- charset: Optional[str] = None,
207
- errors: Optional[str] = None,
208
214
  decode_responses: bool = False,
209
215
  retry_on_timeout: bool = False,
216
+ retry: Retry = Retry(
217
+ backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
218
+ ),
210
219
  retry_on_error: Optional[List[Type[Exception]]] = None,
211
220
  ssl: bool = False,
212
221
  ssl_keyfile: Optional[str] = None,
@@ -215,7 +224,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
215
224
  ssl_ca_certs: Optional[str] = None,
216
225
  ssl_ca_path: Optional[str] = None,
217
226
  ssl_ca_data: Optional[str] = None,
218
- ssl_check_hostname: bool = False,
227
+ ssl_check_hostname: bool = True,
219
228
  ssl_password: Optional[str] = None,
220
229
  ssl_validate_ocsp: bool = False,
221
230
  ssl_validate_ocsp_stapled: bool = False,
@@ -230,7 +239,6 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
230
239
  lib_name: Optional[str] = "redis-py",
231
240
  lib_version: Optional[str] = get_lib_version(),
232
241
  username: Optional[str] = None,
233
- retry: Optional[Retry] = None,
234
242
  redis_connect_func: Optional[Callable[[], None]] = None,
235
243
  credential_provider: Optional[CredentialProvider] = None,
236
244
  protocol: Optional[int] = 2,
@@ -240,10 +248,24 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
240
248
  ) -> None:
241
249
  """
242
250
  Initialize a new Redis client.
243
- To specify a retry policy for specific errors, first set
244
- `retry_on_error` to a list of the error/s to retry on, then set
245
- `retry` to a valid `Retry` object.
246
- To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
251
+
252
+ To specify a retry policy for specific errors, you have two options:
253
+
254
+ 1. Set the `retry_on_error` to a list of the error/s to retry on, and
255
+ you can also set `retry` to a valid `Retry` object (in case the default
256
+ one is not appropriate) - with this approach the retries will be triggered
257
+ on the default errors specified in the Retry object enriched with the
258
+ errors specified in `retry_on_error`.
259
+
260
+ 2. Define a `Retry` object with configured 'supported_errors' and set
261
+ it to the `retry` parameter - with this approach you completely redefine
262
+ the errors on which retries will happen.
263
+
264
+ `retry_on_timeout` is deprecated - please include the TimeoutError
265
+ either in the Retry object or in the `retry_on_error` list.
266
+
267
+ When 'connection_pool' is provided - the retry configuration of the
268
+ provided pool will be used.
247
269
 
248
270
  Args:
249
271
 
@@ -256,24 +278,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
256
278
  else:
257
279
  self._event_dispatcher = event_dispatcher
258
280
  if not connection_pool:
259
- if charset is not None:
260
- warnings.warn(
261
- DeprecationWarning(
262
- '"charset" is deprecated. Use "encoding" instead'
263
- )
264
- )
265
- encoding = charset
266
- if errors is not None:
267
- warnings.warn(
268
- DeprecationWarning(
269
- '"errors" is deprecated. Use "encoding_errors" instead'
270
- )
271
- )
272
- encoding_errors = errors
273
281
  if not retry_on_error:
274
282
  retry_on_error = []
275
- if retry_on_timeout is True:
276
- retry_on_error.append(TimeoutError)
277
283
  kwargs = {
278
284
  "db": db,
279
285
  "username": username,
@@ -363,6 +369,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
363
369
  ]:
364
370
  raise RedisError("Client caching is only supported with RESP version 3")
365
371
 
372
+ # TODO: To avoid breaking changes during the bug fix, we have to keep non-reentrant lock.
373
+ # TODO: Remove this before next major version (7.0.0)
366
374
  self.single_connection_lock = threading.Lock()
367
375
  self.connection = None
368
376
  self._single_connection_client = single_connection_client
@@ -395,10 +403,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
395
403
  """Get the connection's key-word arguments"""
396
404
  return self.connection_pool.connection_kwargs
397
405
 
398
- def get_retry(self) -> Optional["Retry"]:
406
+ def get_retry(self) -> Optional[Retry]:
399
407
  return self.get_connection_kwargs().get("retry")
400
408
 
401
- def set_retry(self, retry: "Retry") -> None:
409
+ def set_retry(self, retry: Retry) -> None:
402
410
  self.get_connection_kwargs().update({"retry": retry})
403
411
  self.connection_pool.set_retry(retry)
404
412
 
@@ -598,18 +606,18 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
598
606
  conn.send_command(*args, **options)
599
607
  return self.parse_response(conn, command_name, **options)
600
608
 
601
- def _disconnect_raise(self, conn, error):
609
+ def _close_connection(self, conn) -> None:
602
610
  """
603
- Close the connection and raise an exception
604
- if retry_on_error is not set or the error
605
- is not one of the specified error types
611
+ Close the connection before retrying.
612
+
613
+ The supported exceptions are already checked in the
614
+ retry object so we don't need to do it here.
615
+
616
+ After we disconnect the connection, it will try to reconnect and
617
+ do a health check as part of the send_command logic (on connection level).
606
618
  """
619
+
607
620
  conn.disconnect()
608
- if (
609
- conn.retry_on_error is None
610
- or isinstance(error, tuple(conn.retry_on_error)) is False
611
- ):
612
- raise error
613
621
 
614
622
  # COMMAND EXECUTION AND PROTOCOL PARSING
615
623
  def execute_command(self, *args, **options):
@@ -628,7 +636,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
628
636
  lambda: self._send_command_parse_response(
629
637
  conn, command_name, *args, **options
630
638
  ),
631
- lambda error: self._disconnect_raise(conn, error),
639
+ lambda _: self._close_connection(conn),
632
640
  )
633
641
  finally:
634
642
  if self._single_connection_client:
@@ -768,6 +776,9 @@ class PubSub:
768
776
  self._event_dispatcher = EventDispatcher()
769
777
  else:
770
778
  self._event_dispatcher = event_dispatcher
779
+
780
+ # TODO: To avoid breaking changes during the bug fix, we have to keep non-reentrant lock.
781
+ # TODO: Remove this before next major version (7.0.0)
771
782
  self._lock = threading.Lock()
772
783
  if self.encoder is None:
773
784
  self.encoder = self.connection_pool.get_encoder()
@@ -887,19 +898,14 @@ class PubSub:
887
898
  )
888
899
  ttl -= 1
889
900
 
890
- def _disconnect_raise_connect(self, conn, error) -> None:
901
+ def _reconnect(self, conn) -> None:
891
902
  """
892
- Close the connection and raise an exception
893
- if retry_on_error is not set or the error is not one
894
- of the specified error types. Otherwise, try to
895
- reconnect
903
+ The supported exceptions are already checked in the
904
+ retry object so we don't need to do it here.
905
+
906
+ In this error handler we are trying to reconnect to the server.
896
907
  """
897
908
  conn.disconnect()
898
- if (
899
- conn.retry_on_error is None
900
- or isinstance(error, tuple(conn.retry_on_error)) is False
901
- ):
902
- raise error
903
909
  conn.connect()
904
910
 
905
911
  def _execute(self, conn, command, *args, **kwargs):
@@ -912,7 +918,7 @@ class PubSub:
912
918
  """
913
919
  return conn.retry.call_with_retry(
914
920
  lambda: command(*args, **kwargs),
915
- lambda error: self._disconnect_raise_connect(conn, error),
921
+ lambda _: self._reconnect(conn),
916
922
  )
917
923
 
918
924
  def parse_response(self, block=True, timeout=0):
@@ -1281,7 +1287,8 @@ class Pipeline(Redis):
1281
1287
  in one transmission. This is convenient for batch processing, such as
1282
1288
  saving all the values in a list to Redis.
1283
1289
 
1284
- All commands executed within a pipeline are wrapped with MULTI and EXEC
1290
- All commands executed within a pipeline (when running in transactional mode,
1291
+ which is the default behavior) are wrapped with MULTI and EXEC
1285
1292
  calls. This guarantees all commands executed in the pipeline will be
1286
1293
  executed atomically.
1287
1294
 
@@ -1296,15 +1303,22 @@ class Pipeline(Redis):
1296
1303
 
1297
1304
  UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
1298
1305
 
1299
- def __init__(self, connection_pool, response_callbacks, transaction, shard_hint):
1306
+ def __init__(
1307
+ self,
1308
+ connection_pool: ConnectionPool,
1309
+ response_callbacks,
1310
+ transaction,
1311
+ shard_hint,
1312
+ ):
1300
1313
  self.connection_pool = connection_pool
1301
- self.connection = None
1314
+ self.connection: Optional[Connection] = None
1302
1315
  self.response_callbacks = response_callbacks
1303
1316
  self.transaction = transaction
1304
1317
  self.shard_hint = shard_hint
1305
-
1306
1318
  self.watching = False
1307
- self.reset()
1319
+ self.command_stack = []
1320
+ self.scripts: Set[Script] = set()
1321
+ self.explicit_transaction = False
1308
1322
 
1309
1323
  def __enter__(self) -> "Pipeline":
1310
1324
  return self
@@ -1370,36 +1384,37 @@ class Pipeline(Redis):
1370
1384
  return self.immediate_execute_command(*args, **kwargs)
1371
1385
  return self.pipeline_execute_command(*args, **kwargs)
1372
1386
 
1373
- def _disconnect_reset_raise(self, conn, error) -> None:
1387
+ def _disconnect_reset_raise_on_watching(
1388
+ self,
1389
+ conn: AbstractConnection,
1390
+ error: Exception,
1391
+ ) -> None:
1374
1392
  """
1375
- Close the connection, reset watching state and
1376
- raise an exception if we were watching,
1377
- if retry_on_error is not set or the error is not one
1378
- of the specified error types.
1393
+ Close the connection, reset watching state and
1394
+ raise an exception if we were watching.
1395
+
1396
+ The supported exceptions are already checked in the
1397
+ retry object so we don't need to do it here.
1398
+
1399
+ After we disconnect the connection, it will try to reconnect and
1400
+ do a health check as part of the send_command logic (on connection level).
1379
1401
  """
1380
1402
  conn.disconnect()
1403
+
1381
1404
  # if we were already watching a variable, the watch is no longer
1382
1405
  # valid since this connection has died. raise a WatchError, which
1383
1406
  # indicates the user should retry this transaction.
1384
1407
  if self.watching:
1385
1408
  self.reset()
1386
1409
  raise WatchError(
1387
- "A ConnectionError occurred on while watching one or more keys"
1410
+ f"A {type(error).__name__} occurred while watching one or more keys"
1388
1411
  )
1389
- # if retry_on_error is not set or the error is not one
1390
- # of the specified error types, raise it
1391
- if (
1392
- conn.retry_on_error is None
1393
- or isinstance(error, tuple(conn.retry_on_error)) is False
1394
- ):
1395
- self.reset()
1396
- raise
1397
1412
 
1398
1413
  def immediate_execute_command(self, *args, **options):
1399
1414
  """
1400
- Execute a command immediately, but don't auto-retry on a
1401
- ConnectionError if we're already WATCHing a variable. Used when
1402
- issuing WATCH or subsequent commands retrieving their values but before
1415
+ Execute a command immediately, but don't auto-retry on the supported
1416
+ errors for retry if we're already WATCHing a variable.
1417
+ Used when issuing WATCH or subsequent commands retrieving their values but before
1403
1418
  MULTI is called.
1404
1419
  """
1405
1420
  command_name = args[0]
@@ -1413,7 +1428,7 @@ class Pipeline(Redis):
1413
1428
  lambda: self._send_command_parse_response(
1414
1429
  conn, command_name, *args, **options
1415
1430
  ),
1416
- lambda error: self._disconnect_reset_raise(conn, error),
1431
+ lambda error: self._disconnect_reset_raise_on_watching(conn, error),
1417
1432
  )
1418
1433
 
1419
1434
  def pipeline_execute_command(self, *args, **options) -> "Pipeline":
@@ -1431,7 +1446,9 @@ class Pipeline(Redis):
1431
1446
  self.command_stack.append((args, options))
1432
1447
  return self
1433
1448
 
1434
- def _execute_transaction(self, connection, commands, raise_on_error) -> List:
1449
+ def _execute_transaction(
1450
+ self, connection: Connection, commands, raise_on_error
1451
+ ) -> List:
1435
1452
  cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
1436
1453
  all_cmds = connection.pack_commands(
1437
1454
  [args for args, options in cmds if EMPTY_RESPONSE not in options]
@@ -1551,15 +1568,19 @@ class Pipeline(Redis):
1551
1568
  if not exist:
1552
1569
  s.sha = immediate("SCRIPT LOAD", s.script)
1553
1570
 
1554
- def _disconnect_raise_reset(
1571
+ def _disconnect_raise_on_watching(
1555
1572
  self,
1556
1573
  conn: AbstractConnection,
1557
1574
  error: Exception,
1558
1575
  ) -> None:
1559
1576
  """
1560
- Close the connection, raise an exception if we were watching,
1561
- and raise an exception if retry_on_error is not set or the
1562
- error is not one of the specified error types.
1577
+ Close the connection, raise an exception if we were watching.
1578
+
1579
+ The supported exceptions are already checked in the
1580
+ retry object so we don't need to do it here.
1581
+
1582
+ After we disconnect the connection, it will try to reconnect and
1583
+ do a health check as part of the send_command logic (on connection level).
1563
1584
  """
1564
1585
  conn.disconnect()
1565
1586
  # if we were watching a variable, the watch is no longer valid
@@ -1567,16 +1588,8 @@ class Pipeline(Redis):
1567
1588
  # indicates the user should retry this transaction.
1568
1589
  if self.watching:
1569
1590
  raise WatchError(
1570
- "A ConnectionError occurred on while watching one or more keys"
1591
+ f"A {type(error).__name__} occurred while watching one or more keys"
1571
1592
  )
1572
- # if retry_on_error is not set or the error is not one
1573
- # of the specified error types, raise it
1574
- if (
1575
- conn.retry_on_error is None
1576
- or isinstance(error, tuple(conn.retry_on_error)) is False
1577
- ):
1578
- self.reset()
1579
- raise error
1580
1593
 
1581
1594
  def execute(self, raise_on_error: bool = True) -> List[Any]:
1582
1595
  """Execute all the commands in the current pipeline"""
@@ -1600,7 +1613,7 @@ class Pipeline(Redis):
1600
1613
  try:
1601
1614
  return conn.retry.call_with_retry(
1602
1615
  lambda: execute(conn, stack, raise_on_error),
1603
- lambda error: self._disconnect_raise_reset(conn, error),
1616
+ lambda error: self._disconnect_raise_on_watching(conn, error),
1604
1617
  )
1605
1618
  finally:
1606
1619
  self.reset()