redis 6.0.0b1__py3-none-any.whl → 6.1.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- redis/__init__.py +9 -1
- redis/_parsers/resp3.py +2 -2
- redis/asyncio/client.py +83 -71
- redis/asyncio/cluster.py +74 -50
- redis/asyncio/connection.py +43 -17
- redis/asyncio/retry.py +12 -0
- redis/asyncio/sentinel.py +2 -0
- redis/backoff.py +54 -0
- redis/client.py +99 -89
- redis/cluster.py +1085 -359
- redis/commands/core.py +105 -105
- redis/commands/helpers.py +19 -6
- redis/commands/json/__init__.py +1 -1
- redis/commands/json/commands.py +8 -8
- redis/commands/redismodules.py +27 -9
- redis/commands/search/commands.py +2 -2
- redis/commands/timeseries/__init__.py +1 -1
- redis/commands/vectorset/__init__.py +46 -0
- redis/commands/vectorset/commands.py +367 -0
- redis/commands/vectorset/utils.py +94 -0
- redis/connection.py +46 -13
- redis/exceptions.py +18 -0
- redis/retry.py +25 -0
- redis/sentinel.py +2 -0
- redis/utils.py +7 -0
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/METADATA +14 -8
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/RECORD +29 -26
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/WHEEL +0 -0
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/licenses/LICENSE +0 -0
redis/__init__.py
CHANGED
@@ -16,11 +16,14 @@ from redis.exceptions import (
     BusyLoadingError,
     ChildDeadlockedError,
     ConnectionError,
+    CrossSlotTransactionError,
     DataError,
+    InvalidPipelineStack,
     InvalidResponse,
     OutOfMemoryError,
     PubSubError,
     ReadOnlyError,
+    RedisClusterException,
     RedisError,
     ResponseError,
     TimeoutError,
@@ -42,7 +45,9 @@ def int_or_str(value):
     return value


-__version__ = "6.0.0b1"
+# This is the version of redis-py that is being used
+# for building and installing the lib.
+__version__ = "6.1.0"
 VERSION = tuple(map(int_or_str, __version__.split(".")))


@@ -56,15 +61,18 @@ __all__ = [
     "ConnectionError",
     "ConnectionPool",
     "CredentialProvider",
+    "CrossSlotTransactionError",
     "DataError",
     "from_url",
     "default_backoff",
+    "InvalidPipelineStack",
     "InvalidResponse",
     "OutOfMemoryError",
     "PubSubError",
     "ReadOnlyError",
     "Redis",
     "RedisCluster",
+    "RedisClusterException",
     "RedisError",
     "ResponseError",
     "Sentinel",
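The three exception types added to the import list and __all__ can now be caught via the package root instead of redis.exceptions. A minimal sketch (the describe helper below is purely illustrative and not part of the package):

    # Illustrative only: the exception classes themselves are unchanged;
    # 6.1.0 simply re-exports them from the package root.
    from redis import (
        CrossSlotTransactionError,
        InvalidPipelineStack,
        RedisClusterException,
    )

    def describe(err: Exception) -> str:
        # Hypothetical dispatch helper showing where each type applies.
        if isinstance(err, CrossSlotTransactionError):
            return "transaction touched keys that hash to different cluster slots"
        if isinstance(err, InvalidPipelineStack):
            return "pipeline command stack was in an unexpected state"
        if isinstance(err, RedisClusterException):
            return "generic cluster-level failure"
        return "unhandled"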
redis/_parsers/resp3.py
CHANGED
@@ -19,7 +19,7 @@ class _RESP3Parser(_RESPBase):

     def handle_pubsub_push_response(self, response):
         logger = getLogger("push_response")
-        logger.
+        logger.debug("Push response: " + str(response))
         return response

     def read_response(self, disable_decoding=False, push_request=False):
@@ -150,7 +150,7 @@ class _AsyncRESP3Parser(_AsyncRESPBase):

     async def handle_pubsub_push_response(self, response):
         logger = getLogger("push_response")
-        logger.
+        logger.debug("Push response: " + str(response))
         return response

     async def read_response(
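Both parsers emit push messages through the "push_response" logger at DEBUG level. A small sketch of opting in to that output from an application (standard library logging only; nothing here is redis-py API beyond the logger name shown above):

    import logging

    # Show redis-py's RESP3 push-response records even if the root logger
    # is configured at a higher level.
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("push_response").setLevel(logging.DEBUG)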
redis/asyncio/client.py
CHANGED
@@ -39,6 +39,7 @@ from redis.asyncio.connection import (
 )
 from redis.asyncio.lock import Lock
 from redis.asyncio.retry import Retry
+from redis.backoff import ExponentialWithJitterBackoff
 from redis.client import (
     EMPTY_RESPONSE,
     NEVER_DECODE,
@@ -65,7 +66,6 @@ from redis.exceptions import (
     PubSubError,
     RedisError,
     ResponseError,
-    TimeoutError,
     WatchError,
 )
 from redis.typing import ChannelT, EncodableT, KeyT
@@ -73,16 +73,19 @@ from redis.utils import (
     HIREDIS_AVAILABLE,
     SSL_AVAILABLE,
     _set_info_logger,
+    deprecated_args,
     deprecated_function,
     get_lib_version,
     safe_str,
     str_if_bytes,
+    truncate_text,
 )

 if TYPE_CHECKING and SSL_AVAILABLE:
-    from ssl import TLSVersion
+    from ssl import TLSVersion, VerifyMode
 else:
     TLSVersion = None
+    VerifyMode = None

 PubSubHandler = Callable[[Dict[str, str]], Awaitable[None]]
 _KeyT = TypeVar("_KeyT", bound=KeyT)
@@ -206,6 +209,11 @@ class Redis(
         client.auto_close_connection_pool = True
         return client

+    @deprecated_args(
+        args_to_warn=["retry_on_timeout"],
+        reason="TimeoutError is included by default.",
+        version="6.0.0",
+    )
     def __init__(
         self,
         *,
@@ -223,14 +231,17 @@ class Redis(
         encoding_errors: str = "strict",
         decode_responses: bool = False,
         retry_on_timeout: bool = False,
+        retry: Retry = Retry(
+            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
+        ),
         retry_on_error: Optional[list] = None,
         ssl: bool = False,
         ssl_keyfile: Optional[str] = None,
         ssl_certfile: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, VerifyMode] = "required",
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool = False,
+        ssl_check_hostname: bool = True,
         ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
         max_connections: Optional[int] = None,
@@ -240,7 +251,6 @@ class Redis(
         lib_name: Optional[str] = "redis-py",
         lib_version: Optional[str] = get_lib_version(),
         username: Optional[str] = None,
-        retry: Optional[Retry] = None,
         auto_close_connection_pool: Optional[bool] = None,
         redis_connect_func=None,
         credential_provider: Optional[CredentialProvider] = None,
@@ -249,10 +259,24 @@ class Redis(
     ):
         """
         Initialize a new Redis client.
-
-
-
-
+
+        To specify a retry policy for specific errors, you have two options:
+
+        1. Set the `retry_on_error` to a list of the error/s to retry on, and
+        you can also set `retry` to a valid `Retry` object(in case the default
+        one is not appropriate) - with this approach the retries will be triggered
+        on the default errors specified in the Retry object enriched with the
+        errors specified in `retry_on_error`.
+
+        2. Define a `Retry` object with configured 'supported_errors' and set
+        it to the `retry` parameter - with this approach you completely redefine
+        the errors on which retries will happen.
+
+        `retry_on_timeout` is deprecated - please include the TimeoutError
+        either in the Retry object or in the `retry_on_error` list.
+
+        When 'connection_pool' is provided - the retry configuration of the
+        provided pool will be used.
         """
         kwargs: Dict[str, Any]
         if event_dispatcher is None:
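As the new signature and docstring above describe, retries are now on by default (exponential backoff with jitter, three attempts), and extra error types can either be layered on via retry_on_error or the whole policy redefined through supported_errors. A short sketch of both options; the host and error choices are placeholders, not recommendations:

    import asyncio

    from redis.asyncio import Redis
    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialWithJitterBackoff
    from redis.exceptions import BusyLoadingError, TimeoutError

    async def main() -> None:
        # Option 1: keep the Retry object's default supported errors and
        # enrich them with extra error types via retry_on_error.
        r1 = Redis(
            host="localhost",
            retry=Retry(
                backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
            ),
            retry_on_error=[BusyLoadingError],
        )

        # Option 2: completely redefine which errors are retried.
        r2 = Redis(
            host="localhost",
            retry=Retry(
                backoff=ExponentialWithJitterBackoff(base=1, cap=10),
                retries=5,
                supported_errors=(TimeoutError,),
            ),
        )

        await asyncio.gather(r1.aclose(), r2.aclose())

    asyncio.run(main())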
@@ -278,8 +302,6 @@ class Redis(
         # Create internal connection pool, expected to be closed by Redis instance
         if not retry_on_error:
             retry_on_error = []
-        if retry_on_timeout is True:
-            retry_on_error.append(TimeoutError)
         kwargs = {
             "db": db,
             "username": username,
@@ -289,7 +311,6 @@ class Redis(
             "encoding": encoding,
             "encoding_errors": encoding_errors,
             "decode_responses": decode_responses,
-            "retry_on_timeout": retry_on_timeout,
             "retry_on_error": retry_on_error,
             "retry": copy.deepcopy(retry),
             "max_connections": max_connections,
@@ -401,10 +422,10 @@ class Redis(
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs

-    def get_retry(self) -> Optional[
+    def get_retry(self) -> Optional[Retry]:
         return self.get_connection_kwargs().get("retry")

-    def set_retry(self, retry:
+    def set_retry(self, retry: Retry) -> None:
         self.get_connection_kwargs().update({"retry": retry})
         self.connection_pool.set_retry(retry)

@@ -631,18 +652,17 @@ class Redis(
         await conn.send_command(*args)
         return await self.parse_response(conn, command_name, **options)

-    async def
+    async def _close_connection(self, conn: Connection):
         """
-        Close the connection
-
-
+        Close the connection before retrying.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         await conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error

     # COMMAND EXECUTION AND PROTOCOL PARSING
     async def execute_command(self, *args, **options):
@@ -659,7 +679,7 @@ class Redis(
                 lambda: self._send_command_parse_response(
                     conn, command_name, *args, **options
                 ),
-                lambda
+                lambda _: self._close_connection(conn),
             )
         finally:
             if self.single_connection_client:
@@ -927,19 +947,11 @@ class PubSub:
             )
         )

-    async def
+    async def _reconnect(self, conn):
         """
-
-        if retry_on_error is not set or the error is not one
-        of the specified error types. Otherwise, try to
-        reconnect
+        Try to reconnect
         """
         await conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
         await conn.connect()

     async def _execute(self, conn, command, *args, **kwargs):
@@ -952,7 +964,7 @@ class PubSub:
         """
         return await conn.retry.call_with_retry(
             lambda: command(*args, **kwargs),
-            lambda
+            lambda _: self._reconnect(conn),
         )

     async def parse_response(self, block: bool = True, timeout: float = 0):
@@ -1243,7 +1255,8 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
     in one transmission. This is convenient for batch processing, such as
     saving all the values in a list to Redis.

-    All commands executed within a pipeline
+    All commands executed within a pipeline(when running in transactional mode,
+    which is the default behavior) are wrapped with MULTI and EXEC
     calls. This guarantees all commands executed in the pipeline will be
     executed atomically.

@@ -1272,7 +1285,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         self.shard_hint = shard_hint
         self.watching = False
         self.command_stack: CommandStackT = []
-        self.scripts: Set[
+        self.scripts: Set[Script] = set()
         self.explicit_transaction = False

     async def __aenter__(self: _RedisT) -> _RedisT:
@@ -1344,36 +1357,36 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
             return self.immediate_execute_command(*args, **kwargs)
         return self.pipeline_execute_command(*args, **kwargs)

-    async def
+    async def _disconnect_reset_raise_on_watching(
+        self,
+        conn: Connection,
+        error: Exception,
+    ):
         """
-        Close the connection
-        raise an exception if we were watching
-
-
+        Close the connection reset watching state and
+        raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         await conn.disconnect()
         # if we were already watching a variable, the watch is no longer
         # valid since this connection has died. raise a WatchError, which
         # indicates the user should retry this transaction.
         if self.watching:
-            await self.
+            await self.reset()
             raise WatchError(
-                "A
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            await self.aclose()
-            raise

     async def immediate_execute_command(self, *args, **options):
         """
-        Execute a command immediately, but don't auto-retry on
-
-        issuing WATCH or subsequent commands retrieving their values but before
+        Execute a command immediately, but don't auto-retry on the supported
+        errors for retry if we're already WATCHing a variable.
+        Used when issuing WATCH or subsequent commands retrieving their values but before
         MULTI is called.
         """
         command_name = args[0]
@@ -1387,7 +1400,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
             lambda: self._send_command_parse_response(
                 conn, command_name, *args, **options
             ),
-            lambda error: self.
+            lambda error: self._disconnect_reset_raise_on_watching(conn, error),
         )

     def pipeline_execute_command(self, *args, **options):
@@ -1513,7 +1526,10 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         self, exception: Exception, number: int, command: Iterable[object]
     ) -> None:
         cmd = " ".join(map(safe_str, command))
-        msg =
+        msg = (
+            f"Command # {number} ({truncate_text(cmd)}) "
+            "of pipeline caused error: {exception.args}"
+        )
         exception.args = (msg,) + exception.args[1:]

     async def parse_response(
@@ -1539,11 +1555,15 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
                 if not exist:
                     s.sha = await immediate("SCRIPT LOAD", s.script)

-    async def
+    async def _disconnect_raise_on_watching(self, conn: Connection, error: Exception):
         """
-        Close the connection, raise an exception if we were watching
-
-
+        Close the connection, raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         await conn.disconnect()
         # if we were watching a variable, the watch is no longer valid
@@ -1551,16 +1571,8 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            await self.reset()
-            raise

     async def execute(self, raise_on_error: bool = True) -> List[Any]:
         """Execute all the commands in the current pipeline"""
@@ -1585,7 +1597,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
         try:
             return await conn.retry.call_with_retry(
                 lambda: execute(conn, stack, raise_on_error),
-                lambda error: self.
+                lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
             await self.reset()
redis/asyncio/cluster.py
CHANGED
@@ -29,7 +29,7 @@ from redis.asyncio.connection import Connection, SSLConnection, parse_url
 from redis.asyncio.lock import Lock
 from redis.asyncio.retry import Retry
 from redis.auth.token import TokenInterface
-from redis.backoff import
+from redis.backoff import ExponentialWithJitterBackoff, NoBackoff
 from redis.client import EMPTY_RESPONSE, NEVER_DECODE, AbstractRedis
 from redis.cluster import (
     PIPELINE_BLOCKED_COMMANDS,
@@ -71,12 +71,14 @@ from redis.utils import (
     get_lib_version,
     safe_str,
     str_if_bytes,
+    truncate_text,
 )

 if SSL_AVAILABLE:
-    from ssl import TLSVersion
+    from ssl import TLSVersion, VerifyMode
 else:
     TLSVersion = None
+    VerifyMode = None

 TargetNodesT = TypeVar(
     "TargetNodesT", str, "ClusterNode", List["ClusterNode"], Dict[Any, "ClusterNode"]
@@ -141,19 +143,23 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
          To avoid reinitializing the cluster on moved errors, set reinitialize_steps to
          0.
     :param cluster_error_retry_attempts:
-
-
-
-
-
-
-
-
+        | @deprecated - Please configure the 'retry' object instead
+          In case 'retry' object is set - this argument is ignored!
+
+          Number of times to retry before raising an error when :class:`~.TimeoutError`,
+          :class:`~.ConnectionError`, :class:`~.SlotNotCoveredError`
+          or :class:`~.ClusterDownError` are encountered
+    :param retry:
+        | A retry object that defines the retry strategy and the number of
+          retries for the cluster client.
+          In current implementation for the cluster client (starting form redis-py version 6.0.0)
+          the retry object is not yet fully utilized, instead it is used just to determine
+          the number of retries for the cluster client.
+          In the future releases the retry object will be used to handle the cluster client retries!
     :param max_connections:
        | Maximum number of connections per node. If there are no free connections & the
          maximum number of connections are already created, a
-          :class:`~.MaxConnectionsError` is raised.
-          by :attr:`connection_error_retry_attempts`
+          :class:`~.MaxConnectionsError` is raised.
     :param address_remap:
        | An optional callable which, when provided with an internal network
          address of a node, e.g. a `(host, port)` tuple, will return the address
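Per the updated docstring, the retry object currently supplies only the number of cluster-level retries, and cluster_error_retry_attempts is ignored once retry is passed. A short sketch of configuring it explicitly; the node address is a placeholder:

    import asyncio

    from redis.asyncio.cluster import RedisCluster
    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialWithJitterBackoff

    async def main() -> None:
        rc = RedisCluster(
            host="localhost",
            port=7000,
            # The cluster client reads retry.get_retries() to decide how many
            # times a command is re-run after errors such as ClusterDownError.
            retry=Retry(
                backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=5
            ),
        )
        print(await rc.ping())
        await rc.aclose()

    asyncio.run(main())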
@@ -209,10 +215,9 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
     __slots__ = (
         "_initialize",
         "_lock",
-        "
+        "retry",
         "command_flags",
         "commands_parser",
-        "connection_error_retry_attempts",
         "connection_kwargs",
         "encoder",
         "node_flags",
@@ -227,7 +232,14 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
     @deprecated_args(
         args_to_warn=["read_from_replicas"],
         reason="Please configure the 'load_balancing_strategy' instead",
-        version="5.0
+        version="5.3.0",
+    )
+    @deprecated_args(
+        args_to_warn=[
+            "cluster_error_retry_attempts",
+        ],
+        reason="Please configure the 'retry' object instead",
+        version="6.0.0",
     )
     def __init__(
         self,
@@ -240,8 +252,9 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
         reinitialize_steps: int = 5,
         cluster_error_retry_attempts: int = 3,
-        connection_error_retry_attempts: int = 3,
         max_connections: int = 2**31,
+        retry: Optional["Retry"] = None,
+        retry_on_error: Optional[List[Type[Exception]]] = None,
         # Client related kwargs
         db: Union[str, int] = 0,
         path: Optional[str] = None,
@@ -261,15 +274,13 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         socket_keepalive: bool = False,
         socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
         socket_timeout: Optional[float] = None,
-        retry: Optional["Retry"] = None,
-        retry_on_error: Optional[List[Type[Exception]]] = None,
         # SSL related kwargs
         ssl: bool = False,
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, VerifyMode] = "required",
         ssl_certfile: Optional[str] = None,
-        ssl_check_hostname: bool = False,
+        ssl_check_hostname: bool = True,
         ssl_keyfile: Optional[str] = None,
         ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
@@ -316,7 +327,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
             "socket_keepalive": socket_keepalive,
             "socket_keepalive_options": socket_keepalive_options,
             "socket_timeout": socket_timeout,
-            "retry": retry,
             "protocol": protocol,
         }

@@ -340,17 +350,15 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         # Call our on_connect function to configure READONLY mode
         kwargs["redis_connect_func"] = self.on_connect

-
-
-
-        self.retry =
-
+        if retry:
+            self.retry = retry
+        else:
+            self.retry = Retry(
+                backoff=ExponentialWithJitterBackoff(base=1, cap=10),
+                retries=cluster_error_retry_attempts,
             )
-
-        # Default errors for retrying
-        retry_on_error = [ConnectionError, TimeoutError]
+        if retry_on_error:
             self.retry.update_supported_errors(retry_on_error)
-        kwargs.update({"retry": self.retry})

         kwargs["response_callbacks"] = _RedisCallbacks.copy()
         if kwargs.get("protocol") in ["3", 3]:
@@ -387,8 +395,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         self.read_from_replicas = read_from_replicas
         self.load_balancing_strategy = load_balancing_strategy
         self.reinitialize_steps = reinitialize_steps
-        self.cluster_error_retry_attempts = cluster_error_retry_attempts
-        self.connection_error_retry_attempts = connection_error_retry_attempts
         self.reinitialize_counter = 0
         self.commands_parser = AsyncCommandsParser()
         self.node_flags = self.__class__.NODE_FLAGS.copy()
@@ -559,15 +565,8 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         """Get the kwargs passed to :class:`~redis.asyncio.connection.Connection`."""
         return self.connection_kwargs

-    def
-        return self.retry
-
-    def set_retry(self, retry: "Retry") -> None:
+    def set_retry(self, retry: Retry) -> None:
         self.retry = retry
-        for node in self.get_nodes():
-            node.connection_kwargs.update({"retry": retry})
-            for conn in node._connections:
-                conn.retry = retry

     def set_response_callback(self, command: str, callback: ResponseCallbackT) -> None:
         """Set a custom response callback."""
@@ -686,8 +685,8 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         """
         Execute a raw command on the appropriate cluster node or target_nodes.

-        It will retry the command as specified by
-        then raise an exception.
+        It will retry the command as specified by the retries property of
+        the :attr:`retry` & then raise an exception.

        :param args:
          | Raw command args
@@ -703,7 +702,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         command = args[0]
         target_nodes = []
         target_nodes_specified = False
-        retry_attempts = self.
+        retry_attempts = self.retry.get_retries()

         passed_targets = kwargs.pop("target_nodes", None)
         if passed_targets and not self._is_node_flag(passed_targets):
@@ -806,10 +805,16 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
                 # and try again with the new setup
                 await self.aclose()
                 raise
-            except ClusterDownError:
+            except (ClusterDownError, SlotNotCoveredError):
                 # ClusterDownError can occur during a failover and to get
                 # self-healed, we will try to reinitialize the cluster layout
                 # and retry executing the command
+
+                # SlotNotCoveredError can occur when the cluster is not fully
+                # initialized or can be temporary issue.
+                # We will try to reinitialize the cluster topology
+                # and retry executing the command
+
                 await self.aclose()
                 await asyncio.sleep(0.25)
                 raise
@@ -1040,7 +1045,23 @@ class ClusterNode:
             return self._free.popleft()
         except IndexError:
             if len(self._connections) < self.max_connections:
-                connection
+                # We are configuring the connection pool not to retry
+                # connections on lower level clients to avoid retrying
+                # connections to nodes that are not reachable
+                # and to avoid blocking the connection pool.
+                # The only error that will have some handling in the lower
+                # level clients is ConnectionError which will trigger disconnection
+                # of the socket.
+                # The retries will be handled on cluster client level
+                # where we will have proper handling of the cluster topology
+                retry = Retry(
+                    backoff=NoBackoff(),
+                    retries=0,
+                    supported_errors=(ConnectionError,),
+                )
+                connection_kwargs = self.connection_kwargs.copy()
+                connection_kwargs["retry"] = retry
+                connection = self.connection_class(**connection_kwargs)
                 self._connections.append(connection)
                 return connection

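The comment block above spells out the split: connections created for a ClusterNode get a Retry that never retries (NoBackoff, zero retries, ConnectionError only), while the cluster client keeps the jittered policy. A standalone sketch contrasting the two Retry objects; the flaky coroutine is a stand-in for a command hitting an unreachable node and is not redis-py code:

    import asyncio

    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialWithJitterBackoff, NoBackoff
    from redis.exceptions import ConnectionError

    async def flaky(attempts: list) -> str:
        # Fails twice, then succeeds.
        attempts.append(1)
        if len(attempts) < 3:
            raise ConnectionError("node unreachable")
        return "OK"

    async def on_failure(error: Exception) -> None:
        print(f"attempt failed: {error}")

    async def main() -> None:
        # Cluster-level policy: several attempts with jittered backoff
        # (may sleep a second or two between attempts).
        cluster_retry = Retry(
            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
        )
        attempts: list = []
        print(await cluster_retry.call_with_retry(lambda: flaky(attempts), on_failure))

        # Node-level policy from the hunk above: the failure surfaces immediately.
        node_retry = Retry(
            backoff=NoBackoff(), retries=0, supported_errors=(ConnectionError,)
        )
        try:
            await node_retry.call_with_retry(lambda: flaky([]), on_failure)
        except ConnectionError:
            print("no node-level retry; the cluster client handles it instead")

    asyncio.run(main())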
@@ -1292,7 +1313,9 @@ class NodesManager:
         startup_nodes_reachable = False
         fully_covered = False
         exception = None
-
+        # Convert to tuple to prevent RuntimeError if self.startup_nodes
+        # is modified during iteration
+        for startup_node in tuple(self.startup_nodes.values()):
             try:
                 # Make sure cluster mode is enabled on this node
                 try:
@@ -1536,7 +1559,7 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
         """
         Execute the pipeline.

-        It will retry the commands as specified by :attr:`
+        It will retry the commands as specified by retries specified in :attr:`retry`
         & then raise an exception.

         :param raise_on_error:
@@ -1552,7 +1575,7 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
             return []

         try:
-            retry_attempts = self._client.
+            retry_attempts = self._client.retry.get_retries()
             while True:
                 try:
                     if self._client._initialize:
@@ -1633,8 +1656,9 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
            if isinstance(result, Exception):
                command = " ".join(map(safe_str, cmd.args))
                msg = (
-                    f"Command # {cmd.position + 1}
-                    f"
+                    f"Command # {cmd.position + 1} "
+                    f"({truncate_text(command)}) "
+                    f"of pipeline caused error: {result.args}"
                )
                result.args = (msg,) + result.args[1:]
                raise result