redis 6.0.0b1__py3-none-any.whl → 6.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +9 -1
- redis/_parsers/resp3.py +2 -2
- redis/asyncio/client.py +83 -71
- redis/asyncio/cluster.py +74 -50
- redis/asyncio/connection.py +43 -17
- redis/asyncio/retry.py +12 -0
- redis/asyncio/sentinel.py +2 -0
- redis/backoff.py +54 -0
- redis/client.py +99 -89
- redis/cluster.py +1085 -359
- redis/commands/core.py +105 -105
- redis/commands/helpers.py +19 -6
- redis/commands/json/__init__.py +1 -1
- redis/commands/json/commands.py +8 -8
- redis/commands/redismodules.py +27 -9
- redis/commands/search/commands.py +2 -2
- redis/commands/timeseries/__init__.py +1 -1
- redis/commands/vectorset/__init__.py +46 -0
- redis/commands/vectorset/commands.py +367 -0
- redis/commands/vectorset/utils.py +94 -0
- redis/connection.py +46 -13
- redis/exceptions.py +18 -0
- redis/retry.py +25 -0
- redis/sentinel.py +2 -0
- redis/utils.py +7 -0
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/METADATA +14 -8
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/RECORD +29 -26
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/WHEEL +0 -0
- {redis-6.0.0b1.dist-info → redis-6.1.0.dist-info}/licenses/LICENSE +0 -0
redis/asyncio/connection.py
CHANGED
@@ -293,6 +293,9 @@ class AbstractConnection:
 
     async def connect(self):
         """Connects to the Redis server if not already connected"""
+        await self.connect_check_health(check_health=True)
+
+    async def connect_check_health(self, check_health: bool = True):
         if self.is_connected:
             return
         try:
@@ -311,7 +314,7 @@
         try:
             if not self.redis_connect_func:
                 # Use the default on_connect function
-                await self.on_connect()
+                await self.on_connect_check_health(check_health=check_health)
             else:
                 # Use the passed function redis_connect_func
                 (
@@ -350,6 +353,9 @@
 
     async def on_connect(self) -> None:
         """Initialize the connection, authenticate and select a database"""
+        await self.on_connect_check_health(check_health=True)
+
+    async def on_connect_check_health(self, check_health: bool = True) -> None:
         self._parser.on_connect(self)
         parser = self._parser
 
@@ -407,7 +413,7 @@
             # update cluster exception classes
             self._parser.EXCEPTION_CLASSES = parser.EXCEPTION_CLASSES
             self._parser.on_connect(self)
-            await self.send_command("HELLO", self.protocol)
+            await self.send_command("HELLO", self.protocol, check_health=check_health)
             response = await self.read_response()
             # if response.get(b"proto") != self.protocol and response.get(
             #     "proto"
@@ -416,18 +422,35 @@
 
         # if a client_name is given, set it
         if self.client_name:
-            await self.send_command("CLIENT", "SETNAME", self.client_name)
+            await self.send_command(
+                "CLIENT",
+                "SETNAME",
+                self.client_name,
+                check_health=check_health,
+            )
             if str_if_bytes(await self.read_response()) != "OK":
                 raise ConnectionError("Error setting client name")
 
         # set the library name and version, pipeline for lower startup latency
         if self.lib_name:
-            await self.send_command("CLIENT", "SETINFO", "LIB-NAME", self.lib_name)
+            await self.send_command(
+                "CLIENT",
+                "SETINFO",
+                "LIB-NAME",
+                self.lib_name,
+                check_health=check_health,
+            )
         if self.lib_version:
-            await self.send_command("CLIENT", "SETINFO", "LIB-VER", self.lib_version)
+            await self.send_command(
+                "CLIENT",
+                "SETINFO",
+                "LIB-VER",
+                self.lib_version,
+                check_health=check_health,
+            )
         # if a database is specified, switch to it. Also pipeline this
         if self.db:
-            await self.send_command("SELECT", self.db)
+            await self.send_command("SELECT", self.db, check_health=check_health)
 
         # read responses from pipeline
         for _ in (sent for sent in (self.lib_name, self.lib_version) if sent):
@@ -489,8 +512,8 @@
         self, command: Union[bytes, str, Iterable[bytes]], check_health: bool = True
     ) -> None:
         if not self.is_connected:
-            await self.connect()
-        elif check_health:
+            await self.connect_check_health(check_health=False)
+        if check_health:
             await self.check_health()
 
         try:
@@ -768,10 +791,10 @@ class SSLConnection(Connection):
         self,
         ssl_keyfile: Optional[str] = None,
         ssl_certfile: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, ssl.VerifyMode] = "required",
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool = False,
+        ssl_check_hostname: bool = True,
         ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
         **kwargs,
@@ -842,10 +865,10 @@ class RedisSSLContext:
         self,
         keyfile: Optional[str] = None,
         certfile: Optional[str] = None,
-        cert_reqs: Optional[str] = None,
+        cert_reqs: Optional[Union[str, ssl.VerifyMode]] = None,
         ca_certs: Optional[str] = None,
         ca_data: Optional[str] = None,
-        check_hostname: bool = False,
+        check_hostname: bool = True,
         min_version: Optional[TLSVersion] = None,
         ciphers: Optional[str] = None,
     ):
@@ -855,7 +878,7 @@ class RedisSSLContext:
         self.keyfile = keyfile
         self.certfile = certfile
         if cert_reqs is None:
-            self.cert_reqs = ssl.CERT_NONE
+            cert_reqs = ssl.CERT_NONE
         elif isinstance(cert_reqs, str):
             CERT_REQS = {  # noqa: N806
                 "none": ssl.CERT_NONE,
@@ -866,10 +889,13 @@
                 raise RedisError(
                     f"Invalid SSL Certificate Requirements Flag: {cert_reqs}"
                 )
-            self.cert_reqs = CERT_REQS[cert_reqs]
+            cert_reqs = CERT_REQS[cert_reqs]
+        self.cert_reqs = cert_reqs
         self.ca_certs = ca_certs
         self.ca_data = ca_data
-        self.check_hostname = check_hostname
+        self.check_hostname = (
+            check_hostname if self.cert_reqs != ssl.CERT_NONE else False
+        )
         self.min_version = min_version
         self.ciphers = ciphers
         self.context: Optional[SSLContext] = None
@@ -1109,7 +1135,7 @@ class ConnectionPool:
     @deprecated_args(
        args_to_warn=["*"],
        reason="Use get_connection() without args instead",
-        version="5.0
+        version="5.3.0",
    )
    async def get_connection(self, command_name=None, *keys, **options):
        async with self._lock:
@@ -1282,7 +1308,7 @@ class BlockingConnectionPool(ConnectionPool):
    @deprecated_args(
        args_to_warn=["*"],
        reason="Use get_connection() without args instead",
-        version="5.0
+        version="5.3.0",
    )
    async def get_connection(self, command_name=None, *keys, **options):
        """Gets a connection from the pool, blocking until one is available"""
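The TLS-related changes above tighten the defaults: `ssl_check_hostname` now defaults to `True`, `ssl_cert_reqs` accepts an `ssl.VerifyMode`, and hostname checking is automatically dropped when certificate verification is disabled. A minimal sketch of how a caller of the asyncio client might adapt, assuming an illustrative host/port and a server with a self-signed certificate:

```python
import asyncio
from redis.asyncio import Redis

async def main():
    # Default behaviour after this change: certificates are required and the
    # hostname is verified (ssl_cert_reqs="required", ssl_check_hostname=True).
    strict = Redis(host="redis.example.com", port=6380, ssl=True)

    # Opting out of verification: with ssl_cert_reqs="none" the connection code
    # now forces check_hostname to False, so the ssl module does not complain
    # about hostname checks being enabled without certificate verification.
    relaxed = Redis(host="127.0.0.1", port=6380, ssl=True, ssl_cert_reqs="none")

    await strict.aclose()
    await relaxed.aclose()

asyncio.run(main())
```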
redis/asyncio/retry.py
CHANGED
@@ -43,6 +43,18 @@ class Retry:
             set(self._supported_errors + tuple(specified_errors))
         )
 
+    def get_retries(self) -> int:
+        """
+        Get the number of retries.
+        """
+        return self._retries
+
+    def update_retries(self, value: int) -> None:
+        """
+        Set the number of retries.
+        """
+        self._retries = value
+
     async def call_with_retry(
         self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
     ) -> T:
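The new `get_retries()` / `update_retries()` accessors make the retry count adjustable after a `Retry` object has been created. A small usage sketch (the backoff values are arbitrary):

```python
from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff

retry = Retry(backoff=ExponentialBackoff(cap=10, base=1), retries=3)
assert retry.get_retries() == 3

# Bump the retry budget without rebuilding the Retry object.
retry.update_retries(5)
assert retry.get_retries() == 5
```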
redis/asyncio/sentinel.py
CHANGED
@@ -326,6 +326,8 @@ class Sentinel(AsyncSentinelCommands):
     ):
         """
         Returns a redis client instance for the ``service_name`` master.
+        Sentinel client will detect failover and reconnect Redis clients
+        automatically.
 
         A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
         used to retrieve the master's address before establishing a new
redis/backoff.py
CHANGED
@@ -31,6 +31,15 @@ class ConstantBackoff(AbstractBackoff):
         """`backoff`: backoff time in seconds"""
         self._backoff = backoff
 
+    def __hash__(self) -> int:
+        return hash((self._backoff,))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, ConstantBackoff):
+            return NotImplemented
+
+        return self._backoff == other._backoff
+
     def compute(self, failures: int) -> float:
         return self._backoff
 
@@ -53,6 +62,15 @@ class ExponentialBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, ExponentialBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return min(self._cap, self._base * 2**failures)
 
@@ -68,6 +86,15 @@ class FullJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, FullJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return random.uniform(0, min(self._cap, self._base * 2**failures))
 
@@ -83,6 +110,15 @@ class EqualJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, EqualJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         temp = min(self._cap, self._base * 2**failures) / 2
         return temp + random.uniform(0, temp)
 
@@ -100,6 +136,15 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
         self._base = base
         self._previous_backoff = 0
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, DecorrelatedJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def reset(self) -> None:
         self._previous_backoff = 0
 
@@ -121,6 +166,15 @@ class ExponentialWithJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, EqualJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return min(self._cap, random.random() * self._base * 2**failures)
 
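With `__eq__` and `__hash__` defined, backoff policies now compare by value rather than identity, so two instances configured with the same parameters are interchangeable and can be used in sets or as dict keys. A small sketch of the resulting behavior:

```python
from redis.backoff import ExponentialBackoff, FullJitterBackoff

a = ExponentialBackoff(cap=10, base=1)
b = ExponentialBackoff(cap=10, base=1)

assert a == b              # value-based equality instead of identity
assert hash(a) == hash(b)  # consistent with __eq__, usable as dict/set keys
assert len({a, b}) == 1    # equal instances collapse in a set

# A different policy type with the same parameters is not considered equal.
assert a != FullJitterBackoff(cap=10, base=1)
```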
redis/client.py
CHANGED
@@ -2,7 +2,6 @@ import copy
 import re
 import threading
 import time
-import warnings
 from itertools import chain
 from typing import (
     TYPE_CHECKING,
@@ -12,6 +11,7 @@ from typing import (
     List,
     Mapping,
     Optional,
+    Set,
     Type,
     Union,
 )
@@ -23,6 +23,7 @@ from redis._parsers.helpers import (
     _RedisCallbacksRESP3,
     bool_ok,
 )
+from redis.backoff import ExponentialWithJitterBackoff
 from redis.cache import CacheConfig, CacheInterface
 from redis.commands import (
     CoreCommands,
@@ -30,8 +31,10 @@ from redis.commands import (
     SentinelCommands,
     list_or_args,
 )
+from redis.commands.core import Script
 from redis.connection import (
     AbstractConnection,
+    Connection,
     ConnectionPool,
     SSLConnection,
     UnixDomainSocketConnection,
@@ -50,7 +53,6 @@ from redis.exceptions import (
     PubSubError,
     RedisError,
     ResponseError,
-    TimeoutError,
     WatchError,
 )
 from redis.lock import Lock
@@ -58,9 +60,11 @@ from redis.retry import Retry
 from redis.utils import (
     HIREDIS_AVAILABLE,
     _set_info_logger,
+    deprecated_args,
     get_lib_version,
     safe_str,
     str_if_bytes,
+    truncate_text,
 )
 
 if TYPE_CHECKING:
@@ -188,6 +192,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         client.auto_close_connection_pool = True
         return client
 
+    @deprecated_args(
+        args_to_warn=["retry_on_timeout"],
+        reason="TimeoutError is included by default.",
+        version="6.0.0",
+    )
     def __init__(
         self,
         host: str = "localhost",
@@ -202,19 +211,20 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         unix_socket_path: Optional[str] = None,
         encoding: str = "utf-8",
         encoding_errors: str = "strict",
-        charset: Optional[str] = None,
-        errors: Optional[str] = None,
         decode_responses: bool = False,
         retry_on_timeout: bool = False,
+        retry: Retry = Retry(
+            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
+        ),
         retry_on_error: Optional[List[Type[Exception]]] = None,
         ssl: bool = False,
         ssl_keyfile: Optional[str] = None,
         ssl_certfile: Optional[str] = None,
-        ssl_cert_reqs: str = "required",
+        ssl_cert_reqs: Union[str, "ssl.VerifyMode"] = "required",
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_path: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool = False,
+        ssl_check_hostname: bool = True,
         ssl_password: Optional[str] = None,
         ssl_validate_ocsp: bool = False,
         ssl_validate_ocsp_stapled: bool = False,
@@ -229,7 +239,6 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         lib_name: Optional[str] = "redis-py",
         lib_version: Optional[str] = get_lib_version(),
         username: Optional[str] = None,
-        retry: Optional[Retry] = None,
         redis_connect_func: Optional[Callable[[], None]] = None,
         credential_provider: Optional[CredentialProvider] = None,
         protocol: Optional[int] = 2,
@@ -239,10 +248,24 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
     ) -> None:
         """
         Initialize a new Redis client.
-
-
-
-
+
+        To specify a retry policy for specific errors, you have two options:
+
+        1. Set the `retry_on_error` to a list of the error/s to retry on, and
+        you can also set `retry` to a valid `Retry` object(in case the default
+        one is not appropriate) - with this approach the retries will be triggered
+        on the default errors specified in the Retry object enriched with the
+        errors specified in `retry_on_error`.
+
+        2. Define a `Retry` object with configured 'supported_errors' and set
+        it to the `retry` parameter - with this approach you completely redefine
+        the errors on which retries will happen.
+
+        `retry_on_timeout` is deprecated - please include the TimeoutError
+        either in the Retry object or in the `retry_on_error` list.
+
+        When 'connection_pool' is provided - the retry configuration of the
+        provided pool will be used.
 
         Args:
 
@@ -255,24 +278,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         else:
             self._event_dispatcher = event_dispatcher
         if not connection_pool:
-            if charset is not None:
-                warnings.warn(
-                    DeprecationWarning(
-                        '"charset" is deprecated. Use "encoding" instead'
-                    )
-                )
-                encoding = charset
-            if errors is not None:
-                warnings.warn(
-                    DeprecationWarning(
-                        '"errors" is deprecated. Use "encoding_errors" instead'
-                    )
-                )
-                encoding_errors = errors
             if not retry_on_error:
                 retry_on_error = []
-            if retry_on_timeout is True:
-                retry_on_error.append(TimeoutError)
             kwargs = {
                 "db": db,
                 "username": username,
@@ -394,10 +401,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs
 
-    def get_retry(self) -> Optional["Retry"]:
+    def get_retry(self) -> Optional[Retry]:
         return self.get_connection_kwargs().get("retry")
 
-    def set_retry(self, retry: "Retry") -> None:
+    def set_retry(self, retry: Retry) -> None:
         self.get_connection_kwargs().update({"retry": retry})
         self.connection_pool.set_retry(retry)
 
@@ -597,18 +604,18 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
             conn.send_command(*args, **options)
             return self.parse_response(conn, command_name, **options)
 
-    def _disconnect_raise(self, conn, error):
+    def _close_connection(self, conn) -> None:
         """
-        Close the connection and raise an exception
-        if retry_on_error is not set or the error
-        is not one of the specified error types.
+        Close the connection before retrying.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
+
         conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
 
     # COMMAND EXECUTION AND PROTOCOL PARSING
     def execute_command(self, *args, **options):
@@ -627,7 +634,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
                 lambda: self._send_command_parse_response(
                     conn, command_name, *args, **options
                 ),
-                lambda error: self._disconnect_raise(conn, error),
+                lambda _: self._close_connection(conn),
             )
         finally:
             if self._single_connection_client:
@@ -886,19 +893,14 @@ class PubSub:
             )
             ttl -= 1
 
-    def _disconnect_raise_connect(self, conn, error):
+    def _reconnect(self, conn) -> None:
         """
-        Close the connection and raise an exception
-        if retry_on_error is not set or the error is not one
-        of the specified error types. Otherwise,
-        try to reconnect
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        In this error handler we are trying to reconnect to the server.
         """
         conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
         conn.connect()
 
     def _execute(self, conn, command, *args, **kwargs):
@@ -911,7 +913,7 @@ class PubSub:
         """
         return conn.retry.call_with_retry(
             lambda: command(*args, **kwargs),
-            lambda error: self._disconnect_raise_connect(conn, error),
+            lambda _: self._reconnect(conn),
         )
 
     def parse_response(self, block=True, timeout=0):
@@ -1280,7 +1282,8 @@ class Pipeline(Redis):
     in one transmission. This is convenient for batch processing, such as
     saving all the values in a list to Redis.
 
-    All commands executed within a pipeline are wrapped with MULTI and EXEC
+    All commands executed within a pipeline(when running in transactional mode,
+    which is the default behavior) are wrapped with MULTI and EXEC
     calls. This guarantees all commands executed in the pipeline will be
     executed atomically.
 
@@ -1295,15 +1298,22 @@ class Pipeline(Redis):
 
     UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
 
-    def __init__(self, connection_pool, response_callbacks, transaction, shard_hint):
+    def __init__(
+        self,
+        connection_pool: ConnectionPool,
+        response_callbacks,
+        transaction,
+        shard_hint,
+    ):
         self.connection_pool = connection_pool
-        self.connection = None
+        self.connection: Optional[Connection] = None
         self.response_callbacks = response_callbacks
         self.transaction = transaction
         self.shard_hint = shard_hint
-
         self.watching = False
-        self.reset()
+        self.command_stack = []
+        self.scripts: Set[Script] = set()
+        self.explicit_transaction = False
 
     def __enter__(self) -> "Pipeline":
         return self
@@ -1369,36 +1379,37 @@ class Pipeline(Redis):
             return self.immediate_execute_command(*args, **kwargs)
         return self.pipeline_execute_command(*args, **kwargs)
 
-    def _disconnect_reset_raise(self, conn, error):
+    def _disconnect_reset_raise_on_watching(
+        self,
+        conn: AbstractConnection,
+        error: Exception,
+    ) -> None:
         """
-        Close the connection, reset watching state and
-        raise an exception if we were watching,
-        if retry_on_error is not set or the
-        error is not one of the specified error types.
+        Close the connection reset watching state and
+        raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         conn.disconnect()
+
         # if we were already watching a variable, the watch is no longer
         # valid since this connection has died. raise a WatchError, which
         # indicates the user should retry this transaction.
         if self.watching:
             self.reset()
             raise WatchError(
-                "A ConnectionError occurred on while watching one or more keys"
+                f"A {type(error).__name__} occurred while watching one or more keys"
            )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            self.reset()
-            raise
 
     def immediate_execute_command(self, *args, **options):
         """
-        Execute a command immediately, but don't auto-retry on a
-        ConnectionError if we're already WATCHing a variable. Used when
-        issuing WATCH or subsequent commands retrieving their values but before
+        Execute a command immediately, but don't auto-retry on the supported
+        errors for retry if we're already WATCHing a variable.
+        Used when issuing WATCH or subsequent commands retrieving their values but before
         MULTI is called.
         """
         command_name = args[0]
@@ -1412,7 +1423,7 @@ class Pipeline(Redis):
                 lambda: self._send_command_parse_response(
                     conn, command_name, *args, **options
                 ),
-                lambda error: self._disconnect_reset_raise(conn, error),
+                lambda error: self._disconnect_reset_raise_on_watching(conn, error),
             )
 
     def pipeline_execute_command(self, *args, **options) -> "Pipeline":
@@ -1430,7 +1441,9 @@ class Pipeline(Redis):
         self.command_stack.append((args, options))
         return self
 
-    def _execute_transaction(self, connection, commands, raise_on_error):
+    def _execute_transaction(
+        self, connection: Connection, commands, raise_on_error
+    ) -> List:
         cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
         all_cmds = connection.pack_commands(
             [args for args, options in cmds if EMPTY_RESPONSE not in options]
@@ -1524,7 +1537,8 @@ class Pipeline(Redis):
     def annotate_exception(self, exception, number, command):
         cmd = " ".join(map(safe_str, command))
         msg = (
-            f"Command # {number} ({cmd}) of pipeline caused error: {exception.args[0]}"
+            f"Command # {number} ({truncate_text(cmd)}) of pipeline "
+            f"caused error: {exception.args[0]}"
         )
         exception.args = (msg,) + exception.args[1:]
 
@@ -1549,15 +1563,19 @@ class Pipeline(Redis):
         if not exist:
             s.sha = immediate("SCRIPT LOAD", s.script)
 
-    def _disconnect_raise_reset(
+    def _disconnect_raise_on_watching(
         self,
         conn: AbstractConnection,
         error: Exception,
     ) -> None:
         """
-        Close the connection, raise an exception if we were watching,
-        and raise an exception if retry_on_error is not set or the
-        error is not one of the specified error types.
+        Close the connection, raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         conn.disconnect()
         # if we were watching a variable, the watch is no longer valid
@@ -1565,16 +1583,8 @@ class Pipeline(Redis):
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A ConnectionError occurred on while watching one or more keys"
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            self.reset()
-            raise error
 
     def execute(self, raise_on_error: bool = True) -> List[Any]:
         """Execute all the commands in the current pipeline"""
@@ -1598,7 +1608,7 @@ class Pipeline(Redis):
         try:
             return conn.retry.call_with_retry(
                 lambda: execute(conn, stack, raise_on_error),
-                lambda error: self._disconnect_raise_reset(conn, error),
+                lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
             self.reset()
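The reworked constructor gives the sync client a built-in retry policy (three retries with exponential backoff and jitter) and its new docstring describes the two supported ways to customize it. A short sketch of both options under illustrative values:

```python
from redis import Redis
from redis.backoff import ExponentialWithJitterBackoff
from redis.exceptions import BusyLoadingError
from redis.retry import Retry

# Option 1: keep the default Retry (3 retries, exponential backoff with jitter)
# and extend the list of errors that trigger a retry.
r1 = Redis(host="localhost", port=6379, retry_on_error=[BusyLoadingError])

# Option 2: replace the Retry object entirely, including its supported errors.
r2 = Redis(
    host="localhost",
    port=6379,
    retry=Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=5),
)

# The retry policy is also reachable (and swappable) after construction.
print(r1.get_retry())
r1.set_retry(Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=1))
```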