redis 6.0.0b2-py3-none-any.whl → 6.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +8 -1
- redis/_parsers/__init__.py +8 -1
- redis/_parsers/base.py +53 -1
- redis/_parsers/hiredis.py +72 -5
- redis/_parsers/resp3.py +12 -37
- redis/asyncio/client.py +76 -70
- redis/asyncio/cluster.py +796 -104
- redis/asyncio/connection.py +8 -10
- redis/asyncio/retry.py +12 -0
- redis/backoff.py +54 -0
- redis/client.py +101 -89
- redis/cluster.py +1088 -365
- redis/commands/core.py +104 -104
- redis/commands/helpers.py +19 -6
- redis/commands/json/__init__.py +1 -1
- redis/commands/json/commands.py +8 -8
- redis/commands/redismodules.py +20 -10
- redis/commands/search/commands.py +2 -2
- redis/commands/timeseries/__init__.py +1 -1
- redis/connection.py +19 -9
- redis/exceptions.py +18 -0
- redis/retry.py +25 -0
- redis/typing.py +0 -4
- redis/utils.py +5 -2
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/METADATA +16 -12
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/RECORD +28 -28
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/WHEEL +0 -0
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/licenses/LICENSE +0 -0
redis/asyncio/connection.py
CHANGED
@@ -576,11 +576,7 @@ class AbstractConnection:
         read_timeout = timeout if timeout is not None else self.socket_timeout
         host_error = self._host_error()
         try:
-            if (
-                read_timeout is not None
-                and self.protocol in ["3", 3]
-                and not HIREDIS_AVAILABLE
-            ):
+            if read_timeout is not None and self.protocol in ["3", 3]:
                 async with async_timeout(read_timeout):
                     response = await self._parser.read_response(
                         disable_decoding=disable_decoding, push_request=push_request
@@ -590,7 +586,7 @@ class AbstractConnection:
                     response = await self._parser.read_response(
                         disable_decoding=disable_decoding
                     )
-            elif self.protocol in ["3", 3]
+            elif self.protocol in ["3", 3]:
                 response = await self._parser.read_response(
                     disable_decoding=disable_decoding, push_request=push_request
                 )
@@ -794,7 +790,7 @@ class SSLConnection(Connection):
         ssl_cert_reqs: Union[str, ssl.VerifyMode] = "required",
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool =
+        ssl_check_hostname: bool = True,
         ssl_min_version: Optional[TLSVersion] = None,
         ssl_ciphers: Optional[str] = None,
         **kwargs,
@@ -893,7 +889,9 @@ class RedisSSLContext:
         self.cert_reqs = cert_reqs
         self.ca_certs = ca_certs
         self.ca_data = ca_data
-        self.check_hostname =
+        self.check_hostname = (
+            check_hostname if self.cert_reqs != ssl.CERT_NONE else False
+        )
         self.min_version = min_version
         self.ciphers = ciphers
         self.context: Optional[SSLContext] = None
@@ -1133,7 +1131,7 @@ class ConnectionPool:
     @deprecated_args(
         args_to_warn=["*"],
         reason="Use get_connection() without args instead",
-        version="5.0
+        version="5.3.0",
     )
     async def get_connection(self, command_name=None, *keys, **options):
         async with self._lock:
@@ -1306,7 +1304,7 @@ class BlockingConnectionPool(ConnectionPool):
     @deprecated_args(
         args_to_warn=["*"],
         reason="Use get_connection() without args instead",
-        version="5.0
+        version="5.3.0",
     )
     async def get_connection(self, command_name=None, *keys, **options):
         """Gets a connection from the pool, blocking until one is available"""
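Two user-visible effects of these hunks: RESP3 read timeouts no longer depend on whether hiredis is installed, and RedisSSLContext now disables hostname checking automatically when certificate verification is off. A minimal sketch of the latter against a hypothetical TLS endpoint (host, port, and the decision to skip verification are illustrative, not a recommendation):

import asyncio

import redis.asyncio as redis


async def main():
    # With ssl_cert_reqs="none", RedisSSLContext now forces check_hostname
    # to False, so this combination does not trip the stdlib
    # "check_hostname requires verify_mode" error.
    client = redis.Redis(
        host="redis.example.com",  # placeholder host
        port=6380,
        ssl=True,
        ssl_cert_reqs="none",
    )
    print(await client.ping())
    await client.aclose()


asyncio.run(main())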
redis/asyncio/retry.py
CHANGED
@@ -43,6 +43,18 @@ class Retry:
             set(self._supported_errors + tuple(specified_errors))
         )
 
+    def get_retries(self) -> int:
+        """
+        Get the number of retries.
+        """
+        return self._retries
+
+    def update_retries(self, value: int) -> None:
+        """
+        Set the number of retries.
+        """
+        self._retries = value
+
     async def call_with_retry(
         self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
     ) -> T:
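The new accessors make the retry budget inspectable and adjustable after construction. A small usage sketch (the concrete values are illustrative):

from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff

retry = Retry(backoff=ExponentialBackoff(), retries=3)

print(retry.get_retries())  # -> 3
retry.update_retries(5)     # widen the retry budget, e.g. for a flaky link
print(retry.get_retries())  # -> 5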
redis/backoff.py
CHANGED
@@ -31,6 +31,15 @@ class ConstantBackoff(AbstractBackoff):
         """`backoff`: backoff time in seconds"""
         self._backoff = backoff
 
+    def __hash__(self) -> int:
+        return hash((self._backoff,))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, ConstantBackoff):
+            return NotImplemented
+
+        return self._backoff == other._backoff
+
     def compute(self, failures: int) -> float:
         return self._backoff
 
@@ -53,6 +62,15 @@ class ExponentialBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, ExponentialBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return min(self._cap, self._base * 2**failures)
 
@@ -68,6 +86,15 @@ class FullJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, FullJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return random.uniform(0, min(self._cap, self._base * 2**failures))
 
@@ -83,6 +110,15 @@ class EqualJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, EqualJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         temp = min(self._cap, self._base * 2**failures) / 2
         return temp + random.uniform(0, temp)
 
@@ -100,6 +136,15 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
         self._base = base
         self._previous_backoff = 0
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, DecorrelatedJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def reset(self) -> None:
         self._previous_backoff = 0
 
@@ -121,6 +166,15 @@ class ExponentialWithJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, EqualJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return min(self._cap, random.random() * self._base * 2**failures)
 
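With __eq__ and __hash__ defined, backoff policies compare by configuration rather than by identity, so equivalent policies can be deduplicated or used as set members and dict keys. A quick illustration (values arbitrary):

from redis.backoff import ExponentialBackoff

a = ExponentialBackoff(cap=10, base=1)
b = ExponentialBackoff(cap=10, base=1)

assert a == b              # equal configuration, equal objects
assert hash(a) == hash(b)  # hashing is consistent with __eq__
assert len({a, b}) == 1    # usable in sets and as dict keys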
redis/client.py
CHANGED
@@ -2,7 +2,6 @@ import copy
 import re
 import threading
 import time
-import warnings
 from itertools import chain
 from typing import (
     TYPE_CHECKING,
@@ -12,6 +11,7 @@ from typing import (
     List,
     Mapping,
     Optional,
+    Set,
     Type,
     Union,
 )
@@ -23,6 +23,7 @@ from redis._parsers.helpers import (
     _RedisCallbacksRESP3,
     bool_ok,
 )
+from redis.backoff import ExponentialWithJitterBackoff
 from redis.cache import CacheConfig, CacheInterface
 from redis.commands import (
     CoreCommands,
@@ -30,8 +31,10 @@ from redis.commands import (
     SentinelCommands,
     list_or_args,
 )
+from redis.commands.core import Script
 from redis.connection import (
     AbstractConnection,
+    Connection,
     ConnectionPool,
     SSLConnection,
     UnixDomainSocketConnection,
@@ -50,14 +53,13 @@ from redis.exceptions import (
     PubSubError,
     RedisError,
     ResponseError,
-    TimeoutError,
     WatchError,
 )
 from redis.lock import Lock
 from redis.retry import Retry
 from redis.utils import (
-    HIREDIS_AVAILABLE,
     _set_info_logger,
+    deprecated_args,
     get_lib_version,
     safe_str,
     str_if_bytes,
@@ -189,6 +191,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         client.auto_close_connection_pool = True
         return client
 
+    @deprecated_args(
+        args_to_warn=["retry_on_timeout"],
+        reason="TimeoutError is included by default.",
+        version="6.0.0",
+    )
     def __init__(
         self,
         host: str = "localhost",
@@ -203,10 +210,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         unix_socket_path: Optional[str] = None,
         encoding: str = "utf-8",
         encoding_errors: str = "strict",
-        charset: Optional[str] = None,
-        errors: Optional[str] = None,
         decode_responses: bool = False,
         retry_on_timeout: bool = False,
+        retry: Retry = Retry(
+            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
+        ),
         retry_on_error: Optional[List[Type[Exception]]] = None,
         ssl: bool = False,
         ssl_keyfile: Optional[str] = None,
@@ -215,7 +223,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_path: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool =
+        ssl_check_hostname: bool = True,
         ssl_password: Optional[str] = None,
         ssl_validate_ocsp: bool = False,
         ssl_validate_ocsp_stapled: bool = False,
@@ -230,7 +238,6 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         lib_name: Optional[str] = "redis-py",
         lib_version: Optional[str] = get_lib_version(),
         username: Optional[str] = None,
-        retry: Optional[Retry] = None,
         redis_connect_func: Optional[Callable[[], None]] = None,
         credential_provider: Optional[CredentialProvider] = None,
         protocol: Optional[int] = 2,
@@ -240,10 +247,24 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
     ) -> None:
         """
         Initialize a new Redis client.
-
-
-
-
+
+        To specify a retry policy for specific errors, you have two options:
+
+        1. Set the `retry_on_error` to a list of the error/s to retry on, and
+        you can also set `retry` to a valid `Retry` object(in case the default
+        one is not appropriate) - with this approach the retries will be triggered
+        on the default errors specified in the Retry object enriched with the
+        errors specified in `retry_on_error`.
+
+        2. Define a `Retry` object with configured 'supported_errors' and set
+        it to the `retry` parameter - with this approach you completely redefine
+        the errors on which retries will happen.
+
+        `retry_on_timeout` is deprecated - please include the TimeoutError
+        either in the Retry object or in the `retry_on_error` list.
+
+        When 'connection_pool' is provided - the retry configuration of the
+        provided pool will be used.
 
         Args:
 
@@ -256,24 +277,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         else:
            self._event_dispatcher = event_dispatcher
         if not connection_pool:
-            if charset is not None:
-                warnings.warn(
-                    DeprecationWarning(
-                        '"charset" is deprecated. Use "encoding" instead'
-                    )
-                )
-                encoding = charset
-            if errors is not None:
-                warnings.warn(
-                    DeprecationWarning(
-                        '"errors" is deprecated. Use "encoding_errors" instead'
-                    )
-                )
-                encoding_errors = errors
             if not retry_on_error:
                 retry_on_error = []
-            if retry_on_timeout is True:
-                retry_on_error.append(TimeoutError)
             kwargs = {
                 "db": db,
                 "username": username,
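The sync client now ships with retries enabled by default (three attempts with exponential backoff and jitter) and deprecates retry_on_timeout. A sketch of the two documented ways to tune the policy (the concrete values are arbitrary):

from redis import Redis
from redis.backoff import ExponentialWithJitterBackoff
from redis.exceptions import BusyLoadingError, ConnectionError, TimeoutError
from redis.retry import Retry

# Option 1: keep the default Retry and only widen the error list.
r1 = Redis(retry_on_error=[BusyLoadingError])

# Option 2: redefine the retried errors entirely via supported_errors.
custom_retry = Retry(
    backoff=ExponentialWithJitterBackoff(base=1, cap=10),
    retries=5,
    supported_errors=(ConnectionError, TimeoutError),
)
r2 = Redis(retry=custom_retry)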
@@ -363,6 +368,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         ]:
             raise RedisError("Client caching is only supported with RESP version 3")
 
+        # TODO: To avoid breaking changes during the bug fix, we have to keep non-reentrant lock.
+        # TODO: Remove this before next major version (7.0.0)
         self.single_connection_lock = threading.Lock()
         self.connection = None
         self._single_connection_client = single_connection_client
@@ -395,10 +402,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs
 
-    def get_retry(self) -> Optional[
+    def get_retry(self) -> Optional[Retry]:
         return self.get_connection_kwargs().get("retry")
 
-    def set_retry(self, retry:
+    def set_retry(self, retry: Retry) -> None:
         self.get_connection_kwargs().update({"retry": retry})
         self.connection_pool.set_retry(retry)
 
@@ -598,18 +605,18 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         conn.send_command(*args, **options)
         return self.parse_response(conn, command_name, **options)
 
-    def
+    def _close_connection(self, conn) -> None:
         """
-        Close the connection
-
-
+        Close the connection before retrying.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
+
         conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
 
     # COMMAND EXECUTION AND PROTOCOL PARSING
     def execute_command(self, *args, **options):
@@ -628,7 +635,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
                 lambda: self._send_command_parse_response(
                     conn, command_name, *args, **options
                 ),
-                lambda
+                lambda _: self._close_connection(conn),
             )
         finally:
             if self._single_connection_client:
@@ -768,6 +775,9 @@ class PubSub:
             self._event_dispatcher = EventDispatcher()
         else:
             self._event_dispatcher = event_dispatcher
+
+        # TODO: To avoid breaking changes during the bug fix, we have to keep non-reentrant lock.
+        # TODO: Remove this before next major version (7.0.0)
         self._lock = threading.Lock()
         if self.encoder is None:
             self.encoder = self.connection_pool.get_encoder()
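get_retry() and set_retry() are now annotated with the concrete Retry type, and the retry error handlers were renamed and simplified (_close_connection no longer re-raises, since the Retry object already filters supported errors). A usage sketch for the accessors (assumes a server on the default port; values are illustrative):

from redis import Redis
from redis.backoff import ConstantBackoff
from redis.retry import Retry

r = Redis()

current = r.get_retry()  # the Retry attached to the connection pool
r.set_retry(Retry(ConstantBackoff(0.5), retries=1))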
@@ -855,7 +865,7 @@ class PubSub:
             # register a callback that re-subscribes to any channels we
             # were listening to when we were disconnected
             self.connection.register_connect_callback(self.on_connect)
-            if self.push_handler_func is not None
+            if self.push_handler_func is not None:
                 self.connection._parser.set_pubsub_push_handler(self.push_handler_func)
             self._event_dispatcher.dispatch(
                 AfterPubSubConnectionInstantiationEvent(
@@ -887,19 +897,14 @@ class PubSub:
                     )
             ttl -= 1
 
-    def
+    def _reconnect(self, conn) -> None:
         """
-
-
-
-        reconnect
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        In this error handler we are trying to reconnect to the server.
         """
         conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
         conn.connect()
 
     def _execute(self, conn, command, *args, **kwargs):
@@ -912,7 +917,7 @@ class PubSub:
         """
         return conn.retry.call_with_retry(
             lambda: command(*args, **kwargs),
-            lambda
+            lambda _: self._reconnect(conn),
         )
 
     def parse_response(self, block=True, timeout=0):
@@ -1281,7 +1286,8 @@ class Pipeline(Redis):
     in one transmission. This is convenient for batch processing, such as
     saving all the values in a list to Redis.
 
-    All commands executed within a pipeline
+    All commands executed within a pipeline(when running in transactional mode,
+    which is the default behavior) are wrapped with MULTI and EXEC
     calls. This guarantees all commands executed in the pipeline will be
     executed atomically.
 
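The clarified docstring spells out that MULTI/EXEC wrapping only applies in transactional mode, which is the default. A short illustration of both modes (key names and values are arbitrary):

from redis import Redis

r = Redis()

# Default: transaction=True, queued commands are wrapped in MULTI/EXEC.
with r.pipeline(transaction=True) as pipe:
    pipe.set("counter", 0)
    pipe.incr("counter")
    print(pipe.execute())  # e.g. [True, 1]

# transaction=False batches the commands without MULTI/EXEC.
with r.pipeline(transaction=False) as pipe:
    pipe.ping()
    pipe.ping()
    print(pipe.execute())  # e.g. [True, True]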
@@ -1296,15 +1302,22 @@ class Pipeline(Redis):
 
     UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
 
-    def __init__(
+    def __init__(
+        self,
+        connection_pool: ConnectionPool,
+        response_callbacks,
+        transaction,
+        shard_hint,
+    ):
         self.connection_pool = connection_pool
-        self.connection = None
+        self.connection: Optional[Connection] = None
         self.response_callbacks = response_callbacks
         self.transaction = transaction
         self.shard_hint = shard_hint
-
         self.watching = False
-        self.
+        self.command_stack = []
+        self.scripts: Set[Script] = set()
+        self.explicit_transaction = False
 
     def __enter__(self) -> "Pipeline":
         return self
@@ -1370,36 +1383,37 @@ class Pipeline(Redis):
             return self.immediate_execute_command(*args, **kwargs)
         return self.pipeline_execute_command(*args, **kwargs)
 
-    def
+    def _disconnect_reset_raise_on_watching(
+        self,
+        conn: AbstractConnection,
+        error: Exception,
+    ) -> None:
         """
-        Close the connection
-        raise an exception if we were watching
-
-
+        Close the connection reset watching state and
+        raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         conn.disconnect()
+
         # if we were already watching a variable, the watch is no longer
         # valid since this connection has died. raise a WatchError, which
         # indicates the user should retry this transaction.
         if self.watching:
             self.reset()
             raise WatchError(
-                "A
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            self.reset()
-            raise
 
     def immediate_execute_command(self, *args, **options):
         """
-        Execute a command immediately, but don't auto-retry on
-
-        issuing WATCH or subsequent commands retrieving their values but before
+        Execute a command immediately, but don't auto-retry on the supported
+        errors for retry if we're already WATCHing a variable.
+        Used when issuing WATCH or subsequent commands retrieving their values but before
         MULTI is called.
         """
         command_name = args[0]
@@ -1413,7 +1427,7 @@ class Pipeline(Redis):
             lambda: self._send_command_parse_response(
                 conn, command_name, *args, **options
             ),
-            lambda error: self.
+            lambda error: self._disconnect_reset_raise_on_watching(conn, error),
         )
 
     def pipeline_execute_command(self, *args, **options) -> "Pipeline":
@@ -1431,7 +1445,9 @@ class Pipeline(Redis):
         self.command_stack.append((args, options))
         return self
 
-    def _execute_transaction(
+    def _execute_transaction(
+        self, connection: Connection, commands, raise_on_error
+    ) -> List:
         cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
         all_cmds = connection.pack_commands(
             [args for args, options in cmds if EMPTY_RESPONSE not in options]
@@ -1551,15 +1567,19 @@ class Pipeline(Redis):
                 if not exist:
                     s.sha = immediate("SCRIPT LOAD", s.script)
 
-    def
+    def _disconnect_raise_on_watching(
         self,
         conn: AbstractConnection,
         error: Exception,
     ) -> None:
         """
-        Close the connection, raise an exception if we were watching
-
-
+        Close the connection, raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         conn.disconnect()
         # if we were watching a variable, the watch is no longer valid
@@ -1567,16 +1587,8 @@ class Pipeline(Redis):
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            self.reset()
-            raise error
 
     def execute(self, raise_on_error: bool = True) -> List[Any]:
         """Execute all the commands in the current pipeline"""
@@ -1600,7 +1612,7 @@ class Pipeline(Redis):
         try:
             return conn.retry.call_with_retry(
                 lambda: execute(conn, stack, raise_on_error),
-                lambda error: self.
+                lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
             self.reset()