redis-6.0.0b2-py3-none-any.whl → redis-6.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +9 -1
- redis/_parsers/resp3.py +2 -2
- redis/asyncio/client.py +75 -68
- redis/asyncio/cluster.py +67 -46
- redis/asyncio/connection.py +7 -5
- redis/asyncio/retry.py +12 -0
- redis/backoff.py +54 -0
- redis/client.py +95 -87
- redis/cluster.py +1084 -360
- redis/commands/core.py +104 -104
- redis/commands/helpers.py +19 -6
- redis/commands/json/__init__.py +1 -1
- redis/commands/json/commands.py +8 -8
- redis/commands/redismodules.py +20 -10
- redis/commands/search/commands.py +2 -2
- redis/commands/timeseries/__init__.py +1 -1
- redis/connection.py +7 -5
- redis/exceptions.py +18 -0
- redis/retry.py +25 -0
- {redis-6.0.0b2.dist-info → redis-6.1.0.dist-info}/METADATA +14 -8
- {redis-6.0.0b2.dist-info → redis-6.1.0.dist-info}/RECORD +23 -23
- {redis-6.0.0b2.dist-info → redis-6.1.0.dist-info}/WHEEL +0 -0
- {redis-6.0.0b2.dist-info → redis-6.1.0.dist-info}/licenses/LICENSE +0 -0
redis/asyncio/retry.py
CHANGED
@@ -43,6 +43,18 @@ class Retry:
             set(self._supported_errors + tuple(specified_errors))
         )
 
+    def get_retries(self) -> int:
+        """
+        Get the number of retries.
+        """
+        return self._retries
+
+    def update_retries(self, value: int) -> None:
+        """
+        Set the number of retries.
+        """
+        self._retries = value
+
     async def call_with_retry(
         self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
     ) -> T:
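
The new `get_retries()` / `update_retries()` accessors let callers inspect and adjust the retry budget of an existing Retry object instead of rebuilding it. A minimal sketch of how they behave:

    from redis.asyncio.retry import Retry
    from redis.backoff import ExponentialBackoff

    retry = Retry(backoff=ExponentialBackoff(), retries=3)
    assert retry.get_retries() == 3   # read the configured number of retries
    retry.update_retries(5)           # raise the budget in place
    assert retry.get_retries() == 5
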
redis/backoff.py
CHANGED
@@ -31,6 +31,15 @@ class ConstantBackoff(AbstractBackoff):
         """`backoff`: backoff time in seconds"""
         self._backoff = backoff
 
+    def __hash__(self) -> int:
+        return hash((self._backoff,))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, ConstantBackoff):
+            return NotImplemented
+
+        return self._backoff == other._backoff
+
     def compute(self, failures: int) -> float:
         return self._backoff

@@ -53,6 +62,15 @@ class ExponentialBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, ExponentialBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return min(self._cap, self._base * 2**failures)

@@ -68,6 +86,15 @@ class FullJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, FullJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return random.uniform(0, min(self._cap, self._base * 2**failures))

@@ -83,6 +110,15 @@ class EqualJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, EqualJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         temp = min(self._cap, self._base * 2**failures) / 2
         return temp + random.uniform(0, temp)

@@ -100,6 +136,15 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
         self._base = base
         self._previous_backoff = 0
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, DecorrelatedJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def reset(self) -> None:
         self._previous_backoff = 0

@@ -121,6 +166,15 @@ class ExponentialWithJitterBackoff(AbstractBackoff):
         self._cap = cap
         self._base = base
 
+    def __hash__(self) -> int:
+        return hash((self._base, self._cap))
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, EqualJitterBackoff):
+            return NotImplemented
+
+        return self._base == other._base and self._cap == other._cap
+
     def compute(self, failures: int) -> float:
         return min(self._cap, random.random() * self._base * 2**failures)
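
With `__hash__` and `__eq__` defined, backoff policies that share the same parameters now compare equal and hash identically, so they can be deduplicated in sets or compared when merging configurations. For example:

    from redis.backoff import ExponentialBackoff, FullJitterBackoff

    a = ExponentialBackoff(cap=10, base=1)
    b = ExponentialBackoff(cap=10, base=1)
    assert a == b and hash(a) == hash(b)            # same parameters -> equal, same hash
    assert a != FullJitterBackoff(cap=10, base=1)   # different policy class -> not equal
    assert len({a, b}) == 1                         # duplicates collapse in a set
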
redis/client.py
CHANGED
@@ -2,7 +2,6 @@ import copy
 import re
 import threading
 import time
-import warnings
 from itertools import chain
 from typing import (
     TYPE_CHECKING,
@@ -12,6 +11,7 @@ from typing import (
     List,
     Mapping,
     Optional,
+    Set,
     Type,
     Union,
 )
@@ -23,6 +23,7 @@ from redis._parsers.helpers import (
     _RedisCallbacksRESP3,
     bool_ok,
 )
+from redis.backoff import ExponentialWithJitterBackoff
 from redis.cache import CacheConfig, CacheInterface
 from redis.commands import (
     CoreCommands,
@@ -30,8 +31,10 @@ from redis.commands import (
     SentinelCommands,
     list_or_args,
 )
+from redis.commands.core import Script
 from redis.connection import (
     AbstractConnection,
+    Connection,
     ConnectionPool,
     SSLConnection,
     UnixDomainSocketConnection,
@@ -50,7 +53,6 @@ from redis.exceptions import (
     PubSubError,
     RedisError,
     ResponseError,
-    TimeoutError,
     WatchError,
 )
 from redis.lock import Lock
@@ -58,6 +60,7 @@ from redis.retry import Retry
 from redis.utils import (
     HIREDIS_AVAILABLE,
     _set_info_logger,
+    deprecated_args,
     get_lib_version,
     safe_str,
     str_if_bytes,
@@ -189,6 +192,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
             client.auto_close_connection_pool = True
         return client
 
+    @deprecated_args(
+        args_to_warn=["retry_on_timeout"],
+        reason="TimeoutError is included by default.",
+        version="6.0.0",
+    )
     def __init__(
         self,
         host: str = "localhost",
@@ -203,10 +211,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         unix_socket_path: Optional[str] = None,
         encoding: str = "utf-8",
         encoding_errors: str = "strict",
-        charset: Optional[str] = None,
-        errors: Optional[str] = None,
         decode_responses: bool = False,
         retry_on_timeout: bool = False,
+        retry: Retry = Retry(
+            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
+        ),
         retry_on_error: Optional[List[Type[Exception]]] = None,
         ssl: bool = False,
         ssl_keyfile: Optional[str] = None,
@@ -215,7 +224,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         ssl_ca_certs: Optional[str] = None,
         ssl_ca_path: Optional[str] = None,
         ssl_ca_data: Optional[str] = None,
-        ssl_check_hostname: bool = False,
+        ssl_check_hostname: bool = True,
         ssl_password: Optional[str] = None,
         ssl_validate_ocsp: bool = False,
         ssl_validate_ocsp_stapled: bool = False,
@@ -230,7 +239,6 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         lib_name: Optional[str] = "redis-py",
         lib_version: Optional[str] = get_lib_version(),
         username: Optional[str] = None,
-        retry: Optional[Retry] = None,
         redis_connect_func: Optional[Callable[[], None]] = None,
         credential_provider: Optional[CredentialProvider] = None,
         protocol: Optional[int] = 2,
@@ -240,10 +248,24 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
     ) -> None:
         """
         Initialize a new Redis client.
-
-
-
-
+
+        To specify a retry policy for specific errors, you have two options:
+
+        1. Set the `retry_on_error` to a list of the error/s to retry on, and
+        you can also set `retry` to a valid `Retry` object(in case the default
+        one is not appropriate) - with this approach the retries will be triggered
+        on the default errors specified in the Retry object enriched with the
+        errors specified in `retry_on_error`.
+
+        2. Define a `Retry` object with configured 'supported_errors' and set
+        it to the `retry` parameter - with this approach you completely redefine
+        the errors on which retries will happen.
+
+        `retry_on_timeout` is deprecated - please include the TimeoutError
+        either in the Retry object or in the `retry_on_error` list.
+
+        When 'connection_pool' is provided - the retry configuration of the
+        provided pool will be used.
 
         Args:
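
With the client now shipping a default `Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3)`, the two configuration options described in the docstring look roughly like this (host/port values are placeholders):

    from redis import Redis
    from redis.backoff import ExponentialWithJitterBackoff
    from redis.exceptions import ConnectionError, TimeoutError
    from redis.retry import Retry

    # Option 1: keep the default Retry object and extend the errors to retry on.
    r1 = Redis(host="localhost", port=6379, retry_on_error=[TimeoutError])

    # Option 2: fully redefine retry behaviour, including the supported errors.
    r2 = Redis(
        host="localhost",
        port=6379,
        retry=Retry(
            backoff=ExponentialWithJitterBackoff(base=1, cap=10),
            retries=5,
            supported_errors=(ConnectionError, TimeoutError),
        ),
    )
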
@@ -256,24 +278,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         else:
             self._event_dispatcher = event_dispatcher
         if not connection_pool:
-            if charset is not None:
-                warnings.warn(
-                    DeprecationWarning(
-                        '"charset" is deprecated. Use "encoding" instead'
-                    )
-                )
-                encoding = charset
-            if errors is not None:
-                warnings.warn(
-                    DeprecationWarning(
-                        '"errors" is deprecated. Use "encoding_errors" instead'
-                    )
-                )
-                encoding_errors = errors
             if not retry_on_error:
                 retry_on_error = []
-            if retry_on_timeout is True:
-                retry_on_error.append(TimeoutError)
             kwargs = {
                 "db": db,
                 "username": username,
@@ -395,10 +401,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs
 
-    def get_retry(self) -> Optional[
+    def get_retry(self) -> Optional[Retry]:
         return self.get_connection_kwargs().get("retry")
 
-    def set_retry(self, retry:
+    def set_retry(self, retry: Retry) -> None:
         self.get_connection_kwargs().update({"retry": retry})
         self.connection_pool.set_retry(retry)
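
The now fully annotated `get_retry()` / `set_retry()` helpers read and replace the retry policy stored in the connection kwargs, and `set_retry()` also pushes it down to the pool. For instance:

    from redis import Redis
    from redis.backoff import ConstantBackoff
    from redis.retry import Retry

    r = Redis(host="localhost", port=6379)
    print(r.get_retry())                                          # Retry currently in use
    r.set_retry(Retry(backoff=ConstantBackoff(0.5), retries=2))   # propagated to the pool too
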
@@ -598,18 +604,18 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
             conn.send_command(*args, **options)
             return self.parse_response(conn, command_name, **options)
 
-    def
+    def _close_connection(self, conn) -> None:
         """
-        Close the connection
-
-
+        Close the connection before retrying.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
+
         conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
 
     # COMMAND EXECUTION AND PROTOCOL PARSING
     def execute_command(self, *args, **options):
@@ -628,7 +634,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
                 lambda: self._send_command_parse_response(
                     conn, command_name, *args, **options
                 ),
-                lambda
+                lambda _: self._close_connection(conn),
             )
         finally:
             if self._single_connection_client:
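
The simplified error handlers above are the `fail` callbacks passed to `Retry.call_with_retry`, whose contract is: run `do()`, and when a supported error is raised, call `fail(error)` and retry after the configured backoff. A stripped-down sketch of that pattern (not the library's internal code):

    from redis.backoff import ConstantBackoff
    from redis.exceptions import ConnectionError
    from redis.retry import Retry

    attempts = []

    def do():
        attempts.append("try")
        if len(attempts) < 3:
            raise ConnectionError("simulated network drop")
        return "PONG"

    def fail(error):
        # mirrors Redis._close_connection: clean up, then let the retry loop call do() again
        print(f"attempt failed with {type(error).__name__}, retrying")

    retry = Retry(backoff=ConstantBackoff(0.01), retries=5)
    print(retry.call_with_retry(do, fail))   # prints "PONG" on the third attempt
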
@@ -887,19 +893,14 @@ class PubSub:
                     )
             ttl -= 1
 
-    def
+    def _reconnect(self, conn) -> None:
         """
-
-
-
-        reconnect
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        In this error handler we are trying to reconnect to the server.
         """
         conn.disconnect()
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            raise error
         conn.connect()
 
     def _execute(self, conn, command, *args, **kwargs):
@@ -912,7 +913,7 @@ class PubSub:
         """
         return conn.retry.call_with_retry(
             lambda: command(*args, **kwargs),
-            lambda
+            lambda _: self._reconnect(conn),
         )
 
     def parse_response(self, block=True, timeout=0):
@@ -1281,7 +1282,8 @@ class Pipeline(Redis):
     in one transmission. This is convenient for batch processing, such as
     saving all the values in a list to Redis.
 
-    All commands executed within a pipeline
+    All commands executed within a pipeline(when running in transactional mode,
+    which is the default behavior) are wrapped with MULTI and EXEC
     calls. This guarantees all commands executed in the pipeline will be
     executed atomically.
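
The clarified docstring distinguishes the default transactional pipeline (commands wrapped in MULTI/EXEC) from plain command batching with `transaction=False`; for example:

    from redis import Redis

    r = Redis(host="localhost", port=6379)

    # Default: transactional, the queued commands are wrapped in MULTI ... EXEC.
    with r.pipeline() as pipe:
        pipe.set("counter", 0)
        pipe.incr("counter")
        print(pipe.execute())        # [True, 1]

    # transaction=False: commands are only batched, no MULTI/EXEC wrapper.
    with r.pipeline(transaction=False) as pipe:
        pipe.get("counter")
        pipe.ttl("counter")
        print(pipe.execute())        # [b'1', -1]
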
@@ -1296,15 +1298,22 @@
 
     UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
 
-    def __init__(
+    def __init__(
+        self,
+        connection_pool: ConnectionPool,
+        response_callbacks,
+        transaction,
+        shard_hint,
+    ):
         self.connection_pool = connection_pool
-        self.connection = None
+        self.connection: Optional[Connection] = None
         self.response_callbacks = response_callbacks
         self.transaction = transaction
         self.shard_hint = shard_hint
-
         self.watching = False
-        self.
+        self.command_stack = []
+        self.scripts: Set[Script] = set()
+        self.explicit_transaction = False
 
     def __enter__(self) -> "Pipeline":
         return self
@@ -1370,36 +1379,37 @@
             return self.immediate_execute_command(*args, **kwargs)
         return self.pipeline_execute_command(*args, **kwargs)
 
-    def
+    def _disconnect_reset_raise_on_watching(
+        self,
+        conn: AbstractConnection,
+        error: Exception,
+    ) -> None:
         """
-        Close the connection
-        raise an exception if we were watching
-
-
+        Close the connection reset watching state and
+        raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         conn.disconnect()
+
         # if we were already watching a variable, the watch is no longer
         # valid since this connection has died. raise a WatchError, which
         # indicates the user should retry this transaction.
         if self.watching:
             self.reset()
             raise WatchError(
-                "A
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            self.reset()
-            raise
 
     def immediate_execute_command(self, *args, **options):
         """
-        Execute a command immediately, but don't auto-retry on
-
-        issuing WATCH or subsequent commands retrieving their values but before
+        Execute a command immediately, but don't auto-retry on the supported
+        errors for retry if we're already WATCHing a variable.
+        Used when issuing WATCH or subsequent commands retrieving their values but before
         MULTI is called.
         """
         command_name = args[0]
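
The new message interpolates the concrete exception type, but the recommended reaction to a `WatchError` is unchanged: retry the whole WATCH/MULTI/EXEC transaction, e.g.:

    from redis import Redis, WatchError

    r = Redis(host="localhost", port=6379)

    def add_to_balance(key: str, amount: int) -> int:
        with r.pipeline() as pipe:
            while True:
                try:
                    pipe.watch(key)                  # immediate mode: WATCH the key
                    current = int(pipe.get(key) or 0)
                    pipe.multi()                     # switch to buffered transaction mode
                    pipe.set(key, current + amount)
                    pipe.execute()
                    return current + amount
                except WatchError:
                    continue                         # key changed concurrently, retry
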
@@ -1413,7 +1423,7 @@ class Pipeline(Redis):
             lambda: self._send_command_parse_response(
                 conn, command_name, *args, **options
             ),
-            lambda error: self.
+            lambda error: self._disconnect_reset_raise_on_watching(conn, error),
         )
 
     def pipeline_execute_command(self, *args, **options) -> "Pipeline":
@@ -1431,7 +1441,9 @@
         self.command_stack.append((args, options))
         return self
 
-    def _execute_transaction(
+    def _execute_transaction(
+        self, connection: Connection, commands, raise_on_error
+    ) -> List:
         cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
         all_cmds = connection.pack_commands(
             [args for args, options in cmds if EMPTY_RESPONSE not in options]
@@ -1551,15 +1563,19 @@
                 if not exist:
                     s.sha = immediate("SCRIPT LOAD", s.script)
 
-    def
+    def _disconnect_raise_on_watching(
         self,
         conn: AbstractConnection,
         error: Exception,
     ) -> None:
         """
-        Close the connection, raise an exception if we were watching
-
-
+        Close the connection, raise an exception if we were watching.
+
+        The supported exceptions are already checked in the
+        retry object so we don't need to do it here.
+
+        After we disconnect the connection, it will try to reconnect and
+        do a health check as part of the send_command logic(on connection level).
         """
         conn.disconnect()
         # if we were watching a variable, the watch is no longer valid
@@ -1567,16 +1583,8 @@ class Pipeline(Redis):
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A
+                f"A {type(error).__name__} occurred while watching one or more keys"
             )
-        # if retry_on_error is not set or the error is not one
-        # of the specified error types, raise it
-        if (
-            conn.retry_on_error is None
-            or isinstance(error, tuple(conn.retry_on_error)) is False
-        ):
-            self.reset()
-            raise error
 
     def execute(self, raise_on_error: bool = True) -> List[Any]:
         """Execute all the commands in the current pipeline"""
@@ -1600,7 +1608,7 @@ class Pipeline(Redis):
         try:
             return conn.retry.call_with_retry(
                 lambda: execute(conn, stack, raise_on_error),
-                lambda error: self.
+                lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
             self.reset()