redis 6.0.0b2__py3-none-any.whl → 6.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +8 -1
- redis/_parsers/__init__.py +8 -1
- redis/_parsers/base.py +53 -1
- redis/_parsers/hiredis.py +72 -5
- redis/_parsers/resp3.py +12 -37
- redis/asyncio/client.py +76 -70
- redis/asyncio/cluster.py +796 -104
- redis/asyncio/connection.py +8 -10
- redis/asyncio/retry.py +12 -0
- redis/backoff.py +54 -0
- redis/client.py +101 -89
- redis/cluster.py +1088 -365
- redis/commands/core.py +104 -104
- redis/commands/helpers.py +19 -6
- redis/commands/json/__init__.py +1 -1
- redis/commands/json/commands.py +8 -8
- redis/commands/redismodules.py +20 -10
- redis/commands/search/commands.py +2 -2
- redis/commands/timeseries/__init__.py +1 -1
- redis/connection.py +19 -9
- redis/exceptions.py +18 -0
- redis/retry.py +25 -0
- redis/typing.py +0 -4
- redis/utils.py +5 -2
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/METADATA +16 -12
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/RECORD +28 -28
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/WHEEL +0 -0
- {redis-6.0.0b2.dist-info → redis-6.2.0.dist-info}/licenses/LICENSE +0 -0
redis/cluster.py
CHANGED
@@ -3,18 +3,25 @@ import socket
 import sys
 import threading
 import time
+from abc import ABC, abstractmethod
 from collections import OrderedDict
+from copy import copy
 from enum import Enum
-from
+from itertools import chain
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

 from redis._parsers import CommandsParser, Encoder
 from redis._parsers.helpers import parse_scan
-from redis.backoff import
+from redis.backoff import ExponentialWithJitterBackoff, NoBackoff
 from redis.cache import CacheConfig, CacheFactory, CacheFactoryInterface, CacheInterface
-from redis.client import CaseInsensitiveDict, PubSub, Redis
+from redis.client import EMPTY_RESPONSE, CaseInsensitiveDict, PubSub, Redis
 from redis.commands import READ_COMMANDS, RedisClusterCommands
 from redis.commands.helpers import list_or_args
-from redis.connection import
+from redis.connection import (
+    Connection,
+    ConnectionPool,
+    parse_url,
+)
 from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
 from redis.event import (
     AfterPooledConnectionsInstantiationEvent,
@@ -28,7 +35,10 @@ from redis.exceptions import (
     ClusterDownError,
     ClusterError,
     ConnectionError,
+    CrossSlotTransactionError,
     DataError,
+    ExecAbortError,
+    InvalidPipelineStack,
     MovedError,
     RedisClusterException,
     RedisError,
@@ -36,11 +46,11 @@ from redis.exceptions import (
     SlotNotCoveredError,
     TimeoutError,
     TryAgainError,
+    WatchError,
 )
 from redis.lock import Lock
 from redis.retry import Retry
 from redis.utils import (
-    HIREDIS_AVAILABLE,
     deprecated_args,
     dict_merge,
     list_keys_to_dict,
@@ -58,9 +68,9 @@ def get_node_name(host: str, port: Union[str, int]) -> str:
 @deprecated_args(
     allowed_args=["redis_node"],
     reason="Use get_connection(redis_node) instead",
-    version="5.0
+    version="5.3.0",
 )
-def get_connection(redis_node, *args, **options):
+def get_connection(redis_node: Redis, *args, **options) -> Connection:
     return redis_node.connection or redis_node.connection_pool.get_connection()


@@ -142,7 +152,6 @@ REPLICA = "replica"
 SLOT_ID = "slot-id"

 REDIS_ALLOWED_KEYS = (
-    "charset",
     "connection_class",
     "connection_pool",
     "connection_pool_class",
@@ -152,7 +161,6 @@ REDIS_ALLOWED_KEYS = (
     "decode_responses",
     "encoding",
     "encoding_errors",
-    "errors",
     "host",
     "lib_name",
     "lib_version",
@@ -176,12 +184,13 @@ REDIS_ALLOWED_KEYS = (
     "ssl_cert_reqs",
     "ssl_keyfile",
     "ssl_password",
+    "ssl_check_hostname",
     "unix_socket_path",
     "username",
     "cache",
     "cache_config",
 )
-KWARGS_DISABLED_KEYS = ("host", "port")
+KWARGS_DISABLED_KEYS = ("host", "port", "retry")


 def cleanup_kwargs(**kwargs):
@@ -412,7 +421,12 @@ class AbstractRedisCluster:
         list_keys_to_dict(["SCRIPT FLUSH"], lambda command, res: all(res.values())),
     )

-    ERRORS_ALLOW_RETRY = (
+    ERRORS_ALLOW_RETRY = (
+        ConnectionError,
+        TimeoutError,
+        ClusterDownError,
+        SlotNotCoveredError,
+    )

     def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
         """Replace the default cluster node.
@@ -433,7 +447,7 @@ class AbstractRedisCluster:
             # Choose a primary if the cluster contains different primaries
             self.nodes_manager.default_node = random.choice(primaries)
         else:
-            # Otherwise,
+            # Otherwise, choose a primary if the cluster contains different primaries
             replicas = [node for node in self.get_replicas() if node != curr_node]
             if replicas:
                 self.nodes_manager.default_node = random.choice(replicas)
@@ -487,7 +501,14 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
     @deprecated_args(
         args_to_warn=["read_from_replicas"],
         reason="Please configure the 'load_balancing_strategy' instead",
-        version="5.0
+        version="5.3.0",
+    )
+    @deprecated_args(
+        args_to_warn=[
+            "cluster_error_retry_attempts",
+        ],
+        reason="Please configure the 'retry' object instead",
+        version="6.0.0",
     )
     def __init__(
         self,
@@ -496,7 +517,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         startup_nodes: Optional[List["ClusterNode"]] = None,
         cluster_error_retry_attempts: int = 3,
         retry: Optional["Retry"] = None,
-        require_full_coverage: bool =
+        require_full_coverage: bool = True,
         reinitialize_steps: int = 5,
         read_from_replicas: bool = False,
         load_balancing_strategy: Optional["LoadBalancingStrategy"] = None,
@@ -546,9 +567,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         If you use dynamic DNS endpoints for startup nodes but CLUSTER SLOTS lists
         specific IP addresses, it is best to set it to false.
         :param cluster_error_retry_attempts:
+            @deprecated - Please configure the 'retry' object instead
+            In case 'retry' object is set - this argument is ignored!
+
             Number of times to retry before raising an error when
-            :class:`~.TimeoutError` or :class:`~.ConnectionError` or
+            :class:`~.TimeoutError` or :class:`~.ConnectionError`, :class:`~.SlotNotCoveredError` or
             :class:`~.ClusterDownError` are encountered
+        :param retry:
+            A retry object that defines the retry strategy and the number of
+            retries for the cluster client.
+            In current implementation for the cluster client (starting form redis-py version 6.0.0)
+            the retry object is not yet fully utilized, instead it is used just to determine
+            the number of retries for the cluster client.
+            In the future releases the retry object will be used to handle the cluster client retries!
         :param reinitialize_steps:
             Specifies the number of MOVED errors that need to occur before
             reinitializing the whole cluster topology. If a MOVED error occurs
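The docstring above is the only place the new retry contract is spelled out, so a short usage sketch may help. This is illustrative only and not part of the package: the host and port are placeholder values, and the Retry settings simply mirror the defaults introduced later in this diff.

from redis.backoff import ExponentialWithJitterBackoff
from redis.cluster import RedisCluster
from redis.retry import Retry

# Preferred in 6.x: pass a Retry object; cluster_error_retry_attempts now only
# emits a deprecation warning and is ignored when retry is given.
rc = RedisCluster(
    host="localhost",  # placeholder startup node
    port=16379,        # placeholder port
    retry=Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3),
)
rc.set("foo", "bar")

Note that, per the __init__ change further down in this diff, a 'retry' key placed inside **kwargs (as opposed to the top-level argument) now raises RedisClusterException.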
@@ -568,7 +599,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):

         :**kwargs:
             Extra arguments that will be sent into Redis instance when created
-            (See Official redis-py doc for supported kwargs
+            (See Official redis-py doc for supported kwargs - the only limitation
+            is that you can't provide 'retry' object as part of kwargs.
             [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
             Some kwargs are not supported and will raise a
             RedisClusterException:
@@ -583,6 +615,15 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                 "Argument 'db' is not possible to use in cluster mode"
             )

+        if "retry" in kwargs:
+            # Argument 'retry' is not possible to be used in kwargs when in cluster mode
+            # the kwargs are set to the lower level connections to the cluster nodes
+            # and there we provide retry configuration without retries allowed.
+            # The retries should be handled on cluster client level.
+            raise RedisClusterException(
+                "The 'retry' argument cannot be used in kwargs when running in cluster mode."
+            )
+
         # Get the startup node/s
         from_url = False
         if url is not None:
@@ -625,9 +666,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         kwargs = cleanup_kwargs(**kwargs)
         if retry:
             self.retry = retry
-            kwargs.update({"retry": self.retry})
         else:
-
+            self.retry = Retry(
+                backoff=ExponentialWithJitterBackoff(base=1, cap=10),
+                retries=cluster_error_retry_attempts,
+            )

         self.encoder = Encoder(
             kwargs.get("encoding", "utf-8"),
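The new default backoff is ExponentialWithJitterBackoff(base=1, cap=10). As a hedged illustration of what an exponential-with-full-jitter policy typically computes (the exact formula lives in redis/backoff.py and may differ in detail), the delay after the n-th consecutive failure is a random value bounded by an exponentially growing, capped ceiling:

import random

def full_jitter_delay(failures: int, base: float = 1.0, cap: float = 10.0) -> float:
    # Ceiling doubles with each failure (base * 2**failures) but never exceeds cap;
    # the uniform random factor spreads retries from many clients apart.
    return random.uniform(0, min(cap, base * (2 ** failures)))

With base=1 and cap=10 the ceiling reaches the cap after roughly four failures.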
@@ -638,7 +681,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         if (cache_config or cache) and protocol not in [3, "3"]:
             raise RedisError("Client caching is only supported with RESP version 3")

-        self.cluster_error_retry_attempts = cluster_error_retry_attempts
         self.command_flags = self.__class__.COMMAND_FLAGS.copy()
         self.node_flags = self.__class__.NODE_FLAGS.copy()
         self.read_from_replicas = read_from_replicas
@@ -667,7 +709,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         self.result_callbacks = CaseInsensitiveDict(self.__class__.RESULT_CALLBACKS)

         self.commands_parser = CommandsParser(self)
-        self._lock = threading.
+        self._lock = threading.RLock()

     def __enter__(self):
         return self
@@ -710,7 +752,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         if self.user_on_connect_func is not None:
             self.user_on_connect_func(connection)

-    def get_redis_connection(self, node):
+    def get_redis_connection(self, node: "ClusterNode") -> Redis:
         if not node.redis_connection:
             with self._lock:
                 if not node.redis_connection:
@@ -769,13 +811,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             self.nodes_manager.default_node = node
         return True

-    def
-        return self.retry
-
-    def set_retry(self, retry: "Retry") -> None:
+    def set_retry(self, retry: Retry) -> None:
         self.retry = retry
-        for node in self.get_nodes():
-            node.redis_connection.set_retry(retry)

     def monitor(self, target_node=None):
         """
@@ -813,20 +850,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         if shard_hint:
             raise RedisClusterException("shard_hint is deprecated in cluster mode")

-        if transaction:
-            raise RedisClusterException("transaction is deprecated in cluster mode")
-
         return ClusterPipeline(
             nodes_manager=self.nodes_manager,
             commands_parser=self.commands_parser,
             startup_nodes=self.nodes_manager.startup_nodes,
             result_callbacks=self.result_callbacks,
             cluster_response_callbacks=self.cluster_response_callbacks,
-            cluster_error_retry_attempts=self.
+            cluster_error_retry_attempts=self.retry.get_retries(),
             read_from_replicas=self.read_from_replicas,
             load_balancing_strategy=self.load_balancing_strategy,
             reinitialize_steps=self.reinitialize_steps,
+            retry=self.retry,
             lock=self._lock,
+            transaction=transaction,
         )

     def lock(
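With the transaction guard removed above, pipeline(transaction=True) is now a supported path that hands the work to the new TransactionStrategy added at the end of this file. A minimal sketch, reusing the rc client from the earlier example and assuming all keys share one hash slot (cross-slot transactions raise CrossSlotTransactionError, newly imported at the top of this diff); key names are placeholders:

# Hash tags keep both keys in the same slot, which a cluster MULTI/EXEC requires.
with rc.pipeline(transaction=True) as pipe:
    pipe.set("{user:1}:name", "alice")
    pipe.incr("{user:1}:visits")
    results = pipe.execute()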
@@ -988,7 +1024,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         redis_conn = self.get_default_node().redis_connection
         return self.commands_parser.get_keys(redis_conn, *args)

-    def determine_slot(self, *args):
+    def determine_slot(self, *args) -> int:
         """
         Figure out what slot to use based on args.

@@ -1087,8 +1123,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         """
         Wrapper for ERRORS_ALLOW_RETRY error handling.

-        It will try the number of times specified by the
-        "self.
+        It will try the number of times specified by the retries property from
+        config option "self.retry" which defaults to 3 unless manually
         configured.

         If it reaches the number of times, the command will raise the exception
@@ -1114,9 +1150,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         # execution since the nodes may not be valid anymore after the tables
         # were reinitialized. So in case of passed target nodes,
         # retry_attempts will be set to 0.
-        retry_attempts = (
-            0 if target_nodes_specified else self.cluster_error_retry_attempts
-        )
+        retry_attempts = 0 if target_nodes_specified else self.retry.get_retries()
         # Add one for the first execution
         execute_attempts = 1 + retry_attempts
         for _ in range(execute_attempts):
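To make the arithmetic above concrete (values are illustrative): with the default retry policy of three retries and no explicit target nodes, the command is attempted four times in total, while explicitly targeted commands run exactly once.

retry_attempts = 3                      # self.retry.get_retries() under the default policy
execute_attempts = 1 + retry_attempts   # 4 attempts: one initial try plus three retries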
@@ -1203,8 +1237,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         except AuthenticationError:
             raise
         except (ConnectionError, TimeoutError) as e:
-            # Connection retries are being handled in the node's
-            # Retry object.
             # ConnectionError can also be raised if we couldn't get a
             # connection from the pool before timing out, so check that
             # this is an actual connection before attempting to disconnect.
@@ -1241,13 +1273,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         except AskError as e:
             redirect_addr = get_node_name(host=e.host, port=e.port)
             asking = True
-        except ClusterDownError
+        except (ClusterDownError, SlotNotCoveredError):
             # ClusterDownError can occur during a failover and to get
             # self-healed, we will try to reinitialize the cluster layout
             # and retry executing the command
+
+            # SlotNotCoveredError can occur when the cluster is not fully
+            # initialized or can be temporary issue.
+            # We will try to reinitialize the cluster topology
+            # and retry executing the command
+
             time.sleep(0.25)
             self.nodes_manager.initialize()
-            raise
+            raise
         except ResponseError:
             raise
         except Exception as e:
@@ -1299,6 +1337,28 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         """
         setattr(self, funcname, func)

+    def transaction(self, func, *watches, **kwargs):
+        """
+        Convenience method for executing the callable `func` as a transaction
+        while watching all keys specified in `watches`. The 'func' callable
+        should expect a single argument which is a Pipeline object.
+        """
+        shard_hint = kwargs.pop("shard_hint", None)
+        value_from_callable = kwargs.pop("value_from_callable", False)
+        watch_delay = kwargs.pop("watch_delay", None)
+        with self.pipeline(True, shard_hint) as pipe:
+            while True:
+                try:
+                    if watches:
+                        pipe.watch(*watches)
+                    func_value = func(pipe)
+                    exec_value = pipe.execute()
+                    return func_value if value_from_callable else exec_value
+                except WatchError:
+                    if watch_delay is not None and watch_delay > 0:
+                        time.sleep(watch_delay)
+                    continue
+

 class ClusterNode:
     def __init__(self, host, port, server_type=None, redis_connection=None):
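The new transaction() helper mirrors the standalone client's convenience method. A hedged sketch of the intended call pattern, reusing the rc client from the earlier example; the key name is a placeholder, the hash tag keeps it in a single slot, and the watch/multi idiom follows the standalone client, with cluster specifics handled by the TransactionStrategy added at the end of this file:

def incr_visits(pipe):
    # In the standalone-client idiom, reads after WATCH run immediately and
    # buffered commands start after multi().
    current = int(pipe.get("{user:1}:visits") or 0)
    pipe.multi()
    pipe.set("{user:1}:visits", current + 1)

rc.transaction(incr_visits, "{user:1}:visits", watch_delay=0.1)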
@@ -1324,8 +1384,12 @@ class ClusterNode:
         return isinstance(obj, ClusterNode) and obj.name == self.name

     def __del__(self):
-
-        self.redis_connection
+        try:
+            if self.redis_connection is not None:
+                self.redis_connection.close()
+        except Exception:
+            # Ignore errors when closing the connection
+            pass


 class LoadBalancingStrategy(Enum):
@@ -1392,7 +1456,7 @@ class NodesManager:
         event_dispatcher: Optional[EventDispatcher] = None,
         **kwargs,
     ):
-        self.nodes_cache = {}
+        self.nodes_cache: Dict[str, Redis] = {}
         self.slots_cache = {}
         self.startup_nodes = {}
         self.default_node = None
@@ -1409,7 +1473,7 @@ class NodesManager:
         self.connection_kwargs = kwargs
         self.read_load_balancer = LoadBalancer()
         if lock is None:
-            lock = threading.
+            lock = threading.RLock()
         self._lock = lock
         if event_dispatcher is None:
             self._event_dispatcher = EventDispatcher()
@@ -1484,7 +1548,7 @@ class NodesManager:
             "In case you need select some load balancing strategy "
             "that will use replicas, please set it through 'load_balancing_strategy'"
         ),
-        version="5.0
+        version="5.3.0",
     )
     def get_node_from_slot(
         self,
@@ -1492,7 +1556,7 @@ class NodesManager:
         read_from_replicas=False,
         load_balancing_strategy=None,
         server_type=None,
-    ):
+    ) -> ClusterNode:
         """
         Gets a node that servers this hash slot
         """
@@ -1576,17 +1640,32 @@ class NodesManager:
         )

     def create_redis_node(self, host, port, **kwargs):
+        # We are configuring the connection pool not to retry
+        # connections on lower level clients to avoid retrying
+        # connections to nodes that are not reachable
+        # and to avoid blocking the connection pool.
+        # The only error that will have some handling in the lower
+        # level clients is ConnectionError which will trigger disconnection
+        # of the socket.
+        # The retries will be handled on cluster client level
+        # where we will have proper handling of the cluster topology
+        node_retry_config = Retry(
+            backoff=NoBackoff(), retries=0, supported_errors=(ConnectionError,)
+        )
+
         if self.from_url:
             # Create a redis node with a costumed connection pool
             kwargs.update({"host": host})
             kwargs.update({"port": port})
             kwargs.update({"cache": self._cache})
+            kwargs.update({"retry": node_retry_config})
             r = Redis(connection_pool=self.connection_pool_class(**kwargs))
         else:
             r = Redis(
                 host=host,
                 port=port,
                 cache=self._cache,
+                retry=node_retry_config,
                 **kwargs,
             )
         return r
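The comment block above pins down the division of labour: node-level Redis clients are created with Retry(NoBackoff(), retries=0) so they never retry on their own, and all retrying happens in the cluster client. If a different cluster-level policy is wanted it can be swapped in after construction; note that, per the set_retry change earlier in this diff, the new policy is no longer pushed down to the node clients. Values below are illustrative only:

from redis.backoff import ExponentialWithJitterBackoff
from redis.retry import Retry

# Replaces the cluster-level policy; node clients keep their retries=0 configuration.
rc.set_retry(Retry(backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=5))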
@@ -1624,7 +1703,9 @@ class NodesManager:
         fully_covered = False
         kwargs = self.connection_kwargs
         exception = None
-
+        # Convert to tuple to prevent RuntimeError if self.startup_nodes
+        # is modified during iteration
+        for startup_node in tuple(self.startup_nodes.values()):
             try:
                 if startup_node.redis_connection:
                     r = startup_node.redis_connection
@@ -1771,6 +1852,16 @@ class NodesManager:
             return self.address_remap((host, port))
         return host, port

+    def find_connection_owner(self, connection: Connection) -> Optional[Redis]:
+        node_name = get_node_name(connection.host, connection.port)
+        for node in tuple(self.nodes_cache.values()):
+            if node.redis_connection:
+                conn_args = node.redis_connection.connection_pool.connection_kwargs
+                if node_name == get_node_name(
+                    conn_args.get("host"), conn_args.get("port")
+                ):
+                    return node
+

 class ClusterPubSub(PubSub):
     """
@@ -1907,7 +1998,7 @@ class ClusterPubSub(PubSub):
             # register a callback that re-subscribes to any channels we
             # were listening to when we were disconnected
             self.connection.register_connect_callback(self.on_connect)
-            if self.push_handler_func is not None
+            if self.push_handler_func is not None:
                 self.connection._parser.set_pubsub_push_handler(self.push_handler_func)
             self._event_dispatcher.dispatch(
                 AfterPubSubConnectionInstantiationEvent(
@@ -2030,6 +2121,17 @@ class ClusterPipeline(RedisCluster):
         TryAgainError,
     )

+    NO_SLOTS_COMMANDS = {"UNWATCH"}
+    IMMEDIATE_EXECUTE_COMMANDS = {"WATCH", "UNWATCH"}
+    UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
+
+    @deprecated_args(
+        args_to_warn=[
+            "cluster_error_retry_attempts",
+        ],
+        reason="Please configure the 'retry' object instead",
+        version="6.0.0",
+    )
     def __init__(
         self,
         nodes_manager: "NodesManager",
@@ -2041,7 +2143,9 @@ class ClusterPipeline(RedisCluster):
         load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
         cluster_error_retry_attempts: int = 3,
         reinitialize_steps: int = 5,
+        retry: Optional[Retry] = None,
         lock=None,
+        transaction=False,
         **kwargs,
     ):
         """ """
@@ -2057,17 +2161,28 @@ class ClusterPipeline(RedisCluster):
         self.load_balancing_strategy = load_balancing_strategy
         self.command_flags = self.__class__.COMMAND_FLAGS.copy()
         self.cluster_response_callbacks = cluster_response_callbacks
-        self.cluster_error_retry_attempts = cluster_error_retry_attempts
         self.reinitialize_counter = 0
         self.reinitialize_steps = reinitialize_steps
+        if retry is not None:
+            self.retry = retry
+        else:
+            self.retry = Retry(
+                backoff=ExponentialWithJitterBackoff(base=1, cap=10),
+                retries=cluster_error_retry_attempts,
+            )
+
         self.encoder = Encoder(
             kwargs.get("encoding", "utf-8"),
             kwargs.get("encoding_errors", "strict"),
             kwargs.get("decode_responses", False),
         )
         if lock is None:
-            lock = threading.
+            lock = threading.RLock()
         self._lock = lock
+        self.parent_execute_command = super().execute_command
+        self._execution_strategy: ExecutionStrategy = (
+            PipelineStrategy(self) if not transaction else TransactionStrategy(self)
+        )

     def __repr__(self):
         """ """
@@ -2089,7 +2204,7 @@ class ClusterPipeline(RedisCluster):

     def __len__(self):
         """ """
-        return len(self.
+        return len(self._execution_strategy.command_queue)

     def __bool__(self):
         "Pipeline instances should always evaluate to True on Python 3+"
@@ -2099,45 +2214,35 @@ class ClusterPipeline(RedisCluster):
         """
         Wrapper function for pipeline_execute_command
         """
-        return self.
+        return self._execution_strategy.execute_command(*args, **kwargs)

     def pipeline_execute_command(self, *args, **options):
         """
-
-        """
-        self.command_stack.append(
-            PipelineCommand(args, options, len(self.command_stack))
-        )
-        return self
+        Stage a command to be executed when execute() is next called

-
-
-
+        Returns the current Pipeline object back so commands can be
+        chained together, such as:
+
+        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
+
+        At some other point, you can then run: pipe.execute(),
+        which will execute all commands queued in the pipe.
         """
-
-        r = c.result
-        if isinstance(r, Exception):
-            self.annotate_exception(r, c.position + 1, c.args)
-            raise r
+        return self._execution_strategy.execute_command(*args, **options)

     def annotate_exception(self, exception, number, command):
         """
         Provides extra context to the exception prior to it being handled
         """
-
-        msg = (
-            f"Command # {number} ({truncate_text(cmd)}) of pipeline "
-            f"caused error: {exception.args[0]}"
-        )
-        exception.args = (msg,) + exception.args[1:]
+        self._execution_strategy.annotate_exception(exception, number, command)

     def execute(self, raise_on_error: bool = True) -> List[Any]:
         """
         Execute all the commands in the current pipeline
         """
-
+
         try:
-            return self.
+            return self._execution_strategy.execute(raise_on_error)
         finally:
             self.reset()

@@ -2145,312 +2250,53 @@ class ClusterPipeline(RedisCluster):
         """
         Reset back to empty pipeline.
         """
-        self.
-
-        self.scripts = set()
-
-        # TODO: Implement
-        # make sure to reset the connection state in the event that we were
-        # watching something
-        # if self.watching and self.connection:
-        #     try:
-        #         # call this manually since our unwatch or
-        #         # immediate_execute_command methods can call reset()
-        #         self.connection.send_command('UNWATCH')
-        #         self.connection.read_response()
-        #     except ConnectionError:
-        #         # disconnect will also remove any previous WATCHes
-        #         self.connection.disconnect()
-
-        # clean up the other instance attributes
-        self.watching = False
-        self.explicit_transaction = False
-
-        # TODO: Implement
-        # we can safely return the connection to the pool here since we're
-        # sure we're no longer WATCHing anything
-        # if self.connection:
-        #     self.connection_pool.release(self.connection)
-        #     self.connection = None
+        self._execution_strategy.reset()

     def send_cluster_commands(
         self, stack, raise_on_error=True, allow_redirections=True
     ):
-
-
+        return self._execution_strategy.send_cluster_commands(
+            stack, raise_on_error=raise_on_error, allow_redirections=allow_redirections
+        )

-
-
-        - connection_pool was reseted
-        - refereh_table_asap set to True
+    def exists(self, *keys):
+        return self._execution_strategy.exists(*keys)

-
-
-
+    def eval(self):
+        """ """
+        return self._execution_strategy.eval()

-
-        raises ClusterDownException.
+    def multi(self):
         """
-
-
-        retry_attempts = self.cluster_error_retry_attempts
-        while True:
-            try:
-                return self._send_cluster_commands(
-                    stack,
-                    raise_on_error=raise_on_error,
-                    allow_redirections=allow_redirections,
-                )
-            except RedisCluster.ERRORS_ALLOW_RETRY as e:
-                if retry_attempts > 0:
-                    # Try again with the new cluster setup. All other errors
-                    # should be raised.
-                    retry_attempts -= 1
-                    pass
-                else:
-                    raise e
-
-    def _send_cluster_commands(
-        self, stack, raise_on_error=True, allow_redirections=True
-    ):
+        Start a transactional block of the pipeline after WATCH commands
+        are issued. End the transactional block with `execute`.
         """
-
+        self._execution_strategy.multi()

-
-
-
-
-        # the first time sending the commands we send all of
-        # the commands that were queued up.
-        # if we have to run through it again, we only retry
-        # the commands that failed.
-        attempt = sorted(stack, key=lambda x: x.position)
-        is_default_node = False
-        # build a list of node objects based on node names we need to
-        nodes = {}
+    def load_scripts(self):
+        """ """
+        self._execution_strategy.load_scripts()

-
-
-
-        for c in attempt:
-            while True:
-                # refer to our internal node -> slot table that
-                # tells us where a given command should route to.
-                # (it might be possible we have a cached node that no longer
-                # exists in the cluster, which is why we do this in a loop)
-                passed_targets = c.options.pop("target_nodes", None)
-                if passed_targets and not self._is_nodes_flag(passed_targets):
-                    target_nodes = self._parse_target_nodes(passed_targets)
-                else:
-                    target_nodes = self._determine_nodes(
-                        *c.args, node_flag=passed_targets
-                    )
-                    if not target_nodes:
-                        raise RedisClusterException(
-                            f"No targets were found to execute {c.args} command on"
-                        )
-                if len(target_nodes) > 1:
-                    raise RedisClusterException(
-                        f"Too many targets for command {c.args}"
-                    )
+    def discard(self):
+        """ """
+        self._execution_strategy.discard()

-
-
-
+    def watch(self, *names):
+        """Watches the values at keys ``names``"""
+        self._execution_strategy.watch(*names)

-
-
-
-                node_name = node.name
-                if node_name not in nodes:
-                    redis_node = self.get_redis_connection(node)
-                    try:
-                        connection = get_connection(redis_node)
-                    except (ConnectionError, TimeoutError):
-                        for n in nodes.values():
-                            n.connection_pool.release(n.connection)
-                        # Connection retries are being handled in the node's
-                        # Retry object. Reinitialize the node -> slot table.
-                        self.nodes_manager.initialize()
-                        if is_default_node:
-                            self.replace_default_node()
-                        raise
-                    nodes[node_name] = NodeCommands(
-                        redis_node.parse_response,
-                        redis_node.connection_pool,
-                        connection,
-                    )
-                nodes[node_name].append(c)
-                break
+    def unwatch(self):
+        """Unwatches all previously specified keys"""
+        self._execution_strategy.unwatch()

-
-
-        # before reading anything
-        # this allows us to flush all the requests out across the
-        # network essentially in parallel
-        # so that we can read them all in parallel as they come back.
-        # we dont' multiplex on the sockets as they come available,
-        # but that shouldn't make too much difference.
-        node_commands = nodes.values()
-        try:
-            node_commands = nodes.values()
-            for n in node_commands:
-                n.write()
+    def script_load_for_pipeline(self, *args, **kwargs):
+        self._execution_strategy.script_load_for_pipeline(*args, **kwargs)

-
-
-        finally:
-            # release all of the redis connections we allocated earlier
-            # back into the connection pool.
-            # we used to do this step as part of a try/finally block,
-            # but it is really dangerous to
-            # release connections back into the pool if for some
-            # reason the socket has data still left in it
-            # from a previous operation. The write and
-            # read operations already have try/catch around them for
-            # all known types of errors including connection
-            # and socket level errors.
-            # So if we hit an exception, something really bad
-            # happened and putting any oF
-            # these connections back into the pool is a very bad idea.
-            # the socket might have unread buffer still sitting in it,
-            # and then the next time we read from it we pass the
-            # buffered result back from a previous command and
-            # every single request after to that connection will always get
-            # a mismatched result.
-            for n in nodes.values():
-                n.connection_pool.release(n.connection)
+    def delete(self, *names):
+        self._execution_strategy.delete(*names)

-
-
-        # we're all done with that command, YAY!
-        # if we have more commands to attempt, we've run into problems.
-        # collect all the commands we are allowed to retry.
-        # (MOVED, ASK, or connection errors or timeout errors)
-        attempt = sorted(
-            (
-                c
-                for c in attempt
-                if isinstance(c.result, ClusterPipeline.ERRORS_ALLOW_RETRY)
-            ),
-            key=lambda x: x.position,
-        )
-        if attempt and allow_redirections:
-            # RETRY MAGIC HAPPENS HERE!
-            # send these remaining commands one at a time using `execute_command`
-            # in the main client. This keeps our retry logic
-            # in one place mostly,
-            # and allows us to be more confident in correctness of behavior.
-            # at this point any speed gains from pipelining have been lost
-            # anyway, so we might as well make the best
-            # attempt to get the correct behavior.
-            #
-            # The client command will handle retries for each
-            # individual command sequentially as we pass each
-            # one into `execute_command`. Any exceptions
-            # that bubble out should only appear once all
-            # retries have been exhausted.
-            #
-            # If a lot of commands have failed, we'll be setting the
-            # flag to rebuild the slots table from scratch.
-            # So MOVED errors should correct themselves fairly quickly.
-            self.reinitialize_counter += 1
-            if self._should_reinitialized():
-                self.nodes_manager.initialize()
-                if is_default_node:
-                    self.replace_default_node()
-            for c in attempt:
-                try:
-                    # send each command individually like we
-                    # do in the main client.
-                    c.result = super().execute_command(*c.args, **c.options)
-                except RedisError as e:
-                    c.result = e
-
-        # turn the response back into a simple flat array that corresponds
-        # to the sequence of commands issued in the stack in pipeline.execute()
-        response = []
-        for c in sorted(stack, key=lambda x: x.position):
-            if c.args[0] in self.cluster_response_callbacks:
-                # Remove keys entry, it needs only for cache.
-                c.options.pop("keys", None)
-                c.result = self.cluster_response_callbacks[c.args[0]](
-                    c.result, **c.options
-                )
-            response.append(c.result)
-
-        if raise_on_error:
-            self.raise_first_error(stack)
-
-        return response
-
-    def _fail_on_redirect(self, allow_redirections):
-        """ """
-        if not allow_redirections:
-            raise RedisClusterException(
-                "ASK & MOVED redirection not allowed in this pipeline"
-            )
-
-    def exists(self, *keys):
-        return self.execute_command("EXISTS", *keys)
-
-    def eval(self):
-        """ """
-        raise RedisClusterException("method eval() is not implemented")
-
-    def multi(self):
-        """ """
-        raise RedisClusterException("method multi() is not implemented")
-
-    def immediate_execute_command(self, *args, **options):
-        """ """
-        raise RedisClusterException(
-            "method immediate_execute_command() is not implemented"
-        )
-
-    def _execute_transaction(self, *args, **kwargs):
-        """ """
-        raise RedisClusterException("method _execute_transaction() is not implemented")
-
-    def load_scripts(self):
-        """ """
-        raise RedisClusterException("method load_scripts() is not implemented")
-
-    def watch(self, *names):
-        """ """
-        raise RedisClusterException("method watch() is not implemented")
-
-    def unwatch(self):
-        """ """
-        raise RedisClusterException("method unwatch() is not implemented")
-
-    def script_load_for_pipeline(self, *args, **kwargs):
-        """ """
-        raise RedisClusterException(
-            "method script_load_for_pipeline() is not implemented"
-        )
-
-    def delete(self, *names):
-        """
-        "Delete a key specified by ``names``"
-        """
-        if len(names) != 1:
-            raise RedisClusterException(
-                "deleting multiple keys is not implemented in pipeline command"
-            )
-
-        return self.execute_command("DEL", names[0])
-
-    def unlink(self, *names):
-        """
-        "Unlink a key specified by ``names``"
-        """
-        if len(names) != 1:
-            raise RedisClusterException(
-                "unlinking multiple keys is not implemented in pipeline command"
-            )
-
-        return self.execute_command("UNLINK", names[0])
+    def unlink(self, *names):
+        self._execution_strategy.unlink(*names)


 def block_pipeline_command(name: str) -> Callable[..., Any]:
@@ -2627,3 +2473,880 @@ class NodeCommands:
|
|
|
2627
2473
|
return
|
|
2628
2474
|
except RedisError:
|
|
2629
2475
|
c.result = sys.exc_info()[1]
|
|
2476
|
+
|
|
2477
|
+
|
|
2478
|
+
class ExecutionStrategy(ABC):
|
|
2479
|
+
@property
|
|
2480
|
+
@abstractmethod
|
|
2481
|
+
def command_queue(self):
|
|
2482
|
+
pass
|
|
2483
|
+
|
|
2484
|
+
@abstractmethod
|
|
2485
|
+
def execute_command(self, *args, **kwargs):
|
|
2486
|
+
"""
|
|
2487
|
+
Execution flow for current execution strategy.
|
|
2488
|
+
|
|
2489
|
+
See: ClusterPipeline.execute_command()
|
|
2490
|
+
"""
|
|
2491
|
+
pass
|
|
2492
|
+
|
|
2493
|
+
@abstractmethod
|
|
2494
|
+
def annotate_exception(self, exception, number, command):
|
|
2495
|
+
"""
|
|
2496
|
+
Annotate exception according to current execution strategy.
|
|
2497
|
+
|
|
2498
|
+
See: ClusterPipeline.annotate_exception()
|
|
2499
|
+
"""
|
|
2500
|
+
pass
|
|
2501
|
+
|
|
2502
|
+
@abstractmethod
|
|
2503
|
+
def pipeline_execute_command(self, *args, **options):
|
|
2504
|
+
"""
|
|
2505
|
+
Pipeline execution flow for current execution strategy.
|
|
2506
|
+
|
|
2507
|
+
See: ClusterPipeline.pipeline_execute_command()
|
|
2508
|
+
"""
|
|
2509
|
+
pass
|
|
2510
|
+
|
|
2511
|
+
@abstractmethod
|
|
2512
|
+
def execute(self, raise_on_error: bool = True) -> List[Any]:
|
|
2513
|
+
"""
|
|
2514
|
+
Executes current execution strategy.
|
|
2515
|
+
|
|
2516
|
+
See: ClusterPipeline.execute()
|
|
2517
|
+
"""
|
|
2518
|
+
pass
|
|
2519
|
+
|
|
2520
|
+
@abstractmethod
|
|
2521
|
+
def send_cluster_commands(
|
|
2522
|
+
self, stack, raise_on_error=True, allow_redirections=True
|
|
2523
|
+
):
|
|
2524
|
+
"""
|
|
2525
|
+
Sends commands according to current execution strategy.
|
|
2526
|
+
|
|
2527
|
+
See: ClusterPipeline.send_cluster_commands()
|
|
2528
|
+
"""
|
|
2529
|
+
pass
|
|
2530
|
+
|
|
2531
|
+
@abstractmethod
|
|
2532
|
+
def reset(self):
|
|
2533
|
+
"""
|
|
2534
|
+
Resets current execution strategy.
|
|
2535
|
+
|
|
2536
|
+
See: ClusterPipeline.reset()
|
|
2537
|
+
"""
|
|
2538
|
+
pass
|
|
2539
|
+
|
|
2540
|
+
@abstractmethod
|
|
2541
|
+
def exists(self, *keys):
|
|
2542
|
+
pass
|
|
2543
|
+
|
|
2544
|
+
@abstractmethod
|
|
2545
|
+
def eval(self):
|
|
2546
|
+
pass
|
|
2547
|
+
|
|
2548
|
+
@abstractmethod
|
|
2549
|
+
def multi(self):
|
|
2550
|
+
"""
|
|
2551
|
+
Starts transactional context.
|
|
2552
|
+
|
|
2553
|
+
See: ClusterPipeline.multi()
|
|
2554
|
+
"""
|
|
2555
|
+
pass
|
|
2556
|
+
|
|
2557
|
+
@abstractmethod
|
|
2558
|
+
def load_scripts(self):
|
|
2559
|
+
pass
|
|
2560
|
+
|
|
2561
|
+
@abstractmethod
|
|
2562
|
+
def watch(self, *names):
|
|
2563
|
+
pass
|
|
2564
|
+
|
|
2565
|
+
@abstractmethod
|
|
2566
|
+
def unwatch(self):
|
|
2567
|
+
"""
|
|
2568
|
+
Unwatches all previously specified keys
|
|
2569
|
+
|
|
2570
|
+
See: ClusterPipeline.unwatch()
|
|
2571
|
+
"""
|
|
2572
|
+
pass
|
|
2573
|
+
|
|
2574
|
+
@abstractmethod
|
|
2575
|
+
def script_load_for_pipeline(self, *args, **kwargs):
|
|
2576
|
+
pass
|
|
2577
|
+
|
|
2578
|
+
@abstractmethod
|
|
2579
|
+
def delete(self, *names):
|
|
2580
|
+
"""
|
|
2581
|
+
"Delete a key specified by ``names``"
|
|
2582
|
+
|
|
2583
|
+
See: ClusterPipeline.delete()
|
|
2584
|
+
"""
|
|
2585
|
+
pass
|
|
2586
|
+
|
|
2587
|
+
@abstractmethod
|
|
2588
|
+
def unlink(self, *names):
|
|
2589
|
+
"""
|
|
2590
|
+
"Unlink a key specified by ``names``"
|
|
2591
|
+
|
|
2592
|
+
See: ClusterPipeline.unlink()
|
|
2593
|
+
"""
|
|
2594
|
+
pass
|
|
2595
|
+
|
|
2596
|
+
@abstractmethod
|
|
2597
|
+
def discard(self):
|
|
2598
|
+
pass
|
|
2599
|
+
|
|
2600
|
+
|
|
2601
|
+
class AbstractStrategy(ExecutionStrategy):
|
|
2602
|
+
def __init__(
|
|
2603
|
+
self,
|
|
2604
|
+
pipe: ClusterPipeline,
|
|
2605
|
+
):
|
|
2606
|
+
self._command_queue: List[PipelineCommand] = []
|
|
2607
|
+
self._pipe = pipe
|
|
2608
|
+
self._nodes_manager = self._pipe.nodes_manager
|
|
2609
|
+
|
|
2610
|
+
@property
|
|
2611
|
+
def command_queue(self):
|
|
2612
|
+
return self._command_queue
|
|
2613
|
+
|
|
2614
|
+
@command_queue.setter
|
|
2615
|
+
def command_queue(self, queue: List[PipelineCommand]):
|
|
2616
|
+
self._command_queue = queue
|
|
2617
|
+
|
|
2618
|
+
@abstractmethod
|
|
2619
|
+
def execute_command(self, *args, **kwargs):
|
|
2620
|
+
pass
|
|
2621
|
+
|
|
2622
|
+
def pipeline_execute_command(self, *args, **options):
|
|
2623
|
+
self._command_queue.append(
|
|
2624
|
+
PipelineCommand(args, options, len(self._command_queue))
|
|
2625
|
+
)
|
|
2626
|
+
return self._pipe
|
|
2627
|
+
|
|
2628
|
+
@abstractmethod
|
|
2629
|
+
def execute(self, raise_on_error: bool = True) -> List[Any]:
|
|
2630
|
+
pass
|
|
2631
|
+
|
|
2632
|
+
@abstractmethod
|
|
2633
|
+
def send_cluster_commands(
|
|
2634
|
+
self, stack, raise_on_error=True, allow_redirections=True
|
|
2635
|
+
):
|
|
2636
|
+
pass
|
|
2637
|
+
|
|
2638
|
+
@abstractmethod
|
|
2639
|
+
def reset(self):
|
|
2640
|
+
pass
|
|
2641
|
+
|
|
2642
|
+
def exists(self, *keys):
|
|
2643
|
+
return self.execute_command("EXISTS", *keys)
|
|
2644
|
+
|
|
2645
|
+
def eval(self):
|
|
2646
|
+
""" """
|
|
2647
|
+
raise RedisClusterException("method eval() is not implemented")
|
|
2648
|
+
|
|
2649
|
+
def load_scripts(self):
|
|
2650
|
+
""" """
|
|
2651
|
+
raise RedisClusterException("method load_scripts() is not implemented")
|
|
2652
|
+
|
|
2653
|
+
def script_load_for_pipeline(self, *args, **kwargs):
|
|
2654
|
+
""" """
|
|
2655
|
+
raise RedisClusterException(
|
|
2656
|
+
"method script_load_for_pipeline() is not implemented"
|
|
2657
|
+
)
|
|
2658
|
+
|
|
2659
|
+
def annotate_exception(self, exception, number, command):
|
|
2660
|
+
"""
|
|
2661
|
+
Provides extra context to the exception prior to it being handled
|
|
2662
|
+
"""
|
|
2663
|
+
cmd = " ".join(map(safe_str, command))
|
|
2664
|
+
msg = (
|
|
2665
|
+
f"Command # {number} ({truncate_text(cmd)}) of pipeline "
|
|
2666
|
+
f"caused error: {exception.args[0]}"
|
|
2667
|
+
)
|
|
2668
|
+
exception.args = (msg,) + exception.args[1:]
|
|
2669
|
+
|
|
2670
|
+
|
|
2671
|
+
class PipelineStrategy(AbstractStrategy):
|
|
2672
|
+
def __init__(self, pipe: ClusterPipeline):
|
|
2673
|
+
super().__init__(pipe)
|
|
2674
|
+
self.command_flags = pipe.command_flags
|
|
2675
|
+
|
|
2676
|
+
def execute_command(self, *args, **kwargs):
|
|
2677
|
+
return self.pipeline_execute_command(*args, **kwargs)
|
|
2678
|
+
|
|
2679
|
+
def _raise_first_error(self, stack):
|
|
2680
|
+
"""
|
|
2681
|
+
Raise the first exception on the stack
|
|
2682
|
+
"""
|
|
2683
|
+
for c in stack:
|
|
2684
|
+
r = c.result
|
|
2685
|
+
if isinstance(r, Exception):
|
|
2686
|
+
self.annotate_exception(r, c.position + 1, c.args)
|
|
2687
|
+
raise r
|
|
2688
|
+
|
|
2689
|
+
def execute(self, raise_on_error: bool = True) -> List[Any]:
|
|
2690
|
+
stack = self._command_queue
|
|
2691
|
+
if not stack:
|
|
2692
|
+
return []
|
|
2693
|
+
|
|
2694
|
+
try:
|
|
2695
|
+
return self.send_cluster_commands(stack, raise_on_error)
|
|
2696
|
+
finally:
|
|
2697
|
+
self.reset()
|
|
2698
|
+
|
|
2699
|
+
def reset(self):
|
|
2700
|
+
"""
|
|
2701
|
+
Reset back to empty pipeline.
|
|
2702
|
+
"""
|
|
2703
|
+
self._command_queue = []
|
|
2704
|
+
|
|
2705
|
+
def send_cluster_commands(
|
|
2706
|
+
self, stack, raise_on_error=True, allow_redirections=True
|
|
2707
|
+
):
|
|
2708
|
+
"""
|
|
2709
|
+
Wrapper for RedisCluster.ERRORS_ALLOW_RETRY errors handling.
|
|
2710
|
+
|
|
2711
|
+
If one of the retryable exceptions has been thrown we assume that:
|
|
2712
|
+
- connection_pool was disconnected
|
|
2713
|
+
- connection_pool was reseted
|
|
2714
|
+
- refereh_table_asap set to True
|
|
2715
|
+
|
|
2716
|
+
It will try the number of times specified by
|
|
2717
|
+
the retries in config option "self.retry"
|
|
2718
|
+
which defaults to 3 unless manually configured.
|
|
2719
|
+
|
|
2720
|
+
If it reaches the number of times, the command will
|
|
2721
|
+
raises ClusterDownException.
|
|
2722
|
+
"""
|
|
2723
|
+
if not stack:
|
|
2724
|
+
return []
|
|
2725
|
+
retry_attempts = self._pipe.retry.get_retries()
|
|
2726
|
+
while True:
|
|
2727
|
+
try:
|
|
2728
|
+
return self._send_cluster_commands(
|
|
2729
|
+
stack,
|
|
2730
|
+
raise_on_error=raise_on_error,
|
|
2731
|
+
allow_redirections=allow_redirections,
|
|
2732
|
+
)
|
|
2733
|
+
except RedisCluster.ERRORS_ALLOW_RETRY as e:
|
|
2734
|
+
if retry_attempts > 0:
|
|
2735
|
+
# Try again with the new cluster setup. All other errors
|
|
2736
|
+
# should be raised.
|
|
2737
|
+
retry_attempts -= 1
|
|
2738
|
+
pass
|
|
2739
|
+
else:
|
|
2740
|
+
raise e
|
|
2741
|
+
|
|
2742
|
+
def _send_cluster_commands(
|
|
2743
|
+
self, stack, raise_on_error=True, allow_redirections=True
|
|
2744
|
+
):
|
|
2745
|
+
"""
|
|
2746
|
+
Send a bunch of cluster commands to the redis cluster.
|
|
2747
|
+
|
|
2748
|
+
`allow_redirections` If the pipeline should follow
|
|
2749
|
+
`ASK` & `MOVED` responses automatically. If set
|
|
2750
|
+
to false it will raise RedisClusterException.
|
|
2751
|
+
"""
|
|
2752
|
+
# the first time sending the commands we send all of
|
|
2753
|
+
# the commands that were queued up.
|
|
2754
|
+
# if we have to run through it again, we only retry
|
|
2755
|
+
# the commands that failed.
|
|
2756
|
+
attempt = sorted(stack, key=lambda x: x.position)
|
|
2757
|
+
is_default_node = False
|
|
2758
|
+
# build a list of node objects based on node names we need to
|
|
2759
|
+
nodes = {}
|
|
2760
|
+
|
|
2761
|
+
# as we move through each command that still needs to be processed,
|
|
2762
|
+
# we figure out the slot number that command maps to, then from
|
|
2763
|
+
# the slot determine the node.
|
|
2764
|
+
for c in attempt:
|
|
2765
|
+
while True:
|
|
2766
|
+
# refer to our internal node -> slot table that
|
|
2767
|
+
# tells us where a given command should route to.
|
|
2768
|
+
# (it might be possible we have a cached node that no longer
|
|
2769
|
+
# exists in the cluster, which is why we do this in a loop)
|
|
2770
|
+
passed_targets = c.options.pop("target_nodes", None)
|
|
2771
|
+
if passed_targets and not self._is_nodes_flag(passed_targets):
|
|
2772
|
+
target_nodes = self._parse_target_nodes(passed_targets)
|
|
2773
|
+
else:
|
|
2774
|
+
target_nodes = self._determine_nodes(
|
|
2775
|
+
*c.args, node_flag=passed_targets
|
|
2776
|
+
)
|
|
2777
|
+
if not target_nodes:
|
|
2778
|
+
raise RedisClusterException(
|
|
2779
|
+
f"No targets were found to execute {c.args} command on"
|
|
2780
|
+
)
|
|
2781
|
+
if len(target_nodes) > 1:
|
|
2782
|
+
raise RedisClusterException(
|
|
2783
|
+
f"Too many targets for command {c.args}"
|
|
2784
|
+
)
|
|
2785
|
+
|
|
2786
|
+
node = target_nodes[0]
|
|
2787
|
+
if node == self._pipe.get_default_node():
|
|
2788
|
+
is_default_node = True
|
|
2789
|
+
|
|
2790
|
+
# now that we know the name of the node
|
|
2791
|
+
# ( it's just a string in the form of host:port )
|
|
2792
|
+
                # we can build a list of commands for each node.
                node_name = node.name
                if node_name not in nodes:
                    redis_node = self._pipe.get_redis_connection(node)
                    try:
                        connection = get_connection(redis_node)
                    except (ConnectionError, TimeoutError):
                        for n in nodes.values():
                            n.connection_pool.release(n.connection)
                        # Connection retries are being handled in the node's
                        # Retry object. Reinitialize the node -> slot table.
                        self._nodes_manager.initialize()
                        if is_default_node:
                            self._pipe.replace_default_node()
                        raise
                    nodes[node_name] = NodeCommands(
                        redis_node.parse_response,
                        redis_node.connection_pool,
                        connection,
                    )
                nodes[node_name].append(c)
                break

        # send the commands in sequence.
        # we write to all the open sockets for each node first,
        # before reading anything
        # this allows us to flush all the requests out across the
        # network
        # so that we can read them from different sockets as they come back.
        # we don't multiplex on the sockets as they come available,
        # but that shouldn't make too much difference.
        try:
            node_commands = nodes.values()
            for n in node_commands:
                n.write()

            for n in node_commands:
                n.read()
        finally:
            # release all of the redis connections we allocated earlier
            # back into the connection pool.
            # we used to do this step as part of a try/finally block,
            # but it is really dangerous to
            # release connections back into the pool if for some
            # reason the socket has data still left in it
            # from a previous operation. The write and
            # read operations already have try/catch around them for
            # all known types of errors including connection
            # and socket level errors.
            # So if we hit an exception, something really bad
            # happened and putting any of
            # these connections back into the pool is a very bad idea.
            # the socket might have unread buffer still sitting in it,
            # and then the next time we read from it we pass the
            # buffered result back from a previous command and
            # every single request sent on that connection after that
            # will always get a mismatched result.
            for n in nodes.values():
                n.connection_pool.release(n.connection)

        # if the response isn't an exception it is a
        # valid response from the node
        # we're all done with that command, YAY!
        # if we have more commands to attempt, we've run into problems.
        # collect all the commands we are allowed to retry.
        # (MOVED, ASK, or connection errors or timeout errors)
        attempt = sorted(
            (
                c
                for c in attempt
                if isinstance(c.result, ClusterPipeline.ERRORS_ALLOW_RETRY)
            ),
            key=lambda x: x.position,
        )
        if attempt and allow_redirections:
            # RETRY MAGIC HAPPENS HERE!
            # send these remaining commands one at a time using `execute_command`
            # in the main client. This keeps our retry logic
            # in one place mostly,
            # and allows us to be more confident in correctness of behavior.
            # at this point any speed gains from pipelining have been lost
            # anyway, so we might as well make the best
            # attempt to get the correct behavior.
            #
            # The client command will handle retries for each
            # individual command sequentially as we pass each
            # one into `execute_command`. Any exceptions
            # that bubble out should only appear once all
            # retries have been exhausted.
            #
            # If a lot of commands have failed, we'll be setting the
            # flag to rebuild the slots table from scratch.
            # So MOVED errors should correct themselves fairly quickly.
            self._pipe.reinitialize_counter += 1
            if self._pipe._should_reinitialized():
                self._nodes_manager.initialize()
                if is_default_node:
                    self._pipe.replace_default_node()
            for c in attempt:
                try:
                    # send each command individually like we
                    # do in the main client.
                    c.result = self._pipe.parent_execute_command(*c.args, **c.options)
                except RedisError as e:
                    c.result = e

        # turn the response back into a simple flat array that corresponds
        # to the sequence of commands issued in the stack in pipeline.execute()
        response = []
        for c in sorted(stack, key=lambda x: x.position):
            if c.args[0] in self._pipe.cluster_response_callbacks:
                # Remove the keys entry; it is only needed for the cache.
                c.options.pop("keys", None)
                c.result = self._pipe.cluster_response_callbacks[c.args[0]](
                    c.result, **c.options
                )
            response.append(c.result)

        if raise_on_error:
            self._raise_first_error(stack)

        return response

    def _is_nodes_flag(self, target_nodes):
        return isinstance(target_nodes, str) and target_nodes in self._pipe.node_flags

    def _parse_target_nodes(self, target_nodes):
        if isinstance(target_nodes, list):
            nodes = target_nodes
        elif isinstance(target_nodes, ClusterNode):
            # Supports passing a single ClusterNode as a variable
            nodes = [target_nodes]
        elif isinstance(target_nodes, dict):
            # Supports dictionaries of the format {node_name: node}.
            # It enables executing commands on multiple nodes, e.g.:
            # rc.cluster_save_config(rc.get_primaries())
            nodes = target_nodes.values()
        else:
            raise TypeError(
                "target_nodes type can be one of the following: "
                "node_flag (PRIMARIES, REPLICAS, RANDOM, ALL_NODES), "
                "ClusterNode, list<ClusterNode>, or dict<any, ClusterNode>. "
                f"The passed type is {type(target_nodes)}"
            )
        return nodes

    def _determine_nodes(self, *args, **kwargs) -> List["ClusterNode"]:
        # Determine which nodes the command should be executed on.
        # Returns a list of target nodes.
        command = args[0].upper()
        if (
            len(args) >= 2
            and f"{args[0]} {args[1]}".upper() in self._pipe.command_flags
        ):
            command = f"{args[0]} {args[1]}".upper()

        nodes_flag = kwargs.pop("nodes_flag", None)
        if nodes_flag is not None:
            # nodes flag passed by the user
            command_flag = nodes_flag
        else:
            # get the nodes group for this command if it was predefined
            command_flag = self._pipe.command_flags.get(command)
        if command_flag == self._pipe.RANDOM:
            # return a random node
            return [self._pipe.get_random_node()]
        elif command_flag == self._pipe.PRIMARIES:
            # return all primaries
            return self._pipe.get_primaries()
        elif command_flag == self._pipe.REPLICAS:
            # return all replicas
            return self._pipe.get_replicas()
        elif command_flag == self._pipe.ALL_NODES:
            # return all nodes
            return self._pipe.get_nodes()
        elif command_flag == self._pipe.DEFAULT_NODE:
            # return the cluster's default node
            return [self._nodes_manager.default_node]
        elif command in self._pipe.SEARCH_COMMANDS[0]:
            return [self._nodes_manager.default_node]
        else:
            # get the node that holds the key's slot
            slot = self._pipe.determine_slot(*args)
            node = self._nodes_manager.get_node_from_slot(
                slot,
                self._pipe.read_from_replicas and command in READ_COMMANDS,
                self._pipe.load_balancing_strategy
                if command in READ_COMMANDS
                else None,
            )
            return [node]

    def multi(self):
        raise RedisClusterException(
            "method multi() is not supported outside of transactional context"
        )

    def discard(self):
        raise RedisClusterException(
            "method discard() is not supported outside of transactional context"
        )

    def watch(self, *names):
        raise RedisClusterException(
            "method watch() is not supported outside of transactional context"
        )

    def unwatch(self, *names):
        raise RedisClusterException(
            "method unwatch() is not supported outside of transactional context"
        )

    def delete(self, *names):
        if len(names) != 1:
            raise RedisClusterException(
                "deleting multiple keys is not implemented in pipeline command"
            )

        return self.execute_command("DEL", names[0])

    def unlink(self, *names):
        if len(names) != 1:
            raise RedisClusterException(
                "unlinking multiple keys is not implemented in pipeline command"
            )

        return self.execute_command("UNLINK", names[0])

class TransactionStrategy(AbstractStrategy):
    NO_SLOTS_COMMANDS = {"UNWATCH"}
    IMMEDIATE_EXECUTE_COMMANDS = {"WATCH", "UNWATCH"}
    UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
    SLOT_REDIRECT_ERRORS = (AskError, MovedError)
    CONNECTION_ERRORS = (
        ConnectionError,
        OSError,
        ClusterDownError,
        SlotNotCoveredError,
    )

    def __init__(self, pipe: ClusterPipeline):
        super().__init__(pipe)
        self._explicit_transaction = False
        self._watching = False
        self._pipeline_slots: Set[int] = set()
        self._transaction_connection: Optional[Connection] = None
        self._executing = False
        self._retry = copy(self._pipe.retry)
        self._retry.update_supported_errors(
            RedisCluster.ERRORS_ALLOW_RETRY + self.SLOT_REDIRECT_ERRORS
        )

    def _get_client_and_connection_for_transaction(self) -> Tuple[Redis, Connection]:
        """
        Find a connection for a pipeline transaction.

        For running an atomic transaction, watch keys ensure that contents have not
        been altered as long as the watch commands for those keys were sent over the
        same connection. So once we start watching a key, we fetch a connection to
        the node that owns that slot and reuse it.
        """
        if not self._pipeline_slots:
            raise RedisClusterException(
                "At least a command with a key is needed to identify a node"
            )

        node: ClusterNode = self._nodes_manager.get_node_from_slot(
            list(self._pipeline_slots)[0], False
        )
        redis_node: Redis = self._pipe.get_redis_connection(node)
        if self._transaction_connection:
            if not redis_node.connection_pool.owns_connection(
                self._transaction_connection
            ):
                previous_node = self._nodes_manager.find_connection_owner(
                    self._transaction_connection
                )
                previous_node.connection_pool.release(self._transaction_connection)
                self._transaction_connection = None

        if not self._transaction_connection:
            self._transaction_connection = get_connection(redis_node)

        return redis_node, self._transaction_connection
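Because the transaction pins one connection to the node that owns a single slot, every key the transaction touches has to hash to that slot. Hash tags are the usual way to arrange this; a small sketch using redis.crc.key_slot (key names are illustrative only):

```python
from redis.crc import key_slot

# Keys that share the same {hash tag} always map to the same slot,
# so they can be watched and mutated inside one cluster transaction.
assert key_slot(b"{user:1}:balance") == key_slot(b"{user:1}:history")

# Keys without a common tag will usually land on different slots.
print(key_slot(b"user:1:balance"), key_slot(b"user:2:balance"))
```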

    def execute_command(self, *args, **kwargs):
        slot_number: Optional[int] = None
        if args[0] not in ClusterPipeline.NO_SLOTS_COMMANDS:
            slot_number = self._pipe.determine_slot(*args)

        if (
            self._watching or args[0] in self.IMMEDIATE_EXECUTE_COMMANDS
        ) and not self._explicit_transaction:
            if args[0] == "WATCH":
                self._validate_watch()

            if slot_number is not None:
                if self._pipeline_slots and slot_number not in self._pipeline_slots:
                    raise CrossSlotTransactionError(
                        "Cannot watch or send commands on different slots"
                    )

                self._pipeline_slots.add(slot_number)
            elif args[0] not in self.NO_SLOTS_COMMANDS:
                raise RedisClusterException(
                    f"Cannot identify slot number for command: {args[0]}, "
                    "it cannot be triggered in a transaction"
                )

            return self._immediate_execute_command(*args, **kwargs)
        else:
            if slot_number is not None:
                self._pipeline_slots.add(slot_number)

            return self.pipeline_execute_command(*args, **kwargs)

    def _validate_watch(self):
        if self._explicit_transaction:
            raise RedisError("Cannot issue a WATCH after a MULTI")

        self._watching = True

    def _immediate_execute_command(self, *args, **options):
        return self._retry.call_with_retry(
            lambda: self._get_connection_and_send_command(*args, **options),
            self._reinitialize_on_error,
        )

    def _get_connection_and_send_command(self, *args, **options):
        redis_node, connection = self._get_client_and_connection_for_transaction()
        return self._send_command_parse_response(
            connection, redis_node, args[0], *args, **options
        )

    def _send_command_parse_response(
        self, conn, redis_node: Redis, command_name, *args, **options
    ):
        """
        Send a command and parse the response
        """

        conn.send_command(*args)
        output = redis_node.parse_response(conn, command_name, **options)

        if command_name in self.UNWATCH_COMMANDS:
            self._watching = False
        return output

    def _reinitialize_on_error(self, error):
        if self._watching:
            if type(error) in self.SLOT_REDIRECT_ERRORS and self._executing:
                raise WatchError("Slot rebalancing occurred while watching keys")

        if (
            type(error) in self.SLOT_REDIRECT_ERRORS
            or type(error) in self.CONNECTION_ERRORS
        ):
            if self._transaction_connection:
                self._transaction_connection = None

            self._pipe.reinitialize_counter += 1
            if self._pipe._should_reinitialized():
                self._nodes_manager.initialize()
                self.reinitialize_counter = 0
            else:
                self._nodes_manager.update_moved_exception(error)

        self._executing = False

    def _raise_first_error(self, responses, stack):
        """
        Raise the first exception on the stack
        """
        for r, cmd in zip(responses, stack):
            if isinstance(r, Exception):
                self.annotate_exception(r, cmd.position + 1, cmd.args)
                raise r

    def execute(self, raise_on_error: bool = True) -> List[Any]:
        stack = self._command_queue
        if not stack and (not self._watching or not self._pipeline_slots):
            return []

        return self._execute_transaction_with_retries(stack, raise_on_error)

    def _execute_transaction_with_retries(
        self, stack: List["PipelineCommand"], raise_on_error: bool
    ):
        return self._retry.call_with_retry(
            lambda: self._execute_transaction(stack, raise_on_error),
            self._reinitialize_on_error,
        )

    def _execute_transaction(
        self, stack: List["PipelineCommand"], raise_on_error: bool
    ):
        if len(self._pipeline_slots) > 1:
            raise CrossSlotTransactionError(
                "All keys involved in a cluster transaction must map to the same slot"
            )

        self._executing = True

        redis_node, connection = self._get_client_and_connection_for_transaction()

        stack = chain(
            [PipelineCommand(("MULTI",))],
            stack,
            [PipelineCommand(("EXEC",))],
        )
        commands = [c.args for c in stack if EMPTY_RESPONSE not in c.options]
        packed_commands = connection.pack_commands(commands)
        connection.send_packed_command(packed_commands)
        errors = []

        # parse off the response for MULTI
        # NOTE: we need to handle ResponseErrors here and continue
        # so that we read all the additional command messages from
        # the socket
        try:
            redis_node.parse_response(connection, "MULTI")
        except ResponseError as e:
            self.annotate_exception(e, 0, "MULTI")
            errors.append(e)
        except self.CONNECTION_ERRORS as cluster_error:
            self.annotate_exception(cluster_error, 0, "MULTI")
            raise

        # and all the other commands
        for i, command in enumerate(self._command_queue):
            if EMPTY_RESPONSE in command.options:
                errors.append((i, command.options[EMPTY_RESPONSE]))
            else:
                try:
                    _ = redis_node.parse_response(connection, "_")
                except self.SLOT_REDIRECT_ERRORS as slot_error:
                    self.annotate_exception(slot_error, i + 1, command.args)
                    errors.append(slot_error)
                except self.CONNECTION_ERRORS as cluster_error:
                    self.annotate_exception(cluster_error, i + 1, command.args)
                    raise
                except ResponseError as e:
                    self.annotate_exception(e, i + 1, command.args)
                    errors.append(e)

        response = None
        # parse the EXEC.
        try:
            response = redis_node.parse_response(connection, "EXEC")
        except ExecAbortError:
            if errors:
                raise errors[0]
            raise

        self._executing = False

        # EXEC clears any watched keys
        self._watching = False

        if response is None:
            raise WatchError("Watched variable changed.")

        # put any parse errors into the response
        for i, e in errors:
            response.insert(i, e)

        if len(response) != len(self._command_queue):
            raise InvalidPipelineStack(
                "Unexpected response length for cluster pipeline EXEC."
                " Command stack was {} but response had length {}".format(
                    [c.args[0] for c in self._command_queue], len(response)
                )
            )

        # find any errors in the response and raise if necessary
        if raise_on_error or len(errors) > 0:
            self._raise_first_error(
                response,
                self._command_queue,
            )

        # We have to run response callbacks manually
        data = []
        for r, cmd in zip(response, self._command_queue):
            if not isinstance(r, Exception):
                command_name = cmd.args[0]
                if command_name in self._pipe.cluster_response_callbacks:
                    r = self._pipe.cluster_response_callbacks[command_name](
                        r, **cmd.options
                    )
            data.append(r)
        return data

    def reset(self):
        self._command_queue = []

        # make sure to reset the connection state in the event that we were
        # watching something
        if self._transaction_connection:
            try:
                # call this manually since our unwatch or
                # immediate_execute_command methods can call reset()
                self._transaction_connection.send_command("UNWATCH")
                self._transaction_connection.read_response()
                # we can safely return the connection to the pool here since we're
                # sure we're no longer WATCHing anything
                node = self._nodes_manager.find_connection_owner(
                    self._transaction_connection
                )
                node.redis_connection.connection_pool.release(
                    self._transaction_connection
                )
                self._transaction_connection = None
            except self.CONNECTION_ERRORS:
                # disconnect will also remove any previous WATCHes
                if self._transaction_connection:
                    self._transaction_connection.disconnect()

        # clean up the other instance attributes
        self._watching = False
        self._explicit_transaction = False
        self._pipeline_slots = set()
        self._executing = False

    def send_cluster_commands(
        self, stack, raise_on_error=True, allow_redirections=True
    ):
        raise NotImplementedError(
            "send_cluster_commands cannot be executed in transactional context."
        )

    def multi(self):
        if self._explicit_transaction:
            raise RedisError("Cannot issue nested calls to MULTI")
        if self._command_queue:
            raise RedisError(
                "Commands without an initial WATCH have already been issued"
            )
        self._explicit_transaction = True

    def watch(self, *names):
        if self._explicit_transaction:
            raise RedisError("Cannot issue a WATCH after a MULTI")

        return self.execute_command("WATCH", *names)

    def unwatch(self):
        if self._watching:
            return self.execute_command("UNWATCH")

        return True

    def discard(self):
        self.reset()

    def delete(self, *names):
        return self.execute_command("DEL", *names)

    def unlink(self, *names):
        return self.execute_command("UNLINK", *names)
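Taken together, TransactionStrategy gives a cluster pipeline the familiar WATCH/MULTI/EXEC optimistic-locking flow, provided every key maps to a single slot. A minimal sketch of that flow follows; the connection details are placeholders, and it assumes that passing transaction=True to pipeline() is what selects this strategy:

```python
from redis.cluster import RedisCluster
from redis.exceptions import WatchError

rc = RedisCluster(host="localhost", port=7000)  # placeholder node

# Assumption: transaction=True selects the transactional pipeline strategy.
with rc.pipeline(transaction=True) as pipe:
    while True:
        try:
            pipe.watch("{acct:42}:balance")       # immediate WATCH, pins the slot
            balance = int(pipe.get("{acct:42}:balance") or 0)
            pipe.multi()                          # start buffering commands
            pipe.set("{acct:42}:balance", balance + 10)
            pipe.execute()                        # MULTI ... EXEC on the owning node
            break
        except WatchError:
            # the watched key changed under us; retry the read/modify/write
            continue
```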