redis 6.0.0b1__py3-none-any.whl → 6.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
redis/cluster.py CHANGED
@@ -3,18 +3,25 @@ import socket
3
3
  import sys
4
4
  import threading
5
5
  import time
6
+ from abc import ABC, abstractmethod
6
7
  from collections import OrderedDict
8
+ from copy import copy
7
9
  from enum import Enum
8
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
10
+ from itertools import chain
11
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
9
12
 
10
13
  from redis._parsers import CommandsParser, Encoder
11
14
  from redis._parsers.helpers import parse_scan
12
- from redis.backoff import default_backoff
15
+ from redis.backoff import ExponentialWithJitterBackoff, NoBackoff
13
16
  from redis.cache import CacheConfig, CacheFactory, CacheFactoryInterface, CacheInterface
14
- from redis.client import CaseInsensitiveDict, PubSub, Redis
17
+ from redis.client import EMPTY_RESPONSE, CaseInsensitiveDict, PubSub, Redis
15
18
  from redis.commands import READ_COMMANDS, RedisClusterCommands
16
19
  from redis.commands.helpers import list_or_args
17
- from redis.connection import ConnectionPool, parse_url
20
+ from redis.connection import (
21
+ Connection,
22
+ ConnectionPool,
23
+ parse_url,
24
+ )
18
25
  from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
19
26
  from redis.event import (
20
27
  AfterPooledConnectionsInstantiationEvent,
@@ -28,7 +35,10 @@ from redis.exceptions import (
28
35
  ClusterDownError,
29
36
  ClusterError,
30
37
  ConnectionError,
38
+ CrossSlotTransactionError,
31
39
  DataError,
40
+ ExecAbortError,
41
+ InvalidPipelineStack,
32
42
  MovedError,
33
43
  RedisClusterException,
34
44
  RedisError,
@@ -36,6 +46,7 @@ from redis.exceptions import (
36
46
  SlotNotCoveredError,
37
47
  TimeoutError,
38
48
  TryAgainError,
49
+ WatchError,
39
50
  )
40
51
  from redis.lock import Lock
41
52
  from redis.retry import Retry
@@ -47,6 +58,7 @@ from redis.utils import (
47
58
  merge_result,
48
59
  safe_str,
49
60
  str_if_bytes,
61
+ truncate_text,
50
62
  )
51
63
 
52
64
 
@@ -57,9 +69,9 @@ def get_node_name(host: str, port: Union[str, int]) -> str:
57
69
  @deprecated_args(
58
70
  allowed_args=["redis_node"],
59
71
  reason="Use get_connection(redis_node) instead",
60
- version="5.0.3",
72
+ version="5.3.0",
61
73
  )
62
- def get_connection(redis_node, *args, **options):
74
+ def get_connection(redis_node: Redis, *args, **options) -> Connection:
63
75
  return redis_node.connection or redis_node.connection_pool.get_connection()
64
76
 
65
77
 
@@ -141,7 +153,6 @@ REPLICA = "replica"
141
153
  SLOT_ID = "slot-id"
142
154
 
143
155
  REDIS_ALLOWED_KEYS = (
144
- "charset",
145
156
  "connection_class",
146
157
  "connection_pool",
147
158
  "connection_pool_class",
@@ -151,7 +162,6 @@ REDIS_ALLOWED_KEYS = (
151
162
  "decode_responses",
152
163
  "encoding",
153
164
  "encoding_errors",
154
- "errors",
155
165
  "host",
156
166
  "lib_name",
157
167
  "lib_version",
@@ -175,12 +185,13 @@ REDIS_ALLOWED_KEYS = (
175
185
  "ssl_cert_reqs",
176
186
  "ssl_keyfile",
177
187
  "ssl_password",
188
+ "ssl_check_hostname",
178
189
  "unix_socket_path",
179
190
  "username",
180
191
  "cache",
181
192
  "cache_config",
182
193
  )
183
- KWARGS_DISABLED_KEYS = ("host", "port")
194
+ KWARGS_DISABLED_KEYS = ("host", "port", "retry")
184
195
 
185
196
 
186
197
  def cleanup_kwargs(**kwargs):
@@ -411,7 +422,12 @@ class AbstractRedisCluster:
411
422
  list_keys_to_dict(["SCRIPT FLUSH"], lambda command, res: all(res.values())),
412
423
  )
413
424
 
414
- ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, ClusterDownError)
425
+ ERRORS_ALLOW_RETRY = (
426
+ ConnectionError,
427
+ TimeoutError,
428
+ ClusterDownError,
429
+ SlotNotCoveredError,
430
+ )
415
431
 
416
432
  def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
417
433
  """Replace the default cluster node.
@@ -432,7 +448,7 @@ class AbstractRedisCluster:
432
448
  # Choose a primary if the cluster contains different primaries
433
449
  self.nodes_manager.default_node = random.choice(primaries)
434
450
  else:
435
- # Otherwise, hoose a primary if the cluster contains different primaries
451
+ # Otherwise, choose a primary if the cluster contains different primaries
436
452
  replicas = [node for node in self.get_replicas() if node != curr_node]
437
453
  if replicas:
438
454
  self.nodes_manager.default_node = random.choice(replicas)
@@ -486,7 +502,14 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
486
502
  @deprecated_args(
487
503
  args_to_warn=["read_from_replicas"],
488
504
  reason="Please configure the 'load_balancing_strategy' instead",
489
- version="5.0.3",
505
+ version="5.3.0",
506
+ )
507
+ @deprecated_args(
508
+ args_to_warn=[
509
+ "cluster_error_retry_attempts",
510
+ ],
511
+ reason="Please configure the 'retry' object instead",
512
+ version="6.0.0",
490
513
  )
491
514
  def __init__(
492
515
  self,
@@ -495,7 +518,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
495
518
  startup_nodes: Optional[List["ClusterNode"]] = None,
496
519
  cluster_error_retry_attempts: int = 3,
497
520
  retry: Optional["Retry"] = None,
498
- require_full_coverage: bool = False,
521
+ require_full_coverage: bool = True,
499
522
  reinitialize_steps: int = 5,
500
523
  read_from_replicas: bool = False,
501
524
  load_balancing_strategy: Optional["LoadBalancingStrategy"] = None,
@@ -545,9 +568,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
545
568
  If you use dynamic DNS endpoints for startup nodes but CLUSTER SLOTS lists
546
569
  specific IP addresses, it is best to set it to false.
547
570
  :param cluster_error_retry_attempts:
571
+ @deprecated - Please configure the 'retry' object instead
572
+ In case 'retry' object is set - this argument is ignored!
573
+
548
574
  Number of times to retry before raising an error when
549
- :class:`~.TimeoutError` or :class:`~.ConnectionError` or
575
+ :class:`~.TimeoutError` or :class:`~.ConnectionError`, :class:`~.SlotNotCoveredError` or
550
576
  :class:`~.ClusterDownError` are encountered
577
+ :param retry:
578
+ A retry object that defines the retry strategy and the number of
579
+ retries for the cluster client.
580
+ In current implementation for the cluster client (starting form redis-py version 6.0.0)
581
+ the retry object is not yet fully utilized, instead it is used just to determine
582
+ the number of retries for the cluster client.
583
+ In the future releases the retry object will be used to handle the cluster client retries!
551
584
  :param reinitialize_steps:
552
585
  Specifies the number of MOVED errors that need to occur before
553
586
  reinitializing the whole cluster topology. If a MOVED error occurs
@@ -567,7 +600,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
567
600
 
568
601
  :**kwargs:
569
602
  Extra arguments that will be sent into Redis instance when created
570
- (See Official redis-py doc for supported kwargs
603
+ (See Official redis-py doc for supported kwargs - the only limitation
604
+ is that you can't provide 'retry' object as part of kwargs.
571
605
  [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
572
606
  Some kwargs are not supported and will raise a
573
607
  RedisClusterException:
@@ -582,6 +616,15 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
582
616
  "Argument 'db' is not possible to use in cluster mode"
583
617
  )
584
618
 
619
+ if "retry" in kwargs:
620
+ # Argument 'retry' is not possible to be used in kwargs when in cluster mode
621
+ # the kwargs are set to the lower level connections to the cluster nodes
622
+ # and there we provide retry configuration without retries allowed.
623
+ # The retries should be handled on cluster client level.
624
+ raise RedisClusterException(
625
+ "The 'retry' argument cannot be used in kwargs when running in cluster mode."
626
+ )
627
+
585
628
  # Get the startup node/s
586
629
  from_url = False
587
630
  if url is not None:
@@ -624,9 +667,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
624
667
  kwargs = cleanup_kwargs(**kwargs)
625
668
  if retry:
626
669
  self.retry = retry
627
- kwargs.update({"retry": self.retry})
628
670
  else:
629
- kwargs.update({"retry": Retry(default_backoff(), 0)})
671
+ self.retry = Retry(
672
+ backoff=ExponentialWithJitterBackoff(base=1, cap=10),
673
+ retries=cluster_error_retry_attempts,
674
+ )
630
675
 
631
676
  self.encoder = Encoder(
632
677
  kwargs.get("encoding", "utf-8"),
@@ -637,7 +682,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
637
682
  if (cache_config or cache) and protocol not in [3, "3"]:
638
683
  raise RedisError("Client caching is only supported with RESP version 3")
639
684
 
640
- self.cluster_error_retry_attempts = cluster_error_retry_attempts
641
685
  self.command_flags = self.__class__.COMMAND_FLAGS.copy()
642
686
  self.node_flags = self.__class__.NODE_FLAGS.copy()
643
687
  self.read_from_replicas = read_from_replicas
@@ -709,7 +753,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
709
753
  if self.user_on_connect_func is not None:
710
754
  self.user_on_connect_func(connection)
711
755
 
712
- def get_redis_connection(self, node):
756
+ def get_redis_connection(self, node: "ClusterNode") -> Redis:
713
757
  if not node.redis_connection:
714
758
  with self._lock:
715
759
  if not node.redis_connection:
@@ -768,13 +812,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
768
812
  self.nodes_manager.default_node = node
769
813
  return True
770
814
 
771
- def get_retry(self) -> Optional["Retry"]:
772
- return self.retry
773
-
774
- def set_retry(self, retry: "Retry") -> None:
815
+ def set_retry(self, retry: Retry) -> None:
775
816
  self.retry = retry
776
- for node in self.get_nodes():
777
- node.redis_connection.set_retry(retry)
778
817
 
779
818
  def monitor(self, target_node=None):
780
819
  """
@@ -812,20 +851,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
812
851
  if shard_hint:
813
852
  raise RedisClusterException("shard_hint is deprecated in cluster mode")
814
853
 
815
- if transaction:
816
- raise RedisClusterException("transaction is deprecated in cluster mode")
817
-
818
854
  return ClusterPipeline(
819
855
  nodes_manager=self.nodes_manager,
820
856
  commands_parser=self.commands_parser,
821
857
  startup_nodes=self.nodes_manager.startup_nodes,
822
858
  result_callbacks=self.result_callbacks,
823
859
  cluster_response_callbacks=self.cluster_response_callbacks,
824
- cluster_error_retry_attempts=self.cluster_error_retry_attempts,
860
+ cluster_error_retry_attempts=self.retry.get_retries(),
825
861
  read_from_replicas=self.read_from_replicas,
826
862
  load_balancing_strategy=self.load_balancing_strategy,
827
863
  reinitialize_steps=self.reinitialize_steps,
864
+ retry=self.retry,
828
865
  lock=self._lock,
866
+ transaction=transaction,
829
867
  )
830
868
 
831
869
  def lock(
@@ -987,7 +1025,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
987
1025
  redis_conn = self.get_default_node().redis_connection
988
1026
  return self.commands_parser.get_keys(redis_conn, *args)
989
1027
 
990
- def determine_slot(self, *args):
1028
+ def determine_slot(self, *args) -> int:
991
1029
  """
992
1030
  Figure out what slot to use based on args.
993
1031
 
@@ -1086,8 +1124,8 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
1086
1124
  """
1087
1125
  Wrapper for ERRORS_ALLOW_RETRY error handling.
1088
1126
 
1089
- It will try the number of times specified by the config option
1090
- "self.cluster_error_retry_attempts" which defaults to 3 unless manually
1127
+ It will try the number of times specified by the retries property from
1128
+ config option "self.retry" which defaults to 3 unless manually
1091
1129
  configured.
1092
1130
 
1093
1131
  If it reaches the number of times, the command will raise the exception
@@ -1113,9 +1151,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
1113
1151
  # execution since the nodes may not be valid anymore after the tables
1114
1152
  # were reinitialized. So in case of passed target nodes,
1115
1153
  # retry_attempts will be set to 0.
1116
- retry_attempts = (
1117
- 0 if target_nodes_specified else self.cluster_error_retry_attempts
1118
- )
1154
+ retry_attempts = 0 if target_nodes_specified else self.retry.get_retries()
1119
1155
  # Add one for the first execution
1120
1156
  execute_attempts = 1 + retry_attempts
1121
1157
  for _ in range(execute_attempts):
@@ -1202,8 +1238,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
1202
1238
  except AuthenticationError:
1203
1239
  raise
1204
1240
  except (ConnectionError, TimeoutError) as e:
1205
- # Connection retries are being handled in the node's
1206
- # Retry object.
1207
1241
  # ConnectionError can also be raised if we couldn't get a
1208
1242
  # connection from the pool before timing out, so check that
1209
1243
  # this is an actual connection before attempting to disconnect.
@@ -1240,13 +1274,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
1240
1274
  except AskError as e:
1241
1275
  redirect_addr = get_node_name(host=e.host, port=e.port)
1242
1276
  asking = True
1243
- except ClusterDownError as e:
1277
+ except (ClusterDownError, SlotNotCoveredError):
1244
1278
  # ClusterDownError can occur during a failover and to get
1245
1279
  # self-healed, we will try to reinitialize the cluster layout
1246
1280
  # and retry executing the command
1281
+
1282
+ # SlotNotCoveredError can occur when the cluster is not fully
1283
+ # initialized or can be temporary issue.
1284
+ # We will try to reinitialize the cluster topology
1285
+ # and retry executing the command
1286
+
1247
1287
  time.sleep(0.25)
1248
1288
  self.nodes_manager.initialize()
1249
- raise e
1289
+ raise
1250
1290
  except ResponseError:
1251
1291
  raise
1252
1292
  except Exception as e:
@@ -1298,6 +1338,28 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
1298
1338
  """
1299
1339
  setattr(self, funcname, func)
1300
1340
 
1341
+ def transaction(self, func, *watches, **kwargs):
1342
+ """
1343
+ Convenience method for executing the callable `func` as a transaction
1344
+ while watching all keys specified in `watches`. The 'func' callable
1345
+ should expect a single argument which is a Pipeline object.
1346
+ """
1347
+ shard_hint = kwargs.pop("shard_hint", None)
1348
+ value_from_callable = kwargs.pop("value_from_callable", False)
1349
+ watch_delay = kwargs.pop("watch_delay", None)
1350
+ with self.pipeline(True, shard_hint) as pipe:
1351
+ while True:
1352
+ try:
1353
+ if watches:
1354
+ pipe.watch(*watches)
1355
+ func_value = func(pipe)
1356
+ exec_value = pipe.execute()
1357
+ return func_value if value_from_callable else exec_value
1358
+ except WatchError:
1359
+ if watch_delay is not None and watch_delay > 0:
1360
+ time.sleep(watch_delay)
1361
+ continue
1362
+
1301
1363
 
1302
1364
  class ClusterNode:
1303
1365
  def __init__(self, host, port, server_type=None, redis_connection=None):
@@ -1323,8 +1385,12 @@ class ClusterNode:
1323
1385
  return isinstance(obj, ClusterNode) and obj.name == self.name
1324
1386
 
1325
1387
  def __del__(self):
1326
- if self.redis_connection is not None:
1327
- self.redis_connection.close()
1388
+ try:
1389
+ if self.redis_connection is not None:
1390
+ self.redis_connection.close()
1391
+ except Exception:
1392
+ # Ignore errors when closing the connection
1393
+ pass
1328
1394
 
1329
1395
 
1330
1396
  class LoadBalancingStrategy(Enum):
@@ -1391,7 +1457,7 @@ class NodesManager:
1391
1457
  event_dispatcher: Optional[EventDispatcher] = None,
1392
1458
  **kwargs,
1393
1459
  ):
1394
- self.nodes_cache = {}
1460
+ self.nodes_cache: Dict[str, Redis] = {}
1395
1461
  self.slots_cache = {}
1396
1462
  self.startup_nodes = {}
1397
1463
  self.default_node = None
@@ -1483,7 +1549,7 @@ class NodesManager:
1483
1549
  "In case you need select some load balancing strategy "
1484
1550
  "that will use replicas, please set it through 'load_balancing_strategy'"
1485
1551
  ),
1486
- version="5.0.3",
1552
+ version="5.3.0",
1487
1553
  )
1488
1554
  def get_node_from_slot(
1489
1555
  self,
@@ -1491,7 +1557,7 @@ class NodesManager:
1491
1557
  read_from_replicas=False,
1492
1558
  load_balancing_strategy=None,
1493
1559
  server_type=None,
1494
- ):
1560
+ ) -> ClusterNode:
1495
1561
  """
1496
1562
  Gets a node that servers this hash slot
1497
1563
  """
@@ -1575,17 +1641,32 @@ class NodesManager:
1575
1641
  )
1576
1642
 
1577
1643
  def create_redis_node(self, host, port, **kwargs):
1644
+ # We are configuring the connection pool not to retry
1645
+ # connections on lower level clients to avoid retrying
1646
+ # connections to nodes that are not reachable
1647
+ # and to avoid blocking the connection pool.
1648
+ # The only error that will have some handling in the lower
1649
+ # level clients is ConnectionError which will trigger disconnection
1650
+ # of the socket.
1651
+ # The retries will be handled on cluster client level
1652
+ # where we will have proper handling of the cluster topology
1653
+ node_retry_config = Retry(
1654
+ backoff=NoBackoff(), retries=0, supported_errors=(ConnectionError,)
1655
+ )
1656
+
1578
1657
  if self.from_url:
1579
1658
  # Create a redis node with a costumed connection pool
1580
1659
  kwargs.update({"host": host})
1581
1660
  kwargs.update({"port": port})
1582
1661
  kwargs.update({"cache": self._cache})
1662
+ kwargs.update({"retry": node_retry_config})
1583
1663
  r = Redis(connection_pool=self.connection_pool_class(**kwargs))
1584
1664
  else:
1585
1665
  r = Redis(
1586
1666
  host=host,
1587
1667
  port=port,
1588
1668
  cache=self._cache,
1669
+ retry=node_retry_config,
1589
1670
  **kwargs,
1590
1671
  )
1591
1672
  return r
@@ -1623,7 +1704,9 @@ class NodesManager:
1623
1704
  fully_covered = False
1624
1705
  kwargs = self.connection_kwargs
1625
1706
  exception = None
1626
- for startup_node in self.startup_nodes.values():
1707
+ # Convert to tuple to prevent RuntimeError if self.startup_nodes
1708
+ # is modified during iteration
1709
+ for startup_node in tuple(self.startup_nodes.values()):
1627
1710
  try:
1628
1711
  if startup_node.redis_connection:
1629
1712
  r = startup_node.redis_connection
@@ -1770,6 +1853,16 @@ class NodesManager:
1770
1853
  return self.address_remap((host, port))
1771
1854
  return host, port
1772
1855
 
1856
+ def find_connection_owner(self, connection: Connection) -> Optional[Redis]:
1857
+ node_name = get_node_name(connection.host, connection.port)
1858
+ for node in tuple(self.nodes_cache.values()):
1859
+ if node.redis_connection:
1860
+ conn_args = node.redis_connection.connection_pool.connection_kwargs
1861
+ if node_name == get_node_name(
1862
+ conn_args.get("host"), conn_args.get("port")
1863
+ ):
1864
+ return node
1865
+
1773
1866
 
1774
1867
  class ClusterPubSub(PubSub):
1775
1868
  """
@@ -2029,6 +2122,17 @@ class ClusterPipeline(RedisCluster):
2029
2122
  TryAgainError,
2030
2123
  )
2031
2124
 
2125
+ NO_SLOTS_COMMANDS = {"UNWATCH"}
2126
+ IMMEDIATE_EXECUTE_COMMANDS = {"WATCH", "UNWATCH"}
2127
+ UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
2128
+
2129
+ @deprecated_args(
2130
+ args_to_warn=[
2131
+ "cluster_error_retry_attempts",
2132
+ ],
2133
+ reason="Please configure the 'retry' object instead",
2134
+ version="6.0.0",
2135
+ )
2032
2136
  def __init__(
2033
2137
  self,
2034
2138
  nodes_manager: "NodesManager",
@@ -2040,7 +2144,9 @@ class ClusterPipeline(RedisCluster):
2040
2144
  load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
2041
2145
  cluster_error_retry_attempts: int = 3,
2042
2146
  reinitialize_steps: int = 5,
2147
+ retry: Optional[Retry] = None,
2043
2148
  lock=None,
2149
+ transaction=False,
2044
2150
  **kwargs,
2045
2151
  ):
2046
2152
  """ """
@@ -2056,9 +2162,16 @@ class ClusterPipeline(RedisCluster):
2056
2162
  self.load_balancing_strategy = load_balancing_strategy
2057
2163
  self.command_flags = self.__class__.COMMAND_FLAGS.copy()
2058
2164
  self.cluster_response_callbacks = cluster_response_callbacks
2059
- self.cluster_error_retry_attempts = cluster_error_retry_attempts
2060
2165
  self.reinitialize_counter = 0
2061
2166
  self.reinitialize_steps = reinitialize_steps
2167
+ if retry is not None:
2168
+ self.retry = retry
2169
+ else:
2170
+ self.retry = Retry(
2171
+ backoff=ExponentialWithJitterBackoff(base=1, cap=10),
2172
+ retries=cluster_error_retry_attempts,
2173
+ )
2174
+
2062
2175
  self.encoder = Encoder(
2063
2176
  kwargs.get("encoding", "utf-8"),
2064
2177
  kwargs.get("encoding_errors", "strict"),
@@ -2067,6 +2180,10 @@ class ClusterPipeline(RedisCluster):
2067
2180
  if lock is None:
2068
2181
  lock = threading.Lock()
2069
2182
  self._lock = lock
2183
+ self.parent_execute_command = super().execute_command
2184
+ self._execution_strategy: ExecutionStrategy = (
2185
+ PipelineStrategy(self) if not transaction else TransactionStrategy(self)
2186
+ )
2070
2187
 
2071
2188
  def __repr__(self):
2072
2189
  """ """
@@ -2088,7 +2205,7 @@ class ClusterPipeline(RedisCluster):
2088
2205
 
2089
2206
  def __len__(self):
2090
2207
  """ """
2091
- return len(self.command_stack)
2208
+ return len(self._execution_strategy.command_queue)
2092
2209
 
2093
2210
  def __bool__(self):
2094
2211
  "Pipeline instances should always evaluate to True on Python 3+"
@@ -2098,44 +2215,35 @@ class ClusterPipeline(RedisCluster):
2098
2215
  """
2099
2216
  Wrapper function for pipeline_execute_command
2100
2217
  """
2101
- return self.pipeline_execute_command(*args, **kwargs)
2218
+ return self._execution_strategy.execute_command(*args, **kwargs)
2102
2219
 
2103
2220
  def pipeline_execute_command(self, *args, **options):
2104
2221
  """
2105
- Appends the executed command to the pipeline's command stack
2106
- """
2107
- self.command_stack.append(
2108
- PipelineCommand(args, options, len(self.command_stack))
2109
- )
2110
- return self
2222
+ Stage a command to be executed when execute() is next called
2111
2223
 
2112
- def raise_first_error(self, stack):
2113
- """
2114
- Raise the first exception on the stack
2224
+ Returns the current Pipeline object back so commands can be
2225
+ chained together, such as:
2226
+
2227
+ pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
2228
+
2229
+ At some other point, you can then run: pipe.execute(),
2230
+ which will execute all commands queued in the pipe.
2115
2231
  """
2116
- for c in stack:
2117
- r = c.result
2118
- if isinstance(r, Exception):
2119
- self.annotate_exception(r, c.position + 1, c.args)
2120
- raise r
2232
+ return self._execution_strategy.execute_command(*args, **options)
2121
2233
 
2122
2234
  def annotate_exception(self, exception, number, command):
2123
2235
  """
2124
2236
  Provides extra context to the exception prior to it being handled
2125
2237
  """
2126
- cmd = " ".join(map(safe_str, command))
2127
- msg = (
2128
- f"Command # {number} ({cmd}) of pipeline caused error: {exception.args[0]}"
2129
- )
2130
- exception.args = (msg,) + exception.args[1:]
2238
+ self._execution_strategy.annotate_exception(exception, number, command)
2131
2239
 
2132
2240
  def execute(self, raise_on_error: bool = True) -> List[Any]:
2133
2241
  """
2134
2242
  Execute all the commands in the current pipeline
2135
2243
  """
2136
- stack = self.command_stack
2244
+
2137
2245
  try:
2138
- return self.send_cluster_commands(stack, raise_on_error)
2246
+ return self._execution_strategy.execute(raise_on_error)
2139
2247
  finally:
2140
2248
  self.reset()
2141
2249
 
@@ -2143,312 +2251,53 @@ class ClusterPipeline(RedisCluster):
2143
2251
  """
2144
2252
  Reset back to empty pipeline.
2145
2253
  """
2146
- self.command_stack = []
2147
-
2148
- self.scripts = set()
2149
-
2150
- # TODO: Implement
2151
- # make sure to reset the connection state in the event that we were
2152
- # watching something
2153
- # if self.watching and self.connection:
2154
- # try:
2155
- # # call this manually since our unwatch or
2156
- # # immediate_execute_command methods can call reset()
2157
- # self.connection.send_command('UNWATCH')
2158
- # self.connection.read_response()
2159
- # except ConnectionError:
2160
- # # disconnect will also remove any previous WATCHes
2161
- # self.connection.disconnect()
2162
-
2163
- # clean up the other instance attributes
2164
- self.watching = False
2165
- self.explicit_transaction = False
2166
-
2167
- # TODO: Implement
2168
- # we can safely return the connection to the pool here since we're
2169
- # sure we're no longer WATCHing anything
2170
- # if self.connection:
2171
- # self.connection_pool.release(self.connection)
2172
- # self.connection = None
2254
+ self._execution_strategy.reset()
2173
2255
 
2174
2256
  def send_cluster_commands(
2175
2257
  self, stack, raise_on_error=True, allow_redirections=True
2176
2258
  ):
2177
- """
2178
- Wrapper for CLUSTERDOWN error handling.
2259
+ return self._execution_strategy.send_cluster_commands(
2260
+ stack, raise_on_error=raise_on_error, allow_redirections=allow_redirections
2261
+ )
2179
2262
 
2180
- If the cluster reports it is down it is assumed that:
2181
- - connection_pool was disconnected
2182
- - connection_pool was reseted
2183
- - refereh_table_asap set to True
2263
+ def exists(self, *keys):
2264
+ return self._execution_strategy.exists(*keys)
2184
2265
 
2185
- It will try the number of times specified by
2186
- the config option "self.cluster_error_retry_attempts"
2187
- which defaults to 3 unless manually configured.
2266
+ def eval(self):
2267
+ """ """
2268
+ return self._execution_strategy.eval()
2188
2269
 
2189
- If it reaches the number of times, the command will
2190
- raises ClusterDownException.
2270
+ def multi(self):
2191
2271
  """
2192
- if not stack:
2193
- return []
2194
- retry_attempts = self.cluster_error_retry_attempts
2195
- while True:
2196
- try:
2197
- return self._send_cluster_commands(
2198
- stack,
2199
- raise_on_error=raise_on_error,
2200
- allow_redirections=allow_redirections,
2201
- )
2202
- except RedisCluster.ERRORS_ALLOW_RETRY as e:
2203
- if retry_attempts > 0:
2204
- # Try again with the new cluster setup. All other errors
2205
- # should be raised.
2206
- retry_attempts -= 1
2207
- pass
2208
- else:
2209
- raise e
2210
-
2211
- def _send_cluster_commands(
2212
- self, stack, raise_on_error=True, allow_redirections=True
2213
- ):
2272
+ Start a transactional block of the pipeline after WATCH commands
2273
+ are issued. End the transactional block with `execute`.
2214
2274
  """
2215
- Send a bunch of cluster commands to the redis cluster.
2275
+ self._execution_strategy.multi()
2216
2276
 
2217
- `allow_redirections` If the pipeline should follow
2218
- `ASK` & `MOVED` responses automatically. If set
2219
- to false it will raise RedisClusterException.
2220
- """
2221
- # the first time sending the commands we send all of
2222
- # the commands that were queued up.
2223
- # if we have to run through it again, we only retry
2224
- # the commands that failed.
2225
- attempt = sorted(stack, key=lambda x: x.position)
2226
- is_default_node = False
2227
- # build a list of node objects based on node names we need to
2228
- nodes = {}
2277
+ def load_scripts(self):
2278
+ """ """
2279
+ self._execution_strategy.load_scripts()
2229
2280
 
2230
- # as we move through each command that still needs to be processed,
2231
- # we figure out the slot number that command maps to, then from
2232
- # the slot determine the node.
2233
- for c in attempt:
2234
- while True:
2235
- # refer to our internal node -> slot table that
2236
- # tells us where a given command should route to.
2237
- # (it might be possible we have a cached node that no longer
2238
- # exists in the cluster, which is why we do this in a loop)
2239
- passed_targets = c.options.pop("target_nodes", None)
2240
- if passed_targets and not self._is_nodes_flag(passed_targets):
2241
- target_nodes = self._parse_target_nodes(passed_targets)
2242
- else:
2243
- target_nodes = self._determine_nodes(
2244
- *c.args, node_flag=passed_targets
2245
- )
2246
- if not target_nodes:
2247
- raise RedisClusterException(
2248
- f"No targets were found to execute {c.args} command on"
2249
- )
2250
- if len(target_nodes) > 1:
2251
- raise RedisClusterException(
2252
- f"Too many targets for command {c.args}"
2253
- )
2281
+ def discard(self):
2282
+ """ """
2283
+ self._execution_strategy.discard()
2254
2284
 
2255
- node = target_nodes[0]
2256
- if node == self.get_default_node():
2257
- is_default_node = True
2285
+ def watch(self, *names):
2286
+ """Watches the values at keys ``names``"""
2287
+ self._execution_strategy.watch(*names)
2258
2288
 
2259
- # now that we know the name of the node
2260
- # ( it's just a string in the form of host:port )
2261
- # we can build a list of commands for each node.
2262
- node_name = node.name
2263
- if node_name not in nodes:
2264
- redis_node = self.get_redis_connection(node)
2265
- try:
2266
- connection = get_connection(redis_node)
2267
- except (ConnectionError, TimeoutError):
2268
- for n in nodes.values():
2269
- n.connection_pool.release(n.connection)
2270
- # Connection retries are being handled in the node's
2271
- # Retry object. Reinitialize the node -> slot table.
2272
- self.nodes_manager.initialize()
2273
- if is_default_node:
2274
- self.replace_default_node()
2275
- raise
2276
- nodes[node_name] = NodeCommands(
2277
- redis_node.parse_response,
2278
- redis_node.connection_pool,
2279
- connection,
2280
- )
2281
- nodes[node_name].append(c)
2282
- break
2289
+ def unwatch(self):
2290
+ """Unwatches all previously specified keys"""
2291
+ self._execution_strategy.unwatch()
2283
2292
 
2284
- # send the commands in sequence.
2285
- # we write to all the open sockets for each node first,
2286
- # before reading anything
2287
- # this allows us to flush all the requests out across the
2288
- # network essentially in parallel
2289
- # so that we can read them all in parallel as they come back.
2290
- # we dont' multiplex on the sockets as they come available,
2291
- # but that shouldn't make too much difference.
2292
- node_commands = nodes.values()
2293
- try:
2294
- node_commands = nodes.values()
2295
- for n in node_commands:
2296
- n.write()
2293
+ def script_load_for_pipeline(self, *args, **kwargs):
2294
+ self._execution_strategy.script_load_for_pipeline(*args, **kwargs)
2297
2295
 
2298
- for n in node_commands:
2299
- n.read()
2300
- finally:
2301
- # release all of the redis connections we allocated earlier
2302
- # back into the connection pool.
2303
- # we used to do this step as part of a try/finally block,
2304
- # but it is really dangerous to
2305
- # release connections back into the pool if for some
2306
- # reason the socket has data still left in it
2307
- # from a previous operation. The write and
2308
- # read operations already have try/catch around them for
2309
- # all known types of errors including connection
2310
- # and socket level errors.
2311
- # So if we hit an exception, something really bad
2312
- # happened and putting any oF
2313
- # these connections back into the pool is a very bad idea.
2314
- # the socket might have unread buffer still sitting in it,
2315
- # and then the next time we read from it we pass the
2316
- # buffered result back from a previous command and
2317
- # every single request after to that connection will always get
2318
- # a mismatched result.
2319
- for n in nodes.values():
2320
- n.connection_pool.release(n.connection)
2296
+ def delete(self, *names):
2297
+ self._execution_strategy.delete(*names)
2321
2298
 
2322
- # if the response isn't an exception it is a
2323
- # valid response from the node
2324
- # we're all done with that command, YAY!
2325
- # if we have more commands to attempt, we've run into problems.
2326
- # collect all the commands we are allowed to retry.
2327
- # (MOVED, ASK, or connection errors or timeout errors)
2328
- attempt = sorted(
2329
- (
2330
- c
2331
- for c in attempt
2332
- if isinstance(c.result, ClusterPipeline.ERRORS_ALLOW_RETRY)
2333
- ),
2334
- key=lambda x: x.position,
2335
- )
2336
- if attempt and allow_redirections:
2337
- # RETRY MAGIC HAPPENS HERE!
2338
- # send these remaining commands one at a time using `execute_command`
2339
- # in the main client. This keeps our retry logic
2340
- # in one place mostly,
2341
- # and allows us to be more confident in correctness of behavior.
2342
- # at this point any speed gains from pipelining have been lost
2343
- # anyway, so we might as well make the best
2344
- # attempt to get the correct behavior.
2345
- #
2346
- # The client command will handle retries for each
2347
- # individual command sequentially as we pass each
2348
- # one into `execute_command`. Any exceptions
2349
- # that bubble out should only appear once all
2350
- # retries have been exhausted.
2351
- #
2352
- # If a lot of commands have failed, we'll be setting the
2353
- # flag to rebuild the slots table from scratch.
2354
- # So MOVED errors should correct themselves fairly quickly.
2355
- self.reinitialize_counter += 1
2356
- if self._should_reinitialized():
2357
- self.nodes_manager.initialize()
2358
- if is_default_node:
2359
- self.replace_default_node()
2360
- for c in attempt:
2361
- try:
2362
- # send each command individually like we
2363
- # do in the main client.
2364
- c.result = super().execute_command(*c.args, **c.options)
2365
- except RedisError as e:
2366
- c.result = e
2367
-
2368
- # turn the response back into a simple flat array that corresponds
2369
- # to the sequence of commands issued in the stack in pipeline.execute()
2370
- response = []
2371
- for c in sorted(stack, key=lambda x: x.position):
2372
- if c.args[0] in self.cluster_response_callbacks:
2373
- # Remove keys entry, it needs only for cache.
2374
- c.options.pop("keys", None)
2375
- c.result = self.cluster_response_callbacks[c.args[0]](
2376
- c.result, **c.options
2377
- )
2378
- response.append(c.result)
2379
-
2380
- if raise_on_error:
2381
- self.raise_first_error(stack)
2382
-
2383
- return response
2384
-
2385
- def _fail_on_redirect(self, allow_redirections):
2386
- """ """
2387
- if not allow_redirections:
2388
- raise RedisClusterException(
2389
- "ASK & MOVED redirection not allowed in this pipeline"
2390
- )
2391
-
2392
- def exists(self, *keys):
2393
- return self.execute_command("EXISTS", *keys)
2394
-
2395
- def eval(self):
2396
- """ """
2397
- raise RedisClusterException("method eval() is not implemented")
2398
-
2399
- def multi(self):
2400
- """ """
2401
- raise RedisClusterException("method multi() is not implemented")
2402
-
2403
- def immediate_execute_command(self, *args, **options):
2404
- """ """
2405
- raise RedisClusterException(
2406
- "method immediate_execute_command() is not implemented"
2407
- )
2408
-
2409
- def _execute_transaction(self, *args, **kwargs):
2410
- """ """
2411
- raise RedisClusterException("method _execute_transaction() is not implemented")
2412
-
2413
- def load_scripts(self):
2414
- """ """
2415
- raise RedisClusterException("method load_scripts() is not implemented")
2416
-
2417
- def watch(self, *names):
2418
- """ """
2419
- raise RedisClusterException("method watch() is not implemented")
2420
-
2421
- def unwatch(self):
2422
- """ """
2423
- raise RedisClusterException("method unwatch() is not implemented")
2424
-
2425
- def script_load_for_pipeline(self, *args, **kwargs):
2426
- """ """
2427
- raise RedisClusterException(
2428
- "method script_load_for_pipeline() is not implemented"
2429
- )
2430
-
2431
- def delete(self, *names):
2432
- """
2433
- "Delete a key specified by ``names``"
2434
- """
2435
- if len(names) != 1:
2436
- raise RedisClusterException(
2437
- "deleting multiple keys is not implemented in pipeline command"
2438
- )
2439
-
2440
- return self.execute_command("DEL", names[0])
2441
-
2442
- def unlink(self, *names):
2443
- """
2444
- "Unlink a key specified by ``names``"
2445
- """
2446
- if len(names) != 1:
2447
- raise RedisClusterException(
2448
- "unlinking multiple keys is not implemented in pipeline command"
2449
- )
2450
-
2451
- return self.execute_command("UNLINK", names[0])
2299
+ def unlink(self, *names):
2300
+ self._execution_strategy.unlink(*names)
2452
2301
 
2453
2302
 
2454
2303
  def block_pipeline_command(name: str) -> Callable[..., Any]:
@@ -2625,3 +2474,880 @@ class NodeCommands:
2625
2474
  return
2626
2475
  except RedisError:
2627
2476
  c.result = sys.exc_info()[1]
2477
+
2478
+
2479
+ class ExecutionStrategy(ABC):
2480
+ @property
2481
+ @abstractmethod
2482
+ def command_queue(self):
2483
+ pass
2484
+
2485
+ @abstractmethod
2486
+ def execute_command(self, *args, **kwargs):
2487
+ """
2488
+ Execution flow for current execution strategy.
2489
+
2490
+ See: ClusterPipeline.execute_command()
2491
+ """
2492
+ pass
2493
+
2494
+ @abstractmethod
2495
+ def annotate_exception(self, exception, number, command):
2496
+ """
2497
+ Annotate exception according to current execution strategy.
2498
+
2499
+ See: ClusterPipeline.annotate_exception()
2500
+ """
2501
+ pass
2502
+
2503
+ @abstractmethod
2504
+ def pipeline_execute_command(self, *args, **options):
2505
+ """
2506
+ Pipeline execution flow for current execution strategy.
2507
+
2508
+ See: ClusterPipeline.pipeline_execute_command()
2509
+ """
2510
+ pass
2511
+
2512
+ @abstractmethod
2513
+ def execute(self, raise_on_error: bool = True) -> List[Any]:
2514
+ """
2515
+ Executes current execution strategy.
2516
+
2517
+ See: ClusterPipeline.execute()
2518
+ """
2519
+ pass
2520
+
2521
+ @abstractmethod
2522
+ def send_cluster_commands(
2523
+ self, stack, raise_on_error=True, allow_redirections=True
2524
+ ):
2525
+ """
2526
+ Sends commands according to current execution strategy.
2527
+
2528
+ See: ClusterPipeline.send_cluster_commands()
2529
+ """
2530
+ pass
2531
+
2532
+ @abstractmethod
2533
+ def reset(self):
2534
+ """
2535
+ Resets current execution strategy.
2536
+
2537
+ See: ClusterPipeline.reset()
2538
+ """
2539
+ pass
2540
+
2541
+ @abstractmethod
2542
+ def exists(self, *keys):
2543
+ pass
2544
+
2545
+ @abstractmethod
2546
+ def eval(self):
2547
+ pass
2548
+
2549
+ @abstractmethod
2550
+ def multi(self):
2551
+ """
2552
+ Starts transactional context.
2553
+
2554
+ See: ClusterPipeline.multi()
2555
+ """
2556
+ pass
2557
+
2558
+ @abstractmethod
2559
+ def load_scripts(self):
2560
+ pass
2561
+
2562
+ @abstractmethod
2563
+ def watch(self, *names):
2564
+ pass
2565
+
2566
+ @abstractmethod
2567
+ def unwatch(self):
2568
+ """
2569
+ Unwatches all previously specified keys
2570
+
2571
+ See: ClusterPipeline.unwatch()
2572
+ """
2573
+ pass
2574
+
2575
+ @abstractmethod
2576
+ def script_load_for_pipeline(self, *args, **kwargs):
2577
+ pass
2578
+
2579
+ @abstractmethod
2580
+ def delete(self, *names):
2581
+ """
2582
+ "Delete a key specified by ``names``"
2583
+
2584
+ See: ClusterPipeline.delete()
2585
+ """
2586
+ pass
2587
+
2588
+ @abstractmethod
2589
+ def unlink(self, *names):
2590
+ """
2591
+ "Unlink a key specified by ``names``"
2592
+
2593
+ See: ClusterPipeline.unlink()
2594
+ """
2595
+ pass
2596
+
2597
+ @abstractmethod
2598
+ def discard(self):
2599
+ pass
2600
+
2601
+
2602
+ class AbstractStrategy(ExecutionStrategy):
2603
+ def __init__(
2604
+ self,
2605
+ pipe: ClusterPipeline,
2606
+ ):
2607
+ self._command_queue: List[PipelineCommand] = []
2608
+ self._pipe = pipe
2609
+ self._nodes_manager = self._pipe.nodes_manager
2610
+
2611
+ @property
2612
+ def command_queue(self):
2613
+ return self._command_queue
2614
+
2615
+ @command_queue.setter
2616
+ def command_queue(self, queue: List[PipelineCommand]):
2617
+ self._command_queue = queue
2618
+
2619
+ @abstractmethod
2620
+ def execute_command(self, *args, **kwargs):
2621
+ pass
2622
+
2623
+ def pipeline_execute_command(self, *args, **options):
2624
+ self._command_queue.append(
2625
+ PipelineCommand(args, options, len(self._command_queue))
2626
+ )
2627
+ return self._pipe
2628
+
2629
+ @abstractmethod
2630
+ def execute(self, raise_on_error: bool = True) -> List[Any]:
2631
+ pass
2632
+
2633
+ @abstractmethod
2634
+ def send_cluster_commands(
2635
+ self, stack, raise_on_error=True, allow_redirections=True
2636
+ ):
2637
+ pass
2638
+
2639
+ @abstractmethod
2640
+ def reset(self):
2641
+ pass
2642
+
2643
+ def exists(self, *keys):
2644
+ return self.execute_command("EXISTS", *keys)
2645
+
2646
+ def eval(self):
2647
+ """ """
2648
+ raise RedisClusterException("method eval() is not implemented")
2649
+
2650
+ def load_scripts(self):
2651
+ """ """
2652
+ raise RedisClusterException("method load_scripts() is not implemented")
2653
+
2654
+ def script_load_for_pipeline(self, *args, **kwargs):
2655
+ """ """
2656
+ raise RedisClusterException(
2657
+ "method script_load_for_pipeline() is not implemented"
2658
+ )
2659
+
2660
+ def annotate_exception(self, exception, number, command):
2661
+ """
2662
+ Provides extra context to the exception prior to it being handled
2663
+ """
2664
+ cmd = " ".join(map(safe_str, command))
2665
+ msg = (
2666
+ f"Command # {number} ({truncate_text(cmd)}) of pipeline "
2667
+ f"caused error: {exception.args[0]}"
2668
+ )
2669
+ exception.args = (msg,) + exception.args[1:]
2670
+
2671
+
2672
+ class PipelineStrategy(AbstractStrategy):
2673
+ def __init__(self, pipe: ClusterPipeline):
2674
+ super().__init__(pipe)
2675
+ self.command_flags = pipe.command_flags
2676
+
2677
+ def execute_command(self, *args, **kwargs):
2678
+ return self.pipeline_execute_command(*args, **kwargs)
2679
+
2680
+ def _raise_first_error(self, stack):
2681
+ """
2682
+ Raise the first exception on the stack
2683
+ """
2684
+ for c in stack:
2685
+ r = c.result
2686
+ if isinstance(r, Exception):
2687
+ self.annotate_exception(r, c.position + 1, c.args)
2688
+ raise r
2689
+
2690
+ def execute(self, raise_on_error: bool = True) -> List[Any]:
2691
+ stack = self._command_queue
2692
+ if not stack:
2693
+ return []
2694
+
2695
+ try:
2696
+ return self.send_cluster_commands(stack, raise_on_error)
2697
+ finally:
2698
+ self.reset()
2699
+
2700
+ def reset(self):
2701
+ """
2702
+ Reset back to empty pipeline.
2703
+ """
2704
+ self._command_queue = []
2705
+
2706
+ def send_cluster_commands(
2707
+ self, stack, raise_on_error=True, allow_redirections=True
2708
+ ):
2709
+ """
2710
+ Wrapper for RedisCluster.ERRORS_ALLOW_RETRY errors handling.
2711
+
2712
+ If one of the retryable exceptions has been thrown we assume that:
2713
+ - connection_pool was disconnected
2714
+ - connection_pool was reseted
2715
+ - refereh_table_asap set to True
2716
+
2717
+ It will try the number of times specified by
2718
+ the retries in config option "self.retry"
2719
+ which defaults to 3 unless manually configured.
2720
+
2721
+ If it reaches the number of times, the command will
2722
+ raises ClusterDownException.
2723
+ """
2724
+ if not stack:
2725
+ return []
2726
+ retry_attempts = self._pipe.retry.get_retries()
2727
+ while True:
2728
+ try:
2729
+ return self._send_cluster_commands(
2730
+ stack,
2731
+ raise_on_error=raise_on_error,
2732
+ allow_redirections=allow_redirections,
2733
+ )
2734
+ except RedisCluster.ERRORS_ALLOW_RETRY as e:
2735
+ if retry_attempts > 0:
2736
+ # Try again with the new cluster setup. All other errors
2737
+ # should be raised.
2738
+ retry_attempts -= 1
2739
+ pass
2740
+ else:
2741
+ raise e
2742
+
2743
+ def _send_cluster_commands(
2744
+ self, stack, raise_on_error=True, allow_redirections=True
2745
+ ):
2746
+ """
2747
+ Send a bunch of cluster commands to the redis cluster.
2748
+
2749
+ `allow_redirections` If the pipeline should follow
2750
+ `ASK` & `MOVED` responses automatically. If set
2751
+ to false it will raise RedisClusterException.
2752
+ """
2753
+ # the first time sending the commands we send all of
2754
+ # the commands that were queued up.
2755
+ # if we have to run through it again, we only retry
2756
+ # the commands that failed.
2757
+ attempt = sorted(stack, key=lambda x: x.position)
2758
+ is_default_node = False
2759
+ # build a list of node objects based on node names we need to
2760
+ nodes = {}
2761
+
2762
+ # as we move through each command that still needs to be processed,
2763
+ # we figure out the slot number that command maps to, then from
2764
+ # the slot determine the node.
2765
+ for c in attempt:
2766
+ while True:
2767
+ # refer to our internal node -> slot table that
2768
+ # tells us where a given command should route to.
2769
+ # (it might be possible we have a cached node that no longer
2770
+ # exists in the cluster, which is why we do this in a loop)
2771
+ passed_targets = c.options.pop("target_nodes", None)
2772
+ if passed_targets and not self._is_nodes_flag(passed_targets):
2773
+ target_nodes = self._parse_target_nodes(passed_targets)
2774
+ else:
2775
+ target_nodes = self._determine_nodes(
2776
+ *c.args, node_flag=passed_targets
2777
+ )
2778
+ if not target_nodes:
2779
+ raise RedisClusterException(
2780
+ f"No targets were found to execute {c.args} command on"
2781
+ )
2782
+ if len(target_nodes) > 1:
2783
+ raise RedisClusterException(
2784
+ f"Too many targets for command {c.args}"
2785
+ )
2786
+
2787
+ node = target_nodes[0]
2788
+ if node == self._pipe.get_default_node():
2789
+ is_default_node = True
2790
+
2791
+ # now that we know the name of the node
2792
+ # ( it's just a string in the form of host:port )
2793
+ # we can build a list of commands for each node.
2794
+ node_name = node.name
2795
+ if node_name not in nodes:
2796
+ redis_node = self._pipe.get_redis_connection(node)
2797
+ try:
2798
+ connection = get_connection(redis_node)
2799
+ except (ConnectionError, TimeoutError):
2800
+ for n in nodes.values():
2801
+ n.connection_pool.release(n.connection)
2802
+ # Connection retries are being handled in the node's
2803
+ # Retry object. Reinitialize the node -> slot table.
2804
+ self._nodes_manager.initialize()
2805
+ if is_default_node:
2806
+ self._pipe.replace_default_node()
2807
+ raise
2808
+ nodes[node_name] = NodeCommands(
2809
+ redis_node.parse_response,
2810
+ redis_node.connection_pool,
2811
+ connection,
2812
+ )
2813
+ nodes[node_name].append(c)
2814
+ break
2815
+
2816
+ # send the commands in sequence.
2817
+ # we write to all the open sockets for each node first,
2818
+ # before reading anything
2819
+ # this allows us to flush all the requests out across the
2820
+ # network
2821
+ # so that we can read them from different sockets as they come back.
2822
+ # we dont' multiplex on the sockets as they come available,
2823
+ # but that shouldn't make too much difference.
2824
+ try:
2825
+ node_commands = nodes.values()
2826
+ for n in node_commands:
2827
+ n.write()
2828
+
2829
+ for n in node_commands:
2830
+ n.read()
2831
+ finally:
2832
+ # release all of the redis connections we allocated earlier
2833
+ # back into the connection pool.
2834
+ # we used to do this step as part of a try/finally block,
2835
+ # but it is really dangerous to
2836
+ # release connections back into the pool if for some
2837
+ # reason the socket has data still left in it
2838
+ # from a previous operation. The write and
2839
+ # read operations already have try/catch around them for
2840
+ # all known types of errors including connection
2841
+ # and socket level errors.
2842
+ # So if we hit an exception, something really bad
2843
+ # happened and putting any oF
2844
+ # these connections back into the pool is a very bad idea.
2845
+ # the socket might have unread buffer still sitting in it,
2846
+ # and then the next time we read from it we pass the
2847
+ # buffered result back from a previous command and
2848
+ # every single request after to that connection will always get
2849
+ # a mismatched result.
2850
+ for n in nodes.values():
2851
+ n.connection_pool.release(n.connection)
2852
+
2853
+ # if the response isn't an exception it is a
2854
+ # valid response from the node
2855
+ # we're all done with that command, YAY!
2856
+ # if we have more commands to attempt, we've run into problems.
2857
+ # collect all the commands we are allowed to retry.
2858
+ # (MOVED, ASK, or connection errors or timeout errors)
2859
+ attempt = sorted(
2860
+ (
2861
+ c
2862
+ for c in attempt
2863
+ if isinstance(c.result, ClusterPipeline.ERRORS_ALLOW_RETRY)
2864
+ ),
2865
+ key=lambda x: x.position,
2866
+ )
2867
+ if attempt and allow_redirections:
2868
+ # RETRY MAGIC HAPPENS HERE!
2869
+ # send these remaining commands one at a time using `execute_command`
2870
+ # in the main client. This keeps our retry logic
2871
+ # in one place mostly,
2872
+ # and allows us to be more confident in correctness of behavior.
2873
+ # at this point any speed gains from pipelining have been lost
2874
+ # anyway, so we might as well make the best
2875
+ # attempt to get the correct behavior.
2876
+ #
2877
+ # The client command will handle retries for each
2878
+ # individual command sequentially as we pass each
2879
+ # one into `execute_command`. Any exceptions
2880
+ # that bubble out should only appear once all
2881
+ # retries have been exhausted.
2882
+ #
2883
+ # If a lot of commands have failed, we'll be setting the
2884
+ # flag to rebuild the slots table from scratch.
2885
+ # So MOVED errors should correct themselves fairly quickly.
2886
+ self._pipe.reinitialize_counter += 1
2887
+ if self._pipe._should_reinitialized():
2888
+ self._nodes_manager.initialize()
2889
+ if is_default_node:
2890
+ self._pipe.replace_default_node()
2891
+ for c in attempt:
2892
+ try:
2893
+ # send each command individually like we
2894
+ # do in the main client.
2895
+ c.result = self._pipe.parent_execute_command(*c.args, **c.options)
2896
+ except RedisError as e:
2897
+ c.result = e
2898
+
2899
+ # turn the response back into a simple flat array that corresponds
2900
+ # to the sequence of commands issued in the stack in pipeline.execute()
2901
+ response = []
2902
+ for c in sorted(stack, key=lambda x: x.position):
2903
+ if c.args[0] in self._pipe.cluster_response_callbacks:
2904
+ # Remove keys entry, it needs only for cache.
2905
+ c.options.pop("keys", None)
2906
+ c.result = self._pipe.cluster_response_callbacks[c.args[0]](
2907
+ c.result, **c.options
2908
+ )
2909
+ response.append(c.result)
2910
+
2911
+ if raise_on_error:
2912
+ self._raise_first_error(stack)
2913
+
2914
+ return response
2915
+
2916
+ def _is_nodes_flag(self, target_nodes):
2917
+ return isinstance(target_nodes, str) and target_nodes in self._pipe.node_flags
2918
+
2919
+ def _parse_target_nodes(self, target_nodes):
2920
+ if isinstance(target_nodes, list):
2921
+ nodes = target_nodes
2922
+ elif isinstance(target_nodes, ClusterNode):
2923
+ # Supports passing a single ClusterNode as a variable
2924
+ nodes = [target_nodes]
2925
+ elif isinstance(target_nodes, dict):
2926
+ # Supports dictionaries of the format {node_name: node}.
2927
+ # It enables to execute commands with multi nodes as follows:
2928
+ # rc.cluster_save_config(rc.get_primaries())
2929
+ nodes = target_nodes.values()
2930
+ else:
2931
+ raise TypeError(
2932
+ "target_nodes type can be one of the following: "
2933
+ "node_flag (PRIMARIES, REPLICAS, RANDOM, ALL_NODES),"
2934
+ "ClusterNode, list<ClusterNode>, or dict<any, ClusterNode>. "
2935
+ f"The passed type is {type(target_nodes)}"
2936
+ )
2937
+ return nodes
2938
+
2939
+ def _determine_nodes(self, *args, **kwargs) -> List["ClusterNode"]:
2940
+ # Determine which nodes should be executed the command on.
2941
+ # Returns a list of target nodes.
2942
+ command = args[0].upper()
2943
+ if (
2944
+ len(args) >= 2
2945
+ and f"{args[0]} {args[1]}".upper() in self._pipe.command_flags
2946
+ ):
2947
+ command = f"{args[0]} {args[1]}".upper()
2948
+
2949
+ nodes_flag = kwargs.pop("nodes_flag", None)
2950
+ if nodes_flag is not None:
2951
+ # nodes flag passed by the user
2952
+ command_flag = nodes_flag
2953
+ else:
2954
+ # get the nodes group for this command if it was predefined
2955
+ command_flag = self._pipe.command_flags.get(command)
2956
+ if command_flag == self._pipe.RANDOM:
2957
+ # return a random node
2958
+ return [self._pipe.get_random_node()]
2959
+ elif command_flag == self._pipe.PRIMARIES:
2960
+ # return all primaries
2961
+ return self._pipe.get_primaries()
2962
+ elif command_flag == self._pipe.REPLICAS:
2963
+ # return all replicas
2964
+ return self._pipe.get_replicas()
2965
+ elif command_flag == self._pipe.ALL_NODES:
2966
+ # return all nodes
2967
+ return self._pipe.get_nodes()
2968
+ elif command_flag == self._pipe.DEFAULT_NODE:
2969
+ # return the cluster's default node
2970
+ return [self._nodes_manager.default_node]
2971
+ elif command in self._pipe.SEARCH_COMMANDS[0]:
2972
+ return [self._nodes_manager.default_node]
2973
+ else:
2974
+ # get the node that holds the key's slot
2975
+ slot = self._pipe.determine_slot(*args)
2976
+ node = self._nodes_manager.get_node_from_slot(
2977
+ slot,
2978
+ self._pipe.read_from_replicas and command in READ_COMMANDS,
2979
+ self._pipe.load_balancing_strategy
2980
+ if command in READ_COMMANDS
2981
+ else None,
2982
+ )
2983
+ return [node]
2984
+
2985
+ def multi(self):
2986
+ raise RedisClusterException(
2987
+ "method multi() is not supported outside of transactional context"
2988
+ )
2989
+
2990
+ def discard(self):
2991
+ raise RedisClusterException(
2992
+ "method discard() is not supported outside of transactional context"
2993
+ )
2994
+
2995
+ def watch(self, *names):
2996
+ raise RedisClusterException(
2997
+ "method watch() is not supported outside of transactional context"
2998
+ )
2999
+
3000
+ def unwatch(self, *names):
3001
+ raise RedisClusterException(
3002
+ "method unwatch() is not supported outside of transactional context"
3003
+ )
3004
+
3005
+ def delete(self, *names):
3006
+ if len(names) != 1:
3007
+ raise RedisClusterException(
3008
+ "deleting multiple keys is not implemented in pipeline command"
3009
+ )
3010
+
3011
+ return self.execute_command("DEL", names[0])
3012
+
3013
+ def unlink(self, *names):
3014
+ if len(names) != 1:
3015
+ raise RedisClusterException(
3016
+ "unlinking multiple keys is not implemented in pipeline command"
3017
+ )
3018
+
3019
+ return self.execute_command("UNLINK", names[0])
3020
+
3021
+
3022
+ class TransactionStrategy(AbstractStrategy):
3023
+ NO_SLOTS_COMMANDS = {"UNWATCH"}
3024
+ IMMEDIATE_EXECUTE_COMMANDS = {"WATCH", "UNWATCH"}
3025
+ UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
3026
+ SLOT_REDIRECT_ERRORS = (AskError, MovedError)
3027
+ CONNECTION_ERRORS = (
3028
+ ConnectionError,
3029
+ OSError,
3030
+ ClusterDownError,
3031
+ SlotNotCoveredError,
3032
+ )
3033
+
3034
+ def __init__(self, pipe: ClusterPipeline):
3035
+ super().__init__(pipe)
3036
+ self._explicit_transaction = False
3037
+ self._watching = False
3038
+ self._pipeline_slots: Set[int] = set()
3039
+ self._transaction_connection: Optional[Connection] = None
3040
+ self._executing = False
3041
+ self._retry = copy(self._pipe.retry)
3042
+ self._retry.update_supported_errors(
3043
+ RedisCluster.ERRORS_ALLOW_RETRY + self.SLOT_REDIRECT_ERRORS
3044
+ )
3045
+
3046
+ def _get_client_and_connection_for_transaction(self) -> Tuple[Redis, Connection]:
3047
+ """
3048
+ Find a connection for a pipeline transaction.
3049
+
3050
+ For running an atomic transaction, watch keys ensure that contents have not been
3051
+ altered as long as the watch commands for those keys were sent over the same
3052
+ connection. So once we start watching a key, we fetch a connection to the
3053
+ node that owns that slot and reuse it.
3054
+ """
3055
+ if not self._pipeline_slots:
3056
+ raise RedisClusterException(
3057
+ "At least a command with a key is needed to identify a node"
3058
+ )
3059
+
3060
+ node: ClusterNode = self._nodes_manager.get_node_from_slot(
3061
+ list(self._pipeline_slots)[0], False
3062
+ )
3063
+ redis_node: Redis = self._pipe.get_redis_connection(node)
3064
+ if self._transaction_connection:
3065
+ if not redis_node.connection_pool.owns_connection(
3066
+ self._transaction_connection
3067
+ ):
3068
+ previous_node = self._nodes_manager.find_connection_owner(
3069
+ self._transaction_connection
3070
+ )
3071
+ previous_node.connection_pool.release(self._transaction_connection)
3072
+ self._transaction_connection = None
3073
+
3074
+ if not self._transaction_connection:
3075
+ self._transaction_connection = get_connection(redis_node)
3076
+
3077
+ return redis_node, self._transaction_connection
3078
+
3079
+ def execute_command(self, *args, **kwargs):
3080
+ slot_number: Optional[int] = None
3081
+ if args[0] not in ClusterPipeline.NO_SLOTS_COMMANDS:
3082
+ slot_number = self._pipe.determine_slot(*args)
3083
+
3084
+ if (
3085
+ self._watching or args[0] in self.IMMEDIATE_EXECUTE_COMMANDS
3086
+ ) and not self._explicit_transaction:
3087
+ if args[0] == "WATCH":
3088
+ self._validate_watch()
3089
+
3090
+ if slot_number is not None:
3091
+ if self._pipeline_slots and slot_number not in self._pipeline_slots:
3092
+ raise CrossSlotTransactionError(
3093
+ "Cannot watch or send commands on different slots"
3094
+ )
3095
+
3096
+ self._pipeline_slots.add(slot_number)
3097
+ elif args[0] not in self.NO_SLOTS_COMMANDS:
3098
+ raise RedisClusterException(
3099
+ f"Cannot identify slot number for command: {args[0]},"
3100
+ "it cannot be triggered in a transaction"
3101
+ )
3102
+
3103
+ return self._immediate_execute_command(*args, **kwargs)
3104
+ else:
3105
+ if slot_number is not None:
3106
+ self._pipeline_slots.add(slot_number)
3107
+
3108
+ return self.pipeline_execute_command(*args, **kwargs)
3109
+
3110
+ def _validate_watch(self):
3111
+ if self._explicit_transaction:
3112
+ raise RedisError("Cannot issue a WATCH after a MULTI")
3113
+
3114
+ self._watching = True
3115
+
3116
+ def _immediate_execute_command(self, *args, **options):
3117
+ return self._retry.call_with_retry(
3118
+ lambda: self._get_connection_and_send_command(*args, **options),
3119
+ self._reinitialize_on_error,
3120
+ )
3121
+
3122
+ def _get_connection_and_send_command(self, *args, **options):
3123
+ redis_node, connection = self._get_client_and_connection_for_transaction()
3124
+ return self._send_command_parse_response(
3125
+ connection, redis_node, args[0], *args, **options
3126
+ )
3127
+
3128
+ def _send_command_parse_response(
3129
+ self, conn, redis_node: Redis, command_name, *args, **options
3130
+ ):
3131
+ """
3132
+ Send a command and parse the response
3133
+ """
3134
+
3135
+ conn.send_command(*args)
3136
+ output = redis_node.parse_response(conn, command_name, **options)
3137
+
3138
+ if command_name in self.UNWATCH_COMMANDS:
3139
+ self._watching = False
3140
+ return output
3141
+
3142
+ def _reinitialize_on_error(self, error):
3143
+ if self._watching:
3144
+ if type(error) in self.SLOT_REDIRECT_ERRORS and self._executing:
3145
+ raise WatchError("Slot rebalancing occurred while watching keys")
3146
+
3147
+ if (
3148
+ type(error) in self.SLOT_REDIRECT_ERRORS
3149
+ or type(error) in self.CONNECTION_ERRORS
3150
+ ):
3151
+ if self._transaction_connection:
3152
+ self._transaction_connection = None
3153
+
3154
+ self._pipe.reinitialize_counter += 1
3155
+ if self._pipe._should_reinitialized():
3156
+ self._nodes_manager.initialize()
3157
+ self.reinitialize_counter = 0
3158
+ else:
3159
+ self._nodes_manager.update_moved_exception(error)
3160
+
3161
+ self._executing = False
3162
+
3163
+ def _raise_first_error(self, responses, stack):
3164
+ """
3165
+ Raise the first exception on the stack
3166
+ """
3167
+ for r, cmd in zip(responses, stack):
3168
+ if isinstance(r, Exception):
3169
+ self.annotate_exception(r, cmd.position + 1, cmd.args)
3170
+ raise r
3171
+
3172
+ def execute(self, raise_on_error: bool = True) -> List[Any]:
3173
+ stack = self._command_queue
3174
+ if not stack and (not self._watching or not self._pipeline_slots):
3175
+ return []
3176
+
3177
+ return self._execute_transaction_with_retries(stack, raise_on_error)
3178
+
3179
+ def _execute_transaction_with_retries(
3180
+ self, stack: List["PipelineCommand"], raise_on_error: bool
3181
+ ):
3182
+ return self._retry.call_with_retry(
3183
+ lambda: self._execute_transaction(stack, raise_on_error),
3184
+ self._reinitialize_on_error,
3185
+ )
3186
+
3187
+ def _execute_transaction(
3188
+ self, stack: List["PipelineCommand"], raise_on_error: bool
3189
+ ):
3190
+ if len(self._pipeline_slots) > 1:
3191
+ raise CrossSlotTransactionError(
3192
+ "All keys involved in a cluster transaction must map to the same slot"
3193
+ )
3194
+
3195
+ self._executing = True
3196
+
3197
+ redis_node, connection = self._get_client_and_connection_for_transaction()
3198
+
3199
+ stack = chain(
3200
+ [PipelineCommand(("MULTI",))],
3201
+ stack,
3202
+ [PipelineCommand(("EXEC",))],
3203
+ )
3204
+ commands = [c.args for c in stack if EMPTY_RESPONSE not in c.options]
3205
+ packed_commands = connection.pack_commands(commands)
3206
+ connection.send_packed_command(packed_commands)
3207
+ errors = []
3208
+
3209
+ # parse off the response for MULTI
3210
+ # NOTE: we need to handle ResponseErrors here and continue
3211
+ # so that we read all the additional command messages from
3212
+ # the socket
3213
+ try:
3214
+ redis_node.parse_response(connection, "MULTI")
3215
+ except ResponseError as e:
3216
+ self.annotate_exception(e, 0, "MULTI")
3217
+ errors.append(e)
3218
+ except self.CONNECTION_ERRORS as cluster_error:
3219
+ self.annotate_exception(cluster_error, 0, "MULTI")
3220
+ raise
3221
+
3222
+ # and all the other commands
3223
+ for i, command in enumerate(self._command_queue):
3224
+ if EMPTY_RESPONSE in command.options:
3225
+ errors.append((i, command.options[EMPTY_RESPONSE]))
3226
+ else:
3227
+ try:
3228
+ _ = redis_node.parse_response(connection, "_")
3229
+ except self.SLOT_REDIRECT_ERRORS as slot_error:
3230
+ self.annotate_exception(slot_error, i + 1, command.args)
3231
+ errors.append(slot_error)
3232
+ except self.CONNECTION_ERRORS as cluster_error:
3233
+ self.annotate_exception(cluster_error, i + 1, command.args)
3234
+ raise
3235
+ except ResponseError as e:
3236
+ self.annotate_exception(e, i + 1, command.args)
3237
+ errors.append(e)
3238
+
3239
+ response = None
3240
+ # parse the EXEC.
3241
+ try:
3242
+ response = redis_node.parse_response(connection, "EXEC")
3243
+ except ExecAbortError:
3244
+ if errors:
3245
+ raise errors[0]
3246
+ raise
3247
+
3248
+ self._executing = False
3249
+
3250
+ # EXEC clears any watched keys
3251
+ self._watching = False
3252
+
3253
+ if response is None:
3254
+ raise WatchError("Watched variable changed.")
3255
+
3256
+ # put any parse errors into the response
3257
+ for i, e in errors:
3258
+ response.insert(i, e)
3259
+
3260
+ if len(response) != len(self._command_queue):
3261
+ raise InvalidPipelineStack(
3262
+ "Unexpected response length for cluster pipeline EXEC."
3263
+ " Command stack was {} but response had length {}".format(
3264
+ [c.args[0] for c in self._command_queue], len(response)
3265
+ )
3266
+ )
3267
+
3268
+ # find any errors in the response and raise if necessary
3269
+ if raise_on_error or len(errors) > 0:
3270
+ self._raise_first_error(
3271
+ response,
3272
+ self._command_queue,
3273
+ )
3274
+
3275
+ # We have to run response callbacks manually
3276
+ data = []
3277
+ for r, cmd in zip(response, self._command_queue):
3278
+ if not isinstance(r, Exception):
3279
+ command_name = cmd.args[0]
3280
+ if command_name in self._pipe.cluster_response_callbacks:
3281
+ r = self._pipe.cluster_response_callbacks[command_name](
3282
+ r, **cmd.options
3283
+ )
3284
+ data.append(r)
3285
+ return data
3286
+
3287
def reset(self):
    """Empty the queued commands and restore the pipeline's initial state.

    If a connection is being held open for WATCH, issue UNWATCH on it and
    hand it back to its owning node's pool; on a connection error, simply
    drop the socket (disconnecting also clears WATCHes server-side).
    """
    self._command_queue = []

    # A held connection means we may be WATCHing something; it must be
    # unwatched before it can safely go back to the pool.
    if self._transaction_connection:
        try:
            # Issued by hand because unwatch()/immediate_execute_command()
            # can themselves end up calling reset().
            self._transaction_connection.send_command("UNWATCH")
            self._transaction_connection.read_response()
            # No WATCHes remain, so releasing the connection is safe.
            owner = self._nodes_manager.find_connection_owner(
                self._transaction_connection
            )
            owner.redis_connection.connection_pool.release(
                self._transaction_connection
            )
            self._transaction_connection = None
        except self.CONNECTION_ERRORS:
            # Dropping the socket also removes any previous WATCHes.
            if self._transaction_connection:
                self._transaction_connection.disconnect()

    # Clear the remaining transaction bookkeeping.
    self._executing = False
    self._watching = False
    self._explicit_transaction = False
    self._pipeline_slots = set()
3317
+
3318
def send_cluster_commands(
    self, stack, raise_on_error=True, allow_redirections=True
):
    """Unsupported here: a transaction never fans commands out across nodes.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError(
        "send_cluster_commands cannot be executed in transactional context."
    )
3324
+
3325
def multi(self):
    """Start buffering commands for an explicit MULTI/EXEC transaction.

    Raises:
        RedisError: if MULTI is nested, or if commands were already
            queued without an initial WATCH.
    """
    if self._explicit_transaction:
        raise RedisError("Cannot issue nested calls to MULTI")
    if self._command_queue:
        raise RedisError(
            "Commands without an initial WATCH have already been issued"
        )
    self._explicit_transaction = True
3333
+
3334
def watch(self, *names):
    """Mark the given keys as WATCHed for the upcoming transaction.

    Raises:
        RedisError: if called after MULTI has already been issued.
    """
    if self._explicit_transaction:
        raise RedisError("Cannot issue a WATCH after a MULTI")
    return self.execute_command("WATCH", *names)
3339
+
3340
def unwatch(self):
    """Forget all WATCHed keys; a no-op (returning True) if none are watched."""
    if not self._watching:
        return True
    return self.execute_command("UNWATCH")
3345
+
3346
def discard(self):
    """Throw away all buffered commands and any WATCH/transaction state."""
    self.reset()
3348
+
3349
def delete(self, *names):
    """Queue a DEL of the given keys."""
    return self.execute_command("DEL", *names)
3351
+
3352
def unlink(self, *names):
    """Queue an UNLINK (non-blocking delete) of the given keys."""
    return self.execute_command("UNLINK", *names)