redis-5.3.0b4-py3-none-any.whl → redis-5.3.1-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry they were published to. It is provided for informational purposes only.
redis/asyncio/client.py CHANGED
@@ -375,7 +375,7 @@ class Redis(
  if self.single_connection_client:
  async with self._single_conn_lock:
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection("_")
+ self.connection = await self.connection_pool.get_connection()

  self._event_dispatcher.dispatch(
  AfterSingleConnectionInstantiationEvent(
@@ -638,7 +638,7 @@ class Redis(
  await self.initialize()
  pool = self.connection_pool
  command_name = args[0]
- conn = self.connection or await pool.get_connection(command_name, **options)
+ conn = self.connection or await pool.get_connection()

  if self.single_connection_client:
  await self._single_conn_lock.acquire()
@@ -712,7 +712,7 @@ class Monitor:

  async def connect(self):
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection("MONITOR")
+ self.connection = await self.connection_pool.get_connection()

  async def __aenter__(self):
  await self.connect()
@@ -900,9 +900,7 @@ class PubSub:
  Ensure that the PubSub is connected
  """
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection(
- "pubsub", self.shard_hint
- )
+ self.connection = await self.connection_pool.get_connection()
  # register a callback that re-subscribes to any channels we
  # were listening to when we were disconnected
  self.connection.register_connect_callback(self.on_connect)
@@ -1370,9 +1368,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  conn = self.connection
  # if this is the first call, we need a connection
  if not conn:
- conn = await self.connection_pool.get_connection(
- command_name, self.shard_hint
- )
+ conn = await self.connection_pool.get_connection()
  self.connection = conn

  return await conn.retry.call_with_retry(
@@ -1554,7 +1550,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  await self.reset()
  raise

- async def execute(self, raise_on_error: bool = True):
+ async def execute(self, raise_on_error: bool = True) -> List[Any]:
  """Execute all the commands in the current pipeline"""
  stack = self.command_stack
  if not stack and not self.watching:
@@ -1568,7 +1564,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]

  conn = self.connection
  if not conn:
- conn = await self.connection_pool.get_connection("MULTI", self.shard_hint)
+ conn = await self.connection_pool.get_connection()
  # assign to self.connection so reset() releases the connection
  # back to the pool after we're done
  self.connection = conn
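
For context, a minimal sketch of the new pool call pattern on the async side (host, port and variable names are illustrative, not taken from the diff); passing a command name still works in 5.3.x but now triggers a DeprecationWarning:

    import asyncio

    from redis.asyncio import ConnectionPool

    async def main() -> None:
        pool = ConnectionPool(host="localhost", port=6379)
        # 5.3.x style: no command name is passed to the pool.
        conn = await pool.get_connection()
        try:
            await conn.send_command("PING")
            print(await conn.read_response())
        finally:
            await pool.release(conn)
            await pool.disconnect()

    asyncio.run(main())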
redis/asyncio/cluster.py CHANGED
@@ -39,6 +39,7 @@ from redis.cluster import (
  SLOT_ID,
  AbstractRedisCluster,
  LoadBalancer,
+ LoadBalancingStrategy,
  block_pipeline_command,
  get_node_name,
  parse_cluster_slots,
@@ -67,6 +68,7 @@ from redis.exceptions import (
  )
  from redis.typing import AnyKeyT, EncodableT, KeyT
  from redis.utils import (
+ deprecated_args,
  deprecated_function,
  dict_merge,
  get_lib_version,
@@ -133,9 +135,17 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  | See:
  https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters
  :param read_from_replicas:
- | Enable read from replicas in READONLY mode. You can read possibly stale data.
+ | @deprecated - please use load_balancing_strategy instead
+ | Enable read from replicas in READONLY mode.
  When set to true, read commands will be assigned between the primary and
  its replications in a Round-Robin manner.
+ The data read from replicas is eventually consistent
+ with the data in primary nodes.
+ :param load_balancing_strategy:
+ | Enable read from replicas in READONLY mode and defines the load balancing
+ strategy that will be used for cluster node selection.
+ The data read from replicas is eventually consistent
+ with the data in primary nodes.
  :param reinitialize_steps:
  | Specifies the number of MOVED errors that need to occur before reinitializing
  the whole cluster topology. If a MOVED error occurs and the cluster does not
@@ -228,6 +238,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  "result_callbacks",
  )

+ @deprecated_args(
+ args_to_warn=["read_from_replicas"],
+ reason="Please configure the 'load_balancing_strategy' instead",
+ version="5.3.0",
+ )
  def __init__(
  self,
  host: Optional[str] = None,
@@ -236,6 +251,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  startup_nodes: Optional[List["ClusterNode"]] = None,
  require_full_coverage: bool = True,
  read_from_replicas: bool = False,
+ load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
  reinitialize_steps: int = 5,
  cluster_error_retry_attempts: int = 3,
  connection_error_retry_attempts: int = 3,
@@ -335,7 +351,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  }
  )

- if read_from_replicas:
+ if read_from_replicas or load_balancing_strategy:
  # Call our on_connect function to configure READONLY mode
  kwargs["redis_connect_func"] = self.on_connect

@@ -384,6 +400,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  )
  self.encoder = Encoder(encoding, encoding_errors, decode_responses)
  self.read_from_replicas = read_from_replicas
+ self.load_balancing_strategy = load_balancing_strategy
  self.reinitialize_steps = reinitialize_steps
  self.cluster_error_retry_attempts = cluster_error_retry_attempts
  self.connection_error_retry_attempts = connection_error_retry_attempts
@@ -602,6 +619,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  self.nodes_manager.get_node_from_slot(
  await self._determine_slot(command, *args),
  self.read_from_replicas and command in READ_COMMANDS,
+ self.load_balancing_strategy if command in READ_COMMANDS else None,
  )
  ]

@@ -782,7 +800,13 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # refresh the target node
  slot = await self._determine_slot(*args)
  target_node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and args[0] in READ_COMMANDS
+ slot,
+ self.read_from_replicas and args[0] in READ_COMMANDS,
+ (
+ self.load_balancing_strategy
+ if args[0] in READ_COMMANDS
+ else None
+ ),
  )
  moved = False

@@ -799,10 +823,16 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # and try again with the new setup
  await self.aclose()
  raise
- except ClusterDownError:
+ except (ClusterDownError, SlotNotCoveredError):
  # ClusterDownError can occur during a failover and to get
  # self-healed, we will try to reinitialize the cluster layout
  # and retry executing the command
+
+ # SlotNotCoveredError can occur when the cluster is not fully
+ # initialized or can be temporary issue.
+ # We will try to reinitialize the cluster topology
+ # and retry executing the command
+
  await self.aclose()
  await asyncio.sleep(0.25)
  raise
@@ -1177,9 +1207,7 @@ class NodesManager:
  return self.nodes_cache.get(node_name)
  else:
  raise DataError(
- "get_node requires one of the following: "
- "1. node name "
- "2. host and port"
+ "get_node requires one of the following: 1. node name 2. host and port"
  )

  def set_nodes(
@@ -1239,17 +1267,24 @@ class NodesManager:
  self._moved_exception = None

  def get_node_from_slot(
- self, slot: int, read_from_replicas: bool = False
+ self,
+ slot: int,
+ read_from_replicas: bool = False,
+ load_balancing_strategy=None,
  ) -> "ClusterNode":
  if self._moved_exception:
  self._update_moved_slots()

+ if read_from_replicas is True and load_balancing_strategy is None:
+ load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
  try:
- if read_from_replicas:
- # get the server index in a Round-Robin manner
+ if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+ # get the server index using the strategy defined
+ # in load_balancing_strategy
  primary_name = self.slots_cache[slot][0].name
  node_idx = self.read_load_balancer.get_server_index(
- primary_name, len(self.slots_cache[slot])
+ primary_name, len(self.slots_cache[slot]), load_balancing_strategy
  )
  return self.slots_cache[slot][node_idx]
  return self.slots_cache[slot][0]
@@ -1361,7 +1396,7 @@ class NodesManager:
  if len(disagreements) > 5:
  raise RedisClusterException(
  f"startup_nodes could not agree on a valid "
- f'slots cache: {", ".join(disagreements)}'
+ f"slots cache: {', '.join(disagreements)}"
  )

  # Validate if all slots are covered or if we should try next startup node
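
To illustrate the load_balancing_strategy parameter documented above, a hedged sketch of an async cluster client that lets reads be served by replicas (the startup node address is a placeholder):

    import asyncio

    from redis.asyncio.cluster import RedisCluster
    from redis.cluster import LoadBalancingStrategy

    async def main() -> None:
        # Reads may be served by replicas and are eventually consistent.
        rc = RedisCluster(
            host="cluster.example.local",  # placeholder startup node
            port=6379,
            load_balancing_strategy=LoadBalancingStrategy.RANDOM_REPLICA,
        )
        await rc.set("user:1", "alice")
        print(await rc.get("user:1"))  # read command, eligible for a replica
        await rc.aclose()

    asyncio.run(main())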
redis/asyncio/connection.py CHANGED
@@ -29,7 +29,7 @@ from urllib.parse import ParseResult, parse_qs, unquote, urlparse

  from ..auth.token import TokenInterface
  from ..event import AsyncAfterConnectionReleasedEvent, EventDispatcher
- from ..utils import format_error_message
+ from ..utils import deprecated_args, format_error_message

  # the functionality is available in 3.11.x but has a major issue before
  # 3.11.3. See https://github.com/redis/redis-py/issues/2633
@@ -1087,7 +1087,12 @@ class ConnectionPool:
  or len(self._in_use_connections) < self.max_connections
  )

- async def get_connection(self, command_name, *keys, **options):
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.3.0",
+ )
+ async def get_connection(self, command_name=None, *keys, **options):
  async with self._lock:
  """Get a connected connection from the pool"""
  connection = self.get_available_connection()
@@ -1255,7 +1260,12 @@ class BlockingConnectionPool(ConnectionPool):
  self._condition = asyncio.Condition()
  self.timeout = timeout

- async def get_connection(self, command_name, *keys, **options):
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.3.0",
+ )
+ async def get_connection(self, command_name=None, *keys, **options):
  """Gets a connection from the pool, blocking until one is available"""
  try:
  async with self._condition:
redis/backoff.py CHANGED
@@ -110,5 +110,20 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
  return self._previous_backoff


+ class ExponentialWithJitterBackoff(AbstractBackoff):
+ """Exponential backoff upon failure, with jitter"""
+
+ def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
+ """
+ `cap`: maximum backoff time in seconds
+ `base`: base backoff time in seconds
+ """
+ self._cap = cap
+ self._base = base
+
+ def compute(self, failures: int) -> float:
+ return min(self._cap, random.random() * self._base * 2**failures)
+
+
  def default_backoff():
  return EqualJitterBackoff()
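
A small usage sketch for the newly added backoff class, wired into a Retry policy for the standard client; the host, cap and base values are illustrative:

    from redis import Redis
    from redis.backoff import ExponentialWithJitterBackoff
    from redis.retry import Retry

    # Sleeps min(cap, random() * base * 2**failures) seconds between attempts.
    retry = Retry(ExponentialWithJitterBackoff(cap=10, base=1), retries=3)
    r = Redis(host="localhost", port=6379, retry=retry, retry_on_timeout=True)
    r.ping()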
redis/client.py CHANGED
@@ -4,7 +4,17 @@ import threading
  import time
  import warnings
  from itertools import chain
- from typing import Any, Callable, Dict, List, Optional, Type, Union
+ from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Type,
+ Union,
+ )

  from redis._parsers.encoders import Encoder
  from redis._parsers.helpers import (
@@ -53,6 +63,11 @@ from redis.utils import (
  str_if_bytes,
  )

+ if TYPE_CHECKING:
+ import ssl
+
+ import OpenSSL
+
  SYM_EMPTY = b""
  EMPTY_RESPONSE = "EMPTY_RESPONSE"

@@ -175,47 +190,47 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):

  def __init__(
  self,
- host="localhost",
- port=6379,
- db=0,
- password=None,
- socket_timeout=None,
- socket_connect_timeout=None,
- socket_keepalive=None,
- socket_keepalive_options=None,
- connection_pool=None,
- unix_socket_path=None,
- encoding="utf-8",
- encoding_errors="strict",
- charset=None,
- errors=None,
- decode_responses=False,
- retry_on_timeout=False,
- retry_on_error=None,
- ssl=False,
- ssl_keyfile=None,
- ssl_certfile=None,
- ssl_cert_reqs="required",
- ssl_ca_certs=None,
- ssl_ca_path=None,
- ssl_ca_data=None,
- ssl_check_hostname=False,
- ssl_password=None,
- ssl_validate_ocsp=False,
- ssl_validate_ocsp_stapled=False,
- ssl_ocsp_context=None,
- ssl_ocsp_expected_cert=None,
- ssl_min_version=None,
- ssl_ciphers=None,
- max_connections=None,
- single_connection_client=False,
- health_check_interval=0,
- client_name=None,
- lib_name="redis-py",
- lib_version=get_lib_version(),
- username=None,
- retry=None,
- redis_connect_func=None,
+ host: str = "localhost",
+ port: int = 6379,
+ db: int = 0,
+ password: Optional[str] = None,
+ socket_timeout: Optional[float] = None,
+ socket_connect_timeout: Optional[float] = None,
+ socket_keepalive: Optional[bool] = None,
+ socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
+ connection_pool: Optional[ConnectionPool] = None,
+ unix_socket_path: Optional[str] = None,
+ encoding: str = "utf-8",
+ encoding_errors: str = "strict",
+ charset: Optional[str] = None,
+ errors: Optional[str] = None,
+ decode_responses: bool = False,
+ retry_on_timeout: bool = False,
+ retry_on_error: Optional[List[Type[Exception]]] = None,
+ ssl: bool = False,
+ ssl_keyfile: Optional[str] = None,
+ ssl_certfile: Optional[str] = None,
+ ssl_cert_reqs: str = "required",
+ ssl_ca_certs: Optional[str] = None,
+ ssl_ca_path: Optional[str] = None,
+ ssl_ca_data: Optional[str] = None,
+ ssl_check_hostname: bool = False,
+ ssl_password: Optional[str] = None,
+ ssl_validate_ocsp: bool = False,
+ ssl_validate_ocsp_stapled: bool = False,
+ ssl_ocsp_context: Optional["OpenSSL.SSL.Context"] = None,
+ ssl_ocsp_expected_cert: Optional[str] = None,
+ ssl_min_version: Optional["ssl.TLSVersion"] = None,
+ ssl_ciphers: Optional[str] = None,
+ max_connections: Optional[int] = None,
+ single_connection_client: bool = False,
+ health_check_interval: int = 0,
+ client_name: Optional[str] = None,
+ lib_name: Optional[str] = "redis-py",
+ lib_version: Optional[str] = get_lib_version(),
+ username: Optional[str] = None,
+ retry: Optional[Retry] = None,
+ redis_connect_func: Optional[Callable[[], None]] = None,
  credential_provider: Optional[CredentialProvider] = None,
  protocol: Optional[int] = 2,
  cache: Optional[CacheInterface] = None,
@@ -351,7 +366,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
  self.connection = None
  self._single_connection_client = single_connection_client
  if self._single_connection_client:
- self.connection = self.connection_pool.get_connection("_")
+ self.connection = self.connection_pool.get_connection()
  self._event_dispatcher.dispatch(
  AfterSingleConnectionInstantiationEvent(
  self.connection, ClientType.SYNC, self.single_connection_lock
@@ -550,7 +565,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
  def __del__(self):
  self.close()

- def close(self):
+ def close(self) -> None:
  # In case a connection property does not yet exist
  # (due to a crash earlier in the Redis() constructor), return
  # immediately as there is nothing to clean-up.
@@ -593,7 +608,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
  """Execute a command and return a parsed response"""
  pool = self.connection_pool
  command_name = args[0]
- conn = self.connection or pool.get_connection(command_name, **options)
+ conn = self.connection or pool.get_connection()

  if self._single_connection_client:
  self.single_connection_lock.acquire()
@@ -652,7 +667,7 @@ class Monitor:

  def __init__(self, connection_pool):
  self.connection_pool = connection_pool
- self.connection = self.connection_pool.get_connection("MONITOR")
+ self.connection = self.connection_pool.get_connection()

  def __enter__(self):
  self.connection.send_command("MONITOR")
@@ -825,9 +840,7 @@ class PubSub:
  # subscribed to one or more channels

  if self.connection is None:
- self.connection = self.connection_pool.get_connection(
- "pubsub", self.shard_hint
- )
+ self.connection = self.connection_pool.get_connection()
  # register a callback that re-subscribes to any channels we
  # were listening to when we were disconnected
  self.connection.register_connect_callback(self.on_connect)
@@ -1382,7 +1395,7 @@ class Pipeline(Redis):
  conn = self.connection
  # if this is the first call, we need a connection
  if not conn:
- conn = self.connection_pool.get_connection(command_name, self.shard_hint)
+ conn = self.connection_pool.get_connection()
  self.connection = conn

  return conn.retry.call_with_retry(
@@ -1551,11 +1564,10 @@ class Pipeline(Redis):
  conn.retry_on_error is None
  or isinstance(error, tuple(conn.retry_on_error)) is False
  ):
-
  self.reset()
  raise error

- def execute(self, raise_on_error=True):
+ def execute(self, raise_on_error: bool = True) -> List[Any]:
  """Execute all the commands in the current pipeline"""
  stack = self.command_stack
  if not stack and not self.watching:
@@ -1569,7 +1581,7 @@ class Pipeline(Redis):

  conn = self.connection
  if not conn:
- conn = self.connection_pool.get_connection("MULTI", self.shard_hint)
+ conn = self.connection_pool.get_connection()
  # assign to self.connection so reset() releases the connection
  # back to the pool after we're done
  self.connection = conn
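
The annotations added above are type-level only; as an illustration, a short sketch with the sync client and pipeline (key and values are arbitrary, the host is assumed to be local):

    from typing import Any, List

    import redis

    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    pipe = r.pipeline()
    pipe.set("greeting", "hello")
    pipe.get("greeting")
    results: List[Any] = pipe.execute()  # execute() is now annotated as -> List[Any]
    print(results)  # [True, 'hello']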
redis/cluster.py CHANGED
@@ -4,6 +4,7 @@ import sys
  import threading
  import time
  from collections import OrderedDict
+ from enum import Enum
  from typing import Any, Callable, Dict, List, Optional, Tuple, Union

  from redis._parsers import CommandsParser, Encoder
@@ -42,6 +43,7 @@ from redis.lock import Lock
  from redis.retry import Retry
  from redis.utils import (
  HIREDIS_AVAILABLE,
+ deprecated_args,
  dict_merge,
  list_keys_to_dict,
  merge_result,
@@ -54,10 +56,13 @@ def get_node_name(host: str, port: Union[str, int]) -> str:
  return f"{host}:{port}"


+ @deprecated_args(
+ allowed_args=["redis_node"],
+ reason="Use get_connection(redis_node) instead",
+ version="5.3.0",
+ )
  def get_connection(redis_node, *args, **options):
- return redis_node.connection or redis_node.connection_pool.get_connection(
- args[0], **options
- )
+ return redis_node.connection or redis_node.connection_pool.get_connection()


  def parse_scan_result(command, res, **options):
@@ -424,7 +429,12 @@ class AbstractRedisCluster:
  list_keys_to_dict(["SCRIPT FLUSH"], lambda command, res: all(res.values())),
  )

- ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, ClusterDownError)
+ ERRORS_ALLOW_RETRY = (
+ ConnectionError,
+ TimeoutError,
+ ClusterDownError,
+ SlotNotCoveredError,
+ )

  def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
  """Replace the default cluster node.
@@ -496,6 +506,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  """
  return cls(url=url, **kwargs)

+ @deprecated_args(
+ args_to_warn=["read_from_replicas"],
+ reason="Please configure the 'load_balancing_strategy' instead",
+ version="5.3.0",
+ )
  def __init__(
  self,
  host: Optional[str] = None,
@@ -506,6 +521,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  require_full_coverage: bool = False,
  reinitialize_steps: int = 5,
  read_from_replicas: bool = False,
+ load_balancing_strategy: Optional["LoadBalancingStrategy"] = None,
  dynamic_startup_nodes: bool = True,
  url: Optional[str] = None,
  address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
@@ -534,11 +550,17 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  cluster client. If not all slots are covered, RedisClusterException
  will be thrown.
  :param read_from_replicas:
+ @deprecated - please use load_balancing_strategy instead
  Enable read from replicas in READONLY mode. You can read possibly
  stale data.
  When set to true, read commands will be assigned between the
  primary and its replications in a Round-Robin manner.
- :param dynamic_startup_nodes:
+ :param load_balancing_strategy:
+ Enable read from replicas in READONLY mode and defines the load balancing
+ strategy that will be used for cluster node selection.
+ The data read from replicas is eventually consistent
+ with the data in primary nodes.
+ :param dynamic_startup_nodes:
  Set the RedisCluster's startup nodes to all of the discovered nodes.
  If true (default value), the cluster's discovered nodes will be used to
  determine the cluster nodes-slots mapping in the next topology refresh.
@@ -643,6 +665,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  self.command_flags = self.__class__.COMMAND_FLAGS.copy()
  self.node_flags = self.__class__.NODE_FLAGS.copy()
  self.read_from_replicas = read_from_replicas
+ self.load_balancing_strategy = load_balancing_strategy
  self.reinitialize_counter = 0
  self.reinitialize_steps = reinitialize_steps
  if event_dispatcher is None:
@@ -695,7 +718,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  connection.set_parser(ClusterParser)
  connection.on_connect()

- if self.read_from_replicas:
+ if self.read_from_replicas or self.load_balancing_strategy:
  # Sending READONLY command to server to configure connection as
  # readonly. Since each cluster node may change its server type due
  # to a failover, we should establish a READONLY connection
@@ -822,6 +845,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  cluster_response_callbacks=self.cluster_response_callbacks,
  cluster_error_retry_attempts=self.cluster_error_retry_attempts,
  read_from_replicas=self.read_from_replicas,
+ load_balancing_strategy=self.load_balancing_strategy,
  reinitialize_steps=self.reinitialize_steps,
  lock=self._lock,
  )
@@ -939,7 +963,9 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  # get the node that holds the key's slot
  slot = self.determine_slot(*args)
  node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and command in READ_COMMANDS
+ slot,
+ self.read_from_replicas and command in READ_COMMANDS,
+ self.load_balancing_strategy if command in READ_COMMANDS else None,
  )
  return [node]

@@ -1163,12 +1189,18 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  # refresh the target node
  slot = self.determine_slot(*args)
  target_node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and command in READ_COMMANDS
+ slot,
+ self.read_from_replicas and command in READ_COMMANDS,
+ (
+ self.load_balancing_strategy
+ if command in READ_COMMANDS
+ else None
+ ),
  )
  moved = False

  redis_node = self.get_redis_connection(target_node)
- connection = get_connection(redis_node, *args, **kwargs)
+ connection = get_connection(redis_node)
  if asking:
  connection.send_command("ASKING")
  redis_node.parse_response(connection, "ASKING", **kwargs)
@@ -1225,13 +1257,19 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  except AskError as e:
  redirect_addr = get_node_name(host=e.host, port=e.port)
  asking = True
- except ClusterDownError as e:
+ except (ClusterDownError, SlotNotCoveredError):
  # ClusterDownError can occur during a failover and to get
  # self-healed, we will try to reinitialize the cluster layout
  # and retry executing the command
+
+ # SlotNotCoveredError can occur when the cluster is not fully
+ # initialized or can be temporary issue.
+ # We will try to reinitialize the cluster topology
+ # and retry executing the command
+
  time.sleep(0.25)
  self.nodes_manager.initialize()
- raise e
+ raise
  except ResponseError:
  raise
  except Exception as e:
@@ -1244,7 +1282,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):

  raise ClusterError("TTL exhausted.")

- def close(self):
+ def close(self) -> None:
  try:
  with self._lock:
  if self.nodes_manager:
@@ -1312,6 +1350,12 @@ class ClusterNode:
  self.redis_connection.close()


+ class LoadBalancingStrategy(Enum):
+ ROUND_ROBIN = "round_robin"
+ ROUND_ROBIN_REPLICAS = "round_robin_replicas"
+ RANDOM_REPLICA = "random_replica"
+
+
  class LoadBalancer:
  """
  Round-Robin Load Balancing
@@ -1321,15 +1365,38 @@ class LoadBalancer:
  self.primary_to_idx = {}
  self.start_index = start_index

- def get_server_index(self, primary: str, list_size: int) -> int:
- server_index = self.primary_to_idx.setdefault(primary, self.start_index)
- # Update the index
- self.primary_to_idx[primary] = (server_index + 1) % list_size
- return server_index
+ def get_server_index(
+ self,
+ primary: str,
+ list_size: int,
+ load_balancing_strategy: LoadBalancingStrategy = LoadBalancingStrategy.ROUND_ROBIN, # noqa: line too long ignored
+ ) -> int:
+ if load_balancing_strategy == LoadBalancingStrategy.RANDOM_REPLICA:
+ return self._get_random_replica_index(list_size)
+ else:
+ return self._get_round_robin_index(
+ primary,
+ list_size,
+ load_balancing_strategy == LoadBalancingStrategy.ROUND_ROBIN_REPLICAS,
+ )

  def reset(self) -> None:
  self.primary_to_idx.clear()

+ def _get_random_replica_index(self, list_size: int) -> int:
+ return random.randint(1, list_size - 1)
+
+ def _get_round_robin_index(
+ self, primary: str, list_size: int, replicas_only: bool
+ ) -> int:
+ server_index = self.primary_to_idx.setdefault(primary, self.start_index)
+ if replicas_only and server_index == 0:
+ # skip the primary node index
+ server_index = 1
+ # Update the index for the next round
+ self.primary_to_idx[primary] = (server_index + 1) % list_size
+ return server_index
+

  class NodesManager:
  def __init__(
@@ -1433,7 +1500,21 @@ class NodesManager:
  # Reset moved_exception
  self._moved_exception = None

- def get_node_from_slot(self, slot, read_from_replicas=False, server_type=None):
+ @deprecated_args(
+ args_to_warn=["server_type"],
+ reason=(
+ "In case you need select some load balancing strategy "
+ "that will use replicas, please set it through 'load_balancing_strategy'"
+ ),
+ version="5.3.0",
+ )
+ def get_node_from_slot(
+ self,
+ slot,
+ read_from_replicas=False,
+ load_balancing_strategy=None,
+ server_type=None,
+ ):
  """
  Gets a node that servers this hash slot
  """
@@ -1448,11 +1529,14 @@ class NodesManager:
  f'"require_full_coverage={self._require_full_coverage}"'
  )

- if read_from_replicas is True:
- # get the server index in a Round-Robin manner
+ if read_from_replicas is True and load_balancing_strategy is None:
+ load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
+ if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+ # get the server index using the strategy defined in load_balancing_strategy
  primary_name = self.slots_cache[slot][0].name
  node_idx = self.read_load_balancer.get_server_index(
- primary_name, len(self.slots_cache[slot])
+ primary_name, len(self.slots_cache[slot]), load_balancing_strategy
  )
  elif (
  server_type is None
@@ -1641,7 +1725,7 @@ class NodesManager:
  if len(disagreements) > 5:
  raise RedisClusterException(
  f"startup_nodes could not agree on a valid "
- f'slots cache: {", ".join(disagreements)}'
+ f"slots cache: {', '.join(disagreements)}"
  )

  fully_covered = self.check_slots_coverage(tmp_slots)
@@ -1686,7 +1770,7 @@ class NodesManager:
  # If initialize was called after a MovedError, clear it
  self._moved_exception = None

- def close(self):
+ def close(self) -> None:
  self.default_node = None
  for node in self.nodes_cache.values():
  if node.redis_connection:
@@ -1735,7 +1819,7 @@ class ClusterPubSub(PubSub):
  first command execution. The node will be determined by:
  1. Hashing the channel name in the request to find its keyslot
  2. Selecting a node that handles the keyslot: If read_from_replicas is
- set to true, a replica can be selected.
+ set to true or load_balancing_strategy is set, a replica can be selected.

  :type redis_cluster: RedisCluster
  :type node: ClusterNode
@@ -1831,7 +1915,9 @@ class ClusterPubSub(PubSub):
  channel = args[1]
  slot = self.cluster.keyslot(channel)
  node = self.cluster.nodes_manager.get_node_from_slot(
- slot, self.cluster.read_from_replicas
+ slot,
+ self.cluster.read_from_replicas,
+ self.cluster.load_balancing_strategy,
  )
  else:
  # Get a random node
@@ -1839,9 +1925,7 @@ class ClusterPubSub(PubSub):
  self.node = node
  redis_connection = self.cluster.get_redis_connection(node)
  self.connection_pool = redis_connection.connection_pool
- self.connection = self.connection_pool.get_connection(
- "pubsub", self.shard_hint
- )
+ self.connection = self.connection_pool.get_connection()
  # register a callback that re-subscribes to any channels we
  # were listening to when we were disconnected
  self.connection.register_connect_callback(self.on_connect)
@@ -1976,6 +2060,7 @@ class ClusterPipeline(RedisCluster):
  cluster_response_callbacks: Optional[Dict[str, Callable]] = None,
  startup_nodes: Optional[List["ClusterNode"]] = None,
  read_from_replicas: bool = False,
+ load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
  cluster_error_retry_attempts: int = 3,
  reinitialize_steps: int = 5,
  lock=None,
@@ -1991,6 +2076,7 @@ class ClusterPipeline(RedisCluster):
  )
  self.startup_nodes = startup_nodes if startup_nodes else []
  self.read_from_replicas = read_from_replicas
+ self.load_balancing_strategy = load_balancing_strategy
  self.command_flags = self.__class__.COMMAND_FLAGS.copy()
  self.cluster_response_callbacks = cluster_response_callbacks
  self.cluster_error_retry_attempts = cluster_error_retry_attempts
@@ -2062,12 +2148,11 @@ class ClusterPipeline(RedisCluster):
  """
  cmd = " ".join(map(safe_str, command))
  msg = (
- f"Command # {number} ({cmd}) of pipeline "
- f"caused error: {exception.args[0]}"
+ f"Command # {number} ({cmd}) of pipeline caused error: {exception.args[0]}"
  )
  exception.args = (msg,) + exception.args[1:]

- def execute(self, raise_on_error=True):
+ def execute(self, raise_on_error: bool = True) -> List[Any]:
  """
  Execute all the commands in the current pipeline
  """
@@ -2201,8 +2286,8 @@ class ClusterPipeline(RedisCluster):
  if node_name not in nodes:
  redis_node = self.get_redis_connection(node)
  try:
- connection = get_connection(redis_node, c.args)
- except ConnectionError:
+ connection = get_connection(redis_node)
+ except (ConnectionError, TimeoutError):
  for n in nodes.values():
  n.connection_pool.release(n.connection)
  # Connection retries are being handled in the node's
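
A hedged sketch of the sync cluster equivalent, using the new enum instead of the deprecated read_from_replicas flag (the startup node address is a placeholder):

    from redis.cluster import LoadBalancingStrategy, RedisCluster

    # ROUND_ROBIN_REPLICAS rotates reads over replicas only; ROUND_ROBIN
    # (the default behind read_from_replicas=True) includes the primary.
    rc = RedisCluster(
        host="cluster.example.local",  # placeholder startup node
        port=6379,
        load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN_REPLICAS,
    )
    rc.set("counter", 1)
    print(rc.get("counter"))  # read command, served by a replica when one exists
    rc.close()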
redis/connection.py CHANGED
@@ -9,7 +9,7 @@ from abc import abstractmethod
  from itertools import chain
  from queue import Empty, Full, LifoQueue
  from time import time
- from typing import Any, Callable, Dict, List, Optional, Type, Union
+ from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
  from urllib.parse import parse_qs, unquote, urlparse

  from redis.cache import (
@@ -42,6 +42,7 @@ from .utils import (
  HIREDIS_AVAILABLE,
  SSL_AVAILABLE,
  compare_versions,
+ deprecated_args,
  ensure_string,
  format_error_message,
  get_lib_version,
@@ -904,9 +905,11 @@ class CacheProxyConnection(ConnectionInterface):
  and self._cache.get(self._current_command_cache_key).status
  != CacheEntryStatus.IN_PROGRESS
  ):
- return copy.deepcopy(
+ res = copy.deepcopy(
  self._cache.get(self._current_command_cache_key).cache_value
  )
+ self._current_command_cache_key = None
+ return res

  response = self._conn.read_response(
  disable_decoding=disable_decoding,
@@ -932,6 +935,8 @@ class CacheProxyConnection(ConnectionInterface):
  cache_entry.cache_value = response
  self._cache.set(cache_entry)

+ self._current_command_cache_key = None
+
  return response

  def pack_command(self, *args):
@@ -1259,6 +1264,9 @@ def parse_url(url):
  return kwargs


+ _CP = TypeVar("_CP", bound="ConnectionPool")
+
+
  class ConnectionPool:
  """
  Create a connection pool. ``If max_connections`` is set, then this
@@ -1274,7 +1282,7 @@ class ConnectionPool:
  """

  @classmethod
- def from_url(cls, url, **kwargs):
+ def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP:
  """
  Return a connection pool configured from the given URL.

@@ -1374,6 +1382,7 @@ class ConnectionPool:
  # will notice the first thread already did the work and simply
  # release the lock.
  self._fork_lock = threading.Lock()
+ self._lock = threading.Lock()
  self.reset()

  def __repr__(self) -> (str, str):
@@ -1391,7 +1400,6 @@ class ConnectionPool:
  return self.connection_kwargs.get("protocol", None)

  def reset(self) -> None:
- self._lock = threading.Lock()
  self._created_connections = 0
  self._available_connections = []
  self._in_use_connections = set()
@@ -1454,8 +1462,14 @@ class ConnectionPool:
  finally:
  self._fork_lock.release()

- def get_connection(self, command_name: str, *keys, **options) -> "Connection":
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.3.0",
+ )
+ def get_connection(self, command_name=None, *keys, **options) -> "Connection":
  "Get a connection from the pool"
+
  self._checkpid()
  with self._lock:
  try:
@@ -1518,7 +1532,7 @@ class ConnectionPool:
  except KeyError:
  # Gracefully fail when a connection is returned to this pool
  # that the pool doesn't actually own
- pass
+ return

  if self.owns_connection(connection):
  self._available_connections.append(connection)
@@ -1526,10 +1540,10 @@ class ConnectionPool:
  AfterConnectionReleasedEvent(connection)
  )
  else:
- # pool doesn't own this connection. do not add it back
- # to the pool and decrement the count so that another
- # connection can take its place if needed
- self._created_connections -= 1
+ # Pool doesn't own this connection, do not add it back
+ # to the pool.
+ # The created connections count should not be changed,
+ # because the connection was not created by the pool.
  connection.disconnect()
  return

@@ -1676,7 +1690,12 @@ class BlockingConnectionPool(ConnectionPool):
  self._connections.append(connection)
  return connection

- def get_connection(self, command_name, *keys, **options):
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.3.0",
+ )
+ def get_connection(self, command_name=None, *keys, **options):
  """
  Get a connection, blocking for ``self.timeout`` until a connection
  is available from the pool.
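
A short sketch of what the deprecation shim above means for callers of the sync pool (assumes a reachable local server; names are illustrative):

    import warnings

    from redis.connection import ConnectionPool

    pool = ConnectionPool(host="localhost", port=6379)

    conn = pool.get_connection()  # new style: no arguments
    pool.release(conn)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        conn = pool.get_connection("GET")  # old style still works, but warns
        pool.release(conn)

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)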
redis/typing.py CHANGED
@@ -20,7 +20,7 @@ if TYPE_CHECKING:


  Number = Union[int, float]
- EncodedT = Union[bytes, memoryview]
+ EncodedT = Union[bytes, bytearray, memoryview]
  DecodedT = Union[str, int, float]
  EncodableT = Union[EncodedT, DecodedT]
  AbsExpiryT = Union[int, datetime]
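
The widened EncodedT alias is a typing-only change; a tiny sketch of using it in annotations (payload_size is a hypothetical helper, not part of redis-py):

    from redis.typing import EncodableT, EncodedT

    def payload_size(value: EncodedT) -> int:
        # bytes, bytearray and memoryview are all valid EncodedT values now
        return len(value)

    payload_size(b"raw")
    payload_size(bytearray(b"mutable buffer"))
    payload_size(memoryview(b"zero-copy view"))

    cache_value: EncodableT = bytearray(b"also a valid EncodableT")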
redis/utils.py CHANGED
@@ -122,6 +122,71 @@ def deprecated_function(reason="", version="", name=None):
  return decorator


+ def warn_deprecated_arg_usage(
+ arg_name: Union[list, str],
+ function_name: str,
+ reason: str = "",
+ version: str = "",
+ stacklevel: int = 2,
+ ):
+ import warnings
+
+ msg = (
+ f"Call to '{function_name}' function with deprecated"
+ f" usage of input argument/s '{arg_name}'."
+ )
+ if reason:
+ msg += f" ({reason})"
+ if version:
+ msg += f" -- Deprecated since version {version}."
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
+
+
+ def deprecated_args(
+ args_to_warn: list = ["*"],
+ allowed_args: list = [],
+ reason: str = "",
+ version: str = "",
+ ):
+ """
+ Decorator to mark specified args of a function as deprecated.
+ If '*' is in args_to_warn, all arguments will be marked as deprecated.
+ """
+
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ # Get function argument names
+ arg_names = func.__code__.co_varnames[: func.__code__.co_argcount]
+
+ provided_args = dict(zip(arg_names, args))
+ provided_args.update(kwargs)
+
+ provided_args.pop("self", None)
+ for allowed_arg in allowed_args:
+ provided_args.pop(allowed_arg, None)
+
+ for arg in args_to_warn:
+ if arg == "*" and len(provided_args) > 0:
+ warn_deprecated_arg_usage(
+ list(provided_args.keys()),
+ func.__name__,
+ reason,
+ version,
+ stacklevel=3,
+ )
+ elif arg in provided_args:
+ warn_deprecated_arg_usage(
+ arg, func.__name__, reason, version, stacklevel=3
+ )
+
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
+
+
  def _set_info_logger():
  """
  Set up a logger that log info logs to stdout.
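
For context, a hedged example of applying the new decorator to user code (fetch and shard_hint are hypothetical and not part of redis-py):

    from redis.utils import deprecated_args

    @deprecated_args(
        args_to_warn=["shard_hint"],
        reason="shard_hint is ignored; remove it from the call",
        version="5.3.0",
    )
    def fetch(key, shard_hint=None):
        return key

    fetch("users:42")                     # no warning
    fetch("users:42", shard_hint="east")  # emits a DeprecationWarning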
redis-5.3.0b4.dist-info/METADATA → redis-5.3.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: redis
- Version: 5.3.0b4
+ Version: 5.3.1
  Summary: Python client for Redis database and key-value store
  Home-page: https://github.com/redis/redis-py
  Author: Redis Inc.
@@ -30,7 +30,7 @@ Classifier: Programming Language :: Python :: Implementation :: PyPy
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: PyJWT~=2.9.0
+ Requires-Dist: PyJWT>=2.9.0
  Requires-Dist: async-timeout>=4.0.3; python_full_version < "3.11.3"
  Provides-Extra: hiredis
  Requires-Dist: hiredis>=3.0.0; extra == "hiredis"
redis-5.3.0b4.dist-info/RECORD → redis-5.3.1.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
  redis/__init__.py,sha256=WlARnwwst8oaEyjXV5XTcmSGyEKVCn3S9N1MrHyJ8U8,2015
- redis/backoff.py,sha256=N2CZXkB3cdoHeMZ01r0zVry0bRKe8mk0ybi8hE7PvzU,3177
+ redis/backoff.py,sha256=d22h74LEatJiFd_5o8HvFW3biFBotYOFZHddHt45ydc,3663
  redis/cache.py,sha256=68rJDNogvNwgdgBel6zSX9QziL11qsKIMhmvQvHvznM,9549
- redis/client.py,sha256=MMDn5Qh6rcBx2sDFU4O_Jid5TdzhA3-91_sxzmOBWAM,61055
- redis/cluster.py,sha256=4UBn9HoGjKGUZ-ILROSVw-4I3Kg_9YW8r0X4COKwPsI,95882
- redis/connection.py,sha256=UXrGt2T_1ebCGnpTmIzafZRLiZyqMJi4LOsRFkaFVHU,64750
+ redis/client.py,sha256=5KynKSwVK7YPKWwOItEfNpJsVlu_oSchm2lNc_xJnVc,61733
+ redis/cluster.py,sha256=YzGkte85bSJOYeqs_WESFam_gtaWgEZ6CijPIdldVis,99287
+ redis/connection.py,sha256=sZiKny4EQ8BtReUYtB4zBQ5D3Tk0SOjbjD3j56jrb0g,65270
  redis/crc.py,sha256=Z3kXFtkY2LdgefnQMud1xr4vG5UYvA9LCMqNMX1ywu4,729
  redis/credentials.py,sha256=GOnO3-LSW34efHaIrUbS742Mw8l70mRzF6UrKiKZsMY,1828
  redis/event.py,sha256=urOK241IdgmCQ3fq7GqXRstZ2vcXRV14bBBMdN3latk,12129
@@ -13,8 +13,8 @@ redis/ocsp.py,sha256=4b1s43x-DJ859zRKtwGTIbNys_dyGv5YyOdWnOvigyM,11451
  redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  redis/retry.py,sha256=JiIDxeD890vgi_me8pwypO1LixwhU0Fv3A5NEay8SAY,2206
  redis/sentinel.py,sha256=ya1aPeAvUcY9qXMSpV_wA3081vUqkIqcyXG9SqAvU88,14661
- redis/typing.py,sha256=skQl2VuyL7fPpg2BRDlGYMmwDQ2BLwwxxR8u_V1Kbm4,2138
- redis/utils.py,sha256=oTonIc6DbbB-ZT-mL14ChhcFk2y4qnK3UNORMKPj2oI,4787
+ redis/typing.py,sha256=k7F_3Vtsexeb7mUl6txlwrY1veGDLEhtcHe9FwIJOOo,2149
+ redis/utils.py,sha256=ErCe0V4nMTWTfeCI1Pg6X3WZeuxG0E-AirsI1AYaGF4,6664
  redis/_parsers/__init__.py,sha256=qkfgV2X9iyvQAvbLdSelwgz0dCk9SGAosCvuZC9-qDc,550
  redis/_parsers/base.py,sha256=0j3qIhLjQZOzYGc4n1IesNegckomVhvDsEZD6-yb3Ns,7475
  redis/_parsers/commands.py,sha256=pmR4hl4u93UvCmeDgePHFc6pWDr4slrKEvCsdMmtj_M,11052
@@ -25,9 +25,9 @@ redis/_parsers/resp2.py,sha256=f22kH-_ZP2iNtOn6xOe65MSy_fJpu8OEn1u_hgeeojI,4813
  redis/_parsers/resp3.py,sha256=jHtL1LYJegJ_LiNTsjzIvS-kZyNR58jZ_YV4cRfwuN0,11127
  redis/_parsers/socket.py,sha256=CKD8QW_wFSNlIZzxlbNduaGpiv0I8wBcsGuAIojDfJg,5403
  redis/asyncio/__init__.py,sha256=uoDD8XYVi0Kj6mcufYwLDUTQXmBRx7a0bhKF9stZr7I,1489
- redis/asyncio/client.py,sha256=xxifh7JrWJkSPpbem1qVXV6sCvAQRlq4VCYrkj84yvQ,61176
- redis/asyncio/cluster.py,sha256=c3dhOQjMUdXQO0WJCOn6-DTPxk-mbcgw52OpiSDrfG8,65243
- redis/asyncio/connection.py,sha256=w4yYr2Pzx_8Q7uJbeEyqZrjrqBpXaEZFYHZC5Zuv5HA,47203
+ redis/asyncio/client.py,sha256=Ef2yknTMQrTJ0bvi3-4payHGsDqU0cRZLytHrPxHNuE,61016
+ redis/asyncio/cluster.py,sha256=4uV8uTRDFeAY25BbgagX1ykwnPLMuXzOtxzUH5SC8Q0,66922
+ redis/asyncio/connection.py,sha256=NKzj0LNn27ZR9A4sh3KtOiPuKkFwujc9dTegodHaHAo,47512
  redis/asyncio/lock.py,sha256=lLasXEO2E1CskhX5ZZoaSGpmwZP1Q782R3HAUNG3wD4,11967
  redis/asyncio/retry.py,sha256=SnPPOlo5gcyIFtkC4DY7HFvmDgUaILsJ3DeHioogdB8,2219
  redis/asyncio/sentinel.py,sha256=QBpsrdlhZlFqENy_rK1IuasSicox55_xSvP_IywbhbQ,14293
@@ -75,8 +75,8 @@ redis/commands/timeseries/__init__.py,sha256=gkz6wshEzzQQryBOnrAqqQzttS-AHfXmuN_
  redis/commands/timeseries/commands.py,sha256=8Z2BEyP23qTYCJR_e9zdG11yWmIDwGBMO2PJNLtK2BA,47147
  redis/commands/timeseries/info.py,sha256=meZYdu7IV9KaUWMKZs9qW4vo3Q9MwhdY-EBtKQzls5o,3223
  redis/commands/timeseries/utils.py,sha256=NLwSOS5Dz9N8dYQSzEyBIvrItOWwfQ0xgDj8un6x3dU,1319
- redis-5.3.0b4.dist-info/LICENSE,sha256=pXslClvwPXr-VbdAYzE_Ktt7ANVGwKsUmok5gzP-PMg,1074
- redis-5.3.0b4.dist-info/METADATA,sha256=htJiEL0Mc-swfu0WAiCCZGdnKfxW2UwUbasut_xFq6o,9168
- redis-5.3.0b4.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- redis-5.3.0b4.dist-info/top_level.txt,sha256=OMAefszlde6ZoOtlM35AWzpRIrwtcqAMHGlRit-w2-4,6
- redis-5.3.0b4.dist-info/RECORD,,
+ redis-5.3.1.dist-info/LICENSE,sha256=pXslClvwPXr-VbdAYzE_Ktt7ANVGwKsUmok5gzP-PMg,1074
+ redis-5.3.1.dist-info/METADATA,sha256=ccrCqyfwV-h4GMhCg0PUDlldwwL10SOwcWX0G1LaJxo,9166
+ redis-5.3.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ redis-5.3.1.dist-info/top_level.txt,sha256=OMAefszlde6ZoOtlM35AWzpRIrwtcqAMHGlRit-w2-4,6
+ redis-5.3.1.dist-info/RECORD,,
All other files in the package are unchanged.