redis 6.2.0__py3-none-any.whl → 6.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
redis/__init__.py CHANGED
@@ -20,6 +20,7 @@ from redis.exceptions import (
  DataError,
  InvalidPipelineStack,
  InvalidResponse,
+ MaxConnectionsError,
  OutOfMemoryError,
  PubSubError,
  ReadOnlyError,
@@ -46,7 +47,7 @@ def int_or_str(value):


  # This version is used when building the package for publishing
- __version__ = "6.2.0"
+ __version__ = "6.3.0"
  VERSION = tuple(map(int_or_str, __version__.split(".")))


@@ -66,6 +67,7 @@ __all__ = [
  "default_backoff",
  "InvalidPipelineStack",
  "InvalidResponse",
+ "MaxConnectionsError",
  "OutOfMemoryError",
  "PubSubError",
  "ReadOnlyError",
redis/_parsers/helpers.py CHANGED
@@ -676,7 +676,8 @@ def parse_client_info(value):
  "omem",
  "tot-mem",
  }:
- client_info[int_key] = int(client_info[int_key])
+ if int_key in client_info:
+ client_info[int_key] = int(client_info[int_key])
  return client_info


redis/asyncio/cluster.py CHANGED
@@ -814,7 +814,13 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  moved = False

  return await target_node.execute_command(*args, **kwargs)
- except (BusyLoadingError, MaxConnectionsError):
+ except BusyLoadingError:
+ raise
+ except MaxConnectionsError:
+ # MaxConnectionsError indicates client-side resource exhaustion
+ # (too many connections in the pool), not a node failure.
+ # Don't treat this as a node failure - just re-raise the error
+ # without reinitializing the cluster.
  raise
  except (ConnectionError, TimeoutError):
  # Connection retries are being handled in the node's
@@ -2350,10 +2356,11 @@ class TransactionStrategy(AbstractStrategy):
  # watching something
  if self._transaction_connection:
  try:
- # call this manually since our unwatch or
- # immediate_execute_command methods can call reset()
- await self._transaction_connection.send_command("UNWATCH")
- await self._transaction_connection.read_response()
+ if self._watching:
+ # call this manually since our unwatch or
+ # immediate_execute_command methods can call reset()
+ await self._transaction_connection.send_command("UNWATCH")
+ await self._transaction_connection.read_response()
  # we can safely return the connection to the pool here since we're
  # sure we're no longer WATCHing anything
  self._transaction_node.release(self._transaction_connection)
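Note on the first hunk above: MaxConnectionsError now signals client-side pool exhaustion and is re-raised as-is instead of being treated like a node failure, so the cluster is not reinitialized. A minimal sketch of how calling code might react, assuming a local cluster node on port 7000 and a deliberately small max_connections (neither is part of this diff):

```python
import asyncio
from redis.asyncio.cluster import RedisCluster
from redis.exceptions import MaxConnectionsError

async def main():
    # Hypothetical endpoint and pool size, chosen only for illustration.
    rc = RedisCluster(host="localhost", port=7000, max_connections=10)
    try:
        await rc.get("key")
    except MaxConnectionsError:
        # Pool exhaustion on our side: back off or shed load; the cluster
        # topology is still considered healthy, so no reinitialization happens.
        await asyncio.sleep(0.1)
    finally:
        await rc.aclose()

asyncio.run(main())
```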
redis/asyncio/connection.py CHANGED
@@ -295,13 +295,18 @@ class AbstractConnection:
  """Connects to the Redis server if not already connected"""
  await self.connect_check_health(check_health=True)

- async def connect_check_health(self, check_health: bool = True):
+ async def connect_check_health(
+ self, check_health: bool = True, retry_socket_connect: bool = True
+ ):
  if self.is_connected:
  return
  try:
- await self.retry.call_with_retry(
- lambda: self._connect(), lambda error: self.disconnect()
- )
+ if retry_socket_connect:
+ await self.retry.call_with_retry(
+ lambda: self._connect(), lambda error: self.disconnect()
+ )
+ else:
+ await self._connect()
  except asyncio.CancelledError:
  raise # in 3.7 and earlier, this is an Exception, not BaseException
  except (socket.timeout, asyncio.TimeoutError):
@@ -1037,6 +1042,7 @@ class ConnectionPool:
  By default, TCP connections are created unless ``connection_class``
  is specified. Use :py:class:`~redis.UnixDomainSocketConnection` for
  unix sockets.
+ :py:class:`~redis.SSLConnection` can be used for SSL enabled connections.

  Any additional keyword arguments are passed to the constructor of
  ``connection_class``.
@@ -1112,9 +1118,11 @@ class ConnectionPool:
  self._event_dispatcher = EventDispatcher()

  def __repr__(self):
+ conn_kwargs = ",".join([f"{k}={v}" for k, v in self.connection_kwargs.items()])
  return (
  f"<{self.__class__.__module__}.{self.__class__.__name__}"
- f"({self.connection_class(**self.connection_kwargs)!r})>"
+ f"(<{self.connection_class.__module__}.{self.connection_class.__name__}"
+ f"({conn_kwargs})>)>"
  )

  def reset(self):
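The __repr__ change above stops instantiating a throwaway connection just to print the pool; it now formats the connection class and the stored connection_kwargs directly. A rough illustration of the new output (the exact string depends on the kwargs you pass):

```python
from redis.asyncio import ConnectionPool

pool = ConnectionPool(host="localhost", port=6379)
print(repr(pool))
# Roughly:
# <redis.asyncio.connection.ConnectionPool(<redis.asyncio.connection.Connection(host=localhost,port=6379)>)>
```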
redis/asyncio/retry.py CHANGED
@@ -2,18 +2,16 @@ from asyncio import sleep
  from typing import TYPE_CHECKING, Any, Awaitable, Callable, Tuple, Type, TypeVar

  from redis.exceptions import ConnectionError, RedisError, TimeoutError
-
- if TYPE_CHECKING:
- from redis.backoff import AbstractBackoff
-
+ from redis.retry import AbstractRetry

  T = TypeVar("T")

+ if TYPE_CHECKING:
+ from redis.backoff import AbstractBackoff


- class Retry:
- """Retry a specific number of times after a failure"""

- __slots__ = "_backoff", "_retries", "_supported_errors"
+ class Retry(AbstractRetry[RedisError]):
+ __hash__ = AbstractRetry.__hash__

  def __init__(
  self,
@@ -24,36 +22,17 @@
  TimeoutError,
  ),
  ):
- """
- Initialize a `Retry` object with a `Backoff` object
- that retries a maximum of `retries` times.
- `retries` can be negative to retry forever.
- You can specify the types of supported errors which trigger
- a retry with the `supported_errors` parameter.
- """
- self._backoff = backoff
- self._retries = retries
- self._supported_errors = supported_errors
+ super().__init__(backoff, retries, supported_errors)

- def update_supported_errors(self, specified_errors: list):
- """
- Updates the supported errors with the specified error types
- """
- self._supported_errors = tuple(
- set(self._supported_errors + tuple(specified_errors))
- )
-
- def get_retries(self) -> int:
- """
- Get the number of retries.
- """
- return self._retries
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, Retry):
+ return NotImplemented

- def update_retries(self, value: int) -> None:
- """
- Set the number of retries.
- """
- self._retries = value
+ return (
+ self._backoff == other._backoff
+ and self._retries == other._retries
+ and set(self._supported_errors) == set(other._supported_errors)
+ )

  async def call_with_retry(
  self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
redis/asyncio/sentinel.py CHANGED
@@ -11,8 +11,12 @@ from redis.asyncio.connection import (
  SSLConnection,
  )
  from redis.commands import AsyncSentinelCommands
- from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError
- from redis.utils import str_if_bytes
+ from redis.exceptions import (
+ ConnectionError,
+ ReadOnlyError,
+ ResponseError,
+ TimeoutError,
+ )


  class MasterNotFoundError(ConnectionError):
@@ -37,11 +41,10 @@ class SentinelManagedConnection(Connection):

  async def connect_to(self, address):
  self.host, self.port = address
- await super().connect()
- if self.connection_pool.check_connection:
- await self.send_command("PING")
- if str_if_bytes(await self.read_response()) != "PONG":
- raise ConnectionError("PING failed")
+ await self.connect_check_health(
+ check_health=self.connection_pool.check_connection,
+ retry_socket_connect=False,
+ )

  async def _connect_retry(self):
  if self._reader:
@@ -223,19 +226,31 @@ class Sentinel(AsyncSentinelCommands):
  once - If set to True, then execute the resulting command on a single
  node at random, rather than across the entire sentinel cluster.
  """
- once = bool(kwargs.get("once", False))
- if "once" in kwargs.keys():
- kwargs.pop("once")
+ once = bool(kwargs.pop("once", False))
+
+ # Check if command is supposed to return the original
+ # responses instead of boolean value.
+ return_responses = bool(kwargs.pop("return_responses", False))

  if once:
- await random.choice(self.sentinels).execute_command(*args, **kwargs)
- else:
- tasks = [
- asyncio.Task(sentinel.execute_command(*args, **kwargs))
- for sentinel in self.sentinels
- ]
- await asyncio.gather(*tasks)
- return True
+ response = await random.choice(self.sentinels).execute_command(
+ *args, **kwargs
+ )
+ if return_responses:
+ return [response]
+ else:
+ return True if response else False
+
+ tasks = [
+ asyncio.Task(sentinel.execute_command(*args, **kwargs))
+ for sentinel in self.sentinels
+ ]
+ responses = await asyncio.gather(*tasks)
+
+ if return_responses:
+ return responses
+
+ return all(responses)

  def __repr__(self):
  sentinel_addresses = []
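Sentinel.execute_command now aggregates the per-sentinel replies with all() instead of returning an unconditional True, and the new return_responses flag exposes the raw replies. A small sketch, assuming sentinels on localhost:26379/26380 monitoring a service named "mymaster":

```python
import asyncio
from redis.asyncio.sentinel import Sentinel

async def main():
    sentinel = Sentinel([("localhost", 26379), ("localhost", 26380)])
    # Default: a boolean, now computed as all() over each sentinel's reply.
    ok = await sentinel.sentinel_master("mymaster")
    # New in 6.3.0: return the underlying replies instead of a boolean.
    replies = await sentinel.sentinel_master("mymaster", return_responses=True)
    print(ok, replies)

asyncio.run(main())
```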
redis/backoff.py CHANGED
@@ -170,7 +170,7 @@ class ExponentialWithJitterBackoff(AbstractBackoff):
  return hash((self._base, self._cap))

  def __eq__(self, other) -> bool:
- if not isinstance(other, EqualJitterBackoff):
+ if not isinstance(other, ExponentialWithJitterBackoff):
  return NotImplemented

  return self._base == other._base and self._cap == other._cap
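The __eq__ fix above matters when comparing retry/backoff configurations: two identical ExponentialWithJitterBackoff instances previously compared unequal because the isinstance check targeted the wrong sibling class. A tiny sketch (the cap/base values are arbitrary):

```python
from redis.backoff import ExponentialWithJitterBackoff

a = ExponentialWithJitterBackoff(cap=10, base=1)
b = ExponentialWithJitterBackoff(cap=10, base=1)
assert a == b  # before the fix this effectively fell back to identity and was False
```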
redis/client.py CHANGED
@@ -450,7 +450,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):

  def transaction(
  self, func: Callable[["Pipeline"], None], *watches, **kwargs
- ) -> None:
+ ) -> Union[List[Any], Any, None]:
  """
  Convenience method for executing the callable `func` as a transaction
  while watching all keys specified in `watches`. The 'func' callable
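The annotation change above documents what transaction() already returned: by default the list of results from the pipeline's EXEC, or the callable's own return value when value_from_callable=True. A short usage sketch against a local server (the key name is made up):

```python
import redis

r = redis.Redis()

def func(pipe):
    current = pipe.get("counter")          # runs immediately while WATCHing
    pipe.multi()
    pipe.set("counter", int(current or 0) + 1)

results = r.transaction(func, "counter")   # list of command results, e.g. [True]
value = r.transaction(func, "counter", value_from_callable=True)  # func's return value (None here)
```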
redis/cluster.py CHANGED
@@ -39,6 +39,7 @@ from redis.exceptions import (
  DataError,
  ExecAbortError,
  InvalidPipelineStack,
+ MaxConnectionsError,
  MovedError,
  RedisClusterException,
  RedisError,
@@ -856,7 +857,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  startup_nodes=self.nodes_manager.startup_nodes,
  result_callbacks=self.result_callbacks,
  cluster_response_callbacks=self.cluster_response_callbacks,
- cluster_error_retry_attempts=self.retry.get_retries(),
  read_from_replicas=self.read_from_replicas,
  load_balancing_strategy=self.load_balancing_strategy,
  reinitialize_steps=self.reinitialize_steps,
@@ -1236,6 +1236,12 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
  return response
  except AuthenticationError:
  raise
+ except MaxConnectionsError:
+ # MaxConnectionsError indicates client-side resource exhaustion
+ # (too many connections in the pool), not a node failure.
+ # Don't treat this as a node failure - just re-raise the error
+ # without reinitializing the cluster.
+ raise
  except (ConnectionError, TimeoutError) as e:
  # ConnectionError can also be raised if we couldn't get a
  # connection from the pool before timing out, so check that
@@ -3290,10 +3296,11 @@ class TransactionStrategy(AbstractStrategy):
  # watching something
  if self._transaction_connection:
  try:
- # call this manually since our unwatch or
- # immediate_execute_command methods can call reset()
- self._transaction_connection.send_command("UNWATCH")
- self._transaction_connection.read_response()
+ if self._watching:
+ # call this manually since our unwatch or
+ # immediate_execute_command methods can call reset()
+ self._transaction_connection.send_command("UNWATCH")
+ self._transaction_connection.read_response()
  # we can safely return the connection to the pool here since we're
  # sure we're no longer WATCHing anything
  node = self._nodes_manager.find_connection_owner(
redis/commands/core.py CHANGED
@@ -3290,7 +3290,7 @@ class SetCommands(CommandsProtocol):
  see: https://redis.io/topics/data-types#sets
  """

- def sadd(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]:
+ def sadd(self, name: KeyT, *values: FieldT) -> Union[Awaitable[int], int]:
  """
  Add ``value(s)`` to set ``name``

@@ -3298,7 +3298,7 @@ class SetCommands(CommandsProtocol):
  """
  return self.execute_command("SADD", name, *values)

- def scard(self, name: str) -> Union[Awaitable[int], int]:
+ def scard(self, name: KeyT) -> Union[Awaitable[int], int]:
  """
  Return the number of elements in set ``name``

@@ -3337,7 +3337,7 @@ class SetCommands(CommandsProtocol):
  return self.execute_command("SINTER", *args, keys=args)

  def sintercard(
- self, numkeys: int, keys: List[str], limit: int = 0
+ self, numkeys: int, keys: List[KeyT], limit: int = 0
  ) -> Union[Awaitable[int], int]:
  """
  Return the cardinality of the intersect of multiple sets specified by ``keys``.
@@ -3352,7 +3352,7 @@ class SetCommands(CommandsProtocol):
  return self.execute_command("SINTERCARD", *args, keys=keys)

  def sinterstore(
- self, dest: str, keys: List, *args: List
+ self, dest: KeyT, keys: List, *args: List
  ) -> Union[Awaitable[int], int]:
  """
  Store the intersection of sets specified by ``keys`` into a new
@@ -3364,7 +3364,7 @@ class SetCommands(CommandsProtocol):
  return self.execute_command("SINTERSTORE", dest, *args)

  def sismember(
- self, name: str, value: str
+ self, name: KeyT, value: str
  ) -> Union[Awaitable[Union[Literal[0], Literal[1]]], Union[Literal[0], Literal[1]]]:
  """
  Return whether ``value`` is a member of set ``name``:
@@ -3375,7 +3375,7 @@ class SetCommands(CommandsProtocol):
  """
  return self.execute_command("SISMEMBER", name, value, keys=[name])

- def smembers(self, name: str) -> Union[Awaitable[Set], Set]:
+ def smembers(self, name: KeyT) -> Union[Awaitable[Set], Set]:
  """
  Return all members of the set ``name``

@@ -3384,7 +3384,7 @@ class SetCommands(CommandsProtocol):
  return self.execute_command("SMEMBERS", name, keys=[name])

  def smismember(
- self, name: str, values: List, *args: List
+ self, name: KeyT, values: List, *args: List
  ) -> Union[
  Awaitable[List[Union[Literal[0], Literal[1]]]],
  List[Union[Literal[0], Literal[1]]],
@@ -3400,7 +3400,7 @@ class SetCommands(CommandsProtocol):
  args = list_or_args(values, args)
  return self.execute_command("SMISMEMBER", name, *args, keys=[name])

- def smove(self, src: str, dst: str, value: str) -> Union[Awaitable[bool], bool]:
+ def smove(self, src: KeyT, dst: KeyT, value: str) -> Union[Awaitable[bool], bool]:
  """
  Move ``value`` from set ``src`` to set ``dst`` atomically

@@ -3408,7 +3408,7 @@ class SetCommands(CommandsProtocol):
  """
  return self.execute_command("SMOVE", src, dst, value)

- def spop(self, name: str, count: Optional[int] = None) -> Union[str, List, None]:
+ def spop(self, name: KeyT, count: Optional[int] = None) -> Union[str, List, None]:
  """
  Remove and return a random member of set ``name``

@@ -3418,7 +3418,7 @@ class SetCommands(CommandsProtocol):
  return self.execute_command("SPOP", name, *args)

  def srandmember(
- self, name: str, number: Optional[int] = None
+ self, name: KeyT, number: Optional[int] = None
  ) -> Union[str, List, None]:
  """
  If ``number`` is None, returns a random member of set ``name``.
@@ -3432,7 +3432,7 @@ class SetCommands(CommandsProtocol):
  args = (number is not None) and [number] or []
  return self.execute_command("SRANDMEMBER", name, *args)

- def srem(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]:
+ def srem(self, name: KeyT, *values: FieldT) -> Union[Awaitable[int], int]:
  """
  Remove ``values`` from set ``name``

@@ -3450,7 +3450,7 @@ class SetCommands(CommandsProtocol):
  return self.execute_command("SUNION", *args, keys=args)

  def sunionstore(
- self, dest: str, keys: List, *args: List
+ self, dest: KeyT, keys: List, *args: List
  ) -> Union[Awaitable[int], int]:
  """
  Store the union of sets specified by ``keys`` into a new
@@ -3484,6 +3484,28 @@ class StreamCommands(CommandsProtocol):
  """
  return self.execute_command("XACK", name, groupname, *ids)

+ def xackdel(
+ self,
+ name: KeyT,
+ groupname: GroupT,
+ *ids: StreamIdT,
+ ref_policy: Literal["KEEPREF", "DELREF", "ACKED"] = "KEEPREF",
+ ) -> ResponseT:
+ """
+ Combines the functionality of XACK and XDEL. Acknowledges the specified
+ message IDs in the given consumer group and simultaneously attempts to
+ delete the corresponding entries from the stream.
+ """
+ if not ids:
+ raise DataError("XACKDEL requires at least one message ID")
+
+ if ref_policy not in {"KEEPREF", "DELREF", "ACKED"}:
+ raise DataError("XACKDEL ref_policy must be one of: KEEPREF, DELREF, ACKED")
+
+ pieces = [name, groupname, ref_policy, "IDS", len(ids)]
+ pieces.extend(ids)
+ return self.execute_command("XACKDEL", *pieces)
+
  def xadd(
  self,
  name: KeyT,
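xackdel() above wraps the new XACKDEL command, which acknowledges and deletes in a single round trip. A usage sketch with made-up stream/group names; it assumes a Redis server that actually implements XACKDEL:

```python
import redis

r = redis.Redis()
entry_id = r.xadd("mystream", {"k": "v"})
r.xgroup_create("mystream", "grp", id="0")
r.xreadgroup("grp", "consumer-1", {"mystream": ">"}, count=1)

# Acknowledge and delete together; DELREF also drops any other groups'
# references to the entry from their pending lists.
r.xackdel("mystream", "grp", entry_id, ref_policy="DELREF")
```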
@@ -3494,6 +3516,7 @@ class StreamCommands(CommandsProtocol):
  nomkstream: bool = False,
  minid: Union[StreamIdT, None] = None,
  limit: Optional[int] = None,
+ ref_policy: Optional[Literal["KEEPREF", "DELREF", "ACKED"]] = None,
  ) -> ResponseT:
  """
  Add to a stream.
@@ -3507,6 +3530,10 @@ class StreamCommands(CommandsProtocol):
  minid: the minimum id in the stream to query.
  Can't be specified with maxlen.
  limit: specifies the maximum number of entries to retrieve
+ ref_policy: optional reference policy for consumer groups when trimming:
+ - KEEPREF (default): When trimming, preserves references in consumer groups' PEL
+ - DELREF: When trimming, removes all references from consumer groups' PEL
+ - ACKED: When trimming, only removes entries acknowledged by all consumer groups

  For more information see https://redis.io/commands/xadd
  """
@@ -3514,6 +3541,9 @@ class StreamCommands(CommandsProtocol):
  if maxlen is not None and minid is not None:
  raise DataError("Only one of ```maxlen``` or ```minid``` may be specified")

+ if ref_policy is not None and ref_policy not in {"KEEPREF", "DELREF", "ACKED"}:
+ raise DataError("XADD ref_policy must be one of: KEEPREF, DELREF, ACKED")
+
  if maxlen is not None:
  if not isinstance(maxlen, int) or maxlen < 0:
  raise DataError("XADD maxlen must be non-negative integer")
@@ -3530,6 +3560,8 @@ class StreamCommands(CommandsProtocol):
  pieces.extend([b"LIMIT", limit])
  if nomkstream:
  pieces.append(b"NOMKSTREAM")
+ if ref_policy is not None:
+ pieces.append(ref_policy)
  pieces.append(id)
  if not isinstance(fields, dict) or len(fields) == 0:
  raise DataError("XADD fields must be a non-empty dict")
@@ -3683,6 +3715,26 @@ class StreamCommands(CommandsProtocol):
  """
  return self.execute_command("XDEL", name, *ids)

+ def xdelex(
+ self,
+ name: KeyT,
+ *ids: StreamIdT,
+ ref_policy: Literal["KEEPREF", "DELREF", "ACKED"] = "KEEPREF",
+ ) -> ResponseT:
+ """
+ Extended version of XDEL that provides more control over how message entries
+ are deleted concerning consumer groups.
+ """
+ if not ids:
+ raise DataError("XDELEX requires at least one message ID")
+
+ if ref_policy not in {"KEEPREF", "DELREF", "ACKED"}:
+ raise DataError("XDELEX ref_policy must be one of: KEEPREF, DELREF, ACKED")
+
+ pieces = [name, ref_policy, "IDS", len(ids)]
+ pieces.extend(ids)
+ return self.execute_command("XDELEX", *pieces)
+
  def xgroup_create(
  self,
  name: KeyT,
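xdelex() above is the extended XDEL with the same KEEPREF/DELREF/ACKED policies. A brief sketch (hypothetical key; requires server-side XDELEX support):

```python
import redis

r = redis.Redis()
eid = r.xadd("mystream", {"k": "v"})

# ACKED: only delete entries that every consumer group has already acknowledged.
r.xdelex("mystream", eid, ref_policy="ACKED")
```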
@@ -4034,6 +4086,7 @@ class StreamCommands(CommandsProtocol):
  approximate: bool = True,
  minid: Union[StreamIdT, None] = None,
  limit: Optional[int] = None,
+ ref_policy: Optional[Literal["KEEPREF", "DELREF", "ACKED"]] = None,
  ) -> ResponseT:
  """
  Trims old messages from a stream.
@@ -4044,6 +4097,10 @@ class StreamCommands(CommandsProtocol):
  minid: the minimum id in the stream to query
  Can't be specified with maxlen.
  limit: specifies the maximum number of entries to retrieve
+ ref_policy: optional reference policy for consumer groups:
+ - KEEPREF (default): Trims entries but preserves references in consumer groups' PEL
+ - DELREF: Trims entries and removes all references from consumer groups' PEL
+ - ACKED: Only trims entries that were read and acknowledged by all consumer groups

  For more information see https://redis.io/commands/xtrim
  """
@@ -4054,6 +4111,9 @@ class StreamCommands(CommandsProtocol):
  if maxlen is None and minid is None:
  raise DataError("One of ``maxlen`` or ``minid`` must be specified")

+ if ref_policy is not None and ref_policy not in {"KEEPREF", "DELREF", "ACKED"}:
+ raise DataError("XTRIM ref_policy must be one of: KEEPREF, DELREF, ACKED")
+
  if maxlen is not None:
  pieces.append(b"MAXLEN")
  if minid is not None:
@@ -4067,6 +4127,8 @@ class StreamCommands(CommandsProtocol):
  if limit is not None:
  pieces.append(b"LIMIT")
  pieces.append(limit)
+ if ref_policy is not None:
+ pieces.append(ref_policy)

  return self.execute_command("XTRIM", name, *pieces)

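The same optional ref_policy is threaded through the trimming paths of xadd() and xtrim() above. A combined sketch (hypothetical key; the KEEPREF/DELREF/ACKED modifiers need a server that understands them):

```python
import redis

r = redis.Redis()

# Trim while adding, but only evict entries every group has acknowledged.
r.xadd("mystream", {"k": "v"}, maxlen=1000, ref_policy="ACKED")

# Explicit trim that also drops PEL references from consumer groups.
r.xtrim("mystream", maxlen=500, ref_policy="DELREF")
```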
redis/commands/search/field.py CHANGED
@@ -181,7 +181,7 @@ class VectorField(Field):

  ``name`` is the name of the field.

- ``algorithm`` can be "FLAT" or "HNSW".
+ ``algorithm`` can be "FLAT", "HNSW", or "SVS-VAMANA".

  ``attributes`` each algorithm can have specific attributes. Some of them
  are mandatory and some of them are optional. See
@@ -194,10 +194,10 @@ class VectorField(Field):
  if sort or noindex:
  raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")

- if algorithm.upper() not in ["FLAT", "HNSW"]:
+ if algorithm.upper() not in ["FLAT", "HNSW", "SVS-VAMANA"]:
  raise DataError(
- "Realtime vector indexing supporting 2 Indexing Methods:"
- "'FLAT' and 'HNSW'."
+ "Realtime vector indexing supporting 3 Indexing Methods:"
+ "'FLAT', 'HNSW', and 'SVS-VAMANA'."
  )

  attr_li = []
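With SVS-VAMANA accepted above, a vector index can be declared with the new algorithm. A sketch only: the index and field names are invented, the attribute set is the usual TYPE/DIM/DISTANCE_METRIC trio, and the server's search module must support SVS-VAMANA:

```python
from redis import Redis
from redis.commands.search.field import VectorField
from redis.commands.search.index_definition import IndexDefinition

r = Redis()
r.ft("idx:docs").create_index(
    [
        VectorField(
            "embedding",
            "SVS-VAMANA",
            {"TYPE": "FLOAT32", "DIM": 768, "DISTANCE_METRIC": "COSINE"},
        )
    ],
    definition=IndexDefinition(prefix=["doc:"]),
)
```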
redis/commands/sentinel.py CHANGED
@@ -11,16 +11,35 @@ class SentinelCommands:
  """Redis Sentinel's SENTINEL command."""
  warnings.warn(DeprecationWarning("Use the individual sentinel_* methods"))

- def sentinel_get_master_addr_by_name(self, service_name):
- """Returns a (host, port) pair for the given ``service_name``"""
- return self.execute_command("SENTINEL GET-MASTER-ADDR-BY-NAME", service_name)
-
- def sentinel_master(self, service_name):
- """Returns a dictionary containing the specified masters state."""
- return self.execute_command("SENTINEL MASTER", service_name)
+ def sentinel_get_master_addr_by_name(self, service_name, return_responses=False):
+ """
+ Returns a (host, port) pair for the given ``service_name`` when return_responses is True,
+ otherwise returns a boolean value that indicates if the command was successful.
+ """
+ return self.execute_command(
+ "SENTINEL GET-MASTER-ADDR-BY-NAME",
+ service_name,
+ once=True,
+ return_responses=return_responses,
+ )
+
+ def sentinel_master(self, service_name, return_responses=False):
+ """
+ Returns a dictionary containing the specified masters state, when return_responses is True,
+ otherwise returns a boolean value that indicates if the command was successful.
+ """
+ return self.execute_command(
+ "SENTINEL MASTER", service_name, return_responses=return_responses
+ )

  def sentinel_masters(self):
- """Returns a list of dictionaries containing each master's state."""
+ """
+ Returns a list of dictionaries containing each master's state.
+
+ Important: This function is called by the Sentinel implementation and is
+ called directly on the Redis standalone client for sentinels,
+ so it doesn't support the "once" and "return_responses" options.
+ """
  return self.execute_command("SENTINEL MASTERS")

  def sentinel_monitor(self, name, ip, port, quorum):
@@ -31,16 +50,27 @@ class SentinelCommands:
  """Remove a master from Sentinel's monitoring"""
  return self.execute_command("SENTINEL REMOVE", name)

- def sentinel_sentinels(self, service_name):
- """Returns a list of sentinels for ``service_name``"""
- return self.execute_command("SENTINEL SENTINELS", service_name)
+ def sentinel_sentinels(self, service_name, return_responses=False):
+ """
+ Returns a list of sentinels for ``service_name``, when return_responses is True,
+ otherwise returns a boolean value that indicates if the command was successful.
+ """
+ return self.execute_command(
+ "SENTINEL SENTINELS", service_name, return_responses=return_responses
+ )

  def sentinel_set(self, name, option, value):
  """Set Sentinel monitoring parameters for a given master"""
  return self.execute_command("SENTINEL SET", name, option, value)

  def sentinel_slaves(self, service_name):
- """Returns a list of slaves for ``service_name``"""
+ """
+ Returns a list of slaves for ``service_name``
+
+ Important: This function is called by the Sentinel implementation and is
+ called directly on the Redis standalone client for sentinels,
+ so it doesn't support the "once" and "return_responses" options.
+ """
  return self.execute_command("SENTINEL SLAVES", service_name)

  def sentinel_reset(self, pattern):
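The sentinel_* wrappers above now thread return_responses through to Sentinel.execute_command. A small sketch, assuming a Sentinel on localhost:26379 that monitors "mymaster":

```python
from redis.sentinel import Sentinel

sentinel = Sentinel([("localhost", 26379)])

# Backwards-compatible default: a boolean success flag.
ok = sentinel.sentinel_get_master_addr_by_name("mymaster")

# New in 6.3.0: a one-element list holding the actual reply (the master's address).
replies = sentinel.sentinel_get_master_addr_by_name("mymaster", return_responses=True)
```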
redis/connection.py CHANGED
@@ -31,6 +31,7 @@ from .exceptions import (
  ChildDeadlockedError,
  ConnectionError,
  DataError,
+ MaxConnectionsError,
  RedisError,
  ResponseError,
  TimeoutError,
@@ -378,13 +379,18 @@ class AbstractConnection(ConnectionInterface):
  "Connects to the Redis server if not already connected"
  self.connect_check_health(check_health=True)

- def connect_check_health(self, check_health: bool = True):
+ def connect_check_health(
+ self, check_health: bool = True, retry_socket_connect: bool = True
+ ):
  if self._sock:
  return
  try:
- sock = self.retry.call_with_retry(
- lambda: self._connect(), lambda error: self.disconnect(error)
- )
+ if retry_socket_connect:
+ sock = self.retry.call_with_retry(
+ lambda: self._connect(), lambda error: self.disconnect(error)
+ )
+ else:
+ sock = self._connect()
  except socket.timeout:
  raise TimeoutError("Timeout connecting to server")
  except OSError as e:
@@ -1315,6 +1321,7 @@ class ConnectionPool:
  By default, TCP connections are created unless ``connection_class``
  is specified. Use class:`.UnixDomainSocketConnection` for
  unix sockets.
+ :py:class:`~redis.SSLConnection` can be used for SSL enabled connections.

  Any additional keyword arguments are passed to the constructor of
  ``connection_class``.
@@ -1432,10 +1439,12 @@ class ConnectionPool:

  self.reset()

- def __repr__(self) -> (str, str):
+ def __repr__(self) -> str:
+ conn_kwargs = ",".join([f"{k}={v}" for k, v in self.connection_kwargs.items()])
  return (
- f"<{type(self).__module__}.{type(self).__name__}"
- f"({repr(self.connection_class(**self.connection_kwargs))})>"
+ f"<{self.__class__.__module__}.{self.__class__.__name__}"
+ f"(<{self.connection_class.__module__}.{self.connection_class.__name__}"
+ f"({conn_kwargs})>)>"
  )

  def get_protocol(self):
@@ -1560,7 +1569,7 @@
  def make_connection(self) -> "ConnectionInterface":
  "Create a new connection"
  if self._created_connections >= self.max_connections:
- raise ConnectionError("Too many connections")
+ raise MaxConnectionsError("Too many connections")
  self._created_connections += 1

  if self.cache is not None:
redis/exceptions.py CHANGED
@@ -220,7 +220,13 @@ class SlotNotCoveredError(RedisClusterException):
  pass


- class MaxConnectionsError(ConnectionError): ...
+ class MaxConnectionsError(ConnectionError):
+ """
+ Raised when a connection pool has reached its max_connections limit.
+ This indicates pool exhaustion rather than an actual connection failure.
+ """
+
+ pass


  class CrossSlotTransactionError(RedisClusterException):
redis/retry.py CHANGED
@@ -1,27 +1,27 @@
+ import abc
  import socket
  from time import sleep
- from typing import TYPE_CHECKING, Any, Callable, Iterable, Tuple, Type, TypeVar
+ from typing import TYPE_CHECKING, Any, Callable, Generic, Iterable, Tuple, Type, TypeVar

  from redis.exceptions import ConnectionError, TimeoutError

  T = TypeVar("T")
+ E = TypeVar("E", bound=Exception, covariant=True)

  if TYPE_CHECKING:
  from redis.backoff import AbstractBackoff


- class Retry:
+ class AbstractRetry(Generic[E], abc.ABC):
  """Retry a specific number of times after a failure"""

+ _supported_errors: Tuple[Type[E], ...]
+
  def __init__(
  self,
  backoff: "AbstractBackoff",
  retries: int,
- supported_errors: Tuple[Type[Exception], ...] = (
- ConnectionError,
- TimeoutError,
- socket.timeout,
- ),
+ supported_errors: Tuple[Type[E], ...],
  ):
  """
  Initialize a `Retry` object with a `Backoff` object
@@ -34,22 +34,14 @@ class Retry:
  self._retries = retries
  self._supported_errors = supported_errors

+ @abc.abstractmethod
  def __eq__(self, other: Any) -> bool:
- if not isinstance(other, Retry):
- return NotImplemented
-
- return (
- self._backoff == other._backoff
- and self._retries == other._retries
- and set(self._supported_errors) == set(other._supported_errors)
- )
+ return NotImplemented

  def __hash__(self) -> int:
  return hash((self._backoff, self._retries, frozenset(self._supported_errors)))

- def update_supported_errors(
- self, specified_errors: Iterable[Type[Exception]]
- ) -> None:
+ def update_supported_errors(self, specified_errors: Iterable[Type[E]]) -> None:
  """
  Updates the supported errors with the specified error types
  """
@@ -69,6 +61,32 @@ class Retry:
  """
  self._retries = value

+
+ class Retry(AbstractRetry[Exception]):
+ __hash__ = AbstractRetry.__hash__
+
+ def __init__(
+ self,
+ backoff: "AbstractBackoff",
+ retries: int,
+ supported_errors: Tuple[Type[Exception], ...] = (
+ ConnectionError,
+ TimeoutError,
+ socket.timeout,
+ ),
+ ):
+ super().__init__(backoff, retries, supported_errors)
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, Retry):
+ return NotImplemented
+
+ return (
+ self._backoff == other._backoff
+ and self._retries == other._retries
+ and set(self._supported_errors) == set(other._supported_errors)
+ )
+
  def call_with_retry(
  self,
  do: Callable[[], T],
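Retry is now a thin concrete subclass of the new generic AbstractRetry base (shared with redis.asyncio.retry.Retry); construction and the update_* helpers are unchanged. A minimal sketch with arbitrary backoff values:

```python
import redis
from redis.backoff import ExponentialBackoff
from redis.retry import Retry

backoff = ExponentialBackoff(cap=5, base=0.1)
retry = Retry(backoff, retries=3)
r = redis.Redis(host="localhost", port=6379, retry=retry)

# Equality is defined on the concrete subclass:
assert retry == Retry(backoff, retries=3)
```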
redis/sentinel.py CHANGED
@@ -5,8 +5,12 @@ from typing import Optional
  from redis.client import Redis
  from redis.commands import SentinelCommands
  from redis.connection import Connection, ConnectionPool, SSLConnection
- from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError
- from redis.utils import str_if_bytes
+ from redis.exceptions import (
+ ConnectionError,
+ ReadOnlyError,
+ ResponseError,
+ TimeoutError,
+ )


  class MasterNotFoundError(ConnectionError):
@@ -35,11 +39,11 @@ class SentinelManagedConnection(Connection):

  def connect_to(self, address):
  self.host, self.port = address
- super().connect()
- if self.connection_pool.check_connection:
- self.send_command("PING")
- if str_if_bytes(self.read_response()) != "PONG":
- raise ConnectionError("PING failed")
+
+ self.connect_check_health(
+ check_health=self.connection_pool.check_connection,
+ retry_socket_connect=False,
+ )

  def _connect_retry(self):
  if self._sock:
@@ -254,16 +258,27 @@ class Sentinel(SentinelCommands):
  once - If set to True, then execute the resulting command on a single
  node at random, rather than across the entire sentinel cluster.
  """
- once = bool(kwargs.get("once", False))
- if "once" in kwargs.keys():
- kwargs.pop("once")
+ once = bool(kwargs.pop("once", False))
+
+ # Check if command is supposed to return the original
+ # responses instead of boolean value.
+ return_responses = bool(kwargs.pop("return_responses", False))

  if once:
- random.choice(self.sentinels).execute_command(*args, **kwargs)
- else:
- for sentinel in self.sentinels:
- sentinel.execute_command(*args, **kwargs)
- return True
+ response = random.choice(self.sentinels).execute_command(*args, **kwargs)
+ if return_responses:
+ return [response]
+ else:
+ return True if response else False
+
+ responses = []
+ for sentinel in self.sentinels:
+ responses.append(sentinel.execute_command(*args, **kwargs))
+
+ if return_responses:
+ return responses
+
+ return all(responses)

  def __repr__(self):
  sentinel_addresses = []
redis/utils.py CHANGED
@@ -1,9 +1,10 @@
  import datetime
  import logging
  import textwrap
+ from collections.abc import Callable
  from contextlib import contextmanager
  from functools import wraps
- from typing import Any, Dict, List, Mapping, Optional, Union
+ from typing import Any, Dict, List, Mapping, Optional, TypeVar, Union

  from redis.exceptions import DataError
  from redis.typing import AbsExpiryT, EncodableT, ExpiryT
@@ -150,18 +151,21 @@ def warn_deprecated_arg_usage(
  warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)


+ C = TypeVar("C", bound=Callable)
+
+
  def deprecated_args(
  args_to_warn: list = ["*"],
  allowed_args: list = [],
  reason: str = "",
  version: str = "",
- ):
+ ) -> Callable[[C], C]:
  """
  Decorator to mark specified args of a function as deprecated.
  If '*' is in args_to_warn, all arguments will be marked as deprecated.
  """

- def decorator(func):
+ def decorator(func: C) -> C:
  @wraps(func)
  def wrapper(*args, **kwargs):
  # Get function argument names
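The deprecated_args decorator above is now annotated as Callable[[C], C], so type checkers see the wrapped function with its original signature. A hedged usage sketch; the decorated function below is invented for illustration:

```python
from typing import Optional

from redis.utils import deprecated_args

@deprecated_args(
    args_to_warn=["timeout"],
    reason="use socket_timeout instead",
    version="6.3.0",
)
def connect(host: str, timeout: Optional[int] = None) -> str:
    return host

connect("localhost", timeout=5)  # warns that 'timeout' is deprecated
```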
redis-6.2.0.dist-info/METADATA → redis-6.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: redis
- Version: 6.2.0
+ Version: 6.3.0
  Summary: Python client for Redis database and key-value store
  Project-URL: Changes, https://github.com/redis/redis-py/releases
  Project-URL: Code, https://github.com/redis/redis-py
@@ -43,7 +43,7 @@ Description-Content-Type: text/markdown
  The Python interface to the Redis key-value store.

  [![CI](https://github.com/redis/redis-py/workflows/CI/badge.svg?branch=master)](https://github.com/redis/redis-py/actions?query=workflow%3ACI+branch%3Amaster)
- [![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis-py.readthedocs.io/en/stable/)
+ [![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis.readthedocs.io/en/stable/)
  [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE)
  [![pypi](https://badge.fury.io/py/redis.svg)](https://pypi.org/project/redis/)
  [![pre-release](https://img.shields.io/github/v/release/redis/redis-py?include_prereleases&label=latest-prerelease)](https://github.com/redis/redis-py/releases)
@@ -81,7 +81,7 @@ Start a redis via docker (for Redis versions < 8.0):

  ``` bash
  docker run -p 6379:6379 -it redis/redis-stack:latest
-
+ ```
  To install redis-py, simply:

  ``` bash
@@ -249,4 +249,4 @@ Special thanks to:
  system.
  - Paul Hubbard for initial packaging support.

- [![Redis](./docs/_static/logo-redis.svg)](https://redis.io)
+ [![Redis](./docs/_static/logo-redis.svg)](https://redis.io)
redis-6.2.0.dist-info/RECORD → redis-6.3.0.dist-info/RECORD CHANGED
@@ -1,36 +1,36 @@
- redis/__init__.py,sha256=h5BepEwJtNeHNFaMJ0VdpbtFFTC8QcY5s9wqAnYG8TE,2060
- redis/backoff.py,sha256=2zR-ik5enJDsC2n2AWmE3ALSONgDLtyO4k096ZT6Txo,5275
+ redis/__init__.py,sha256=fP1FHH-BrfwDHAKlWIds9hEFqSHIi-hUer1pH_JgbAA,2112
+ redis/backoff.py,sha256=tQM6Lh2g2FjMH8iXg94br2sU9eri4mEW9FbOrMt0azs,5285
  redis/cache.py,sha256=68rJDNogvNwgdgBel6zSX9QziL11qsKIMhmvQvHvznM,9549
- redis/client.py,sha256=BrYjhRBWw7Sw3LKPNbdW0UzaQsd-y-wPR70SMgDTeEc,62758
- redis/cluster.py,sha256=_Pi_u8U9meq-WcTHV0j4u0lgOQVqmALj37wU1qekWcE,123762
- redis/connection.py,sha256=z1m1-maULRuw4KeiuKXmzL_DtFxcXiqMbKFJm_3V7ao,66905
+ redis/client.py,sha256=CGyGCMl4r4h0MUPPvPzjmF7Z0iKa7MI_LqhWlmkgHNI,62781
+ redis/cluster.py,sha256=CgKGFnprziYjsr--qWbhY--2oaaWQRbuKofi1Qr9m5c,124120
+ redis/connection.py,sha256=Nr_tCxUY6ZeiSheoDP25J5Cp525oBl5NMnQjI_faGEk,67301
  redis/crc.py,sha256=Z3kXFtkY2LdgefnQMud1xr4vG5UYvA9LCMqNMX1ywu4,729
  redis/credentials.py,sha256=GOnO3-LSW34efHaIrUbS742Mw8l70mRzF6UrKiKZsMY,1828
  redis/event.py,sha256=urOK241IdgmCQ3fq7GqXRstZ2vcXRV14bBBMdN3latk,12129
- redis/exceptions.py,sha256=46H-asqIaZ65Gc-voGzP7S39JtxdICGdHzdsT6LSMJE,5617
+ redis/exceptions.py,sha256=b3OO87gncNCRUnx1d7O57N2kkjP-feXn70fPkXHaLmQ,5789
  redis/lock.py,sha256=GrvPSxaOqKo7iAL2oi5ZUEPsOkxAXHVE_Tp1ejgO2fY,12760
  redis/ocsp.py,sha256=teYSmKnCtk6B3jJLdNYbZN4OE0mxgspt2zUPbkIQzio,11452
  redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- redis/retry.py,sha256=qJPRTARm3TYQPRNnzyjUUjGohbwtMz5KoDphCtHJ44Y,2902
- redis/sentinel.py,sha256=DBphu6uNp6ZCSaVDSVC8nFhSxG93a12DnzgGWpyeh64,14757
+ redis/retry.py,sha256=oS0nc0nYxEQaD4t95HEr1GhvhpOmnTKMnNtHn8Fqzxo,3405
+ redis/sentinel.py,sha256=DP1XtO1HRemZMamC1TFHg_hBJRv9eoQgTMlZfPYRUo8,15013
  redis/typing.py,sha256=z5JQjGkNzejEzb2y7TXct7tS5yzAfLQod9o37Mh1_Ug,1953
- redis/utils.py,sha256=q7tQJs6Tj5M91QypOwZtGnFXs2Bcji3eTt9VEwLYOeA,8386
+ redis/utils.py,sha256=vO-njeF4ntROo1OReUiKtcY72I2JcEZYA62-_ssQW50,8495
  redis/_parsers/__init__.py,sha256=gyf5dp918NuJAkWFl8sX1Z-qAvbX_40-_7YCTM6Rvjc,693
  redis/_parsers/base.py,sha256=k6n7-oTmmzAUiiZpaB6Vfjzlj_torwBsaPBEYdOTDak,9908
  redis/_parsers/commands.py,sha256=pmR4hl4u93UvCmeDgePHFc6pWDr4slrKEvCsdMmtj_M,11052
  redis/_parsers/encoders.py,sha256=X0jvTp-E4TZUlZxV5LJJ88TuVrF1vly5tuC0xjxGaSc,1734
- redis/_parsers/helpers.py,sha256=X5wkGDtuzseeCz23_t3FJpzy1ltIvh7zO1uD3cypiOs,29184
+ redis/_parsers/helpers.py,sha256=Y6n14fE0eCYbF3TBuJxhycnJ1yHKiYoAJrOCUaiWolg,29223
  redis/_parsers/hiredis.py,sha256=iUjLT5OEgD4zqF_tg3Szmg1c_73RozXyjjAFsVYKCWM,10893
  redis/_parsers/resp2.py,sha256=f22kH-_ZP2iNtOn6xOe65MSy_fJpu8OEn1u_hgeeojI,4813
  redis/_parsers/resp3.py,sha256=tiZRbyJAnObqll2LQJ57Br-3jxwQcMocV4GQE_LpC6g,9883
  redis/_parsers/socket.py,sha256=CKD8QW_wFSNlIZzxlbNduaGpiv0I8wBcsGuAIojDfJg,5403
  redis/asyncio/__init__.py,sha256=uoDD8XYVi0Kj6mcufYwLDUTQXmBRx7a0bhKF9stZr7I,1489
  redis/asyncio/client.py,sha256=6a5-txYcRMtObkb7Bfi08MKQQY01oy5NKpHAlfhIFNM,61905
- redis/asyncio/cluster.py,sha256=LNEXjBJKr9M13jGnN52BQgYX6PbojCcxT_Jix9W2k0Y,90121
- redis/asyncio/connection.py,sha256=32MXfAoa5bOj2rNw-8YKJad6kpDmcOBNz2qsZd4Ty6Q,48828
+ redis/asyncio/cluster.py,sha256=nQHttKd03Ci10btkVhZ3jm8M9YzMGoOPhIJBgyAOGyw,90498
+ redis/asyncio/connection.py,sha256=D28OecfufSf6c2gJ8UhJhorhWMpHeFHxxIaWxvvQHoc,49197
  redis/asyncio/lock.py,sha256=GxgV6EsyKpMjh74KtaOPxh4fNPuwApz6Th46qhvrAws,12801
- redis/asyncio/retry.py,sha256=8carxJLme2f0frB9Z0wU3mHqKzwqzGAGb2TMEtaaMvo,2482
- redis/asyncio/sentinel.py,sha256=H7N_hvdATojwY06aH1AawFV-05AImqtOSAq0xKElbbk,14636
+ redis/asyncio/retry.py,sha256=Ikm0rsvnFItracA89DdPcejLqb_Sr4QBz73Ow_LUmwU,1880
+ redis/asyncio/sentinel.py,sha256=Ppk-jlTubcHpa0lvinZ1pPTtQ5rFHXZkkaCZ7G_TCQs,14868
  redis/asyncio/utils.py,sha256=31xFzXczDgSRyf6hSjiwue1eDQ_XlP_OJdp5dKxW_aE,718
  redis/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  redis/auth/err.py,sha256=WYkbuDIzwp1S-eAvsya6QMlO6g9QIXbzMITOsTWX0xk,694
@@ -39,10 +39,10 @@ redis/auth/token.py,sha256=qYwAgxFW3S93QDUqp1BTsj7Pj9ZohnixGeOX0s7AsjY,3317
  redis/auth/token_manager.py,sha256=ShBsYXiBZBJBOMB_Y-pXfLwEOAmc9s1okaCECinNZ7g,12018
  redis/commands/__init__.py,sha256=cTUH-MGvaLYS0WuoytyqtN1wniw2A1KbkUXcpvOSY3I,576
  redis/commands/cluster.py,sha256=vdWdpl4mP51oqfYBZHg5CUXt6jPaNp7aCLHyTieDrt8,31248
- redis/commands/core.py,sha256=lIi3DUP2kPdBC_RXrSTcUpjI_ORL9nKck9YayWIF_Pc,238242
+ redis/commands/core.py,sha256=RjVbTxe_vfnraVOqREH6ofNU2LMX8-ZGSAzd5g3ypvE,241132
  redis/commands/helpers.py,sha256=VCoPdBMCr4wxdWBw1EB9R7ZBbQM0exAG1kws4XwsCII,3318
  redis/commands/redismodules.py,sha256=-kLM4RBklDhNh-MXCra81ZTSstIQ-ulRab6v0dYUTdA,2573
- redis/commands/sentinel.py,sha256=hRcIQ9x9nEkdcCsJzo6Ves6vk-3tsfQqfJTT_v3oLY0,4110
+ redis/commands/sentinel.py,sha256=Q1Xuw7qXA0YRZXGlIKsuOtah8UfF0QnkLywOTRvjiMY,5299
  redis/commands/bf/__init__.py,sha256=qk4DA9KsMiP4WYqYeP1T5ScBwctsVtlLyMhrYIyq1Zc,8019
  redis/commands/bf/commands.py,sha256=xeKt8E7G8HB-l922J0DLg07CEIZTVNGx_2Lfyw1gIck,21283
  redis/commands/bf/info.py,sha256=_OB2v_hAPI9mdVNiBx8jUtH2MhMoct9ZRm-e8In6wQo,3355
@@ -57,7 +57,7 @@ redis/commands/search/aggregation.py,sha256=CcZSZyquLWLrcSblwgt-bSyMvm-TQS9B7N8Q
  redis/commands/search/commands.py,sha256=ozyF6YgCiMArhb6ScXLPy49hnJwm4CGK4vrJRwSeB-I,38413
  redis/commands/search/dialect.py,sha256=-7M6kkr33x0FkMtKmUsbeRAE6qxLUbqdJCqIo0UKIXo,105
  redis/commands/search/document.py,sha256=g2R-PRgq-jN33_GLXzavvse4cpIHBMfjPfPK7tnE9Gc,413
- redis/commands/search/field.py,sha256=ZWHYTtrLi-zZojohqXoidfllxP0SiadBW6hnGkBw7mM,5891
+ redis/commands/search/field.py,sha256=g9I1LHrVJKO1KtiUwotxrQvpg89e-sx26oClHuaKTn8,5935
  redis/commands/search/index_definition.py,sha256=VL2CMzjxN0HEIaTn88evnHX1fCEmytbik4vAmiiYSC8,2489
  redis/commands/search/profile_information.py,sha256=w9SbMiHbcZ1TpsZMe8cMIyO1hGkm5GhnZ_Gqg1feLtc,249
  redis/commands/search/query.py,sha256=MbSs-cY7hG1OEkO-i6LJ_Ui1D3d2VyDTXPrmb-rty7w,12199
@@ -72,7 +72,7 @@ redis/commands/timeseries/utils.py,sha256=NLwSOS5Dz9N8dYQSzEyBIvrItOWwfQ0xgDj8un
  redis/commands/vectorset/__init__.py,sha256=_fM0UdYjuzs8YWIUjQGH9QX5FwI0So8_D-5ALWWrWFc,1322
  redis/commands/vectorset/commands.py,sha256=7CvQNFvkXuG3XPhHJ82y_oBYJwewRFz84aEi3OCH4Rw,12495
  redis/commands/vectorset/utils.py,sha256=N-x0URyg76XC39CNfBym6FkFCVgm5NthzWKBnc2H0Xc,2981
- redis-6.2.0.dist-info/METADATA,sha256=nU7-kjJL_I8KHMRNFmLBeS9tWPwTPOw2_oQ8dEBfSPM,10783
- redis-6.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- redis-6.2.0.dist-info/licenses/LICENSE,sha256=pXslClvwPXr-VbdAYzE_Ktt7ANVGwKsUmok5gzP-PMg,1074
- redis-6.2.0.dist-info/RECORD,,
+ redis-6.3.0.dist-info/METADATA,sha256=9Ji4pvCT4zk3sXoCl10z2-Dz_MtyRWsrFqDbirZ6FiQ,10784
+ redis-6.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ redis-6.3.0.dist-info/licenses/LICENSE,sha256=pXslClvwPXr-VbdAYzE_Ktt7ANVGwKsUmok5gzP-PMg,1074
+ redis-6.3.0.dist-info/RECORD,,