redis 5.3.0b4__py3-none-any.whl → 6.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. redis/__init__.py +2 -11
  2. redis/_parsers/base.py +14 -2
  3. redis/asyncio/client.py +21 -13
  4. redis/asyncio/cluster.py +79 -56
  5. redis/asyncio/connection.py +40 -11
  6. redis/asyncio/lock.py +26 -5
  7. redis/asyncio/sentinel.py +9 -1
  8. redis/asyncio/utils.py +1 -1
  9. redis/auth/token.py +6 -2
  10. redis/backoff.py +15 -0
  11. redis/client.py +80 -59
  12. redis/cluster.py +114 -52
  13. redis/commands/cluster.py +1 -11
  14. redis/commands/core.py +218 -206
  15. redis/commands/helpers.py +0 -70
  16. redis/commands/redismodules.py +0 -20
  17. redis/commands/search/aggregation.py +3 -1
  18. redis/commands/search/commands.py +41 -14
  19. redis/commands/search/dialect.py +3 -0
  20. redis/commands/search/profile_information.py +14 -0
  21. redis/commands/search/query.py +5 -1
  22. redis/connection.py +48 -23
  23. redis/exceptions.py +4 -1
  24. redis/lock.py +24 -4
  25. redis/ocsp.py +2 -1
  26. redis/sentinel.py +1 -1
  27. redis/typing.py +1 -1
  28. redis/utils.py +107 -1
  29. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info}/METADATA +57 -23
  30. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info}/RECORD +33 -40
  31. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info}/WHEEL +1 -2
  32. redis/commands/graph/__init__.py +0 -263
  33. redis/commands/graph/commands.py +0 -313
  34. redis/commands/graph/edge.py +0 -91
  35. redis/commands/graph/exceptions.py +0 -3
  36. redis/commands/graph/execution_plan.py +0 -211
  37. redis/commands/graph/node.py +0 -88
  38. redis/commands/graph/path.py +0 -78
  39. redis/commands/graph/query_result.py +0 -588
  40. redis-5.3.0b4.dist-info/top_level.txt +0 -1
  41. /redis/commands/search/{indexDefinition.py → index_definition.py} +0 -0
  42. {redis-5.3.0b4.dist-info → redis-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
redis/asyncio/lock.py CHANGED
@@ -1,14 +1,18 @@
 import asyncio
+import logging
 import threading
 import uuid
 from types import SimpleNamespace
 from typing import TYPE_CHECKING, Awaitable, Optional, Union

 from redis.exceptions import LockError, LockNotOwnedError
+from redis.typing import Number

 if TYPE_CHECKING:
     from redis.asyncio import Redis, RedisCluster

+logger = logging.getLogger(__name__)
+

 class Lock:
     """
@@ -82,8 +86,9 @@ class Lock:
         timeout: Optional[float] = None,
         sleep: float = 0.1,
         blocking: bool = True,
-        blocking_timeout: Optional[float] = None,
+        blocking_timeout: Optional[Number] = None,
         thread_local: bool = True,
+        raise_on_release_error: bool = True,
     ):
         """
         Create a new Lock instance named ``name`` using the Redis client
@@ -127,6 +132,11 @@ class Lock:
         thread-1 would see the token value as "xyz" and would be
         able to successfully release the thread-2's lock.

+        ``raise_on_release_error`` indicates whether to raise an exception when
+        the lock is no longer owned when exiting the context manager. By default,
+        this is True, meaning an exception will be raised. If False, the warning
+        will be logged and the exception will be suppressed.
+
         In some use cases it's necessary to disable thread local storage. For
         example, if you have code where one thread acquires a lock and passes
         that lock instance to a worker thread to release later. If thread
@@ -143,6 +153,7 @@ class Lock:
         self.blocking_timeout = blocking_timeout
         self.thread_local = bool(thread_local)
         self.local = threading.local() if self.thread_local else SimpleNamespace()
+        self.raise_on_release_error = raise_on_release_error
         self.local.token = None
         self.register_scripts()

@@ -162,12 +173,19 @@ class Lock:
         raise LockError("Unable to acquire lock within the time specified")

     async def __aexit__(self, exc_type, exc_value, traceback):
-        await self.release()
+        try:
+            await self.release()
+        except LockError:
+            if self.raise_on_release_error:
+                raise
+            logger.warning(
+                "Lock was unlocked or no longer owned when exiting context manager."
+            )

     async def acquire(
         self,
         blocking: Optional[bool] = None,
-        blocking_timeout: Optional[float] = None,
+        blocking_timeout: Optional[Number] = None,
         token: Optional[Union[str, bytes]] = None,
     ):
         """
@@ -249,7 +267,10 @@ class Lock:
         """Releases the already acquired lock"""
         expected_token = self.local.token
         if expected_token is None:
-            raise LockError("Cannot release an unlocked lock")
+            raise LockError(
+                "Cannot release a lock that's not owned or is already unlocked.",
+                lock_name=self.name,
+            )
         self.local.token = None
         return self.do_release(expected_token)

@@ -262,7 +283,7 @@ class Lock:
             raise LockNotOwnedError("Cannot release a lock that's no longer owned")

     def extend(
-        self, additional_time: float, replace_ttl: bool = False
+        self, additional_time: Number, replace_ttl: bool = False
     ) -> Awaitable[bool]:
         """
         Adds more time to an already acquired lock.
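
To illustrate the new behavior, here is a minimal sketch (not taken from the package itself) of the asyncio lock used as a context manager with ``raise_on_release_error=False``; the connection settings and key name are placeholders:

import asyncio

from redis.asyncio import Redis
from redis.asyncio.lock import Lock


async def main() -> None:
    client = Redis(host="localhost", port=6379)  # placeholder settings

    # If the lock times out while the block is still running, releasing it on
    # exit fails. With raise_on_release_error=False the failure is logged as a
    # warning instead of raising LockNotOwnedError/LockError.
    async with Lock(client, "demo-lock", timeout=5, raise_on_release_error=False):
        await asyncio.sleep(10)  # deliberately outlives the lock's timeout

    await client.aclose()


asyncio.run(main())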
redis/asyncio/sentinel.py CHANGED
@@ -198,6 +198,7 @@ class Sentinel(AsyncSentinelCommands):
         sentinels,
         min_other_sentinels=0,
         sentinel_kwargs=None,
+        force_master_ip=None,
         **connection_kwargs,
     ):
         # if sentinel_kwargs isn't defined, use the socket_* options from
@@ -214,6 +215,7 @@ class Sentinel(AsyncSentinelCommands):
         ]
         self.min_other_sentinels = min_other_sentinels
         self.connection_kwargs = connection_kwargs
+        self._force_master_ip = force_master_ip

     async def execute_command(self, *args, **kwargs):
         """
@@ -277,7 +279,13 @@ class Sentinel(AsyncSentinelCommands):
                     sentinel,
                     self.sentinels[0],
                 )
-                return state["ip"], state["port"]
+
+                ip = (
+                    self._force_master_ip
+                    if self._force_master_ip is not None
+                    else state["ip"]
+                )
+                return ip, state["port"]

         error_info = ""
         if len(collected_errors) > 0:
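
As a rough usage sketch (the deployment details are assumed, not taken from the diff), ``force_master_ip`` overrides the master address that Sentinel reports, which helps when the advertised IP is not routable from the client:

import asyncio

from redis.asyncio.sentinel import Sentinel


async def main() -> None:
    # Sentinel addresses, the forced IP, and the service name are placeholders.
    sentinel = Sentinel(
        [("sentinel-1", 26379), ("sentinel-2", 26379)],
        force_master_ip="10.0.0.10",
    )

    # discover_master() returns the forced IP together with the port that
    # Sentinel reports for the master.
    ip, port = await sentinel.discover_master("mymaster")
    print(ip, port)


asyncio.run(main())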
redis/asyncio/utils.py CHANGED
@@ -16,7 +16,7 @@ def from_url(url, **kwargs):
     return Redis.from_url(url, **kwargs)


-class pipeline:
+class pipeline:  # noqa: N801
    def __init__(self, redis_obj: "Redis"):
        self.p: "Pipeline" = redis_obj.pipeline()

redis/auth/token.py CHANGED
@@ -1,7 +1,6 @@
 from abc import ABC, abstractmethod
 from datetime import datetime, timezone

-import jwt
 from redis.auth.err import InvalidTokenSchemaErr


@@ -77,10 +76,15 @@ class SimpleToken(TokenInterface):


 class JWToken(TokenInterface):
-
     REQUIRED_FIELDS = {"exp"}

     def __init__(self, token: str):
+        try:
+            import jwt
+        except ImportError as ie:
+            raise ImportError(
+                f"The PyJWT library is required for {self.__class__.__name__}.",
+            ) from ie
         self._value = token
         self._decoded = jwt.decode(
             self._value,
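
The practical effect of moving the ``jwt`` import into ``JWToken.__init__`` is that PyJWT becomes optional: importing ``redis.auth.token`` no longer requires it, and only constructing a ``JWToken`` does. A small sketch (the token string is a placeholder):

# Importing the module works even without PyJWT installed.
from redis.auth.token import JWToken

try:
    # With PyJWT installed, this expects a real encoded JWT; without it,
    # the constructor raises the descriptive ImportError added above.
    token = JWToken("<encoded-jwt>")
except ImportError as err:
    print(f"PyJWT is not installed: {err}")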
redis/backoff.py CHANGED
@@ -110,5 +110,20 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
         return self._previous_backoff


+class ExponentialWithJitterBackoff(AbstractBackoff):
+    """Exponential backoff upon failure, with jitter"""
+
+    def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
+        """
+        `cap`: maximum backoff time in seconds
+        `base`: base backoff time in seconds
+        """
+        self._cap = cap
+        self._base = base
+
+    def compute(self, failures: int) -> float:
+        return min(self._cap, random.random() * self._base * 2**failures)
+
+
 def default_backoff():
     return EqualJitterBackoff()
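
A short sketch of wiring the new backoff class into a retry policy; the cap/base values and connection settings are illustrative, and which errors actually get retried still depends on the client's ``retry_on_error``/``retry_on_timeout`` configuration:

from redis import Redis
from redis.backoff import ExponentialWithJitterBackoff
from redis.retry import Retry

# Sleep min(cap, random() * base * 2**failures) seconds between attempts.
retry = Retry(ExponentialWithJitterBackoff(cap=1.0, base=0.01), retries=3)

client = Redis(host="localhost", port=6379, retry=retry)  # placeholder settings
client.ping()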
redis/client.py CHANGED
@@ -4,7 +4,17 @@ import threading
 import time
 import warnings
 from itertools import chain
-from typing import Any, Callable, Dict, List, Optional, Type, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Type,
+    Union,
+)

 from redis._parsers.encoders import Encoder
 from redis._parsers.helpers import (
@@ -53,6 +63,11 @@ from redis.utils import (
     str_if_bytes,
 )

+if TYPE_CHECKING:
+    import ssl
+
+    import OpenSSL
+
 SYM_EMPTY = b""
 EMPTY_RESPONSE = "EMPTY_RESPONSE"

@@ -175,47 +190,47 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):

     def __init__(
         self,
-        host="localhost",
-        port=6379,
-        db=0,
-        password=None,
-        socket_timeout=None,
-        socket_connect_timeout=None,
-        socket_keepalive=None,
-        socket_keepalive_options=None,
-        connection_pool=None,
-        unix_socket_path=None,
-        encoding="utf-8",
-        encoding_errors="strict",
-        charset=None,
-        errors=None,
-        decode_responses=False,
-        retry_on_timeout=False,
-        retry_on_error=None,
-        ssl=False,
-        ssl_keyfile=None,
-        ssl_certfile=None,
-        ssl_cert_reqs="required",
-        ssl_ca_certs=None,
-        ssl_ca_path=None,
-        ssl_ca_data=None,
-        ssl_check_hostname=False,
-        ssl_password=None,
-        ssl_validate_ocsp=False,
-        ssl_validate_ocsp_stapled=False,
-        ssl_ocsp_context=None,
-        ssl_ocsp_expected_cert=None,
-        ssl_min_version=None,
-        ssl_ciphers=None,
-        max_connections=None,
-        single_connection_client=False,
-        health_check_interval=0,
-        client_name=None,
-        lib_name="redis-py",
-        lib_version=get_lib_version(),
-        username=None,
-        retry=None,
-        redis_connect_func=None,
+        host: str = "localhost",
+        port: int = 6379,
+        db: int = 0,
+        password: Optional[str] = None,
+        socket_timeout: Optional[float] = None,
+        socket_connect_timeout: Optional[float] = None,
+        socket_keepalive: Optional[bool] = None,
+        socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
+        connection_pool: Optional[ConnectionPool] = None,
+        unix_socket_path: Optional[str] = None,
+        encoding: str = "utf-8",
+        encoding_errors: str = "strict",
+        charset: Optional[str] = None,
+        errors: Optional[str] = None,
+        decode_responses: bool = False,
+        retry_on_timeout: bool = False,
+        retry_on_error: Optional[List[Type[Exception]]] = None,
+        ssl: bool = False,
+        ssl_keyfile: Optional[str] = None,
+        ssl_certfile: Optional[str] = None,
+        ssl_cert_reqs: str = "required",
+        ssl_ca_certs: Optional[str] = None,
+        ssl_ca_path: Optional[str] = None,
+        ssl_ca_data: Optional[str] = None,
+        ssl_check_hostname: bool = False,
+        ssl_password: Optional[str] = None,
+        ssl_validate_ocsp: bool = False,
+        ssl_validate_ocsp_stapled: bool = False,
+        ssl_ocsp_context: Optional["OpenSSL.SSL.Context"] = None,
+        ssl_ocsp_expected_cert: Optional[str] = None,
+        ssl_min_version: Optional["ssl.TLSVersion"] = None,
+        ssl_ciphers: Optional[str] = None,
+        max_connections: Optional[int] = None,
+        single_connection_client: bool = False,
+        health_check_interval: int = 0,
+        client_name: Optional[str] = None,
+        lib_name: Optional[str] = "redis-py",
+        lib_version: Optional[str] = get_lib_version(),
+        username: Optional[str] = None,
+        retry: Optional[Retry] = None,
+        redis_connect_func: Optional[Callable[[], None]] = None,
         credential_provider: Optional[CredentialProvider] = None,
         protocol: Optional[int] = 2,
         cache: Optional[CacheInterface] = None,
@@ -351,7 +366,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         self.connection = None
         self._single_connection_client = single_connection_client
         if self._single_connection_client:
-            self.connection = self.connection_pool.get_connection("_")
+            self.connection = self.connection_pool.get_connection()
             self._event_dispatcher.dispatch(
                 AfterSingleConnectionInstantiationEvent(
                     self.connection, ClientType.SYNC, self.single_connection_lock
@@ -458,6 +473,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         blocking_timeout: Optional[float] = None,
         lock_class: Union[None, Any] = None,
         thread_local: bool = True,
+        raise_on_release_error: bool = True,
     ):
         """
         Return a new Lock object using key ``name`` that mimics
@@ -504,6 +520,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         thread-1 would see the token value as "xyz" and would be
         able to successfully release the thread-2's lock.

+        ``raise_on_release_error`` indicates whether to raise an exception when
+        the lock is no longer owned when exiting the context manager. By default,
+        this is True, meaning an exception will be raised. If False, the warning
+        will be logged and the exception will be suppressed.
+
         In some use cases it's necessary to disable thread local storage. For
         example, if you have code where one thread acquires a lock and passes
         that lock instance to a worker thread to release later. If thread
@@ -521,6 +542,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
             blocking=blocking,
             blocking_timeout=blocking_timeout,
             thread_local=thread_local,
+            raise_on_release_error=raise_on_release_error,
         )

     def pubsub(self, **kwargs):
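
The same ``raise_on_release_error`` flag is forwarded by ``Redis.lock()``; a minimal synchronous sketch with placeholder settings:

import time

from redis import Redis

client = Redis(host="localhost", port=6379)  # placeholder settings

# If the lock expires before the block finishes, the context manager logs a
# warning on exit instead of raising, because raise_on_release_error=False.
with client.lock("demo-lock", timeout=1, raise_on_release_error=False):
    time.sleep(2)  # deliberately outlives the lock timeout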
@@ -548,9 +570,12 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         self.close()

     def __del__(self):
-        self.close()
+        try:
+            self.close()
+        except Exception:
+            pass

-    def close(self):
+    def close(self) -> None:
         # In case a connection property does not yet exist
         # (due to a crash earlier in the Redis() constructor), return
         # immediately as there is nothing to clean-up.
@@ -593,7 +618,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         """Execute a command and return a parsed response"""
         pool = self.connection_pool
         command_name = args[0]
-        conn = self.connection or pool.get_connection(command_name, **options)
+        conn = self.connection or pool.get_connection()

         if self._single_connection_client:
             self.single_connection_lock.acquire()
@@ -652,7 +677,7 @@ class Monitor:

     def __init__(self, connection_pool):
         self.connection_pool = connection_pool
-        self.connection = self.connection_pool.get_connection("MONITOR")
+        self.connection = self.connection_pool.get_connection()

     def __enter__(self):
         self.connection.send_command("MONITOR")
@@ -825,9 +850,7 @@ class PubSub:
         # subscribed to one or more channels

         if self.connection is None:
-            self.connection = self.connection_pool.get_connection(
-                "pubsub", self.shard_hint
-            )
+            self.connection = self.connection_pool.get_connection()
             # register a callback that re-subscribes to any channels we
             # were listening to when we were disconnected
             self.connection.register_connect_callback(self.on_connect)
@@ -937,7 +960,7 @@ class PubSub:
                 "did you forget to call subscribe() or psubscribe()?"
             )

-        if conn.health_check_interval and time.time() > conn.next_health_check:
+        if conn.health_check_interval and time.monotonic() > conn.next_health_check:
             conn.send_command("PING", self.HEALTH_CHECK_MESSAGE, check_health=False)
             self.health_check_response_counter += 1

@@ -1087,12 +1110,12 @@ class PubSub:
         """
         if not self.subscribed:
             # Wait for subscription
-            start_time = time.time()
+            start_time = time.monotonic()
             if self.subscribed_event.wait(timeout) is True:
                 # The connection was subscribed during the timeout time frame.
                 # The timeout should be adjusted based on the time spent
                 # waiting for the subscription
-                time_spent = time.time() - start_time
+                time_spent = time.monotonic() - start_time
                 timeout = max(0.0, timeout - time_spent)
             else:
                 # The connection isn't subscribed to any channels or patterns,
@@ -1382,7 +1405,7 @@ class Pipeline(Redis):
         conn = self.connection
         # if this is the first call, we need a connection
         if not conn:
-            conn = self.connection_pool.get_connection(command_name, self.shard_hint)
+            conn = self.connection_pool.get_connection()
             self.connection = conn

         return conn.retry.call_with_retry(
@@ -1501,8 +1524,7 @@ class Pipeline(Redis):
     def annotate_exception(self, exception, number, command):
         cmd = " ".join(map(safe_str, command))
         msg = (
-            f"Command # {number} ({cmd}) of pipeline "
-            f"caused error: {exception.args[0]}"
+            f"Command # {number} ({cmd}) of pipeline caused error: {exception.args[0]}"
         )
         exception.args = (msg,) + exception.args[1:]

@@ -1551,11 +1573,10 @@ class Pipeline(Redis):
             conn.retry_on_error is None
             or isinstance(error, tuple(conn.retry_on_error)) is False
         ):
-
             self.reset()
             raise error

-    def execute(self, raise_on_error=True):
+    def execute(self, raise_on_error: bool = True) -> List[Any]:
         """Execute all the commands in the current pipeline"""
         stack = self.command_stack
         if not stack and not self.watching:
@@ -1569,7 +1590,7 @@ class Pipeline(Redis):

         conn = self.connection
         if not conn:
-            conn = self.connection_pool.get_connection("MULTI", self.shard_hint)
+            conn = self.connection_pool.get_connection()
             # assign to self.connection so reset() releases the connection
             # back to the pool after we're done
             self.connection = conn
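
For downstream code that checks out raw connections, the call sites above suggest that ``ConnectionPool.get_connection()`` is now expected to be called without a command name or shard hint; a minimal sketch under that assumption, with placeholder settings:

from redis import ConnectionPool

pool = ConnectionPool(host="localhost", port=6379)  # placeholder settings

conn = pool.get_connection()  # no command name / shard hint arguments
try:
    conn.send_command("PING")
    print(conn.read_response())
finally:
    pool.release(conn)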