redis 5.3.0b4__py3-none-any.whl → 6.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +2 -11
- redis/_parsers/base.py +14 -2
- redis/_parsers/resp3.py +2 -2
- redis/asyncio/client.py +103 -83
- redis/asyncio/cluster.py +147 -102
- redis/asyncio/connection.py +77 -24
- redis/asyncio/lock.py +26 -5
- redis/asyncio/retry.py +12 -0
- redis/asyncio/sentinel.py +11 -1
- redis/asyncio/utils.py +1 -1
- redis/auth/token.py +6 -2
- redis/backoff.py +15 -0
- redis/client.py +160 -138
- redis/cluster.py +211 -82
- redis/commands/cluster.py +1 -11
- redis/commands/core.py +219 -207
- redis/commands/helpers.py +19 -76
- redis/commands/json/__init__.py +1 -1
- redis/commands/redismodules.py +5 -17
- redis/commands/search/aggregation.py +3 -1
- redis/commands/search/commands.py +43 -16
- redis/commands/search/dialect.py +3 -0
- redis/commands/search/profile_information.py +14 -0
- redis/commands/search/query.py +5 -1
- redis/commands/timeseries/__init__.py +1 -1
- redis/commands/vectorset/__init__.py +46 -0
- redis/commands/vectorset/commands.py +367 -0
- redis/commands/vectorset/utils.py +94 -0
- redis/connection.py +89 -33
- redis/exceptions.py +4 -1
- redis/lock.py +24 -4
- redis/ocsp.py +2 -1
- redis/retry.py +12 -0
- redis/sentinel.py +3 -1
- redis/typing.py +1 -1
- redis/utils.py +114 -1
- {redis-5.3.0b4.dist-info → redis-6.0.0.dist-info}/METADATA +57 -23
- redis-6.0.0.dist-info/RECORD +78 -0
- {redis-5.3.0b4.dist-info → redis-6.0.0.dist-info}/WHEEL +1 -2
- redis/commands/graph/__init__.py +0 -263
- redis/commands/graph/commands.py +0 -313
- redis/commands/graph/edge.py +0 -91
- redis/commands/graph/exceptions.py +0 -3
- redis/commands/graph/execution_plan.py +0 -211
- redis/commands/graph/node.py +0 -88
- redis/commands/graph/path.py +0 -78
- redis/commands/graph/query_result.py +0 -588
- redis-5.3.0b4.dist-info/RECORD +0 -82
- redis-5.3.0b4.dist-info/top_level.txt +0 -1
- /redis/commands/search/{indexDefinition.py → index_definition.py} +0 -0
- {redis-5.3.0b4.dist-info → redis-6.0.0.dist-info/licenses}/LICENSE +0 -0
redis/connection.py
CHANGED
@@ -1,15 +1,14 @@
 import copy
 import os
 import socket
-import ssl
 import sys
 import threading
+import time
 import weakref
 from abc import abstractmethod
 from itertools import chain
 from queue import Empty, Full, LifoQueue
-from time import time
-from typing import Any, Callable, Dict, List, Optional, Type, Union
+from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
 from urllib.parse import parse_qs, unquote, urlparse

 from redis.cache import (
@@ -42,12 +41,18 @@ from .utils import (
     HIREDIS_AVAILABLE,
     SSL_AVAILABLE,
     compare_versions,
+    deprecated_args,
     ensure_string,
     format_error_message,
     get_lib_version,
     str_if_bytes,
 )

+if SSL_AVAILABLE:
+    import ssl
+else:
+    ssl = None
+
 if HIREDIS_AVAILABLE:
     import hiredis

@@ -371,6 +376,9 @@ class AbstractConnection(ConnectionInterface):

     def connect(self):
         "Connects to the Redis server if not already connected"
+        self.connect_check_health(check_health=True)
+
+    def connect_check_health(self, check_health: bool = True):
         if self._sock:
             return
         try:
@@ -386,7 +394,7 @@ class AbstractConnection(ConnectionInterface):
         try:
             if self.redis_connect_func is None:
                 # Use the default on_connect function
-                self.on_connect()
+                self.on_connect_check_health(check_health=check_health)
             else:
                 # Use the passed function redis_connect_func
                 self.redis_connect_func(self)
@@ -416,6 +424,9 @@ class AbstractConnection(ConnectionInterface):
         return format_error_message(self._host_error(), exception)

     def on_connect(self):
+        self.on_connect_check_health(check_health=True)
+
+    def on_connect_check_health(self, check_health: bool = True):
         "Initialize the connection, authenticate and select a database"
         self._parser.on_connect(self)
         parser = self._parser
@@ -439,7 +450,11 @@ class AbstractConnection(ConnectionInterface):
                     self._parser.on_connect(self)
                 if len(auth_args) == 1:
                     auth_args = ["default", auth_args[0]]
-                self.send_command("HELLO", self.protocol, "AUTH", *auth_args)
+                # avoid checking health here -- PING will fail if we try
+                # to check the health prior to the AUTH
+                self.send_command(
+                    "HELLO", self.protocol, "AUTH", *auth_args, check_health=False
+                )
                 self.handshake_metadata = self.read_response()
                 # if response.get(b"proto") != self.protocol and response.get(
                 #     "proto"
@@ -470,7 +485,7 @@ class AbstractConnection(ConnectionInterface):
                 # update cluster exception classes
                 self._parser.EXCEPTION_CLASSES = parser.EXCEPTION_CLASSES
                 self._parser.on_connect(self)
-            self.send_command("HELLO", self.protocol)
+            self.send_command("HELLO", self.protocol, check_health=check_health)
             self.handshake_metadata = self.read_response()
             if (
                 self.handshake_metadata.get(b"proto") != self.protocol
@@ -480,28 +495,45 @@ class AbstractConnection(ConnectionInterface):

         # if a client_name is given, set it
         if self.client_name:
-            self.send_command("CLIENT", "SETNAME", self.client_name)
+            self.send_command(
+                "CLIENT",
+                "SETNAME",
+                self.client_name,
+                check_health=check_health,
+            )
             if str_if_bytes(self.read_response()) != "OK":
                 raise ConnectionError("Error setting client name")

         try:
             # set the library name and version
             if self.lib_name:
-                self.send_command("CLIENT", "SETINFO", "LIB-NAME", self.lib_name)
+                self.send_command(
+                    "CLIENT",
+                    "SETINFO",
+                    "LIB-NAME",
+                    self.lib_name,
+                    check_health=check_health,
+                )
                 self.read_response()
         except ResponseError:
             pass

         try:
             if self.lib_version:
-                self.send_command("CLIENT", "SETINFO", "LIB-VER", self.lib_version)
+                self.send_command(
+                    "CLIENT",
+                    "SETINFO",
+                    "LIB-VER",
+                    self.lib_version,
+                    check_health=check_health,
+                )
                 self.read_response()
         except ResponseError:
             pass

         # if a database is specified, switch to it
         if self.db:
-            self.send_command("SELECT", self.db)
+            self.send_command("SELECT", self.db, check_health=check_health)
             if str_if_bytes(self.read_response()) != "OK":
                 raise ConnectionError("Invalid Database")

@@ -537,13 +569,13 @@ class AbstractConnection(ConnectionInterface):

     def check_health(self):
         """Check the health of the connection with a PING/PONG"""
-        if self.health_check_interval and time() > self.next_health_check:
+        if self.health_check_interval and time.monotonic() > self.next_health_check:
             self.retry.call_with_retry(self._send_ping, self._ping_failed)

     def send_packed_command(self, command, check_health=True):
         """Send an already packed command to the Redis server"""
         if not self._sock:
-            self.connect()
+            self.connect_check_health(check_health=False)
         # guard against health check recursion
         if check_health:
             self.check_health()
@@ -617,9 +649,7 @@ class AbstractConnection(ConnectionInterface):
         except OSError as e:
             if disconnect_on_error:
                 self.disconnect()
-            raise ConnectionError(
-                f"Error while reading from {host_error}" f" : {e.args}"
-            )
+            raise ConnectionError(f"Error while reading from {host_error} : {e.args}")
         except BaseException:
             # Also by default close in case of BaseException. A lot of code
             # relies on this behaviour when doing Command/Response pairs.
@@ -629,7 +659,7 @@ class AbstractConnection(ConnectionInterface):
             raise

         if self.health_check_interval:
-            self.next_health_check = time() + self.health_check_interval
+            self.next_health_check = time.monotonic() + self.health_check_interval

         if isinstance(response, ResponseError):
             try:
@@ -672,7 +702,7 @@ class AbstractConnection(ConnectionInterface):
             output.append(SYM_EMPTY.join(pieces))
         return output

-    def get_protocol(self) -> int:
+    def get_protocol(self) -> Union[int, str]:
         return self.protocol

     @property
@@ -757,6 +787,10 @@ class Connection(AbstractConnection):
             except OSError as _:
                 err = _
                 if sock is not None:
+                    try:
+                        sock.shutdown(socket.SHUT_RDWR)  # ensure a clean close
+                    except OSError:
+                        pass
                     sock.close()

         if err is not None:
@@ -904,9 +938,11 @@ class CacheProxyConnection(ConnectionInterface):
             and self._cache.get(self._current_command_cache_key).status
             != CacheEntryStatus.IN_PROGRESS
         ):
-            return copy.deepcopy(
+            res = copy.deepcopy(
                 self._cache.get(self._current_command_cache_key).cache_value
             )
+            self._current_command_cache_key = None
+            return res

         response = self._conn.read_response(
             disable_decoding=disable_decoding,
@@ -932,6 +968,8 @@ class CacheProxyConnection(ConnectionInterface):
             cache_entry.cache_value = response
             self._cache.set(cache_entry)

+        self._current_command_cache_key = None
+
         return response

     def pack_command(self, *args):
@@ -990,7 +1028,7 @@ class SSLConnection(Connection):
         ssl_cert_reqs="required",
         ssl_ca_certs=None,
         ssl_ca_data=None,
-        ssl_check_hostname=False,
+        ssl_check_hostname=True,
         ssl_ca_path=None,
         ssl_password=None,
         ssl_validate_ocsp=False,
@@ -1006,7 +1044,7 @@ class SSLConnection(Connection):
         Args:
             ssl_keyfile: Path to an ssl private key. Defaults to None.
             ssl_certfile: Path to an ssl certificate. Defaults to None.
-            ssl_cert_reqs: The string value for the SSLContext.verify_mode (none, optional, required). Defaults to "required".
+            ssl_cert_reqs: The string value for the SSLContext.verify_mode (none, optional, required), or an ssl.VerifyMode. Defaults to "required".
             ssl_ca_certs: The path to a file of concatenated CA certificates in PEM format. Defaults to None.
             ssl_ca_data: Either an ASCII string of one or more PEM-encoded certificates or a bytes-like object of DER-encoded certificates.
             ssl_check_hostname: If set, match the hostname during the SSL handshake. Defaults to False.
@@ -1031,7 +1069,7 @@ class SSLConnection(Connection):
         if ssl_cert_reqs is None:
             ssl_cert_reqs = ssl.CERT_NONE
         elif isinstance(ssl_cert_reqs, str):
-            CERT_REQS = {
+            CERT_REQS = {  # noqa: N806
                 "none": ssl.CERT_NONE,
                 "optional": ssl.CERT_OPTIONAL,
                 "required": ssl.CERT_REQUIRED,
@@ -1168,6 +1206,10 @@ class UnixDomainSocketConnection(AbstractConnection):
             sock.connect(self.path)
         except OSError:
             # Prevent ResourceWarnings for unclosed sockets.
+            try:
+                sock.shutdown(socket.SHUT_RDWR)  # ensure a clean close
+            except OSError:
+                pass
             sock.close()
             raise
         sock.settimeout(self.socket_timeout)
@@ -1259,6 +1301,9 @@ def parse_url(url):
     return kwargs


+_CP = TypeVar("_CP", bound="ConnectionPool")
+
+
 class ConnectionPool:
     """
     Create a connection pool. ``If max_connections`` is set, then this
@@ -1274,7 +1319,7 @@ class ConnectionPool:
     """

     @classmethod
-    def from_url(cls, url, **kwargs):
+    def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP:
         """
         Return a connection pool configured from the given URL.

@@ -1374,6 +1419,7 @@ class ConnectionPool:
         # will notice the first thread already did the work and simply
         # release the lock.
         self._fork_lock = threading.Lock()
+        self._lock = threading.Lock()
         self.reset()

     def __repr__(self) -> (str, str):
@@ -1391,7 +1437,6 @@ class ConnectionPool:
         return self.connection_kwargs.get("protocol", None)

     def reset(self) -> None:
-        self._lock = threading.Lock()
         self._created_connections = 0
         self._available_connections = []
         self._in_use_connections = set()
@@ -1454,8 +1499,14 @@ class ConnectionPool:
         finally:
             self._fork_lock.release()

-    def get_connection(self, command_name, *keys, **options) -> "Connection":
+    @deprecated_args(
+        args_to_warn=["*"],
+        reason="Use get_connection() without args instead",
+        version="5.3.0",
+    )
+    def get_connection(self, command_name=None, *keys, **options) -> "Connection":
         "Get a connection from the pool"
+
         self._checkpid()
         with self._lock:
             try:
@@ -1474,7 +1525,7 @@ class ConnectionPool:
         try:
             if connection.can_read() and self.cache is None:
                 raise ConnectionError("Connection has data")
-        except (ConnectionError, OSError):
+        except (ConnectionError, TimeoutError, OSError):
             connection.disconnect()
             connection.connect()
             if connection.can_read():
@@ -1518,7 +1569,7 @@ class ConnectionPool:
            except KeyError:
                 # Gracefully fail when a connection is returned to this pool
                 # that the pool doesn't actually own
-                pass
+                return

             if self.owns_connection(connection):
                 self._available_connections.append(connection)
@@ -1526,10 +1577,10 @@ class ConnectionPool:
                     AfterConnectionReleasedEvent(connection)
                 )
             else:
-                # pool doesn't own this connection. do not add it back
-                # to the pool and decrement the count so that another
-                # connection can take its place if needed
-                self._created_connections -= 1
+                # Pool doesn't own this connection, do not add it back
+                # to the pool.
+                # The created connections count should not be changed,
+                # because the connection was not created by the pool.
                 connection.disconnect()
                 return

@@ -1560,7 +1611,7 @@ class ConnectionPool:
         """Close the pool, disconnecting all connections"""
         self.disconnect()

-    def set_retry(self, retry: "Retry") -> None:
+    def set_retry(self, retry: Retry) -> None:
         self.connection_kwargs.update({"retry": retry})
         for conn in self._available_connections:
             conn.retry = retry
@@ -1676,7 +1727,12 @@ class BlockingConnectionPool(ConnectionPool):
             self._connections.append(connection)
         return connection

-    def get_connection(self, command_name, *keys, **options):
+    @deprecated_args(
+        args_to_warn=["*"],
+        reason="Use get_connection() without args instead",
+        version="5.3.0",
+    )
+    def get_connection(self, command_name=None, *keys, **options):
         """
         Get a connection, blocking for ``self.timeout`` until a connection
         is available from the pool.
@@ -1716,7 +1772,7 @@ class BlockingConnectionPool(ConnectionPool):
         try:
             if connection.can_read():
                 raise ConnectionError("Connection has data")
-        except (ConnectionError, OSError):
+        except (ConnectionError, TimeoutError, OSError):
             connection.disconnect()
             connection.connect()
             if connection.can_read():
redis/exceptions.py
CHANGED
@@ -79,6 +79,7 @@ class ModuleError(ResponseError):

 class LockError(RedisError, ValueError):
     "Errors acquiring or releasing a lock"
+
     # NOTE: For backwards compatibility, this class derives from ValueError.
     # This was originally chosen to behave like threading.Lock.

@@ -88,12 +89,14 @@ class LockError(RedisError, ValueError):


 class LockNotOwnedError(LockError):
-    "Error trying to extend or release a lock that is (no longer) owned"
+    "Error trying to extend or release a lock that is not owned (anymore)"
+
     pass


 class ChildDeadlockedError(Exception):
     "Error indicating that a child process is deadlocked after a fork()"
+
     pass

redis/lock.py
CHANGED
@@ -1,3 +1,4 @@
+import logging
 import threading
 import time as mod_time
 import uuid
@@ -7,6 +8,8 @@ from typing import Optional, Type
 from redis.exceptions import LockError, LockNotOwnedError
 from redis.typing import Number

+logger = logging.getLogger(__name__)
+

 class Lock:
     """
@@ -82,6 +85,7 @@ class Lock:
         blocking: bool = True,
         blocking_timeout: Optional[Number] = None,
         thread_local: bool = True,
+        raise_on_release_error: bool = True,
     ):
         """
         Create a new Lock instance named ``name`` using the Redis client
@@ -125,6 +129,11 @@ class Lock:
         thread-1 would see the token value as "xyz" and would be
         able to successfully release the thread-2's lock.

+        ``raise_on_release_error`` indicates whether to raise an exception when
+        the lock is no longer owned when exiting the context manager. By default,
+        this is True, meaning an exception will be raised. If False, the warning
+        will be logged and the exception will be suppressed.
+
         In some use cases it's necessary to disable thread local storage. For
         example, if you have code where one thread acquires a lock and passes
         that lock instance to a worker thread to release later. If thread
@@ -140,6 +149,7 @@ class Lock:
         self.blocking = blocking
         self.blocking_timeout = blocking_timeout
         self.thread_local = bool(thread_local)
+        self.raise_on_release_error = raise_on_release_error
         self.local = threading.local() if self.thread_local else SimpleNamespace()
         self.local.token = None
         self.register_scripts()
@@ -168,7 +178,14 @@ class Lock:
         exc_value: Optional[BaseException],
         traceback: Optional[TracebackType],
     ) -> None:
-        self.release()
+        try:
+            self.release()
+        except LockError:
+            if self.raise_on_release_error:
+                raise
+            logger.warning(
+                "Lock was unlocked or no longer owned when exiting context manager."
+            )

     def acquire(
         self,
@@ -251,7 +268,10 @@ class Lock:
         """
         expected_token = self.local.token
         if expected_token is None:
-            raise LockError("Cannot release an unlocked lock", lock_name=self.name)
+            raise LockError(
+                "Cannot release a lock that's not owned or is already unlocked.",
+                lock_name=self.name,
+            )
         self.local.token = None
         self.do_release(expected_token)

@@ -264,7 +284,7 @@ class Lock:
                 lock_name=self.name,
             )

-    def extend(self, additional_time: int, replace_ttl: bool = False) -> bool:
+    def extend(self, additional_time: Number, replace_ttl: bool = False) -> bool:
         """
         Adds more time to an already acquired lock.

@@ -281,7 +301,7 @@ class Lock:
             raise LockError("Cannot extend a lock with no timeout", lock_name=self.name)
         return self.do_extend(additional_time, replace_ttl)

-    def do_extend(self, additional_time: int, replace_ttl: bool) -> bool:
+    def do_extend(self, additional_time: Number, replace_ttl: bool) -> bool:
         additional_time = int(additional_time * 1000)
         if not bool(
             self.lua_extend(
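The new raise_on_release_error flag controls what __exit__ does when release() fails because the lock expired or was taken over. A minimal usage sketch; it assumes a Redis server on localhost:6379, and the key name is illustrative:

from redis import Redis
from redis.lock import Lock

r = Redis()  # assumes a local Redis server

# With raise_on_release_error=False, losing the lock before the block ends is
# logged as a warning instead of raising LockNotOwnedError from __exit__.
lock = Lock(r, "resource-key", timeout=5, raise_on_release_error=False)
with lock:
    pass  # do work; if the lock is lost here, exiting the block will not raise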
redis/ocsp.py
CHANGED
@@ -15,6 +15,7 @@ from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
 from cryptography.hazmat.primitives.hashes import SHA1, Hash
 from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
 from cryptography.x509 import ocsp
+
 from redis.exceptions import AuthorizationError, ConnectionError


@@ -56,7 +57,7 @@ def _check_certificate(issuer_cert, ocsp_bytes, validate=True):
     if ocsp_response.response_status == ocsp.OCSPResponseStatus.SUCCESSFUL:
         if ocsp_response.certificate_status != ocsp.OCSPCertStatus.GOOD:
             raise ConnectionError(
-                f'Received an {str(ocsp_response.certificate_status).split(".")[1]} '
+                f"Received an {str(ocsp_response.certificate_status).split('.')[1]} "
                 "ocsp certificate status"
             )
         else:
redis/retry.py
CHANGED
@@ -44,6 +44,18 @@ class Retry:
             set(self._supported_errors + tuple(specified_errors))
         )

+    def get_retries(self) -> int:
+        """
+        Get the number of retries.
+        """
+        return self._retries
+
+    def update_retries(self, value: int) -> None:
+        """
+        Set the number of retries.
+        """
+        self._retries = value
+
     def call_with_retry(
         self,
         do: Callable[[], T],
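The two new accessors make the retry budget readable and adjustable after the policy object is created. A small sketch; the choice of backoff strategy is illustrative:

from redis.backoff import ExponentialBackoff
from redis.retry import Retry

retry = Retry(ExponentialBackoff(), retries=3)
assert retry.get_retries() == 3

# Raise the budget in place instead of constructing a new Retry object.
retry.update_retries(5)
assert retry.get_retries() == 5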
redis/sentinel.py
CHANGED
@@ -273,7 +273,7 @@ class Sentinel(SentinelCommands):
         )
         return (
             f"<{type(self).__module__}.{type(self).__name__}"
-            f'(sentinels=[{",".join(sentinel_addresses)}])>'
+            f"(sentinels=[{','.join(sentinel_addresses)}])>"
         )

     def check_master_state(self, state, service_name):
@@ -349,6 +349,8 @@ class Sentinel(SentinelCommands):
     ):
         """
         Returns a redis client instance for the ``service_name`` master.
+        Sentinel client will detect failover and reconnect Redis clients
+        automatically.

         A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
         used to retrieve the master's address before establishing a new
redis/typing.py
CHANGED
redis/utils.py
CHANGED
@@ -1,7 +1,12 @@
+import datetime
 import logging
+import textwrap
 from contextlib import contextmanager
 from functools import wraps
-from typing import Any, Dict, Mapping, Union
+from typing import Any, Dict, List, Mapping, Optional, Union
+
+from redis.exceptions import DataError
+from redis.typing import AbsExpiryT, EncodableT, ExpiryT

 try:
     import hiredis  # noqa
@@ -122,6 +127,71 @@ def deprecated_function(reason="", version="", name=None):
     return decorator


+def warn_deprecated_arg_usage(
+    arg_name: Union[list, str],
+    function_name: str,
+    reason: str = "",
+    version: str = "",
+    stacklevel: int = 2,
+):
+    import warnings
+
+    msg = (
+        f"Call to '{function_name}' function with deprecated"
+        f" usage of input argument/s '{arg_name}'."
+    )
+    if reason:
+        msg += f" ({reason})"
+    if version:
+        msg += f" -- Deprecated since version {version}."
+    warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
+
+
+def deprecated_args(
+    args_to_warn: list = ["*"],
+    allowed_args: list = [],
+    reason: str = "",
+    version: str = "",
+):
+    """
+    Decorator to mark specified args of a function as deprecated.
+    If '*' is in args_to_warn, all arguments will be marked as deprecated.
+    """
+
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            # Get function argument names
+            arg_names = func.__code__.co_varnames[: func.__code__.co_argcount]
+
+            provided_args = dict(zip(arg_names, args))
+            provided_args.update(kwargs)
+
+            provided_args.pop("self", None)
+            for allowed_arg in allowed_args:
+                provided_args.pop(allowed_arg, None)
+
+            for arg in args_to_warn:
+                if arg == "*" and len(provided_args) > 0:
+                    warn_deprecated_arg_usage(
+                        list(provided_args.keys()),
+                        func.__name__,
+                        reason,
+                        version,
+                        stacklevel=3,
+                    )
+                elif arg in provided_args:
+                    warn_deprecated_arg_usage(
+                        arg, func.__name__, reason, version, stacklevel=3
+                    )
+
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
+
+
 def _set_info_logger():
     """
     Set up a logger that log info logs to stdout.
@@ -192,3 +262,46 @@ def ensure_string(key):
         return key
     else:
         raise TypeError("Key must be either a string or bytes")
+
+
+def extract_expire_flags(
+    ex: Optional[ExpiryT] = None,
+    px: Optional[ExpiryT] = None,
+    exat: Optional[AbsExpiryT] = None,
+    pxat: Optional[AbsExpiryT] = None,
+) -> List[EncodableT]:
+    exp_options: list[EncodableT] = []
+    if ex is not None:
+        exp_options.append("EX")
+        if isinstance(ex, datetime.timedelta):
+            exp_options.append(int(ex.total_seconds()))
+        elif isinstance(ex, int):
+            exp_options.append(ex)
+        elif isinstance(ex, str) and ex.isdigit():
+            exp_options.append(int(ex))
+        else:
+            raise DataError("ex must be datetime.timedelta or int")
+    elif px is not None:
+        exp_options.append("PX")
+        if isinstance(px, datetime.timedelta):
+            exp_options.append(int(px.total_seconds() * 1000))
+        elif isinstance(px, int):
+            exp_options.append(px)
+        else:
+            raise DataError("px must be datetime.timedelta or int")
+    elif exat is not None:
+        if isinstance(exat, datetime.datetime):
+            exat = int(exat.timestamp())
+        exp_options.extend(["EXAT", exat])
+    elif pxat is not None:
+        if isinstance(pxat, datetime.datetime):
+            pxat = int(pxat.timestamp() * 1000)
+        exp_options.extend(["PXAT", pxat])
+
+    return exp_options
+
+
+def truncate_text(txt, max_length=100):
+    return textwrap.shorten(
+        text=txt, width=max_length, placeholder="...", break_long_words=True
+    )
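The deprecated_args decorator added here is the helper applied to get_connection() in connection.py above, and extract_expire_flags converts expiry keyword values into EX/PX/EXAT/PXAT command tokens. A standalone sketch of both; the configure() function is hypothetical and exists only to demonstrate the decorator's behavior:

import datetime
import warnings

from redis.utils import deprecated_args, extract_expire_flags

@deprecated_args(
    args_to_warn=["legacy_flag"],
    reason="Use new_flag instead",
    version="6.0.0",
)
def configure(new_flag=False, legacy_flag=None):  # hypothetical example function
    return new_flag or bool(legacy_flag)

# Passing the deprecated argument triggers a DeprecationWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    configure(legacy_flag=True)
print(caught[0].category)  # <class 'DeprecationWarning'>

# extract_expire_flags turns expiry keyword values into command arguments.
print(extract_expire_flags(ex=10))                             # ['EX', 10]
print(extract_expire_flags(px=datetime.timedelta(seconds=2)))  # ['PX', 2000]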