redis 7.0.0b2__py3-none-any.whl → 7.0.0b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis/__init__.py +1 -1
- redis/asyncio/client.py +14 -5
- redis/asyncio/cluster.py +5 -1
- redis/asyncio/connection.py +18 -0
- redis/asyncio/http/__init__.py +0 -0
- redis/asyncio/http/http_client.py +265 -0
- redis/asyncio/multidb/__init__.py +0 -0
- redis/asyncio/multidb/client.py +528 -0
- redis/asyncio/multidb/command_executor.py +339 -0
- redis/asyncio/multidb/config.py +210 -0
- redis/asyncio/multidb/database.py +69 -0
- redis/asyncio/multidb/event.py +84 -0
- redis/asyncio/multidb/failover.py +125 -0
- redis/asyncio/multidb/failure_detector.py +38 -0
- redis/asyncio/multidb/healthcheck.py +292 -0
- redis/background.py +204 -0
- redis/client.py +22 -3
- redis/cluster.py +3 -1
- redis/commands/core.py +10 -3
- redis/data_structure.py +81 -0
- redis/event.py +84 -10
- redis/http/__init__.py +0 -0
- redis/http/http_client.py +425 -0
- redis/multidb/__init__.py +0 -0
- redis/multidb/circuit.py +144 -0
- redis/multidb/client.py +524 -0
- redis/multidb/command_executor.py +350 -0
- redis/multidb/config.py +207 -0
- redis/multidb/database.py +130 -0
- redis/multidb/event.py +89 -0
- redis/multidb/exception.py +17 -0
- redis/multidb/failover.py +125 -0
- redis/multidb/failure_detector.py +104 -0
- redis/multidb/healthcheck.py +289 -0
- redis/retry.py +14 -1
- redis/utils.py +14 -0
- {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/METADATA +3 -1
- {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/RECORD +40 -14
- {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/WHEEL +0 -0
- {redis-7.0.0b2.dist-info → redis-7.0.0b3.dist-info}/licenses/LICENSE +0 -0
redis/client.py
CHANGED
@@ -1186,7 +1186,10 @@ class PubSub:
 
     def ping(self, message: Union[str, None] = None) -> bool:
         """
-        Ping the Redis server
+        Ping the Redis server to test connectivity.
+
+        Sends a PING command to the Redis server and returns True if the server
+        responds with "PONG".
         """
         args = ["PING", message] if message is not None else ["PING"]
         return self.execute_command(*args)
@@ -1271,6 +1274,8 @@ class PubSub:
         sleep_time: float = 0.0,
         daemon: bool = False,
         exception_handler: Optional[Callable] = None,
+        pubsub=None,
+        sharded_pubsub: bool = False,
     ) -> "PubSubWorkerThread":
         for channel, handler in self.channels.items():
             if handler is None:
@@ -1284,8 +1289,13 @@ class PubSub:
                     f"Shard Channel: '{s_channel}' has no handler registered"
                 )
 
+        pubsub = self if pubsub is None else pubsub
         thread = PubSubWorkerThread(
-            self, sleep_time, daemon=daemon, exception_handler=exception_handler
+            pubsub,
+            sleep_time,
+            daemon=daemon,
+            exception_handler=exception_handler,
+            sharded_pubsub=sharded_pubsub,
         )
         thread.start()
         return thread
@@ -1300,12 +1310,14 @@ class PubSubWorkerThread(threading.Thread):
         exception_handler: Union[
             Callable[[Exception, "PubSub", "PubSubWorkerThread"], None], None
         ] = None,
+        sharded_pubsub: bool = False,
     ):
         super().__init__()
         self.daemon = daemon
         self.pubsub = pubsub
         self.sleep_time = sleep_time
         self.exception_handler = exception_handler
+        self.sharded_pubsub = sharded_pubsub
         self._running = threading.Event()
 
     def run(self) -> None:
@@ -1316,7 +1328,14 @@ class PubSubWorkerThread(threading.Thread):
         sleep_time = self.sleep_time
         while self._running.is_set():
            try:
-                pubsub.get_message(ignore_subscribe_messages=True, timeout=sleep_time)
+                if not self.sharded_pubsub:
+                    pubsub.get_message(
+                        ignore_subscribe_messages=True, timeout=sleep_time
+                    )
+                else:
+                    pubsub.get_sharded_message(
+                        ignore_subscribe_messages=True, timeout=sleep_time
+                    )
             except BaseException as e:
                 if self.exception_handler is None:
                     raise
redis/cluster.py
CHANGED
@@ -695,6 +695,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             self._event_dispatcher = EventDispatcher()
         else:
             self._event_dispatcher = event_dispatcher
+        self.startup_nodes = startup_nodes
         self.nodes_manager = NodesManager(
             startup_nodes=startup_nodes,
             from_url=from_url,
@@ -3164,7 +3165,8 @@ class TransactionStrategy(AbstractStrategy):
                 self._nodes_manager.initialize()
                 self.reinitialize_counter = 0
             else:
-
+                if isinstance(error, AskError):
+                    self._nodes_manager.update_moved_exception(error)
 
         self._executing = False
 
redis/commands/core.py
CHANGED
@@ -1210,11 +1210,18 @@ class ManagementCommands(CommandsProtocol):
         """
         return self.execute_command("LATENCY RESET", *events)
 
-    def ping(self, **kwargs) -> ResponseT:
+    def ping(self, **kwargs) -> Union[Awaitable[bool], bool]:
         """
-        Ping the Redis server
+        Ping the Redis server to test connectivity.
 
-
+        Sends a PING command to the Redis server and returns True if the server
+        responds with "PONG".
+
+        This command is useful for:
+        - Testing whether a connection is still alive
+        - Verifying the server's ability to serve data
+
+        For more information on the underlying ping command see https://redis.io/commands/ping
         """
         return self.execute_command("PING", **kwargs)
 
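
A quick connectivity check using the behavior documented above; host and port are placeholders:

import redis

r = redis.Redis(host="localhost", port=6379)
if r.ping():  # True when the server replies PONG
    print("Redis is reachable")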
redis/data_structure.py
ADDED
@@ -0,0 +1,81 @@
+import threading
+from typing import Any, Generic, List, TypeVar
+
+from redis.typing import Number
+
+T = TypeVar("T")
+
+
+class WeightedList(Generic[T]):
+    """
+    Thread-safe weighted list.
+    """
+
+    def __init__(self):
+        self._items: List[tuple[Any, Number]] = []
+        self._lock = threading.RLock()
+
+    def add(self, item: Any, weight: float) -> None:
+        """Add item with weight, maintaining sorted order"""
+        with self._lock:
+            # Find insertion point using binary search
+            left, right = 0, len(self._items)
+            while left < right:
+                mid = (left + right) // 2
+                if self._items[mid][1] < weight:
+                    right = mid
+                else:
+                    left = mid + 1
+
+            self._items.insert(left, (item, weight))
+
+    def remove(self, item):
+        """Remove first occurrence of item"""
+        with self._lock:
+            for i, (stored_item, weight) in enumerate(self._items):
+                if stored_item == item:
+                    self._items.pop(i)
+                    return weight
+            raise ValueError("Item not found")
+
+    def get_by_weight_range(
+        self, min_weight: float, max_weight: float
+    ) -> List[tuple[Any, Number]]:
+        """Get all items within weight range"""
+        with self._lock:
+            result = []
+            for item, weight in self._items:
+                if min_weight <= weight <= max_weight:
+                    result.append((item, weight))
+            return result
+
+    def get_top_n(self, n: int) -> List[tuple[Any, Number]]:
+        """Get top N the highest weighted items"""
+        with self._lock:
+            return [(item, weight) for item, weight in self._items[:n]]
+
+    def update_weight(self, item, new_weight: float):
+        with self._lock:
+            """Update weight of an item"""
+            old_weight = self.remove(item)
+            self.add(item, new_weight)
+            return old_weight
+
+    def __iter__(self):
+        """Iterate in descending weight order"""
+        with self._lock:
+            items_copy = (
+                self._items.copy()
+            )  # Create snapshot as lock released after each 'yield'
+
+        for item, weight in items_copy:
+            yield item, weight
+
+    def __len__(self):
+        with self._lock:
+            return len(self._items)
+
+    def __getitem__(self, index) -> tuple[Any, Number]:
+        with self._lock:
+            item, weight = self._items[index]
+            return item, weight
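
A short, hypothetical sketch of how the new WeightedList behaves, based only on the implementation above; the item names are illustrative:

from redis.data_structure import WeightedList

wl = WeightedList()
wl.add("replica-1", 0.5)
wl.add("primary", 1.0)
wl.add("replica-2", 0.5)

wl.get_top_n(2)                   # [("primary", 1.0), ("replica-1", 0.5)] -- kept in descending weight order
wl.get_by_weight_range(0.4, 0.6)  # both replicas
wl.update_weight("replica-1", 0.9)

for item, weight in wl:           # iterates over a snapshot, highest weight first
    print(item, weight)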
redis/event.py
CHANGED
@@ -2,7 +2,7 @@ import asyncio
 import threading
 from abc import ABC, abstractmethod
 from enum import Enum
-from typing import List, Optional, Union
+from typing import Dict, List, Optional, Type, Union
 
 from redis.auth.token import TokenInterface
 from redis.credentials import CredentialProvider, StreamingCredentialProvider
@@ -42,6 +42,17 @@ class EventDispatcherInterface(ABC):
     async def dispatch_async(self, event: object):
         pass
 
+    @abstractmethod
+    def register_listeners(
+        self,
+        mappings: Dict[
+            Type[object],
+            List[Union[EventListenerInterface, AsyncEventListenerInterface]],
+        ],
+    ):
+        """Register additional listeners."""
+        pass
+
 
 class EventException(Exception):
     """
@@ -56,11 +67,18 @@ class EventException(Exception):
 
 class EventDispatcher(EventDispatcherInterface):
     # TODO: Make dispatcher to accept external mappings.
-    def __init__(self):
+    def __init__(
+        self,
+        event_listeners: Optional[
+            Dict[Type[object], List[EventListenerInterface]]
+        ] = None,
+    ):
         """
-
+        Dispatcher that dispatches events to listeners associated with given event.
         """
-        self._event_listeners_mapping = {
+        self._event_listeners_mapping: Dict[
+            Type[object], List[EventListenerInterface]
+        ] = {
             AfterConnectionReleasedEvent: [
                 ReAuthConnectionListener(),
             ],
@@ -77,17 +95,47 @@ class EventDispatcher(EventDispatcherInterface):
             ],
         }
 
+        self._lock = threading.Lock()
+        self._async_lock = None
+
+        if event_listeners:
+            self.register_listeners(event_listeners)
+
     def dispatch(self, event: object):
-
+        with self._lock:
+            listeners = self._event_listeners_mapping.get(type(event), [])
 
-
-
+        for listener in listeners:
+            listener.listen(event)
 
     async def dispatch_async(self, event: object):
-
+        if self._async_lock is None:
+            self._async_lock = asyncio.Lock()
+
+        async with self._async_lock:
+            listeners = self._event_listeners_mapping.get(type(event), [])
 
-
-
+        for listener in listeners:
+            await listener.listen(event)
+
+    def register_listeners(
+        self,
+        mappings: Dict[
+            Type[object],
+            List[Union[EventListenerInterface, AsyncEventListenerInterface]],
+        ],
+    ):
+        with self._lock:
+            for event_type in mappings:
+                if event_type in self._event_listeners_mapping:
+                    self._event_listeners_mapping[event_type] = list(
+                        set(
+                            self._event_listeners_mapping[event_type]
+                            + mappings[event_type]
+                        )
+                    )
+                else:
+                    self._event_listeners_mapping[event_type] = mappings[event_type]
 
 
 class AfterConnectionReleasedEvent:
@@ -226,6 +274,32 @@ class AfterAsyncClusterInstantiationEvent:
         return self._credential_provider
 
 
+class OnCommandsFailEvent:
+    """
+    Event fired whenever a command fails during the execution.
+    """
+
+    def __init__(
+        self,
+        commands: tuple,
+        exception: Exception,
+    ):
+        self._commands = commands
+        self._exception = exception
+
+    @property
+    def commands(self) -> tuple:
+        return self._commands
+
+    @property
+    def exception(self) -> Exception:
+        return self._exception
+
+
+class AsyncOnCommandsFailEvent(OnCommandsFailEvent):
+    pass
+
+
 class ReAuthConnectionListener(EventListenerInterface):
     """
     Listener that performs re-authentication of given connection.
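
Taken together, these changes let callers plug their own listener mappings into EventDispatcher. A hypothetical sketch based on the hunks above; the LogFailuresListener class is illustrative and not part of the package:

from redis.event import EventDispatcher, EventListenerInterface, OnCommandsFailEvent

class LogFailuresListener(EventListenerInterface):
    def listen(self, event: OnCommandsFailEvent):
        print(f"commands {event.commands} failed: {event.exception!r}")

# Listeners can be supplied at construction time...
dispatcher = EventDispatcher(
    event_listeners={OnCommandsFailEvent: [LogFailuresListener()]}
)

# ...or merged into an existing dispatcher afterwards.
dispatcher.register_listeners({OnCommandsFailEvent: [LogFailuresListener()]})

dispatcher.dispatch(OnCommandsFailEvent(("SET", "k", "v"), ConnectionError("lost")))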
redis/http/__init__.py
ADDED
File without changes