coredis 5.5.0__cp313-cp313-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- 22fe76227e35f92ab5c3__mypyc.cpython-313-darwin.so +0 -0
- coredis/__init__.py +42 -0
- coredis/_enum.py +42 -0
- coredis/_json.py +11 -0
- coredis/_packer.cpython-313-darwin.so +0 -0
- coredis/_packer.py +71 -0
- coredis/_protocols.py +50 -0
- coredis/_py_311_typing.py +20 -0
- coredis/_py_312_typing.py +17 -0
- coredis/_sidecar.py +114 -0
- coredis/_utils.cpython-313-darwin.so +0 -0
- coredis/_utils.py +440 -0
- coredis/_version.py +34 -0
- coredis/_version.pyi +1 -0
- coredis/cache.py +801 -0
- coredis/client/__init__.py +6 -0
- coredis/client/basic.py +1240 -0
- coredis/client/cluster.py +1265 -0
- coredis/commands/__init__.py +64 -0
- coredis/commands/_key_spec.py +517 -0
- coredis/commands/_utils.py +108 -0
- coredis/commands/_validators.py +159 -0
- coredis/commands/_wrappers.py +175 -0
- coredis/commands/bitfield.py +110 -0
- coredis/commands/constants.py +662 -0
- coredis/commands/core.py +8484 -0
- coredis/commands/function.py +408 -0
- coredis/commands/monitor.py +168 -0
- coredis/commands/pubsub.py +905 -0
- coredis/commands/request.py +108 -0
- coredis/commands/script.py +296 -0
- coredis/commands/sentinel.py +246 -0
- coredis/config.py +50 -0
- coredis/connection.py +906 -0
- coredis/constants.cpython-313-darwin.so +0 -0
- coredis/constants.py +37 -0
- coredis/credentials.py +45 -0
- coredis/exceptions.py +360 -0
- coredis/experimental/__init__.py +1 -0
- coredis/globals.py +23 -0
- coredis/modules/__init__.py +121 -0
- coredis/modules/autocomplete.py +138 -0
- coredis/modules/base.py +262 -0
- coredis/modules/filters.py +1319 -0
- coredis/modules/graph.py +362 -0
- coredis/modules/json.py +691 -0
- coredis/modules/response/__init__.py +0 -0
- coredis/modules/response/_callbacks/__init__.py +0 -0
- coredis/modules/response/_callbacks/autocomplete.py +42 -0
- coredis/modules/response/_callbacks/graph.py +237 -0
- coredis/modules/response/_callbacks/json.py +21 -0
- coredis/modules/response/_callbacks/search.py +221 -0
- coredis/modules/response/_callbacks/timeseries.py +158 -0
- coredis/modules/response/types.py +179 -0
- coredis/modules/search.py +1089 -0
- coredis/modules/timeseries.py +1139 -0
- coredis/parser.cpython-313-darwin.so +0 -0
- coredis/parser.py +344 -0
- coredis/pipeline.py +1225 -0
- coredis/pool/__init__.py +11 -0
- coredis/pool/basic.py +453 -0
- coredis/pool/cluster.py +517 -0
- coredis/pool/nodemanager.py +340 -0
- coredis/py.typed +0 -0
- coredis/recipes/__init__.py +0 -0
- coredis/recipes/credentials/__init__.py +5 -0
- coredis/recipes/credentials/iam_provider.py +63 -0
- coredis/recipes/locks/__init__.py +5 -0
- coredis/recipes/locks/extend.lua +17 -0
- coredis/recipes/locks/lua_lock.py +281 -0
- coredis/recipes/locks/release.lua +10 -0
- coredis/response/__init__.py +5 -0
- coredis/response/_callbacks/__init__.py +538 -0
- coredis/response/_callbacks/acl.py +32 -0
- coredis/response/_callbacks/cluster.py +183 -0
- coredis/response/_callbacks/command.py +86 -0
- coredis/response/_callbacks/connection.py +31 -0
- coredis/response/_callbacks/geo.py +58 -0
- coredis/response/_callbacks/hash.py +85 -0
- coredis/response/_callbacks/keys.py +59 -0
- coredis/response/_callbacks/module.py +33 -0
- coredis/response/_callbacks/script.py +85 -0
- coredis/response/_callbacks/sentinel.py +179 -0
- coredis/response/_callbacks/server.py +241 -0
- coredis/response/_callbacks/sets.py +44 -0
- coredis/response/_callbacks/sorted_set.py +204 -0
- coredis/response/_callbacks/streams.py +185 -0
- coredis/response/_callbacks/strings.py +70 -0
- coredis/response/_callbacks/vector_sets.py +159 -0
- coredis/response/_utils.py +33 -0
- coredis/response/types.py +416 -0
- coredis/retry.py +233 -0
- coredis/sentinel.py +477 -0
- coredis/stream.py +369 -0
- coredis/tokens.py +2286 -0
- coredis/typing.py +593 -0
- coredis-5.5.0.dist-info/METADATA +211 -0
- coredis-5.5.0.dist-info/RECORD +100 -0
- coredis-5.5.0.dist-info/WHEEL +6 -0
- coredis-5.5.0.dist-info/licenses/LICENSE +23 -0
@@ -0,0 +1,1265 @@
from __future__ import annotations

import asyncio
import contextlib
import contextvars
import functools
import inspect
import random
import textwrap
from abc import ABCMeta
from ssl import SSLContext
from typing import TYPE_CHECKING, Any, cast, overload

from deprecated.sphinx import versionadded

from coredis._utils import b, hash_slot
from coredis.cache import AbstractCache
from coredis.client.basic import Client, Redis
from coredis.commands._key_spec import KeySpec
from coredis.commands.constants import CommandName, NodeFlag
from coredis.commands.pubsub import ClusterPubSub, ShardedPubSub, SubscriptionCallback
from coredis.connection import RedisSSLContext
from coredis.exceptions import (
    AskError,
    BusyLoadingError,
    ClusterDownError,
    ClusterError,
    ConnectionError,
    MovedError,
    RedisClusterException,
    TimeoutError,
    TryAgainError,
    WatchError,
)
from coredis.globals import CACHEABLE_COMMANDS, MODULE_GROUPS, READONLY_COMMANDS
from coredis.pool import ClusterConnectionPool
from coredis.pool.nodemanager import ManagedNode
from coredis.response._callbacks import AsyncPreProcessingCallback, NoopCallback
from coredis.retry import CompositeRetryPolicy, ConstantRetryPolicy, RetryPolicy
from coredis.typing import (
    AnyStr,
    AsyncIterator,
    Awaitable,
    Callable,
    Coroutine,
    ExecutionParameters,
    Iterable,
    Iterator,
    Literal,
    Mapping,
    Node,
    Parameters,
    ParamSpec,
    RedisCommand,
    RedisCommandP,
    RedisValueT,
    ResponseType,
    StringT,
    TypeAdapter,
    TypeVar,
    Unpack,
)

P = ParamSpec("P")
R = TypeVar("R")

if TYPE_CHECKING:
    import coredis.pipeline

class ClusterMeta(ABCMeta):
    ROUTING_FLAGS: dict[bytes, NodeFlag]
    SPLIT_FLAGS: dict[bytes, NodeFlag]
    RESULT_CALLBACKS: dict[bytes, Callable[..., ResponseType]]
    NODE_FLAG_DOC_MAPPING = {
        NodeFlag.PRIMARIES: "all primaries",
        NodeFlag.REPLICAS: "all replicas",
        NodeFlag.RANDOM: "a random node",
        NodeFlag.ALL: "all nodes",
        NodeFlag.SLOT_ID: "one or more nodes based on the slots provided",
    }

    def __new__(
        cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
    ) -> ClusterMeta:
        kls = super().__new__(cls, name, bases, namespace)
        methods = dict(k for k in inspect.getmembers(kls) if inspect.isfunction(k[1]))
        for module in MODULE_GROUPS:
            methods.update(
                {
                    f"{module.MODULE}.{k[0]}": k[1]
                    for k in inspect.getmembers(module)
                    if inspect.isfunction(k[1])
                }
            )
        for method_name, method in methods.items():
            doc_addition = ""
            cmd = getattr(method, "__coredis_command", None)
            if cmd:
                if not cmd.cluster.enabled:
                    doc_addition = """
.. warning:: Not supported in cluster mode
"""
                else:
                    if cmd.cluster.route:
                        kls.ROUTING_FLAGS[cmd.command] = cmd.cluster.route
                        aggregate_note = ""
                        if cmd.cluster.multi_node:
                            if cmd.cluster.combine:
                                aggregate_note = f"and return {cmd.cluster.combine.response_policy}"
                            else:
                                aggregate_note = (
                                    "and a mapping of nodes to results will be returned"
                                )
                        doc_addition = f"""
.. admonition:: Cluster note

   The command will be run on **{cls.NODE_FLAG_DOC_MAPPING[cmd.cluster.route]}** {aggregate_note}
"""
                    elif cmd.cluster.split and cmd.cluster.combine:
                        kls.SPLIT_FLAGS[cmd.command] = cmd.cluster.split
                        doc_addition = f"""
.. admonition:: Cluster note

   The command will be run on **{cls.NODE_FLAG_DOC_MAPPING[cmd.cluster.split]}**
   by distributing the keys to the appropriate nodes and return
   {cmd.cluster.combine.response_policy}.

   To disable this behavior set :paramref:`RedisCluster.non_atomic_cross_slot` to ``False``
"""
                    if cmd.cluster.multi_node:
                        kls.RESULT_CALLBACKS[cmd.command] = cmd.cluster.combine
            if doc_addition and not hasattr(method, "__cluster_docs"):
                if not getattr(method, "__coredis_module", None):

                    def __w(
                        func: Callable[P, Awaitable[R]], enabled: bool
                    ) -> Callable[P, Awaitable[R]]:
                        @functools.wraps(func)
                        async def _w(*a: P.args, **k: P.kwargs) -> R:
                            if not enabled:
                                raise NotImplementedError(
                                    f"{func.__name__} is disabled for cluster client"
                                )
                            return await func(*a, **k)

                        _w.__doc__ = f"""{textwrap.dedent(method.__doc__ or "")}
{doc_addition}
"""
                        return _w

                    wrapped = __w(method, cmd.cluster.enabled if cmd else True)
                    setattr(wrapped, "__cluster_docs", doc_addition)
                    setattr(kls, method_name, wrapped)
                else:
                    method.__doc__ = f"""{textwrap.dedent(method.__doc__ or "")}
{doc_addition}
"""
                    setattr(method, "__cluster_docs", doc_addition)
        return kls

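# A sketch of what the metaclass consumes (illustrative only: the
# "__coredis_command" attribute and its ``cluster`` metadata are attached by
# coredis' internal command decorators, which are not part of this file):
#
#     cmd = getattr(SomeClient.get, "__coredis_command", None)
#     if cmd and cmd.cluster.route:
#         # The command name (e.g. b"GET") would be registered in
#         # ROUTING_FLAGS and a "Cluster note" admonition appended to the
#         # method's docstring by ClusterMeta.__new__.
#         ...
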
RedisClusterT = TypeVar("RedisClusterT", bound="RedisCluster[Any]")


class RedisCluster(
    Client[AnyStr],
    metaclass=ClusterMeta,
):
    MAX_RETRIES = 16
    ROUTING_FLAGS: dict[bytes, NodeFlag] = {}
    SPLIT_FLAGS: dict[bytes, NodeFlag] = {}
    RESULT_CALLBACKS: dict[bytes, Callable[..., Any]] = {}

    connection_pool: ClusterConnectionPool

    @overload
    def __init__(
        self: RedisCluster[bytes],
        host: str | None = ...,
        port: int | None = ...,
        *,
        startup_nodes: Iterable[Node] | None = ...,
        stream_timeout: float | None = ...,
        connect_timeout: float | None = ...,
        ssl: bool = ...,
        ssl_context: SSLContext | None = ...,
        ssl_keyfile: str | None = ...,
        ssl_certfile: str | None = ...,
        ssl_cert_reqs: Literal["optional", "required", "none"] | None = ...,
        ssl_check_hostname: bool | None = ...,
        ssl_ca_certs: str | None = ...,
        max_connections: int = ...,
        max_connections_per_node: bool = ...,
        readonly: bool = ...,
        read_from_replicas: bool = ...,
        reinitialize_steps: int | None = ...,
        skip_full_coverage_check: bool = ...,
        nodemanager_follow_cluster: bool = ...,
        encoding: str = ...,
        decode_responses: Literal[False] = ...,
        connection_pool: ClusterConnectionPool | None = ...,
        connection_pool_cls: type[ClusterConnectionPool] = ...,
        protocol_version: Literal[2, 3] = ...,
        verify_version: bool = ...,
        non_atomic_cross_slot: bool = ...,
        cache: AbstractCache | None = ...,
        noreply: bool = ...,
        noevict: bool = ...,
        notouch: bool = ...,
        retry_policy: RetryPolicy = ...,
        type_adapter: TypeAdapter | None = ...,
        **kwargs: Any,
    ) -> None: ...

    @overload
    def __init__(
        self: RedisCluster[str],
        host: str | None = ...,
        port: int | None = ...,
        *,
        startup_nodes: Iterable[Node] | None = ...,
        stream_timeout: float | None = ...,
        connect_timeout: float | None = ...,
        ssl: bool = ...,
        ssl_context: SSLContext | None = ...,
        ssl_keyfile: str | None = ...,
        ssl_certfile: str | None = ...,
        ssl_cert_reqs: Literal["optional", "required", "none"] | None = ...,
        ssl_check_hostname: bool | None = ...,
        ssl_ca_certs: str | None = ...,
        max_connections: int = ...,
        max_connections_per_node: bool = ...,
        readonly: bool = ...,
        read_from_replicas: bool = ...,
        reinitialize_steps: int | None = ...,
        skip_full_coverage_check: bool = ...,
        nodemanager_follow_cluster: bool = ...,
        encoding: str = ...,
        decode_responses: Literal[True] = ...,
        connection_pool: ClusterConnectionPool | None = ...,
        connection_pool_cls: type[ClusterConnectionPool] = ...,
        protocol_version: Literal[2, 3] = ...,
        verify_version: bool = ...,
        non_atomic_cross_slot: bool = ...,
        cache: AbstractCache | None = ...,
        noreply: bool = ...,
        noevict: bool = ...,
        notouch: bool = ...,
        retry_policy: RetryPolicy = ...,
        type_adapter: TypeAdapter | None = ...,
        **kwargs: Any,
    ) -> None: ...

    def __init__(
        self,
        host: str | None = None,
        port: int | None = None,
        *,
        startup_nodes: Iterable[Node] | None = None,
        stream_timeout: float | None = None,
        connect_timeout: float | None = None,
        ssl: bool = False,
        ssl_context: SSLContext | None = None,
        ssl_keyfile: str | None = None,
        ssl_certfile: str | None = None,
        ssl_cert_reqs: Literal["optional", "required", "none"] | None = None,
        ssl_check_hostname: bool | None = None,
        ssl_ca_certs: str | None = None,
        max_connections: int = 32,
        max_connections_per_node: bool = False,
        readonly: bool = False,
        read_from_replicas: bool = False,
        reinitialize_steps: int | None = None,
        skip_full_coverage_check: bool = False,
        nodemanager_follow_cluster: bool = True,
        encoding: str = "utf-8",
        decode_responses: bool = False,
        connection_pool: ClusterConnectionPool | None = None,
        connection_pool_cls: type[ClusterConnectionPool] = ClusterConnectionPool,
        protocol_version: Literal[2, 3] = 3,
        verify_version: bool = True,
        non_atomic_cross_slot: bool = True,
        cache: AbstractCache | None = None,
        noreply: bool = False,
        noevict: bool = False,
        notouch: bool = False,
        retry_policy: RetryPolicy = CompositeRetryPolicy(
            ConstantRetryPolicy((ClusterDownError,), 2, 0.1),
            ConstantRetryPolicy(
                (
                    ConnectionError,
                    TimeoutError,
                ),
                2,
                0.1,
            ),
        ),
        type_adapter: TypeAdapter | None = None,
        **kwargs: Any,
    ) -> None:
        """

        Changes
          - .. versionadded:: 4.12.0

            - :paramref:`retry_policy`
            - :paramref:`noevict`
            - :paramref:`notouch`
            - :meth:`RedisCluster.ensure_persistence` context manager
            - Redis Module support

              - RedisJSON: :attr:`RedisCluster.json`
              - RedisBloom:

                - BloomFilter: :attr:`RedisCluster.bf`
                - CuckooFilter: :attr:`RedisCluster.cf`
                - CountMinSketch: :attr:`RedisCluster.cms`
                - TopK: :attr:`RedisCluster.topk`
                - TDigest: :attr:`RedisCluster.tdigest`
              - RedisTimeSeries: :attr:`RedisCluster.timeseries`
              - RedisGraph: :attr:`RedisCluster.graph`
              - RediSearch:

                - Search & Aggregation: :attr:`RedisCluster.search`
                - Autocomplete: :attr:`RedisCluster.autocomplete`

          - .. versionchanged:: 4.4.0

            - :paramref:`nodemanager_follow_cluster` now defaults to ``True``

          - .. deprecated:: 4.4.0

            - The :paramref:`readonly` argument is deprecated in favour of
              :paramref:`read_from_replicas`

          - .. versionadded:: 4.3.0

            - Added :paramref:`connection_pool_cls`

          - .. versionchanged:: 4.0.0

            - :paramref:`non_atomic_cross_slot` defaults to ``True``
            - :paramref:`protocol_version` defaults to ``3``

          - .. versionadded:: 3.11.0

            - Added :paramref:`noreply`

          - .. versionadded:: 3.10.0

            - Synchronized ssl constructor parameters with :class:`coredis.Redis`

          - .. versionadded:: 3.9.0

            - If :paramref:`cache` is provided the client will check & populate
              the cache for read only commands and invalidate it for commands
              that could change the key(s) in the request.

          - .. versionadded:: 3.6.0

            - The :paramref:`non_atomic_cross_slot` parameter was added

          - .. versionchanged:: 3.5.0

            - The :paramref:`verify_version` parameter now defaults to ``True``

          - .. versionadded:: 3.1.0

            - The :paramref:`protocol_version` and :paramref:`verify_version`
              parameters were added

        :param host: Can be used to point to a startup node
        :param port: Can be used to point to a startup node
        :param startup_nodes: List of nodes from which initial bootstrapping
         can be done
        :param stream_timeout: Timeout (seconds) when reading responses from the server
        :param connect_timeout: Timeout (seconds) for establishing a connection to the server
        :param ssl: Whether to use an SSL connection
        :param ssl_context: If provided the :class:`ssl.SSLContext` will be used when
         establishing the connection. Otherwise either the default context (if no other
         ssl related parameters are provided) or a custom context based on the other
         ``ssl_*`` parameters will be used.
        :param ssl_keyfile: Path of the private key to use
        :param ssl_certfile: Path to the certificate corresponding to :paramref:`ssl_keyfile`
        :param ssl_cert_reqs: Whether to try to verify the server's certificates and
         how to behave if verification fails (See :attr:`ssl.SSLContext.verify_mode`).
        :param ssl_check_hostname: Whether to enable hostname checking when establishing
         an ssl connection.
        :param ssl_ca_certs: Path to a concatenated certificate authority file or a directory
         containing several CA certificates to use for validating the server's certificates
         when :paramref:`ssl_cert_reqs` is not ``"none"``
         (See :meth:`ssl.SSLContext.load_verify_locations`).
        :param max_connections: Maximum number of connections that should be kept open at one time
        :param max_connections_per_node: If ``True`` the :paramref:`max_connections` limit
         is applied per node rather than across the whole cluster
        :param read_from_replicas: If ``True`` the client will route readonly commands to replicas
        :param reinitialize_steps: Number of moved errors that result in a cluster
         topology refresh using the startup nodes provided
        :param skip_full_coverage_check: Skips the check of cluster-require-full-coverage config,
         useful for clusters without the CONFIG command (like AWS ElastiCache)
        :param nodemanager_follow_cluster: The node manager will during initialization try the
         last set of nodes that it was operating on. This will allow the client to drift
         alongside the cluster if the cluster nodes move around a lot.
        :param encoding: The codec to use to encode strings transmitted to redis
         and decode responses with. (See :ref:`handbook/encoding:encoding/decoding`)
        :param decode_responses: If ``True`` string responses from the server
         will be decoded using :paramref:`encoding` before being returned.
         (See :ref:`handbook/encoding:encoding/decoding`)
        :param connection_pool: The connection pool instance to use. If not provided
         a new pool will be assigned to this client.
        :param connection_pool_cls: The connection pool class to use when constructing
         a connection pool for this instance.
        :param protocol_version: Whether to use the RESP (``2``) or RESP3 (``3``)
         protocol for parsing responses from the server (Default ``3``).
         (See :ref:`handbook/response:redis response`)
        :param verify_version: Validate the redis server version against the documented
         version a command was introduced in before executing it and raise a
         :exc:`CommandNotSupportedError` if the required version is higher than
         the reported server version
        :param non_atomic_cross_slot: If ``True`` certain commands that can operate
         on multiple keys (cross slot) will be split across the relevant nodes by
         mapping the keys to the appropriate slot and the result merged before being
         returned.
        :param cache: If provided the cache will be used to avoid requests for read only
         commands if the client has already requested the data and it hasn't been invalidated.
         The cache is responsible for any mutations to the keys that happen outside of this client
        :param noreply: If ``True`` the client will not request a response for any
         commands sent to the server.
        :param noevict: Ensures that connections from the client will be excluded from the
         client eviction process even if we're above the configured client eviction threshold.
        :param notouch: Ensures that commands sent by the client will not alter the LRU/LFU
         of the keys they access.
        :param retry_policy: The retry policy to use when interacting with the cluster
        :param type_adapter: The adapter to use for serializing / deserializing custom types
         when interacting with redis commands.
        """

        if "db" in kwargs:  # noqa
            raise RedisClusterException("Argument 'db' is not possible to use in cluster mode")

        if connection_pool:
            pool = connection_pool
        else:
            startup_nodes = [] if startup_nodes is None else list(startup_nodes)

            # Support host/port as argument

            if host:
                startup_nodes.append(
                    Node(
                        host=host,
                        port=port if port else 7000,
                    )
                )
            if ssl_context is not None:
                kwargs["ssl_context"] = ssl_context
            elif ssl:
                ssl_context = RedisSSLContext(
                    ssl_keyfile,
                    ssl_certfile,
                    ssl_cert_reqs,
                    ssl_ca_certs,
                    ssl_check_hostname,
                ).get()
                kwargs["ssl_context"] = ssl_context

            pool = connection_pool_cls(
                startup_nodes=startup_nodes,
                max_connections=max_connections,
                reinitialize_steps=reinitialize_steps,
                max_connections_per_node=max_connections_per_node,
                skip_full_coverage_check=skip_full_coverage_check,
                nodemanager_follow_cluster=nodemanager_follow_cluster,
                read_from_replicas=readonly or read_from_replicas,
                encoding=encoding,
                decode_responses=decode_responses,
                protocol_version=protocol_version,
                noreply=noreply,
                noevict=noevict,
                notouch=notouch,
                stream_timeout=stream_timeout,
                connect_timeout=connect_timeout,
                **kwargs,
            )

        super().__init__(
            stream_timeout=stream_timeout,
            connect_timeout=connect_timeout,
            connection_pool=pool,
            connection_pool_cls=connection_pool_cls,
            encoding=encoding,
            decode_responses=decode_responses,
            verify_version=verify_version,
            protocol_version=protocol_version,
            noreply=noreply,
            noevict=noevict,
            notouch=notouch,
            retry_policy=retry_policy,
            type_adapter=type_adapter,
            **kwargs,
        )

        self.refresh_table_asap: bool = False
        self.route_flags: dict[bytes, NodeFlag] = self.__class__.ROUTING_FLAGS.copy()
        self.split_flags: dict[bytes, NodeFlag] = self.__class__.SPLIT_FLAGS.copy()
        self.result_callbacks: dict[bytes, Callable[..., Any]] = (
            self.__class__.RESULT_CALLBACKS.copy()
        )
        self.non_atomic_cross_slot = non_atomic_cross_slot
        self.cache = cache
        self._decodecontext: contextvars.ContextVar[bool | None,] = contextvars.ContextVar(
            "decode", default=None
        )
        self._encodingcontext: contextvars.ContextVar[str | None,] = contextvars.ContextVar(
            "decode", default=None
        )

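    # A minimal usage sketch, assuming a cluster with a node listening on
    # localhost:7000 (host/port are illustrative):
    #
    #     import asyncio
    #     import coredis
    #
    #     async def main() -> None:
    #         client = coredis.RedisCluster("localhost", 7000, decode_responses=True)
    #         await client.set("fubar", "baz")
    #         assert await client.get("fubar") == "baz"
    #
    #     asyncio.run(main())
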
    @classmethod
    @overload
    def from_url(
        cls: type[RedisCluster[bytes]],
        url: str,
        *,
        db: int | None = ...,
        skip_full_coverage_check: bool = ...,
        decode_responses: Literal[False] = ...,
        protocol_version: Literal[2, 3] = ...,
        verify_version: bool = ...,
        noreply: bool = ...,
        noevict: bool = ...,
        notouch: bool = ...,
        retry_policy: RetryPolicy = ...,
        type_adapter: TypeAdapter | None = ...,
        cache: AbstractCache | None = ...,
        **kwargs: Any,
    ) -> RedisCluster[bytes]: ...

    @classmethod
    @overload
    def from_url(
        cls: type[RedisCluster[str]],
        url: str,
        *,
        db: int | None = ...,
        skip_full_coverage_check: bool = ...,
        decode_responses: Literal[True],
        protocol_version: Literal[2, 3] = ...,
        verify_version: bool = ...,
        noreply: bool = ...,
        noevict: bool = ...,
        notouch: bool = ...,
        retry_policy: RetryPolicy = ...,
        type_adapter: TypeAdapter | None = ...,
        cache: AbstractCache | None = ...,
        **kwargs: Any,
    ) -> RedisCluster[str]: ...

    @classmethod
    def from_url(
        cls: type[RedisClusterT],
        url: str,
        *,
        db: int | None = None,
        skip_full_coverage_check: bool = False,
        decode_responses: bool = False,
        protocol_version: Literal[2, 3] = 3,
        verify_version: bool = True,
        noreply: bool = False,
        noevict: bool = False,
        notouch: bool = False,
        cache: AbstractCache | None = None,
        retry_policy: RetryPolicy = CompositeRetryPolicy(
            ConstantRetryPolicy((ClusterDownError,), 2, 0.1),
            ConstantRetryPolicy(
                (
                    ConnectionError,
                    TimeoutError,
                ),
                2,
                0.1,
            ),
        ),
        type_adapter: TypeAdapter | None = None,
        **kwargs: Any,
    ) -> RedisClusterT:
        """
        Return a Cluster client object configured from the startup node in URL,
        which must use either the ``redis://`` or ``rediss://`` scheme
        `<http://www.iana.org/assignments/uri-schemes/prov/redis>`_

        For example:

        - ``redis://[:password]@localhost:6379``
        - ``rediss://[:password]@localhost:6379``

        :paramref:`url` and :paramref:`kwargs` are passed as is to
        the :func:`coredis.ConnectionPool.from_url`.
        """
        if decode_responses:
            return cls(
                decode_responses=True,
                protocol_version=protocol_version,
                verify_version=verify_version,
                noreply=noreply,
                retry_policy=retry_policy,
                type_adapter=type_adapter,
                cache=cache,
                connection_pool=ClusterConnectionPool.from_url(
                    url,
                    db=db,
                    skip_full_coverage_check=skip_full_coverage_check,
                    decode_responses=decode_responses,
                    protocol_version=protocol_version,
                    noreply=noreply,
                    noevict=noevict,
                    notouch=notouch,
                    **kwargs,
                ),
            )
        else:
            return cls(
                decode_responses=False,
                protocol_version=protocol_version,
                verify_version=verify_version,
                noreply=noreply,
                retry_policy=retry_policy,
                type_adapter=type_adapter,
                cache=cache,
                connection_pool=ClusterConnectionPool.from_url(
                    url,
                    db=db,
                    skip_full_coverage_check=skip_full_coverage_check,
                    decode_responses=decode_responses,
                    protocol_version=protocol_version,
                    noreply=noreply,
                    noevict=noevict,
                    notouch=notouch,
                    **kwargs,
                ),
            )

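    # A minimal usage sketch (the URL is illustrative):
    #
    #     client = coredis.RedisCluster.from_url(
    #         "redis://localhost:7000", decode_responses=True
    #     )
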
    async def initialize(self) -> RedisCluster[AnyStr]:
        if self.refresh_table_asap:
            self.connection_pool.initialized = False
        await self.connection_pool.initialize()
        self.refresh_table_asap = False
        await self._populate_module_versions()
        if self.cache:
            self.cache = await self.cache.initialize(self)
        return self

    def __repr__(self) -> str:
        servers = list(
            {f"{info.host}:{info.port}" for info in self.connection_pool.nodes.startup_nodes}
        )
        servers.sort()

        return "{}<{}>".format(type(self).__name__, ", ".join(servers))

    @property
    def all_nodes(self) -> Iterator[Redis[AnyStr]]:
        """Iterator over clients for all nodes in the cluster"""
        for node in self.connection_pool.nodes.all_nodes():
            yield cast(
                Redis[AnyStr],
                self.connection_pool.nodes.get_redis_link(node.host, node.port),
            )

    @property
    def primaries(self) -> Iterator[Redis[AnyStr]]:
        """Iterator over clients for the primary nodes in the cluster"""
        for primary in self.connection_pool.nodes.all_primaries():
            yield cast(
                Redis[AnyStr],
                self.connection_pool.nodes.get_redis_link(primary.host, primary.port),
            )

    @property
    def replicas(self) -> Iterator[Redis[AnyStr]]:
        """Iterator over clients for the replica nodes in the cluster"""
        for replica in self.connection_pool.nodes.all_replicas():
            yield cast(
                Redis[AnyStr],
                self.connection_pool.nodes.get_redis_link(replica.host, replica.port),
            )

    @property
    def num_replicas_per_shard(self) -> int:
        """
        Number of replicas per shard of the cluster determined by
        initial cluster topology discovery
        """
        return self.connection_pool.nodes.replicas_per_shard

    async def _ensure_initialized(self) -> None:
        if not self.connection_pool.initialized or self.refresh_table_asap:
            await self

    def _determine_slots(
        self, command: bytes, *args: RedisValueT, **options: Unpack[ExecutionParameters]
    ) -> set[int]:
        """Determines the slots the command and args would touch"""
        keys = cast(tuple[RedisValueT, ...], options.get("keys")) or KeySpec.extract_keys(
            command, *args, readonly_command=self.connection_pool.read_from_replicas
        )
        if (
            command
            in {
                CommandName.EVAL,
                CommandName.EVAL_RO,
                CommandName.EVALSHA,
                CommandName.EVALSHA_RO,
                CommandName.FCALL,
                CommandName.FCALL_RO,
                CommandName.PUBLISH,
            }
            and not keys
        ):
            return set()

        return {hash_slot(b(key)) for key in keys}

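    # A sketch of how slot determination behaves (key names are illustrative;
    # keys that share a ``{hash tag}`` map to the same slot, so a multi-key
    # command touching them can be routed to a single node):
    #
    #     client._determine_slots(b"MGET", "{user:1}:name", "{user:1}:email")
    #     # -> a single-element set, since both keys share the hash tag
    #     client._determine_slots(b"MGET", "a", "b")
    #     # -> typically two slots, i.e. a cross-slot command
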
    def _merge_result(
        self,
        command: bytes,
        res: dict[str, R],
        **kwargs: Unpack[ExecutionParameters],
    ) -> R:
        assert command in self.result_callbacks
        return cast(
            R,
            self.result_callbacks[command](res, version=self.protocol_version, **kwargs),
        )

    def determine_node(
        self, command: bytes, *args: RedisValueT, **kwargs: Unpack[ExecutionParameters]
    ) -> list[ManagedNode] | None:
        node_flag = self.route_flags.get(command)
        if command in self.split_flags and self.non_atomic_cross_slot:
            node_flag = self.split_flags[command]

        if node_flag == NodeFlag.RANDOM:
            return [self.connection_pool.nodes.random_node(primary=True)]
        elif node_flag == NodeFlag.PRIMARIES:
            return list(self.connection_pool.nodes.all_primaries())
        elif node_flag == NodeFlag.ALL:
            return list(self.connection_pool.nodes.all_nodes())
        elif node_flag == NodeFlag.SLOT_ID and (
            slot_arguments_range := kwargs.get("slot_arguments_range", None)
        ):
            slot_start, slot_end = slot_arguments_range
            nodes = list(
                self.connection_pool.nodes.nodes_from_slots(
                    *cast(tuple[int, ...], args[slot_start:slot_end])
                ).keys()
            )
            return [self.connection_pool.nodes.nodes[k] for k in nodes]
        return None

    async def on_connection_error(self, _: BaseException) -> None:
        self.connection_pool.disconnect()
        self.connection_pool.reset()
        self.refresh_table_asap = True

    async def on_cluster_down_error(self, _: BaseException) -> None:
        self.connection_pool.disconnect()
        self.connection_pool.reset()
        self.refresh_table_asap = True

    async def execute_command(
        self,
        command: RedisCommandP,
        callback: Callable[..., R] = NoopCallback(),
        **kwargs: Unpack[ExecutionParameters],
    ) -> R:
        """
        Sends a command to one or many nodes in the cluster
        with retries based on :paramref:`RedisCluster.retry_policy`
        """

        return await self.retry_policy.call_with_retries(
            lambda: self._execute_command(command, callback=callback, **kwargs),
            failure_hook={
                ConnectionError: self.on_connection_error,
                ClusterDownError: self.on_cluster_down_error,
            },
            before_hook=self._ensure_initialized,
        )

    async def _execute_command(
        self,
        command: RedisCommandP,
        callback: Callable[..., R] = NoopCallback(),
        **kwargs: Unpack[ExecutionParameters],
    ) -> R:
        """
        Sends a command to one or many nodes in the cluster
        """
        nodes = self.determine_node(command.name, *command.arguments, **kwargs)
        if nodes and len(nodes) > 1:
            tasks: dict[str, Coroutine[Any, Any, R]] = {}
            node_arg_mapping = self._split_args_over_nodes(
                nodes,
                command.name,
                *command.arguments,
                slot_arguments_range=kwargs.get("slot_arguments_range", None),
            )
            node_name_map = {n.name: n for n in nodes}
            for node_name in node_arg_mapping:
                for portion, pargs in enumerate(node_arg_mapping[node_name]):
                    tasks[f"{node_name}:{portion}"] = self._execute_command_on_single_node(
                        RedisCommand(command.name, pargs),
                        callback=callback,
                        node=node_name_map[node_name],
                        slots=None,
                        **kwargs,
                    )

            results = await asyncio.gather(*tasks.values(), return_exceptions=True)
            if self.noreply:
                return None  # type: ignore
            return cast(
                R,
                self._merge_result(command.name, dict(zip(tasks.keys(), results))),
            )
        else:
            node = None
            slots = None
            if not nodes:
                slots = list(self._determine_slots(command.name, *command.arguments, **kwargs))
            else:
                node = nodes.pop()
            return await self._execute_command_on_single_node(
                command,
                callback=callback,
                node=node,
                slots=slots,
                **kwargs,
            )

    def _split_args_over_nodes(
        self,
        nodes: list[ManagedNode],
        command: bytes,
        *args: RedisValueT,
        slot_arguments_range: tuple[int, int] | None = None,
    ) -> dict[str, list[tuple[RedisValueT, ...]]]:
        node_flag = self.route_flags.get(command)
        node_arg_mapping: dict[str, list[tuple[RedisValueT, ...]]] = {}
        if command in self.split_flags and self.non_atomic_cross_slot:
            keys = KeySpec.extract_keys(command, *args)
            if keys:
                key_start: int = args.index(keys[0])
                key_end: int = args.index(keys[-1])
                assert args[key_start : 1 + key_end] == keys, (
                    f"Unable to map {command.decode('latin-1')} by keys {keys}"
                )

                for (
                    node_name,
                    key_groups,
                ) in self.connection_pool.nodes.keys_to_nodes_by_slot(*keys).items():
                    for _, node_keys in key_groups.items():
                        node_arg_mapping.setdefault(node_name, []).append(
                            (
                                *args[:key_start],
                                *node_keys,  # type: ignore
                                *args[1 + key_end :],
                            )
                        )
                if self.cache and command not in READONLY_COMMANDS:
                    self.cache.invalidate(*keys)
        elif node_flag == NodeFlag.SLOT_ID and slot_arguments_range:
            # TODO: fix this nonsense put in place just to support a few cluster commands
            # related to slot management in cluster client which really no one needs to be calling
            # through the cluster client.
            slot_start, slot_end = slot_arguments_range
            all_slots = [int(k) for k in args[slot_start:slot_end] if k is not None]
            for node, slots in self.connection_pool.nodes.nodes_from_slots(*all_slots).items():
                node_arg_mapping[node] = [(*slots, *args[slot_end:])]  # type: ignore
        else:
            # This command is not meant to be split across nodes and each node
            # should be called with the same arguments
            node_arg_mapping = {node.name: [args] for node in nodes}
        return node_arg_mapping

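    # A sketch of the cross-slot split (key names are illustrative): with
    # non_atomic_cross_slot enabled, a multi-key command such as
    #
    #     await client.delete(["a", "b", "c"])
    #
    # is decomposed by _split_args_over_nodes into one DEL per slot group,
    # the per-node invocations are gathered concurrently by _execute_command,
    # and the partial results are merged by the registered result callback
    # before being returned.
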
    async def _execute_command_on_single_node(
        self,
        command: RedisCommandP,
        callback: Callable[..., R] = NoopCallback(),
        node: ManagedNode | None = None,
        slots: list[int] | None = None,
        **kwargs: Unpack[ExecutionParameters],
    ) -> R:
        redirect_addr = None

        asking = False

        if not node and not slots:
            try_random_node = True
            try_random_type = NodeFlag.PRIMARIES
        else:
            try_random_node = False
            try_random_type = NodeFlag.ALL
        remaining_attempts = int(self.MAX_RETRIES)

        while remaining_attempts > 0:
            remaining_attempts -= 1
            if self.refresh_table_asap and not slots:
                await self
            if asking and redirect_addr:
                node = self.connection_pool.nodes.nodes[redirect_addr]
                r = await self.connection_pool.get_connection_by_node(node)
            elif try_random_node:
                r = await self.connection_pool.get_random_connection(
                    primary=try_random_type == NodeFlag.PRIMARIES
                )
                if slots:
                    try_random_node = False
            elif node:
                r = await self.connection_pool.get_connection_by_node(node)
            elif slots:
                if self.refresh_table_asap:
                    # MOVED
                    node = self.connection_pool.get_primary_node_by_slots(slots)
                else:
                    node = self.connection_pool.get_node_by_slots(slots)
                r = await self.connection_pool.get_connection_by_node(node)
            else:
                continue
            quick_release = self.should_quick_release(command)
            released = False
            try:
                if asking:
                    request = await r.create_request(
                        CommandName.ASKING, noreply=self.noreply, decode=False
                    )
                    await request
                    asking = False
                keys = KeySpec.extract_keys(command.name, *command.arguments)
                cacheable = (
                    self.cache
                    and command.name in CACHEABLE_COMMANDS
                    and len(keys) == 1
                    and not self.noreply
                    and self._decodecontext.get() is None
                )
                cache_hit = False
                cached_reply = None
                use_cached = False
                reply = None
                if self.cache:
                    if r.tracking_client_id != self.cache.get_client_id(r):
                        self.cache.reset()
                        await r.update_tracking_client(True, self.cache.get_client_id(r))
                    if command.name not in READONLY_COMMANDS:
                        self.cache.invalidate(*keys)
                    elif cacheable:
                        try:
                            cached_reply = cast(
                                R,
                                self.cache.get(
                                    command.name,
                                    keys[0],
                                    *command.arguments,
                                ),
                            )
                            use_cached = random.random() * 100.0 < min(100.0, self.cache.confidence)
                            cache_hit = True
                        except KeyError:
                            pass

                if not (use_cached and cached_reply):
                    request = await r.create_request(
                        command.name,
                        *command.arguments,
                        noreply=self.noreply,
                        decode=kwargs.get("decode", self._decodecontext.get()),
                        encoding=self._encodingcontext.get(),
                    )
                    if quick_release and not (self.requires_wait or self.requires_waitaof):
                        released = True
                        self.connection_pool.release(r)

                    reply = await request
                maybe_wait = [
                    await self._ensure_wait(command, r),
                    await self._ensure_persistence(command, r),
                ]
                await asyncio.gather(*maybe_wait)
                if self.noreply:
                    return  # type: ignore
                else:
                    if isinstance(callback, AsyncPreProcessingCallback):
                        await callback.pre_process(
                            self,
                            reply,
                        )
                    response = callback(
                        cached_reply if cache_hit else reply,
                        version=self.protocol_version,
                    )
                    if self.cache and cacheable:
                        if cache_hit and not use_cached:
                            self.cache.feedback(
                                command.name,
                                keys[0],
                                *command.arguments,
                                match=cached_reply == reply,
                            )
                        if not cache_hit:
                            self.cache.put(
                                command.name,
                                keys[0],
                                *command.arguments,
                                value=reply,
                            )
                    return response
            except (RedisClusterException, BusyLoadingError, asyncio.CancelledError):
                raise
            except MovedError as e:
                # Reinitialize on every x MovedErrors.
                # This counter will increase faster when the same client object
                # is shared between multiple threads. To reduce the frequency you
                # can set the variable 'reinitialize_steps' in the constructor.
                self.refresh_table_asap = True
                await self.connection_pool.nodes.increment_reinitialize_counter()

                node = self.connection_pool.nodes.set_node(e.host, e.port, server_type="primary")
                try_random_node = False
                self.connection_pool.nodes.slots[e.slot_id][0] = node
            except TryAgainError:
                if remaining_attempts < self.MAX_RETRIES / 2:
                    await asyncio.sleep(0.05)
            except AskError as e:
                redirect_addr, asking = f"{e.host}:{e.port}", True
            finally:
                self._ensure_server_version(r.server_version)
                if not released:
                    self.connection_pool.release(r)

        raise ClusterError("Maximum retries exhausted.")

    @overload
    def decoding(
        self, mode: Literal[False], encoding: str | None = None
    ) -> contextlib.AbstractContextManager[RedisCluster[bytes]]: ...

    @overload
    def decoding(
        self, mode: Literal[True], encoding: str | None = None
    ) -> contextlib.AbstractContextManager[RedisCluster[str]]: ...

    @contextlib.contextmanager
    @versionadded(version="4.8.0")
    def decoding(self, mode: bool, encoding: str | None = None) -> Iterator[RedisCluster[Any]]:
        """
        Context manager to temporarily change the decoding behavior
        of the client

        :param mode: Whether to decode or not
        :param encoding: Optional encoding to use if decoding. If not provided
         the :paramref:`~coredis.RedisCluster.encoding` parameter provided to the client will
         be used.

        Example::

            client = coredis.RedisCluster(decode_responses=True)
            await client.set("fubar", "baz")
            assert await client.get("fubar") == "baz"
            with client.decoding(False):
                assert await client.get("fubar") == b"baz"
                with client.decoding(True):
                    assert await client.get("fubar") == "baz"

        """
        prev_decode = self._decodecontext.get()
        prev_encoding = self._encodingcontext.get()
        self._decodecontext.set(mode)
        self._encodingcontext.set(encoding)
        try:
            yield self
        finally:
            self._decodecontext.set(prev_decode)
            self._encodingcontext.set(prev_encoding)

    def pubsub(
        self,
        ignore_subscribe_messages: bool = False,
        retry_policy: RetryPolicy | None = None,
        channels: Parameters[StringT] | None = None,
        channel_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
        patterns: Parameters[StringT] | None = None,
        pattern_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
        **kwargs: Any,
    ) -> ClusterPubSub[AnyStr]:
        """
        Return a Pub/Sub instance that can be used to consume messages that get
        published to the subscribed channels or patterns.

        :param ignore_subscribe_messages: Whether to skip subscription
         acknowledgement messages
        :param retry_policy: An explicit retry policy to use in the subscriber.
        :param channels: channels that the constructed Pubsub instance should
         automatically subscribe to
        :param channel_handlers: Mapping of channels to automatically subscribe to
         and the associated handlers that will be invoked when a message is received
         on the specific channel.
        :param patterns: patterns that the constructed Pubsub instance should
         automatically subscribe to
        :param pattern_handlers: Mapping of patterns to automatically subscribe to
         and the associated handlers that will be invoked when a message is received
         on a channel matching the pattern.
        """
        return ClusterPubSub[AnyStr](
            self.connection_pool,
            ignore_subscribe_messages=ignore_subscribe_messages,
            retry_policy=retry_policy,
            channels=channels,
            channel_handlers=channel_handlers,
            patterns=patterns,
            pattern_handlers=pattern_handlers,
            **kwargs,
        )

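    # A minimal usage sketch (channel name and handler are illustrative);
    # consuming the subscription loop is elided:
    #
    #     async def on_message(message):
    #         print(message)
    #
    #     pubsub = client.pubsub(
    #         channels=["broadcast"],
    #         channel_handlers={"broadcast": on_message},
    #     )
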
    @versionadded(version="3.6.0")
    def sharded_pubsub(
        self,
        ignore_subscribe_messages: bool = False,
        read_from_replicas: bool = False,
        retry_policy: RetryPolicy | None = None,
        channels: Parameters[StringT] | None = None,
        channel_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
        **kwargs: Any,
    ) -> ShardedPubSub[AnyStr]:
        """
        Return a Pub/Sub instance that can be used to consume messages from
        the subscribed channels in a redis cluster.

        The implementation returned differs from that returned by :meth:`pubsub`
        as it uses the Sharded Pub/Sub implementation which routes messages
        to cluster nodes using the same algorithm used to assign keys to slots.
        This effectively restricts the propagation of messages to be within the
        shard of a cluster, hence affording horizontal scaling of Pub/Sub
        with the cluster itself.

        :param ignore_subscribe_messages: Whether to skip subscription
         acknowledgement messages
        :param read_from_replicas: Whether to read messages from replica nodes
        :param retry_policy: An explicit retry policy to use in the subscriber.
        :param channels: channels that the constructed Pubsub instance should
         automatically subscribe to
        :param channel_handlers: Mapping of channels to automatically subscribe to
         and the associated handlers that will be invoked when a message is received
         on the specific channel.

        New in :redis-version:`7.0.0`
        """

        return ShardedPubSub[AnyStr](
            self.connection_pool,
            ignore_subscribe_messages=ignore_subscribe_messages,
            read_from_replicas=read_from_replicas,
            retry_policy=retry_policy,
            channels=channels,
            channel_handlers=channel_handlers,
            **kwargs,
        )

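    # As with pubsub() above, but messages only propagate within the shard
    # that owns the channel's slot; requires Redis 7.0.0+ (channel name is
    # illustrative):
    #
    #     sharded = client.sharded_pubsub(channels=["events"])
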
    async def pipeline(
        self,
        transaction: bool | None = None,
        watches: Parameters[StringT] | None = None,
        timeout: float | None = None,
    ) -> coredis.pipeline.ClusterPipeline[AnyStr]:
        """
        Returns a new pipeline object that can queue multiple commands for
        batch execution. Pipelines in cluster mode only provide a subset of the
        functionality of pipelines in standalone mode.

        Specifically:

        - Each command in the pipeline should only access keys on the same node
        - Transactions are disabled by default and are only supported if all
          watched keys route to the same node as the commands in the multi/exec
          part of the pipeline.

        :param transaction: indicates whether all commands should be executed atomically.
        :param watches: If :paramref:`transaction` is True these keys are watched for external
         changes during the transaction.
        :param timeout: If specified this value will take precedence over
         :paramref:`RedisCluster.stream_timeout`

        """
        await self.connection_pool.initialize()

        from coredis.pipeline import ClusterPipeline

        return ClusterPipeline[AnyStr](
            client=self,
            transaction=transaction,
            watches=watches,
            timeout=timeout,
        )

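    # A minimal usage sketch; in cluster mode each queued command should only
    # touch keys on a single node, which the shared ``{tag}`` hash tag below
    # guarantees (key names are illustrative):
    #
    #     async with await client.pipeline() as pipe:
    #         await pipe.set("{tag}:a", "1")
    #         await pipe.get("{tag}:a")
    #         results = await pipe.execute()
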
    async def transaction(
        self,
        func: Callable[
            [coredis.pipeline.ClusterPipeline[AnyStr]],
            Coroutine[Any, Any, Any],
        ],
        *watches: StringT,
        value_from_callable: bool = False,
        watch_delay: float | None = None,
        **kwargs: Any,
    ) -> Any:
        """
        Convenience method for executing the callable :paramref:`func` as a
        transaction while watching all keys specified in :paramref:`watches`.

        :param func: callable should expect a single argument which is a
         :class:`coredis.pipeline.ClusterPipeline` object retrieved by calling
         :meth:`~coredis.RedisCluster.pipeline`.
        :param watches: The keys to watch during the transaction. The keys should route
         to the same node as the keys touched by the commands in :paramref:`func`
        :param value_from_callable: Whether to return the result of the transaction or the value
         returned from :paramref:`func`

        .. warning:: Cluster transactions can only be run with commands that
           route to the same slot.

        .. versionchanged:: 4.9.0

           When the transaction is started with :paramref:`watches` the
           :class:`~coredis.pipeline.ClusterPipeline` instance passed to :paramref:`func`
           will not start queuing commands until a call to
           :meth:`~coredis.pipeline.ClusterPipeline.multi` is made. This makes the cluster
           implementation consistent with :meth:`coredis.Redis.transaction`
        """
        async with await self.pipeline(True) as pipe:
            while True:
                try:
                    if watches:
                        await pipe.watch(*watches)
                    func_value = await func(pipe)
                    exec_value = await pipe.execute()
                    return func_value if value_from_callable else exec_value
                except WatchError:
                    if watch_delay is not None and watch_delay > 0:
                        await asyncio.sleep(watch_delay)
                    continue

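    # A minimal usage sketch without watches (key name is illustrative); the
    # callable queues commands on the pipeline and transaction() executes
    # them, retrying on WatchError:
    #
    #     async def queue_commands(pipe):
    #         await pipe.set("{tag}:counter", 1)
    #
    #     result = await client.transaction(queue_commands)
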
    async def scan_iter(
        self,
        match: StringT | None = None,
        count: int | None = None,
        type_: StringT | None = None,
    ) -> AsyncIterator[AnyStr]:
        await self._ensure_initialized()
        for node in self.primaries:
            cursor = None
            while cursor != 0:
                cursor, data = await node.scan(cursor or 0, match, count, type_)
                for item in data:
                    yield item
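
    # A minimal usage sketch: iterate all keys matching a pattern across
    # every primary in the cluster (pattern is illustrative):
    #
    #     async for key in client.scan_iter(match="user:*"):
    #         print(key)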