coredis-5.2.0-cp314-cp314t-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of coredis might be problematic.

Files changed (100)
  1. 22fe76227e35f92ab5c3__mypyc.cpython-314t-darwin.so +0 -0
  2. coredis/__init__.py +42 -0
  3. coredis/_enum.py +42 -0
  4. coredis/_json.py +11 -0
  5. coredis/_packer.cpython-314t-darwin.so +0 -0
  6. coredis/_packer.py +71 -0
  7. coredis/_protocols.py +50 -0
  8. coredis/_py_311_typing.py +20 -0
  9. coredis/_py_312_typing.py +17 -0
  10. coredis/_sidecar.py +114 -0
  11. coredis/_utils.cpython-314t-darwin.so +0 -0
  12. coredis/_utils.py +440 -0
  13. coredis/_version.py +34 -0
  14. coredis/_version.pyi +1 -0
  15. coredis/cache.py +801 -0
  16. coredis/client/__init__.py +6 -0
  17. coredis/client/basic.py +1238 -0
  18. coredis/client/cluster.py +1264 -0
  19. coredis/commands/__init__.py +64 -0
  20. coredis/commands/_key_spec.py +517 -0
  21. coredis/commands/_utils.py +108 -0
  22. coredis/commands/_validators.py +159 -0
  23. coredis/commands/_wrappers.py +175 -0
  24. coredis/commands/bitfield.py +110 -0
  25. coredis/commands/constants.py +662 -0
  26. coredis/commands/core.py +8484 -0
  27. coredis/commands/function.py +408 -0
  28. coredis/commands/monitor.py +168 -0
  29. coredis/commands/pubsub.py +905 -0
  30. coredis/commands/request.py +108 -0
  31. coredis/commands/script.py +296 -0
  32. coredis/commands/sentinel.py +246 -0
  33. coredis/config.py +50 -0
  34. coredis/connection.py +906 -0
  35. coredis/constants.cpython-314t-darwin.so +0 -0
  36. coredis/constants.py +37 -0
  37. coredis/credentials.py +45 -0
  38. coredis/exceptions.py +360 -0
  39. coredis/experimental/__init__.py +1 -0
  40. coredis/globals.py +23 -0
  41. coredis/modules/__init__.py +117 -0
  42. coredis/modules/autocomplete.py +138 -0
  43. coredis/modules/base.py +262 -0
  44. coredis/modules/filters.py +1319 -0
  45. coredis/modules/graph.py +362 -0
  46. coredis/modules/json.py +691 -0
  47. coredis/modules/response/__init__.py +0 -0
  48. coredis/modules/response/_callbacks/__init__.py +0 -0
  49. coredis/modules/response/_callbacks/autocomplete.py +42 -0
  50. coredis/modules/response/_callbacks/graph.py +237 -0
  51. coredis/modules/response/_callbacks/json.py +21 -0
  52. coredis/modules/response/_callbacks/search.py +221 -0
  53. coredis/modules/response/_callbacks/timeseries.py +158 -0
  54. coredis/modules/response/types.py +179 -0
  55. coredis/modules/search.py +1089 -0
  56. coredis/modules/timeseries.py +1139 -0
  57. coredis/parser.cpython-314t-darwin.so +0 -0
  58. coredis/parser.py +344 -0
  59. coredis/pipeline.py +1225 -0
  60. coredis/pool/__init__.py +11 -0
  61. coredis/pool/basic.py +453 -0
  62. coredis/pool/cluster.py +517 -0
  63. coredis/pool/nodemanager.py +340 -0
  64. coredis/py.typed +0 -0
  65. coredis/recipes/__init__.py +0 -0
  66. coredis/recipes/credentials/__init__.py +5 -0
  67. coredis/recipes/credentials/iam_provider.py +63 -0
  68. coredis/recipes/locks/__init__.py +5 -0
  69. coredis/recipes/locks/extend.lua +17 -0
  70. coredis/recipes/locks/lua_lock.py +281 -0
  71. coredis/recipes/locks/release.lua +10 -0
  72. coredis/response/__init__.py +5 -0
  73. coredis/response/_callbacks/__init__.py +538 -0
  74. coredis/response/_callbacks/acl.py +32 -0
  75. coredis/response/_callbacks/cluster.py +183 -0
  76. coredis/response/_callbacks/command.py +86 -0
  77. coredis/response/_callbacks/connection.py +31 -0
  78. coredis/response/_callbacks/geo.py +58 -0
  79. coredis/response/_callbacks/hash.py +85 -0
  80. coredis/response/_callbacks/keys.py +59 -0
  81. coredis/response/_callbacks/module.py +33 -0
  82. coredis/response/_callbacks/script.py +85 -0
  83. coredis/response/_callbacks/sentinel.py +179 -0
  84. coredis/response/_callbacks/server.py +241 -0
  85. coredis/response/_callbacks/sets.py +44 -0
  86. coredis/response/_callbacks/sorted_set.py +204 -0
  87. coredis/response/_callbacks/streams.py +185 -0
  88. coredis/response/_callbacks/strings.py +70 -0
  89. coredis/response/_callbacks/vector_sets.py +159 -0
  90. coredis/response/_utils.py +33 -0
  91. coredis/response/types.py +416 -0
  92. coredis/retry.py +233 -0
  93. coredis/sentinel.py +477 -0
  94. coredis/stream.py +369 -0
  95. coredis/tokens.py +2286 -0
  96. coredis/typing.py +580 -0
  97. coredis-5.2.0.dist-info/METADATA +211 -0
  98. coredis-5.2.0.dist-info/RECORD +100 -0
  99. coredis-5.2.0.dist-info/WHEEL +6 -0
  100. coredis-5.2.0.dist-info/licenses/LICENSE +23 -0
coredis/client/cluster.py
@@ -0,0 +1,1264 @@
+ from __future__ import annotations
+
+ import asyncio
+ import contextlib
+ import contextvars
+ import functools
+ import inspect
+ import random
+ import textwrap
+ from abc import ABCMeta
+ from ssl import SSLContext
+ from typing import TYPE_CHECKING, Any, cast, overload
+
+ from deprecated.sphinx import versionadded
+
+ from coredis._utils import b, hash_slot
+ from coredis.cache import AbstractCache
+ from coredis.client.basic import Client, Redis
+ from coredis.commands._key_spec import KeySpec
+ from coredis.commands.constants import CommandName, NodeFlag
+ from coredis.commands.pubsub import ClusterPubSub, ShardedPubSub, SubscriptionCallback
+ from coredis.connection import RedisSSLContext
+ from coredis.exceptions import (
+     AskError,
+     BusyLoadingError,
+     ClusterDownError,
+     ClusterError,
+     ConnectionError,
+     MovedError,
+     RedisClusterException,
+     TimeoutError,
+     TryAgainError,
+     WatchError,
+ )
+ from coredis.globals import CACHEABLE_COMMANDS, MODULE_GROUPS, READONLY_COMMANDS
+ from coredis.pool import ClusterConnectionPool
+ from coredis.pool.nodemanager import ManagedNode
+ from coredis.response._callbacks import AsyncPreProcessingCallback, NoopCallback
+ from coredis.retry import CompositeRetryPolicy, ConstantRetryPolicy, RetryPolicy
+ from coredis.typing import (
+     AnyStr,
+     AsyncIterator,
+     Awaitable,
+     Callable,
+     Coroutine,
+     ExecutionParameters,
+     Iterable,
+     Iterator,
+     Literal,
+     Mapping,
+     Node,
+     Parameters,
+     ParamSpec,
+     RedisCommand,
+     RedisCommandP,
+     RedisValueT,
+     ResponseType,
+     StringT,
+     TypeAdapter,
+     TypeVar,
+     Unpack,
+ )
+
+ P = ParamSpec("P")
+ R = TypeVar("R")
+
+ if TYPE_CHECKING:
+     import coredis.pipeline
+
+
+ class ClusterMeta(ABCMeta):
+     ROUTING_FLAGS: dict[bytes, NodeFlag]
+     SPLIT_FLAGS: dict[bytes, NodeFlag]
+     RESULT_CALLBACKS: dict[bytes, Callable[..., ResponseType]]
+     NODE_FLAG_DOC_MAPPING = {
+         NodeFlag.PRIMARIES: "all primaries",
+         NodeFlag.REPLICAS: "all replicas",
+         NodeFlag.RANDOM: "a random node",
+         NodeFlag.ALL: "all nodes",
+         NodeFlag.SLOT_ID: "one or more nodes based on the slots provided",
+     }
+
+     def __new__(
+         cls, name: str, bases: tuple[type, ...], namespace: dict[str, object]
+     ) -> ClusterMeta:
+         kls = super().__new__(cls, name, bases, namespace)
+         methods = dict(k for k in inspect.getmembers(kls) if inspect.isfunction(k[1]))
+         for module in MODULE_GROUPS:
+             methods.update(
+                 {
+                     f"{module.MODULE}.{k[0]}": k[1]
+                     for k in inspect.getmembers(module)
+                     if inspect.isfunction(k[1])
+                 }
+             )
+         for method_name, method in methods.items():
+             doc_addition = ""
+             cmd = getattr(method, "__coredis_command", None)
+             if cmd:
+                 if not cmd.cluster.enabled:
+                     doc_addition = """
+ .. warning:: Not supported in cluster mode
+ """
+                 else:
+                     if cmd.cluster.route:
+                         kls.ROUTING_FLAGS[cmd.command] = cmd.cluster.route
+                     aggregate_note = ""
+                     if cmd.cluster.multi_node:
+                         if cmd.cluster.combine:
+                             aggregate_note = f"and return {cmd.cluster.combine.response_policy}"
+                         else:
+                             aggregate_note = (
+                                 "and a mapping of nodes to results will be returned"
+                             )
+                         doc_addition = f"""
+ .. admonition:: Cluster note
+
+    The command will be run on **{cls.NODE_FLAG_DOC_MAPPING[cmd.cluster.route]}** {aggregate_note}
+ """
+                     elif cmd.cluster.split and cmd.cluster.combine:
+                         kls.SPLIT_FLAGS[cmd.command] = cmd.cluster.split
+                         doc_addition = f"""
+ .. admonition:: Cluster note
+
+    The command will be run on **{cls.NODE_FLAG_DOC_MAPPING[cmd.cluster.split]}**
+    by distributing the keys to the appropriate nodes and return
+    {cmd.cluster.combine.response_policy}.
+
+    To disable this behavior set :paramref:`RedisCluster.non_atomic_cross_slot` to ``False``
+ """
+                     if cmd.cluster.multi_node:
+                         kls.RESULT_CALLBACKS[cmd.command] = cmd.cluster.combine
+             if doc_addition and not hasattr(method, "__cluster_docs"):
+                 if not getattr(method, "__coredis_module", None):
+
+                     def __w(
+                         func: Callable[P, Awaitable[R]], enabled: bool
+                     ) -> Callable[P, Awaitable[R]]:
+                         @functools.wraps(func)
+                         async def _w(*a: P.args, **k: P.kwargs) -> R:
+                             if not enabled:
+                                 raise NotImplementedError(
+                                     f"{func.__name__} is disabled for cluster client"
+                                 )
+                             return await func(*a, **k)
+
+                         _w.__doc__ = f"""{textwrap.dedent(method.__doc__ or "")}
+ {doc_addition}
+ """
+                         return _w
+
+                     wrapped = __w(method, cmd.cluster.enabled if cmd else True)
+                     setattr(wrapped, "__cluster_docs", doc_addition)
+                     setattr(kls, method_name, wrapped)
+                 else:
+                     method.__doc__ = f"""{textwrap.dedent(method.__doc__ or "")}
+ {doc_addition}
+ """
+                     setattr(method, "__cluster_docs", doc_addition)
+         return kls
+
+
+ RedisClusterT = TypeVar("RedisClusterT", bound="RedisCluster[Any]")
+
+
+ class RedisCluster(
+     Client[AnyStr],
+     metaclass=ClusterMeta,
+ ):
+     MAX_RETRIES = 16
+     ROUTING_FLAGS: dict[bytes, NodeFlag] = {}
+     SPLIT_FLAGS: dict[bytes, NodeFlag] = {}
+     RESULT_CALLBACKS: dict[bytes, Callable[..., Any]] = {}
+
+     connection_pool: ClusterConnectionPool
+
+     @overload
+     def __init__(
+         self: RedisCluster[bytes],
+         host: str | None = ...,
+         port: int | None = ...,
+         *,
+         startup_nodes: Iterable[Node] | None = ...,
+         stream_timeout: float | None = ...,
+         connect_timeout: float | None = ...,
+         ssl: bool = ...,
+         ssl_context: SSLContext | None = ...,
+         ssl_keyfile: str | None = ...,
+         ssl_certfile: str | None = ...,
+         ssl_cert_reqs: Literal["optional", "required", "none"] | None = ...,
+         ssl_check_hostname: bool | None = ...,
+         ssl_ca_certs: str | None = ...,
+         max_connections: int = ...,
+         max_connections_per_node: bool = ...,
+         readonly: bool = ...,
+         read_from_replicas: bool = ...,
+         reinitialize_steps: int | None = ...,
+         skip_full_coverage_check: bool = ...,
+         nodemanager_follow_cluster: bool = ...,
+         encoding: str = ...,
+         decode_responses: Literal[False] = ...,
+         connection_pool: ClusterConnectionPool | None = ...,
+         connection_pool_cls: type[ClusterConnectionPool] = ...,
+         protocol_version: Literal[2, 3] = ...,
+         verify_version: bool = ...,
+         non_atomic_cross_slot: bool = ...,
+         cache: AbstractCache | None = ...,
+         noreply: bool = ...,
+         noevict: bool = ...,
+         notouch: bool = ...,
+         retry_policy: RetryPolicy = ...,
+         type_adapter: TypeAdapter | None = ...,
+         **kwargs: Any,
+     ) -> None: ...
+
+     @overload
+     def __init__(
+         self: RedisCluster[str],
+         host: str | None = ...,
+         port: int | None = ...,
+         *,
+         startup_nodes: Iterable[Node] | None = ...,
+         stream_timeout: float | None = ...,
+         connect_timeout: float | None = ...,
+         ssl: bool = ...,
+         ssl_context: SSLContext | None = ...,
+         ssl_keyfile: str | None = ...,
+         ssl_certfile: str | None = ...,
+         ssl_cert_reqs: Literal["optional", "required", "none"] | None = ...,
+         ssl_check_hostname: bool | None = ...,
+         ssl_ca_certs: str | None = ...,
+         max_connections: int = ...,
+         max_connections_per_node: bool = ...,
+         readonly: bool = ...,
+         read_from_replicas: bool = ...,
+         reinitialize_steps: int | None = ...,
+         skip_full_coverage_check: bool = ...,
+         nodemanager_follow_cluster: bool = ...,
+         encoding: str = ...,
+         decode_responses: Literal[True] = ...,
+         connection_pool: ClusterConnectionPool | None = ...,
+         connection_pool_cls: type[ClusterConnectionPool] = ...,
+         protocol_version: Literal[2, 3] = ...,
+         verify_version: bool = ...,
+         non_atomic_cross_slot: bool = ...,
+         cache: AbstractCache | None = ...,
+         noreply: bool = ...,
+         noevict: bool = ...,
+         notouch: bool = ...,
+         retry_policy: RetryPolicy = ...,
+         type_adapter: TypeAdapter | None = ...,
+         **kwargs: Any,
+     ) -> None: ...
+
+     def __init__(
+         self,
+         host: str | None = None,
+         port: int | None = None,
+         *,
+         startup_nodes: Iterable[Node] | None = None,
+         stream_timeout: float | None = None,
+         connect_timeout: float | None = None,
+         ssl: bool = False,
+         ssl_context: SSLContext | None = None,
+         ssl_keyfile: str | None = None,
+         ssl_certfile: str | None = None,
+         ssl_cert_reqs: Literal["optional", "required", "none"] | None = None,
+         ssl_check_hostname: bool | None = None,
+         ssl_ca_certs: str | None = None,
+         max_connections: int = 32,
+         max_connections_per_node: bool = False,
+         readonly: bool = False,
+         read_from_replicas: bool = False,
+         reinitialize_steps: int | None = None,
+         skip_full_coverage_check: bool = False,
+         nodemanager_follow_cluster: bool = True,
+         encoding: str = "utf-8",
+         decode_responses: bool = False,
+         connection_pool: ClusterConnectionPool | None = None,
+         connection_pool_cls: type[ClusterConnectionPool] = ClusterConnectionPool,
+         protocol_version: Literal[2, 3] = 3,
+         verify_version: bool = True,
+         non_atomic_cross_slot: bool = True,
+         cache: AbstractCache | None = None,
+         noreply: bool = False,
+         noevict: bool = False,
+         notouch: bool = False,
+         retry_policy: RetryPolicy = CompositeRetryPolicy(
+             ConstantRetryPolicy((ClusterDownError,), 2, 0.1),
+             ConstantRetryPolicy(
+                 (
+                     ConnectionError,
+                     TimeoutError,
+                 ),
+                 2,
+                 0.1,
+             ),
+         ),
+         type_adapter: TypeAdapter | None = None,
+         **kwargs: Any,
+     ) -> None:
+         """
+
+         Changes
+           - .. versionadded:: 4.12.0
+
+             - :paramref:`retry_policy`
+             - :paramref:`noevict`
+             - :paramref:`notouch`
+             - :meth:`RedisCluster.ensure_persistence` context manager
+             - Redis Module support
+
+               - RedisJSON: :attr:`RedisCluster.json`
+               - RedisBloom:
+
+                 - BloomFilter: :attr:`RedisCluster.bf`
+                 - CuckooFilter: :attr:`RedisCluster.cf`
+                 - CountMinSketch: :attr:`RedisCluster.cms`
+                 - TopK: :attr:`RedisCluster.topk`
+                 - TDigest: :attr:`RedisCluster.tdigest`
+               - RedisTimeSeries: :attr:`RedisCluster.timeseries`
+               - RedisGraph: :attr:`RedisCluster.graph`
+               - RediSearch:
+
+                 - Search & Aggregation: :attr:`RedisCluster.search`
+                 - Autocomplete: :attr:`RedisCluster.autocomplete`
+
+           - .. versionchanged:: 4.4.0
+
+             - :paramref:`nodemanager_follow_cluster` now defaults to ``True``
+
+           - .. deprecated:: 4.4.0
+
+             - The :paramref:`readonly` argument is deprecated in favour of
+               :paramref:`read_from_replicas`
+
+           - .. versionadded:: 4.3.0
+
+             - Added :paramref:`connection_pool_cls`
+
+           - .. versionchanged:: 4.0.0
+
+             - :paramref:`non_atomic_cross_slot` defaults to ``True``
+             - :paramref:`protocol_version` defaults to ``3``
+
+           - .. versionadded:: 3.11.0
+
+             - Added :paramref:`noreply`
+
+           - .. versionadded:: 3.10.0
+
+             - Synchronized ssl constructor parameters with :class:`coredis.Redis`
+
+           - .. versionadded:: 3.9.0
+
+             - If :paramref:`cache` is provided the client will check & populate
+               the cache for read only commands and invalidate it for commands
+               that could change the key(s) in the request.
+
+           - .. versionadded:: 3.6.0
+
+             - The :paramref:`non_atomic_cross_slot` parameter was added
+
+           - .. versionchanged:: 3.5.0
+
+             - The :paramref:`verify_version` parameter now defaults to ``True``
+
+           - .. versionadded:: 3.1.0
+
+             - The :paramref:`protocol_version` and :paramref:`verify_version`
+               parameters were added
+
+         :param host: Can be used to point to a startup node
+         :param port: Can be used to point to a startup node
+         :param startup_nodes: List of nodes that initial bootstrapping can be done
+          from
+         :param stream_timeout: Timeout (seconds) when reading responses from the server
+         :param connect_timeout: Timeout (seconds) for establishing a connection to the server
+         :param ssl: Whether to use an SSL connection
+         :param ssl_context: If provided the :class:`ssl.SSLContext` will be used when
+          establishing the connection. Otherwise either the default context (if no other
+          ssl related parameters are provided) or a custom context based on the other
+          ``ssl_*`` parameters will be used.
+         :param ssl_keyfile: Path of the private key to use
+         :param ssl_certfile: Path to the certificate corresponding to :paramref:`ssl_keyfile`
+         :param ssl_cert_reqs: Whether to try to verify the server's certificates and
+          how to behave if verification fails (See :attr:`ssl.SSLContext.verify_mode`).
+         :param ssl_check_hostname: Whether to enable hostname checking when establishing
+          an ssl connection.
+         :param ssl_ca_certs: Path to a concatenated certificate authority file or a directory
+          containing several CA certificates to use for validating the server's certificates
+          when :paramref:`ssl_cert_reqs` is not ``"none"``
+          (See :meth:`ssl.SSLContext.load_verify_locations`).
+         :param max_connections: Maximum number of connections that should be kept open at one time
+         :param max_connections_per_node:
+         :param read_from_replicas: If ``True`` the client will route readonly commands to replicas
+         :param reinitialize_steps: Number of moved errors that result in a cluster
+          topology refresh using the startup nodes provided
+         :param skip_full_coverage_check: Skips the check of the cluster-require-full-coverage
+          config, useful for clusters without the CONFIG command (like AWS)
+         :param nodemanager_follow_cluster: The node manager will during initialization try the
+          last set of nodes that it was operating on. This allows the client to drift alongside
+          the cluster if the cluster nodes move around a lot.
+         :param encoding: The codec to use to encode strings transmitted to redis
+          and decode responses with. (See :ref:`handbook/encoding:encoding/decoding`)
+         :param decode_responses: If ``True`` string responses from the server
+          will be decoded using :paramref:`encoding` before being returned.
+          (See :ref:`handbook/encoding:encoding/decoding`)
+         :param connection_pool: The connection pool instance to use. If not provided
+          a new pool will be assigned to this client.
+         :param connection_pool_cls: The connection pool class to use when constructing
+          a connection pool for this instance.
+         :param protocol_version: Whether to use the RESP (``2``) or RESP3 (``3``)
+          protocol for parsing responses from the server (Default ``3``).
+          (See :ref:`handbook/response:redis response`)
+         :param verify_version: Validate the redis server version against the documented
+          version a command was introduced in before executing it, and raise a
+          :exc:`CommandNotSupportedError` if the required version is higher than
+          the reported server version
+         :param non_atomic_cross_slot: If ``True`` certain commands that can operate
+          on multiple keys (cross slot) will be split across the relevant nodes by
+          mapping the keys to the appropriate slot and the results merged before being
+          returned.
+         :param cache: If provided the cache will be used to avoid requests for read only
+          commands if the client has already requested the data and it hasn't been invalidated.
+          The cache is responsible for any mutations to the keys that happen outside of this client
+         :param noreply: If ``True`` the client will not request a response for any
+          commands sent to the server.
+         :param noevict: Ensures that connections from the client will be excluded from the
+          client eviction process even if the configured client eviction threshold is exceeded.
+         :param notouch: Ensures that commands sent by the client will not alter the LRU/LFU
+          of the keys they access.
+         :param retry_policy: The retry policy to use when interacting with the cluster
+         :param type_adapter: The adapter to use for serializing / deserializing custom types
+          when interacting with redis commands.
+         """
+
+         if "db" in kwargs:  # noqa
+             raise RedisClusterException("Argument 'db' is not possible to use in cluster mode")
+
+         if connection_pool:
+             pool = connection_pool
+         else:
+             startup_nodes = [] if startup_nodes is None else list(startup_nodes)
+
+             # Support host/port as argument
+
+             if host:
+                 startup_nodes.append(
+                     Node(
+                         host=host,
+                         port=port if port else 7000,
+                     )
+                 )
+             if ssl_context is not None:
+                 kwargs["ssl_context"] = ssl_context
+             elif ssl:
+                 ssl_context = RedisSSLContext(
+                     ssl_keyfile,
+                     ssl_certfile,
+                     ssl_cert_reqs,
+                     ssl_ca_certs,
+                     ssl_check_hostname,
+                 ).get()
+                 kwargs["ssl_context"] = ssl_context
+
+             pool = connection_pool_cls(
+                 startup_nodes=startup_nodes,
+                 max_connections=max_connections,
+                 reinitialize_steps=reinitialize_steps,
+                 max_connections_per_node=max_connections_per_node,
+                 skip_full_coverage_check=skip_full_coverage_check,
+                 nodemanager_follow_cluster=nodemanager_follow_cluster,
+                 read_from_replicas=readonly or read_from_replicas,
+                 encoding=encoding,
+                 decode_responses=decode_responses,
+                 protocol_version=protocol_version,
+                 noreply=noreply,
+                 noevict=noevict,
+                 notouch=notouch,
+                 stream_timeout=stream_timeout,
+                 connect_timeout=connect_timeout,
+                 **kwargs,
+             )
+
+         super().__init__(
+             stream_timeout=stream_timeout,
+             connect_timeout=connect_timeout,
+             connection_pool=pool,
+             connection_pool_cls=connection_pool_cls,
+             encoding=encoding,
+             decode_responses=decode_responses,
+             verify_version=verify_version,
+             protocol_version=protocol_version,
+             noreply=noreply,
+             noevict=noevict,
+             notouch=notouch,
+             retry_policy=retry_policy,
+             type_adapter=type_adapter,
+             **kwargs,
+         )
+
+         self.refresh_table_asap: bool = False
+         self.route_flags: dict[bytes, NodeFlag] = self.__class__.ROUTING_FLAGS.copy()
+         self.split_flags: dict[bytes, NodeFlag] = self.__class__.SPLIT_FLAGS.copy()
+         self.result_callbacks: dict[bytes, Callable[..., Any]] = (
+             self.__class__.RESULT_CALLBACKS.copy()
+         )
+         self.non_atomic_cross_slot = non_atomic_cross_slot
+         self.cache = cache
+         self._decodecontext: contextvars.ContextVar[bool | None,] = contextvars.ContextVar(
+             "decode", default=None
+         )
+         self._encodingcontext: contextvars.ContextVar[str | None,] = contextvars.ContextVar(
+             "decode", default=None
+         )
+
+     @classmethod
+     @overload
+     def from_url(
+         cls: type[RedisCluster[bytes]],
+         url: str,
+         *,
+         db: int | None = ...,
+         skip_full_coverage_check: bool = ...,
+         decode_responses: Literal[False] = ...,
+         protocol_version: Literal[2, 3] = ...,
+         verify_version: bool = ...,
+         noreply: bool = ...,
+         noevict: bool = ...,
+         notouch: bool = ...,
+         retry_policy: RetryPolicy = ...,
+         type_adapter: TypeAdapter | None = ...,
+         cache: AbstractCache | None = ...,
+         **kwargs: Any,
+     ) -> RedisCluster[bytes]: ...
+
+     @classmethod
+     @overload
+     def from_url(
+         cls: type[RedisCluster[str]],
+         url: str,
+         *,
+         db: int | None = ...,
+         skip_full_coverage_check: bool = ...,
+         decode_responses: Literal[True],
+         protocol_version: Literal[2, 3] = ...,
+         verify_version: bool = ...,
+         noreply: bool = ...,
+         noevict: bool = ...,
+         notouch: bool = ...,
+         retry_policy: RetryPolicy = ...,
+         type_adapter: TypeAdapter | None = ...,
+         cache: AbstractCache | None = ...,
+         **kwargs: Any,
+     ) -> RedisCluster[str]: ...
+
+     @classmethod
+     def from_url(
+         cls: type[RedisClusterT],
+         url: str,
+         *,
+         db: int | None = None,
+         skip_full_coverage_check: bool = False,
+         decode_responses: bool = False,
+         protocol_version: Literal[2, 3] = 3,
+         verify_version: bool = True,
+         noreply: bool = False,
+         noevict: bool = False,
+         notouch: bool = False,
+         cache: AbstractCache | None = None,
+         retry_policy: RetryPolicy = CompositeRetryPolicy(
+             ConstantRetryPolicy((ClusterDownError,), 2, 0.1),
+             ConstantRetryPolicy(
+                 (
+                     ConnectionError,
+                     TimeoutError,
+                 ),
+                 2,
+                 0.1,
+             ),
+         ),
+         type_adapter: TypeAdapter | None = None,
+         **kwargs: Any,
+     ) -> RedisClusterT:
+         """
+         Return a Cluster client object configured from the startup node in URL,
+         which must use the ``redis://`` or ``rediss://`` scheme
+         `<http://www.iana.org/assignments/uri-schemes/prov/redis>`_
+
+         For example:
+
+         - ``redis://[:password]@localhost:6379``
+         - ``rediss://[:password]@localhost:6379``
+
+         :paramref:`url` and :paramref:`kwargs` are passed as is to
+         the :func:`coredis.ConnectionPool.from_url`.
+         """
+         if decode_responses:
+             return cls(
+                 decode_responses=True,
+                 protocol_version=protocol_version,
+                 verify_version=verify_version,
+                 noreply=noreply,
+                 retry_policy=retry_policy,
+                 type_adapter=type_adapter,
+                 cache=cache,
+                 connection_pool=ClusterConnectionPool.from_url(
+                     url,
+                     db=db,
+                     skip_full_coverage_check=skip_full_coverage_check,
+                     decode_responses=decode_responses,
+                     protocol_version=protocol_version,
+                     noreply=noreply,
+                     noevict=noevict,
+                     notouch=notouch,
+                     **kwargs,
+                 ),
+             )
+         else:
+             return cls(
+                 decode_responses=False,
+                 protocol_version=protocol_version,
+                 verify_version=verify_version,
+                 noreply=noreply,
+                 retry_policy=retry_policy,
+                 type_adapter=type_adapter,
+                 cache=cache,
+                 connection_pool=ClusterConnectionPool.from_url(
+                     url,
+                     db=db,
+                     skip_full_coverage_check=skip_full_coverage_check,
+                     decode_responses=decode_responses,
+                     protocol_version=protocol_version,
+                     noreply=noreply,
+                     noevict=noevict,
+                     notouch=notouch,
+                     **kwargs,
+                 ),
+             )
+
+     async def initialize(self) -> RedisCluster[AnyStr]:
+         if self.refresh_table_asap:
+             self.connection_pool.initialized = False
+         await super().initialize()
+         if self.cache:
+             self.cache = await self.cache.initialize(self)
+         self.refresh_table_asap = False
+         return self
+
+     def __repr__(self) -> str:
+         servers = list(
+             {f"{info.host}:{info.port}" for info in self.connection_pool.nodes.startup_nodes}
+         )
+         servers.sort()
+
+         return "{}<{}>".format(type(self).__name__, ", ".join(servers))
+
+     @property
+     def all_nodes(self) -> Iterator[Redis[AnyStr]]:
+         """ """
+         for node in self.connection_pool.nodes.all_nodes():
+             yield cast(
+                 Redis[AnyStr],
+                 self.connection_pool.nodes.get_redis_link(node.host, node.port),
+             )
+
+     @property
+     def primaries(self) -> Iterator[Redis[AnyStr]]:
+         """ """
+         for primary in self.connection_pool.nodes.all_primaries():
+             yield cast(
+                 Redis[AnyStr],
+                 self.connection_pool.nodes.get_redis_link(primary.host, primary.port),
+             )
+
+     @property
+     def replicas(self) -> Iterator[Redis[AnyStr]]:
+         """ """
+         for replica in self.connection_pool.nodes.all_replicas():
+             yield cast(
+                 Redis[AnyStr],
+                 self.connection_pool.nodes.get_redis_link(replica.host, replica.port),
+             )
+
+     @property
+     def num_replicas_per_shard(self) -> int:
+         """
+         Number of replicas per shard of the cluster determined by
+         initial cluster topology discovery
+         """
+         return self.connection_pool.nodes.replicas_per_shard
+
+     async def _ensure_initialized(self) -> None:
+         if not self.connection_pool.initialized or self.refresh_table_asap:
+             await self
+
+     def _determine_slots(
+         self, command: bytes, *args: RedisValueT, **options: Unpack[ExecutionParameters]
+     ) -> set[int]:
+         """Determines the slots the command and args would touch"""
+         keys = cast(tuple[RedisValueT, ...], options.get("keys")) or KeySpec.extract_keys(
+             command, *args, readonly_command=self.connection_pool.read_from_replicas
+         )
+         if (
+             command
+             in {
+                 CommandName.EVAL,
+                 CommandName.EVAL_RO,
+                 CommandName.EVALSHA,
+                 CommandName.EVALSHA_RO,
+                 CommandName.FCALL,
+                 CommandName.FCALL_RO,
+                 CommandName.PUBLISH,
+             }
+             and not keys
+         ):
+             return set()
+
+         return {hash_slot(b(key)) for key in keys}
+
+     def _merge_result(
+         self,
+         command: bytes,
+         res: dict[str, R],
+         **kwargs: Unpack[ExecutionParameters],
+     ) -> R:
+         assert command in self.result_callbacks
+         return cast(
+             R,
+             self.result_callbacks[command](res, version=self.protocol_version, **kwargs),
+         )
+
+     def determine_node(
+         self, command: bytes, *args: RedisValueT, **kwargs: Unpack[ExecutionParameters]
+     ) -> list[ManagedNode] | None:
+         node_flag = self.route_flags.get(command)
+         if command in self.split_flags and self.non_atomic_cross_slot:
+             node_flag = self.split_flags[command]
+
+         if node_flag == NodeFlag.RANDOM:
+             return [self.connection_pool.nodes.random_node(primary=True)]
+         elif node_flag == NodeFlag.PRIMARIES:
+             return list(self.connection_pool.nodes.all_primaries())
+         elif node_flag == NodeFlag.ALL:
+             return list(self.connection_pool.nodes.all_nodes())
+         elif node_flag == NodeFlag.SLOT_ID and (
+             slot_arguments_range := kwargs.get("slot_arguments_range", None)
+         ):
+             slot_start, slot_end = slot_arguments_range
+             nodes = list(
+                 self.connection_pool.nodes.nodes_from_slots(
+                     *cast(tuple[int, ...], args[slot_start:slot_end])
+                 ).keys()
+             )
+             return [self.connection_pool.nodes.nodes[k] for k in nodes]
+         return None
+
+     async def on_connection_error(self, _: BaseException) -> None:
+         self.connection_pool.disconnect()
+         self.connection_pool.reset()
+         self.refresh_table_asap = True
+
+     async def on_cluster_down_error(self, _: BaseException) -> None:
+         self.connection_pool.disconnect()
+         self.connection_pool.reset()
+         self.refresh_table_asap = True
+
+     async def execute_command(
+         self,
+         command: RedisCommandP,
+         callback: Callable[..., R] = NoopCallback(),
+         **kwargs: Unpack[ExecutionParameters],
+     ) -> R:
+         """
+         Sends a command to one or many nodes in the cluster
+         with retries based on :paramref:`RedisCluster.retry_policy`
+         """
+
+         return await self.retry_policy.call_with_retries(
+             lambda: self._execute_command(command, callback=callback, **kwargs),
+             failure_hook={
+                 ConnectionError: self.on_connection_error,
+                 ClusterDownError: self.on_cluster_down_error,
+             },
+             before_hook=self._ensure_initialized,
+         )
+
+     async def _execute_command(
+         self,
+         command: RedisCommandP,
+         callback: Callable[..., R] = NoopCallback(),
+         **kwargs: Unpack[ExecutionParameters],
+     ) -> R:
+         """
+         Sends a command to one or many nodes in the cluster
+         """
+         nodes = self.determine_node(command.name, *command.arguments, **kwargs)
+         if nodes and len(nodes) > 1:
+             tasks: dict[str, Coroutine[Any, Any, R]] = {}
+             node_arg_mapping = self._split_args_over_nodes(
+                 nodes,
+                 command.name,
+                 *command.arguments,
+                 slot_arguments_range=kwargs.get("slot_arguments_range", None),
+             )
+             node_name_map = {n.name: n for n in nodes}
+             for node_name in node_arg_mapping:
+                 for portion, pargs in enumerate(node_arg_mapping[node_name]):
+                     tasks[f"{node_name}:{portion}"] = self._execute_command_on_single_node(
+                         RedisCommand(command.name, pargs),
+                         callback=callback,
+                         node=node_name_map[node_name],
+                         slots=None,
+                         **kwargs,
+                     )
+
+             results = await asyncio.gather(*tasks.values(), return_exceptions=True)
+             if self.noreply:
+                 return None  # type: ignore
+             return cast(
+                 R,
+                 self._merge_result(command.name, dict(zip(tasks.keys(), results))),
+             )
+         else:
+             node = None
+             slots = None
+             if not nodes:
+                 slots = list(self._determine_slots(command.name, *command.arguments, **kwargs))
+             else:
+                 node = nodes.pop()
+             return await self._execute_command_on_single_node(
+                 command,
+                 callback=callback,
+                 node=node,
+                 slots=slots,
+                 **kwargs,
+             )
+
+     def _split_args_over_nodes(
+         self,
+         nodes: list[ManagedNode],
+         command: bytes,
+         *args: RedisValueT,
+         slot_arguments_range: tuple[int, int] | None = None,
+     ) -> dict[str, list[tuple[RedisValueT, ...]]]:
+         node_flag = self.route_flags.get(command)
+         node_arg_mapping: dict[str, list[tuple[RedisValueT, ...]]] = {}
+         if command in self.split_flags and self.non_atomic_cross_slot:
+             keys = KeySpec.extract_keys(command, *args)
+             if keys:
+                 key_start: int = args.index(keys[0])
+                 key_end: int = args.index(keys[-1])
+                 assert args[key_start : 1 + key_end] == keys, (
+                     f"Unable to map {command.decode('latin-1')} by keys {keys}"
+                 )
+
+                 for (
+                     node_name,
+                     key_groups,
+                 ) in self.connection_pool.nodes.keys_to_nodes_by_slot(*keys).items():
+                     for _, node_keys in key_groups.items():
+                         node_arg_mapping.setdefault(node_name, []).append(
+                             (
+                                 *args[:key_start],
+                                 *node_keys,  # type: ignore
+                                 *args[1 + key_end :],
+                             )
+                         )
+                 if self.cache and command not in READONLY_COMMANDS:
+                     self.cache.invalidate(*keys)
+         elif node_flag == NodeFlag.SLOT_ID and slot_arguments_range:
+             # TODO: fix this nonsense put in place just to support a few cluster commands
+             # related to slot management in cluster client which really no one needs to be calling
+             # through the cluster client.
+             slot_start, slot_end = slot_arguments_range
+             all_slots = [int(k) for k in args[slot_start:slot_end] if k is not None]
+             for node, slots in self.connection_pool.nodes.nodes_from_slots(*all_slots).items():
+                 node_arg_mapping[node] = [(*slots, *args[slot_end:])]  # type: ignore
+         else:
+             # This command is not meant to be split across nodes and each node
+             # should be called with the same arguments
+             node_arg_mapping = {node.name: [args] for node in nodes}
+         return node_arg_mapping
+
+     async def _execute_command_on_single_node(
+         self,
+         command: RedisCommandP,
+         callback: Callable[..., R] = NoopCallback(),
+         node: ManagedNode | None = None,
+         slots: list[int] | None = None,
+         **kwargs: Unpack[ExecutionParameters],
+     ) -> R:
+         redirect_addr = None
+
+         asking = False
+
+         if not node and not slots:
+             try_random_node = True
+             try_random_type = NodeFlag.PRIMARIES
+         else:
+             try_random_node = False
+             try_random_type = NodeFlag.ALL
+         remaining_attempts = int(self.MAX_RETRIES)
+
+         while remaining_attempts > 0:
+             remaining_attempts -= 1
+             if self.refresh_table_asap and not slots:
+                 await self
+             if asking and redirect_addr:
+                 node = self.connection_pool.nodes.nodes[redirect_addr]
+                 r = await self.connection_pool.get_connection_by_node(node)
+             elif try_random_node:
+                 r = await self.connection_pool.get_random_connection(
+                     primary=try_random_type == NodeFlag.PRIMARIES
+                 )
+                 if slots:
+                     try_random_node = False
+             elif node:
+                 r = await self.connection_pool.get_connection_by_node(node)
+             elif slots:
+                 if self.refresh_table_asap:
+                     # MOVED
+                     node = self.connection_pool.get_primary_node_by_slots(slots)
+                 else:
+                     node = self.connection_pool.get_node_by_slots(slots)
+                 r = await self.connection_pool.get_connection_by_node(node)
+             else:
+                 continue
+             quick_release = self.should_quick_release(command)
+             released = False
+             try:
+                 if asking:
+                     request = await r.create_request(
+                         CommandName.ASKING, noreply=self.noreply, decode=False
+                     )
+                     await request
+                     asking = False
+                 keys = KeySpec.extract_keys(command.name, *command.arguments)
+                 cacheable = (
+                     self.cache
+                     and command.name in CACHEABLE_COMMANDS
+                     and len(keys) == 1
+                     and not self.noreply
+                     and self._decodecontext.get() is None
+                 )
+                 cache_hit = False
+                 cached_reply = None
+                 use_cached = False
+                 reply = None
+                 if self.cache:
+                     if r.tracking_client_id != self.cache.get_client_id(r):
+                         self.cache.reset()
+                         await r.update_tracking_client(True, self.cache.get_client_id(r))
+                     if command.name not in READONLY_COMMANDS:
+                         self.cache.invalidate(*keys)
+                     elif cacheable:
+                         try:
+                             cached_reply = cast(
+                                 R,
+                                 self.cache.get(
+                                     command.name,
+                                     keys[0],
+                                     *command.arguments,
+                                 ),
+                             )
+                             use_cached = random.random() * 100.0 < min(100.0, self.cache.confidence)
+                             cache_hit = True
+                         except KeyError:
+                             pass
+
+                 if not (use_cached and cached_reply):
+                     request = await r.create_request(
+                         command.name,
+                         *command.arguments,
+                         noreply=self.noreply,
+                         decode=kwargs.get("decode", self._decodecontext.get()),
+                         encoding=self._encodingcontext.get(),
+                     )
+                     if quick_release and not (self.requires_wait or self.requires_waitaof):
+                         released = True
+                         self.connection_pool.release(r)
+
+                     reply = await request
+                     maybe_wait = [
+                         await self._ensure_wait(command, r),
+                         await self._ensure_persistence(command, r),
+                     ]
+                     await asyncio.gather(*maybe_wait)
+                 if self.noreply:
+                     return  # type: ignore
+                 else:
+                     if isinstance(callback, AsyncPreProcessingCallback):
+                         await callback.pre_process(
+                             self,
+                             reply,
+                         )
+                     response = callback(
+                         cached_reply if cache_hit else reply,
+                         version=self.protocol_version,
+                     )
+                     if self.cache and cacheable:
+                         if cache_hit and not use_cached:
+                             self.cache.feedback(
+                                 command.name,
+                                 keys[0],
+                                 *command.arguments,
+                                 match=cached_reply == reply,
+                             )
+                         if not cache_hit:
+                             self.cache.put(
+                                 command.name,
+                                 keys[0],
+                                 *command.arguments,
+                                 value=reply,
+                             )
+                     return response
+             except (RedisClusterException, BusyLoadingError, asyncio.CancelledError):
+                 raise
+             except MovedError as e:
+                 # Reinitialize on every x number of MovedError.
+                 # This counter will increase faster when the same client object
+                 # is shared between multiple threads. To reduce the frequency you
+                 # can set the variable 'reinitialize_steps' in the constructor.
+                 self.refresh_table_asap = True
+                 await self.connection_pool.nodes.increment_reinitialize_counter()
+
+                 node = self.connection_pool.nodes.set_node(e.host, e.port, server_type="primary")
+                 try_random_node = False
+                 self.connection_pool.nodes.slots[e.slot_id][0] = node
+             except TryAgainError:
+                 if remaining_attempts < self.MAX_RETRIES / 2:
+                     await asyncio.sleep(0.05)
+             except AskError as e:
+                 redirect_addr, asking = f"{e.host}:{e.port}", True
+             finally:
+                 self._ensure_server_version(r.server_version)
+                 if not released:
+                     self.connection_pool.release(r)
+
+         raise ClusterError("Maximum retries exhausted.")
+
+     @overload
+     def decoding(
+         self, mode: Literal[False], encoding: str | None = None
+     ) -> contextlib.AbstractContextManager[RedisCluster[bytes]]: ...
+
+     @overload
+     def decoding(
+         self, mode: Literal[True], encoding: str | None = None
+     ) -> contextlib.AbstractContextManager[RedisCluster[str]]: ...
+
+     @contextlib.contextmanager
+     @versionadded(version="4.8.0")
+     def decoding(self, mode: bool, encoding: str | None = None) -> Iterator[RedisCluster[Any]]:
+         """
+         Context manager to temporarily change the decoding behavior
+         of the client
+
+         :param mode: Whether to decode or not
+         :param encoding: Optional encoding to use if decoding. If not provided
+          the :paramref:`~coredis.RedisCluster.encoding` parameter provided to the client will
+          be used.
+
+         Example::
+
+             client = coredis.RedisCluster(decode_responses=True)
+             await client.set("fubar", "baz")
+             assert await client.get("fubar") == "baz"
+             with client.decoding(False):
+                 assert await client.get("fubar") == b"baz"
+                 with client.decoding(True):
+                     assert await client.get("fubar") == "baz"
+
+         """
+         prev_decode = self._decodecontext.get()
+         prev_encoding = self._encodingcontext.get()
+         self._decodecontext.set(mode)
+         self._encodingcontext.set(encoding)
+         try:
+             yield self
+         finally:
+             self._decodecontext.set(prev_decode)
+             self._encodingcontext.set(prev_encoding)
+
+     def pubsub(
+         self,
+         ignore_subscribe_messages: bool = False,
+         retry_policy: RetryPolicy | None = None,
+         channels: Parameters[StringT] | None = None,
+         channel_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
+         patterns: Parameters[StringT] | None = None,
+         pattern_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
+         **kwargs: Any,
+     ) -> ClusterPubSub[AnyStr]:
+         """
+         Return a Pub/Sub instance that can be used to consume messages that get
+         published to the subscribed channels or patterns.
+
+         :param ignore_subscribe_messages: Whether to skip subscription
+          acknowledgement messages
+         :param retry_policy: An explicit retry policy to use in the subscriber.
+         :param channels: Channels that the constructed Pubsub instance should
+          automatically subscribe to
+         :param channel_handlers: Mapping of channels to automatically subscribe to
+          and the associated handlers that will be invoked when a message is received
+          on the specific channel.
+         :param patterns: Patterns that the constructed Pubsub instance should
+          automatically subscribe to
+         :param pattern_handlers: Mapping of patterns to automatically subscribe to
+          and the associated handlers that will be invoked when a message is received
+          on a channel matching the pattern.
+         """
+         return ClusterPubSub[AnyStr](
+             self.connection_pool,
+             ignore_subscribe_messages=ignore_subscribe_messages,
+             retry_policy=retry_policy,
+             channels=channels,
+             channel_handlers=channel_handlers,
+             patterns=patterns,
+             pattern_handlers=pattern_handlers,
+             **kwargs,
+         )
+
+     @versionadded(version="3.6.0")
+     def sharded_pubsub(
+         self,
+         ignore_subscribe_messages: bool = False,
+         read_from_replicas: bool = False,
+         retry_policy: RetryPolicy | None = None,
+         channels: Parameters[StringT] | None = None,
+         channel_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
+         **kwargs: Any,
+     ) -> ShardedPubSub[AnyStr]:
+         """
+         Return a Pub/Sub instance that can be used to consume messages from
+         the subscribed channels in a redis cluster.
+
+         The implementation returned differs from that returned by :meth:`pubsub`
+         as it uses the Sharded Pub/Sub implementation which routes messages
+         to cluster nodes using the same algorithm used to assign keys to slots.
+         This effectively restricts the propagation of messages to within the
+         shard of a cluster, allowing the use of Pub/Sub to scale horizontally
+         with the cluster itself.
+
+         :param ignore_subscribe_messages: Whether to skip subscription
+          acknowledgement messages
+         :param read_from_replicas: Whether to read messages from replica nodes
+         :param retry_policy: An explicit retry policy to use in the subscriber.
+         :param channels: Channels that the constructed Pubsub instance should
+          automatically subscribe to
+         :param channel_handlers: Mapping of channels to automatically subscribe to
+          and the associated handlers that will be invoked when a message is received
+          on the specific channel.
+
+         New in :redis-version:`7.0.0`
+         """
+
+         return ShardedPubSub[AnyStr](
+             self.connection_pool,
+             ignore_subscribe_messages=ignore_subscribe_messages,
+             read_from_replicas=read_from_replicas,
+             retry_policy=retry_policy,
+             channels=channels,
+             channel_handlers=channel_handlers,
+             **kwargs,
+         )
+
+     async def pipeline(
+         self,
+         transaction: bool | None = None,
+         watches: Parameters[StringT] | None = None,
+         timeout: float | None = None,
+     ) -> coredis.pipeline.ClusterPipeline[AnyStr]:
+         """
+         Returns a new pipeline object that can queue multiple commands for
+         batch execution. Pipelines in cluster mode only provide a subset of the
+         functionality of pipelines in standalone mode.
+
+         Specifically:
+
+         - Each command in the pipeline should only access keys on the same node
+         - Transactions are disabled by default and are only supported if all
+           watched keys route to the same node as the commands in the multi/exec
+           part of the pipeline.
+
+         :param transaction: Indicates whether all commands should be executed atomically.
+         :param watches: If :paramref:`transaction` is ``True`` these keys are watched for
+          external changes during the transaction.
+         :param timeout: If specified this value will take precedence over
+          :paramref:`RedisCluster.stream_timeout`
+
+         """
+         await self.connection_pool.initialize()
+
+         from coredis.pipeline import ClusterPipeline
+
+         return ClusterPipeline[AnyStr](
+             client=self,
+             transaction=transaction,
+             watches=watches,
+             timeout=timeout,
+         )
+
+     async def transaction(
+         self,
+         func: Callable[
+             [coredis.pipeline.ClusterPipeline[AnyStr]],
+             Coroutine[Any, Any, Any],
+         ],
+         *watches: StringT,
+         value_from_callable: bool = False,
+         watch_delay: float | None = None,
+         **kwargs: Any,
+     ) -> Any:
+         """
+         Convenience method for executing the callable :paramref:`func` as a
+         transaction while watching all keys specified in :paramref:`watches`.
+
+         :param func: A callable that expects a single argument, a
+          :class:`coredis.pipeline.ClusterPipeline` object retrieved by calling
+          :meth:`~coredis.RedisCluster.pipeline`.
+         :param watches: The keys to watch during the transaction. The keys should route
+          to the same node as the keys touched by the commands in :paramref:`func`
+         :param value_from_callable: Whether to return the result of the transaction or the
+          value returned from :paramref:`func`
+
+         .. warning:: Cluster transactions can only be run with commands that
+            route to the same slot.
+
+         .. versionchanged:: 4.9.0
+
+            When the transaction is started with :paramref:`watches` the
+            :class:`~coredis.pipeline.ClusterPipeline` instance passed to :paramref:`func`
+            will not start queuing commands until a call to
+            :meth:`~coredis.pipeline.ClusterPipeline.multi` is made. This makes the cluster
+            implementation consistent with :meth:`coredis.Redis.transaction`
+         """
+         async with await self.pipeline(True) as pipe:
+             while True:
+                 try:
+                     if watches:
+                         await pipe.watch(*watches)
+                     func_value = await func(pipe)
+                     exec_value = await pipe.execute()
+                     return func_value if value_from_callable else exec_value
+                 except WatchError:
+                     if watch_delay is not None and watch_delay > 0:
+                         await asyncio.sleep(watch_delay)
+                     continue
+
+     async def scan_iter(
+         self,
+         match: StringT | None = None,
+         count: int | None = None,
+         type_: StringT | None = None,
+     ) -> AsyncIterator[AnyStr]:
+         await self._ensure_initialized()
+         for node in self.primaries:
+             cursor = None
+             while cursor != 0:
+                 cursor, data = await node.scan(cursor or 0, match, count, type_)
+                 for item in data:
+                     yield item
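
Usage sketch (not part of the wheel contents above): a minimal example of the RedisCluster client defined in this file, based on the constructor, decoding() and scan_iter() signatures shown in the diff. The startup node address (localhost:7000) and the key names are placeholders, not values taken from the package.

    import asyncio

    import coredis


    async def main() -> None:
        # A single startup node is enough; the client discovers the rest of
        # the cluster topology from it during initialization.
        client = coredis.RedisCluster(host="localhost", port=7000, decode_responses=True)
        await client.set("fubar", "baz")
        assert await client.get("fubar") == "baz"

        # decoding() temporarily overrides decode_responses for the block
        with client.decoding(False):
            assert await client.get("fubar") == b"baz"

        # scan_iter() transparently iterates keys across all primaries
        async for key in client.scan_iter(match="fubar*"):
            print(key)


    asyncio.run(main())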