coredis-5.5.0-cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. 22fe76227e35f92ab5c3__mypyc.cpython-313-darwin.so +0 -0
  2. coredis/__init__.py +42 -0
  3. coredis/_enum.py +42 -0
  4. coredis/_json.py +11 -0
  5. coredis/_packer.cpython-313-darwin.so +0 -0
  6. coredis/_packer.py +71 -0
  7. coredis/_protocols.py +50 -0
  8. coredis/_py_311_typing.py +20 -0
  9. coredis/_py_312_typing.py +17 -0
  10. coredis/_sidecar.py +114 -0
  11. coredis/_utils.cpython-313-darwin.so +0 -0
  12. coredis/_utils.py +440 -0
  13. coredis/_version.py +34 -0
  14. coredis/_version.pyi +1 -0
  15. coredis/cache.py +801 -0
  16. coredis/client/__init__.py +6 -0
  17. coredis/client/basic.py +1240 -0
  18. coredis/client/cluster.py +1265 -0
  19. coredis/commands/__init__.py +64 -0
  20. coredis/commands/_key_spec.py +517 -0
  21. coredis/commands/_utils.py +108 -0
  22. coredis/commands/_validators.py +159 -0
  23. coredis/commands/_wrappers.py +175 -0
  24. coredis/commands/bitfield.py +110 -0
  25. coredis/commands/constants.py +662 -0
  26. coredis/commands/core.py +8484 -0
  27. coredis/commands/function.py +408 -0
  28. coredis/commands/monitor.py +168 -0
  29. coredis/commands/pubsub.py +905 -0
  30. coredis/commands/request.py +108 -0
  31. coredis/commands/script.py +296 -0
  32. coredis/commands/sentinel.py +246 -0
  33. coredis/config.py +50 -0
  34. coredis/connection.py +906 -0
  35. coredis/constants.cpython-313-darwin.so +0 -0
  36. coredis/constants.py +37 -0
  37. coredis/credentials.py +45 -0
  38. coredis/exceptions.py +360 -0
  39. coredis/experimental/__init__.py +1 -0
  40. coredis/globals.py +23 -0
  41. coredis/modules/__init__.py +121 -0
  42. coredis/modules/autocomplete.py +138 -0
  43. coredis/modules/base.py +262 -0
  44. coredis/modules/filters.py +1319 -0
  45. coredis/modules/graph.py +362 -0
  46. coredis/modules/json.py +691 -0
  47. coredis/modules/response/__init__.py +0 -0
  48. coredis/modules/response/_callbacks/__init__.py +0 -0
  49. coredis/modules/response/_callbacks/autocomplete.py +42 -0
  50. coredis/modules/response/_callbacks/graph.py +237 -0
  51. coredis/modules/response/_callbacks/json.py +21 -0
  52. coredis/modules/response/_callbacks/search.py +221 -0
  53. coredis/modules/response/_callbacks/timeseries.py +158 -0
  54. coredis/modules/response/types.py +179 -0
  55. coredis/modules/search.py +1089 -0
  56. coredis/modules/timeseries.py +1139 -0
  57. coredis/parser.cpython-313-darwin.so +0 -0
  58. coredis/parser.py +344 -0
  59. coredis/pipeline.py +1225 -0
  60. coredis/pool/__init__.py +11 -0
  61. coredis/pool/basic.py +453 -0
  62. coredis/pool/cluster.py +517 -0
  63. coredis/pool/nodemanager.py +340 -0
  64. coredis/py.typed +0 -0
  65. coredis/recipes/__init__.py +0 -0
  66. coredis/recipes/credentials/__init__.py +5 -0
  67. coredis/recipes/credentials/iam_provider.py +63 -0
  68. coredis/recipes/locks/__init__.py +5 -0
  69. coredis/recipes/locks/extend.lua +17 -0
  70. coredis/recipes/locks/lua_lock.py +281 -0
  71. coredis/recipes/locks/release.lua +10 -0
  72. coredis/response/__init__.py +5 -0
  73. coredis/response/_callbacks/__init__.py +538 -0
  74. coredis/response/_callbacks/acl.py +32 -0
  75. coredis/response/_callbacks/cluster.py +183 -0
  76. coredis/response/_callbacks/command.py +86 -0
  77. coredis/response/_callbacks/connection.py +31 -0
  78. coredis/response/_callbacks/geo.py +58 -0
  79. coredis/response/_callbacks/hash.py +85 -0
  80. coredis/response/_callbacks/keys.py +59 -0
  81. coredis/response/_callbacks/module.py +33 -0
  82. coredis/response/_callbacks/script.py +85 -0
  83. coredis/response/_callbacks/sentinel.py +179 -0
  84. coredis/response/_callbacks/server.py +241 -0
  85. coredis/response/_callbacks/sets.py +44 -0
  86. coredis/response/_callbacks/sorted_set.py +204 -0
  87. coredis/response/_callbacks/streams.py +185 -0
  88. coredis/response/_callbacks/strings.py +70 -0
  89. coredis/response/_callbacks/vector_sets.py +159 -0
  90. coredis/response/_utils.py +33 -0
  91. coredis/response/types.py +416 -0
  92. coredis/retry.py +233 -0
  93. coredis/sentinel.py +477 -0
  94. coredis/stream.py +369 -0
  95. coredis/tokens.py +2286 -0
  96. coredis/typing.py +593 -0
  97. coredis-5.5.0.dist-info/METADATA +211 -0
  98. coredis-5.5.0.dist-info/RECORD +100 -0
  99. coredis-5.5.0.dist-info/WHEEL +6 -0
  100. coredis-5.5.0.dist-info/licenses/LICENSE +23 -0
coredis/pool/nodemanager.py ADDED
@@ -0,0 +1,340 @@
+from __future__ import annotations
+
+import dataclasses
+import random
+import warnings
+from typing import TYPE_CHECKING, Any
+
+from coredis._utils import b, hash_slot, nativestr
+from coredis.exceptions import (
+    ConnectionError,
+    RedisClusterException,
+    RedisError,
+    ResponseError,
+)
+from coredis.typing import (
+    Iterable,
+    Iterator,
+    Literal,
+    Node,
+    RedisValueT,
+    StringT,
+)
+
+HASH_SLOTS = 16384
+HASH_SLOTS_SET = set(range(HASH_SLOTS))
+
+if TYPE_CHECKING:
+    from coredis import Redis
+
+
+@dataclasses.dataclass
+class ManagedNode:
+    """
+    Represents a cluster node (primary or replica) in a redis cluster
+    """
+
+    host: str
+    port: int
+    server_type: Literal["primary", "replica"] | None = None
+    node_id: str | None = None
+
+    @property
+    def name(self) -> str:
+        return f"{self.host}:{self.port}"
+
+
+class NodeManager:
+    """
+    Utility class to manage the topology of a redis cluster
+    """
+
+    def __init__(
+        self,
+        startup_nodes: Iterable[Node] | None = None,
+        reinitialize_steps: int | None = None,
+        skip_full_coverage_check: bool = False,
+        nodemanager_follow_cluster: bool = True,
+        decode_responses: bool = False,
+        **connection_kwargs: Any | None,
+    ) -> None:
+        """
+        :skip_full_coverage_check:
+            Skips the check of the cluster-require-full-coverage config, useful for
+            clusters without the CONFIG command (like AWS)
+        :nodemanager_follow_cluster:
+            During initialization the node manager will try the last set of nodes that
+            it was operating on. This allows the client to drift alongside the cluster
+            if the cluster nodes move around.
+        """
+        self.connection_kwargs = connection_kwargs
+        self.connection_kwargs.update(decode_responses=decode_responses)
+
+        self.nodes: dict[str, ManagedNode] = {}
+        self.slots: dict[int, list[ManagedNode]] = {}
+        self.startup_nodes: list[ManagedNode] = (
+            []
+            if startup_nodes is None
+            else list(ManagedNode(n["host"], n["port"]) for n in startup_nodes if n)
+        )
+        self.startup_nodes_reachable = False
+        self.orig_startup_nodes = list(self.startup_nodes)
+        self.reinitialize_counter = 0
+        self.reinitialize_steps = reinitialize_steps or 25
+        self._skip_full_coverage_check = skip_full_coverage_check
+        self.nodemanager_follow_cluster = nodemanager_follow_cluster
+        self.replicas_per_shard = 0
+
+    def keys_to_nodes_by_slot(self, *keys: RedisValueT) -> dict[str, dict[int, list[RedisValueT]]]:
+        mapping: dict[str, dict[int, list[RedisValueT]]] = {}
+        for k in keys:
+            node = self.node_from_slot(hash_slot(b(k)))
+            if node:
+                mapping.setdefault(node.name, {}).setdefault(hash_slot(b(k)), []).append(k)
+        return mapping
+
+    def node_from_slot(self, slot: int) -> ManagedNode | None:
+        for node in self.slots[slot]:
+            if node.server_type == "primary":
+                return node
+        return None  # noqa
+
+    def nodes_from_slots(self, *slots: int) -> dict[str, list[int]]:
+        mapping: dict[str, list[int]] = {}
+        for slot in slots:
+            if node := self.node_from_slot(slot):
+                mapping.setdefault(node.name, []).append(slot)
+        return mapping
+
+    def all_nodes(self) -> Iterator[ManagedNode]:
+        yield from self.nodes.values()
+
+    def all_primaries(self) -> Iterator[ManagedNode]:
+        for node in self.nodes.values():
+            if node.server_type == "primary":
+                yield node
+
+    def all_replicas(self) -> Iterator[ManagedNode]:
+        for node in self.nodes.values():
+            if node.server_type == "replica":
+                yield node
+
+    def random_startup_node_iter(self, primary: bool = False) -> Iterator[ManagedNode]:
+        """A generator that yields startup nodes in random order"""
+        options = list(
+            self.all_primaries()
+            if primary
+            else (self.startup_nodes if self.startup_nodes_reachable else [])
+        )
+        while options:
+            choice = random.choice(options)
+            options.remove(choice)
+            yield choice
+
+    def random_node(self, primary: bool = True) -> ManagedNode:
+        if primary:
+            return random.choice(list(self.all_primaries()))
+        else:
+            return random.choice(list(self.nodes.values()))
+
+    def get_redis_link(self, host: str, port: int) -> Redis[Any]:
+        from coredis.client import Redis
+
+        allowed_keys = (
+            "username",
+            "password",
+            "credential_provider",
+            "encoding",
+            "decode_responses",
+            "stream_timeout",
+            "connect_timeout",
+            "ssl_context",
+            "parser_class",
+            "loop",
+            "protocol_version",
+        )
+        connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in allowed_keys}
+        return Redis(host=host, port=port, **connection_kwargs)  # type: ignore
+
+    async def initialize(self) -> None:
+        """
+        Initializes the slots cache by asking all startup nodes what the
+        current cluster configuration is.
+
+        TODO: Currently the last node has the final say about how the configuration is set up.
+        Maybe it should stop trying once all slots are correctly covered or when one node that
+        can execute the CLUSTER SLOTS command is reached.
+        """
+        nodes_cache: dict[str, ManagedNode] = {}
+        tmp_slots: dict[int, list[ManagedNode]] = {}
+
+        all_slots_covered = False
+        disagreements: list[str] = []
+        self.startup_nodes_reachable = False
+
+        nodes = self.orig_startup_nodes
+        replicas: set[str] = set()
+        startup_node_errors: dict[str, list[str]] = {}
+
+        # With this option the client will attempt to connect to any of the previous set of nodes
+        # instead of the original set of startup nodes
+        if self.nodemanager_follow_cluster:
+            nodes = self.startup_nodes
+
+        for node in nodes:
+            cluster_slots = {}
+            try:
+                if node:
+                    r = self.get_redis_link(host=node.host, port=node.port)
+                    cluster_slots = await r.cluster_slots()
+                    self.startup_nodes_reachable = True
+            except RedisError as err:
+                startup_node_errors.setdefault(str(err), []).append(node.name)
+                continue
+
+            all_slots_covered = True
+            # If there's only one server in the cluster, its ``host`` is ''
+            # Fix it to the host in startup_nodes
+            if len(cluster_slots) == 1 and len(self.startup_nodes) == 1:
+                slots = cluster_slots.get((0, HASH_SLOTS - 1))
+                assert slots
+                single_node_slots = slots[0]
+                if len(single_node_slots["host"]) == 0:
+                    single_node_slots["host"] = self.startup_nodes[0].host
+                    single_node_slots["server_type"] = "master"
+
+            for min_slot, max_slot in cluster_slots:
+                _nodes = cluster_slots.get((min_slot, max_slot))
+                assert _nodes
+                primary_node = ManagedNode(
+                    host=_nodes[0]["host"],
+                    port=_nodes[0]["port"],
+                    server_type="primary",
+                    node_id=_nodes[0]["node_id"],
+                )
+                replica_nodes = [
+                    ManagedNode(
+                        host=n["host"],
+                        port=n["port"],
+                        server_type="replica",
+                        node_id=n["node_id"],
+                    )
+                    for n in _nodes[1:]
+                ]
+
+                primary_node.host = primary_node.host or node.host
+                nodes_cache[primary_node.name] = primary_node
+
+                for i in range(min_slot, max_slot + 1):
+                    if i not in tmp_slots:
+                        tmp_slots[i] = [primary_node]
+                        for replica_node in replica_nodes:
+                            nodes_cache[replica_node.name] = replica_node
+                            tmp_slots[i].append(replica_node)
+                            replicas.add(replica_node.name)
+                    else:
+                        # Validate that 2 nodes want to use the same slot cache setup
+                        if tmp_slots[i][0].name != node.name:
+                            disagreements.append(
+                                f"{tmp_slots[i][0].name} vs {node.name} on slot: {i}",
+                            )
+                            if len(disagreements) > 5:
+                                raise RedisClusterException(
+                                    "startup_nodes could not agree on a valid slots cache."
+                                    f" {', '.join(disagreements)}"
+                                )
+
+            self.refresh_table_asap = False
+
+            if not self._skip_full_coverage_check and (
+                await self.cluster_require_full_coverage(nodes_cache)
+            ):
+                all_slots_covered = set(tmp_slots.keys()) == HASH_SLOTS_SET
+
+            if all_slots_covered:
+                break
+
+        if not self.startup_nodes_reachable:
+            details = ""
+            # collapse any startup nodes by error representation
+            if startup_node_errors:
+                details = " Underlying errors:\n" + "\n".join(
+                    [f"- {err} [{','.join(nodes)}]" for err, nodes in startup_node_errors.items()]
+                )
+            raise RedisClusterException(
+                "Redis Cluster cannot be connected. "
+                "Please provide at least one reachable node."
+                f"{details}"
+            )
+
+        if not all_slots_covered:
+            raise RedisClusterException(
+                "Not all slots are covered after querying all startup_nodes. "
+                f"{len(tmp_slots)} of {HASH_SLOTS} covered..."
+            )
+
+        # Set the tmp variables to the real variables
+        self.slots = tmp_slots
+        self.nodes = nodes_cache
+        self.replicas_per_shard = int((len(self.nodes) / len(replicas)) - 1 if replicas else 0)
+        self.reinitialize_counter = 0
+        self.populate_startup_nodes()
+
+    async def increment_reinitialize_counter(self, ct: int = 1) -> None:
+        for _ in range(min(ct, self.reinitialize_steps)):
+            self.reinitialize_counter += 1
+            if self.reinitialize_counter % self.reinitialize_steps == 0:
+                await self.initialize()
+
+    async def node_require_full_coverage(self, node: ManagedNode) -> bool:
+        try:
+            r_node = self.get_redis_link(host=node.host, port=node.port)
+            node_config = await r_node.config_get(["cluster-require-full-coverage"])
+            return "yes" in node_config.values()
+        except ResponseError as err:
+            warnings.warn(
+                "Unable to determine whether the cluster requires full coverage "
+                f"due to response error from `CONFIG GET`: {err}. To suppress this "
+                "warning use skip_full_coverage_check=True when initializing the client."
+            )
+            return False
+
+    async def cluster_require_full_coverage(self, nodes_cache: dict[str, ManagedNode]) -> bool:
+        """
+        If the 'cluster-require-full-coverage no' config is set on the redis
+        servers, the cluster can still respond even when not all slots are
+        covered
+        """
+        nodes = nodes_cache or self.nodes
+
+        for node in nodes.values():
+            try:
+                if await self.node_require_full_coverage(node):
+                    return True
+            except ConnectionError:
+                continue
+        return False
+
+    def set_node(
+        self,
+        host: StringT,
+        port: int,
+        server_type: Literal["primary", "replica"],
+    ) -> ManagedNode:
+        """Updates data for a node"""
+        node = ManagedNode(
+            host=nativestr(host),
+            port=port,
+            server_type=server_type,
+            node_id=None,
+        )
+        self.nodes[node.name] = node
+        return node
+
+    def populate_startup_nodes(self) -> None:
+        self.startup_nodes.clear()
+        for n in self.nodes.values():
+            self.startup_nodes.append(n)
+
+    async def reset(self) -> None:
+        await self.initialize()
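The NodeManager added above is normally constructed internally by the cluster client, but its topology discovery can be exercised directly. A minimal sketch, not part of the package; the 127.0.0.1:7000 endpoint is an assumed local cluster node:

import asyncio

from coredis.pool.nodemanager import NodeManager


async def show_topology() -> None:
    # Any single reachable node is enough; initialize() calls CLUSTER SLOTS
    # and fills manager.nodes / manager.slots for the whole cluster.
    manager = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])
    await manager.initialize()
    for node in manager.all_primaries():
        owned = sum(1 for assigned in manager.slots.values() if assigned[0].name == node.name)
        print(f"{node.name} serves {owned} slots")


asyncio.run(show_topology())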
coredis/py.typed ADDED
File without changes
coredis/recipes/credentials/__init__.py ADDED
@@ -0,0 +1,5 @@
+from __future__ import annotations
+
+from .iam_provider import ElastiCacheIAMProvider
+
+__all__ = ["ElastiCacheIAMProvider"]
coredis/recipes/credentials/iam_provider.py ADDED
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from urllib.parse import ParseResult, urlencode, urlunparse
+
+# aiobotocore, botocore, asyncache & cachetools will need to be installed in addition
+# to coredis dependencies. These can also be requested by installing coredis
+# as coredis[recipes]
+import aiobotocore.session
+from aiobotocore.signers import AioRequestSigner
+from asyncache import cached
+from botocore.model import ServiceId
+from cachetools import TTLCache
+
+from coredis.credentials import AbstractCredentialProvider, UserPass
+
+
+class ElastiCacheIAMProvider(AbstractCredentialProvider):
+    """
+    Credential provider that uses IAM authentication
+    to connect to an Elasticache instance.
+    """
+
+    def __init__(self, user: str, cluster_name: str, region: str = "us-east-1") -> None:
+        self.user: str = user
+        self.cluster_name: str = cluster_name
+        self.region: str = region
+
+        self.session = aiobotocore.session.get_session()
+
+    @cached(cache=TTLCache(maxsize=128, ttl=900))  # type: ignore[misc]
+    async def get_credentials(self) -> UserPass:
+        """
+        Returns a short-lived token that can be used to connect to an
+        IAM enabled Elasticache instance. The token will be cached for
+        its lifetime (15 minutes) to avoid unnecessary requests.
+        """
+        request_signer = AioRequestSigner(
+            ServiceId("elasticache"),
+            self.region,
+            "elasticache",
+            "v4",
+            await self.session.get_credentials(),
+            self.session.get_component("event_emitter"),
+        )
+        query_params = {"Action": "connect", "User": self.user}
+        url = urlunparse(
+            ParseResult(
+                scheme="https",
+                netloc=self.cluster_name,
+                path="/",
+                query=urlencode(query_params),
+                params="",
+                fragment="",
+            )
+        )
+        signed_url = await request_signer.generate_presigned_url(
+            {"method": "GET", "url": url, "body": {}, "headers": {}, "context": {}},
+            operation_name="connect",
+            expires_in=900,
+            region_name=self.region,
+        )
+        # Need to strip the protocol so that Elasticache accepts it
+        return UserPass(self.user, signed_url.removeprefix("https://"))
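This recipe plugs into the client through the credential_provider keyword (it is listed among the allowed connection kwargs in nodemanager.py above). A hedged sketch, assuming an IAM-enabled ElastiCache cluster; the user name, cluster name, and endpoint below are placeholders:

import asyncio

from coredis import Redis
from coredis.recipes.credentials import ElastiCacheIAMProvider


async def main() -> None:
    # Placeholder IAM user and cache cluster name; the provider signs a
    # presigned "connect" URL that is then used as the password.
    provider = ElastiCacheIAMProvider(user="iam-app-user", cluster_name="my-cache", region="us-east-1")
    # Placeholder endpoint; in practice ElastiCache IAM auth also requires TLS,
    # which is omitted from this sketch.
    client = Redis(
        host="my-cache.xxxxxx.use1.cache.amazonaws.com",
        port=6379,
        credential_provider=provider,
    )
    print(await client.ping())


asyncio.run(main())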
coredis/recipes/locks/__init__.py ADDED
@@ -0,0 +1,5 @@
+from __future__ import annotations
+
+from .lua_lock import LuaLock
+
+__all__ = ["LuaLock"]
coredis/recipes/locks/extend.lua ADDED
@@ -0,0 +1,17 @@
+-- KEYS[1] - lock name
+-- ARGV[1] - token
+-- ARGV[2] - additional milliseconds
+-- return 1 if the lock's time was extended, otherwise 0
+local token = redis.call('get', KEYS[1])
+if not token or token ~= ARGV[1] then
+    return 0
+end
+local expiration = redis.call('pttl', KEYS[1])
+if not expiration then
+    expiration = 0
+end
+if expiration < 0 then
+    return 0
+end
+redis.call('pexpire', KEYS[1], expiration + ARGV[2])
+return 1
+ return 1