coredis-5.5.0-cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. 22fe76227e35f92ab5c3__mypyc.cpython-313-darwin.so +0 -0
  2. coredis/__init__.py +42 -0
  3. coredis/_enum.py +42 -0
  4. coredis/_json.py +11 -0
  5. coredis/_packer.cpython-313-darwin.so +0 -0
  6. coredis/_packer.py +71 -0
  7. coredis/_protocols.py +50 -0
  8. coredis/_py_311_typing.py +20 -0
  9. coredis/_py_312_typing.py +17 -0
  10. coredis/_sidecar.py +114 -0
  11. coredis/_utils.cpython-313-darwin.so +0 -0
  12. coredis/_utils.py +440 -0
  13. coredis/_version.py +34 -0
  14. coredis/_version.pyi +1 -0
  15. coredis/cache.py +801 -0
  16. coredis/client/__init__.py +6 -0
  17. coredis/client/basic.py +1240 -0
  18. coredis/client/cluster.py +1265 -0
  19. coredis/commands/__init__.py +64 -0
  20. coredis/commands/_key_spec.py +517 -0
  21. coredis/commands/_utils.py +108 -0
  22. coredis/commands/_validators.py +159 -0
  23. coredis/commands/_wrappers.py +175 -0
  24. coredis/commands/bitfield.py +110 -0
  25. coredis/commands/constants.py +662 -0
  26. coredis/commands/core.py +8484 -0
  27. coredis/commands/function.py +408 -0
  28. coredis/commands/monitor.py +168 -0
  29. coredis/commands/pubsub.py +905 -0
  30. coredis/commands/request.py +108 -0
  31. coredis/commands/script.py +296 -0
  32. coredis/commands/sentinel.py +246 -0
  33. coredis/config.py +50 -0
  34. coredis/connection.py +906 -0
  35. coredis/constants.cpython-313-darwin.so +0 -0
  36. coredis/constants.py +37 -0
  37. coredis/credentials.py +45 -0
  38. coredis/exceptions.py +360 -0
  39. coredis/experimental/__init__.py +1 -0
  40. coredis/globals.py +23 -0
  41. coredis/modules/__init__.py +121 -0
  42. coredis/modules/autocomplete.py +138 -0
  43. coredis/modules/base.py +262 -0
  44. coredis/modules/filters.py +1319 -0
  45. coredis/modules/graph.py +362 -0
  46. coredis/modules/json.py +691 -0
  47. coredis/modules/response/__init__.py +0 -0
  48. coredis/modules/response/_callbacks/__init__.py +0 -0
  49. coredis/modules/response/_callbacks/autocomplete.py +42 -0
  50. coredis/modules/response/_callbacks/graph.py +237 -0
  51. coredis/modules/response/_callbacks/json.py +21 -0
  52. coredis/modules/response/_callbacks/search.py +221 -0
  53. coredis/modules/response/_callbacks/timeseries.py +158 -0
  54. coredis/modules/response/types.py +179 -0
  55. coredis/modules/search.py +1089 -0
  56. coredis/modules/timeseries.py +1139 -0
  57. coredis/parser.cpython-313-darwin.so +0 -0
  58. coredis/parser.py +344 -0
  59. coredis/pipeline.py +1225 -0
  60. coredis/pool/__init__.py +11 -0
  61. coredis/pool/basic.py +453 -0
  62. coredis/pool/cluster.py +517 -0
  63. coredis/pool/nodemanager.py +340 -0
  64. coredis/py.typed +0 -0
  65. coredis/recipes/__init__.py +0 -0
  66. coredis/recipes/credentials/__init__.py +5 -0
  67. coredis/recipes/credentials/iam_provider.py +63 -0
  68. coredis/recipes/locks/__init__.py +5 -0
  69. coredis/recipes/locks/extend.lua +17 -0
  70. coredis/recipes/locks/lua_lock.py +281 -0
  71. coredis/recipes/locks/release.lua +10 -0
  72. coredis/response/__init__.py +5 -0
  73. coredis/response/_callbacks/__init__.py +538 -0
  74. coredis/response/_callbacks/acl.py +32 -0
  75. coredis/response/_callbacks/cluster.py +183 -0
  76. coredis/response/_callbacks/command.py +86 -0
  77. coredis/response/_callbacks/connection.py +31 -0
  78. coredis/response/_callbacks/geo.py +58 -0
  79. coredis/response/_callbacks/hash.py +85 -0
  80. coredis/response/_callbacks/keys.py +59 -0
  81. coredis/response/_callbacks/module.py +33 -0
  82. coredis/response/_callbacks/script.py +85 -0
  83. coredis/response/_callbacks/sentinel.py +179 -0
  84. coredis/response/_callbacks/server.py +241 -0
  85. coredis/response/_callbacks/sets.py +44 -0
  86. coredis/response/_callbacks/sorted_set.py +204 -0
  87. coredis/response/_callbacks/streams.py +185 -0
  88. coredis/response/_callbacks/strings.py +70 -0
  89. coredis/response/_callbacks/vector_sets.py +159 -0
  90. coredis/response/_utils.py +33 -0
  91. coredis/response/types.py +416 -0
  92. coredis/retry.py +233 -0
  93. coredis/sentinel.py +477 -0
  94. coredis/stream.py +369 -0
  95. coredis/tokens.py +2286 -0
  96. coredis/typing.py +593 -0
  97. coredis-5.5.0.dist-info/METADATA +211 -0
  98. coredis-5.5.0.dist-info/RECORD +100 -0
  99. coredis-5.5.0.dist-info/WHEEL +6 -0
  100. coredis-5.5.0.dist-info/licenses/LICENSE +23 -0
@@ -0,0 +1,517 @@
+ from __future__ import annotations
+
+ import asyncio
+ import os
+ import random
+ import threading
+ import time
+ import warnings
+ from typing import Any, cast
+
+ import async_timeout
+
+ from coredis._utils import b, hash_slot
+ from coredis.connection import ClusterConnection, Connection
+ from coredis.exceptions import ConnectionError, RedisClusterException
+ from coredis.globals import READONLY_COMMANDS
+ from coredis.pool.basic import ConnectionPool
+ from coredis.pool.nodemanager import ManagedNode, NodeManager
+ from coredis.typing import (
+     Callable,
+     ClassVar,
+     Iterable,
+     Node,
+     RedisValueT,
+     StringT,
+ )
+
+
+ class ClusterConnectionPool(ConnectionPool):
+     """
+     Custom connection pool for :class:`~coredis.RedisCluster` client
+     """
+
+     #: Mapping of querystring arguments to their parser functions
+     URL_QUERY_ARGUMENT_PARSERS: ClassVar[
+         dict[str, Callable[..., int | float | bool | str | None]]
+     ] = {
+         **ConnectionPool.URL_QUERY_ARGUMENT_PARSERS,
+         "max_connections_per_node": bool,
+         "reinitialize_steps": int,
+         "skip_full_coverage_check": bool,
+         "read_from_replicas": bool,
+         "blocking": bool,
+     }
+
+     nodes: NodeManager
+     connection_class: type[ClusterConnection]
+
+     _created_connections_per_node: dict[str, int]
+     _cluster_available_connections: dict[str, asyncio.Queue[Connection | None]]
+     _cluster_in_use_connections: dict[str, set[Connection]]
+
+     def __init__(
+         self,
+         startup_nodes: Iterable[Node] | None = None,
+         connection_class: type[ClusterConnection] = ClusterConnection,
+         queue_class: type[asyncio.Queue[Connection | None]] = asyncio.LifoQueue,
+         max_connections: int | None = None,
+         max_connections_per_node: bool = False,
+         reinitialize_steps: int | None = None,
+         skip_full_coverage_check: bool = False,
+         nodemanager_follow_cluster: bool = True,
+         readonly: bool = False,
+         read_from_replicas: bool = False,
+         max_idle_time: int = 0,
+         idle_check_interval: int = 1,
+         blocking: bool = False,
+         timeout: int = 20,
+         **connection_kwargs: Any | None,
+     ):
+         """
+
+         Changes
+           - .. versionchanged:: 4.4.0
+
+             - :paramref:`nodemanager_follow_cluster` now defaults to ``True``
+
+           - .. deprecated:: 4.4.0
+
+             - :paramref:`readonly` renamed to :paramref:`read_from_replicas`
+
+         :param max_connections: Maximum number of connections to allow concurrently from this
+          client. If the value is ``None`` it will default to 32.
+         :param max_connections_per_node: Whether to apply the value of :paramref:`max_connections`
+          on a per-node basis or cluster wide. If ``False`` and :paramref:`blocking` is ``True``
+          the per-node connection pools will have a maximum size of :paramref:`max_connections`
+          divided by the number of nodes in the cluster.
+         :param blocking: If ``True`` the client will block for at most :paramref:`timeout` seconds
+          if :paramref:`max_connections` is reached when trying to obtain a connection
+         :param timeout: Number of seconds to block if :paramref:`blocking` is ``True`` when trying
+          to obtain a connection.
+         :param skip_full_coverage_check:
+          Skips the check of the cluster-require-full-coverage config, useful for clusters
+          without the :rediscommand:`CONFIG` command (for example, AWS ElastiCache)
+         :param nodemanager_follow_cluster:
+          During initialization the node manager will try the last set of nodes that
+          it was operating on. This allows the client to drift along with the cluster
+          if the cluster nodes move around a lot.
+         :param read_from_replicas: If ``True`` the client will route read-only commands to replicas
+         """
+         super().__init__(
+             connection_class=connection_class,
+             max_connections=max_connections,
+             max_idle_time=max_idle_time,
+             idle_check_interval=idle_check_interval,
+             **connection_kwargs,
+         )
+         self.queue_class = queue_class
+         # Special case to make the from_url method compliant with cluster settings.
+         # The from_url method sends in the ip and port through a different variable than the
+         # regular startup_nodes variable.
+
+         if startup_nodes is None:
+             host = connection_kwargs.pop("host", None)
+             port = connection_kwargs.pop("port", None)
+             if host and port:
+                 startup_nodes = [Node(host=str(host), port=int(port))]
+         self.blocking = blocking
+         self.blocking_timeout = timeout
+         self.max_connections = max_connections or 2**31
+         self.max_connections_per_node = max_connections_per_node
+         self.nodes = NodeManager(
+             startup_nodes,
+             reinitialize_steps=reinitialize_steps,
+             skip_full_coverage_check=skip_full_coverage_check,
+             max_connections=self.max_connections,
+             nodemanager_follow_cluster=nodemanager_follow_cluster,
+             **connection_kwargs,  # type: ignore
+         )
+         self.connection_kwargs = connection_kwargs
+         self.connection_kwargs["read_from_replicas"] = read_from_replicas
+         self.read_from_replicas = read_from_replicas or readonly
+         self.max_idle_time = max_idle_time
+         self.idle_check_interval = idle_check_interval
+         self.reset()
+
+         if "stream_timeout" not in self.connection_kwargs:
+             self.connection_kwargs["stream_timeout"] = None
+         self._init_lock = asyncio.Lock()
+
+     def __repr__(self) -> str:
+         """
+         Returns a string with all unique ip:port combinations that this pool
+         is connected to
+         """
+
+         return "{}<{}>".format(
+             type(self).__name__,
+             ", ".join(
+                 [self.connection_class.describe(node.__dict__) for node in self.nodes.startup_nodes]
+             ),
+         )
+
+     async def initialize(self) -> None:
+         if not self.initialized:
+             async with self._init_lock:
+                 if not self.initialized:
+                     await self.nodes.initialize()
+                     if not self.max_connections_per_node and self.max_connections < len(
+                         self.nodes.nodes
+                     ):
+                         warnings.warn(
+                             f"The value of max_connections={self.max_connections} "
+                             "should be at least equal to the number of nodes "
+                             f"({len(self.nodes.nodes)}) in the cluster and has been increased by "
+                             f"{len(self.nodes.nodes) - self.max_connections} connections."
+                         )
+                         self.max_connections = len(self.nodes.nodes)
+                     await super().initialize()
+
+     async def disconnect_on_idle_time_exceeded(self, connection: Connection) -> None:
+         assert isinstance(connection, ClusterConnection)
+         while True:
+             if (
+                 time.time() - connection.last_active_at > self.max_idle_time
+                 and not connection.requests_pending
+             ):
+                 connection.disconnect()
+                 node = connection.node
+                 if node.name in self._created_connections_per_node:
+                     self._created_connections_per_node[node.name] -= 1
+                 break
+             await asyncio.sleep(self.idle_check_interval)
+
+     def reset(self) -> None:
+         """Resets the connection pool back to a clean state"""
+         self.pid = os.getpid()
+         self._created_connections_per_node = {}
+         self._cluster_available_connections = {}
+         self._cluster_in_use_connections = {}
+         self._check_lock = threading.Lock()
+         self.initialized = False
+
+     def checkpid(self) -> None:  # noqa
+         if self.pid != os.getpid():
+             with self._check_lock:
+                 if self.pid == os.getpid():
+                     # another thread already did the work while we
+                     # waited on the lock
+
+                     return
+                 self.disconnect()
+                 self.reset()
+
+     async def get_connection(
+         self,
+         command_name: bytes | None = None,
+         *keys: RedisValueT,
+         acquire: bool = True,
+         **options: RedisValueT | None,
+     ) -> Connection:
+         # Only pubsub command/connection should be allowed here
+
+         if command_name != b"pubsub":
+             raise RedisClusterException("Only 'pubsub' commands can use get_connection()")
+
+         routing_key = options.pop("channel", None)
+         node_type = options.pop("node_type", "primary")
+
+         if not routing_key:
+             return await self.get_random_connection()
+
+         slot = hash_slot(b(routing_key))
+         if node_type == "replica":
+             node = self.get_replica_node_by_slot(slot)
+         else:
+             node = self.get_primary_node_by_slot(slot)
+         self.checkpid()
+
+         try:
+             connection = self.__node_pool(node.name).get_nowait()
+         except asyncio.QueueEmpty:
+             connection = None
+         if not connection:
+             connection = self._make_node_connection(node)
+         else:
+             if connection.is_connected and connection.needs_handshake:
+                 await connection.perform_handshake()
+
+         if acquire:
+             self._cluster_in_use_connections.setdefault(node.name, set())
+             self._cluster_in_use_connections[node.name].add(connection)
+         else:
+             self.__node_pool(node.name).put_nowait(connection)
+         return connection
+
+     def _make_node_connection(self, node: ManagedNode) -> Connection:
+         """Creates a new connection to a node"""
+
+         if self.count_all_num_connections(node) >= self.max_connections:
+             if self.max_connections_per_node:
+                 raise ConnectionError(
+                     f"Too many connections ({self.count_all_num_connections(node)}) for node: {node.name}"
+                 )
+
+             raise ConnectionError("Too many connections")
+
+         self._created_connections_per_node.setdefault(node.name, 0)
+         self._created_connections_per_node[node.name] += 1
+         connection = self.connection_class(
+             host=node.host,
+             port=node.port,
+             **self.connection_kwargs,  # type: ignore
+         )
+
+         # Must store the node in the connection to make it easier to track
+         connection.node = node
+
+         if self.max_idle_time > self.idle_check_interval > 0:
+             # do not await the future
+             asyncio.ensure_future(self.disconnect_on_idle_time_exceeded(connection))
+
+         return connection
+
+     def __node_pool(self, node: str) -> asyncio.Queue[Connection | None]:
+         if not self._cluster_available_connections.get(node):
+             self._cluster_available_connections[node] = self.__default_node_queue()
+         return self._cluster_available_connections[node]
+
+     def __default_node_queue(
+         self,
+     ) -> asyncio.Queue[Connection | None]:
+         q_size = max(
+             1,
+             int(
+                 self.max_connections
+                 if self.max_connections_per_node
+                 else self.max_connections / len(self.nodes.nodes)
+             ),
+         )
+
+         q: asyncio.Queue[Connection | None] = self.queue_class(q_size)
+
+         # If the queue is non-blocking, we don't need to pre-populate it
+         if not self.blocking:
+             return q
+
+         if q_size > 2**16:  # noqa
+             raise RuntimeError(
+                 f"Requested unsupported value of max_connections: {q_size} in blocking mode"
+             )
+
+         while True:
+             try:
+                 q.put_nowait(None)
+             except asyncio.QueueFull:
+                 break
+         return q
+
+     def release(self, connection: Connection) -> None:
+         """Releases the connection back to the pool"""
+         assert isinstance(connection, ClusterConnection)
+
+         self.checkpid()
+
+         if connection.pid == self.pid:
+             # Remove the current connection from _in_use_connections and add it back to the
+             # available pool. There are cases where the connection to be removed no longer
+             # exists, so it must be removed safely.
+             i_c = self._cluster_in_use_connections.get(connection.node.name, set())
+
+             if connection in i_c:
+                 i_c.remove(connection)
+             else:
+                 pass
+             try:
+                 self.__node_pool(connection.node.name).put_nowait(connection)
+             except asyncio.QueueFull:
+                 connection.disconnect()
+                 # reduce the node connection count in case a too-many-connections error was raised
+                 if connection.node.name in self._created_connections_per_node:
+                     self._created_connections_per_node[connection.node.name] -= 1
+
+     def disconnect(self) -> None:
+         """Closes all connections in the pool"""
+         for node_connections in self._cluster_in_use_connections.values():
+             for connection in node_connections:
+                 connection.disconnect()
+         for node, available_connections in self._cluster_available_connections.items():
+             removed = 0
+             while True:
+                 try:
+                     _connection = available_connections.get_nowait()
+                     if _connection:
+                         _connection.disconnect()
+                         if node in self._created_connections_per_node:
+                             self._created_connections_per_node[node] -= 1
+                     removed += 1
+                 except asyncio.QueueEmpty:
+                     break
+             # Refill the queue with empty slots
+             for _ in range(removed):
+                 available_connections.put_nowait(None)
+
+     def count_all_num_connections(self, node: ManagedNode) -> int:
+         if self.max_connections_per_node:
+             return self._created_connections_per_node.get(node.name, 0)
+
+         return sum(i for i in self._created_connections_per_node.values())
+
+     async def get_random_connection(self, primary: bool = False) -> ClusterConnection:
+         """Opens a new connection to a random redis server in the cluster"""
+         for node in self.nodes.random_startup_node_iter(primary):
+             connection = await self.get_connection_by_node(node)
+             if connection:
+                 return connection
+         raise RedisClusterException("Can't reach a single startup node.")
+
+     async def get_connection_by_key(self, key: StringT) -> ClusterConnection:
+         if not key:
+             raise RedisClusterException("No way to dispatch this command to Redis Cluster.")
+
+         return await self.get_connection_by_slot(hash_slot(b(key)))
+
+     async def get_connection_by_slot(self, slot: int) -> ClusterConnection:
+         """
+         Determines which server a specific slot belongs to and returns a
+         connection to it
+         """
+         self.checkpid()
+
+         try:
+             return await self.get_connection_by_node(self.get_node_by_slot(slot))
+         except KeyError:
+             return await self.get_random_connection()
+
+     async def get_connection_by_node(self, node: ManagedNode) -> ClusterConnection:
+         """Gets a connection by node"""
+         self.checkpid()
+
+         if not self.blocking:
+             try:
+                 connection = self.__node_pool(node.name).get_nowait()
+             except asyncio.QueueEmpty:
+                 connection = None
+         else:
+             try:
+                 async with async_timeout.timeout(self.blocking_timeout):
+                     connection = await self.__node_pool(node.name).get()
+             except asyncio.TimeoutError:
+                 raise ConnectionError("No connection available.")
+
+         if not connection:
+             connection = self._make_node_connection(node)
+
+         self._cluster_in_use_connections.setdefault(node.name, set()).add(connection)
+         return cast(ClusterConnection, connection)
+
+     def get_primary_node_by_slot(self, slot: int) -> ManagedNode:
+         return self.get_primary_node_by_slots([slot])
+
+     def get_primary_node_by_slots(self, slots: list[int]) -> ManagedNode:
+         nodes = {self.nodes.slots[slot][0].node_id for slot in slots}
+         if len(nodes) == 1:
+             return self.nodes.slots[slots[0]][0]
+         else:
+             raise RedisClusterException(f"Unable to map slots {slots} to a single node")
+
+     def get_replica_node_by_slot(self, slot: int) -> ManagedNode:
+         return self.get_replica_node_by_slots([slot])
+
+     def get_replica_node_by_slots(
+         self, slots: list[int], replica_only: bool = False
+     ) -> ManagedNode:
+         nodes = {self.nodes.slots[slot][0].node_id for slot in slots}
+         if len(nodes) == 1:
+             slot = slots[0]
+             if replica_only:
+                 return random.choice(
+                     [node for node in self.nodes.slots[slot] if node.server_type != "primary"]
+                 )
+             else:
+                 return random.choice(self.nodes.slots[slot])
+         else:
+             raise RedisClusterException(f"Unable to map slots {slots} to a single node")
+
+     def get_node_by_slot(self, slot: int, command: bytes | None = None) -> ManagedNode:
+         if self.read_from_replicas and command in READONLY_COMMANDS:
+             return self.get_replica_node_by_slot(slot)
+         return self.get_primary_node_by_slot(slot)
+
+     def get_node_by_slots(self, slots: list[int], command: bytes | None = None) -> ManagedNode:
+         if self.read_from_replicas and command in READONLY_COMMANDS:
+             return self.get_replica_node_by_slots(slots)
+         return self.get_primary_node_by_slots(slots)
+
+
+ class BlockingClusterConnectionPool(ClusterConnectionPool):
+     """
+     .. versionadded:: 4.3.0
+
+     Blocking connection pool for :class:`~coredis.RedisCluster` client
+
+     .. note:: This is just a convenience subclass of :class:`~coredis.pool.ClusterConnectionPool`
+        that sets :paramref:`~coredis.pool.ClusterConnectionPool.blocking` to ``True``
+     """
+
+     def __init__(
+         self,
+         startup_nodes: Iterable[Node] | None = None,
+         connection_class: type[ClusterConnection] = ClusterConnection,
+         queue_class: type[asyncio.Queue[Connection | None]] = asyncio.LifoQueue,
+         max_connections: int | None = None,
+         max_connections_per_node: bool = False,
+         reinitialize_steps: int | None = None,
+         skip_full_coverage_check: bool = False,
+         nodemanager_follow_cluster: bool = True,
+         readonly: bool = False,
+         read_from_replicas: bool = False,
+         max_idle_time: int = 0,
+         idle_check_interval: int = 1,
+         timeout: int = 20,
+         **connection_kwargs: Any | None,
+     ):
+         """
+
+         Changes
+           - .. versionchanged:: 4.4.0
+
+             - :paramref:`nodemanager_follow_cluster` now defaults to ``True``
+
+           - .. deprecated:: 4.4.0
+
+             - :paramref:`readonly` renamed to :paramref:`read_from_replicas`
+
+         :param max_connections: Maximum number of connections to allow concurrently from this
+          client.
+         :param max_connections_per_node: Whether to apply the value of :paramref:`max_connections`
+          on a per-node basis or cluster wide. If ``False`` the per-node connection pools will have
+          a maximum size of :paramref:`max_connections` divided by the number of nodes in the
+          cluster.
+         :param timeout: Number of seconds to block when trying to obtain a connection.
+         :param skip_full_coverage_check:
+          Skips the check of the cluster-require-full-coverage config, useful for clusters
+          without the CONFIG command (for example, AWS ElastiCache)
+         :param nodemanager_follow_cluster:
+          During initialization the node manager will try the last set of nodes that
+          it was operating on. This allows the client to drift along with the cluster
+          if the cluster nodes move around a lot.
+         """
+         super().__init__(
+             startup_nodes=startup_nodes,
+             connection_class=connection_class,
+             queue_class=queue_class,
+             max_connections=max_connections,
+             max_connections_per_node=max_connections_per_node,
+             reinitialize_steps=reinitialize_steps,
+             skip_full_coverage_check=skip_full_coverage_check,
+             nodemanager_follow_cluster=nodemanager_follow_cluster,
+             readonly=readonly,
+             read_from_replicas=read_from_replicas,
+             max_idle_time=max_idle_time,
+             idle_check_interval=idle_check_interval,
+             timeout=timeout,
+             blocking=True,
+             **connection_kwargs,
+         )
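
For orientation, below is a minimal, illustrative usage sketch of the two pools added by this file, based only on the constructor parameters and methods shown in the diff above. The node address and the routing key are placeholders, not values shipped with the package; in practice these pools are normally handed to the coredis.RedisCluster client rather than driven directly.

import asyncio

from coredis.pool.cluster import BlockingClusterConnectionPool, ClusterConnectionPool
from coredis.typing import Node


async def main() -> None:
    # Non-blocking pool: connections are created on demand up to max_connections,
    # after which _make_node_connection raises ConnectionError("Too many connections").
    pool = ClusterConnectionPool(
        startup_nodes=[Node(host="127.0.0.1", port=7000)],  # placeholder address
        max_connections=64,
        read_from_replicas=True,
    )
    await pool.initialize()

    # Route a key to its hash slot and obtain a connection to the owning node.
    connection = await pool.get_connection_by_key("example-key")  # placeholder key
    try:
        ...  # issue commands on the connection
    finally:
        pool.release(connection)

    # Blocking variant: equivalent to ClusterConnectionPool(..., blocking=True);
    # waits up to `timeout` seconds for a free connection instead of failing immediately.
    blocking_pool = BlockingClusterConnectionPool(
        startup_nodes=[Node(host="127.0.0.1", port=7000)],
        max_connections=64,
        timeout=20,
    )
    await blocking_pool.initialize()
    blocking_pool.disconnect()
    pool.disconnect()


asyncio.run(main())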