coredis-5.5.0-cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. 22fe76227e35f92ab5c3__mypyc.cpython-313-darwin.so +0 -0
  2. coredis/__init__.py +42 -0
  3. coredis/_enum.py +42 -0
  4. coredis/_json.py +11 -0
  5. coredis/_packer.cpython-313-darwin.so +0 -0
  6. coredis/_packer.py +71 -0
  7. coredis/_protocols.py +50 -0
  8. coredis/_py_311_typing.py +20 -0
  9. coredis/_py_312_typing.py +17 -0
  10. coredis/_sidecar.py +114 -0
  11. coredis/_utils.cpython-313-darwin.so +0 -0
  12. coredis/_utils.py +440 -0
  13. coredis/_version.py +34 -0
  14. coredis/_version.pyi +1 -0
  15. coredis/cache.py +801 -0
  16. coredis/client/__init__.py +6 -0
  17. coredis/client/basic.py +1240 -0
  18. coredis/client/cluster.py +1265 -0
  19. coredis/commands/__init__.py +64 -0
  20. coredis/commands/_key_spec.py +517 -0
  21. coredis/commands/_utils.py +108 -0
  22. coredis/commands/_validators.py +159 -0
  23. coredis/commands/_wrappers.py +175 -0
  24. coredis/commands/bitfield.py +110 -0
  25. coredis/commands/constants.py +662 -0
  26. coredis/commands/core.py +8484 -0
  27. coredis/commands/function.py +408 -0
  28. coredis/commands/monitor.py +168 -0
  29. coredis/commands/pubsub.py +905 -0
  30. coredis/commands/request.py +108 -0
  31. coredis/commands/script.py +296 -0
  32. coredis/commands/sentinel.py +246 -0
  33. coredis/config.py +50 -0
  34. coredis/connection.py +906 -0
  35. coredis/constants.cpython-313-darwin.so +0 -0
  36. coredis/constants.py +37 -0
  37. coredis/credentials.py +45 -0
  38. coredis/exceptions.py +360 -0
  39. coredis/experimental/__init__.py +1 -0
  40. coredis/globals.py +23 -0
  41. coredis/modules/__init__.py +121 -0
  42. coredis/modules/autocomplete.py +138 -0
  43. coredis/modules/base.py +262 -0
  44. coredis/modules/filters.py +1319 -0
  45. coredis/modules/graph.py +362 -0
  46. coredis/modules/json.py +691 -0
  47. coredis/modules/response/__init__.py +0 -0
  48. coredis/modules/response/_callbacks/__init__.py +0 -0
  49. coredis/modules/response/_callbacks/autocomplete.py +42 -0
  50. coredis/modules/response/_callbacks/graph.py +237 -0
  51. coredis/modules/response/_callbacks/json.py +21 -0
  52. coredis/modules/response/_callbacks/search.py +221 -0
  53. coredis/modules/response/_callbacks/timeseries.py +158 -0
  54. coredis/modules/response/types.py +179 -0
  55. coredis/modules/search.py +1089 -0
  56. coredis/modules/timeseries.py +1139 -0
  57. coredis/parser.cpython-313-darwin.so +0 -0
  58. coredis/parser.py +344 -0
  59. coredis/pipeline.py +1225 -0
  60. coredis/pool/__init__.py +11 -0
  61. coredis/pool/basic.py +453 -0
  62. coredis/pool/cluster.py +517 -0
  63. coredis/pool/nodemanager.py +340 -0
  64. coredis/py.typed +0 -0
  65. coredis/recipes/__init__.py +0 -0
  66. coredis/recipes/credentials/__init__.py +5 -0
  67. coredis/recipes/credentials/iam_provider.py +63 -0
  68. coredis/recipes/locks/__init__.py +5 -0
  69. coredis/recipes/locks/extend.lua +17 -0
  70. coredis/recipes/locks/lua_lock.py +281 -0
  71. coredis/recipes/locks/release.lua +10 -0
  72. coredis/response/__init__.py +5 -0
  73. coredis/response/_callbacks/__init__.py +538 -0
  74. coredis/response/_callbacks/acl.py +32 -0
  75. coredis/response/_callbacks/cluster.py +183 -0
  76. coredis/response/_callbacks/command.py +86 -0
  77. coredis/response/_callbacks/connection.py +31 -0
  78. coredis/response/_callbacks/geo.py +58 -0
  79. coredis/response/_callbacks/hash.py +85 -0
  80. coredis/response/_callbacks/keys.py +59 -0
  81. coredis/response/_callbacks/module.py +33 -0
  82. coredis/response/_callbacks/script.py +85 -0
  83. coredis/response/_callbacks/sentinel.py +179 -0
  84. coredis/response/_callbacks/server.py +241 -0
  85. coredis/response/_callbacks/sets.py +44 -0
  86. coredis/response/_callbacks/sorted_set.py +204 -0
  87. coredis/response/_callbacks/streams.py +185 -0
  88. coredis/response/_callbacks/strings.py +70 -0
  89. coredis/response/_callbacks/vector_sets.py +159 -0
  90. coredis/response/_utils.py +33 -0
  91. coredis/response/types.py +416 -0
  92. coredis/retry.py +233 -0
  93. coredis/sentinel.py +477 -0
  94. coredis/stream.py +369 -0
  95. coredis/tokens.py +2286 -0
  96. coredis/typing.py +593 -0
  97. coredis-5.5.0.dist-info/METADATA +211 -0
  98. coredis-5.5.0.dist-info/RECORD +100 -0
  99. coredis-5.5.0.dist-info/WHEEL +6 -0
  100. coredis-5.5.0.dist-info/licenses/LICENSE +23 -0
coredis/commands/pubsub.py
@@ -0,0 +1,905 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import inspect
5
+ from asyncio import CancelledError
6
+ from contextlib import suppress
7
+ from functools import partial
8
+ from types import TracebackType
9
+ from typing import TYPE_CHECKING, Any, cast
10
+
11
+ import async_timeout
12
+ from deprecated.sphinx import versionadded
13
+
14
+ from coredis._enum import CaseAndEncodingInsensitiveEnum
15
+ from coredis._utils import b, hash_slot, nativestr
16
+ from coredis.commands.constants import CommandName
17
+ from coredis.connection import BaseConnection, Connection
18
+ from coredis.exceptions import ConnectionError, PubSubError, TimeoutError
19
+ from coredis.response.types import PubSubMessage
20
+ from coredis.retry import (
21
+ CompositeRetryPolicy,
22
+ ConstantRetryPolicy,
23
+ NoRetryPolicy,
24
+ RetryPolicy,
25
+ )
26
+ from coredis.typing import (
27
+ AnyStr,
28
+ Awaitable,
29
+ Callable,
30
+ Generator,
31
+ Generic,
32
+ Mapping,
33
+ MutableMapping,
34
+ Parameters,
35
+ RedisValueT,
36
+ ResponsePrimitive,
37
+ ResponseType,
38
+ Self,
39
+ StringT,
40
+ TypeVar,
41
+ )
42
+
43
+ if TYPE_CHECKING:
44
+ import coredis.client
45
+ import coredis.connection
46
+ import coredis.pool
47
+
48
+ T = TypeVar("T")
49
+
50
+
51
+ PoolT = TypeVar("PoolT", bound="coredis.pool.ConnectionPool")
52
+
53
+ #: Callables for message handler callbacks. The callbacks
54
+ #: can be sync or async.
55
+ SubscriptionCallback = Callable[[PubSubMessage], Awaitable[None]] | Callable[[PubSubMessage], None]
56
+
57
+
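# A minimal sketch of handlers that satisfy ``SubscriptionCallback``: the first is a
# plain function, the second a coroutine function. Names and bodies are illustrative
# only and are not part of this module.
def example_sync_handler(message: PubSubMessage) -> None:
    # Invoked inline by the consumer task when a matching message arrives.
    print(message["channel"], message["data"])


async def example_async_handler(message: PubSubMessage) -> None:
    # Awaited by the consumer task, so it should avoid long-blocking work.
    print(message["channel"], message["data"])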
58
+ class PubSubMessageTypes(CaseAndEncodingInsensitiveEnum):
59
+ MESSAGE = b"message"
60
+ PMESSAGE = b"pmessage"
61
+ SMESSAGE = b"smessage"
62
+ SUBSCRIBE = b"subscribe"
63
+ UNSUBSCRIBE = b"unsubscribe"
64
+ PSUBSCRIBE = b"psubscribe"
65
+ PUNSUBSCRIBE = b"punsubscribe"
66
+ SSUBSCRIBE = b"ssubscribe"
67
+ SUNSUBSCRIBE = b"sunsubscribe"
68
+
69
+
70
+ class BasePubSub(Generic[AnyStr, PoolT]):
71
+ PUBLISH_MESSAGE_TYPES = {
72
+ PubSubMessageTypes.MESSAGE.value,
73
+ PubSubMessageTypes.PMESSAGE.value,
74
+ }
75
+ SUBUNSUB_MESSAGE_TYPES = {
76
+ PubSubMessageTypes.SUBSCRIBE.value,
77
+ PubSubMessageTypes.PSUBSCRIBE.value,
78
+ PubSubMessageTypes.UNSUBSCRIBE.value,
79
+ PubSubMessageTypes.PUNSUBSCRIBE.value,
80
+ }
81
+ UNSUBSCRIBE_MESSAGE_TYPES = {
82
+ PubSubMessageTypes.UNSUBSCRIBE.value,
83
+ PubSubMessageTypes.PUNSUBSCRIBE.value,
84
+ }
85
+
86
+ channels: MutableMapping[StringT, SubscriptionCallback | None]
87
+ patterns: MutableMapping[StringT, SubscriptionCallback | None]
88
+
89
+ def __init__(
90
+ self,
91
+ connection_pool: PoolT,
92
+ ignore_subscribe_messages: bool = False,
93
+ retry_policy: RetryPolicy | None = CompositeRetryPolicy(
94
+ ConstantRetryPolicy((ConnectionError,), 3, 0.1),
95
+ ConstantRetryPolicy((TimeoutError,), 2, 0.1),
96
+ ),
97
+ channels: Parameters[StringT] | None = None,
98
+ channel_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
99
+ patterns: Parameters[StringT] | None = None,
100
+ pattern_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
101
+ ):
102
+ self.initialized = False
103
+ self.connection_pool = connection_pool
104
+ self.ignore_subscribe_messages = ignore_subscribe_messages
105
+ self.connection: coredis.connection.Connection | None = None
106
+ self._retry_policy = retry_policy or NoRetryPolicy()
107
+ self._initial_channel_subscriptions = {
108
+ **{nativestr(channel): None for channel in channels or []},
109
+ **{nativestr(k): v for k, v in (channel_handlers or {}).items()},
110
+ }
111
+ self._initial_pattern_subscriptions = {
112
+ **{nativestr(pattern): None for pattern in patterns or []},
113
+ **{nativestr(k): v for k, v in (pattern_handlers or {}).items()},
114
+ }
115
+ self._message_queue: asyncio.Queue[PubSubMessage | None] = asyncio.Queue()
116
+ self._consumer_task: asyncio.Task[None] | None = None
117
+ self._subscribed = asyncio.Event()
118
+ self.reset()
119
+
120
+ @property
121
+ def subscribed(self) -> bool:
122
+ """Indicates if there are subscriptions to any channels or patterns"""
123
+ return bool(self.channels or self.patterns)
124
+
125
+ async def initialize(self) -> Self:
126
+ """
127
+ Ensures the pubsub instance is ready to consume messages
128
+ by establishing a connection to the redis server, setting up any
129
+ initial channel or pattern subscriptions that were specified during
130
+ instantiation and starting the consumer background task.
131
+
132
+ The method is safe to call multiple times since it skips
133
+ initialization if the consumer is already
134
+ initialized.
135
+
136
+ .. important:: This method doesn't need to be called explicitly
137
+ as it will always be called internally before any relevant
138
+ documented interaction.
139
+
140
+ :return: the instance itself
141
+ """
142
+ if not self.initialized:
143
+ self.connection = await self.connection_pool.get_connection()
144
+ self.initialized = True
145
+ if self._initial_channel_subscriptions:
146
+ await self.subscribe(**self._initial_channel_subscriptions)
147
+ if self._initial_pattern_subscriptions:
148
+ await self.psubscribe(**self._initial_pattern_subscriptions)
149
+ self.connection.register_connect_callback(self.on_connect)
150
+ if not self._consumer_task or self._consumer_task.done():
151
+ self._consumer_task = asyncio.create_task(self._consumer())
152
+ return self
153
+
154
+ async def psubscribe(
155
+ self,
156
+ *patterns: StringT,
157
+ **pattern_handlers: SubscriptionCallback | None,
158
+ ) -> None:
159
+ """
160
+ Subscribes to channel patterns. Patterns supplied as keyword arguments
161
+ expect a pattern name as the key and a callable as the value. A
162
+ pattern's callable will be invoked automatically when a message is
163
+ received on that pattern rather than producing a message via
164
+ :meth:`get_message`.
165
+ """
166
+ new_patterns: MutableMapping[StringT, SubscriptionCallback | None] = {}
167
+ new_patterns.update(dict.fromkeys(map(self.encode, patterns)))
168
+
169
+ for pattern, handler in pattern_handlers.items():
170
+ new_patterns[self.encode(pattern)] = handler
171
+ await self.execute_command(CommandName.PSUBSCRIBE, *new_patterns.keys())
172
+ # update the patterns dict AFTER we send the command. we don't want to
173
+ # subscribe twice to these patterns, once for the command and again
174
+ # for the reconnection.
175
+ self.patterns.update(new_patterns)
176
+ self._subscribed.set()
177
+
178
+ async def punsubscribe(self, *patterns: StringT) -> None:
179
+ """
180
+ Unsubscribes from the supplied patterns. If empty, unsubscribes from
181
+ all patterns.
182
+ """
183
+ await self.execute_command(CommandName.PUNSUBSCRIBE, *patterns)
184
+
185
+ async def subscribe(
186
+ self,
187
+ *channels: StringT,
188
+ **channel_handlers: SubscriptionCallback | None,
189
+ ) -> None:
190
+ """
191
+ Subscribes to channels. Channels supplied as keyword arguments expect
192
+ a channel name as the key and a callable as the value. A channel's
193
+ callable will be invoked automatically when a message is received on
194
+ that channel rather than producing a message via
195
+ :meth:`get_message`.
196
+ """
197
+
198
+ new_channels: MutableMapping[StringT, SubscriptionCallback | None] = {}
199
+ new_channels.update(dict.fromkeys(map(self.encode, channels)))
200
+
201
+ for channel, handler in channel_handlers.items():
202
+ new_channels[self.encode(channel)] = handler
203
+ await self.execute_command(CommandName.SUBSCRIBE, *new_channels.keys())
204
+ # update the channels dict AFTER we send the command. we don't want to
205
+ # subscribe twice to these channels, once for the command and again
206
+ # for the reconnection.
207
+ self.channels.update(new_channels)
208
+ self._subscribed.set()
209
+
210
+ async def unsubscribe(self, *channels: StringT) -> None:
211
+ """
212
+ Unsubscribes from the supplied channels. If empty, unsubscribes from
213
+ all channels.
214
+ """
215
+
216
+ await self.execute_command(CommandName.UNSUBSCRIBE, *channels)
217
+
218
+ async def get_message(
219
+ self,
220
+ ignore_subscribe_messages: bool = False,
221
+ timeout: int | float | None = None,
222
+ ) -> PubSubMessage | None:
223
+ """
224
+ Gets the next message if one is available, otherwise None.
225
+
226
+ :param ignore_subscribe_messages: Whether to skip subscription
227
+ acknowledgement messages
228
+ :param timeout: Number of seconds to wait for a message to be available
229
+ on the connection. If ``None``, the command will block forever.
230
+ """
231
+
232
+ try:
233
+ await self.initialize()
234
+ async with async_timeout.timeout(timeout):
235
+ return self._filter_ignored_messages(
236
+ await self._message_queue.get(), ignore_subscribe_messages
237
+ )
238
+ except asyncio.TimeoutError:
239
+ return None
240
+
241
+ async def on_connect(self, connection: BaseConnection) -> None:
242
+ """
243
+ Re-subscribe to any channels and patterns previously subscribed to
244
+
245
+ :meta private:
246
+ """
247
+
248
+ if self.channels:
249
+ await self.subscribe(
250
+ **{
251
+ k.decode(self.connection_pool.encoding) if isinstance(k, bytes) else k: v
252
+ for k, v in self.channels.items()
253
+ }
254
+ )
255
+
256
+ if self.patterns:
257
+ await self.psubscribe(
258
+ **{
259
+ k.decode(self.connection_pool.encoding) if isinstance(k, bytes) else k: v
260
+ for k, v in self.patterns.items()
261
+ }
262
+ )
263
+
264
+ def encode(self, value: StringT) -> StringT:
265
+ """
266
+ Encodes the value so that it's identical to what we'll read off the
267
+ connection
268
+
269
+ :meta private:
270
+ """
271
+
272
+ if self.connection_pool.decode_responses and isinstance(value, bytes):
273
+ value = nativestr(value, self.connection_pool.encoding)
274
+ elif not self.connection_pool.decode_responses and isinstance(value, str):
275
+ value = b(value, self.connection_pool.encoding)
276
+
277
+ return value
278
+
279
+ async def execute_command(
280
+ self, command: bytes, *args: RedisValueT, **options: RedisValueT
281
+ ) -> ResponseType | None:
282
+ """
283
+ Executes a publish/subscribe command
284
+
285
+ :meta private:
286
+ """
287
+ await self.initialize()
288
+
289
+ if self.connection is None:
290
+ self.connection = await self.connection_pool.get_connection()
291
+ self.connection.register_connect_callback(self.on_connect)
292
+ assert self.connection
293
+ return await self._execute(self.connection, self.connection.send_command, command, *args)
294
+
295
+ async def parse_response(
296
+ self, block: bool = True, timeout: float | None = None
297
+ ) -> ResponseType:
298
+ """
299
+ Parses the response from a publish/subscribe command
300
+
301
+ :meta private:
302
+ """
303
+ await self.initialize()
304
+
305
+ assert self.connection
306
+ coro = self._execute(
307
+ self.connection,
308
+ partial(
309
+ self.connection.fetch_push_message,
310
+ block=block,
311
+ push_message_types=self.SUBUNSUB_MESSAGE_TYPES | self.PUBLISH_MESSAGE_TYPES,
312
+ ),
313
+ )
314
+
315
+ try:
316
+ return await asyncio.wait_for(coro, timeout if (timeout and timeout > 0) else None)
317
+ except asyncio.TimeoutError:
318
+ return None
319
+
320
+ async def handle_message(self, response: ResponseType) -> PubSubMessage | None:
321
+ """
322
+ Parses a pub/sub message. If the channel or pattern was subscribed to
323
+ with a message handler, the handler is invoked instead of a parsed
324
+ message being returned.
325
+
326
+ :meta private:
327
+ """
328
+ r = cast(list[ResponsePrimitive], response)
329
+ message_type = b(r[0])
330
+ message_type_str = nativestr(r[0])
331
+ message: PubSubMessage
332
+
333
+ if message_type in self.SUBUNSUB_MESSAGE_TYPES:
334
+ message = PubSubMessage(
335
+ type=message_type_str,
336
+ pattern=cast(StringT, r[1]) if message_type[0] == ord(b"p") else None,
337
+ # This field is populated in all cases for backward compatibility
338
+ # as older versions were incorrectly populating the channel
339
+ # with the pattern on psubscribe/punsubscribe responses.
340
+ channel=cast(StringT, r[1]),
341
+ data=cast(int, r[2]),
342
+ )
343
+
344
+ elif message_type in self.PUBLISH_MESSAGE_TYPES:
345
+ if message_type == PubSubMessageTypes.PMESSAGE:
346
+ message = PubSubMessage(
347
+ type="pmessage",
348
+ pattern=cast(StringT, r[1]),
349
+ channel=cast(StringT, r[2]),
350
+ data=cast(StringT, r[3]),
351
+ )
352
+ else:
353
+ message = PubSubMessage(
354
+ type="message",
355
+ pattern=None,
356
+ channel=cast(StringT, r[1]),
357
+ data=cast(StringT, r[2]),
358
+ )
359
+ else:
360
+ raise PubSubError(f"Unknown message type {message_type_str}") # noqa
361
+
362
+ # if this is an unsubscribe message, remove it from memory
363
+ if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
364
+ if message_type == PubSubMessageTypes.PUNSUBSCRIBE:
365
+ subscribed_dict = self.patterns
366
+ else:
367
+ subscribed_dict = self.channels
368
+ subscribed_dict.pop(message["channel"], None)
369
+
370
+ if message_type in self.PUBLISH_MESSAGE_TYPES:
371
+ handler = None
372
+ if message_type == PubSubMessageTypes.PMESSAGE and message["pattern"]:
373
+ handler = self.patterns.get(message["pattern"], None)
374
+ elif message["channel"]:
375
+ handler = self.channels.get(message["channel"], None)
376
+
377
+ if handler:
378
+ handler_response = handler(message)
379
+ if inspect.isawaitable(handler_response):
380
+ await handler_response
381
+ return None
382
+ if not (self.channels or self.patterns):
383
+ self._subscribed.clear()
384
+
385
+ return message
386
+
387
+ async def _consumer(self) -> None:
388
+ while self.initialized:
389
+ try:
390
+ if self.subscribed:
391
+ if response := await self._retry_policy.call_with_retries(
392
+ lambda: self.parse_response(block=True),
393
+ failure_hook=self.reset_connections,
394
+ ):
395
+ self._message_queue.put_nowait(await self.handle_message(response))
396
+ else:
397
+ await self._subscribed.wait()
398
+ except ConnectionError:
399
+ await asyncio.sleep(0)
400
+
401
+ def _filter_ignored_messages(
402
+ self,
403
+ message: PubSubMessage | None,
404
+ ignore_subscribe_messages: bool = False,
405
+ ) -> PubSubMessage | None:
406
+ if (
407
+ message
408
+ and b(message["type"]) in self.SUBUNSUB_MESSAGE_TYPES
409
+ and (self.ignore_subscribe_messages or ignore_subscribe_messages)
410
+ ):
411
+ return None
412
+ return message
413
+
414
+ async def _execute(
415
+ self,
416
+ connection: BaseConnection,
417
+ command: Callable[..., Awaitable[None]] | Callable[..., Awaitable[ResponseType]],
418
+ *args: RedisValueT,
419
+ ) -> ResponseType | None:
420
+ try:
421
+ return await command(*args)
422
+ except asyncio.CancelledError:
423
+ # do not retry if coroutine is cancelled
424
+ if await connection.can_read(): # noqa
425
+ connection.disconnect()
426
+ raise
427
+
428
+ def __await__(self) -> Generator[Any, None, Self]:
429
+ return self.initialize().__await__()
430
+
431
+ def __aiter__(self) -> Self:
432
+ return self
433
+
434
+ async def __anext__(self) -> PubSubMessage:
435
+ await self.initialize()
436
+ while self.subscribed:
437
+ if message := await self.get_message():
438
+ return message
439
+ else:
440
+ continue
441
+ raise StopAsyncIteration()
442
+
443
+ async def __aenter__(self) -> Self:
444
+ await self.initialize()
445
+ return self
446
+
447
+ async def __aexit__(
448
+ self,
449
+ exc_type: type[BaseException] | None,
450
+ exc_value: BaseException | None,
451
+ traceback: TracebackType | None,
452
+ ) -> None:
453
+ await self.aclose()
454
+
455
+ async def aclose(self) -> None:
456
+ """
457
+ Unsubscribes from any channels or patterns, then closes and returns
458
+ connections to the pool
459
+ """
460
+ if self.connection:
461
+ await self.unsubscribe()
462
+ await self.punsubscribe()
463
+ self.close()
464
+
465
+ def close(self) -> None:
466
+ self.reset()
467
+
468
+ def __del__(self) -> None:
469
+ self.reset()
470
+
471
+ def reset(self) -> None:
472
+ """
473
+ Clear subscriptions and disconnect and release any
474
+ connection(s) back to the connection pool.
475
+
476
+ :meta private:
477
+ """
478
+ if self.connection:
479
+ self.connection.disconnect()
480
+ self.connection.clear_connect_callbacks()
481
+ self.connection_pool.release(self.connection)
482
+ self.connection = None
483
+ if self._consumer_task:
484
+ try:
485
+ self._consumer_task.cancel()
486
+ except RuntimeError: # noqa
487
+ pass
488
+ self._consumer_task = None
489
+
490
+ self.channels = {}
491
+ self.patterns = {}
492
+ self.initialized = False
493
+ self._subscribed.clear()
494
+
495
+ async def reset_connections(self, exc: BaseException | None = None) -> None:
496
+ pass
497
+
498
+
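# A sketch of supplying a custom retry policy instead of the default above; it is
# assumed here that Redis.pubsub() forwards retry_policy (and the other keyword
# arguments of BasePubSub) to this constructor.
async def example_custom_retry_policy() -> None:
    import coredis  # local import keeps the sketch self-contained
    from coredis.exceptions import ConnectionError
    from coredis.retry import ConstantRetryPolicy

    client = coredis.Redis(decode_responses=True)
    # Retry dropped connections five times, half a second apart, before giving up.
    pubsub = client.pubsub(retry_policy=ConstantRetryPolicy((ConnectionError,), 5, 0.5))
    await pubsub.subscribe("events")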
499
+ class PubSub(BasePubSub[AnyStr, "coredis.pool.ConnectionPool"]):
500
+ """
501
+ Pub/Sub implementation to be used with :class:`coredis.Redis`
502
+ that is returned by :meth:`coredis.Redis.pubsub`
503
+
504
+ An instance of this class is both an async context manager (to
505
+ ensure that proper clean up of connections & subscriptions happens automatically)
506
+ and an async iterator to consume messages from channels or patterns that it is
507
+ subscribed to.
508
+
509
+ Recommended use::
510
+
511
+ client = coredis.Redis(decode_responses=True)
512
+ async for message in client.pubsub(
513
+ ignore_subscribe_messages=True,
514
+ channels=["channel-1", "channel-2"]
515
+ ):
516
+ match message["channel"]:
517
+ case "channel-1":
518
+ print("first", message["data"])
519
+ case "channel-2":
520
+ print("second", message["data"])
521
+
522
+ Or to explicitly subscribe::
523
+
524
+ client = coredis.Redis(decode_responses=True)
525
+ pubsub = client.pubsub()
526
+ async with pubsub:
527
+ await pubsub.subscribe("channel-1")
528
+ assert (await pubsub.get_message())["channel"] == "channel-1"
529
+ async for message in pubsub:
530
+ print(message["data"])
531
+
532
+ For more details see :ref:`handbook/pubsub:pubsub`
533
+ """
534
+
535
+
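# A usage sketch for PubSub combining an explicit handler with queue polling via
# get_message(). The channel names and handler are illustrative, and a Redis server
# reachable with coredis' default connection settings is assumed.
async def example_pubsub_usage() -> None:
    import coredis  # local import keeps the sketch self-contained

    def on_alert(message: PubSubMessage) -> None:
        print("alert:", message["data"])

    client = coredis.Redis(decode_responses=True)
    async with client.pubsub() as pubsub:
        # Messages on "alerts" are dispatched to on_alert; messages on "events"
        # are queued for retrieval via get_message() or async iteration.
        await pubsub.subscribe("events", alerts=on_alert)
        # Wait at most one second for a queued message; subscribe acks are filtered to None.
        message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
        if message is not None:
            print(message["channel"], message["data"])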
536
+ class ClusterPubSub(BasePubSub[AnyStr, "coredis.pool.ClusterConnectionPool"]):
537
+ """
538
+ Pub/Sub implementation to be used with :class:`coredis.RedisCluster`
539
+ that is returned by :meth:`coredis.RedisCluster.pubsub`
540
+
541
+ .. note:: This implementation does not particularly benefit from having
542
+ multiple nodes in a cluster as it subscribes to messages sent to channels
543
+ using ``PUBLISH``, which in cluster mode results in the message being
544
+ broadcast to every node in the cluster. For this reason the subscribing
545
+ client can subscribe to any node in the cluster to receive messages sent to
546
+ any channel, which inherently limits the potential for scaling.
547
+
548
+ :redis-version:`7.0` introduces the concept of Sharded Pub/Sub which
549
+ can be accessed by instead using :meth:`coredis.RedisCluster.sharded_pubsub`
550
+ which uses the implementation in :class:`coredis.commands.ShardedPubSub`.
551
+
552
+ An instance of this class is both an async context manager (to
553
+ ensure that proper clean up of connections & subscriptions happens automatically)
554
+ and an async iterator to consume messages from channels or patterns that it is
555
+ subscribed to.
556
+
557
+ For more details see :ref:`handbook/pubsub:cluster pub/sub`
558
+
559
+ """
560
+
561
+ async def execute_command(
562
+ self, command: bytes, *args: RedisValueT, **options: RedisValueT
563
+ ) -> ResponseType | None:
564
+ await self.initialize()
565
+ assert self.connection
566
+ return await self._execute(self.connection, self.connection.send_command, command, *args)
567
+
568
+ async def initialize(self) -> Self:
569
+ """
570
+ Ensures the pubsub instance is ready to consume messages
571
+ by establishing a connection to a random cluster node, setting up any
572
+ initial channel or pattern subscriptions that were specified during
573
+ instantiation and starting the consumer background task.
574
+
575
+ The method is safe to call multiple times since it skips
576
+ initialization if the consumer is already
577
+ initialized.
578
+
579
+ .. important:: This method doesn't need to be called explicitly
580
+ as it will always be called internally before any relevant
581
+ documented interaction.
582
+
583
+ :return: the instance itself
584
+ """
585
+ if not self.initialized:
586
+ if self.connection is None:
587
+ await self.reset_connections(None)
588
+ self.initialized = True
589
+ if self._initial_channel_subscriptions:
590
+ await self.subscribe(**self._initial_channel_subscriptions)
591
+ if self._initial_pattern_subscriptions:
592
+ await self.psubscribe(**self._initial_pattern_subscriptions)
593
+ if not self._consumer_task or self._consumer_task.done():
594
+ self._consumer_task = asyncio.create_task(self._consumer())
595
+ return self
596
+
597
+ async def reset_connections(self, exc: BaseException | None = None) -> None:
598
+ if self.connection:
599
+ self.connection.disconnect()
600
+ self.connection_pool.initialized = False
601
+
602
+ await self.connection_pool.initialize()
603
+
604
+ self.connection = await self.connection_pool.get_connection(b"pubsub")
605
+ self.connection.register_connect_callback(self.on_connect)
606
+
607
+
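# A sketch of consuming messages with ClusterPubSub through the async-iterator
# interface. The node address and channel name are illustrative, and it is assumed
# that RedisCluster.pubsub() forwards these keyword arguments to the constructor.
async def example_cluster_pubsub() -> None:
    import coredis  # local import keeps the sketch self-contained

    cluster = coredis.RedisCluster(host="localhost", port=7000, decode_responses=True)
    async for message in cluster.pubsub(
        ignore_subscribe_messages=True, channels=["broadcast"]
    ):
        # Any node can serve the subscription since PUBLISH is broadcast cluster-wide.
        print(message["channel"], message["data"])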
608
+ @versionadded(version="3.6.0")
609
+ class ShardedPubSub(BasePubSub[AnyStr, "coredis.pool.ClusterConnectionPool"]):
610
+ """
611
+ Sharded Pub/Sub implementation to be used with :class:`coredis.RedisCluster`
612
+ that is returned by :meth:`coredis.RedisCluster.sharded_pubsub`
613
+
614
+ For details about the server architecture refer to the `Redis manual entry
615
+ on Sharded Pub/sub <https://redis.io/docs/manual/pubsub/#sharded-pubsub>`__.
616
+
617
+ New in :redis-version:`7.0.0`
618
+
619
+ .. warning:: Sharded PubSub only supports subscription by channel and does
620
+ **NOT** support pattern-based subscriptions.
621
+
622
+ An instance of this class is both an async context manager (to
623
+ ensure that proper clean up of connections & subscriptions happens automatically)
624
+ and an async iterator to consume messages from channels that it is subscribed to.
625
+
626
+ For more details see :ref:`handbook/pubsub:sharded pub/sub`
627
+ """
628
+
629
+ PUBLISH_MESSAGE_TYPES = {
630
+ PubSubMessageTypes.MESSAGE.value,
631
+ PubSubMessageTypes.SMESSAGE.value,
632
+ }
633
+ SUBUNSUB_MESSAGE_TYPES = {
634
+ PubSubMessageTypes.SSUBSCRIBE.value,
635
+ PubSubMessageTypes.SUNSUBSCRIBE.value,
636
+ }
637
+ UNSUBSCRIBE_MESSAGE_TYPES = {PubSubMessageTypes.SUNSUBSCRIBE.value}
638
+
639
+ def __init__(
640
+ self,
641
+ connection_pool: coredis.pool.ClusterConnectionPool,
642
+ ignore_subscribe_messages: bool = False,
643
+ retry_policy: RetryPolicy | None = None,
644
+ read_from_replicas: bool = False,
645
+ channels: Parameters[StringT] | None = None,
646
+ channel_handlers: Mapping[StringT, SubscriptionCallback] | None = None,
647
+ ):
648
+ self.shard_connections: dict[str, Connection] = {}
649
+ self.channel_connection_mapping: dict[StringT, Connection] = {}
650
+ self.pending_tasks: dict[str, asyncio.Task[ResponseType]] = {}
651
+ self.read_from_replicas = read_from_replicas
652
+ super().__init__(
653
+ connection_pool,
654
+ ignore_subscribe_messages,
655
+ retry_policy,
656
+ channels=channels,
657
+ channel_handlers=channel_handlers,
658
+ )
659
+
660
+ async def subscribe(
661
+ self,
662
+ *channels: StringT,
663
+ **channel_handlers: SubscriptionCallback | None,
664
+ ) -> None:
665
+ """
666
+ :param channels: The shard channels to subscribe to.
667
+ :param channel_handlers: Channels supplied as keyword arguments expect
668
+ a channel name as the key and a callable as the value. A channel's
669
+ callable will be invoked automatically when a message is received on
670
+ that channel rather than producing a message via
671
+ :meth:`get_message`.
672
+ """
673
+
674
+ await self.initialize()
675
+ new_channels: MutableMapping[StringT, SubscriptionCallback | None] = {}
676
+ new_channels.update(dict.fromkeys(map(self.encode, channels)))
677
+
678
+ for channel, handler in channel_handlers.items():
679
+ new_channels[self.encode(channel)] = handler
680
+ for new_channel in new_channels.keys():
681
+ await self.execute_command(CommandName.SSUBSCRIBE, new_channel, sharded=True)
682
+ self.channels.update(new_channels)
683
+ self._subscribed.set()
684
+
685
+ async def unsubscribe(self, *channels: StringT) -> None:
686
+ """
687
+ :param channels: The shard channels to unsubscribe from. If none are provided,
688
+ this will effectively unsubscribe the client from all channels
689
+ previously subscribed to.
690
+ """
691
+
692
+ for channel in channels or list(self.channels.keys()):
693
+ await self.execute_command(CommandName.SUNSUBSCRIBE, channel, sharded=True)
694
+
695
+ async def psubscribe(
696
+ self,
697
+ *patterns: StringT,
698
+ **pattern_handlers: SubscriptionCallback | None,
699
+ ) -> None:
700
+ """
701
+ Not available in sharded pubsub
702
+
703
+ :meta private:
704
+ """
705
+ raise NotImplementedError("Sharded PubSub does not support subscription by pattern")
706
+
707
+ async def punsubscribe(self, *patterns: StringT) -> None:
708
+ """
709
+ Not available in sharded pubsub
710
+
711
+ :meta private:
712
+ """
713
+ raise NotImplementedError("Sharded PubSub does not support subscription by pattern")
714
+
715
+ async def execute_command(
716
+ self, command: bytes, *args: RedisValueT, **options: RedisValueT
717
+ ) -> ResponseType | None:
718
+ await self.initialize()
719
+
720
+ assert isinstance(args[0], (bytes, str))
721
+ channel = nativestr(args[0])
722
+ slot = hash_slot(b(channel))
723
+ node = self.connection_pool.nodes.node_from_slot(slot)
724
+ if node and node.node_id:
725
+ key = node.node_id
726
+ if self.shard_connections.get(key) is None:
727
+ self.shard_connections[key] = await self.connection_pool.get_connection(
728
+ b"pubsub",
729
+ channel=channel,
730
+ node_type="replica" if self.read_from_replicas else "primary",
731
+ )
732
+ # register a callback that re-subscribes to any channels we
733
+ # were listening to when we were disconnected
734
+ self.shard_connections[key].register_connect_callback(self.on_connect)
735
+
736
+ self.channel_connection_mapping[args[0]] = self.shard_connections[key]
737
+ assert self.shard_connections[key]
738
+ return await self._execute(
739
+ self.shard_connections[key],
740
+ self.shard_connections[key].send_command,
741
+ command,
742
+ *args,
743
+ )
744
+ raise PubSubError(f"Unable to determine shard for channel {args[0]!r}")
745
+
746
+ async def initialize(self) -> Self:
747
+ """
748
+ Ensures the sharded pubsub instance is ready to consume messages
749
+ by ensuring the connection pool is initialized, setting up any
750
+ initial channel subscriptions that were specified during
751
+ instantiation and starting the consumer background task.
752
+
753
+ The method is safe to call multiple times since it skips
754
+ initialization if the consumer is already
755
+ initialized.
756
+
757
+ .. important:: This method doesn't need to be called explicitly
758
+ as it will always be called internally before any relevant
759
+ documented interaction.
760
+
761
+ :return: the instance itself
762
+ """
763
+ if not self.initialized:
764
+ await self.connection_pool.initialize()
765
+ self.initialized = True
766
+ if self._initial_channel_subscriptions:
767
+ await self.subscribe(**self._initial_channel_subscriptions)
768
+ if not self._consumer_task or self._consumer_task.done():
769
+ self._consumer_task = asyncio.create_task(self._consumer())
770
+ return self
771
+
772
+ async def reset_connections(self, exc: BaseException | None = None) -> None:
773
+ for connection in self.shard_connections.values():
774
+ connection.disconnect()
775
+ connection.clear_connect_callbacks()
776
+ self.connection_pool.release(connection)
777
+ self.shard_connections.clear()
778
+ for _, task in self.pending_tasks.items():
779
+ if not task.done():
780
+ task.cancel()
781
+ with suppress(CancelledError):
782
+ await task
783
+ self.pending_tasks.clear()
784
+ self.connection_pool.disconnect()
785
+ self.connection_pool.reset()
786
+ self.connection_pool.initialized = False
787
+ await self.connection_pool.initialize()
788
+ for channel in self.channels:
789
+ slot = hash_slot(b(channel))
790
+ node = self.connection_pool.nodes.node_from_slot(slot)
791
+ if node and node.node_id:
792
+ key = node.node_id
793
+ self.shard_connections[key] = await self.connection_pool.get_connection(
794
+ b"pubsub",
795
+ channel=channel,
796
+ node_type="replica" if self.read_from_replicas else "primary",
797
+ )
798
+ # register a callback that re-subscribes to any channels we
799
+ # were listening to when we were disconnected
800
+ self.shard_connections[key].register_connect_callback(self.on_connect)
801
+ self.channel_connection_mapping[channel] = self.shard_connections[key]
802
+
803
+ async def parse_response(
804
+ self, block: bool = True, timeout: float | None = None
805
+ ) -> ResponseType:
806
+ if not self.shard_connections:
807
+ raise RuntimeError(
808
+ "pubsub connection not set: did you forget to call subscribe() or psubscribe()?"
809
+ )
810
+ result = None
811
+ # Check any stashed results first.
812
+ if self.pending_tasks:
813
+ for node_id, task in list(self.pending_tasks.items()):
814
+ self.pending_tasks.pop(node_id)
815
+ if task.done():
816
+ result = task.result()
817
+ break
818
+ else:
819
+ done, pending = await asyncio.wait(
820
+ [task],
821
+ timeout=0.001,
822
+ return_when=asyncio.FIRST_COMPLETED,
823
+ )
824
+ if done:
825
+ result = done.pop().result()
826
+ break
827
+ else:
828
+ task.cancel()
829
+ with suppress(CancelledError):
830
+ await task
831
+ # If there were no pending results check the shards
832
+ if not result:
833
+ broken_connections = [c for c in self.shard_connections.values() if not c.is_connected]
834
+ if broken_connections:
835
+ for connection in broken_connections:
836
+ try:
837
+ await connection.connect()
838
+ except: # noqa
839
+ raise ConnectionError("Shard connections not stable")
840
+ tasks: dict[str, asyncio.Task[ResponseType]] = {
841
+ node_id: asyncio.create_task(
842
+ connection.fetch_push_message(
843
+ push_message_types=self.SUBUNSUB_MESSAGE_TYPES | self.PUBLISH_MESSAGE_TYPES,
844
+ ),
845
+ )
846
+ for node_id, connection in self.shard_connections.items()
847
+ if node_id not in self.pending_tasks
848
+ }
849
+ if tasks:
850
+ done, pending = await asyncio.wait(
851
+ tasks.values(),
852
+ timeout=timeout if (timeout and timeout > 0) else None,
853
+ return_when=asyncio.FIRST_COMPLETED,
854
+ )
855
+ if done:
856
+ done_task = done.pop()
857
+ result = done_task.result()
858
+
859
+ # Stash any other tasks for the next iteration
860
+ for task in list(done) + list(pending):
861
+ for node_id, scheduled in tasks.items():
862
+ if task == scheduled:
863
+ self.pending_tasks[node_id] = task
864
+ return result
865
+
866
+ async def on_connect(self, connection: BaseConnection) -> None:
867
+ """
868
+ Re-subscribe to any channels previously subscribed to
869
+
870
+ :meta private:
871
+ """
872
+ for channel, handler in self.channels.items():
873
+ if self.channel_connection_mapping[channel] == connection:
874
+ await self.subscribe(
875
+ **{
876
+ (
877
+ channel.decode(self.connection_pool.encoding)
878
+ if isinstance(channel, bytes)
879
+ else channel
880
+ ): handler
881
+ }
882
+ )
883
+
884
+ def reset(self) -> None:
885
+ for connection in self.shard_connections.values():
886
+ connection.disconnect()
887
+ connection.clear_connect_callbacks()
888
+ self.connection_pool.release(connection)
889
+ for _, task in self.pending_tasks.items():
890
+ task.cancel()
891
+ self.pending_tasks.clear()
892
+ self.shard_connections.clear()
893
+ self.channels = {}
894
+ self.patterns = {}
895
+ self.initialized = False
896
+ self._subscribed.clear()
897
+
898
+ async def aclose(self) -> None:
899
+ """
900
+ Unsubscribes from any channels, then closes and returns
901
+ connections to the pool
902
+ """
903
+ if self.shard_connections:
904
+ await self.unsubscribe()
905
+ self.close()
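# A sketch of consuming sharded messages via RedisCluster.sharded_pubsub(). Sharded
# Pub/Sub is channel-only (no pattern subscriptions), and each channel is served by
# the shard that owns its hash slot. The node address and channel name are
# illustrative, and it is assumed that sharded_pubsub() forwards these keyword
# arguments to ShardedPubSub.
async def example_sharded_pubsub() -> None:
    import coredis  # local import keeps the sketch self-contained

    cluster = coredis.RedisCluster(host="localhost", port=7000, decode_responses=True)
    async with cluster.sharded_pubsub(
        ignore_subscribe_messages=True, channels=["orders"]
    ) as sharded:
        # Messages arrive over the connection to the shard that owns "orders".
        async for message in sharded:
            print(message["channel"], message["data"])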