taskiq-redis 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
taskiq_redis/__init__.py CHANGED
@@ -1,14 +1,19 @@
 """Package for redis integration."""
+
 from taskiq_redis.redis_backend import (
     RedisAsyncClusterResultBackend,
     RedisAsyncResultBackend,
     RedisAsyncSentinelResultBackend,
 )
-from taskiq_redis.redis_broker import ListQueueBroker, PubSubBroker
-from taskiq_redis.redis_cluster_broker import ListQueueClusterBroker
+from taskiq_redis.redis_broker import ListQueueBroker, PubSubBroker, RedisStreamBroker
+from taskiq_redis.redis_cluster_broker import (
+    ListQueueClusterBroker,
+    RedisStreamClusterBroker,
+)
 from taskiq_redis.redis_sentinel_broker import (
     ListQueueSentinelBroker,
     PubSubSentinelBroker,
+    RedisStreamSentinelBroker,
 )
 from taskiq_redis.schedule_source import (
     RedisClusterScheduleSource,
@@ -17,15 +22,18 @@ from taskiq_redis.schedule_source import (
 )
 
 __all__ = [
-    "RedisAsyncClusterResultBackend",
-    "RedisAsyncResultBackend",
-    "RedisAsyncSentinelResultBackend",
     "ListQueueBroker",
-    "PubSubBroker",
     "ListQueueClusterBroker",
     "ListQueueSentinelBroker",
+    "PubSubBroker",
     "PubSubSentinelBroker",
-    "RedisScheduleSource",
+    "RedisAsyncClusterResultBackend",
+    "RedisAsyncResultBackend",
+    "RedisAsyncSentinelResultBackend",
     "RedisClusterScheduleSource",
+    "RedisScheduleSource",
     "RedisSentinelScheduleSource",
+    "RedisStreamBroker",
+    "RedisStreamClusterBroker",
+    "RedisStreamSentinelBroker",
 ]
taskiq_redis/exceptions.py CHANGED
@@ -8,10 +8,16 @@ class TaskIQRedisError(TaskiqError):
 class DuplicateExpireTimeSelectedError(ResultBackendError, TaskIQRedisError):
     """Error if two lifetimes are selected."""
 
+    __template__ = "Choose either result_ex_time or result_px_time."
+
 
 class ExpireTimeMustBeMoreThanZeroError(ResultBackendError, TaskIQRedisError):
     """Error if two lifetimes are less or equal zero."""
 
+    __template__ = (
+        "You must select one expire time param and it must be more than zero."
+    )
+
 
 class ResultIsMissingError(TaskIQRedisError, ResultGetError):
     """Error if there is no result when trying to get it."""
taskiq_redis/redis_backend.py CHANGED
@@ -16,10 +16,10 @@ from redis.asyncio import BlockingConnectionPool, Redis, Sentinel
 from redis.asyncio.cluster import RedisCluster
 from redis.asyncio.connection import Connection
 from taskiq import AsyncResultBackend
-from taskiq.abc.result_backend import TaskiqResult
 from taskiq.abc.serializer import TaskiqSerializer
 from taskiq.compat import model_dump, model_validate
 from taskiq.depends.progress_tracker import TaskProgress
+from taskiq.result import TaskiqResult
 from taskiq.serializers import PickleSerializer
 
 from taskiq_redis.exceptions import (
@@ -34,8 +34,8 @@ else:
     from typing_extensions import TypeAlias
 
 if TYPE_CHECKING:
-    _Redis: TypeAlias = Redis[bytes]
-    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]
+    _Redis: TypeAlias = Redis[bytes]  # type: ignore
+    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]  # type: ignore
 else:
     _Redis: TypeAlias = Redis
     _BlockingConnectionPool: TypeAlias = BlockingConnectionPool
@@ -56,6 +56,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         result_px_time: Optional[int] = None,
         max_connection_pool_size: Optional[int] = None,
         serializer: Optional[TaskiqSerializer] = None,
+        prefix_str: Optional[str] = None,
         **connection_kwargs: Any,
     ) -> None:
         """
@@ -82,6 +83,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         self.keep_results = keep_results
         self.result_ex_time = result_ex_time
         self.result_px_time = result_px_time
+        self.prefix_str = prefix_str
 
         unavailable_conditions = any(
             (
@@ -90,14 +92,15 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
             ),
         )
         if unavailable_conditions:
-            raise ExpireTimeMustBeMoreThanZeroError(
-                "You must select one expire time param and it must be more than zero.",
-            )
+            raise ExpireTimeMustBeMoreThanZeroError
 
         if self.result_ex_time and self.result_px_time:
-            raise DuplicateExpireTimeSelectedError(
-                "Choose either result_ex_time or result_px_time.",
-            )
+            raise DuplicateExpireTimeSelectedError
+
+    def _task_name(self, task_id: str) -> str:
+        if self.prefix_str is None:
+            return task_id
+        return f"{self.prefix_str}:{task_id}"
 
     async def shutdown(self) -> None:
         """Closes redis connection."""
@@ -119,7 +122,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         :param result: TaskiqResult instance.
         """
         redis_set_params: Dict[str, Union[str, int, bytes]] = {
-            "name": task_id,
+            "name": self._task_name(task_id),
             "value": self.serializer.dumpb(model_dump(result)),
         }
         if self.result_ex_time:
@@ -139,7 +142,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         :returns: True if the result is ready else False.
         """
         async with Redis(connection_pool=self.redis_pool) as redis:
-            return bool(await redis.exists(task_id))
+            return bool(await redis.exists(self._task_name(task_id)))
 
     async def get_result(
         self,
@@ -154,14 +157,15 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         :raises ResultIsMissingError: if there is no result when trying to get it.
         :return: task's return value.
         """
+        task_name = self._task_name(task_id)
         async with Redis(connection_pool=self.redis_pool) as redis:
             if self.keep_results:
                 result_value = await redis.get(
-                    name=task_id,
+                    name=task_name,
                 )
             else:
                 result_value = await redis.getdel(
-                    name=task_id,
+                    name=task_name,
                 )
 
         if result_value is None:
@@ -192,7 +196,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         :param result: task's TaskProgress instance.
         """
         redis_set_params: Dict[str, Union[str, int, bytes]] = {
-            "name": task_id + PROGRESS_KEY_SUFFIX,
+            "name": self._task_name(task_id) + PROGRESS_KEY_SUFFIX,
             "value": self.serializer.dumpb(model_dump(progress)),
         }
         if self.result_ex_time:
@@ -215,7 +219,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         """
         async with Redis(connection_pool=self.redis_pool) as redis:
             result_value = await redis.get(
-                name=task_id + PROGRESS_KEY_SUFFIX,
+                name=self._task_name(task_id) + PROGRESS_KEY_SUFFIX,
             )
 
         if result_value is None:
@@ -237,6 +241,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         result_ex_time: Optional[int] = None,
         result_px_time: Optional[int] = None,
         serializer: Optional[TaskiqSerializer] = None,
+        prefix_str: Optional[str] = None,
         **connection_kwargs: Any,
     ) -> None:
         """
@@ -253,7 +258,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         :raises ExpireTimeMustBeMoreThanZeroError: if result_ex_time
             and result_px_time are equal zero.
         """
-        self.redis: RedisCluster[bytes] = RedisCluster.from_url(
+        self.redis: "RedisCluster" = RedisCluster.from_url(
             redis_url,
             **connection_kwargs,
         )
@@ -261,6 +266,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         self.keep_results = keep_results
         self.result_ex_time = result_ex_time
         self.result_px_time = result_px_time
+        self.prefix_str = prefix_str
 
         unavailable_conditions = any(
             (
@@ -269,18 +275,19 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
             ),
         )
         if unavailable_conditions:
-            raise ExpireTimeMustBeMoreThanZeroError(
-                "You must select one expire time param and it must be more than zero.",
-            )
+            raise ExpireTimeMustBeMoreThanZeroError
 
         if self.result_ex_time and self.result_px_time:
-            raise DuplicateExpireTimeSelectedError(
-                "Choose either result_ex_time or result_px_time.",
-            )
+            raise DuplicateExpireTimeSelectedError
+
+    def _task_name(self, task_id: str) -> str:
+        if self.prefix_str is None:
+            return task_id
+        return f"{self.prefix_str}:{task_id}"
 
     async def shutdown(self) -> None:
         """Closes redis connection."""
-        await self.redis.aclose()  # type: ignore[attr-defined]
+        await self.redis.aclose()
         await super().shutdown()
 
     async def set_result(
@@ -298,7 +305,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         :param result: TaskiqResult instance.
         """
         redis_set_params: Dict[str, Union[str, bytes, int]] = {
-            "name": task_id,
+            "name": self._task_name(task_id),
             "value": self.serializer.dumpb(model_dump(result)),
         }
         if self.result_ex_time:
@@ -316,7 +323,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
 
         :returns: True if the result is ready else False.
         """
-        return bool(await self.redis.exists(task_id))  # type: ignore[attr-defined]
+        return bool(await self.redis.exists(self._task_name(task_id)))
 
     async def get_result(
         self,
@@ -331,13 +338,14 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         :raises ResultIsMissingError: if there is no result when trying to get it.
         :return: task's return value.
         """
+        task_name = self._task_name(task_id)
         if self.keep_results:
-            result_value = await self.redis.get(  # type: ignore[attr-defined]
-                name=task_id,
+            result_value = await self.redis.get(
+                name=task_name,
             )
         else:
-            result_value = await self.redis.getdel(  # type: ignore[attr-defined]
-                name=task_id,
+            result_value = await self.redis.getdel(
+                name=task_name,
             )
 
         if result_value is None:
@@ -368,7 +376,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         :param result: task's TaskProgress instance.
         """
         redis_set_params: Dict[str, Union[str, int, bytes]] = {
-            "name": task_id + PROGRESS_KEY_SUFFIX,
+            "name": self._task_name(task_id) + PROGRESS_KEY_SUFFIX,
             "value": self.serializer.dumpb(model_dump(progress)),
         }
         if self.result_ex_time:
@@ -388,8 +396,8 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         :param task_id: task's id.
         :return: task's TaskProgress instance.
         """
-        result_value = await self.redis.get(  # type: ignore[attr-defined]
-            name=task_id + PROGRESS_KEY_SUFFIX,
+        result_value = await self.redis.get(
+            name=self._task_name(task_id) + PROGRESS_KEY_SUFFIX,
         )
 
         if result_value is None:
@@ -414,6 +422,7 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         min_other_sentinels: int = 0,
         sentinel_kwargs: Optional[Any] = None,
         serializer: Optional[TaskiqSerializer] = None,
+        prefix_str: Optional[str] = None,
         **connection_kwargs: Any,
     ) -> None:
         """
@@ -443,6 +452,7 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         self.keep_results = keep_results
         self.result_ex_time = result_ex_time
         self.result_px_time = result_px_time
+        self.prefix_str = prefix_str
 
         unavailable_conditions = any(
             (
@@ -451,14 +461,15 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
             ),
         )
         if unavailable_conditions:
-            raise ExpireTimeMustBeMoreThanZeroError(
-                "You must select one expire time param and it must be more than zero.",
-            )
+            raise ExpireTimeMustBeMoreThanZeroError
 
         if self.result_ex_time and self.result_px_time:
-            raise DuplicateExpireTimeSelectedError(
-                "Choose either result_ex_time or result_px_time.",
-            )
+            raise DuplicateExpireTimeSelectedError
+
+    def _task_name(self, task_id: str) -> str:
+        if self.prefix_str is None:
+            return task_id
+        return f"{self.prefix_str}:{task_id}"
 
     @asynccontextmanager
     async def _acquire_master_conn(self) -> AsyncIterator[_Redis]:
@@ -480,7 +491,7 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         :param result: TaskiqResult instance.
         """
         redis_set_params: Dict[str, Union[str, bytes, int]] = {
-            "name": task_id,
+            "name": self._task_name(task_id),
             "value": self.serializer.dumpb(model_dump(result)),
         }
         if self.result_ex_time:
@@ -500,7 +511,7 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         :returns: True if the result is ready else False.
         """
         async with self._acquire_master_conn() as redis:
-            return bool(await redis.exists(task_id))
+            return bool(await redis.exists(self._task_name(task_id)))
 
     async def get_result(
         self,
@@ -515,14 +526,15 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         :raises ResultIsMissingError: if there is no result when trying to get it.
         :return: task's return value.
         """
+        task_name = self._task_name(task_id)
         async with self._acquire_master_conn() as redis:
             if self.keep_results:
                 result_value = await redis.get(
-                    name=task_id,
+                    name=task_name,
                 )
             else:
                 result_value = await redis.getdel(
-                    name=task_id,
+                    name=task_name,
                 )
 
         if result_value is None:
@@ -553,7 +565,7 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         :param result: task's TaskProgress instance.
         """
         redis_set_params: Dict[str, Union[str, int, bytes]] = {
-            "name": task_id + PROGRESS_KEY_SUFFIX,
+            "name": self._task_name(task_id) + PROGRESS_KEY_SUFFIX,
             "value": self.serializer.dumpb(model_dump(progress)),
         }
         if self.result_ex_time:
@@ -576,7 +588,7 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
         """
         async with self._acquire_master_conn() as redis:
             result_value = await redis.get(
-                name=task_id + PROGRESS_KEY_SUFFIX,
+                name=self._task_name(task_id) + PROGRESS_KEY_SUFFIX,
             )
 
         if result_value is None:
@@ -590,4 +602,4 @@ class RedisAsyncSentinelResultBackend(AsyncResultBackend[_ReturnType]):
     async def shutdown(self) -> None:
         """Shutdown sentinel connections."""
         for sentinel in self.sentinel.sentinels:
-            await sentinel.aclose()  # type: ignore[attr-defined]
+            await sentinel.aclose()
taskiq_redis/redis_broker.py CHANGED
@@ -1,8 +1,19 @@
 import sys
+import uuid
 from logging import getLogger
-from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Optional, TypeVar
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncGenerator,
+    Awaitable,
+    Callable,
+    Dict,
+    Optional,
+    TypeVar,
+)
 
-from redis.asyncio import BlockingConnectionPool, Connection, Redis
+from redis.asyncio import BlockingConnectionPool, Connection, Redis, ResponseError
+from taskiq import AckableMessage
 from taskiq.abc.broker import AsyncBroker
 from taskiq.abc.result_backend import AsyncResultBackend
 from taskiq.message import BrokerMessage
@@ -17,7 +28,7 @@ else:
     from typing_extensions import TypeAlias
 
 if TYPE_CHECKING:
-    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]
+    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]  # type: ignore
 else:
     _BlockingConnectionPool: TypeAlias = BlockingConnectionPool
 
@@ -111,7 +122,7 @@ class ListQueueBroker(BaseRedisBroker):
         """
         queue_name = message.labels.get("queue_name") or self.queue_name
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
-            await redis_conn.lpush(queue_name, message.message)
+            await redis_conn.lpush(queue_name, message.message)  # type: ignore
 
     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -126,9 +137,135 @@ class ListQueueBroker(BaseRedisBroker):
         while True:
             try:
                 async with Redis(connection_pool=self.connection_pool) as redis_conn:
-                    yield (await redis_conn.brpop(self.queue_name))[
+                    yield (await redis_conn.brpop(self.queue_name))[  # type: ignore
                         redis_brpop_data_position
                     ]
             except ConnectionError as exc:
                 logger.warning("Redis connection error: %s", exc)
                 continue
+
+
+class RedisStreamBroker(BaseRedisBroker):
+    """
+    Redis broker that uses streams for task distribution.
+
+    You can read more about streams here:
+    https://redis.io/docs/latest/develop/data-types/streams
+
+    This broker supports acknowledgment of messages.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        queue_name: str = "taskiq",
+        max_connection_pool_size: Optional[int] = None,
+        consumer_group_name: str = "taskiq",
+        consumer_name: Optional[str] = None,
+        consumer_id: str = "$",
+        mkstream: bool = True,
+        xread_block: int = 10000,
+        additional_streams: Optional[Dict[str, str]] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        """
+        Constructs a new broker that uses streams.
+
+        :param url: url to redis.
+        :param queue_name: name for a key with stream in redis.
+        :param max_connection_pool_size: maximum number of connections in pool.
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param consumer_group_name: name for a consumer group.
+            Redis will keep track of acked messages for this group.
+        :param consumer_name: name for a consumer. By default it is a random uuid.
+        :param consumer_id: id for a consumer. ID of a message to start reading from.
+            $ means start from the latest message.
+        :param mkstream: create stream if it does not exist.
+        :param xread_block: block time in ms for xreadgroup.
+            Better to set it to a bigger value, to avoid unnecessary calls.
+        :param additional_streams: additional streams to read from.
+            Each key is a stream name, value is a consumer id.
+        """
+        super().__init__(
+            url,
+            task_id_generator=None,
+            result_backend=None,
+            queue_name=queue_name,
+            max_connection_pool_size=max_connection_pool_size,
+            **connection_kwargs,
+        )
+        self.consumer_group_name = consumer_group_name
+        self.consumer_name = consumer_name or str(uuid.uuid4())
+        self.consumer_id = consumer_id
+        self.mkstream = mkstream
+        self.block = xread_block
+        self.additional_streams = additional_streams or {}
+
+    async def _declare_consumer_group(self) -> None:
+        """
+        Declare consumer group.
+
+        Required for proper work of the broker.
+        """
+        streams = {self.queue_name, *self.additional_streams.keys()}
+        async with Redis(connection_pool=self.connection_pool) as redis_conn:
+            for stream_name in streams:
+                try:
+                    await redis_conn.xgroup_create(
+                        stream_name,
+                        self.consumer_group_name,
+                        id=self.consumer_id,
+                        mkstream=self.mkstream,
+                    )
+                except ResponseError as err:
+                    logger.debug(err)
+
+    async def startup(self) -> None:
+        """Declare consumer group on startup."""
+        await super().startup()
+        await self._declare_consumer_group()
+
+    async def kick(self, message: BrokerMessage) -> None:
+        """
+        Put a message in the stream.
+
+        This method appends a message to the stream of all messages.
+
+        :param message: message to append.
+        """
+        async with Redis(connection_pool=self.connection_pool) as redis_conn:
+            await redis_conn.xadd(self.queue_name, {b"data": message.message})
+
+    def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
+        async def _ack() -> None:
+            async with Redis(connection_pool=self.connection_pool) as redis_conn:
+                await redis_conn.xack(
+                    self.queue_name,
+                    self.consumer_group_name,
+                    id,
+                )
+
+        return _ack
+
+    async def listen(self) -> AsyncGenerator[AckableMessage, None]:
+        """Listen to incoming messages."""
+        async with Redis(connection_pool=self.connection_pool) as redis_conn:
+            while True:
+                fetched = await redis_conn.xreadgroup(
+                    self.consumer_group_name,
+                    self.consumer_name,
+                    {
+                        self.queue_name: ">",
+                        **self.additional_streams,  # type: ignore
+                    },
+                    block=self.block,
+                    noack=False,
+                )
+                for _, msg_list in fetched:
+                    for msg_id, msg in msg_list:
+                        logger.debug("Received message: %s", msg)
+                        yield AckableMessage(
+                            data=msg[b"data"],
+                            ack=self._ack_generator(msg_id),
+                        )
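For orientation, a sketch of how this new broker is typically wired up, using only the constructor arguments shown above (the stream and group names are hypothetical):

```python
from taskiq_redis import RedisStreamBroker

broker = RedisStreamBroker(
    url="redis://localhost:6379",
    queue_name="my_stream",          # hypothetical stream key
    consumer_group_name="my_group",  # hypothetical consumer group
    xread_block=10_000,              # block up to 10 s per XREADGROUP call
)


@broker.task
async def my_task() -> None:
    ...
```

Because `listen` yields `AckableMessage` objects whose `ack` callback issues `XACK`, a message stays pending in the consumer group until the worker finishes it; a crashed worker leaves it un-acked instead of losing it.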
taskiq_redis/redis_cluster_broker.py CHANGED
@@ -1,9 +1,14 @@
-from typing import Any, AsyncGenerator
+import uuid
+from logging import getLogger
+from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, Optional
 
-from redis.asyncio import RedisCluster
+from redis.asyncio import RedisCluster, ResponseError
+from taskiq import AckableMessage
 from taskiq.abc.broker import AsyncBroker
 from taskiq.message import BrokerMessage
 
+logger = getLogger(__name__)
+
 
 class BaseRedisClusterBroker(AsyncBroker):
     """Base broker that works with Redis Cluster."""
@@ -25,7 +30,7 @@ class BaseRedisClusterBroker(AsyncBroker):
         """
         super().__init__()
 
-        self.redis: RedisCluster[bytes] = RedisCluster.from_url(
+        self.redis: "RedisCluster[bytes]" = RedisCluster.from_url(  # type: ignore
             url=url,
             max_connections=max_connection_pool_size,
             **connection_kwargs,
@@ -35,7 +40,7 @@ class BaseRedisClusterBroker(AsyncBroker):
 
     async def shutdown(self) -> None:
         """Closes redis connection pool."""
-        await self.redis.aclose()  # type: ignore[attr-defined]
+        await self.redis.aclose()
         await super().shutdown()
 
 
@@ -50,7 +55,7 @@ class ListQueueClusterBroker(BaseRedisClusterBroker):
 
         :param message: message to append.
         """
-        await self.redis.lpush(self.queue_name, message.message)  # type: ignore[attr-defined]
+        await self.redis.lpush(self.queue_name, message.message)  # type: ignore
 
     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -63,5 +68,121 @@ class ListQueueClusterBroker(BaseRedisClusterBroker):
         """
         redis_brpop_data_position = 1
         while True:
-            value = await self.redis.brpop([self.queue_name])  # type: ignore[attr-defined]
+            value = await self.redis.brpop([self.queue_name])  # type: ignore
             yield value[redis_brpop_data_position]
+
+
+class RedisStreamClusterBroker(BaseRedisClusterBroker):
+    """
+    Redis broker that uses streams for task distribution.
+
+    You can read more about streams here:
+    https://redis.io/docs/latest/develop/data-types/streams
+
+    This broker supports acknowledgment of messages.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        queue_name: str = "taskiq",
+        max_connection_pool_size: int = 2**31,
+        consumer_group_name: str = "taskiq",
+        consumer_name: Optional[str] = None,
+        consumer_id: str = "$",
+        mkstream: bool = True,
+        xread_block: int = 10000,
+        additional_streams: Optional[Dict[str, str]] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        """
+        Constructs a new broker that uses streams.
+
+        :param url: url to redis.
+        :param queue_name: name for a key with stream in redis.
+        :param max_connection_pool_size: maximum number of connections in pool.
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param consumer_group_name: name for a consumer group.
+            Redis will keep track of acked messages for this group.
+        :param consumer_name: name for a consumer. By default it is a random uuid.
+        :param consumer_id: id for a consumer. ID of a message to start reading from.
+            $ means start from the latest message.
+        :param mkstream: create stream if it does not exist.
+        :param xread_block: block time in ms for xreadgroup.
+            Better to set it to a bigger value, to avoid unnecessary calls.
+        :param additional_streams: additional streams to read from.
+            Each key is a stream name, value is a consumer id.
+        """
+        super().__init__(
+            url,
+            queue_name=queue_name,
+            max_connection_pool_size=max_connection_pool_size,
+            **connection_kwargs,
+        )
+        self.consumer_group_name = consumer_group_name
+        self.consumer_name = consumer_name or str(uuid.uuid4())
+        self.consumer_id = consumer_id
+        self.mkstream = mkstream
+        self.block = xread_block
+        self.additional_streams = additional_streams or {}
+
+    async def _declare_consumer_group(self) -> None:
+        streams = {self.queue_name, *self.additional_streams.keys()}
+        async with self.redis as redis_conn:
+            for stream_name in streams:
+                try:
+                    await redis_conn.xgroup_create(
+                        stream_name,
+                        self.consumer_group_name,
+                        id=self.consumer_id,
+                        mkstream=self.mkstream,
+                    )
+                except ResponseError as err:
+                    logger.debug(err)
+
+    async def startup(self) -> None:
+        """Declare consumer group on startup."""
+        await super().startup()
+        await self._declare_consumer_group()
+
+    async def kick(self, message: BrokerMessage) -> None:
+        """
+        Put a message in the stream.
+
+        This method appends a message to the stream of all messages.
+
+        :param message: message to append.
+        """
+        await self.redis.xadd(self.queue_name, {b"data": message.message})
+
+    def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
+        async def _ack() -> None:
+            await self.redis.xack(
+                self.queue_name,
+                self.consumer_group_name,
+                id,
+            )
+
+        return _ack
+
+    async def listen(self) -> AsyncGenerator[AckableMessage, None]:
+        """Listen to the stream for new messages."""
+        while True:
+            fetched = await self.redis.xreadgroup(
+                self.consumer_group_name,
+                self.consumer_name,
+                {
+                    self.queue_name: ">",
+                    **self.additional_streams,  # type: ignore
+                },
+                block=self.block,
+                noack=False,
+            )
+            for _, msg_list in fetched:
+                for msg_id, msg in msg_list:
+                    logger.debug("Received message: %s", msg)
+                    yield AckableMessage(
+                        data=msg[b"data"],
+                        ack=self._ack_generator(msg_id),
+                    )
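The cluster variant mirrors the single-node stream broker but issues its commands through a `RedisCluster` client; a minimal sketch (the node address is hypothetical):

```python
from taskiq_redis import RedisStreamClusterBroker

broker = RedisStreamClusterBroker(
    url="redis://localhost:7000",  # any reachable cluster node (hypothetical)
    queue_name="my_stream",        # hypothetical stream key
)
```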
taskiq_redis/redis_sentinel_broker.py CHANGED
@@ -1,4 +1,5 @@
 import sys
+import uuid
 from contextlib import asynccontextmanager
 from logging import getLogger
 from typing import (
@@ -6,15 +7,18 @@ from typing import (
     Any,
     AsyncGenerator,
     AsyncIterator,
+    Awaitable,
     Callable,
+    Dict,
     List,
     Optional,
     Tuple,
     TypeVar,
 )
 
+from redis import ResponseError
 from redis.asyncio import Redis, Sentinel
-from taskiq import AsyncResultBackend, BrokerMessage
+from taskiq import AckableMessage, AsyncResultBackend, BrokerMessage
 from taskiq.abc.broker import AsyncBroker
 
 if sys.version_info >= (3, 10):
@@ -23,7 +27,7 @@ else:
     from typing_extensions import TypeAlias
 
 if TYPE_CHECKING:
-    _Redis: TypeAlias = Redis[bytes]
+    _Redis: TypeAlias = Redis[bytes]  # type: ignore
 else:
     _Redis: TypeAlias = Redis
 
@@ -113,7 +117,7 @@ class ListQueueSentinelBroker(BaseSentinelBroker):
         """
         queue_name = message.labels.get("queue_name") or self.queue_name
         async with self._acquire_master_conn() as redis_conn:
-            await redis_conn.lpush(queue_name, message.message)
+            await redis_conn.lpush(queue_name, message.message)  # type: ignore
 
     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -127,6 +131,129 @@ class ListQueueSentinelBroker(BaseSentinelBroker):
         redis_brpop_data_position = 1
         async with self._acquire_master_conn() as redis_conn:
             while True:
-                yield (await redis_conn.brpop(self.queue_name))[
+                yield (await redis_conn.brpop(self.queue_name))[  # type: ignore
                     redis_brpop_data_position
                 ]
+
+
+class RedisStreamSentinelBroker(BaseSentinelBroker):
+    """
+    Redis broker that uses streams for task distribution.
+
+    You can read more about streams here:
+    https://redis.io/docs/latest/develop/data-types/streams
+
+    This broker supports acknowledgment of messages.
+    """
+
+    def __init__(
+        self,
+        sentinels: List[Tuple[str, int]],
+        master_name: str,
+        min_other_sentinels: int = 0,
+        queue_name: str = "taskiq",
+        consumer_group_name: str = "taskiq",
+        consumer_name: Optional[str] = None,
+        consumer_id: str = "$",
+        mkstream: bool = True,
+        xread_block: int = 10000,
+        additional_streams: Optional[Dict[str, str]] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        """
+        Constructs a new broker that uses streams.
+
+        :param sentinels: list of nodes to connect to.
+        :param queue_name: name for a key with stream in redis.
+        :param max_connection_pool_size: maximum number of connections in pool.
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param consumer_group_name: name for a consumer group.
+            Redis will keep track of acked messages for this group.
+        :param consumer_name: name for a consumer. By default it is a random uuid.
+        :param consumer_id: id for a consumer. ID of a message to start reading from.
+            $ means start from the latest message.
+        :param mkstream: create stream if it does not exist.
+        :param xread_block: block time in ms for xreadgroup.
+            Better to set it to a bigger value, to avoid unnecessary calls.
+        :param additional_streams: additional streams to read from.
+            Each key is a stream name, value is a consumer id.
+        """
+        super().__init__(
+            sentinels=sentinels,
+            master_name=master_name,
+            min_other_sentinels=min_other_sentinels,
+            task_id_generator=None,
+            result_backend=None,
+            queue_name=queue_name,
+            **connection_kwargs,
+        )
+        self.consumer_group_name = consumer_group_name
+        self.consumer_name = consumer_name or str(uuid.uuid4())
+        self.consumer_id = consumer_id
+        self.mkstream = mkstream
+        self.block = xread_block
+        self.additional_streams = additional_streams or {}
+
+    async def _declare_consumer_group(self) -> None:
+        streams = {self.queue_name, *self.additional_streams.keys()}
+        async with self._acquire_master_conn() as redis_conn:
+            for stream_name in streams:
+                try:
+                    await redis_conn.xgroup_create(
+                        stream_name,
+                        self.consumer_group_name,
+                        id=self.consumer_id,
+                        mkstream=self.mkstream,
+                    )
+                except ResponseError as err:
+                    logger.debug(err)
+
+    async def startup(self) -> None:
+        """Declare consumer group on startup."""
+        await super().startup()
+        await self._declare_consumer_group()
+
+    async def kick(self, message: BrokerMessage) -> None:
+        """
+        Put a message in the stream.
+
+        This method appends a message to the stream of all messages.
+
+        :param message: message to append.
+        """
+        async with self._acquire_master_conn() as redis_conn:
+            await redis_conn.xadd(self.queue_name, {b"data": message.message})
+
+    def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
+        async def _ack() -> None:
+            async with self._acquire_master_conn() as redis_conn:
+                await redis_conn.xack(
+                    self.queue_name,
+                    self.consumer_group_name,
+                    id,
+                )
+
+        return _ack
+
+    async def listen(self) -> AsyncGenerator[AckableMessage, None]:
+        """Listen to the stream for new messages."""
+        async with self._acquire_master_conn() as redis_conn:
+            while True:
+                fetched = await redis_conn.xreadgroup(
+                    self.consumer_group_name,
+                    self.consumer_name,
+                    {
+                        self.queue_name: ">",
+                        **self.additional_streams,  # type: ignore
+                    },
+                    block=self.block,
+                    noack=False,
+                )
+                for _, msg_list in fetched:
+                    for msg_id, msg in msg_list:
+                        logger.debug("Received message: %s", msg)
+                        yield AckableMessage(
+                            data=msg[b"data"],
+                            ack=self._ack_generator(msg_id),
+                        )
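And the sentinel variant resolves the current master through `_acquire_master_conn` before every stream operation; a minimal sketch (addresses and master name are hypothetical):

```python
from taskiq_redis import RedisStreamSentinelBroker

broker = RedisStreamSentinelBroker(
    sentinels=[("localhost", 26379)],  # hypothetical sentinel address
    master_name="mymaster",            # hypothetical master name
)
```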
taskiq_redis/schedule_source.py CHANGED
@@ -21,8 +21,8 @@ else:
     from typing_extensions import TypeAlias
 
 if TYPE_CHECKING:
-    _Redis: TypeAlias = Redis[bytes]
-    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]
+    _Redis: TypeAlias = Redis[bytes]  # type: ignore
+    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]  # type: ignore
 else:
     _Redis: TypeAlias = Redis
     _BlockingConnectionPool: TypeAlias = BlockingConnectionPool
@@ -140,7 +140,7 @@ class RedisClusterScheduleSource(ScheduleSource):
         **connection_kwargs: Any,
     ) -> None:
         self.prefix = prefix
-        self.redis: RedisCluster[bytes] = RedisCluster.from_url(
+        self.redis: "RedisCluster" = RedisCluster.from_url(
             url,
             **connection_kwargs,
         )
@@ -150,7 +150,7 @@ class RedisClusterScheduleSource(ScheduleSource):
 
     async def delete_schedule(self, schedule_id: str) -> None:
         """Remove schedule by id."""
-        await self.redis.delete(f"{self.prefix}:{schedule_id}")  # type: ignore[attr-defined]
+        await self.redis.delete(f"{self.prefix}:{schedule_id}")
 
     async def add_schedule(self, schedule: ScheduledTask) -> None:
         """
@@ -159,7 +159,7 @@ class RedisClusterScheduleSource(ScheduleSource):
         :param schedule: schedule to add.
         :param schedule_id: schedule id.
         """
-        await self.redis.set(  # type: ignore[attr-defined]
+        await self.redis.set(
             f"{self.prefix}:{schedule.schedule_id}",
             self.serializer.dumpb(model_dump(schedule)),
         )
@@ -173,8 +173,8 @@ class RedisClusterScheduleSource(ScheduleSource):
         :return: list of schedules.
         """
         schedules = []
-        async for key in self.redis.scan_iter(f"{self.prefix}:*"):  # type: ignore[attr-defined]
-            raw_schedule = await self.redis.get(key)  # type: ignore[attr-defined]
+        async for key in self.redis.scan_iter(f"{self.prefix}:*"):
+            raw_schedule = await self.redis.get(key)
             parsed_schedule = model_validate(
                 ScheduledTask,
                 self.serializer.loadb(raw_schedule),
@@ -189,7 +189,7 @@ class RedisClusterScheduleSource(ScheduleSource):
 
     async def shutdown(self) -> None:
         """Shut down the schedule source."""
-        await self.redis.aclose()  # type: ignore[attr-defined]
+        await self.redis.aclose()
 
 
 class RedisSentinelScheduleSource(ScheduleSource):
@@ -288,4 +288,4 @@ class RedisSentinelScheduleSource(ScheduleSource):
     async def shutdown(self) -> None:
         """Shut down the schedule source."""
         for sentinel in self.sentinel.sentinels:
-            await sentinel.aclose()  # type: ignore[attr-defined]
+            await sentinel.aclose()
taskiq_redis-1.0.2.dist-info/METADATA → taskiq_redis-1.0.3.dist-info/METADATA CHANGED
@@ -1,22 +1,23 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
 Name: taskiq-redis
-Version: 1.0.2
+Version: 1.0.3
 Summary: Redis integration for taskiq
-Home-page: https://github.com/taskiq-python/taskiq-redis
 Keywords: taskiq,tasks,distributed,async,redis,result_backend
 Author: taskiq-team
 Author-email: taskiq@norely.com
-Requires-Python: >=3.8.1,<4.0.0
+Requires-Python: >=3.9,<4.0
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Programming Language :: Python :: 3.8
 Requires-Dist: redis (>=5,<6)
-Requires-Dist: taskiq (>=0.11.1,<1)
+Requires-Dist: taskiq (>=0.11.12,<1)
+Project-URL: Homepage, https://github.com/taskiq-python/taskiq-redis
 Project-URL: Repository, https://github.com/taskiq-python/taskiq-redis
 Description-Content-Type: text/markdown
 
@@ -43,17 +44,17 @@ Let's see the example with the redis broker and redis async result:
 # broker.py
 import asyncio
 
-from taskiq_redis import ListQueueBroker, RedisAsyncResultBackend
+from taskiq_redis import RedisAsyncResultBackend, RedisStreamBroker
 
-redis_async_result = RedisAsyncResultBackend(
+result_backend = RedisAsyncResultBackend(
     redis_url="redis://localhost:6379",
 )
 
 # Or you can use PubSubBroker if you need broadcasting
-broker = ListQueueBroker(
+# Or ListQueueBroker if you don't want acknowledgements
+broker = RedisStreamBroker(
     url="redis://localhost:6379",
-    result_backend=redis_async_result,
-)
+).with_result_backend(result_backend)
 
 
 @broker.task
@@ -77,25 +78,48 @@ Launch the workers:
 Then run the main code:
 `python3 broker.py`
 
-## PubSubBroker and ListQueueBroker configuration
 
-We have two brokers with similar interfaces, but with different logic.
-The PubSubBroker uses redis' pubsub mechanism and is very powerful,
-but it executes every task on all workers, because PUBSUB broadcasts message
-to all subscribers.
+## Brokers
 
-If you want your messages to be processed only once, please use ListQueueBroker.
-It uses redis' [LPUSH](https://redis.io/commands/lpush/) and [BRPOP](https://redis.io/commands/brpop/) commands to deal with messages.
+This package contains 6 broker implementations.
+3 broker types:
+* PubSub broker
+* ListQueue broker
+* Stream broker
 
-Brokers parameters:
-* `url` - url to redis.
-* `task_id_generator` - custom task_id genertaor.
-* `result_backend` - custom result backend.
-* `queue_name` - name of the pub/sub channel in redis.
-* `max_connection_pool_size` - maximum number of connections in pool.
-* Any other keyword arguments are passed to `redis.asyncio.BlockingConnectionPool`.
-  Notably, you can use `timeout` to set custom timeout in seconds for reconnects
-  (or set it to `None` to try reconnects indefinitely).
+Each type is implemented for each redis architecture:
+* Single node
+* Cluster
+* Sentinel
+
+Here's a small breakdown of how they differ from each other.
+
+
+### PubSub
+
+On old redis versions, PUBSUB was the default way of making redis into a queue.
+But using PUBSUB means that all messages are delivered to all subscribed consumers.
+
+> [!WARNING]
+> This broker doesn't support acknowledgements. If a worker is suddenly killed
+> during message processing, the message is going to be lost.
+
+### ListQueue
+
+This broker creates a list of messages at some key. Adding new tasks will be done
+by appending them from the left side using `lpush`, and taking them from the right side using `brpop`.
+
+> [!WARNING]
+> This broker doesn't support acknowledgements. If a worker is suddenly killed
+> during message processing, the message is going to be lost.
+
+### Stream
+
+Stream brokers use the redis [stream type](https://redis.io/docs/latest/develop/data-types/streams/) to store and fetch messages.
+
+> [!TIP]
+> This broker **supports** acknowledgements and therefore is fine to use in cases when data durability is
+> required.
 
 ## RedisAsyncResultBackend configuration
 
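To make the breakdown above concrete, here is a side-by-side sketch of instantiating all three single-node brokers exported by this package (URL is a local default):

```python
from taskiq_redis import ListQueueBroker, PubSubBroker, RedisStreamBroker

# Broadcast: every subscribed worker receives every message; no acks.
pubsub_broker = PubSubBroker(url="redis://localhost:6379")

# Queue: each message is handled once (LPUSH/BRPOP), but a worker crash
# mid-task loses the message; no acks.
list_broker = ListQueueBroker(url="redis://localhost:6379")

# Stream: each message is handled once and XACKed only after completion.
stream_broker = RedisStreamBroker(url="redis://localhost:6379")
```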
@@ -107,19 +131,21 @@ RedisAsyncResultBackend parameters:
 * Any other keyword arguments are passed to `redis.asyncio.BlockingConnectionPool`.
   Notably, you can use `timeout` to set custom timeout in seconds for reconnects
   (or set it to `None` to try reconnects indefinitely).
-> IMPORTANT: **It is highly recommended to use expire time in RedisAsyncResultBackend**
+
+> [!WARNING]
+> **It is highly recommended to use expire time in RedisAsyncResultBackend**
 > If you want to add expiration, either `result_ex_time` or `result_px_time` must be set.
->```python
-># First variant
->redis_async_result = RedisAsyncResultBackend(
->    redis_url="redis://localhost:6379",
->    result_ex_time=1000,
->)
+> ```python
+> # First variant
+> redis_async_result = RedisAsyncResultBackend(
+>     redis_url="redis://localhost:6379",
+>     result_ex_time=1000,
+> )
 >
-># Second variant
->redis_async_result = RedisAsyncResultBackend(
->    redis_url="redis://localhost:6379",
->    result_px_time=1000000,
->)
->```
+> # Second variant
+> redis_async_result = RedisAsyncResultBackend(
+>     redis_url="redis://localhost:6379",
+>     result_px_time=1000000,
+> )
+> ```
 
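A note on the two variants quoted above: they map directly onto Redis `SET` arguments, where `ex` is in seconds and `px` is in milliseconds, so `result_ex_time=1000` and `result_px_time=1000000` both keep results for roughly 1000 seconds.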
taskiq_redis-1.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,12 @@
+taskiq_redis/__init__.py,sha256=nb2Lx4lVj9m20duzRFQk3nNVswQGylmwLsQN6Qc1lGI,1091
+taskiq_redis/exceptions.py,sha256=7buBJ7CRVWd5WqVqSjtHO8cVL7QzZg-DOM3nB87t-Sk,738
+taskiq_redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskiq_redis/redis_backend.py,sha256=MLBaUN3Zx-DLvm1r-lgPU82_WZq9kc6oTxYI8LQjd6k,19882
+taskiq_redis/redis_broker.py,sha256=ZLn7LAHj8Sh_oyW5hMgD7PZPQfUdXNPKdqhBcr9Okmg,9775
+taskiq_redis/redis_cluster_broker.py,sha256=FuWl5fP7Fwr9FbytErmhcUGjRCdPexDK2Co2u6kpDlo,6591
+taskiq_redis/redis_sentinel_broker.py,sha256=wHnbG3xuD_ruhhwp4AXo91NNjq8v2iufUZ0i_HbBRVQ,9073
+taskiq_redis/schedule_source.py,sha256=hqpcs2D8W90KUDHREKblisnhGCE9dbVOtKtuJcOTGZw,9915
+taskiq_redis-1.0.3.dist-info/LICENSE,sha256=lEHEEE-ZxmuItxYgUMPiFWdRcAITxE8DFMNyAg4eOYE,1075
+taskiq_redis-1.0.3.dist-info/METADATA,sha256=whl7_U6GIIcNzVZqrLQkktES6Y91awk8-HtgaX5IQ8s,4391
+taskiq_redis-1.0.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+taskiq_redis-1.0.3.dist-info/RECORD,,
taskiq_redis-1.0.2.dist-info/WHEEL → taskiq_redis-1.0.3.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.9.0
+Generator: poetry-core 2.1.1
 Root-Is-Purelib: true
 Tag: py3-none-any
@@ -1,12 +0,0 @@
1
- taskiq_redis/__init__.py,sha256=UEW3rQXt4jinMnAKJlpXQhyPDh6SU2in0bPgzfIo3y4,911
2
- taskiq_redis/exceptions.py,sha256=eS4bfZVAjyMsnFs3IF74uYwO1KZOlrYxhxgPqD49ztU,561
3
- taskiq_redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- taskiq_redis/redis_backend.py,sha256=mGU3rXJ727X-qYqSfaderTFVGz93NpCyHCf5PDFTjGk,19543
5
- taskiq_redis/redis_broker.py,sha256=JeoA3-quZYqa_wixJefMRYPkZe94x-qoQb6tQKkHLzg,4733
6
- taskiq_redis/redis_cluster_broker.py,sha256=CgPKkoEHZ1moNM-VNmzPQdjjNOrhiVUCNV-7FrUgqTo,2121
7
- taskiq_redis/redis_sentinel_broker.py,sha256=5MxUFIX7qRyDT7IHebLUhxAmmUwk1_b2sxjpSXRcjlo,4114
8
- taskiq_redis/schedule_source.py,sha256=bk96UBg8op-Xqg_PVETgyDb92cDaY69EAjpP8GvYSnY,10068
9
- taskiq_redis-1.0.2.dist-info/LICENSE,sha256=lEHEEE-ZxmuItxYgUMPiFWdRcAITxE8DFMNyAg4eOYE,1075
10
- taskiq_redis-1.0.2.dist-info/METADATA,sha256=6A_nDPLAmO92y_Db7vNUIGiOGqH7gTm_rCpb0KMIPOc,4030
11
- taskiq_redis-1.0.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
12
- taskiq_redis-1.0.2.dist-info/RECORD,,