taskiq-redis 0.5.4__py3-none-any.whl → 0.5.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- taskiq_redis/__init__.py +5 -1
- taskiq_redis/redis_backend.py +17 -4
- taskiq_redis/redis_broker.py +9 -5
- taskiq_redis/schedule_source.py +82 -3
- {taskiq_redis-0.5.4.dist-info → taskiq_redis-0.5.6.dist-info}/METADATA +7 -1
- taskiq_redis-0.5.6.dist-info/RECORD +11 -0
- {taskiq_redis-0.5.4.dist-info → taskiq_redis-0.5.6.dist-info}/WHEEL +1 -1
- taskiq_redis-0.5.4.dist-info/RECORD +0 -11
taskiq_redis/__init__.py
CHANGED

@@ -5,7 +5,10 @@ from taskiq_redis.redis_backend import (
 )
 from taskiq_redis.redis_broker import ListQueueBroker, PubSubBroker
 from taskiq_redis.redis_cluster_broker import ListQueueClusterBroker
-from taskiq_redis.schedule_source import RedisScheduleSource
+from taskiq_redis.schedule_source import (
+    RedisClusterScheduleSource,
+    RedisScheduleSource,
+)

 __all__ = [
     "RedisAsyncClusterResultBackend",
@@ -14,4 +17,5 @@ __all__ = [
     "PubSubBroker",
     "ListQueueClusterBroker",
     "RedisScheduleSource",
+    "RedisClusterScheduleSource",
 ]
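With this release both schedule sources are exported from the package root; a one-line import sketch (names confirmed by the `__all__` diff above):

```python
from taskiq_redis import RedisClusterScheduleSource, RedisScheduleSource
```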
taskiq_redis/redis_backend.py
CHANGED

@@ -1,7 +1,7 @@
 import pickle
-from typing import Dict, Optional, TypeVar, Union
+from typing import Any, Dict, Optional, TypeVar, Union

-from redis.asyncio import ConnectionPool, Redis
+from redis.asyncio import BlockingConnectionPool, Redis
 from redis.asyncio.cluster import RedisCluster
 from taskiq import AsyncResultBackend
 from taskiq.abc.result_backend import TaskiqResult
@@ -24,6 +24,8 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         keep_results: bool = True,
         result_ex_time: Optional[int] = None,
         result_px_time: Optional[int] = None,
+        max_connection_pool_size: Optional[int] = None,
+        **connection_kwargs: Any,
     ) -> None:
         """
         Constructs a new result backend.
@@ -32,13 +34,19 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
         :param keep_results: flag to not remove results from Redis after reading.
         :param result_ex_time: expire time in seconds for result.
         :param result_px_time: expire time in milliseconds for result.
+        :param max_connection_pool_size: maximum number of connections in pool.
+        :param connection_kwargs: additional arguments for redis BlockingConnectionPool.

         :raises DuplicateExpireTimeSelectedError: if result_ex_time
             and result_px_time are selected.
         :raises ExpireTimeMustBeMoreThanZeroError: if result_ex_time
             and result_px_time are equal zero.
         """
-        self.redis_pool = ConnectionPool.from_url(url=redis_url)
+        self.redis_pool = BlockingConnectionPool.from_url(
+            url=redis_url,
+            max_connections=max_connection_pool_size,
+            **connection_kwargs,
+        )
         self.keep_results = keep_results
         self.result_ex_time = result_ex_time
         self.result_px_time = result_px_time
@@ -146,6 +154,7 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         keep_results: bool = True,
         result_ex_time: Optional[int] = None,
         result_px_time: Optional[int] = None,
+        **connection_kwargs: Any,
     ) -> None:
         """
         Constructs a new result backend.
@@ -154,13 +163,17 @@ class RedisAsyncClusterResultBackend(AsyncResultBackend[_ReturnType]):
         :param keep_results: flag to not remove results from Redis after reading.
         :param result_ex_time: expire time in seconds for result.
         :param result_px_time: expire time in milliseconds for result.
+        :param connection_kwargs: additional arguments for RedisCluster.

         :raises DuplicateExpireTimeSelectedError: if result_ex_time
             and result_px_time are selected.
         :raises ExpireTimeMustBeMoreThanZeroError: if result_ex_time
             and result_px_time are equal zero.
         """
-        self.redis: RedisCluster[bytes] = RedisCluster.from_url(redis_url)
+        self.redis: RedisCluster[bytes] = RedisCluster.from_url(
+            redis_url,
+            **connection_kwargs,
+        )
         self.keep_results = keep_results
         self.result_ex_time = result_ex_time
         self.result_px_time = result_px_time
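The diff above adds an optional pool cap and pass-through kwargs to both result backends. A minimal construction sketch, assuming a local Redis; the URL, the expiry value, and the `socket_connect_timeout` kwarg (any valid `BlockingConnectionPool` argument works) are illustrative, not from the diff:

```python
from taskiq_redis import RedisAsyncResultBackend

result_backend = RedisAsyncResultBackend(
    redis_url="redis://localhost:6379",  # hypothetical address
    result_ex_time=600,                  # expire stored results after 10 minutes
    max_connection_pool_size=20,         # added in this diff: caps the blocking pool
    socket_connect_timeout=5,            # forwarded via **connection_kwargs
)
```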
taskiq_redis/redis_broker.py
CHANGED

@@ -1,7 +1,7 @@
 from logging import getLogger
 from typing import Any, AsyncGenerator, Callable, Optional, TypeVar

-from redis.asyncio import ConnectionPool, Redis
+from redis.asyncio import BlockingConnectionPool, ConnectionPool, Redis
 from taskiq.abc.broker import AsyncBroker
 from taskiq.abc.result_backend import AsyncResultBackend
 from taskiq.message import BrokerMessage
@@ -31,14 +31,16 @@ class BaseRedisBroker(AsyncBroker):
         :param result_backend: custom result backend.
         :param queue_name: name for a list in redis.
         :param max_connection_pool_size: maximum number of connections in pool.
-
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param connection_kwargs: additional arguments for redis BlockingConnectionPool.
         """
         super().__init__(
             result_backend=result_backend,
             task_id_generator=task_id_generator,
         )

-        self.connection_pool: ConnectionPool = ConnectionPool.from_url(
+        self.connection_pool: ConnectionPool = BlockingConnectionPool.from_url(
             url=url,
             max_connections=max_connection_pool_size,
             **connection_kwargs,
@@ -60,8 +62,9 @@ class PubSubBroker(BaseRedisBroker):

         :param message: message to send.
         """
+        queue_name = message.labels.get("queue_name") or self.queue_name
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
-            await redis_conn.publish(self.queue_name, message.message)
+            await redis_conn.publish(queue_name, message.message)

     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -95,8 +98,9 @@ class ListQueueBroker(BaseRedisBroker):

         :param message: message to append.
         """
+        queue_name = message.labels.get("queue_name") or self.queue_name
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
-            await redis_conn.lpush(self.queue_name, message.message)
+            await redis_conn.lpush(queue_name, message.message)

     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
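Two behavioral changes land here: the broker now draws from a `BlockingConnectionPool` (it waits for a free connection instead of erroring when the pool is exhausted), and a single task run can be routed to another queue via the `queue_name` label. A usage sketch with a hypothetical URL and task; `kicker().with_labels(...)` is taskiq's standard label API:

```python
from taskiq_redis import ListQueueBroker

broker = ListQueueBroker(
    url="redis://localhost:6379",
    max_connection_pool_size=5,  # per the updated docstring: workers + 1 at minimum
)


@broker.task
async def my_task() -> None:
    ...


async def send() -> None:
    await my_task.kiq()  # lands on the broker's default queue
    # The new label lookup routes this one call to a different list:
    await my_task.kicker().with_labels(queue_name="priority").kiq()
```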
taskiq_redis/schedule_source.py
CHANGED

@@ -1,6 +1,6 @@
 from typing import Any, List, Optional

-from redis.asyncio import ConnectionPool, Redis
+from redis.asyncio import BlockingConnectionPool, ConnectionPool, Redis, RedisCluster
 from taskiq import ScheduleSource
 from taskiq.abc.serializer import TaskiqSerializer
 from taskiq.compat import model_dump, model_validate
@@ -22,7 +22,7 @@ class RedisScheduleSource(ScheduleSource):
         This is how many keys will be fetched at once.
     :param max_connection_pool_size: maximum number of connections in pool.
     :param serializer: serializer for data.
-    :param connection_kwargs: additional arguments for redis ConnectionPool.
+    :param connection_kwargs: additional arguments for redis BlockingConnectionPool.
     """

     def __init__(
@@ -35,7 +35,7 @@ class RedisScheduleSource(ScheduleSource):
         **connection_kwargs: Any,
     ) -> None:
         self.prefix = prefix
-        self.connection_pool: ConnectionPool = ConnectionPool.from_url(
+        self.connection_pool: ConnectionPool = BlockingConnectionPool.from_url(
             url=url,
             max_connections=max_connection_pool_size,
             **connection_kwargs,
@@ -95,3 +95,82 @@ class RedisScheduleSource(ScheduleSource):
     async def shutdown(self) -> None:
         """Shut down the schedule source."""
         await self.connection_pool.disconnect()
+
+
+class RedisClusterScheduleSource(ScheduleSource):
+    """
+    Source of schedules for redis cluster.
+
+    This class allows you to store schedules in redis.
+    Also it supports dynamic schedules.
+
+    :param url: url to redis cluster.
+    :param prefix: prefix for redis schedule keys.
+    :param buffer_size: buffer size for redis scan.
+        This is how many keys will be fetched at once.
+    :param max_connection_pool_size: maximum number of connections in pool.
+    :param serializer: serializer for data.
+    :param connection_kwargs: additional arguments for RedisCluster.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        prefix: str = "schedule",
+        buffer_size: int = 50,
+        serializer: Optional[TaskiqSerializer] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        self.prefix = prefix
+        self.redis: RedisCluster[bytes] = RedisCluster.from_url(
+            url,
+            **connection_kwargs,
+        )
+        self.buffer_size = buffer_size
+        if serializer is None:
+            serializer = PickleSerializer()
+        self.serializer = serializer
+
+    async def delete_schedule(self, schedule_id: str) -> None:
+        """Remove schedule by id."""
+        await self.redis.delete(f"{self.prefix}:{schedule_id}")  # type: ignore[attr-defined]
+
+    async def add_schedule(self, schedule: ScheduledTask) -> None:
+        """
+        Add schedule to redis.
+
+        :param schedule: schedule to add.
+        :param schedule_id: schedule id.
+        """
+        await self.redis.set(  # type: ignore[attr-defined]
+            f"{self.prefix}:{schedule.schedule_id}",
+            self.serializer.dumpb(model_dump(schedule)),
+        )
+
+    async def get_schedules(self) -> List[ScheduledTask]:
+        """
+        Get all schedules from redis.
+
+        This method is used by scheduler to get all schedules.
+
+        :return: list of schedules.
+        """
+        schedules = []
+        buffer = []
+        async for key in self.redis.scan_iter(f"{self.prefix}:*"):  # type: ignore[attr-defined]
+            buffer.append(key)
+            if len(buffer) >= self.buffer_size:
+                schedules.extend(await self.redis.mget(buffer))  # type: ignore[attr-defined]
+                buffer = []
+        if buffer:
+            schedules.extend(await self.redis.mget(buffer))  # type: ignore[attr-defined]
+        return [
+            model_validate(ScheduledTask, self.serializer.loadb(schedule))
+            for schedule in schedules
+            if schedule
+        ]
+
+    async def post_send(self, task: ScheduledTask) -> None:
+        """Delete a task after it's completed."""
+        if task.time is not None:
+            await self.delete_schedule(task.schedule_id)
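The new `RedisClusterScheduleSource` mirrors `RedisScheduleSource` but connects through `RedisCluster.from_url` instead of a connection pool. A wiring sketch under assumptions: the cluster URL is hypothetical, and `TaskiqScheduler` is taskiq's stock scheduler:

```python
from taskiq import TaskiqScheduler

from taskiq_redis import ListQueueClusterBroker, RedisClusterScheduleSource

broker = ListQueueClusterBroker(url="redis://localhost:7000")
source = RedisClusterScheduleSource("redis://localhost:7000")

# The scheduler pulls ScheduledTask entries from the source
# and kicks them through the broker.
scheduler = TaskiqScheduler(broker=broker, sources=[source])
```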
{taskiq_redis-0.5.4.dist-info → taskiq_redis-0.5.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: taskiq-redis
-Version: 0.5.4
+Version: 0.5.6
 Summary: Redis integration for taskiq
 Home-page: https://github.com/taskiq-python/taskiq-redis
 Keywords: taskiq,tasks,distributed,async,redis,result_backend
@@ -93,6 +93,9 @@ Brokers parameters:
 * `result_backend` - custom result backend.
 * `queue_name` - name of the pub/sub channel in redis.
 * `max_connection_pool_size` - maximum number of connections in pool.
+* Any other keyword arguments are passed to `redis.asyncio.BlockingConnectionPool`.
+  Notably, you can use `timeout` to set custom timeout in seconds for reconnects
+  (or set it to `None` to try reconnects indefinitely).

 ## RedisAsyncResultBackend configuration

@@ -101,6 +104,9 @@ RedisAsyncResultBackend parameters:
 * `keep_results` - flag to not remove results from Redis after reading.
 * `result_ex_time` - expire time in seconds (by default - not specified)
 * `result_px_time` - expire time in milliseconds (by default - not specified)
+* Any other keyword arguments are passed to `redis.asyncio.BlockingConnectionPool`.
+  Notably, you can use `timeout` to set custom timeout in seconds for reconnects
+  (or set it to `None` to try reconnects indefinitely).
 > IMPORTANT: **It is highly recommended to use expire time in RedisAsyncResultBackend**
 > If you want to add expiration, either `result_ex_time` or `result_px_time` must be set.
 >```python
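To illustrate the README additions above: extra keyword arguments reach `redis.asyncio.BlockingConnectionPool`, with `timeout` as the option they call out. A sketch with a hypothetical URL:

```python
from taskiq_redis import PubSubBroker

broker = PubSubBroker(
    url="redis://localhost:6379",
    max_connection_pool_size=10,
    timeout=None,  # per the note above: None keeps trying indefinitely
)
```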
taskiq_redis-0.5.6.dist-info/RECORD
ADDED

@@ -0,0 +1,11 @@
+taskiq_redis/__init__.py,sha256=fMdXYxulcaKur66UUlmqAQf_q24jT5UHDYsMYP6J4fw,602
+taskiq_redis/exceptions.py,sha256=eS4bfZVAjyMsnFs3IF74uYwO1KZOlrYxhxgPqD49ztU,561
+taskiq_redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskiq_redis/redis_backend.py,sha256=BwzFWXLHqpQEhZ675dvt2ueOfB7xjJOAGTSHZSyoR7A,8914
+taskiq_redis/redis_broker.py,sha256=b5oOKXP-uuqffGnNhUsT4HgTPmBiBdAfpUOd5V0VfFc,4254
+taskiq_redis/redis_cluster_broker.py,sha256=CgPKkoEHZ1moNM-VNmzPQdjjNOrhiVUCNV-7FrUgqTo,2121
+taskiq_redis/schedule_source.py,sha256=uznI6wbrdSbD-hIAF3xDGTVXD99SnfWYAuYsTQUIL8E,6202
+taskiq_redis/serializer.py,sha256=x-1ExYoD_EnDiM53lyvI99MdTpNj_pORMIaCL07-6nU,416
+taskiq_redis-0.5.6.dist-info/METADATA,sha256=zM6LiFFui-OFZqAVJnQ-qdh-U7_wKrTBnHYNMJuar4M,4030
+taskiq_redis-0.5.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+taskiq_redis-0.5.6.dist-info/RECORD,,
taskiq_redis-0.5.4.dist-info/RECORD
DELETED

@@ -1,11 +0,0 @@
-taskiq_redis/__init__.py,sha256=sPv4uLLPSPc8zXK42puvsSMDY5HwbCePdJheYylyApg,527
-taskiq_redis/exceptions.py,sha256=eS4bfZVAjyMsnFs3IF74uYwO1KZOlrYxhxgPqD49ztU,561
-taskiq_redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-taskiq_redis/redis_backend.py,sha256=Q_pJ1Bz-NpTyFT68UswBvsNYWkLtnSPF1x8QbOnbWI0,8357
-taskiq_redis/redis_broker.py,sha256=qQLWWvY-NacVXkgDGVCe2fyWYPkjZiOnggB0hpPStqw,3957
-taskiq_redis/redis_cluster_broker.py,sha256=CgPKkoEHZ1moNM-VNmzPQdjjNOrhiVUCNV-7FrUgqTo,2121
-taskiq_redis/schedule_source.py,sha256=SWMGC3limXRgyxZS5BawnF3bHGgJpjljByIUL5nC2BY,3414
-taskiq_redis/serializer.py,sha256=x-1ExYoD_EnDiM53lyvI99MdTpNj_pORMIaCL07-6nU,416
-taskiq_redis-0.5.4.dist-info/METADATA,sha256=XKrSxYM3F5RkGwX5mb-HGxertnZWeBpf1f_9c_yaYPQ,3588
-taskiq_redis-0.5.4.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-taskiq_redis-0.5.4.dist-info/RECORD,,