taskiq-redis 1.0.2__tar.gz → 1.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- taskiq_redis-1.0.4/PKG-INFO +215 -0
- taskiq_redis-1.0.4/README.md +191 -0
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/pyproject.toml +19 -20
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/taskiq_redis/__init__.py +17 -7
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/taskiq_redis/exceptions.py +6 -0
- taskiq_redis-1.0.4/taskiq_redis/list_schedule_source.py +229 -0
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/taskiq_redis/redis_backend.py +57 -45
- taskiq_redis-1.0.4/taskiq_redis/redis_broker.py +271 -0
- taskiq_redis-1.0.4/taskiq_redis/redis_cluster_broker.py +188 -0
- taskiq_redis-1.0.4/taskiq_redis/redis_sentinel_broker.py +259 -0
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/taskiq_redis/schedule_source.py +9 -9
- taskiq_redis-1.0.2/PKG-INFO +0 -125
- taskiq_redis-1.0.2/README.md +0 -102
- taskiq_redis-1.0.2/taskiq_redis/redis_broker.py +0 -134
- taskiq_redis-1.0.2/taskiq_redis/redis_cluster_broker.py +0 -67
- taskiq_redis-1.0.2/taskiq_redis/redis_sentinel_broker.py +0 -132
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/LICENSE +0 -0
- {taskiq_redis-1.0.2 → taskiq_redis-1.0.4}/taskiq_redis/py.typed +0 -0
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: taskiq-redis
|
|
3
|
+
Version: 1.0.4
|
|
4
|
+
Summary: Redis integration for taskiq
|
|
5
|
+
Keywords: taskiq,tasks,distributed,async,redis,result_backend
|
|
6
|
+
Author: taskiq-team
|
|
7
|
+
Author-email: taskiq@norely.com
|
|
8
|
+
Requires-Python: >=3.9,<4.0
|
|
9
|
+
Classifier: Programming Language :: Python
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
16
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
18
|
+
Requires-Dist: redis (>=5,<6)
|
|
19
|
+
Requires-Dist: taskiq (>=0.11.12,<1)
|
|
20
|
+
Project-URL: Homepage, https://github.com/taskiq-python/taskiq-redis
|
|
21
|
+
Project-URL: Repository, https://github.com/taskiq-python/taskiq-redis
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
|
|
24
|
+
# TaskIQ-Redis
|
|
25
|
+
|
|
26
|
+
Taskiq-redis is a plugin for taskiq that adds a new broker and result backend based on redis.
|
|
27
|
+
|
|
28
|
+
# Installation
|
|
29
|
+
|
|
30
|
+
To use this project you must have installed core taskiq library:
|
|
31
|
+
```bash
|
|
32
|
+
pip install taskiq
|
|
33
|
+
```
|
|
34
|
+
This project can be installed using pip:
|
|
35
|
+
```bash
|
|
36
|
+
pip install taskiq-redis
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
# Usage
|
|
40
|
+
|
|
41
|
+
Let's see the example with the redis broker and redis async result:
|
|
42
|
+
|
|
43
|
+
```python
|
|
44
|
+
# broker.py
|
|
45
|
+
import asyncio
|
|
46
|
+
|
|
47
|
+
from taskiq_redis import RedisAsyncResultBackend, RedisStreamBroker
|
|
48
|
+
|
|
49
|
+
result_backend = RedisAsyncResultBackend(
|
|
50
|
+
redis_url="redis://localhost:6379",
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
# Or you can use PubSubBroker if you need broadcasting
|
|
54
|
+
# Or ListQueueBroker if you don't want acknowledges
|
|
55
|
+
broker = RedisStreamBroker(
|
|
56
|
+
url="redis://localhost:6379",
|
|
57
|
+
).with_result_backend(result_backend)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@broker.task
|
|
61
|
+
async def best_task_ever() -> None:
|
|
62
|
+
"""Solve all problems in the world."""
|
|
63
|
+
await asyncio.sleep(5.5)
|
|
64
|
+
print("All problems are solved!")
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
async def main():
|
|
68
|
+
task = await best_task_ever.kiq()
|
|
69
|
+
print(await task.wait_result())
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
if __name__ == "__main__":
|
|
73
|
+
asyncio.run(main())
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
Launch the workers:
|
|
77
|
+
`taskiq worker broker:broker`
|
|
78
|
+
Then run the main code:
|
|
79
|
+
`python3 broker.py`
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
## Brokers
|
|
83
|
+
|
|
84
|
+
This package contains 6 broker implementations.
|
|
85
|
+
3 broker types:
|
|
86
|
+
* PubSub broker
|
|
87
|
+
* ListQueue broker
|
|
88
|
+
* Stream broker
|
|
89
|
+
|
|
90
|
+
Each type is implemented for each redis architecture:
|
|
91
|
+
* Single node
|
|
92
|
+
* Cluster
|
|
93
|
+
* Sentinel
|
|
94
|
+
|
|
95
|
+
Here's a small breakdown of how they differ from each other.
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
### PubSub
|
|
99
|
+
|
|
100
|
+
By default on old redis versions PUBSUB was the way of making redis into a queue.
|
|
101
|
+
But using PUBSUB means that all messages delivered to all subscribed consumers.
|
|
102
|
+
|
|
103
|
+
> [!WARNING]
|
|
104
|
+
> This broker doesn't support acknowledgements. If during message processing
|
|
105
|
+
> Worker was suddenly killed the message is going to be lost.
|
|
106
|
+
|
|
107
|
+
### ListQueue
|
|
108
|
+
|
|
109
|
+
This broker creates a list of messages at some key. Adding new tasks will be done
|
|
110
|
+
by appending them from the left side using `lpush`, and taking them from the right side using `brpop`.
|
|
111
|
+
|
|
112
|
+
> [!WARNING]
|
|
113
|
+
> This broker doesn't support acknowledgements. If during message processing
|
|
114
|
+
> Worker was suddenly killed the message is going to be lost.
|
|
115
|
+
|
|
116
|
+
### Stream
|
|
117
|
+
|
|
118
|
+
Stream brokers use redis [stream type](https://redis.io/docs/latest/develop/data-types/streams/) to store and fetch messages.
|
|
119
|
+
|
|
120
|
+
> [!TIP]
|
|
121
|
+
> This broker **supports** acknowledgements and therefore is fine to use in cases when data durability is
|
|
122
|
+
> required.
|
|
123
|
+
|
|
124
|
+
## RedisAsyncResultBackend configuration
|
|
125
|
+
|
|
126
|
+
RedisAsyncResultBackend parameters:
|
|
127
|
+
* `redis_url` - url to redis.
|
|
128
|
+
* `keep_results` - flag to not remove results from Redis after reading.
|
|
129
|
+
* `result_ex_time` - expire time in seconds (by default - not specified)
|
|
130
|
+
* `result_px_time` - expire time in milliseconds (by default - not specified)
|
|
131
|
+
* Any other keyword arguments are passed to `redis.asyncio.BlockingConnectionPool`.
|
|
132
|
+
Notably, you can use `timeout` to set custom timeout in seconds for reconnects
|
|
133
|
+
(or set it to `None` to try reconnects indefinitely).
|
|
134
|
+
|
|
135
|
+
> [!WARNING]
|
|
136
|
+
> **It is highly recommended to use expire time in RedisAsyncResultBackend**
|
|
137
|
+
> If you want to add expiration, either `result_ex_time` or `result_px_time` must be set.
|
|
138
|
+
> ```python
|
|
139
|
+
> # First variant
|
|
140
|
+
> redis_async_result = RedisAsyncResultBackend(
|
|
141
|
+
> redis_url="redis://localhost:6379",
|
|
142
|
+
> result_ex_time=1000,
|
|
143
|
+
> )
|
|
144
|
+
>
|
|
145
|
+
> # Second variant
|
|
146
|
+
> redis_async_result = RedisAsyncResultBackend(
|
|
147
|
+
> redis_url="redis://localhost:6379",
|
|
148
|
+
> result_px_time=1000000,
|
|
149
|
+
> )
|
|
150
|
+
> ```
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
## Schedule sources
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
You can use this package to add dynamic schedule sources. They are used to store
|
|
157
|
+
schedules for taskiq scheduler.
|
|
158
|
+
|
|
159
|
+
The advantage of using schedule sources from this package over default `LabelBased` source is that you can
|
|
160
|
+
dynamically add schedules in it.
|
|
161
|
+
|
|
162
|
+
We have two types of schedules:
|
|
163
|
+
|
|
164
|
+
* `RedisScheduleSource`
|
|
165
|
+
* `ListRedisScheduleSource`
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
### RedisScheduleSource
|
|
169
|
+
|
|
170
|
+
This source is super simple. It stores all schedules by key `{prefix}:{schedule_id}`. When scheduler requests
|
|
171
|
+
schedules, it retrieves all values from redis that start with a given `prefix`.
|
|
172
|
+
|
|
173
|
+
This is very inefficient and should not be used for high-volume schedules, because if you have `1000` schedules, this scheduler will make at least `20` requests to retrieve them (we use `scan` and `mget` to minimize the number of calls).
|
|
174
|
+
|
|
175
|
+
### ListRedisScheduleSource
|
|
176
|
+
|
|
177
|
+
This source holds values in lists.
|
|
178
|
+
|
|
179
|
+
* For cron tasks it uses key `{prefix}:cron`.
|
|
180
|
+
* For timed schedules it uses key `{prefix}:time:{time}` where `{time}` is actually time where schedules should run.
|
|
181
|
+
|
|
182
|
+
The main advantage of this approach is that we only fetch tasks we need to run at a given time and do not perform any excessive calls to redis.
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
### Migration from one source to another
|
|
186
|
+
|
|
187
|
+
To migrate from `RedisScheduleSource` to `ListRedisScheduleSource` you can define the latter as this:
|
|
188
|
+
|
|
189
|
+
```python
|
|
190
|
+
# broker.py
|
|
191
|
+
import asyncio
|
|
192
|
+
import datetime
|
|
193
|
+
|
|
194
|
+
from taskiq import TaskiqScheduler
|
|
195
|
+
|
|
196
|
+
from taskiq_redis import ListRedisScheduleSource, RedisStreamBroker
|
|
197
|
+
from taskiq_redis.schedule_source import RedisScheduleSource
|
|
198
|
+
|
|
199
|
+
broker = RedisStreamBroker(url="redis://localhost:6379")
|
|
200
|
+
|
|
201
|
+
old_source = RedisScheduleSource("redis://localhost/1", prefix="prefix1")
|
|
202
|
+
array_source = ListRedisScheduleSource(
|
|
203
|
+
"redis://localhost/1",
|
|
204
|
+
prefix="prefix2",
|
|
205
|
+
# To migrate schedules from an old source.
|
|
206
|
+
).with_migrate_from(
|
|
207
|
+
old_source,
|
|
208
|
+
# To delete schedules from an old source.
|
|
209
|
+
delete_schedules=True,
|
|
210
|
+
)
|
|
211
|
+
scheduler = TaskiqScheduler(broker, [array_source])
|
|
212
|
+
```
|
|
213
|
+
|
|
214
|
+
During startup the scheduler will try to migrate schedules from the old source to the new one. Please be sure to specify different prefixes to avoid any kind of collision between the two sources.
|
|
215
|
+
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
# TaskIQ-Redis
|
|
2
|
+
|
|
3
|
+
Taskiq-redis is a plugin for taskiq that adds a new broker and result backend based on redis.
|
|
4
|
+
|
|
5
|
+
# Installation
|
|
6
|
+
|
|
7
|
+
To use this project you must have installed core taskiq library:
|
|
8
|
+
```bash
|
|
9
|
+
pip install taskiq
|
|
10
|
+
```
|
|
11
|
+
This project can be installed using pip:
|
|
12
|
+
```bash
|
|
13
|
+
pip install taskiq-redis
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
# Usage
|
|
17
|
+
|
|
18
|
+
Let's see the example with the redis broker and redis async result:
|
|
19
|
+
|
|
20
|
+
```python
|
|
21
|
+
# broker.py
|
|
22
|
+
import asyncio
|
|
23
|
+
|
|
24
|
+
from taskiq_redis import RedisAsyncResultBackend, RedisStreamBroker
|
|
25
|
+
|
|
26
|
+
result_backend = RedisAsyncResultBackend(
|
|
27
|
+
redis_url="redis://localhost:6379",
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
# Or you can use PubSubBroker if you need broadcasting
|
|
31
|
+
# Or ListQueueBroker if you don't want acknowledges
|
|
32
|
+
broker = RedisStreamBroker(
|
|
33
|
+
url="redis://localhost:6379",
|
|
34
|
+
).with_result_backend(result_backend)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@broker.task
|
|
38
|
+
async def best_task_ever() -> None:
|
|
39
|
+
"""Solve all problems in the world."""
|
|
40
|
+
await asyncio.sleep(5.5)
|
|
41
|
+
print("All problems are solved!")
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def main():
|
|
45
|
+
task = await best_task_ever.kiq()
|
|
46
|
+
print(await task.wait_result())
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
if __name__ == "__main__":
|
|
50
|
+
asyncio.run(main())
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
Launch the workers:
|
|
54
|
+
`taskiq worker broker:broker`
|
|
55
|
+
Then run the main code:
|
|
56
|
+
`python3 broker.py`
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
## Brokers
|
|
60
|
+
|
|
61
|
+
This package contains 6 broker implementations.
|
|
62
|
+
3 broker types:
|
|
63
|
+
* PubSub broker
|
|
64
|
+
* ListQueue broker
|
|
65
|
+
* Stream broker
|
|
66
|
+
|
|
67
|
+
Each type is implemented for each redis architecture:
|
|
68
|
+
* Single node
|
|
69
|
+
* Cluster
|
|
70
|
+
* Sentinel
|
|
71
|
+
|
|
72
|
+
Here's a small breakdown of how they differ from each other.
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
### PubSub
|
|
76
|
+
|
|
77
|
+
By default on old redis versions PUBSUB was the way of making redis into a queue.
|
|
78
|
+
But using PUBSUB means that all messages delivered to all subscribed consumers.
|
|
79
|
+
|
|
80
|
+
> [!WARNING]
|
|
81
|
+
> This broker doesn't support acknowledgements. If during message processing
|
|
82
|
+
> Worker was suddenly killed the message is going to be lost.
|
|
83
|
+
|
|
84
|
+
### ListQueue
|
|
85
|
+
|
|
86
|
+
This broker creates a list of messages at some key. Adding new tasks will be done
|
|
87
|
+
by appending them from the left side using `lpush`, and taking them from the right side using `brpop`.
|
|
88
|
+
|
|
89
|
+
> [!WARNING]
|
|
90
|
+
> This broker doesn't support acknowledgements. If during message processing
|
|
91
|
+
> Worker was suddenly killed the message is going to be lost.
|
|
92
|
+
|
|
93
|
+
### Stream
|
|
94
|
+
|
|
95
|
+
Stream brokers use redis [stream type](https://redis.io/docs/latest/develop/data-types/streams/) to store and fetch messages.
|
|
96
|
+
|
|
97
|
+
> [!TIP]
|
|
98
|
+
> This broker **supports** acknowledgements and therefore is fine to use in cases when data durability is
|
|
99
|
+
> required.
|
|
100
|
+
|
|
101
|
+
## RedisAsyncResultBackend configuration
|
|
102
|
+
|
|
103
|
+
RedisAsyncResultBackend parameters:
|
|
104
|
+
* `redis_url` - url to redis.
|
|
105
|
+
* `keep_results` - flag to not remove results from Redis after reading.
|
|
106
|
+
* `result_ex_time` - expire time in seconds (by default - not specified)
|
|
107
|
+
* `result_px_time` - expire time in milliseconds (by default - not specified)
|
|
108
|
+
* Any other keyword arguments are passed to `redis.asyncio.BlockingConnectionPool`.
|
|
109
|
+
Notably, you can use `timeout` to set custom timeout in seconds for reconnects
|
|
110
|
+
(or set it to `None` to try reconnects indefinitely).
|
|
111
|
+
|
|
112
|
+
> [!WARNING]
|
|
113
|
+
> **It is highly recommended to use expire time in RedisAsyncResultBackend**
|
|
114
|
+
> If you want to add expiration, either `result_ex_time` or `result_px_time` must be set.
|
|
115
|
+
> ```python
|
|
116
|
+
> # First variant
|
|
117
|
+
> redis_async_result = RedisAsyncResultBackend(
|
|
118
|
+
> redis_url="redis://localhost:6379",
|
|
119
|
+
> result_ex_time=1000,
|
|
120
|
+
> )
|
|
121
|
+
>
|
|
122
|
+
> # Second variant
|
|
123
|
+
> redis_async_result = RedisAsyncResultBackend(
|
|
124
|
+
> redis_url="redis://localhost:6379",
|
|
125
|
+
> result_px_time=1000000,
|
|
126
|
+
> )
|
|
127
|
+
> ```
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
## Schedule sources
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
You can use this package to add dynamic schedule sources. They are used to store
|
|
134
|
+
schedules for taskiq scheduler.
|
|
135
|
+
|
|
136
|
+
The advantage of using schedule sources from this package over default `LabelBased` source is that you can
|
|
137
|
+
dynamically add schedules in it.
|
|
138
|
+
|
|
139
|
+
We have two types of schedules:
|
|
140
|
+
|
|
141
|
+
* `RedisScheduleSource`
|
|
142
|
+
* `ListRedisScheduleSource`
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
### RedisScheduleSource
|
|
146
|
+
|
|
147
|
+
This source is super simple. It stores all schedules by key `{prefix}:{schedule_id}`. When scheduler requests
|
|
148
|
+
schedules, it retrieves all values from redis that start with a given `prefix`.
|
|
149
|
+
|
|
150
|
+
This is very inefficient and should not be used for high-volume schedules, because if you have `1000` schedules, this scheduler will make at least `20` requests to retrieve them (we use `scan` and `mget` to minimize the number of calls).
|
|
151
|
+
|
|
152
|
+
### ListRedisScheduleSource
|
|
153
|
+
|
|
154
|
+
This source holds values in lists.
|
|
155
|
+
|
|
156
|
+
* For cron tasks it uses key `{prefix}:cron`.
|
|
157
|
+
* For timed schedules it uses key `{prefix}:time:{time}` where `{time}` is actually time where schedules should run.
|
|
158
|
+
|
|
159
|
+
The main advantage of this approach is that we only fetch tasks we need to run at a given time and do not perform any excessive calls to redis.
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
### Migration from one source to another
|
|
163
|
+
|
|
164
|
+
To migrate from `RedisScheduleSource` to `ListRedisScheduleSource` you can define the latter as this:
|
|
165
|
+
|
|
166
|
+
```python
|
|
167
|
+
# broker.py
|
|
168
|
+
import asyncio
|
|
169
|
+
import datetime
|
|
170
|
+
|
|
171
|
+
from taskiq import TaskiqScheduler
|
|
172
|
+
|
|
173
|
+
from taskiq_redis import ListRedisScheduleSource, RedisStreamBroker
|
|
174
|
+
from taskiq_redis.schedule_source import RedisScheduleSource
|
|
175
|
+
|
|
176
|
+
broker = RedisStreamBroker(url="redis://localhost:6379")
|
|
177
|
+
|
|
178
|
+
old_source = RedisScheduleSource("redis://localhost/1", prefix="prefix1")
|
|
179
|
+
array_source = ListRedisScheduleSource(
|
|
180
|
+
"redis://localhost/1",
|
|
181
|
+
prefix="prefix2",
|
|
182
|
+
# To migrate schedules from an old source.
|
|
183
|
+
).with_migrate_from(
|
|
184
|
+
old_source,
|
|
185
|
+
# To delete schedules from an old source.
|
|
186
|
+
delete_schedules=True,
|
|
187
|
+
)
|
|
188
|
+
scheduler = TaskiqScheduler(broker, [array_source])
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
During startup the scheduler will try to migrate schedules from the old source to the new one. Please be sure to specify different prefixes to avoid any kind of collision between the two sources.
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[tool.poetry]
|
|
2
2
|
name = "taskiq-redis"
|
|
3
|
-
version = "1.0.
|
|
3
|
+
version = "1.0.4"
|
|
4
4
|
description = "Redis integration for taskiq"
|
|
5
5
|
authors = ["taskiq-team <taskiq@norely.com>"]
|
|
6
6
|
readme = "README.md"
|
|
@@ -25,22 +25,22 @@ keywords = [
|
|
|
25
25
|
]
|
|
26
26
|
|
|
27
27
|
[tool.poetry.dependencies]
|
|
28
|
-
python = "^3.
|
|
29
|
-
taskiq = ">=0.11.
|
|
28
|
+
python = "^3.9"
|
|
29
|
+
taskiq = ">=0.11.12,<1"
|
|
30
30
|
redis = "^5"
|
|
31
31
|
|
|
32
32
|
[tool.poetry.group.dev.dependencies]
|
|
33
|
-
pytest = "^
|
|
33
|
+
pytest = "^8"
|
|
34
34
|
mypy = "^1"
|
|
35
|
-
black = "^
|
|
36
|
-
pytest-cov = "^
|
|
37
|
-
anyio = "^
|
|
38
|
-
pytest-env = "^
|
|
35
|
+
black = "^25"
|
|
36
|
+
pytest-cov = "^6"
|
|
37
|
+
anyio = "^4"
|
|
38
|
+
pytest-env = "^1"
|
|
39
39
|
fakeredis = "^2"
|
|
40
|
-
pre-commit = "^
|
|
41
|
-
pytest-xdist = { version = "^
|
|
40
|
+
pre-commit = "^4"
|
|
41
|
+
pytest-xdist = { version = "^3", extras = ["psutil"] }
|
|
42
42
|
ruff = "^0"
|
|
43
|
-
|
|
43
|
+
freezegun = "^1.5.1"
|
|
44
44
|
|
|
45
45
|
[tool.mypy]
|
|
46
46
|
strict = true
|
|
@@ -56,6 +56,7 @@ warn_return_any = false
|
|
|
56
56
|
[[tool.mypy.overrides]]
|
|
57
57
|
module = ['redis']
|
|
58
58
|
ignore_missing_imports = true
|
|
59
|
+
ignore_errors = true
|
|
59
60
|
strict = false
|
|
60
61
|
|
|
61
62
|
[build-system]
|
|
@@ -65,7 +66,7 @@ build-backend = "poetry.core.masonry.api"
|
|
|
65
66
|
[tool.ruff]
|
|
66
67
|
# List of enabled rulsets.
|
|
67
68
|
# See https://docs.astral.sh/ruff/rules/ for more information.
|
|
68
|
-
select = [
|
|
69
|
+
lint.select = [
|
|
69
70
|
"E", # Error
|
|
70
71
|
"F", # Pyflakes
|
|
71
72
|
"W", # Pycodestyle
|
|
@@ -92,24 +93,22 @@ select = [
|
|
|
92
93
|
"PL", # PyLint checks
|
|
93
94
|
"RUF", # Specific to Ruff checks
|
|
94
95
|
]
|
|
95
|
-
ignore = [
|
|
96
|
+
lint.ignore = [
|
|
96
97
|
"D105", # Missing docstring in magic method
|
|
97
98
|
"D107", # Missing docstring in __init__
|
|
98
99
|
"D212", # Multi-line docstring summary should start at the first line
|
|
99
100
|
"D401", # First line should be in imperative mood
|
|
100
101
|
"D104", # Missing docstring in public package
|
|
101
102
|
"D100", # Missing docstring in public module
|
|
102
|
-
"ANN102", # Missing type annotation for self in method
|
|
103
|
-
"ANN101", # Missing type annotation for argument
|
|
104
103
|
"ANN401", # typing.Any are disallowed in `**kwargs
|
|
105
104
|
"PLR0913", # Too many arguments for function call
|
|
106
105
|
"D106", # Missing docstring in public nested class
|
|
107
106
|
]
|
|
108
107
|
exclude = [".venv/"]
|
|
109
|
-
mccabe = { max-complexity = 10 }
|
|
108
|
+
lint.mccabe = { max-complexity = 10 }
|
|
110
109
|
line-length = 88
|
|
111
110
|
|
|
112
|
-
[tool.ruff.per-file-ignores]
|
|
111
|
+
[tool.ruff.lint.per-file-ignores]
|
|
113
112
|
"tests/*" = [
|
|
114
113
|
"S101", # Use of assert detected
|
|
115
114
|
"S301", # Use of pickle detected
|
|
@@ -119,12 +118,12 @@ line-length = 88
|
|
|
119
118
|
"D101", # Missing docstring in public class
|
|
120
119
|
]
|
|
121
120
|
|
|
122
|
-
[tool.ruff.pydocstyle]
|
|
121
|
+
[tool.ruff.lint.pydocstyle]
|
|
123
122
|
convention = "pep257"
|
|
124
123
|
ignore-decorators = ["typing.overload"]
|
|
125
124
|
|
|
126
|
-
[tool.ruff.pylint]
|
|
125
|
+
[tool.ruff.lint.pylint]
|
|
127
126
|
allow-magic-value-types = ["int", "str", "float"]
|
|
128
127
|
|
|
129
|
-
[tool.ruff.flake8-bugbear]
|
|
128
|
+
[tool.ruff.lint.flake8-bugbear]
|
|
130
129
|
extend-immutable-calls = ["taskiq_dependencies.Depends", "taskiq.TaskiqDepends"]
|
|
@@ -1,14 +1,20 @@
|
|
|
1
1
|
"""Package for redis integration."""
|
|
2
|
+
|
|
3
|
+
from taskiq_redis.list_schedule_source import ListRedisScheduleSource
|
|
2
4
|
from taskiq_redis.redis_backend import (
|
|
3
5
|
RedisAsyncClusterResultBackend,
|
|
4
6
|
RedisAsyncResultBackend,
|
|
5
7
|
RedisAsyncSentinelResultBackend,
|
|
6
8
|
)
|
|
7
|
-
from taskiq_redis.redis_broker import ListQueueBroker, PubSubBroker
|
|
8
|
-
from taskiq_redis.redis_cluster_broker import
|
|
9
|
+
from taskiq_redis.redis_broker import ListQueueBroker, PubSubBroker, RedisStreamBroker
|
|
10
|
+
from taskiq_redis.redis_cluster_broker import (
|
|
11
|
+
ListQueueClusterBroker,
|
|
12
|
+
RedisStreamClusterBroker,
|
|
13
|
+
)
|
|
9
14
|
from taskiq_redis.redis_sentinel_broker import (
|
|
10
15
|
ListQueueSentinelBroker,
|
|
11
16
|
PubSubSentinelBroker,
|
|
17
|
+
RedisStreamSentinelBroker,
|
|
12
18
|
)
|
|
13
19
|
from taskiq_redis.schedule_source import (
|
|
14
20
|
RedisClusterScheduleSource,
|
|
@@ -17,15 +23,19 @@ from taskiq_redis.schedule_source import (
|
|
|
17
23
|
)
|
|
18
24
|
|
|
19
25
|
__all__ = [
|
|
20
|
-
"RedisAsyncClusterResultBackend",
|
|
21
|
-
"RedisAsyncResultBackend",
|
|
22
|
-
"RedisAsyncSentinelResultBackend",
|
|
23
26
|
"ListQueueBroker",
|
|
24
|
-
"PubSubBroker",
|
|
25
27
|
"ListQueueClusterBroker",
|
|
26
28
|
"ListQueueSentinelBroker",
|
|
29
|
+
"ListRedisScheduleSource",
|
|
30
|
+
"PubSubBroker",
|
|
27
31
|
"PubSubSentinelBroker",
|
|
28
|
-
"
|
|
32
|
+
"RedisAsyncClusterResultBackend",
|
|
33
|
+
"RedisAsyncResultBackend",
|
|
34
|
+
"RedisAsyncSentinelResultBackend",
|
|
29
35
|
"RedisClusterScheduleSource",
|
|
36
|
+
"RedisScheduleSource",
|
|
30
37
|
"RedisSentinelScheduleSource",
|
|
38
|
+
"RedisStreamBroker",
|
|
39
|
+
"RedisStreamClusterBroker",
|
|
40
|
+
"RedisStreamSentinelBroker",
|
|
31
41
|
]
|
|
@@ -8,10 +8,16 @@ class TaskIQRedisError(TaskiqError):
|
|
|
8
8
|
class DuplicateExpireTimeSelectedError(ResultBackendError, TaskIQRedisError):
|
|
9
9
|
"""Error if two lifetimes are selected."""
|
|
10
10
|
|
|
11
|
+
__template__ = "Choose either result_ex_time or result_px_time."
|
|
12
|
+
|
|
11
13
|
|
|
12
14
|
class ExpireTimeMustBeMoreThanZeroError(ResultBackendError, TaskIQRedisError):
|
|
13
15
|
"""Error if two lifetimes are less or equal zero."""
|
|
14
16
|
|
|
17
|
+
__template__ = (
|
|
18
|
+
"You must select one expire time param and it must be more than zero."
|
|
19
|
+
)
|
|
20
|
+
|
|
15
21
|
|
|
16
22
|
class ResultIsMissingError(TaskIQRedisError, ResultGetError):
|
|
17
23
|
"""Error if there is no result when trying to get it."""
|