taskiq-redis 1.0.2__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- taskiq_redis/__init__.py +17 -7
- taskiq_redis/exceptions.py +6 -0
- taskiq_redis/list_schedule_source.py +229 -0
- taskiq_redis/redis_backend.py +57 -45
- taskiq_redis/redis_broker.py +142 -5
- taskiq_redis/redis_cluster_broker.py +127 -6
- taskiq_redis/redis_sentinel_broker.py +131 -4
- taskiq_redis/schedule_source.py +9 -9
- taskiq_redis-1.0.4.dist-info/METADATA +215 -0
- taskiq_redis-1.0.4.dist-info/RECORD +13 -0
- {taskiq_redis-1.0.2.dist-info → taskiq_redis-1.0.4.dist-info}/WHEEL +1 -1
- taskiq_redis-1.0.2.dist-info/METADATA +0 -125
- taskiq_redis-1.0.2.dist-info/RECORD +0 -12
- {taskiq_redis-1.0.2.dist-info → taskiq_redis-1.0.4.dist-info}/LICENSE +0 -0
taskiq_redis/redis_broker.py
CHANGED
@@ -1,8 +1,19 @@
 import sys
+import uuid
 from logging import getLogger
-from typing import
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncGenerator,
+    Awaitable,
+    Callable,
+    Dict,
+    Optional,
+    TypeVar,
+)

-from redis.asyncio import BlockingConnectionPool, Connection, Redis
+from redis.asyncio import BlockingConnectionPool, Connection, Redis, ResponseError
+from taskiq import AckableMessage
 from taskiq.abc.broker import AsyncBroker
 from taskiq.abc.result_backend import AsyncResultBackend
 from taskiq.message import BrokerMessage
@@ -17,7 +28,7 @@ else:
     from typing_extensions import TypeAlias

 if TYPE_CHECKING:
-    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]
+    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]  # type: ignore
 else:
     _BlockingConnectionPool: TypeAlias = BlockingConnectionPool

@@ -111,7 +122,7 @@ class ListQueueBroker(BaseRedisBroker):
         """
         queue_name = message.labels.get("queue_name") or self.queue_name
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
-            await redis_conn.lpush(queue_name, message.message)
+            await redis_conn.lpush(queue_name, message.message)  # type: ignore

     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -126,9 +137,135 @@ class ListQueueBroker(BaseRedisBroker):
         while True:
             try:
                 async with Redis(connection_pool=self.connection_pool) as redis_conn:
-                    yield (await redis_conn.brpop(self.queue_name))[
+                    yield (await redis_conn.brpop(self.queue_name))[  # type: ignore
                         redis_brpop_data_position
                     ]
             except ConnectionError as exc:
                 logger.warning("Redis connection error: %s", exc)
                 continue
+
+
+class RedisStreamBroker(BaseRedisBroker):
+    """
+    Redis broker that uses streams for task distribution.
+
+    You can read more about streams here:
+    https://redis.io/docs/latest/develop/data-types/streams
+
+    This broker supports acknowledgment of messages.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        queue_name: str = "taskiq",
+        max_connection_pool_size: Optional[int] = None,
+        consumer_group_name: str = "taskiq",
+        consumer_name: Optional[str] = None,
+        consumer_id: str = "$",
+        mkstream: bool = True,
+        xread_block: int = 10000,
+        additional_streams: Optional[Dict[str, str]] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        """
+        Constructs a new broker that uses streams.
+
+        :param url: url to redis.
+        :param queue_name: name for a key with stream in redis.
+        :param max_connection_pool_size: maximum number of connections in pool.
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param consumer_group_name: name for a consumer group.
+            Redis will keep track of acked messages for this group.
+        :param consumer_name: name for a consumer. By default it is a random uuid.
+        :param consumer_id: id for a consumer. ID of a message to start reading from.
+            $ means start from the latest message.
+        :param mkstream: create stream if it does not exist.
+        :param xread_block: block time in ms for xreadgroup.
+            Better to set it to a bigger value, to avoid unnecessary calls.
+        :param additional_streams: additional streams to read from.
+            Each key is a stream name, value is a consumer id.
+        """
+        super().__init__(
+            url,
+            task_id_generator=None,
+            result_backend=None,
+            queue_name=queue_name,
+            max_connection_pool_size=max_connection_pool_size,
+            **connection_kwargs,
+        )
+        self.consumer_group_name = consumer_group_name
+        self.consumer_name = consumer_name or str(uuid.uuid4())
+        self.consumer_id = consumer_id
+        self.mkstream = mkstream
+        self.block = xread_block
+        self.additional_streams = additional_streams or {}
+
+    async def _declare_consumer_group(self) -> None:
+        """
+        Declare consumber group.
+
+        Required for proper work of the broker.
+        """
+        streams = {self.queue_name, *self.additional_streams.keys()}
+        async with Redis(connection_pool=self.connection_pool) as redis_conn:
+            for stream_name in streams:
+                try:
+                    await redis_conn.xgroup_create(
+                        stream_name,
+                        self.consumer_group_name,
+                        id=self.consumer_id,
+                        mkstream=self.mkstream,
+                    )
+                except ResponseError as err:
+                    logger.debug(err)
+
+    async def startup(self) -> None:
+        """Declare consumer group on startup."""
+        await super().startup()
+        await self._declare_consumer_group()
+
+    async def kick(self, message: BrokerMessage) -> None:
+        """
+        Put a message in a list.
+
+        This method appends a message to the list of all messages.
+
+        :param message: message to append.
+        """
+        async with Redis(connection_pool=self.connection_pool) as redis_conn:
+            await redis_conn.xadd(self.queue_name, {b"data": message.message})
+
+    def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
+        async def _ack() -> None:
+            async with Redis(connection_pool=self.connection_pool) as redis_conn:
+                await redis_conn.xack(
+                    self.queue_name,
+                    self.consumer_group_name,
+                    id,
+                )
+
+        return _ack
+
+    async def listen(self) -> AsyncGenerator[AckableMessage, None]:
+        """Listen to incoming messages."""
+        async with Redis(connection_pool=self.connection_pool) as redis_conn:
+            while True:
+                fetched = await redis_conn.xreadgroup(
+                    self.consumer_group_name,
+                    self.consumer_name,
+                    {
+                        self.queue_name: ">",
+                        **self.additional_streams,  # type: ignore
+                    },
+                    block=self.block,
+                    noack=False,
+                )
+                for _, msg_list in fetched:
+                    for msg_id, msg in msg_list:
+                        logger.debug("Received message: %s", msg)
+                        yield AckableMessage(
+                            data=msg[b"data"],
+                            ack=self._ack_generator(msg_id),
+                        )
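
For orientation, here is a minimal usage sketch of the new RedisStreamBroker. The Redis URL and the task body are illustrative assumptions, and the top-level import assumes the __init__.py changes in this release re-export the new broker:

    # Hypothetical wiring for the new stream broker; URL and task are examples.
    from taskiq_redis import RedisStreamBroker

    broker = RedisStreamBroker(
        url="redis://localhost:6379",
        queue_name="taskiq",  # Redis key that holds the stream
        consumer_group_name="taskiq",  # group that tracks acknowledged messages
        xread_block=10000,  # each XREADGROUP call blocks for up to 10 seconds
    )

    @broker.task
    async def add_one(value: int) -> int:
        return value + 1

Because listen() yields AckableMessage objects, a message is XACKed only after the worker finishes processing it; unacknowledged messages remain in the consumer group's pending list.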

taskiq_redis/redis_cluster_broker.py
CHANGED

@@ -1,9 +1,14 @@
-from typing import
+import uuid
+from logging import getLogger
+from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, Optional

-from redis.asyncio import RedisCluster
+from redis.asyncio import RedisCluster, ResponseError
+from taskiq import AckableMessage
 from taskiq.abc.broker import AsyncBroker
 from taskiq.message import BrokerMessage

+logger = getLogger(__name__)
+

 class BaseRedisClusterBroker(AsyncBroker):
     """Base broker that works with Redis Cluster."""
@@ -25,7 +30,7 @@ class BaseRedisClusterBroker(AsyncBroker):
         """
         super().__init__()

-        self.redis: RedisCluster[bytes] = RedisCluster.from_url(
+        self.redis: "RedisCluster[bytes]" = RedisCluster.from_url(  # type: ignore
            url=url,
            max_connections=max_connection_pool_size,
            **connection_kwargs,
@@ -35,7 +40,7 @@ class BaseRedisClusterBroker(AsyncBroker):

     async def shutdown(self) -> None:
         """Closes redis connection pool."""
-        await self.redis.aclose()
+        await self.redis.aclose()
         await super().shutdown()


@@ -50,7 +55,7 @@ class ListQueueClusterBroker(BaseRedisClusterBroker):

         :param message: message to append.
         """
-        await self.redis.lpush(self.queue_name, message.message)  # type: ignore
+        await self.redis.lpush(self.queue_name, message.message)  # type: ignore

     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -63,5 +68,121 @@ class ListQueueClusterBroker(BaseRedisClusterBroker):
         """
         redis_brpop_data_position = 1
         while True:
-            value = await self.redis.brpop([self.queue_name])  # type: ignore
+            value = await self.redis.brpop([self.queue_name])  # type: ignore
             yield value[redis_brpop_data_position]
+
+
+class RedisStreamClusterBroker(BaseRedisClusterBroker):
+    """
+    Redis broker that uses streams for task distribution.
+
+    You can read more about streams here:
+    https://redis.io/docs/latest/develop/data-types/streams
+
+    This broker supports acknowledgment of messages.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        queue_name: str = "taskiq",
+        max_connection_pool_size: int = 2**31,
+        consumer_group_name: str = "taskiq",
+        consumer_name: Optional[str] = None,
+        consumer_id: str = "$",
+        mkstream: bool = True,
+        xread_block: int = 10000,
+        additional_streams: Optional[Dict[str, str]] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        """
+        Constructs a new broker that uses streams.
+
+        :param url: url to redis.
+        :param queue_name: name for a key with stream in redis.
+        :param max_connection_pool_size: maximum number of connections in pool.
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param consumer_group_name: name for a consumer group.
+            Redis will keep track of acked messages for this group.
+        :param consumer_name: name for a consumer. By default it is a random uuid.
+        :param consumer_id: id for a consumer. ID of a message to start reading from.
+            $ means start from the latest message.
+        :param mkstream: create stream if it does not exist.
+        :param xread_block: block time in ms for xreadgroup.
+            Better to set it to a bigger value, to avoid unnecessary calls.
+        :param additional_streams: additional streams to read from.
+            Each key is a stream name, value is a consumer id.
+        """
+        super().__init__(
+            url,
+            queue_name=queue_name,
+            max_connection_pool_size=max_connection_pool_size,
+            **connection_kwargs,
+        )
+        self.consumer_group_name = consumer_group_name
+        self.consumer_name = consumer_name or str(uuid.uuid4())
+        self.consumer_id = consumer_id
+        self.mkstream = mkstream
+        self.block = xread_block
+        self.additional_streams = additional_streams or {}
+
+    async def _declare_consumer_group(self) -> None:
+        streams = {self.queue_name, *self.additional_streams.keys()}
+        async with self.redis as redis_conn:
+            for stream_name in streams:
+                try:
+                    await redis_conn.xgroup_create(
+                        stream_name,
+                        self.consumer_group_name,
+                        id=self.consumer_id,
+                        mkstream=self.mkstream,
+                    )
+                except ResponseError as err:
+                    logger.debug(err)
+
+    async def startup(self) -> None:
+        """Declare consumer group on startup."""
+        await super().startup()
+        await self._declare_consumer_group()
+
+    async def kick(self, message: BrokerMessage) -> None:
+        """
+        Put a message in a list.
+
+        This method appends a message to the list of all messages.
+
+        :param message: message to append.
+        """
+        await self.redis.xadd(self.queue_name, {b"data": message.message})
+
+    def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
+        async def _ack() -> None:
+            await self.redis.xack(
+                self.queue_name,
+                self.consumer_group_name,
+                id,
+            )
+
+        return _ack
+
+    async def listen(self) -> AsyncGenerator[AckableMessage, None]:
+        """Listen to the stream for new messages."""
+        while True:
+            fetched = await self.redis.xreadgroup(
+                self.consumer_group_name,
+                self.consumer_name,
+                {
+                    self.queue_name: ">",
+                    **self.additional_streams,  # type: ignore
+                },
+                block=self.block,
+                noack=False,
+            )
+            for _, msg_list in fetched:
+                for msg_id, msg in msg_list:
+                    logger.debug("Received message: %s", msg)
+                    yield AckableMessage(
+                        data=msg[b"data"],
+                        ack=self._ack_generator(msg_id),
+                    )
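
The cluster variant exposes the same stream semantics on top of RedisCluster. A minimal sketch, assuming RedisStreamClusterBroker is importable from the package root; the node URL is an example:

    # Hypothetical setup for the cluster stream broker; the URL is one cluster node.
    from taskiq_redis import RedisStreamClusterBroker

    broker = RedisStreamClusterBroker(
        url="redis://localhost:7000",
        consumer_name="worker-1",  # defaults to a random uuid4 when omitted
        additional_streams={"extra-events": "$"},  # stream name -> start id
    )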

taskiq_redis/redis_sentinel_broker.py
CHANGED

@@ -1,4 +1,5 @@
 import sys
+import uuid
 from contextlib import asynccontextmanager
 from logging import getLogger
 from typing import (
@@ -6,15 +7,18 @@ from typing import (
     Any,
     AsyncGenerator,
     AsyncIterator,
+    Awaitable,
     Callable,
+    Dict,
     List,
     Optional,
     Tuple,
     TypeVar,
 )

+from redis import ResponseError
 from redis.asyncio import Redis, Sentinel
-from taskiq import AsyncResultBackend, BrokerMessage
+from taskiq import AckableMessage, AsyncResultBackend, BrokerMessage
 from taskiq.abc.broker import AsyncBroker

 if sys.version_info >= (3, 10):
@@ -23,7 +27,7 @@ else:
     from typing_extensions import TypeAlias

 if TYPE_CHECKING:
-    _Redis: TypeAlias = Redis[bytes]
+    _Redis: TypeAlias = Redis[bytes]  # type: ignore
 else:
     _Redis: TypeAlias = Redis

@@ -113,7 +117,7 @@ class ListQueueSentinelBroker(BaseSentinelBroker):
         """
         queue_name = message.labels.get("queue_name") or self.queue_name
         async with self._acquire_master_conn() as redis_conn:
-            await redis_conn.lpush(queue_name, message.message)
+            await redis_conn.lpush(queue_name, message.message)  # type: ignore

     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -127,6 +131,129 @@ class ListQueueSentinelBroker(BaseSentinelBroker):
         redis_brpop_data_position = 1
         async with self._acquire_master_conn() as redis_conn:
             while True:
-                yield (await redis_conn.brpop(self.queue_name))[
+                yield (await redis_conn.brpop(self.queue_name))[  # type: ignore
                     redis_brpop_data_position
                 ]
+
+
+class RedisStreamSentinelBroker(BaseSentinelBroker):
+    """
+    Redis broker that uses streams for task distribution.
+
+    You can read more about streams here:
+    https://redis.io/docs/latest/develop/data-types/streams
+
+    This broker supports acknowledgment of messages.
+    """
+
+    def __init__(
+        self,
+        sentinels: List[Tuple[str, int]],
+        master_name: str,
+        min_other_sentinels: int = 0,
+        queue_name: str = "taskiq",
+        consumer_group_name: str = "taskiq",
+        consumer_name: Optional[str] = None,
+        consumer_id: str = "$",
+        mkstream: bool = True,
+        xread_block: int = 10000,
+        additional_streams: Optional[Dict[str, str]] = None,
+        **connection_kwargs: Any,
+    ) -> None:
+        """
+        Constructs a new broker that uses streams.
+
+        :param sentinels: list of nodes to connect to.
+        :param queue_name: name for a key with stream in redis.
+        :param max_connection_pool_size: maximum number of connections in pool.
+            Each worker opens its own connection. Therefore this value has to be
+            at least number of workers + 1.
+        :param consumer_group_name: name for a consumer group.
+            Redis will keep track of acked messages for this group.
+        :param consumer_name: name for a consumer. By default it is a random uuid.
+        :param consumer_id: id for a consumer. ID of a message to start reading from.
+            $ means start from the latest message.
+        :param mkstream: create stream if it does not exist.
+        :param xread_block: block time in ms for xreadgroup.
+            Better to set it to a bigger value, to avoid unnecessary calls.
+        :param additional_streams: additional streams to read from.
+            Each key is a stream name, value is a consumer id.
+        """
+        super().__init__(
+            sentinels=sentinels,
+            master_name=master_name,
+            min_other_sentinels=min_other_sentinels,
+            task_id_generator=None,
+            result_backend=None,
+            queue_name=queue_name,
+            **connection_kwargs,
+        )
+        self.consumer_group_name = consumer_group_name
+        self.consumer_name = consumer_name or str(uuid.uuid4())
+        self.consumer_id = consumer_id
+        self.mkstream = mkstream
+        self.block = xread_block
+        self.additional_streams = additional_streams or {}
+
+    async def _declare_consumer_group(self) -> None:
+        streams = {self.queue_name, *self.additional_streams.keys()}
+        async with self._acquire_master_conn() as redis_conn:
+            for stream_name in streams:
+                try:
+                    await redis_conn.xgroup_create(
+                        stream_name,
+                        self.consumer_group_name,
+                        id=self.consumer_id,
+                        mkstream=self.mkstream,
+                    )
+                except ResponseError as err:
+                    logger.debug(err)
+
+    async def startup(self) -> None:
+        """Declare consumer group on startup."""
+        await super().startup()
+        await self._declare_consumer_group()
+
+    async def kick(self, message: BrokerMessage) -> None:
+        """
+        Put a message in a list.
+
+        This method appends a message to the list of all messages.
+
+        :param message: message to append.
+        """
+        async with self._acquire_master_conn() as redis_conn:
+            await redis_conn.xadd(self.queue_name, {b"data": message.message})
+
+    def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
+        async def _ack() -> None:
+            async with self._acquire_master_conn() as redis_conn:
+                await redis_conn.xack(
+                    self.queue_name,
+                    self.consumer_group_name,
+                    id,
+                )
+
+        return _ack
+
+    async def listen(self) -> AsyncGenerator[AckableMessage, None]:
+        """Listen to the stream for new messages."""
+        async with self._acquire_master_conn() as redis_conn:
+            while True:
+                fetched = await redis_conn.xreadgroup(
+                    self.consumer_group_name,
+                    self.consumer_name,
+                    {
+                        self.queue_name: ">",
+                        **self.additional_streams,  # type: ignore
+                    },
+                    block=self.block,
+                    noack=False,
+                )
+                for _, msg_list in fetched:
+                    for msg_id, msg in msg_list:
+                        logger.debug("Received message: %s", msg)
+                        yield AckableMessage(
+                            data=msg[b"data"],
+                            ack=self._ack_generator(msg_id),
+                        )
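
The sentinel variant takes a list of sentinel nodes and a master name instead of a single URL. A minimal sketch with example hosts:

    # Hypothetical setup for the sentinel stream broker; hosts and master name are examples.
    from taskiq_redis import RedisStreamSentinelBroker

    broker = RedisStreamSentinelBroker(
        sentinels=[("localhost", 26379), ("localhost", 26380)],
        master_name="mymaster",
        consumer_id="$",  # the consumer group starts reading at the latest message
    )
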
taskiq_redis/schedule_source.py
CHANGED
@@ -21,8 +21,8 @@ else:
     from typing_extensions import TypeAlias

 if TYPE_CHECKING:
-    _Redis: TypeAlias = Redis[bytes]
-    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]
+    _Redis: TypeAlias = Redis[bytes]  # type: ignore
+    _BlockingConnectionPool: TypeAlias = BlockingConnectionPool[Connection]  # type: ignore
 else:
     _Redis: TypeAlias = Redis
     _BlockingConnectionPool: TypeAlias = BlockingConnectionPool
@@ -140,7 +140,7 @@ class RedisClusterScheduleSource(ScheduleSource):
         **connection_kwargs: Any,
     ) -> None:
         self.prefix = prefix
-        self.redis: RedisCluster = RedisCluster.from_url(
+        self.redis: "RedisCluster" = RedisCluster.from_url(
             url,
             **connection_kwargs,
         )
@@ -150,7 +150,7 @@ class RedisClusterScheduleSource(ScheduleSource):

     async def delete_schedule(self, schedule_id: str) -> None:
         """Remove schedule by id."""
-        await self.redis.delete(f"{self.prefix}:{schedule_id}")
+        await self.redis.delete(f"{self.prefix}:{schedule_id}")

     async def add_schedule(self, schedule: ScheduledTask) -> None:
         """
@@ -159,7 +159,7 @@ class RedisClusterScheduleSource(ScheduleSource):
         :param schedule: schedule to add.
         :param schedule_id: schedule id.
         """
-        await self.redis.set(
+        await self.redis.set(
             f"{self.prefix}:{schedule.schedule_id}",
             self.serializer.dumpb(model_dump(schedule)),
         )
@@ -173,8 +173,8 @@ class RedisClusterScheduleSource(ScheduleSource):
         :return: list of schedules.
         """
         schedules = []
-        async for key in self.redis.scan_iter(f"{self.prefix}:*"):
-            raw_schedule = await self.redis.get(key)
+        async for key in self.redis.scan_iter(f"{self.prefix}:*"):
+            raw_schedule = await self.redis.get(key)
             parsed_schedule = model_validate(
                 ScheduledTask,
                 self.serializer.loadb(raw_schedule),
@@ -189,7 +189,7 @@ class RedisClusterScheduleSource(ScheduleSource):

     async def shutdown(self) -> None:
         """Shut down the schedule source."""
-        await self.redis.aclose()
+        await self.redis.aclose()


 class RedisSentinelScheduleSource(ScheduleSource):
@@ -288,4 +288,4 @@ class RedisSentinelScheduleSource(ScheduleSource):
     async def shutdown(self) -> None:
         """Shut down the schedule source."""
         for sentinel in self.sentinel.sentinels:
-            await sentinel.aclose()
+            await sentinel.aclose()