taskiq-redis 1.0.4__tar.gz → 1.0.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/PKG-INFO +1 -1
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/pyproject.toml +1 -1
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/list_schedule_source.py +5 -5
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/redis_broker.py +43 -2
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/LICENSE +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/README.md +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/__init__.py +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/exceptions.py +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/py.typed +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/redis_backend.py +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/redis_cluster_broker.py +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/redis_sentinel_broker.py +0 -0
- {taskiq_redis-1.0.4 → taskiq_redis-1.0.6}/taskiq_redis/schedule_source.py +0 -0
taskiq_redis/list_schedule_source.py

@@ -13,7 +13,7 @@ logger = getLogger("taskiq.redis_schedule_source")


 class ListRedisScheduleSource(ScheduleSource):
-    """
+    """Schedule source based on arrays."""

     def __init__(
         self,
@@ -21,13 +21,13 @@ class ListRedisScheduleSource(ScheduleSource):
         prefix: str = "schedule",
         max_connection_pool_size: Optional[int] = None,
         serializer: Optional[TaskiqSerializer] = None,
-
+        buffer_size: int = 50,
         skip_past_schedules: bool = False,
         **connection_kwargs: Any,
     ) -> None:
         super().__init__()
         self._prefix = prefix
-        self._buffer_size =
+        self._buffer_size = buffer_size
         self._connection_pool = BlockingConnectionPool.from_url(
             url=url,
             max_connections=max_connection_pool_size,
@@ -185,11 +185,11 @@ class ListRedisScheduleSource(ScheduleSource):
         async with Redis(connection_pool=self._connection_pool) as redis:
             buffer = []
             crons = await redis.lrange(self._get_cron_key(), 0, -1)  # type: ignore
-            logger.debug("Got cron
+            logger.debug("Got %d cron schedules", len(crons))
             if crons:
                 buffer.extend(crons)
             timed.extend(await redis.lrange(self._get_time_key(current_time), 0, -1))  # type: ignore
-            logger.debug("Got timed
+            logger.debug("Got %d timed schedules", len(timed))
             if timed:
                 buffer.extend(timed)
             while buffer:
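Taken together, the list_schedule_source.py changes expose the read buffer size as a constructor argument (buffer_size, default 50) and make the debug logs report counts instead of dumping the raw entries. A minimal usage sketch, assuming a local Redis and a standard taskiq scheduler setup; the URL and the paired broker are illustrative, not part of the diff:

from taskiq import TaskiqScheduler
from taskiq_redis import ListRedisScheduleSource, RedisStreamBroker

broker = RedisStreamBroker(url="redis://localhost:6379")

# buffer_size is the knob introduced in this release: how many schedule
# entries the source pops from the Redis list per round trip.
source = ListRedisScheduleSource(
    url="redis://localhost:6379",
    prefix="schedule",
    buffer_size=50,  # the default; raise it to cut round trips on large schedules
)

scheduler = TaskiqScheduler(broker=broker, sources=[source])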
taskiq_redis/redis_broker.py

@@ -164,7 +164,10 @@ class RedisStreamBroker(BaseRedisBroker):
         consumer_name: Optional[str] = None,
         consumer_id: str = "$",
         mkstream: bool = True,
-        xread_block: int =
+        xread_block: int = 2000,
+        maxlen: Optional[int] = None,
+        idle_timeout: int = 600000,  # 10 minutes
+        unacknowledged_batch_size: int = 100,
         additional_streams: Optional[Dict[str, str]] = None,
         **connection_kwargs: Any,
     ) -> None:
@@ -184,8 +187,12 @@ class RedisStreamBroker(BaseRedisBroker):
         :param mkstream: create stream if it does not exist.
         :param xread_block: block time in ms for xreadgroup.
             Better to set it to a bigger value, to avoid unnecessary calls.
+        :param maxlen: sets the maximum length of the stream
+            trims (the old values of) the stream each time a new element is added
         :param additional_streams: additional streams to read from.
             Each key is a stream name, value is a consumer id.
+        :param redeliver_timeout: time in ms to wait before redelivering a message.
+        :param unacknowledged_batch_size: number of unacknowledged messages to fetch.
         """
         super().__init__(
             url,
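A hedged sketch of constructing the broker with the changed xread_block default and the three new keyword arguments, using the defaults visible in the signature above; the URL is a placeholder, and maxlen is shown non-None so the trimming behavior is actually reachable:

from taskiq_redis import RedisStreamBroker

broker = RedisStreamBroker(
    url="redis://localhost:6379",
    xread_block=2000,               # ms XREADGROUP blocks waiting for messages
    maxlen=10_000,                  # trim the stream on XADD; None disables trimming
    idle_timeout=600_000,           # ms a delivered-but-unacked message may sit idle
    unacknowledged_batch_size=100,  # pending messages reclaimed per XAUTOCLAIM call
)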
@@ -200,7 +207,10 @@ class RedisStreamBroker(BaseRedisBroker):
         self.consumer_id = consumer_id
         self.mkstream = mkstream
         self.block = xread_block
+        self.maxlen = maxlen
         self.additional_streams = additional_streams or {}
+        self.idle_timeout = idle_timeout
+        self.unacknowledged_batch_size = unacknowledged_batch_size

     async def _declare_consumer_group(self) -> None:
         """
@@ -235,7 +245,11 @@ class RedisStreamBroker(BaseRedisBroker):
         :param message: message to append.
         """
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
-            await redis_conn.xadd(
+            await redis_conn.xadd(
+                self.queue_name,
+                {b"data": message.message},
+                maxlen=self.maxlen,
+            )

     def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
         async def _ack() -> None:
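The rewritten xadd call above is the consumer of the new maxlen attribute: it is passed straight through to XADD, so Redis trims old entries as new ones arrive. An isolated redis-py sketch of the same call shape; the stream key and payload are illustrative:

from redis.asyncio import Redis

async def publish(redis: Redis) -> None:
    # With maxlen set, Redis keeps the stream near that length,
    # discarding the oldest entries; maxlen=None appends without trimming.
    await redis.xadd(
        "taskiq",               # stream key (illustrative)
        {b"data": b"payload"},  # same field layout the broker writes
        maxlen=10_000,
    )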
@@ -252,6 +266,7 @@ class RedisStreamBroker(BaseRedisBroker):
         """Listen to incoming messages."""
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
             while True:
+                logger.debug("Starting fetching new messages")
                 fetched = await redis_conn.xreadgroup(
                     self.consumer_group_name,
                     self.consumer_name,
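For orientation, XREADGROUP with the special ">" id hands each message to exactly one consumer in the group and returns a per-stream batch. A hedged, self-contained sketch of consuming its reply; group, consumer, and stream names are illustrative:

from redis.asyncio import Redis

async def read_new(redis_conn: Redis) -> None:
    # Reply shape: [(stream_name, [(message_id, {field: value}), ...]), ...]
    fetched = await redis_conn.xreadgroup(
        "taskiq",         # consumer group name (illustrative)
        "worker-1",       # consumer name (illustrative)
        {"taskiq": ">"},  # ">" = only messages never delivered to this group
        block=2000,       # ms to block when idle, i.e. the xread_block setting
    )
    for _stream, messages in fetched:
        for message_id, fields in messages:
            print(message_id, fields[b"data"])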
@@ -269,3 +284,29 @@ class RedisStreamBroker(BaseRedisBroker):
                             data=msg[b"data"],
                             ack=self._ack_generator(msg_id),
                         )
+                logger.debug("Starting fetching unacknowledged messages")
+                for stream in [self.queue_name, *self.additional_streams.keys()]:
+                    lock = redis_conn.lock(
+                        f"autoclaim:{self.consumer_group_name}:{stream}",
+                    )
+                    if await lock.locked():
+                        continue
+                    async with lock:
+                        pending = await redis_conn.xautoclaim(
+                            name=stream,
+                            groupname=self.consumer_group_name,
+                            consumername=self.consumer_name,
+                            min_idle_time=self.idle_timeout,
+                            count=self.unacknowledged_batch_size,
+                        )
+                        logger.debug(
+                            "Found %d pending messages in stream %s",
+                            len(pending),
+                            stream,
+                        )
+                        for msg_id, msg in pending[1]:
+                            logger.debug("Received message: %s", msg)
+                            yield AckableMessage(
+                                data=msg[b"data"],
+                                ack=self._ack_generator(msg_id),
+                            )
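This new tail of the listen loop is what makes idle_timeout and unacknowledged_batch_size effective: on every pass, one worker per stream (serialized by the Redis lock) reclaims messages whose acknowledgement never arrived, so work from crashed consumers is redelivered. A hedged sketch of the XAUTOCLAIM call in isolation; key, group, and consumer names are illustrative, and the reply indexing matches the pending[1] access above:

from redis.asyncio import Redis

async def reclaim(redis_conn: Redis) -> None:
    # Reply shape: (next_cursor, reclaimed_messages, ...); the broker reads
    # index 1, the list of (message_id, {field: value}) pairs.
    result = await redis_conn.xautoclaim(
        name="taskiq",            # stream key (illustrative)
        groupname="taskiq",       # consumer group (illustrative)
        consumername="worker-1",  # reclaimed messages become owned by this consumer
        min_idle_time=600_000,    # only entries pending at least this many ms
        count=100,                # reclaim at most this many per call
    )
    for message_id, fields in result[1]:
        print("redelivered:", message_id, fields[b"data"])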