pydocket 0.4.0__tar.gz → 0.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- {pydocket-0.4.0 → pydocket-0.5.0}/PKG-INFO +1 -1
- {pydocket-0.4.0 → pydocket-0.5.0}/chaos/driver.py +7 -11
- {pydocket-0.4.0 → pydocket-0.5.0}/chaos/producer.py +10 -1
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/cli.py +25 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/worker.py +124 -72
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/conftest.py +3 -1
- {pydocket-0.4.0 → pydocket-0.5.0}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.github/codecov.yml +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.github/workflows/ci.yml +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.github/workflows/publish.yml +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.gitignore +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/.pre-commit-config.yaml +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/LICENSE +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/README.md +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/chaos/README.md +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/chaos/__init__.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/chaos/run +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/chaos/tasks.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/pyproject.toml +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/__init__.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/__main__.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/annotations.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/dependencies.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/docket.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/execution.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/instrumentation.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/py.typed +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/src/docket/tasks.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/telemetry/.gitignore +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/telemetry/start +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/telemetry/stop +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/__init__.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/__init__.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/conftest.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_module.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_striking.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_version.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_worker.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/cli/test_workers.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/test_dependencies.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/test_docket.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/test_fundamentals.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/test_instrumentation.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/test_striking.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/tests/test_worker.py +0 -0
- {pydocket-0.4.0 → pydocket-0.5.0}/uv.lock +0 -0
{pydocket-0.4.0 → pydocket-0.5.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.4.0
+Version: 0.5.0
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues

{pydocket-0.4.0 → pydocket-0.5.0}/chaos/driver.py

@@ -76,9 +76,9 @@ async def run_redis(version: str) -> AsyncGenerator[tuple[str, Container], None]
 
 async def main(
     mode: Literal["performance", "chaos"] = "chaos",
-    tasks: int =
-    producers: int =
-    workers: int =
+    tasks: int = 20000,
+    producers: int = 5,
+    workers: int = 10,
 ):
     async with (
         run_redis("7.4.2") as (redis_url, redis_container),
@@ -97,9 +97,7 @@ async def main(
         # Add in some random strikes to performance test
         for _ in range(100):
             parameter = f"param_{random.randint(1, 100)}"
-            operator
-            ["==", "!=", ">", ">=", "<", "<=", "between"]
-            )
+            operator = random.choice(list(Operator))
             value = f"val_{random.randint(1, 1000)}"
             await docket.strike("rando", parameter, operator, value)
 
@@ -141,11 +139,9 @@
             redis_url,
             "--tasks",
             "chaos.tasks:chaos_tasks",
-
-
-
-            "DOCKET_WORKER_REDELIVERY_TIMEOUT": "5s",
-            },
+            "--redelivery-timeout",
+            "5s",
+            env=environment | {"OTEL_SERVICE_NAME": "chaos-worker"},
             stdout=subprocess.DEVNULL,
             stderr=subprocess.DEVNULL,
         )

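For reference, a minimal self-contained sketch of the strike pattern the chaos driver now uses. It assumes Operator is importable from the docket package alongside Docket and that a Redis server is reachable; the docket name and URL are placeholders.

import asyncio
import random

from docket import Docket, Operator  # assumption: Operator is exported at the package root


async def add_random_strikes() -> None:
    # Placeholder name and URL; in the chaos harness these come from the environment.
    async with Docket(name="chaos", url="redis://localhost:6379/0") as docket:
        for _ in range(10):
            parameter = f"param_{random.randint(1, 100)}"
            operator = random.choice(list(Operator))
            value = f"val_{random.randint(1, 1000)}"
            # Block "rando" tasks whose `parameter` matches `operator` against `value`
            await docket.strike("rando", parameter, operator, value)


asyncio.run(add_random_strikes())
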
{pydocket-0.4.0 → pydocket-0.5.0}/chaos/producer.py

@@ -1,8 +1,11 @@
 import asyncio
+import datetime
 import logging
 import os
+import random
 import sys
 import time
+from datetime import timedelta
 
 import redis.exceptions
 
@@ -14,6 +17,10 @@ logging.getLogger().setLevel(logging.INFO)
 logger = logging.getLogger("chaos.producer")
 
 
+def now() -> datetime.datetime:
+    return datetime.datetime.now(datetime.timezone.utc)
+
+
 async def main(tasks_to_produce: int):
     docket = Docket(
         name=os.environ["DOCKET_NAME"],
@@ -25,7 +32,9 @@ async def main(tasks_to_produce: int):
     async with docket:
        async with docket.redis() as r:
            for _ in range(tasks_sent, tasks_to_produce):
-
+                jitter = 5 * ((random.random() * 2) - 1)
+                when = now() + timedelta(seconds=jitter)
+                execution = await docket.add(hello, when=when)()
                 await r.zadd("hello:sent", {execution.key: time.time()})
                 logger.info("Added task %s", execution.key)
                 tasks_sent += 1

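A minimal sketch of the scheduling pattern the producer now uses: docket.add(..., when=...) schedules a task for a future moment. The hello task and the Redis URL here are placeholders; the jitter arithmetic mirrors the diff above.

import asyncio
import random
from datetime import datetime, timedelta, timezone

from docket import Docket


async def hello() -> None:  # placeholder task; the chaos harness imports its own
    print("hello")


async def produce_one() -> None:
    async with Docket(name="chaos", url="redis://localhost:6379/0") as docket:
        jitter = 5 * ((random.random() * 2) - 1)  # anywhere from -5s to +5s
        when = datetime.now(timezone.utc) + timedelta(seconds=jitter)
        execution = await docket.add(hello, when=when)()
        print("scheduled", execution.key, "for", when)


asyncio.run(produce_one())
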
{pydocket-0.4.0 → pydocket-0.5.0}/src/docket/cli.py

@@ -162,6 +162,7 @@ def worker(
                 "This can be specified multiple times. A task collection is any "
                 "iterable of async functions."
             ),
+            envvar="DOCKET_TASKS",
         ),
     ] = ["docket.tasks:standard_tasks"],
     docket_: Annotated[
@@ -236,6 +237,14 @@ def worker(
             envvar="DOCKET_WORKER_MINIMUM_CHECK_INTERVAL",
         ),
     ] = timedelta(milliseconds=100),
+    scheduling_resolution: Annotated[
+        timedelta,
+        typer.Option(
+            parser=duration,
+            help="How frequently to check for future tasks to be scheduled",
+            envvar="DOCKET_WORKER_SCHEDULING_RESOLUTION",
+        ),
+    ] = timedelta(milliseconds=250),
     until_finished: Annotated[
         bool,
         typer.Option(
@@ -260,6 +269,7 @@ def worker(
             redelivery_timeout=redelivery_timeout,
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
+            scheduling_resolution=scheduling_resolution,
             until_finished=until_finished,
             metrics_port=metrics_port,
             tasks=tasks,
@@ -542,6 +552,18 @@ def relative_time(now: datetime, when: datetime) -> str:
 
 @app.command(help="Shows a snapshot of what's on the docket right now")
 def snapshot(
+    tasks: Annotated[
+        list[str],
+        typer.Option(
+            "--tasks",
+            help=(
+                "The dotted path of a task collection to register with the docket. "
+                "This can be specified multiple times. A task collection is any "
+                "iterable of async functions."
+            ),
+            envvar="DOCKET_TASKS",
+        ),
+    ] = ["docket.tasks:standard_tasks"],
     docket_: Annotated[
         str,
         typer.Option(
@@ -560,6 +582,9 @@ def snapshot(
 ) -> None:
     async def run() -> DocketSnapshot:
         async with Docket(name=docket_, url=url) as docket:
+            for task_path in tasks:
+                docket.register_collection(task_path)
+
             return await docket.snapshot()
 
     snapshot = asyncio.run(run())

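A hedged sketch of driving the new CLI options from the environment, following the pattern in chaos/driver.py. The DOCKET_TASKS and DOCKET_WORKER_SCHEDULING_RESOLUTION variable names come from the diff above; the "python -m docket worker" invocation and the "250ms" duration spelling are assumptions.

import os
import subprocess
import sys

env = os.environ | {
    "DOCKET_TASKS": "chaos.tasks:chaos_tasks",
    "DOCKET_WORKER_REDELIVERY_TIMEOUT": "5s",        # value used by chaos/driver.py
    "DOCKET_WORKER_SCHEDULING_RESOLUTION": "250ms",  # assumption: duration spelling
}

# assumption: the worker CLI is exposed through the package's __main__ module
subprocess.run([sys.executable, "-m", "docket", "worker"], env=env)
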
{pydocket-0.4.0 → pydocket-0.5.0}/src/docket/worker.py

@@ -18,6 +18,7 @@ from uuid import uuid4
 import redis.exceptions
 from opentelemetry import propagate, trace
 from opentelemetry.trace import Tracer
+from redis.asyncio import Redis
 
 from .docket import (
     Docket,
@@ -68,6 +69,7 @@ class Worker:
     redelivery_timeout: timedelta
     reconnection_delay: timedelta
     minimum_check_interval: timedelta
+    scheduling_resolution: timedelta
 
     def __init__(
         self,
@@ -77,6 +79,7 @@ class Worker:
         redelivery_timeout: timedelta = timedelta(minutes=5),
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=100),
+        scheduling_resolution: timedelta = timedelta(milliseconds=250),
     ) -> None:
         self.docket = docket
         self.name = name or f"worker:{uuid4()}"
@@ -84,6 +87,7 @@ class Worker:
         self.redelivery_timeout = redelivery_timeout
         self.reconnection_delay = reconnection_delay
         self.minimum_check_interval = minimum_check_interval
+        self.scheduling_resolution = scheduling_resolution
 
     async def __aenter__(self) -> Self:
         self._heartbeat_task = asyncio.create_task(self._heartbeat())
@@ -128,6 +132,7 @@ class Worker:
         redelivery_timeout: timedelta = timedelta(minutes=5),
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=100),
+        scheduling_resolution: timedelta = timedelta(milliseconds=250),
         until_finished: bool = False,
         metrics_port: int | None = None,
         tasks: list[str] = ["docket.tasks:standard_tasks"],
@@ -144,6 +149,7 @@ class Worker:
             redelivery_timeout=redelivery_timeout,
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
+            scheduling_resolution=scheduling_resolution,
         ) as worker:
             if until_finished:
                 await worker.run_until_finished()
@@ -210,57 +216,24 @@ class Worker:
             await asyncio.sleep(self.reconnection_delay.total_seconds())
 
     async def _worker_loop(self, forever: bool = False):
-
-        stream_due_tasks: _stream_due_tasks = cast(
-            _stream_due_tasks,
-            redis.register_script(
-                # Lua script to atomically move scheduled tasks to the stream
-                # KEYS[1]: queue key (sorted set)
-                # KEYS[2]: stream key
-                # ARGV[1]: current timestamp
-                # ARGV[2]: docket name prefix
-                """
-                local total_work = redis.call('ZCARD', KEYS[1])
-                local due_work = 0
-
-                if total_work > 0 then
-                    local tasks = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1])
-
-                    for i, key in ipairs(tasks) do
-                        local hash_key = ARGV[2] .. ":" .. key
-                        local task_data = redis.call('HGETALL', hash_key)
-
-                        if #task_data > 0 then
-                            local task = {}
-                            for j = 1, #task_data, 2 do
-                                task[task_data[j]] = task_data[j+1]
-                            end
-
-                            redis.call('XADD', KEYS[2], '*',
-                                'key', task['key'],
-                                'when', task['when'],
-                                'function', task['function'],
-                                'args', task['args'],
-                                'kwargs', task['kwargs'],
-                                'attempt', task['attempt']
-                            )
-                            redis.call('DEL', hash_key)
-                            due_work = due_work + 1
-                        end
-                    end
-                end
-
-                if due_work > 0 then
-                    redis.call('ZREMRANGEBYSCORE', KEYS[1], 0, ARGV[1])
-                end
+        should_stop = asyncio.Event()
 
-
-
-            )
+        async with self.docket.redis() as redis:
+            scheduler_task = asyncio.create_task(
+                self._scheduler_loop(redis, should_stop)
             )
 
             active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
 
+            async def check_for_work() -> bool:
+                async with redis.pipeline() as pipeline:
+                    pipeline.xlen(self.docket.stream_key)
+                    pipeline.zcard(self.docket.queue_key)
+                    results: list[int] = await pipeline.execute()
+                    stream_len = results[0]
+                    queue_len = results[1]
+                    return stream_len > 0 or queue_len > 0
+
             async def process_completed_tasks() -> None:
                 completed_tasks = {task for task in active_tasks if task.done()}
                 for task in completed_tasks:
@@ -280,10 +253,13 @@
                 )
                 await pipeline.execute()
 
-
+            has_work: bool = True
+
+            if not forever:  # pragma: no branch
+                has_work = await check_for_work()
 
             try:
-                while forever or
+                while forever or has_work or active_tasks:
                     await process_completed_tasks()
 
                     available_slots = self.concurrency - len(active_tasks)
@@ -297,28 +273,13 @@
                     task = asyncio.create_task(self._execute(message))
                     active_tasks[task] = message_id
 
-                        nonlocal available_slots
+                    nonlocal available_slots
                     available_slots -= 1
-                    future_work += 1
 
                 if available_slots <= 0:
                     await asyncio.sleep(self.minimum_check_interval.total_seconds())
                     continue
 
-                future_work, due_work = await stream_due_tasks(
-                    keys=[self.docket.queue_key, self.docket.stream_key],
-                    args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
-                )
-                if due_work > 0:
-                    logger.debug(
-                        "Moved %d/%d due tasks from %s to %s",
-                        due_work,
-                        future_work,
-                        self.docket.queue_key,
-                        self.docket.stream_key,
-                        extra=self._log_context(),
-                    )
-
                 redeliveries: RedisMessages
                 _, redeliveries, *_ = await redis.xautoclaim(
                     name=self.docket.stream_key,
@@ -348,10 +309,14 @@
                     ),
                     count=available_slots,
                 )
+
                 for _, messages in new_deliveries:
                     for message_id, message in messages:
                         start_task(message_id, message)
 
+                if not forever and not active_tasks and not new_deliveries:
+                    has_work = await check_for_work()
+
             except asyncio.CancelledError:
                 if active_tasks:  # pragma: no cover
                     logger.info(
@@ -364,7 +329,98 @@
             await asyncio.gather(*active_tasks, return_exceptions=True)
             await process_completed_tasks()
 
+            should_stop.set()
+            await scheduler_task
+
+    async def _scheduler_loop(
+        self,
+        redis: Redis,
+        should_stop: asyncio.Event,
+    ) -> None:
+        """Loop that moves due tasks from the queue to the stream."""
+
+        stream_due_tasks: _stream_due_tasks = cast(
+            _stream_due_tasks,
+            redis.register_script(
+                # Lua script to atomically move scheduled tasks to the stream
+                # KEYS[1]: queue key (sorted set)
+                # KEYS[2]: stream key
+                # ARGV[1]: current timestamp
+                # ARGV[2]: docket name prefix
+                """
+                local total_work = redis.call('ZCARD', KEYS[1])
+                local due_work = 0
+
+                if total_work > 0 then
+                    local tasks = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1])
+
+                    for i, key in ipairs(tasks) do
+                        local hash_key = ARGV[2] .. ":" .. key
+                        local task_data = redis.call('HGETALL', hash_key)
+
+                        if #task_data > 0 then
+                            local task = {}
+                            for j = 1, #task_data, 2 do
+                                task[task_data[j]] = task_data[j+1]
+                            end
+
+                            redis.call('XADD', KEYS[2], '*',
+                                'key', task['key'],
+                                'when', task['when'],
+                                'function', task['function'],
+                                'args', task['args'],
+                                'kwargs', task['kwargs'],
+                                'attempt', task['attempt']
+                            )
+                            redis.call('DEL', hash_key)
+                            due_work = due_work + 1
+                        end
+                    end
+                end
+
+                if due_work > 0 then
+                    redis.call('ZREMRANGEBYSCORE', KEYS[1], 0, ARGV[1])
+                end
+
+                return {total_work, due_work}
+                """
+            ),
+        )
+
+        total_work: int = sys.maxsize
+
+        while not should_stop.is_set() or total_work:
+            try:
+                total_work, due_work = await stream_due_tasks(
+                    keys=[self.docket.queue_key, self.docket.stream_key],
+                    args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
+                )
+
+                if due_work > 0:
+                    logger.debug(
+                        "Moved %d/%d due tasks from %s to %s",
+                        due_work,
+                        total_work,
+                        self.docket.queue_key,
+                        self.docket.stream_key,
+                        extra=self._log_context(),
+                    )
+            except Exception:  # pragma: no cover
+                logger.exception(
+                    "Error in scheduler loop",
+                    exc_info=True,
+                    extra=self._log_context(),
+                )
+            finally:
+                await asyncio.sleep(self.scheduling_resolution.total_seconds())
+
+        logger.debug("Scheduler loop finished", extra=self._log_context())
+
     async def _execute(self, message: RedisMessage) -> None:
+        key = message[b"key"].decode()
+        async with self.docket.redis() as redis:
+            await redis.delete(self.docket.known_task_key(key))
+
         log_context: Mapping[str, str | float] = self._log_context()
 
         function_name = message[b"function"].decode()
@@ -377,9 +433,6 @@
 
         execution = Execution.from_message(function, message)
 
-        async with self.docket.redis() as redis:
-            await redis.delete(self.docket.known_task_key(execution.key))
-
         log_context = {**log_context, **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}
 
@@ -572,11 +625,10 @@
             pipeline.zcount(self.docket.queue_key, 0, now)
             pipeline.zcount(self.docket.queue_key, now, "+inf")
 
-            (
-
-
-
-            ) = await pipeline.execute()
+            results: list[int] = await pipeline.execute()
+            stream_depth = results[0]
+            overdue_depth = results[1]
+            schedule_depth = results[2]
 
             QUEUE_DEPTH.set(
                 stream_depth + overdue_depth, self.docket.labels()

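A minimal sketch of running a Worker with the new scheduling_resolution knob, mirroring the constructor change above and the tests/conftest.py fixture below. It assumes Docket and Worker are importable from the docket package; the name and URL are placeholders.

import asyncio
from datetime import timedelta

from docket import Docket, Worker  # assumption: both are exported at the package root


async def run_worker() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        async with Worker(
            docket,
            minimum_check_interval=timedelta(milliseconds=10),
            # how often the scheduler loop moves due tasks from the queue to the stream
            scheduling_resolution=timedelta(milliseconds=10),
        ) as worker:
            await worker.run_until_finished()


asyncio.run(run_worker())
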
{pydocket-0.4.0 → pydocket-0.5.0}/tests/conftest.py

@@ -155,7 +155,9 @@ async def docket(redis_url: str, aiolib: str) -> AsyncGenerator[Docket, None]:
 @pytest.fixture
 async def worker(docket: Docket) -> AsyncGenerator[Worker, None]:
     async with Worker(
-        docket,
+        docket,
+        minimum_check_interval=timedelta(milliseconds=10),
+        scheduling_resolution=timedelta(milliseconds=10),
     ) as worker:
         yield worker
 