pydocket 0.5.1__tar.gz → 0.5.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydocket might be problematic.
- {pydocket-0.5.1 → pydocket-0.5.2}/PKG-INFO +1 -1
- {pydocket-0.5.1 → pydocket-0.5.2}/chaos/driver.py +0 -6
- pydocket-0.5.2/examples/common.py +69 -0
- pydocket-0.5.2/examples/find_and_flood.py +39 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/docket.py +68 -41
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/worker.py +41 -50
- pydocket-0.5.2/tests/cli/__init__.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/test_worker.py +8 -7
- {pydocket-0.5.1 → pydocket-0.5.2}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.github/codecov.yml +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.github/workflows/ci.yml +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.github/workflows/publish.yml +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.gitignore +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/.pre-commit-config.yaml +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/LICENSE +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/README.md +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/chaos/README.md +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/chaos/__init__.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/chaos/producer.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/chaos/run +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/chaos/tasks.py +0 -0
- {pydocket-0.5.1/tests → pydocket-0.5.2/examples}/__init__.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/pyproject.toml +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/__init__.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/__main__.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/annotations.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/cli.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/dependencies.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/execution.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/instrumentation.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/py.typed +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/src/docket/tasks.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/telemetry/.gitignore +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/telemetry/start +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/telemetry/stop +0 -0
- {pydocket-0.5.1/tests/cli → pydocket-0.5.2/tests}/__init__.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/conftest.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_module.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_striking.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_version.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_worker.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/cli/test_workers.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/conftest.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/test_dependencies.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/test_docket.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/test_fundamentals.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/test_instrumentation.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/tests/test_striking.py +0 -0
- {pydocket-0.5.1 → pydocket-0.5.2}/uv.lock +0 -0
{pydocket-0.5.1 → pydocket-0.5.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.5.1
+Version: 0.5.2
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
{pydocket-0.5.1 → pydocket-0.5.2}/chaos/driver.py

@@ -23,12 +23,6 @@ from .tasks import toxic
 
 logging.getLogger().setLevel(logging.INFO)
 
-# Quiets down the testcontainers logger
-testcontainers_logger = logging.getLogger("testcontainers.core.container")
-testcontainers_logger.setLevel(logging.ERROR)
-testcontainers_logger = logging.getLogger("testcontainers.core.waiting_utils")
-testcontainers_logger.setLevel(logging.ERROR)
-
 console = logging.StreamHandler(stream=sys.stdout)
 console.setFormatter(
     logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
pydocket-0.5.2/examples/common.py (new file)

@@ -0,0 +1,69 @@
+import asyncio
+import os
+import socket
+from contextlib import asynccontextmanager
+from typing import AsyncGenerator
+
+from docker import DockerClient
+
+
+@asynccontextmanager
+async def run_redis(version: str) -> AsyncGenerator[str, None]:
+    def get_free_port() -> int:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            s.bind(("", 0))
+            return s.getsockname()[1]
+
+    port = get_free_port()
+
+    client = DockerClient.from_env()
+    container = client.containers.run(
+        f"redis:{version}",
+        detach=True,
+        ports={"6379/tcp": port},
+        auto_remove=True,
+    )
+
+    # Wait for Redis to be ready
+    for line in container.logs(stream=True):
+        if b"Ready to accept connections" in line:
+            break
+
+    url = f"redis://localhost:{port}/0"
+    print("***** Redis is running on %s *****", url)
+    try:
+        yield url
+    finally:
+        container.stop()
+
+
+async def run_example_workers(workers: int, concurrency: int, tasks: str):
+    async with run_redis("7.4.2") as redis_url:
+        processes = [
+            await asyncio.create_subprocess_exec(
+                "docket",
+                "worker",
+                "--name",
+                f"worker-{i}",
+                "--url",
+                redis_url,
+                "--tasks",
+                tasks,
+                "--concurrency",
+                str(concurrency),
+                env={
+                    **os.environ,
+                    "PYTHONPATH": os.path.abspath(
+                        os.path.join(os.path.dirname(__file__), "..")
+                    ),
+                },
+            )
+            for i in range(workers)
+        ]
+        try:
+            await asyncio.gather(*[p.wait() for p in processes])
+        except asyncio.CancelledError:
+            for p in processes:
+                p.kill()
+        finally:
+            await asyncio.gather(*[p.wait() for p in processes])
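examples/common.py gives the examples a self-contained harness: run_redis boots a disposable Redis container on a free port and yields its URL, and run_example_workers fans out `docket worker` subprocesses against it. A minimal sketch of using the context manager on its own (the wrapper script below is hypothetical, not part of the package; it assumes the docker SDK and a running Docker daemon):

import asyncio

from examples.common import run_redis


async def main() -> None:
    # run_redis yields a redis:// URL for a throwaway container and stops
    # the container when the block exits
    async with run_redis("7.4.2") as redis_url:
        print(f"Point a docket worker at {redis_url}")


asyncio.run(main())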
pydocket-0.5.2/examples/find_and_flood.py (new file)

@@ -0,0 +1,39 @@
+import asyncio
+from datetime import timedelta
+from logging import Logger, LoggerAdapter
+from typing import Annotated
+
+from docket import Docket
+from docket.annotations import Logged
+from docket.dependencies import CurrentDocket, Perpetual, TaskLogger
+
+from .common import run_example_workers
+
+
+async def find(
+    docket: Docket = CurrentDocket(),
+    logger: LoggerAdapter[Logger] = TaskLogger(),
+    perpetual: Perpetual = Perpetual(every=timedelta(seconds=3), automatic=True),
+) -> None:
+    for i in range(1, 10 + 1):
+        await docket.add(flood, key=str(i))(i)
+
+
+async def flood(
+    item: Annotated[int, Logged],
+    logger: LoggerAdapter[Logger] = TaskLogger(),
+) -> None:
+    logger.info("Working on %s", item)
+
+
+tasks = [find, flood]
+
+
+if __name__ == "__main__":
+    asyncio.run(
+        run_example_workers(
+            workers=3,
+            concurrency=8,
+            tasks="examples.find_and_flood:tasks",
+        )
+    )
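examples/find_and_flood.py ties the pieces together: find is a perpetual task that fans out ten flood tasks by key every three seconds, and the __main__ block races three workers (concurrency 8 each) against a shared throwaway Redis. Given the relative import of .common and the examples.find_and_flood:tasks task path, it is presumably run as a module from the repository root, e.g. `python -m examples.find_and_flood`.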
{pydocket-0.5.1 → pydocket-0.5.2}/src/docket/docket.py

@@ -28,6 +28,7 @@ from uuid import uuid4
 import redis.exceptions
 from opentelemetry import propagate, trace
 from redis.asyncio import ConnectionPool, Redis
+from redis.asyncio.client import Pipeline
 
 from .execution import (
     Execution,
@@ -256,9 +257,14 @@ class Docket:
 
         async def scheduler(*args: P.args, **kwargs: P.kwargs) -> Execution:
             execution = Execution(function, args, kwargs, when, key, attempt=1)
-
+
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._schedule(redis, pipeline, execution, replace=False)
+                    await pipeline.execute()
 
             TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})
+            TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
             return execution
 
@@ -291,15 +297,48 @@ class Docket:
 
         async def scheduler(*args: P.args, **kwargs: P.kwargs) -> Execution:
             execution = Execution(function, args, kwargs, when, key, attempt=1)
-
-
+
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._schedule(redis, pipeline, execution, replace=True)
+                    await pipeline.execute()
 
             TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})
+            TASKS_CANCELLED.add(1, {**self.labels(), **execution.general_labels()})
+            TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
             return execution
 
         return scheduler
 
+    async def schedule(self, execution: Execution) -> None:
+        with tracer.start_as_current_span(
+            "docket.schedule",
+            attributes={
+                **self.labels(),
+                **execution.specific_labels(),
+                "code.function.name": execution.function.__name__,
+            },
+        ):
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._schedule(redis, pipeline, execution, replace=False)
+                    await pipeline.execute()
+
+            TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
+
+    async def cancel(self, key: str) -> None:
+        with tracer.start_as_current_span(
+            "docket.cancel",
+            attributes={**self.labels(), "docket.key": key},
+        ):
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._cancel(pipeline, key)
+                    await pipeline.execute()
+
+            TASKS_CANCELLED.add(1, self.labels())
+
     @property
     def queue_key(self) -> str:
         return f"{self.name}:queue"
@@ -314,7 +353,13 @@ class Docket:
     def parked_task_key(self, key: str) -> str:
         return f"{self.name}:{key}"
 
-    async def
+    async def _schedule(
+        self,
+        redis: Redis,
+        pipeline: Pipeline,
+        execution: Execution,
+        replace: bool = False,
+    ) -> None:
         if self.strike_list.is_stricken(execution):
             logger.warning(
                 "%r is stricken, skipping schedule of %r",
@@ -334,53 +379,35 @@ class Docket:
         message: dict[bytes, bytes] = execution.as_message()
         propagate.inject(message, setter=message_setter)
 
-
-
-
-                **self.labels(),
-                **execution.specific_labels(),
-                "code.function.name": execution.function.__name__,
-            },
-        ):
-            key = execution.key
-            when = execution.when
+        key = execution.key
+        when = execution.when
+        known_task_key = self.known_task_key(key)
 
-
+        async with redis.lock(f"{known_task_key}:lock", timeout=10):
+            if replace:
+                await self._cancel(pipeline, key)
+            else:
                 # if the task is already in the queue or stream, retain it
-                if await redis.exists(
+                if await redis.exists(known_task_key):
                     logger.debug(
-                        "Task %r is already in the queue or stream,
+                        "Task %r is already in the queue or stream, not scheduling",
                         key,
                         extra=self.labels(),
                     )
                     return
 
-
-            pipe.set(self.known_task_key(key), when.timestamp())
-
-            if when <= datetime.now(timezone.utc):
-                pipe.xadd(self.stream_key, message)  # type: ignore[arg-type]
-            else:
-                pipe.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
-                pipe.zadd(self.queue_key, {key: when.timestamp()})
+            pipeline.set(known_task_key, when.timestamp())
 
-
-
-
-
-
-        with tracer.start_as_current_span(
-            "docket.cancel",
-            attributes={**self.labels(), "docket.key": key},
-        ):
-            async with self.redis() as redis:
-                async with redis.pipeline() as pipe:
-                    pipe.delete(self.known_task_key(key))
-                    pipe.delete(self.parked_task_key(key))
-                    pipe.zrem(self.queue_key, key)
-                    await pipe.execute()
+            if when <= datetime.now(timezone.utc):
+                pipeline.xadd(self.stream_key, message)  # type: ignore[arg-type]
+            else:
+                pipeline.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
+                pipeline.zadd(self.queue_key, {key: when.timestamp()})
 
-
+    async def _cancel(self, pipeline: Pipeline, key: str) -> None:
+        pipeline.delete(self.known_task_key(key))
+        pipeline.delete(self.parked_task_key(key))
+        pipeline.zrem(self.queue_key, key)
 
     @property
     def strike_key(self) -> str:
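The net effect in docket.py: add and replace now delegate to the shared _schedule/_cancel helpers inside a single Redis pipeline guarded by a per-key lock, and the same helpers back the new public Docket.schedule and Docket.cancel methods. A hedged sketch of the new public surface (the task function and calling context are illustrative assumptions, not from the diff):

from datetime import datetime, timedelta, timezone

from docket import Docket
from docket.execution import Execution


async def send_welcome_email(user_id: int) -> None:  # hypothetical task
    ...


async def demo(docket: Docket) -> None:
    # schedule() takes a pre-built Execution; the positional order matches
    # the Execution(...) constructor calls visible in the diff
    when = datetime.now(timezone.utc) + timedelta(seconds=30)
    execution = Execution(send_welcome_email, (42,), {}, when, "welcome-42", attempt=1)
    await docket.schedule(execution)

    # cancel() deletes the known-task key and the parked message, and removes
    # the queue entry, all in one pipeline
    await docket.cancel("welcome-42")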
{pydocket-0.5.1 → pydocket-0.5.2}/src/docket/worker.py

@@ -17,6 +17,7 @@ import redis.exceptions
 from opentelemetry import propagate, trace
 from opentelemetry.trace import Tracer
 from redis.asyncio import Redis
+from redis.exceptions import LockError
 
 from .docket import (
     Docket,
@@ -215,9 +216,6 @@ class Worker:
         worker_stopping = asyncio.Event()
 
         await self._schedule_all_automatic_perpetual_tasks()
-        perpetual_scheduling_task = asyncio.create_task(
-            self._perpetual_scheduling_loop(worker_stopping)
-        )
 
         async with self.docket.redis() as redis:
             scheduler_task = asyncio.create_task(
@@ -331,7 +329,6 @@ class Worker:
 
         worker_stopping.set()
         await scheduler_task
-        await perpetual_scheduling_task
 
     async def _scheduler_loop(
         self,
@@ -417,65 +414,47 @@ class Worker:
 
         logger.debug("Scheduler loop finished", extra=self._log_context())
 
-    async def
-        """Loop that ensures that automatic perpetual tasks are always scheduled."""
-
-        while not worker_stopping.is_set():
-            minimum_interval = self.scheduling_resolution
-            try:
-                minimum_interval = await self._schedule_all_automatic_perpetual_tasks()
-            except Exception:  # pragma: no cover
-                logger.exception(
-                    "Error in perpetual scheduling loop",
-                    exc_info=True,
-                    extra=self._log_context(),
-                )
-            finally:
-                # Wait until just before the next time any task would need to be
-                # scheduled (one scheduling_resolution before the lowest interval)
-                interval = max(
-                    minimum_interval - self.scheduling_resolution,
-                    self.scheduling_resolution,
-                )
-                assert interval <= self.scheduling_resolution
-                await asyncio.sleep(interval.total_seconds())
-
-    async def _schedule_all_automatic_perpetual_tasks(self) -> timedelta:
+    async def _schedule_all_automatic_perpetual_tasks(self) -> None:
         from .dependencies import Perpetual, get_single_dependency_parameter_of_type
 
-
-
-
-
-
-
-
+        async with self.docket.redis() as redis:
+            try:
+                async with redis.lock(
+                    f"{self.docket.name}:perpetual:lock", timeout=10, blocking=False
+                ):
+                    for task_function in self.docket.tasks.values():
+                        perpetual = get_single_dependency_parameter_of_type(
+                            task_function, Perpetual
+                        )
+                        if perpetual is None:
+                            continue
 
-
-
+                        if not perpetual.automatic:
+                            continue
 
-
-                await self.docket.add(task_function, key=key)()
-                minimum_interval = min(minimum_interval, perpetual.every)
+                        key = task_function.__name__
 
-
+                        await self.docket.add(task_function, key=key)()
+            except LockError:  # pragma: no cover
+                return
 
     async def _execute(self, message: RedisMessage) -> None:
         key = message[b"key"].decode()
-        async with self.docket.redis() as redis:
-            await redis.delete(self.docket.known_task_key(key))
 
         log_context: Mapping[str, str | float] = self._log_context()
 
         function_name = message[b"function"].decode()
         function = self.docket.tasks.get(function_name)
         if function is None:
+            async with self.docket.redis() as redis:
+                await redis.delete(self.docket.known_task_key(key))
             logger.warning(
                 "Task function %r not found", function_name, extra=log_context
             )
             return
 
         execution = Execution.from_message(function, message)
+        dependencies = self._get_dependencies(execution)
 
         log_context = {**log_context, **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}
@@ -484,6 +463,9 @@ class Worker:
         call = execution.call_repr()
 
         if self.docket.strike_list.is_stricken(execution):
+            async with self.docket.redis() as redis:
+                await redis.delete(self.docket.known_task_key(key))
+
             arrow = "🗙"
             logger.warning("%s %s", arrow, call, extra=log_context)
             TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})
@@ -492,7 +474,12 @@ class Worker:
         if execution.key in self._execution_counts:
             self._execution_counts[execution.key] += 1
 
-
+        # Preemptively reschedule the perpetual task for the future, or clear the
+        # known task key for this task
+        rescheduled = await self._perpetuate_if_requested(execution, dependencies)
+        if not rescheduled:
+            async with self.docket.redis() as redis:
+                await redis.delete(self.docket.known_task_key(key))
 
         context = propagate.extract(message, getter=message_getter)
         initiating_context = trace.get_current_span(context).get_span_context()
@@ -598,7 +585,7 @@ class Worker:
         self,
         execution: Execution,
         dependencies: dict[str, "Dependency"],
-        duration: timedelta,
+        duration: timedelta | None = None,
     ) -> bool:
         from .dependencies import Perpetual, get_single_dependency_of_type
 
@@ -607,16 +594,20 @@ class Worker:
             return False
 
         if perpetual.cancelled:
+            await self.docket.cancel(execution.key)
             return False
 
         now = datetime.now(timezone.utc)
-
-
-        execution.
+        when = max(now, now + perpetual.every - (duration or timedelta(0)))
+
+        await self.docket.replace(execution.function, when, execution.key)(
+            *perpetual.args,
+            **perpetual.kwargs,
+        )
 
-
+        if duration is not None:
+            TASKS_PERPETUATED.add(1, {**self.labels(), **execution.specific_labels()})
 
-        TASKS_PERPETUATED.add(1, {**self.labels(), **execution.specific_labels()})
         return True
 
     @property
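In worker.py, the dedicated perpetual-scheduling loop is gone: _schedule_all_automatic_perpetual_tasks runs once at startup under a non-blocking Redis lock, and each perpetual task now reschedules itself. _execute preemptively calls _perpetuate_if_requested (with duration=None) before running the task, and the existing post-completion call presumably passes the measured duration, which is now the only case counted in TASKS_PERPETUATED. A standalone illustration of the `when` arithmetic, using the same formula as the diff:

from datetime import datetime, timedelta, timezone

now = datetime.now(timezone.utc)
every = timedelta(seconds=3)

# Fast task: ran for 1s, so the next run lands 2s from now -- exactly one
# full interval after the previous run started.
assert max(now, now + every - timedelta(seconds=1)) == now + timedelta(seconds=2)

# Slow task: ran for 5s, longer than the interval; max() clamps the next
# run to "now" instead of scheduling it in the past.
assert max(now, now + every - timedelta(seconds=5)) == now

# Preemptive reschedule (duration=None) substitutes timedelta(0), i.e. one
# full interval from now.
assert max(now, now + every - timedelta(0)) == now + every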
pydocket-0.5.2/tests/cli/__init__.py (new, empty file — no content changes)
{pydocket-0.5.1 → pydocket-0.5.2}/tests/test_worker.py

@@ -374,9 +374,6 @@ async def test_perpetual_tasks_are_scheduled_close_to_target_time(
     ):
         timestamps.append(datetime.now(timezone.utc))
 
-        if len(timestamps) % 2 == 0:
-            await asyncio.sleep(0.05)
-
     await docket.add(perpetual_task, key="my-key")(a="a", b=2)
 
     await worker.run_at_most({"my-key": 8})
@@ -384,11 +381,15 @@ async def test_perpetual_tasks_are_scheduled_close_to_target_time(
     assert len(timestamps) == 8
 
     intervals = [next - previous for previous, next in zip(timestamps, timestamps[1:])]
-
-
+    minimum = min(intervals)
+    maximum = max(intervals)
+
+    debug = ", ".join([f"{i.total_seconds() * 1000:.2f}ms" for i in intervals])
 
-    # even with a variable duration, Docket attempts to schedule them equally
-
+    # even with a variable duration, Docket attempts to schedule them equally and to
+    # abide by the target interval
+    assert minimum >= timedelta(milliseconds=50), debug
+    assert maximum <= timedelta(milliseconds=75), debug
 
 
 async def test_worker_can_exit_from_perpetual_tasks_that_queue_further_tasks(
All remaining files are unchanged between 0.5.1 and 0.5.2.