pydocket 0.5.0__tar.gz → 0.5.2__tar.gz

This diff shows the contents of publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.

Files changed (54)
  1. {pydocket-0.5.0 → pydocket-0.5.2}/PKG-INFO +1 -1
  2. {pydocket-0.5.0 → pydocket-0.5.2}/chaos/driver.py +0 -6
  3. pydocket-0.5.2/examples/common.py +69 -0
  4. pydocket-0.5.2/examples/find_and_flood.py +39 -0
  5. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/dependencies.py +42 -2
  6. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/docket.py +68 -41
  7. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/worker.py +67 -37
  8. pydocket-0.5.2/tests/cli/__init__.py +0 -0
  9. {pydocket-0.5.0 → pydocket-0.5.2}/tests/test_fundamentals.py +28 -0
  10. {pydocket-0.5.0 → pydocket-0.5.2}/tests/test_worker.py +8 -7
  11. {pydocket-0.5.0 → pydocket-0.5.2}/.cursor/rules/general.mdc +0 -0
  12. {pydocket-0.5.0 → pydocket-0.5.2}/.cursor/rules/python-style.mdc +0 -0
  13. {pydocket-0.5.0 → pydocket-0.5.2}/.github/codecov.yml +0 -0
  14. {pydocket-0.5.0 → pydocket-0.5.2}/.github/workflows/chaos.yml +0 -0
  15. {pydocket-0.5.0 → pydocket-0.5.2}/.github/workflows/ci.yml +0 -0
  16. {pydocket-0.5.0 → pydocket-0.5.2}/.github/workflows/publish.yml +0 -0
  17. {pydocket-0.5.0 → pydocket-0.5.2}/.gitignore +0 -0
  18. {pydocket-0.5.0 → pydocket-0.5.2}/.pre-commit-config.yaml +0 -0
  19. {pydocket-0.5.0 → pydocket-0.5.2}/LICENSE +0 -0
  20. {pydocket-0.5.0 → pydocket-0.5.2}/README.md +0 -0
  21. {pydocket-0.5.0 → pydocket-0.5.2}/chaos/README.md +0 -0
  22. {pydocket-0.5.0 → pydocket-0.5.2}/chaos/__init__.py +0 -0
  23. {pydocket-0.5.0 → pydocket-0.5.2}/chaos/producer.py +0 -0
  24. {pydocket-0.5.0 → pydocket-0.5.2}/chaos/run +0 -0
  25. {pydocket-0.5.0 → pydocket-0.5.2}/chaos/tasks.py +0 -0
  26. {pydocket-0.5.0/tests → pydocket-0.5.2/examples}/__init__.py +0 -0
  27. {pydocket-0.5.0 → pydocket-0.5.2}/pyproject.toml +0 -0
  28. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/__init__.py +0 -0
  29. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/__main__.py +0 -0
  30. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/annotations.py +0 -0
  31. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/cli.py +0 -0
  32. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/execution.py +0 -0
  33. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/instrumentation.py +0 -0
  34. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/py.typed +0 -0
  35. {pydocket-0.5.0 → pydocket-0.5.2}/src/docket/tasks.py +0 -0
  36. {pydocket-0.5.0 → pydocket-0.5.2}/telemetry/.gitignore +0 -0
  37. {pydocket-0.5.0 → pydocket-0.5.2}/telemetry/start +0 -0
  38. {pydocket-0.5.0 → pydocket-0.5.2}/telemetry/stop +0 -0
  39. {pydocket-0.5.0/tests/cli → pydocket-0.5.2/tests}/__init__.py +0 -0
  40. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/conftest.py +0 -0
  41. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_module.py +0 -0
  42. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_parsing.py +0 -0
  43. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_snapshot.py +0 -0
  44. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_striking.py +0 -0
  45. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_tasks.py +0 -0
  46. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_version.py +0 -0
  47. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_worker.py +0 -0
  48. {pydocket-0.5.0 → pydocket-0.5.2}/tests/cli/test_workers.py +0 -0
  49. {pydocket-0.5.0 → pydocket-0.5.2}/tests/conftest.py +0 -0
  50. {pydocket-0.5.0 → pydocket-0.5.2}/tests/test_dependencies.py +0 -0
  51. {pydocket-0.5.0 → pydocket-0.5.2}/tests/test_docket.py +0 -0
  52. {pydocket-0.5.0 → pydocket-0.5.2}/tests/test_instrumentation.py +0 -0
  53. {pydocket-0.5.0 → pydocket-0.5.2}/tests/test_striking.py +0 -0
  54. {pydocket-0.5.0 → pydocket-0.5.2}/uv.lock +0 -0
{pydocket-0.5.0 → pydocket-0.5.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.5.0
+Version: 0.5.2
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues

{pydocket-0.5.0 → pydocket-0.5.2}/chaos/driver.py
@@ -23,12 +23,6 @@ from .tasks import toxic
 
 logging.getLogger().setLevel(logging.INFO)
 
-# Quiets down the testcontainers logger
-testcontainers_logger = logging.getLogger("testcontainers.core.container")
-testcontainers_logger.setLevel(logging.ERROR)
-testcontainers_logger = logging.getLogger("testcontainers.core.waiting_utils")
-testcontainers_logger.setLevel(logging.ERROR)
-
 console = logging.StreamHandler(stream=sys.stdout)
 console.setFormatter(
     logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

pydocket-0.5.2/examples/common.py
@@ -0,0 +1,69 @@
+import asyncio
+import os
+import socket
+from contextlib import asynccontextmanager
+from typing import AsyncGenerator
+
+from docker import DockerClient
+
+
+@asynccontextmanager
+async def run_redis(version: str) -> AsyncGenerator[str, None]:
+    def get_free_port() -> int:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            s.bind(("", 0))
+            return s.getsockname()[1]
+
+    port = get_free_port()
+
+    client = DockerClient.from_env()
+    container = client.containers.run(
+        f"redis:{version}",
+        detach=True,
+        ports={"6379/tcp": port},
+        auto_remove=True,
+    )
+
+    # Wait for Redis to be ready
+    for line in container.logs(stream=True):
+        if b"Ready to accept connections" in line:
+            break
+
+    url = f"redis://localhost:{port}/0"
+    print("***** Redis is running on %s *****", url)
+    try:
+        yield url
+    finally:
+        container.stop()
+
+
+async def run_example_workers(workers: int, concurrency: int, tasks: str):
+    async with run_redis("7.4.2") as redis_url:
+        processes = [
+            await asyncio.create_subprocess_exec(
+                "docket",
+                "worker",
+                "--name",
+                f"worker-{i}",
+                "--url",
+                redis_url,
+                "--tasks",
+                tasks,
+                "--concurrency",
+                str(concurrency),
+                env={
+                    **os.environ,
+                    "PYTHONPATH": os.path.abspath(
+                        os.path.join(os.path.dirname(__file__), "..")
+                    ),
+                },
+            )
+            for i in range(workers)
+        ]
+        try:
+            await asyncio.gather(*[p.wait() for p in processes])
+        except asyncio.CancelledError:
+            for p in processes:
+                p.kill()
+        finally:
+            await asyncio.gather(*[p.wait() for p in processes])

pydocket-0.5.2/examples/find_and_flood.py
@@ -0,0 +1,39 @@
+import asyncio
+from datetime import timedelta
+from logging import Logger, LoggerAdapter
+from typing import Annotated
+
+from docket import Docket
+from docket.annotations import Logged
+from docket.dependencies import CurrentDocket, Perpetual, TaskLogger
+
+from .common import run_example_workers
+
+
+async def find(
+    docket: Docket = CurrentDocket(),
+    logger: LoggerAdapter[Logger] = TaskLogger(),
+    perpetual: Perpetual = Perpetual(every=timedelta(seconds=3), automatic=True),
+) -> None:
+    for i in range(1, 10 + 1):
+        await docket.add(flood, key=str(i))(i)
+
+
+async def flood(
+    item: Annotated[int, Logged],
+    logger: LoggerAdapter[Logger] = TaskLogger(),
+) -> None:
+    logger.info("Working on %s", item)
+
+
+tasks = [find, flood]
+
+
+if __name__ == "__main__":
+    asyncio.run(
+        run_example_workers(
+            workers=3,
+            concurrency=8,
+            tasks="examples.find_and_flood:tasks",
+        )
+    )

{pydocket-0.5.0 → pydocket-0.5.2}/src/docket/dependencies.py
@@ -2,7 +2,7 @@ import abc
 import inspect
 import logging
 from datetime import timedelta
-from typing import Any, Awaitable, Callable, Counter, cast
+from typing import Any, Awaitable, Callable, Counter, TypeVar, cast
 
 from .docket import Docket
 from .execution import Execution
@@ -130,12 +130,29 @@ class Perpetual(Dependency):
     single = True
 
     every: timedelta
+    automatic: bool
+
     args: tuple[Any, ...]
     kwargs: dict[str, Any]
+
     cancelled: bool
 
-    def __init__(self, every: timedelta = timedelta(0)) -> None:
+    def __init__(
+        self,
+        every: timedelta = timedelta(0),
+        automatic: bool = False,
+    ) -> None:
+        """Declare a task that should be run perpetually.
+
+        Args:
+            every: The target interval between task executions.
+            automatic: If set, this task will be automatically scheduled during worker
+                startup and continually through the worker's lifespan. This ensures
+                that the task will always be scheduled despite crashes and other
+                adverse conditions. Automatic tasks must not require any arguments.
+        """
         self.every = every
+        self.automatic = automatic
         self.cancelled = False
 
     def __call__(
@@ -170,6 +187,29 @@ def get_dependency_parameters(
     return dependencies
 
 
+D = TypeVar("D", bound=Dependency)
+
+
+def get_single_dependency_parameter_of_type(
+    function: Callable[..., Awaitable[Any]], dependency_type: type[D]
+) -> D | None:
+    assert dependency_type.single, "Dependency must be single"
+    for _, dependency in get_dependency_parameters(function).items():
+        if isinstance(dependency, dependency_type):
+            return dependency
+    return None
+
+
+def get_single_dependency_of_type(
+    dependencies: dict[str, Dependency], dependency_type: type[D]
+) -> D | None:
+    assert dependency_type.single, "Dependency must be single"
+    for _, dependency in dependencies.items():
+        if isinstance(dependency, dependency_type):
+            return dependency
+    return None
+
+
 def validate_dependencies(function: Callable[..., Awaitable[Any]]) -> None:
     parameters = get_dependency_parameters(function)
 
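The new `automatic` flag and the single-dependency helpers above are what let a worker discover perpetual tasks from their signatures alone. A minimal sketch of that introspection, assuming docket 0.5.2 is installed; the `heartbeat` task here is purely illustrative and not part of the package:

```python
from datetime import timedelta

from docket.dependencies import (
    Perpetual,
    get_single_dependency_parameter_of_type,
)


# A hypothetical task: defaulting a parameter to Perpetual(..., automatic=True)
# is all a worker needs to schedule it at startup, without an explicit add().
async def heartbeat(
    perpetual: Perpetual = Perpetual(every=timedelta(seconds=30), automatic=True),
) -> None:
    print("still alive")


# The worker-side discovery added in this release boils down to this check:
perpetual = get_single_dependency_parameter_of_type(heartbeat, Perpetual)
assert perpetual is not None and perpetual.automatic
```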

{pydocket-0.5.0 → pydocket-0.5.2}/src/docket/docket.py
@@ -28,6 +28,7 @@ from uuid import uuid4
 import redis.exceptions
 from opentelemetry import propagate, trace
 from redis.asyncio import ConnectionPool, Redis
+from redis.asyncio.client import Pipeline
 
 from .execution import (
     Execution,
@@ -256,9 +257,14 @@ class Docket:
 
         async def scheduler(*args: P.args, **kwargs: P.kwargs) -> Execution:
             execution = Execution(function, args, kwargs, when, key, attempt=1)
-            await self.schedule(execution)
+
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._schedule(redis, pipeline, execution, replace=False)
+                    await pipeline.execute()
 
             TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})
+            TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
             return execution
 
@@ -291,15 +297,48 @@
 
         async def scheduler(*args: P.args, **kwargs: P.kwargs) -> Execution:
             execution = Execution(function, args, kwargs, when, key, attempt=1)
-            await self.cancel(key)
-            await self.schedule(execution)
+
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._schedule(redis, pipeline, execution, replace=True)
+                    await pipeline.execute()
 
             TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})
+            TASKS_CANCELLED.add(1, {**self.labels(), **execution.general_labels()})
+            TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
             return execution
 
         return scheduler
 
+    async def schedule(self, execution: Execution) -> None:
+        with tracer.start_as_current_span(
+            "docket.schedule",
+            attributes={
+                **self.labels(),
+                **execution.specific_labels(),
+                "code.function.name": execution.function.__name__,
+            },
+        ):
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._schedule(redis, pipeline, execution, replace=False)
+                    await pipeline.execute()
+
+        TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
+
+    async def cancel(self, key: str) -> None:
+        with tracer.start_as_current_span(
+            "docket.cancel",
+            attributes={**self.labels(), "docket.key": key},
+        ):
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    await self._cancel(pipeline, key)
+                    await pipeline.execute()
+
+        TASKS_CANCELLED.add(1, self.labels())
+
     @property
     def queue_key(self) -> str:
         return f"{self.name}:queue"
@@ -314,7 +353,13 @@
     def parked_task_key(self, key: str) -> str:
         return f"{self.name}:{key}"
 
-    async def schedule(self, execution: Execution) -> None:
+    async def _schedule(
+        self,
+        redis: Redis,
+        pipeline: Pipeline,
+        execution: Execution,
+        replace: bool = False,
+    ) -> None:
         if self.strike_list.is_stricken(execution):
             logger.warning(
                 "%r is stricken, skipping schedule of %r",
@@ -334,53 +379,35 @@
         message: dict[bytes, bytes] = execution.as_message()
         propagate.inject(message, setter=message_setter)
 
-        with tracer.start_as_current_span(
-            "docket.schedule",
-            attributes={
-                **self.labels(),
-                **execution.specific_labels(),
-                "code.function.name": execution.function.__name__,
-            },
-        ):
-            key = execution.key
-            when = execution.when
+        key = execution.key
+        when = execution.when
+        known_task_key = self.known_task_key(key)
 
-            async with self.redis() as redis:
+        async with redis.lock(f"{known_task_key}:lock", timeout=10):
+            if replace:
+                await self._cancel(pipeline, key)
+            else:
                 # if the task is already in the queue or stream, retain it
-                if await redis.exists(self.known_task_key(key)):
+                if await redis.exists(known_task_key):
                     logger.debug(
-                        "Task %r is already in the queue or stream, skipping schedule",
+                        "Task %r is already in the queue or stream, not scheduling",
                         key,
                         extra=self.labels(),
                     )
                     return
 
-                async with redis.pipeline() as pipe:
-                    pipe.set(self.known_task_key(key), when.timestamp())
-
-                    if when <= datetime.now(timezone.utc):
-                        pipe.xadd(self.stream_key, message)  # type: ignore[arg-type]
-                    else:
-                        pipe.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
-                        pipe.zadd(self.queue_key, {key: when.timestamp()})
+            pipeline.set(known_task_key, when.timestamp())
 
-                    await pipe.execute()
-
-        TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
-
-    async def cancel(self, key: str) -> None:
-        with tracer.start_as_current_span(
-            "docket.cancel",
-            attributes={**self.labels(), "docket.key": key},
-        ):
-            async with self.redis() as redis:
-                async with redis.pipeline() as pipe:
-                    pipe.delete(self.known_task_key(key))
-                    pipe.delete(self.parked_task_key(key))
-                    pipe.zrem(self.queue_key, key)
-                    await pipe.execute()
+            if when <= datetime.now(timezone.utc):
+                pipeline.xadd(self.stream_key, message)  # type: ignore[arg-type]
+            else:
+                pipeline.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
+                pipeline.zadd(self.queue_key, {key: when.timestamp()})
 
-        TASKS_CANCELLED.add(1, self.labels())
+    async def _cancel(self, pipeline: Pipeline, key: str) -> None:
+        pipeline.delete(self.known_task_key(key))
+        pipeline.delete(self.parked_task_key(key))
+        pipeline.zrem(self.queue_key, key)
 
     @property
     def strike_key(self) -> str:
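For context on how these internals are reached, here is a small sketch of the public scheduling API as used from application code. The task function, docket name, and Redis URL are illustrative, and the constructor arguments follow the usage shown in the project README rather than anything confirmed by this diff:

```python
import asyncio
from datetime import datetime, timedelta, timezone

from docket import Docket


# An illustrative task, not part of the package.
async def send_welcome_email(customer_id: int) -> None:
    print(f"emailing customer {customer_id}")


async def main() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        # Due now: _schedule() puts the message straight onto the Redis stream.
        await docket.add(send_welcome_email)(customer_id=1)

        # Due later: the message is parked in a hash and keyed into the sorted-set queue.
        soon = datetime.now(timezone.utc) + timedelta(minutes=5)
        await docket.add(send_welcome_email, when=soon, key="welcome-2")(customer_id=2)

        # replace() now cancels and reschedules inside one pipeline, under the task's lock.
        later = soon + timedelta(minutes=5)
        await docket.replace(send_welcome_email, later, "welcome-2")(customer_id=2)

        # cancel() clears the known-task marker, the parked hash, and the queue entry.
        await docket.cancel("welcome-2")


if __name__ == "__main__":
    asyncio.run(main())
```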

{pydocket-0.5.0 → pydocket-0.5.2}/src/docket/worker.py
@@ -6,11 +6,9 @@ from datetime import datetime, timedelta, timezone
 from types import TracebackType
 from typing import (
     TYPE_CHECKING,
-    Any,
     Mapping,
     Protocol,
     Self,
-    TypeVar,
     cast,
 )
 from uuid import uuid4
@@ -19,6 +17,7 @@
 from opentelemetry import propagate, trace
 from opentelemetry.trace import Tracer
 from redis.asyncio import Redis
+from redis.exceptions import LockError
 
 from .docket import (
     Docket,
@@ -53,8 +52,6 @@ tracer: Tracer = trace.get_tracer(__name__)
 if TYPE_CHECKING:  # pragma: no cover
     from .dependencies import Dependency
 
-    D = TypeVar("D", bound="Dependency")
-
 
 
 class _stream_due_tasks(Protocol):
@@ -216,13 +213,14 @@ class Worker:
             await asyncio.sleep(self.reconnection_delay.total_seconds())
 
     async def _worker_loop(self, forever: bool = False):
-        should_stop = asyncio.Event()
+        worker_stopping = asyncio.Event()
+
+        await self._schedule_all_automatic_perpetual_tasks()
 
         async with self.docket.redis() as redis:
             scheduler_task = asyncio.create_task(
-                self._scheduler_loop(redis, should_stop)
+                self._scheduler_loop(redis, worker_stopping)
             )
-
             active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
 
             async def check_for_work() -> bool:
@@ -329,13 +327,13 @@
             await asyncio.gather(*active_tasks, return_exceptions=True)
             await process_completed_tasks()
 
-            should_stop.set()
+            worker_stopping.set()
             await scheduler_task
 
     async def _scheduler_loop(
         self,
         redis: Redis,
-        should_stop: asyncio.Event,
+        worker_stopping: asyncio.Event,
     ) -> None:
         """Loop that moves due tasks from the queue to the stream."""
 
@@ -389,7 +387,7 @@
 
         total_work: int = sys.maxsize
 
-        while not should_stop.is_set() or total_work:
+        while not worker_stopping.is_set() or total_work:
             try:
                 total_work, due_work = await stream_due_tasks(
                     keys=[self.docket.queue_key, self.docket.stream_key],
@@ -416,22 +414,47 @@
 
         logger.debug("Scheduler loop finished", extra=self._log_context())
 
+    async def _schedule_all_automatic_perpetual_tasks(self) -> None:
+        from .dependencies import Perpetual, get_single_dependency_parameter_of_type
+
+        async with self.docket.redis() as redis:
+            try:
+                async with redis.lock(
+                    f"{self.docket.name}:perpetual:lock", timeout=10, blocking=False
+                ):
+                    for task_function in self.docket.tasks.values():
+                        perpetual = get_single_dependency_parameter_of_type(
+                            task_function, Perpetual
+                        )
+                        if perpetual is None:
+                            continue
+
+                        if not perpetual.automatic:
+                            continue
+
+                        key = task_function.__name__
+
+                        await self.docket.add(task_function, key=key)()
+            except LockError:  # pragma: no cover
+                return
+
     async def _execute(self, message: RedisMessage) -> None:
         key = message[b"key"].decode()
-        async with self.docket.redis() as redis:
-            await redis.delete(self.docket.known_task_key(key))
 
         log_context: Mapping[str, str | float] = self._log_context()
 
         function_name = message[b"function"].decode()
         function = self.docket.tasks.get(function_name)
         if function is None:
+            async with self.docket.redis() as redis:
+                await redis.delete(self.docket.known_task_key(key))
             logger.warning(
                 "Task function %r not found", function_name, extra=log_context
             )
             return
 
         execution = Execution.from_message(function, message)
+        dependencies = self._get_dependencies(execution)
 
         log_context = {**log_context, **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}
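The startup hook above uses a non-blocking Redis lock so that when several workers start at once, only one of them seeds the automatic perpetual tasks, while the rest catch `LockError` and skip the step. A stripped-down sketch of that pattern with redis-py's asyncio client; the key name and worker names are illustrative:

```python
import asyncio

from redis.asyncio import Redis
from redis.exceptions import LockError


async def seed_once(redis: Redis, worker_name: str) -> None:
    try:
        # blocking=False: fail immediately if another worker already holds the
        # lock, rather than waiting for it to be released.
        async with redis.lock("example:perpetual:lock", timeout=10, blocking=False):
            print(f"{worker_name} acquired the lock and seeds the automatic tasks")
            await asyncio.sleep(0.1)  # stand-in for the docket.add(...) calls
    except LockError:
        # Losing the race is the normal case for every worker but one.
        print(f"{worker_name} skipped seeding")


async def main() -> None:
    async with Redis.from_url("redis://localhost:6379/0") as redis:
        await asyncio.gather(*(seed_once(redis, f"worker-{i}") for i in range(3)))


if __name__ == "__main__":
    asyncio.run(main())
```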
@@ -440,6 +463,9 @@
         call = execution.call_repr()
 
         if self.docket.strike_list.is_stricken(execution):
+            async with self.docket.redis() as redis:
+                await redis.delete(self.docket.known_task_key(key))
+
             arrow = "🗙"
             logger.warning("%s %s", arrow, call, extra=log_context)
             TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})
@@ -448,7 +474,12 @@
 
         if execution.key in self._execution_counts:
             self._execution_counts[execution.key] += 1
-        dependencies = self._get_dependencies(execution)
+        # Preemptively reschedule the perpetual task for the future, or clear the
+        # known task key for this task
+        rescheduled = await self._perpetuate_if_requested(execution, dependencies)
+        if not rescheduled:
+            async with self.docket.redis() as redis:
+                await redis.delete(self.docket.known_task_key(key))
 
         context = propagate.extract(message, getter=message_getter)
         initiating_context = trace.get_current_span(context).get_span_context()
@@ -511,12 +542,12 @@
     def _get_dependencies(
         self,
         execution: Execution,
-    ) -> dict[str, Any]:
+    ) -> dict[str, "Dependency"]:
         from .dependencies import get_dependency_parameters
 
         parameters = get_dependency_parameters(execution.function)
 
-        dependencies: dict[str, Any] = {}
+        dependencies: dict[str, "Dependency"] = {}
 
         for parameter_name, dependency in parameters.items():
             # If the argument is already provided, skip it, which allows users to call
@@ -532,16 +563,14 @@
     async def _retry_if_requested(
         self,
         execution: Execution,
-        dependencies: dict[str, Any],
+        dependencies: dict[str, "Dependency"],
     ) -> bool:
-        from .dependencies import Retry
+        from .dependencies import Retry, get_single_dependency_of_type
 
-        retries = [retry for retry in dependencies.values() if isinstance(retry, Retry)]
-        if not retries:
+        retry = get_single_dependency_of_type(dependencies, Retry)
+        if not retry:
             return False
 
-        retry = retries[0]
-
         if retry.attempts is None or execution.attempt < retry.attempts:
             execution.when = datetime.now(timezone.utc) + retry.delay
             execution.attempt += 1
@@ -553,31 +582,32 @@
         return False
 
     async def _perpetuate_if_requested(
-        self, execution: Execution, dependencies: dict[str, Any], duration: timedelta
+        self,
+        execution: Execution,
+        dependencies: dict[str, "Dependency"],
+        duration: timedelta | None = None,
     ) -> bool:
-        from .dependencies import Perpetual
-
-        perpetuals = [
-            perpetual
-            for perpetual in dependencies.values()
-            if isinstance(perpetual, Perpetual)
-        ]
-        if not perpetuals:
-            return False
+        from .dependencies import Perpetual, get_single_dependency_of_type
 
-        perpetual = perpetuals[0]
+        perpetual = get_single_dependency_of_type(dependencies, Perpetual)
+        if not perpetual:
+            return False
 
         if perpetual.cancelled:
+            await self.docket.cancel(execution.key)
             return False
 
         now = datetime.now(timezone.utc)
-        execution.when = max(now, now + perpetual.every - duration)
-        execution.args = perpetual.args
-        execution.kwargs = perpetual.kwargs
+        when = max(now, now + perpetual.every - (duration or timedelta(0)))
+
+        await self.docket.replace(execution.function, when, execution.key)(
+            *perpetual.args,
+            **perpetual.kwargs,
+        )
 
-        await self.docket.schedule(execution)
+        if duration is not None:
+            TASKS_PERPETUATED.add(1, {**self.labels(), **execution.specific_labels()})
 
-        TASKS_PERPETUATED.add(1, {**self.labels(), **execution.specific_labels()})
         return True
 
     @property
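The rescheduling arithmetic in `_perpetuate_if_requested` is worth spelling out: the next run is aimed at `every` after the start of the current run (the measured `duration` is subtracted when it is known), clamped so it is never in the past. A worked example with illustrative values:

```python
from datetime import datetime, timedelta, timezone

every = timedelta(seconds=3)       # Perpetual(every=...)
duration = timedelta(seconds=1.2)  # how long the task body took

now = datetime.now(timezone.utc)
next_run = max(now, now + every - (duration or timedelta(0)))

# With duration < every, runs land roughly every 3 seconds; if the body ever
# takes longer than `every`, the next run is scheduled for "now" instead of
# drifting into the past.
print(next_run - now)  # 0:00:01.800000
```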

{pydocket-0.5.0 → pydocket-0.5.2}/tests/test_fundamentals.py
@@ -1037,3 +1037,31 @@ async def test_perpetual_tasks_perpetuate_even_after_errors(
     await worker.run_at_most({execution.key: 3})
 
     assert calls == 3
+
+
+async def test_perpetual_tasks_can_be_automatically_scheduled(
+    docket: Docket, worker: Worker
+):
+    """Perpetual tasks can be automatically scheduled"""
+
+    calls = 0
+
+    async def my_automatic_task(
+        perpetual: Perpetual = Perpetual(
+            every=timedelta(milliseconds=50), automatic=True
+        ),
+    ):
+        assert isinstance(perpetual, Perpetual)
+
+        assert perpetual.every == timedelta(milliseconds=50)
+
+        nonlocal calls
+        calls += 1
+
+    # Note we never add this task to the docket, we just register it.
+    docket.register(my_automatic_task)
+
+    # The automatic key will be the task function's name
+    await worker.run_at_most({"my_automatic_task": 3})
+
+    assert calls == 3

{pydocket-0.5.0 → pydocket-0.5.2}/tests/test_worker.py
@@ -374,9 +374,6 @@ async def test_perpetual_tasks_are_scheduled_close_to_target_time(
     ):
         timestamps.append(datetime.now(timezone.utc))
 
-        if len(timestamps) % 2 == 0:
-            await asyncio.sleep(0.05)
-
     await docket.add(perpetual_task, key="my-key")(a="a", b=2)
 
     await worker.run_at_most({"my-key": 8})
@@ -384,11 +381,15 @@
     assert len(timestamps) == 8
 
     intervals = [next - previous for previous, next in zip(timestamps, timestamps[1:])]
-    total = timedelta(seconds=sum(i.total_seconds() for i in intervals))
-    average = total / len(intervals)
+    minimum = min(intervals)
+    maximum = max(intervals)
+
+    debug = ", ".join([f"{i.total_seconds() * 1000:.2f}ms" for i in intervals])
 
-    # even with a variable duration, Docket attempts to schedule them equally
-    assert timedelta(milliseconds=45) <= average <= timedelta(milliseconds=75)
+    # even with a variable duration, Docket attempts to schedule them equally and to
+    # abide by the target interval
+    assert minimum >= timedelta(milliseconds=50), debug
+    assert maximum <= timedelta(milliseconds=75), debug
 
 
 async def test_worker_can_exit_from_perpetual_tasks_that_queue_further_tasks(