pydocket 0.2.0.tar.gz → 0.3.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pydocket might be problematic.

Files changed (52)
  1. {pydocket-0.2.0 → pydocket-0.3.0}/.pre-commit-config.yaml +2 -2
  2. {pydocket-0.2.0 → pydocket-0.3.0}/PKG-INFO +1 -1
  3. {pydocket-0.2.0 → pydocket-0.3.0}/pyproject.toml +8 -1
  4. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/__init__.py +2 -0
  5. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/dependencies.py +28 -0
  6. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/instrumentation.py +6 -0
  7. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/worker.py +81 -5
  8. pydocket-0.3.0/tests/conftest.py +167 -0
  9. {pydocket-0.2.0 → pydocket-0.3.0}/tests/test_fundamentals.py +138 -0
  10. {pydocket-0.2.0 → pydocket-0.3.0}/tests/test_worker.py +56 -0
  11. pydocket-0.2.0/tests/conftest.py +0 -117
  12. {pydocket-0.2.0 → pydocket-0.3.0}/.cursor/rules/general.mdc +0 -0
  13. {pydocket-0.2.0 → pydocket-0.3.0}/.cursor/rules/python-style.mdc +0 -0
  14. {pydocket-0.2.0 → pydocket-0.3.0}/.github/codecov.yml +0 -0
  15. {pydocket-0.2.0 → pydocket-0.3.0}/.github/workflows/chaos.yml +0 -0
  16. {pydocket-0.2.0 → pydocket-0.3.0}/.github/workflows/ci.yml +0 -0
  17. {pydocket-0.2.0 → pydocket-0.3.0}/.github/workflows/publish.yml +0 -0
  18. {pydocket-0.2.0 → pydocket-0.3.0}/.gitignore +0 -0
  19. {pydocket-0.2.0 → pydocket-0.3.0}/LICENSE +0 -0
  20. {pydocket-0.2.0 → pydocket-0.3.0}/README.md +0 -0
  21. {pydocket-0.2.0 → pydocket-0.3.0}/chaos/README.md +0 -0
  22. {pydocket-0.2.0 → pydocket-0.3.0}/chaos/__init__.py +0 -0
  23. {pydocket-0.2.0 → pydocket-0.3.0}/chaos/driver.py +0 -0
  24. {pydocket-0.2.0 → pydocket-0.3.0}/chaos/producer.py +0 -0
  25. {pydocket-0.2.0 → pydocket-0.3.0}/chaos/run +0 -0
  26. {pydocket-0.2.0 → pydocket-0.3.0}/chaos/tasks.py +0 -0
  27. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/__main__.py +0 -0
  28. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/annotations.py +0 -0
  29. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/cli.py +0 -0
  30. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/docket.py +0 -0
  31. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/execution.py +0 -0
  32. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/py.typed +0 -0
  33. {pydocket-0.2.0 → pydocket-0.3.0}/src/docket/tasks.py +0 -0
  34. {pydocket-0.2.0 → pydocket-0.3.0}/telemetry/.gitignore +0 -0
  35. {pydocket-0.2.0 → pydocket-0.3.0}/telemetry/start +0 -0
  36. {pydocket-0.2.0 → pydocket-0.3.0}/telemetry/stop +0 -0
  37. {pydocket-0.2.0 → pydocket-0.3.0}/tests/__init__.py +0 -0
  38. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/__init__.py +0 -0
  39. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/conftest.py +0 -0
  40. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_module.py +0 -0
  41. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_parsing.py +0 -0
  42. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_snapshot.py +0 -0
  43. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_striking.py +0 -0
  44. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_tasks.py +0 -0
  45. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_version.py +0 -0
  46. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_worker.py +0 -0
  47. {pydocket-0.2.0 → pydocket-0.3.0}/tests/cli/test_workers.py +0 -0
  48. {pydocket-0.2.0 → pydocket-0.3.0}/tests/test_dependencies.py +0 -0
  49. {pydocket-0.2.0 → pydocket-0.3.0}/tests/test_docket.py +0 -0
  50. {pydocket-0.2.0 → pydocket-0.3.0}/tests/test_instrumentation.py +0 -0
  51. {pydocket-0.2.0 → pydocket-0.3.0}/tests/test_striking.py +0 -0
  52. {pydocket-0.2.0 → pydocket-0.3.0}/uv.lock +0 -0
--- pydocket-0.2.0/.pre-commit-config.yaml
+++ pydocket-0.3.0/.pre-commit-config.yaml
@@ -24,13 +24,13 @@ repos:
     hooks:
       - id: pyright
        name: pyright (docket package)
-        entry: uv run pyright --verifytypes docket --ignoreexternal
+        entry: pyright --verifytypes docket --ignoreexternal
        language: system
        types: [python]
        pass_filenames: false
      - id: pyright
        name: pyright (source and tests)
-        entry: uv run pyright tests
+        entry: pyright tests
        language: system
        types: [python]
        pass_filenames: false
--- pydocket-0.2.0/PKG-INFO
+++ pydocket-0.3.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.2.0
+Version: 0.3.0
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
--- pydocket-0.2.0/pyproject.toml
+++ pydocket-0.3.0/pyproject.toml
@@ -65,7 +65,14 @@ source = "vcs"
 packages = ["src/docket"]
 
 [tool.pytest.ini_options]
-addopts = "--cov=src/docket --cov=tests --cov-report=term-missing --cov-branch"
+addopts = [
+    "--numprocesses=logical",
+    "--maxprocesses=4",
+    "--cov=src/docket",
+    "--cov=tests",
+    "--cov-report=term-missing",
+    "--cov-branch",
+]
 filterwarnings = ["error"]
 
 [tool.pyright]
--- pydocket-0.2.0/src/docket/__init__.py
+++ pydocket-0.3.0/src/docket/__init__.py
@@ -14,6 +14,7 @@ from .dependencies import (
     CurrentExecution,
     CurrentWorker,
     ExponentialRetry,
+    Perpetual,
     Retry,
     TaskKey,
     TaskLogger,
@@ -34,5 +35,6 @@ __all__ = [
     "Retry",
     "ExponentialRetry",
     "Logged",
+    "Perpetual",
     "__version__",
 ]
--- pydocket-0.2.0/src/docket/dependencies.py
+++ pydocket-0.3.0/src/docket/dependencies.py
@@ -126,6 +126,34 @@ class ExponentialRetry(Retry):
         return retry
 
 
+class Perpetual(Dependency):
+    single = True
+
+    every: timedelta
+    args: tuple[Any, ...]
+    kwargs: dict[str, Any]
+    cancelled: bool
+
+    def __init__(self, every: timedelta = timedelta(0)) -> None:
+        self.every = every
+        self.cancelled = False
+
+    def __call__(
+        self, docket: Docket, worker: Worker, execution: Execution
+    ) -> "Perpetual":
+        perpetual = Perpetual(every=self.every)
+        perpetual.args = execution.args
+        perpetual.kwargs = execution.kwargs
+        return perpetual
+
+    def cancel(self) -> None:
+        self.cancelled = True
+
+    def perpetuate(self, *args: Any, **kwargs: Any) -> None:
+        self.args = args
+        self.kwargs = kwargs
+
+
 def get_dependency_parameters(
     function: Callable[..., Awaitable[Any]],
 ) -> dict[str, Dependency]:
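
Note: as the tests added later in this diff show, a task opts into perpetuation by declaring a `Perpetual` dependency as a default parameter value. A minimal sketch of that pattern (the task name and period here are illustrative, not from the package):

    from datetime import timedelta

    from docket import Perpetual


    async def poll_upstream(
        perpetual: Perpetual = Perpetual(every=timedelta(seconds=30)),
    ) -> None:
        # After each run, the worker reschedules this task roughly every 30
        # seconds; perpetual.cancel() stops the cycle, and
        # perpetual.perpetuate(...) swaps in new arguments for the next run.
        ...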
--- pydocket-0.2.0/src/docket/instrumentation.py
+++ pydocket-0.3.0/src/docket/instrumentation.py
@@ -70,6 +70,12 @@ TASKS_RETRIED = meter.create_counter(
     unit="1",
 )
 
+TASKS_PERPETUATED = meter.create_counter(
+    "docket_tasks_perpetuated",
+    description="How many tasks that have been self-perpetuated",
+    unit="1",
+)
+
 TASK_DURATION = meter.create_histogram(
     "docket_task_duration",
     description="How long tasks take to complete",
--- pydocket-0.2.0/src/docket/worker.py
+++ pydocket-0.3.0/src/docket/worker.py
@@ -7,6 +7,7 @@ from types import TracebackType
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Mapping,
     Protocol,
     Self,
@@ -35,6 +36,7 @@ from .instrumentation import (
     TASK_PUNCTUALITY,
     TASKS_COMPLETED,
     TASKS_FAILED,
+    TASKS_PERPETUATED,
     TASKS_RETRIED,
     TASKS_RUNNING,
     TASKS_STARTED,
@@ -63,6 +65,11 @@ class _stream_due_tasks(Protocol):
 class Worker:
     docket: Docket
     name: str
+    concurrency: int
+    redelivery_timeout: timedelta
+    reconnection_delay: timedelta
+    minimum_check_interval: timedelta
+    _strike_conditions: list[Callable[[Execution], bool]] = []
 
     def __init__(
         self,
@@ -80,6 +87,10 @@ class Worker:
         self.reconnection_delay = reconnection_delay
         self.minimum_check_interval = minimum_check_interval
 
+        self._strike_conditions = [
+            docket.strike_list.is_stricken,
+        ]
+
     async def __aenter__(self) -> Self:
         self._heartbeat_task = asyncio.create_task(self._heartbeat())
 
@@ -151,6 +162,35 @@ class Worker:
         """Run the worker indefinitely."""
         return await self._run(forever=True)  # pragma: no cover
 
+    async def run_at_most(self, iterations_by_key: Mapping[str, int]) -> None:
+        """
+        Run the worker until there are no more tasks to process, but limit specified
+        task keys to a maximum number of iterations.
+
+        This is particularly useful for testing self-perpetuating tasks that would
+        otherwise run indefinitely.
+
+        Args:
+            iterations_by_key: Maps task keys to their maximum allowed executions
+        """
+        execution_counts: dict[str, int] = {key: 0 for key in iterations_by_key}
+
+        def has_reached_max_iterations(execution: Execution) -> bool:
+            if execution.key not in iterations_by_key:
+                return False
+
+            if execution_counts[execution.key] >= iterations_by_key[execution.key]:
+                return True
+
+            execution_counts[execution.key] += 1
+            return False
+
+        self._strike_conditions.insert(0, has_reached_max_iterations)
+        try:
+            await self.run_until_finished()
+        finally:
+            self._strike_conditions.remove(has_reached_max_iterations)
+
     async def _run(self, forever: bool = False) -> None:
         logger.info("Starting worker %r with the following tasks:", self.name)
         for task_name, task in self.docket.tasks.items():
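
Note: `run_at_most` composes with the `_strike_conditions` list introduced above; the counting happens inside the condition itself, so once a key reaches its limit, further deliveries are stricken and the worker can drain and exit. A hedged usage sketch, mirroring the tests later in this diff (the key name is illustrative):

    # Let the task scheduled under key "heartbeat" execute at most 3 times,
    # then let the worker finish any remaining work and return.
    await worker.run_at_most({"heartbeat": 3})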
@@ -322,7 +362,7 @@ class Worker:
         await process_completed_tasks()
 
     async def _execute(self, message: RedisMessage) -> None:
-        log_context: dict[str, str | float] = self._log_context()
+        log_context: Mapping[str, str | float] = self._log_context()
 
         function_name = message[b"function"].decode()
         function = self.docket.tasks.get(function_name)
@@ -334,13 +374,13 @@ class Worker:
 
         execution = Execution.from_message(function, message)
 
-        log_context |= execution.specific_labels()
+        log_context = {**log_context, **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}
 
         arrow = "↬" if execution.attempt > 1 else "↪"
         call = execution.call_repr()
 
-        if self.docket.strike_list.is_stricken(execution):
+        if any(condition(execution) for condition in self._strike_conditions):
             arrow = "🗙"
             logger.warning("%s %s", arrow, call, extra=log_context)
             TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})
@@ -354,7 +394,7 @@ class Worker:
 
         start = datetime.now(timezone.utc)
         punctuality = start - execution.when
-        log_context["punctuality"] = punctuality.total_seconds()
+        log_context = {**log_context, "punctuality": punctuality.total_seconds()}
         duration = timedelta(0)
 
         TASKS_STARTED.add(1, counter_labels)
@@ -385,12 +425,20 @@ class Worker:
             TASKS_SUCCEEDED.add(1, counter_labels)
             duration = datetime.now(timezone.utc) - start
             log_context["duration"] = duration.total_seconds()
-            logger.info("%s [%s] %s", "↩", duration, call, extra=log_context)
+            rescheduled = await self._perpetuate_if_requested(
+                execution, dependencies, duration
+            )
+            arrow = "↫" if rescheduled else "↩"
+            logger.info("%s [%s] %s", arrow, duration, call, extra=log_context)
         except Exception:
             TASKS_FAILED.add(1, counter_labels)
             duration = datetime.now(timezone.utc) - start
             log_context["duration"] = duration.total_seconds()
             retried = await self._retry_if_requested(execution, dependencies)
+            if not retried:
+                retried = await self._perpetuate_if_requested(
+                    execution, dependencies, duration
+                )
             arrow = "↫" if retried else "↩"
             logger.exception("%s [%s] %s", arrow, duration, call, extra=log_context)
         finally:
@@ -442,6 +490,34 @@ class Worker:
 
         return False
 
+    async def _perpetuate_if_requested(
+        self, execution: Execution, dependencies: dict[str, Any], duration: timedelta
+    ) -> bool:
+        from .dependencies import Perpetual
+
+        perpetuals = [
+            perpetual
+            for perpetual in dependencies.values()
+            if isinstance(perpetual, Perpetual)
+        ]
+        if not perpetuals:
+            return False
+
+        perpetual = perpetuals[0]
+
+        if perpetual.cancelled:
+            return False
+
+        now = datetime.now(timezone.utc)
+        execution.when = max(now, now + perpetual.every - duration)
+        execution.args = perpetual.args
+        execution.kwargs = perpetual.kwargs
+
+        await self.docket.schedule(execution)
+
+        TASKS_PERPETUATED.add(1, {**self.labels(), **execution.specific_labels()})
+        return True
+
     @property
     def workers_set(self) -> str:
         return self.docket.workers_set
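
Note on the scheduling arithmetic in `_perpetuate_if_requested`: the next run is aimed one `every` period after the previous start, so the task's own duration is subtracted, and `max(now, ...)` keeps an overrunning task from being scheduled in the past. A worked sketch using the 50ms period from the tests (values are illustrative):

    from datetime import timedelta

    every = timedelta(milliseconds=50)

    # Fast task: 20ms of work leaves 30ms until the next start, so
    # consecutive starts stay ~50ms apart.
    assert max(timedelta(0), every - timedelta(milliseconds=20)) == timedelta(milliseconds=30)

    # Slow task: 80ms of work already overran the period, so the delay
    # clamps to zero and the next run is scheduled immediately.
    assert max(timedelta(0), every - timedelta(milliseconds=80)) == timedelta(0)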
--- /dev/null
+++ pydocket-0.3.0/tests/conftest.py
@@ -0,0 +1,167 @@
+import fcntl
+import os
+import socket
+import time
+from contextlib import contextmanager
+from datetime import datetime, timedelta, timezone
+from functools import partial
+from typing import AsyncGenerator, Callable, Generator, Iterable, cast
+from unittest.mock import AsyncMock
+from uuid import uuid4
+
+import pytest
+import redis.exceptions
+from docker import DockerClient
+from docker.models.containers import Container
+from redis import ConnectionPool, Redis
+
+from docket import Docket, Worker
+
+REDIS_VERSION = os.environ.get("REDIS_VERSION", "7.4")
+
+
+@pytest.fixture
+def now() -> Callable[[], datetime]:
+    return partial(datetime.now, timezone.utc)
+
+
+@contextmanager
+def _sync_redis(url: str) -> Generator[Redis, None, None]:
+    pool: ConnectionPool | None = None
+    redis = Redis.from_url(url)  # type: ignore
+    try:
+        with redis:
+            pool = redis.connection_pool  # type: ignore
+            yield redis
+    finally:
+        if pool:  # pragma: no branch
+            pool.disconnect()
+
+
+@contextmanager
+def _adminitrative_redis(port: int) -> Generator[Redis, None, None]:
+    with _sync_redis(f"redis://localhost:{port}/15") as r:
+        yield r
+
+
+def _wait_for_redis(port: int) -> None:
+    while True:
+        try:
+            with _adminitrative_redis(port) as r:
+                success = r.ping()  # type: ignore
+                if success:  # pragma: no branch
+                    return
+        except redis.exceptions.ConnectionError:  # pragma: no cover
+            time.sleep(0.1)
+
+
+@pytest.fixture(scope="session")
+def redis_server(testrun_uid: str, worker_id: str) -> Generator[Container, None, None]:
+    client = DockerClient.from_env()
+
+    container: Container | None = None
+    lock_file_name = f"/tmp/docket-unit-tests-{testrun_uid}-startup"
+
+    with open(lock_file_name, "w+") as lock_file:
+        fcntl.flock(lock_file, fcntl.LOCK_EX)
+
+        containers: Iterable[Container] = cast(
+            Iterable[Container],
+            client.containers.list(  # type: ignore
+                all=True,
+                filters={"label": "source=docket-unit-tests"},
+            ),
+        )
+        for c in containers:
+            if c.labels.get("testrun_uid") == testrun_uid:  # type: ignore
+                container = c
+            else:
+                c.remove(force=True)  # pragma: no cover
+
+        if not container:
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.bind(("", 0))
+                redis_port = s.getsockname()[1]
+
+            container = client.containers.run(
+                f"redis:{REDIS_VERSION}",
+                detach=True,
+                ports={"6379/tcp": redis_port},
+                labels={
+                    "source": "docket-unit-tests",
+                    "testrun_uid": testrun_uid,
+                },
+                auto_remove=True,
+            )
+
+            _wait_for_redis(redis_port)
+        else:
+            port_bindings = container.attrs["HostConfig"]["PortBindings"]["6379/tcp"]
+            redis_port = int(port_bindings[0]["HostPort"])
+
+        with _adminitrative_redis(redis_port) as r:
+            r.sadd(f"docket-unit-tests:{testrun_uid}", worker_id)
+
+    try:
+        yield container
+    finally:
+        with _adminitrative_redis(redis_port) as r:
+            with r.pipeline() as pipe:  # type: ignore
+                pipe.srem(f"docket-unit-tests:{testrun_uid}", worker_id)
+                pipe.scard(f"docket-unit-tests:{testrun_uid}")
+                count: int
+                _, count = pipe.execute()  # type: ignore
+
+        if count == 0:
+            container.stop()
+            os.remove(lock_file_name)
+
+
+@pytest.fixture
+def redis_port(redis_server: Container) -> int:
+    port_bindings = redis_server.attrs["HostConfig"]["PortBindings"]["6379/tcp"]
+    return int(port_bindings[0]["HostPort"])
+
+
+@pytest.fixture(scope="session")
+def redis_db(worker_id: str) -> int:
+    if not worker_id or "gw" not in worker_id:
+        return 0  # pragma: no cover
+    else:
+        return 0 + int(worker_id.replace("gw", ""))  # pragma: no cover
+
+
+@pytest.fixture
+def redis_url(redis_port: int, redis_db: int) -> str:
+    url = f"redis://localhost:{redis_port}/{redis_db}"
+    with _sync_redis(url) as r:
+        r.flushdb()  # type: ignore
+    return url
+
+
+@pytest.fixture
+async def docket(redis_url: str, aiolib: str) -> AsyncGenerator[Docket, None]:
+    async with Docket(name=f"test-docket-{uuid4()}", url=redis_url) as docket:
+        yield docket
+
+
+@pytest.fixture
+async def worker(docket: Docket) -> AsyncGenerator[Worker, None]:
+    async with Worker(
+        docket, minimum_check_interval=timedelta(milliseconds=10)
+    ) as worker:
+        yield worker
+
+
+@pytest.fixture
+def the_task() -> AsyncMock:
+    task = AsyncMock()
+    task.__name__ = "the_task"
+    return task
+
+
+@pytest.fixture
+def another_task() -> AsyncMock:
+    task = AsyncMock()
+    task.__name__ = "another_task"
+    return task
--- pydocket-0.2.0/tests/test_fundamentals.py
+++ pydocket-0.3.0/tests/test_fundamentals.py
@@ -22,6 +22,7 @@ from docket import (
     Execution,
     ExponentialRetry,
     Logged,
+    Perpetual,
     Retry,
     TaskKey,
     TaskLogger,
@@ -609,6 +610,42 @@ async def test_self_perpetuating_scheduled_tasks(
     assert calls["second"] == [21, 22, 23]
 
 
+async def test_infinitely_self_perpetuating_tasks(
+    docket: Docket, worker: Worker, now: Callable[[], datetime]
+):
+    """docket should support testing use cases for infinitely self-perpetuating tasks"""
+
+    calls: dict[str, list[int]] = {
+        "first": [],
+        "second": [],
+        "unaffected": [],
+    }
+
+    async def the_task(start: int, iteration: int, key: str = TaskKey()):
+        calls[key].append(start + iteration)
+        soon = now() + timedelta(milliseconds=100)
+        await docket.add(the_task, key=key, when=soon)(start, iteration + 1)
+
+    async def unaffected_task(start: int, iteration: int, key: str = TaskKey()):
+        calls[key].append(start + iteration)
+        if iteration < 3:
+            await docket.add(unaffected_task, key=key)(start, iteration + 1)
+
+    await docket.add(the_task, key="first")(10, 1)
+    await docket.add(the_task, key="second")(20, 1)
+    await docket.add(unaffected_task, key="unaffected")(30, 1)
+
+    # Using worker.run_until_finished() would hang here because the task is
+    # always queueing up a future run of itself.  With worker.run_at_most(), we
+    # can specify task keys that will only be allowed to run a limited number
+    # of times, thus allowing the worker to exit cleanly.
+    await worker.run_at_most({"first": 4, "second": 2})
+
+    assert calls["first"] == [11, 12, 13, 14]
+    assert calls["second"] == [21, 22]
+    assert calls["unaffected"] == [31, 32, 33]
+
+
 async def test_striking_entire_tasks(
     docket: Docket, worker: Worker, the_task: AsyncMock, another_task: AsyncMock
 ):
@@ -818,3 +855,104 @@ async def test_adding_task_with_unbindable_arguments(
     await worker.run_until_finished()
 
     assert "got an unexpected keyword argument 'd'" in caplog.text
+
+
+async def test_perpetual_tasks(docket: Docket, worker: Worker):
+    """Perpetual tasks should reschedule themselves forever"""
+
+    calls = 0
+
+    async def perpetual_task(
+        a: str,
+        b: int,
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        assert a == "a"
+        assert b == 2
+
+        assert isinstance(perpetual, Perpetual)
+
+        assert perpetual.every == timedelta(milliseconds=50)
+
+        nonlocal calls
+        calls += 1
+
+    execution = await docket.add(perpetual_task)(a="a", b=2)
+
+    await worker.run_at_most({execution.key: 3})
+
+    assert calls == 3
+
+
+async def test_perpetual_tasks_can_cancel_themselves(docket: Docket, worker: Worker):
+    """A perpetual task can request its own cancellation"""
+    calls = 0
+
+    async def perpetual_task(
+        a: str,
+        b: int,
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        assert a == "a"
+        assert b == 2
+
+        assert isinstance(perpetual, Perpetual)
+
+        assert perpetual.every == timedelta(milliseconds=50)
+
+        nonlocal calls
+        calls += 1
+
+        if calls == 3:
+            perpetual.cancel()
+
+    await docket.add(perpetual_task)(a="a", b=2)
+
+    await worker.run_until_finished()
+
+    assert calls == 3
+
+
+async def test_perpetual_tasks_can_change_their_parameters(
+    docket: Docket, worker: Worker
+):
+    """Perpetual tasks may change their parameters each time"""
+    arguments: list[tuple[str, int]] = []
+
+    async def perpetual_task(
+        a: str,
+        b: int,
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        arguments.append((a, b))
+        perpetual.perpetuate(a + "a", b=b + 1)
+
+    execution = await docket.add(perpetual_task)(a="a", b=1)
+
+    await worker.run_at_most({execution.key: 3})
+
+    assert len(arguments) == 3
+    assert arguments == [("a", 1), ("aa", 2), ("aaa", 3)]
+
+
+async def test_perpetual_tasks_perpetuate_even_after_errors(
+    docket: Docket, worker: Worker
+):
+    """Perpetual tasks should reschedule themselves even after raising errors"""
+    calls = 0
+
+    async def perpetual_task(
+        a: str,
+        b: int,
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        nonlocal calls
+        calls += 1
+
+        raise ValueError("woops!")
+
+    execution = await docket.add(perpetual_task)(a="a", b=1)
+
+    await worker.run_at_most({execution.key: 3})
+
+    assert calls == 3
--- pydocket-0.2.0/tests/test_worker.py
+++ pydocket-0.3.0/tests/test_worker.py
@@ -11,6 +11,7 @@ import redis.exceptions
 from redis.asyncio import Redis
 
 from docket import CurrentWorker, Docket, Worker
+from docket.dependencies import CurrentDocket, Perpetual
 from docket.docket import RedisMessage
 from docket.tasks import standard_tasks
 
@@ -358,3 +359,58 @@ async def test_worker_recovers_from_redis_errors(
     assert worker_info.last_seen > error_time, (
         "Worker should have sent heartbeats after the Redis error"
     )
+
+
+async def test_perpetual_tasks_are_scheduled_close_to_target_time(
+    docket: Docket, worker: Worker
+):
+    """A perpetual task is scheduled as close to the target period as possible"""
+    timestamps: list[datetime] = []
+
+    async def perpetual_task(
+        a: str,
+        b: int,
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        timestamps.append(datetime.now(timezone.utc))
+
+        if len(timestamps) % 2 == 0:
+            await asyncio.sleep(0.05)
+
+    await docket.add(perpetual_task, key="my-key")(a="a", b=2)
+
+    await worker.run_at_most({"my-key": 8})
+
+    assert len(timestamps) == 8
+
+    intervals = [next - previous for previous, next in zip(timestamps, timestamps[1:])]
+    total = timedelta(seconds=sum(i.total_seconds() for i in intervals))
+    average = total / len(intervals)
+
+    # even with a variable duration, Docket attempts to schedule them equally
+    assert timedelta(milliseconds=45) <= average <= timedelta(milliseconds=70)
+
+
+async def test_worker_can_exit_from_perpetual_tasks_that_queue_further_tasks(
+    docket: Docket, worker: Worker
+):
+    """A worker can exit if it's processing a perpetual task that queues more tasks"""
+
+    inner_calls = 0
+
+    async def inner_task():
+        nonlocal inner_calls
+        inner_calls += 1
+
+    async def perpetual_task(
+        docket: Docket = CurrentDocket(),
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        await docket.add(inner_task)()
+        await docket.add(inner_task)()
+
+    execution = await docket.add(perpetual_task)()
+
+    await worker.run_at_most({execution.key: 3})
+
+    assert inner_calls == 6
--- pydocket-0.2.0/tests/conftest.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import os
-import socket
-import time
-from contextlib import contextmanager
-from datetime import datetime, timezone
-from functools import partial
-from typing import AsyncGenerator, Callable, Generator, Iterable, cast
-from unittest.mock import AsyncMock
-from uuid import uuid4
-
-import pytest
-import redis.exceptions
-from docker import DockerClient
-from docker.models.containers import Container
-from redis import ConnectionPool, Redis
-
-from docket import Docket, Worker
-
-REDIS_VERSION = os.environ.get("REDIS_VERSION", "7.4")
-
-
-@pytest.fixture
-def now() -> Callable[[], datetime]:
-    return partial(datetime.now, timezone.utc)
-
-
-@pytest.fixture(scope="session")
-def redis_port() -> int:
-    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-        s.bind(("", 0))
-        return s.getsockname()[1]
-
-
-@contextmanager
-def _sync_redis(url: str) -> Generator[Redis, None, None]:
-    pool: ConnectionPool | None = None
-    redis = Redis.from_url(url)  # type: ignore
-    try:
-        with redis:
-            pool = redis.connection_pool  # type: ignore
-            yield redis
-    finally:
-        if pool:  # pragma: no branch
-            pool.disconnect()
-
-
-@pytest.fixture(scope="session")
-def redis_server(redis_port: int) -> Generator[Container, None, None]:
-    client = DockerClient.from_env()
-
-    container: Container
-
-    # Find and remove any containers from previous test runs
-    containers: Iterable[Container] = cast(
-        Iterable[Container],
-        client.containers.list(all=True, filters={"label": "source=docket-unit-tests"}),  # type: ignore
-    )
-    for container in containers:  # pragma: no cover
-        container.remove(force=True)
-
-    container = client.containers.run(
-        f"redis:{REDIS_VERSION}",
-        detach=True,
-        ports={"6379/tcp": redis_port},
-        labels={"source": "docket-unit-tests"},
-        auto_remove=True,
-    )
-
-    url = f"redis://localhost:{redis_port}/0"
-
-    while True:
-        try:
-            with _sync_redis(url) as r:
-                success = r.ping()  # type: ignore
-                if success:  # pragma: no branch
-                    break
-        except redis.exceptions.ConnectionError:  # pragma: no cover
-            time.sleep(0.1)
-
-    try:
-        yield container
-    finally:
-        container.stop()
-
-
-@pytest.fixture
-def redis_url(redis_server: Container, redis_port: int) -> str:
-    url = f"redis://localhost:{redis_port}/0"
-    with _sync_redis(url) as r:
-        r.flushdb()  # type: ignore
-    return url
-
-
-@pytest.fixture
-async def docket(redis_url: str, aiolib: str) -> AsyncGenerator[Docket, None]:
-    async with Docket(name=f"test-docket-{uuid4()}", url=redis_url) as docket:
-        yield docket
-
-
-@pytest.fixture
-async def worker(docket: Docket) -> AsyncGenerator[Worker, None]:
-    async with Worker(docket) as worker:
-        yield worker
-
-
-@pytest.fixture
-def the_task() -> AsyncMock:
-    task = AsyncMock()
-    task.__name__ = "the_task"
-    return task
-
-
-@pytest.fixture
-def another_task() -> AsyncMock:
-    task = AsyncMock()
-    task.__name__ = "another_task"
-    return task