pydocket 0.3.2__tar.gz → 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (51)
  1. {pydocket-0.3.2 → pydocket-0.5.0}/PKG-INFO +1 -1
  2. {pydocket-0.3.2 → pydocket-0.5.0}/chaos/driver.py +7 -11
  3. {pydocket-0.3.2 → pydocket-0.5.0}/chaos/producer.py +10 -1
  4. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/cli.py +25 -0
  5. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/docket.py +23 -11
  6. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/worker.py +124 -69
  7. {pydocket-0.3.2 → pydocket-0.5.0}/tests/conftest.py +3 -1
  8. {pydocket-0.3.2 → pydocket-0.5.0}/tests/test_fundamentals.py +92 -11
  9. {pydocket-0.3.2 → pydocket-0.5.0}/tests/test_worker.py +1 -1
  10. {pydocket-0.3.2 → pydocket-0.5.0}/.cursor/rules/general.mdc +0 -0
  11. {pydocket-0.3.2 → pydocket-0.5.0}/.cursor/rules/python-style.mdc +0 -0
  12. {pydocket-0.3.2 → pydocket-0.5.0}/.github/codecov.yml +0 -0
  13. {pydocket-0.3.2 → pydocket-0.5.0}/.github/workflows/chaos.yml +0 -0
  14. {pydocket-0.3.2 → pydocket-0.5.0}/.github/workflows/ci.yml +0 -0
  15. {pydocket-0.3.2 → pydocket-0.5.0}/.github/workflows/publish.yml +0 -0
  16. {pydocket-0.3.2 → pydocket-0.5.0}/.gitignore +0 -0
  17. {pydocket-0.3.2 → pydocket-0.5.0}/.pre-commit-config.yaml +0 -0
  18. {pydocket-0.3.2 → pydocket-0.5.0}/LICENSE +0 -0
  19. {pydocket-0.3.2 → pydocket-0.5.0}/README.md +0 -0
  20. {pydocket-0.3.2 → pydocket-0.5.0}/chaos/README.md +0 -0
  21. {pydocket-0.3.2 → pydocket-0.5.0}/chaos/__init__.py +0 -0
  22. {pydocket-0.3.2 → pydocket-0.5.0}/chaos/run +0 -0
  23. {pydocket-0.3.2 → pydocket-0.5.0}/chaos/tasks.py +0 -0
  24. {pydocket-0.3.2 → pydocket-0.5.0}/pyproject.toml +0 -0
  25. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/__init__.py +0 -0
  26. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/__main__.py +0 -0
  27. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/annotations.py +0 -0
  28. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/dependencies.py +0 -0
  29. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/execution.py +0 -0
  30. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/instrumentation.py +0 -0
  31. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/py.typed +0 -0
  32. {pydocket-0.3.2 → pydocket-0.5.0}/src/docket/tasks.py +0 -0
  33. {pydocket-0.3.2 → pydocket-0.5.0}/telemetry/.gitignore +0 -0
  34. {pydocket-0.3.2 → pydocket-0.5.0}/telemetry/start +0 -0
  35. {pydocket-0.3.2 → pydocket-0.5.0}/telemetry/stop +0 -0
  36. {pydocket-0.3.2 → pydocket-0.5.0}/tests/__init__.py +0 -0
  37. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/__init__.py +0 -0
  38. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/conftest.py +0 -0
  39. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_module.py +0 -0
  40. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_parsing.py +0 -0
  41. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_snapshot.py +0 -0
  42. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_striking.py +0 -0
  43. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_tasks.py +0 -0
  44. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_version.py +0 -0
  45. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_worker.py +0 -0
  46. {pydocket-0.3.2 → pydocket-0.5.0}/tests/cli/test_workers.py +0 -0
  47. {pydocket-0.3.2 → pydocket-0.5.0}/tests/test_dependencies.py +0 -0
  48. {pydocket-0.3.2 → pydocket-0.5.0}/tests/test_docket.py +0 -0
  49. {pydocket-0.3.2 → pydocket-0.5.0}/tests/test_instrumentation.py +0 -0
  50. {pydocket-0.3.2 → pydocket-0.5.0}/tests/test_striking.py +0 -0
  51. {pydocket-0.3.2 → pydocket-0.5.0}/uv.lock +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.3.2
+Version: 0.5.0
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
chaos/driver.py
@@ -76,9 +76,9 @@ async def run_redis(version: str) -> AsyncGenerator[tuple[str, Container], None]
 
 async def main(
     mode: Literal["performance", "chaos"] = "chaos",
-    tasks: int = 5000,
-    producers: int = 4,
-    workers: int = 7,
+    tasks: int = 20000,
+    producers: int = 5,
+    workers: int = 10,
 ):
     async with (
         run_redis("7.4.2") as (redis_url, redis_container),
@@ -97,9 +97,7 @@ async def main(
         # Add in some random strikes to performance test
         for _ in range(100):
             parameter = f"param_{random.randint(1, 100)}"
-            operator: Operator = random.choice(
-                ["==", "!=", ">", ">=", "<", "<=", "between"]
-            )
+            operator = random.choice(list(Operator))
             value = f"val_{random.randint(1, 1000)}"
             await docket.strike("rando", parameter, operator, value)
 
@@ -141,11 +139,9 @@ async def main(
             redis_url,
             "--tasks",
             "chaos.tasks:chaos_tasks",
-            env=environment
-            | {
-                "OTEL_SERVICE_NAME": "chaos-worker",
-                "DOCKET_WORKER_REDELIVERY_TIMEOUT": "5s",
-            },
+            "--redelivery-timeout",
+            "5s",
+            env=environment | {"OTEL_SERVICE_NAME": "chaos-worker"},
             stdout=subprocess.DEVNULL,
             stderr=subprocess.DEVNULL,
         )
chaos/producer.py
@@ -1,8 +1,11 @@
 import asyncio
+import datetime
 import logging
 import os
+import random
 import sys
 import time
+from datetime import timedelta
 
 import redis.exceptions
 
@@ -14,6 +17,10 @@ logging.getLogger().setLevel(logging.INFO)
 logger = logging.getLogger("chaos.producer")
 
 
+def now() -> datetime.datetime:
+    return datetime.datetime.now(datetime.timezone.utc)
+
+
 async def main(tasks_to_produce: int):
     docket = Docket(
         name=os.environ["DOCKET_NAME"],
@@ -25,7 +32,9 @@ async def main(tasks_to_produce: int):
     async with docket:
         async with docket.redis() as r:
             for _ in range(tasks_sent, tasks_to_produce):
-                execution = await docket.add(hello)()
+                jitter = 5 * ((random.random() * 2) - 1)
+                when = now() + timedelta(seconds=jitter)
+                execution = await docket.add(hello, when=when)()
                 await r.zadd("hello:sent", {execution.key: time.time()})
                 logger.info("Added task %s", execution.key)
                 tasks_sent += 1
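
This hunk spreads the chaos producer's tasks across a roughly ±5-second window instead of enqueuing everything as immediately due. A minimal sketch of the same pattern, assuming a locally running Redis; the docket name and URL are illustrative, and `hello` stands in for any registered async task:

```python
import asyncio
import random
from datetime import datetime, timedelta, timezone

from docket import Docket

from chaos.tasks import hello  # the task the chaos producer schedules


async def produce_one() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        # jitter in [-5, +5) seconds, matching the producer above
        jitter = 5 * ((random.random() * 2) - 1)
        when = datetime.now(timezone.utc) + timedelta(seconds=jitter)
        execution = await docket.add(hello, when=when)()
        print("scheduled", execution.key)


asyncio.run(produce_one())
```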
src/docket/cli.py
@@ -162,6 +162,7 @@ def worker(
                 "This can be specified multiple times. A task collection is any "
                 "iterable of async functions."
             ),
+            envvar="DOCKET_TASKS",
         ),
     ] = ["docket.tasks:standard_tasks"],
     docket_: Annotated[
@@ -236,6 +237,14 @@ def worker(
             envvar="DOCKET_WORKER_MINIMUM_CHECK_INTERVAL",
         ),
     ] = timedelta(milliseconds=100),
+    scheduling_resolution: Annotated[
+        timedelta,
+        typer.Option(
+            parser=duration,
+            help="How frequently to check for future tasks to be scheduled",
+            envvar="DOCKET_WORKER_SCHEDULING_RESOLUTION",
+        ),
+    ] = timedelta(milliseconds=250),
     until_finished: Annotated[
         bool,
         typer.Option(
@@ -260,6 +269,7 @@ def worker(
         redelivery_timeout=redelivery_timeout,
         reconnection_delay=reconnection_delay,
         minimum_check_interval=minimum_check_interval,
+        scheduling_resolution=scheduling_resolution,
         until_finished=until_finished,
         metrics_port=metrics_port,
         tasks=tasks,
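
The worker command gains a `scheduling_resolution` option (envvar `DOCKET_WORKER_SCHEDULING_RESOLUTION`, defaulting to 250ms) that is threaded through to the worker. A sketch of setting the same knob programmatically, assuming the `Worker` constructor usage shown in this diff's test fixtures; the docket name and URL are illustrative:

```python
import asyncio
from datetime import timedelta

from docket import Docket, Worker


async def main() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        # how often the worker checks for future tasks to promote;
        # 250ms mirrors the CLI default above
        async with Worker(
            docket,
            scheduling_resolution=timedelta(milliseconds=250),
        ) as worker:
            await worker.run_until_finished()


asyncio.run(main())
```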
@@ -542,6 +552,18 @@ def relative_time(now: datetime, when: datetime) -> str:
 
 @app.command(help="Shows a snapshot of what's on the docket right now")
 def snapshot(
+    tasks: Annotated[
+        list[str],
+        typer.Option(
+            "--tasks",
+            help=(
+                "The dotted path of a task collection to register with the docket. "
+                "This can be specified multiple times. A task collection is any "
+                "iterable of async functions."
+            ),
+            envvar="DOCKET_TASKS",
+        ),
+    ] = ["docket.tasks:standard_tasks"],
     docket_: Annotated[
         str,
         typer.Option(
@@ -560,6 +582,9 @@ def snapshot(
 ) -> None:
     async def run() -> DocketSnapshot:
         async with Docket(name=docket_, url=url) as docket:
+            for task_path in tasks:
+                docket.register_collection(task_path)
+
             return await docket.snapshot()
 
     snapshot = asyncio.run(run())
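
`snapshot` now accepts the same `--tasks` collections as the worker and registers them before taking the snapshot. A small sketch of the equivalent programmatic call, using only the `register_collection` and `snapshot` calls visible in this hunk (name and URL illustrative):

```python
import asyncio

from docket import Docket


async def take_snapshot() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        # register task collections so the snapshot can resolve task functions
        docket.register_collection("docket.tasks:standard_tasks")
        print(await docket.snapshot())


asyncio.run(take_snapshot())
```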
src/docket/docket.py
@@ -132,6 +132,9 @@ class Docket:
             - "redis://user:password@localhost:6379/0?ssl=true"
             - "rediss://localhost:6379/0"
             - "unix:///path/to/redis.sock"
+            heartbeat_interval: How often workers send heartbeat messages to the docket.
+            missed_heartbeats: How many heartbeats a worker can miss before it is
+                considered dead.
         """
         self.name = name
         self.url = url
@@ -305,6 +308,9 @@ class Docket:
     def stream_key(self) -> str:
         return f"{self.name}:stream"
 
+    def known_task_key(self, key: str) -> str:
+        return f"{self.name}:known:{key}"
+
     def parked_task_key(self, key: str) -> str:
         return f"{self.name}:{key}"
 
@@ -340,30 +346,36 @@
         when = execution.when
 
         async with self.redis() as redis:
-            # if the task is already in the queue, retain it
-            if await redis.zscore(self.queue_key, key) is not None:
+            # if the task is already in the queue or stream, retain it
+            if await redis.exists(self.known_task_key(key)):
+                logger.debug(
+                    "Task %r is already in the queue or stream, skipping schedule",
+                    key,
+                    extra=self.labels(),
+                )
                 return
 
-            if when <= datetime.now(timezone.utc):
-                await redis.xadd(self.stream_key, message)  # type: ignore[arg-type]
-            else:
-                async with redis.pipeline() as pipe:
+            async with redis.pipeline() as pipe:
+                pipe.set(self.known_task_key(key), when.timestamp())
+
+                if when <= datetime.now(timezone.utc):
+                    pipe.xadd(self.stream_key, message)  # type: ignore[arg-type]
+                else:
                     pipe.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
                     pipe.zadd(self.queue_key, {key: when.timestamp()})
-                    await pipe.execute()
+
+                await pipe.execute()
 
         TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
     async def cancel(self, key: str) -> None:
         with tracer.start_as_current_span(
             "docket.cancel",
-            attributes={
-                **self.labels(),
-                "docket.key": key,
-            },
+            attributes={**self.labels(), "docket.key": key},
         ):
             async with self.redis() as redis:
                 async with redis.pipeline() as pipe:
+                    pipe.delete(self.known_task_key(key))
                     pipe.delete(self.parked_task_key(key))
                     pipe.zrem(self.queue_key, key)
                     await pipe.execute()
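
`schedule` now writes a `known:` marker key alongside the task, so re-adding the same key is a no-op whether the task is parked for the future or already due in the stream, and `cancel` clears the marker. A sketch of the resulting behavior, mirroring the new tests further down in this diff; the docket name, URL, and `hello` task are illustrative:

```python
import asyncio
from uuid import uuid4

from docket import Docket, Worker

from chaos.tasks import hello  # any registered async task works here


async def main() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        key = f"my-task:{uuid4()}"

        await docket.add(hello, key=key)()
        # the second add with the same key is skipped: the marker exists
        await docket.add(hello, key=key)()

        async with Worker(docket) as worker:
            await worker.run_until_finished()  # hello runs exactly once

        # the worker deletes the marker before executing, so the key is reusable
        await docket.add(hello, key=key)()


asyncio.run(main())
```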
src/docket/worker.py
@@ -18,6 +18,7 @@ from uuid import uuid4
 import redis.exceptions
 from opentelemetry import propagate, trace
 from opentelemetry.trace import Tracer
+from redis.asyncio import Redis
 
 from .docket import (
     Docket,
@@ -68,6 +69,7 @@ class Worker:
     redelivery_timeout: timedelta
     reconnection_delay: timedelta
     minimum_check_interval: timedelta
+    scheduling_resolution: timedelta
 
     def __init__(
         self,
@@ -77,6 +79,7 @@
         redelivery_timeout: timedelta = timedelta(minutes=5),
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=100),
+        scheduling_resolution: timedelta = timedelta(milliseconds=250),
     ) -> None:
         self.docket = docket
         self.name = name or f"worker:{uuid4()}"
@@ -84,6 +87,7 @@
         self.redelivery_timeout = redelivery_timeout
         self.reconnection_delay = reconnection_delay
         self.minimum_check_interval = minimum_check_interval
+        self.scheduling_resolution = scheduling_resolution
 
     async def __aenter__(self) -> Self:
         self._heartbeat_task = asyncio.create_task(self._heartbeat())
@@ -128,6 +132,7 @@
         redelivery_timeout: timedelta = timedelta(minutes=5),
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=100),
+        scheduling_resolution: timedelta = timedelta(milliseconds=250),
         until_finished: bool = False,
         metrics_port: int | None = None,
         tasks: list[str] = ["docket.tasks:standard_tasks"],
@@ -144,6 +149,7 @@
             redelivery_timeout=redelivery_timeout,
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
+            scheduling_resolution=scheduling_resolution,
         ) as worker:
             if until_finished:
                 await worker.run_until_finished()
@@ -210,57 +216,24 @@
             await asyncio.sleep(self.reconnection_delay.total_seconds())
 
     async def _worker_loop(self, forever: bool = False):
-        async with self.docket.redis() as redis:
-            stream_due_tasks: _stream_due_tasks = cast(
-                _stream_due_tasks,
-                redis.register_script(
-                    # Lua script to atomically move scheduled tasks to the stream
-                    # KEYS[1]: queue key (sorted set)
-                    # KEYS[2]: stream key
-                    # ARGV[1]: current timestamp
-                    # ARGV[2]: docket name prefix
-                    """
-                    local total_work = redis.call('ZCARD', KEYS[1])
-                    local due_work = 0
-
-                    if total_work > 0 then
-                        local tasks = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1])
-
-                        for i, key in ipairs(tasks) do
-                            local hash_key = ARGV[2] .. ":" .. key
-                            local task_data = redis.call('HGETALL', hash_key)
-
-                            if #task_data > 0 then
-                                local task = {}
-                                for j = 1, #task_data, 2 do
-                                    task[task_data[j]] = task_data[j+1]
-                                end
-
-                                redis.call('XADD', KEYS[2], '*',
-                                    'key', task['key'],
-                                    'when', task['when'],
-                                    'function', task['function'],
-                                    'args', task['args'],
-                                    'kwargs', task['kwargs'],
-                                    'attempt', task['attempt']
-                                )
-                                redis.call('DEL', hash_key)
-                                due_work = due_work + 1
-                            end
-                        end
-                    end
-
-                    if due_work > 0 then
-                        redis.call('ZREMRANGEBYSCORE', KEYS[1], 0, ARGV[1])
-                    end
+        should_stop = asyncio.Event()
 
-                    return {total_work, due_work}
-                    """
-                ),
+        async with self.docket.redis() as redis:
+            scheduler_task = asyncio.create_task(
+                self._scheduler_loop(redis, should_stop)
             )
 
             active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
 
+            async def check_for_work() -> bool:
+                async with redis.pipeline() as pipeline:
+                    pipeline.xlen(self.docket.stream_key)
+                    pipeline.zcard(self.docket.queue_key)
+                    results: list[int] = await pipeline.execute()
+                    stream_len = results[0]
+                    queue_len = results[1]
+                    return stream_len > 0 or queue_len > 0
+
             async def process_completed_tasks() -> None:
                 completed_tasks = {task for task in active_tasks if task.done()}
                 for task in completed_tasks:
@@ -280,10 +253,13 @@
                 )
                 await pipeline.execute()
 
-            future_work, due_work = sys.maxsize, 0
+            has_work: bool = True
+
+            if not forever:  # pragma: no branch
+                has_work = await check_for_work()
 
             try:
-                while forever or future_work or active_tasks:
+                while forever or has_work or active_tasks:
                     await process_completed_tasks()
 
                     available_slots = self.concurrency - len(active_tasks)
@@ -297,28 +273,13 @@
                         task = asyncio.create_task(self._execute(message))
                        active_tasks[task] = message_id
 
-                        nonlocal available_slots, future_work
+                        nonlocal available_slots
                        available_slots -= 1
-                        future_work += 1
 
                    if available_slots <= 0:
                        await asyncio.sleep(self.minimum_check_interval.total_seconds())
                        continue
 
-                    future_work, due_work = await stream_due_tasks(
-                        keys=[self.docket.queue_key, self.docket.stream_key],
-                        args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
-                    )
-                    if due_work > 0:
-                        logger.debug(
-                            "Moved %d/%d due tasks from %s to %s",
-                            due_work,
-                            future_work,
-                            self.docket.queue_key,
-                            self.docket.stream_key,
-                            extra=self._log_context(),
-                        )
-
                    redeliveries: RedisMessages
                    _, redeliveries, *_ = await redis.xautoclaim(
                        name=self.docket.stream_key,
@@ -348,10 +309,14 @@
                        ),
                        count=available_slots,
                    )
+
                    for _, messages in new_deliveries:
                        for message_id, message in messages:
                            start_task(message_id, message)
 
+                    if not forever and not active_tasks and not new_deliveries:
+                        has_work = await check_for_work()
+
            except asyncio.CancelledError:
                if active_tasks:  # pragma: no cover
                    logger.info(
@@ -364,7 +329,98 @@
                 await asyncio.gather(*active_tasks, return_exceptions=True)
                 await process_completed_tasks()
 
+            should_stop.set()
+            await scheduler_task
+
+    async def _scheduler_loop(
+        self,
+        redis: Redis,
+        should_stop: asyncio.Event,
+    ) -> None:
+        """Loop that moves due tasks from the queue to the stream."""
+
+        stream_due_tasks: _stream_due_tasks = cast(
+            _stream_due_tasks,
+            redis.register_script(
+                # Lua script to atomically move scheduled tasks to the stream
+                # KEYS[1]: queue key (sorted set)
+                # KEYS[2]: stream key
+                # ARGV[1]: current timestamp
+                # ARGV[2]: docket name prefix
+                """
+                local total_work = redis.call('ZCARD', KEYS[1])
+                local due_work = 0
+
+                if total_work > 0 then
+                    local tasks = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1])
+
+                    for i, key in ipairs(tasks) do
+                        local hash_key = ARGV[2] .. ":" .. key
+                        local task_data = redis.call('HGETALL', hash_key)
+
+                        if #task_data > 0 then
+                            local task = {}
+                            for j = 1, #task_data, 2 do
+                                task[task_data[j]] = task_data[j+1]
+                            end
+
+                            redis.call('XADD', KEYS[2], '*',
+                                'key', task['key'],
+                                'when', task['when'],
+                                'function', task['function'],
+                                'args', task['args'],
+                                'kwargs', task['kwargs'],
+                                'attempt', task['attempt']
+                            )
+                            redis.call('DEL', hash_key)
+                            due_work = due_work + 1
+                        end
+                    end
+                end
+
+                if due_work > 0 then
+                    redis.call('ZREMRANGEBYSCORE', KEYS[1], 0, ARGV[1])
+                end
+
+                return {total_work, due_work}
+                """
+            ),
+        )
+
+        total_work: int = sys.maxsize
+
+        while not should_stop.is_set() or total_work:
+            try:
+                total_work, due_work = await stream_due_tasks(
+                    keys=[self.docket.queue_key, self.docket.stream_key],
+                    args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
+                )
+
+                if due_work > 0:
+                    logger.debug(
+                        "Moved %d/%d due tasks from %s to %s",
+                        due_work,
+                        total_work,
+                        self.docket.queue_key,
+                        self.docket.stream_key,
+                        extra=self._log_context(),
+                    )
+            except Exception:  # pragma: no cover
+                logger.exception(
+                    "Error in scheduler loop",
+                    exc_info=True,
+                    extra=self._log_context(),
+                )
+            finally:
+                await asyncio.sleep(self.scheduling_resolution.total_seconds())
+
+        logger.debug("Scheduler loop finished", extra=self._log_context())
+
     async def _execute(self, message: RedisMessage) -> None:
+        key = message[b"key"].decode()
+        async with self.docket.redis() as redis:
+            await redis.delete(self.docket.known_task_key(key))
+
         log_context: Mapping[str, str | float] = self._log_context()
 
         function_name = message[b"function"].decode()
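
The Lua promotion script moves out of `_worker_loop` into a dedicated `_scheduler_loop` task that ticks every `scheduling_resolution`, with an `asyncio.Event` coordinating shutdown. Reduced to a generic sketch (the names here are hypothetical, not docket APIs), the concurrency pattern is:

```python
import asyncio


async def scheduler_loop(interval: float, should_stop: asyncio.Event) -> None:
    # keep promoting due work until the owner signals shutdown
    while not should_stop.is_set():
        ...  # one unit of work, e.g. run the promotion script
        await asyncio.sleep(interval)


async def worker_loop() -> None:
    should_stop = asyncio.Event()
    scheduler = asyncio.create_task(scheduler_loop(0.25, should_stop))
    try:
        await asyncio.sleep(1.0)  # consume the stream here
    finally:
        should_stop.set()  # stop the scheduler...
        await scheduler  # ...and wait for its final tick


asyncio.run(worker_loop())
```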
@@ -569,11 +625,10 @@
             pipeline.zcount(self.docket.queue_key, 0, now)
             pipeline.zcount(self.docket.queue_key, now, "+inf")
 
-            (
-                stream_depth,
-                overdue_depth,
-                schedule_depth,
-            ) = await pipeline.execute()
+            results: list[int] = await pipeline.execute()
+            stream_depth = results[0]
+            overdue_depth = results[1]
+            schedule_depth = results[2]
 
             QUEUE_DEPTH.set(
                 stream_depth + overdue_depth, self.docket.labels()
tests/conftest.py
@@ -155,7 +155,9 @@ async def docket(redis_url: str, aiolib: str) -> AsyncGenerator[Docket, None]:
 @pytest.fixture
 async def worker(docket: Docket) -> AsyncGenerator[Worker, None]:
     async with Worker(
-        docket, minimum_check_interval=timedelta(milliseconds=10)
+        docket,
+        minimum_check_interval=timedelta(milliseconds=10),
+        scheduling_resolution=timedelta(milliseconds=10),
     ) as worker:
         yield worker
 
tests/test_fundamentals.py
@@ -79,7 +79,7 @@ async def test_scheduled_execution(
     assert when <= now()
 
 
-async def test_adding_is_itempotent(
+async def test_adding_is_idempotent(
     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
 ):
     """docket should allow for rescheduling a task for later"""
@@ -159,6 +159,77 @@ async def test_rescheduling_by_name(
     assert later <= now()
 
 
+async def test_task_keys_are_idempotent_in_the_future(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """docket should only allow one task with the same key to be scheduled or due"""
+
+    key = f"my-cool-task:{uuid4()}"
+
+    soon = now() + timedelta(milliseconds=10)
+    await docket.add(the_task, when=soon, key=key)("a", "b", c="c")
+    await docket.add(the_task, when=now(), key=key)("d", "e", c="f")
+
+    await worker.run_until_finished()
+
+    the_task.assert_awaited_once_with("a", "b", c="c")
+    the_task.reset_mock()
+
+    # It should be fine to run it afterward
+    await docket.add(the_task, key=key)("d", "e", c="f")
+
+    await worker.run_until_finished()
+
+    the_task.assert_awaited_once_with("d", "e", c="f")
+
+
+async def test_task_keys_are_idempotent_between_the_future_and_present(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """docket should only allow one task with the same key to be scheduled or due"""
+
+    key = f"my-cool-task:{uuid4()}"
+
+    soon = now() + timedelta(milliseconds=10)
+    await docket.add(the_task, when=now(), key=key)("a", "b", c="c")
+    await docket.add(the_task, when=soon, key=key)("d", "e", c="f")
+
+    await worker.run_until_finished()
+
+    the_task.assert_awaited_once_with("a", "b", c="c")
+    the_task.reset_mock()
+
+    # It should be fine to run it afterward
+    await docket.add(the_task, key=key)("d", "e", c="f")
+
+    await worker.run_until_finished()
+
+    the_task.assert_awaited_once_with("d", "e", c="f")
+
+
+async def test_task_keys_are_idempotent_in_the_present(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """docket should only allow one task with the same key to be scheduled or due"""
+
+    key = f"my-cool-task:{uuid4()}"
+
+    await docket.add(the_task, when=now(), key=key)("a", "b", c="c")
+    await docket.add(the_task, when=now(), key=key)("d", "e", c="f")
+
+    await worker.run_until_finished()
+
+    the_task.assert_awaited_once_with("a", "b", c="c")
+    the_task.reset_mock()
+
+    # It should be fine to run it afterward
+    await docket.add(the_task, key=key)("d", "e", c="f")
+
+    await worker.run_until_finished()
+
+    the_task.assert_awaited_once_with("d", "e", c="f")
+
+
 async def test_cancelling_future_task(
     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
 ):
@@ -696,7 +767,8 @@ async def test_striking_entire_parameters(
             call(customer_id="123", order_id="456"),
             call(customer_id="456", order_id="789"),
             # customer_id == 789 is stricken
-        ]
+        ],
+        any_order=True,
     )
     the_task.reset_mock()
 
@@ -705,7 +777,8 @@
         [
             call(customer_id="456", order_id="012"),
             # customer_id == 789 is stricken
-        ]
+        ],
+        any_order=True,
     )
     another_task.reset_mock()
 
@@ -725,7 +798,8 @@
             # customer_id == 123 is stricken
             call(customer_id="456", order_id="789"),
             # customer_id == 789 is stricken
-        ]
+        ],
+        any_order=True,
     )
     the_task.reset_mock()
 
@@ -734,7 +808,8 @@
         [
             call(customer_id="456", order_id="012"),
             # customer_id == 789 is stricken
-        ]
+        ],
+        any_order=True,
     )
     another_task.reset_mock()
 
@@ -754,7 +829,8 @@
             call(customer_id="123", order_id="456"),
             call(customer_id="456", order_id="789"),
             # customer_id == 789 is still stricken
-        ]
+        ],
+        any_order=True,
     )
 
     assert another_task.call_count == 1
@@ -762,7 +838,8 @@
         [
             call(customer_id="456", order_id="012"),
             # customer_id == 789 is still stricken
-        ]
+        ],
+        any_order=True,
     )
 
 
@@ -787,7 +864,8 @@ async def test_striking_tasks_for_specific_parameters(
             # b <= 2 is stricken, so b=1 is out
             # b <= 2 is stricken, so b=2 is out
             call("a", b=3),
-        ]
+        ],
+        any_order=True,
     )
     the_task.reset_mock()
 
@@ -797,7 +875,8 @@
             call("d", b=1),
             call("d", b=2),
             call("d", b=3),
-        ]
+        ],
+        any_order=True,
     )
     another_task.reset_mock()
 
@@ -818,7 +897,8 @@
             call("a", b=1),
             call("a", b=2),
             call("a", b=3),
-        ]
+        ],
+        any_order=True,
     )
 
     assert another_task.call_count == 3
@@ -827,7 +907,8 @@
             call("d", b=1),
             call("d", b=2),
             call("d", b=3),
-        ]
+        ],
+        any_order=True,
     )
 
 
tests/test_worker.py
@@ -388,7 +388,7 @@ async def test_perpetual_tasks_are_scheduled_close_to_target_time(
     average = total / len(intervals)
 
     # even with a variable duration, Docket attempts to schedule them equally
-    assert timedelta(milliseconds=45) <= average <= timedelta(milliseconds=70)
+    assert timedelta(milliseconds=45) <= average <= timedelta(milliseconds=75)
 
 
 async def test_worker_can_exit_from_perpetual_tasks_that_queue_further_tasks(