pydocket 0.9.1__tar.gz → 0.10.0__tar.gz

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of pydocket might be problematic.

Files changed (71)
  1. {pydocket-0.9.1 → pydocket-0.10.0}/PKG-INFO +1 -1
  2. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/docket.py +35 -151
  3. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_fundamentals.py +8 -25
  4. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_worker.py +13 -317
  5. {pydocket-0.9.1 → pydocket-0.10.0}/.cursor/rules/general.mdc +0 -0
  6. {pydocket-0.9.1 → pydocket-0.10.0}/.cursor/rules/python-style.mdc +0 -0
  7. {pydocket-0.9.1 → pydocket-0.10.0}/.github/codecov.yml +0 -0
  8. {pydocket-0.9.1 → pydocket-0.10.0}/.github/workflows/chaos.yml +0 -0
  9. {pydocket-0.9.1 → pydocket-0.10.0}/.github/workflows/ci.yml +0 -0
  10. {pydocket-0.9.1 → pydocket-0.10.0}/.github/workflows/docs.yml +0 -0
  11. {pydocket-0.9.1 → pydocket-0.10.0}/.github/workflows/publish.yml +0 -0
  12. {pydocket-0.9.1 → pydocket-0.10.0}/.gitignore +0 -0
  13. {pydocket-0.9.1 → pydocket-0.10.0}/.pre-commit-config.yaml +0 -0
  14. {pydocket-0.9.1 → pydocket-0.10.0}/CLAUDE.md +0 -0
  15. {pydocket-0.9.1 → pydocket-0.10.0}/LICENSE +0 -0
  16. {pydocket-0.9.1 → pydocket-0.10.0}/README.md +0 -0
  17. {pydocket-0.9.1 → pydocket-0.10.0}/chaos/README.md +0 -0
  18. {pydocket-0.9.1 → pydocket-0.10.0}/chaos/__init__.py +0 -0
  19. {pydocket-0.9.1 → pydocket-0.10.0}/chaos/driver.py +0 -0
  20. {pydocket-0.9.1 → pydocket-0.10.0}/chaos/producer.py +0 -0
  21. {pydocket-0.9.1 → pydocket-0.10.0}/chaos/run +0 -0
  22. {pydocket-0.9.1 → pydocket-0.10.0}/chaos/tasks.py +0 -0
  23. {pydocket-0.9.1 → pydocket-0.10.0}/docs/advanced-patterns.md +0 -0
  24. {pydocket-0.9.1 → pydocket-0.10.0}/docs/api-reference.md +0 -0
  25. {pydocket-0.9.1 → pydocket-0.10.0}/docs/dependencies.md +0 -0
  26. {pydocket-0.9.1 → pydocket-0.10.0}/docs/getting-started.md +0 -0
  27. {pydocket-0.9.1 → pydocket-0.10.0}/docs/index.md +0 -0
  28. {pydocket-0.9.1 → pydocket-0.10.0}/docs/production.md +0 -0
  29. {pydocket-0.9.1 → pydocket-0.10.0}/docs/testing.md +0 -0
  30. {pydocket-0.9.1 → pydocket-0.10.0}/examples/__init__.py +0 -0
  31. {pydocket-0.9.1 → pydocket-0.10.0}/examples/common.py +0 -0
  32. {pydocket-0.9.1 → pydocket-0.10.0}/examples/concurrency_control.py +0 -0
  33. {pydocket-0.9.1 → pydocket-0.10.0}/examples/find_and_flood.py +0 -0
  34. {pydocket-0.9.1 → pydocket-0.10.0}/examples/self_perpetuating.py +0 -0
  35. {pydocket-0.9.1 → pydocket-0.10.0}/mkdocs.yml +0 -0
  36. {pydocket-0.9.1 → pydocket-0.10.0}/pyproject.toml +0 -0
  37. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/__init__.py +0 -0
  38. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/__main__.py +0 -0
  39. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/annotations.py +0 -0
  40. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/cli.py +0 -0
  41. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/dependencies.py +0 -0
  42. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/execution.py +0 -0
  43. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/instrumentation.py +0 -0
  44. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/py.typed +0 -0
  45. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/tasks.py +0 -0
  46. {pydocket-0.9.1 → pydocket-0.10.0}/src/docket/worker.py +0 -0
  47. {pydocket-0.9.1 → pydocket-0.10.0}/telemetry/.gitignore +0 -0
  48. {pydocket-0.9.1 → pydocket-0.10.0}/telemetry/start +0 -0
  49. {pydocket-0.9.1 → pydocket-0.10.0}/telemetry/stop +0 -0
  50. {pydocket-0.9.1 → pydocket-0.10.0}/tests/__init__.py +0 -0
  51. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/__init__.py +0 -0
  52. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/conftest.py +0 -0
  53. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_clear.py +0 -0
  54. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_module.py +0 -0
  55. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_parsing.py +0 -0
  56. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_snapshot.py +0 -0
  57. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_striking.py +0 -0
  58. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_tasks.py +0 -0
  59. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_version.py +0 -0
  60. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_worker.py +0 -0
  61. {pydocket-0.9.1 → pydocket-0.10.0}/tests/cli/test_workers.py +0 -0
  62. {pydocket-0.9.1 → pydocket-0.10.0}/tests/conftest.py +0 -0
  63. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_concurrency_basic.py +0 -0
  64. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_concurrency_control.py +0 -0
  65. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_concurrency_refresh.py +0 -0
  66. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_dependencies.py +0 -0
  67. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_docket.py +0 -0
  68. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_execution.py +0 -0
  69. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_instrumentation.py +0 -0
  70. {pydocket-0.9.1 → pydocket-0.10.0}/tests/test_striking.py +0 -0
  71. {pydocket-0.9.1 → pydocket-0.10.0}/uv.lock +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydocket
- Version: 0.9.1
+ Version: 0.10.0
  Summary: A distributed background task system for Python functions
  Project-URL: Homepage, https://github.com/chrisguidry/docket
  Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
@@ -16,7 +16,6 @@ from typing import (
      Mapping,
      NoReturn,
      ParamSpec,
-     Protocol,
      Self,
      Sequence,
      TypedDict,
@@ -28,6 +27,7 @@ from typing import (
  import redis.exceptions
  from opentelemetry import propagate, trace
  from redis.asyncio import ConnectionPool, Redis
+ from redis.asyncio.client import Pipeline
  from uuid_extensions import uuid7

  from .execution import (
@@ -55,18 +55,6 @@ logger: logging.Logger = logging.getLogger(__name__)
  tracer: trace.Tracer = trace.get_tracer(__name__)


- class _schedule_task(Protocol):
-     async def __call__(
-         self, keys: list[str], args: list[str | float | bytes]
-     ) -> str: ...  # pragma: no cover
-
-
- class _cancel_task(Protocol):
-     async def __call__(
-         self, keys: list[str], args: list[str]
-     ) -> str: ...  # pragma: no cover
-
-
  P = ParamSpec("P")
  R = TypeVar("R")

@@ -143,8 +131,6 @@ class Docket:

      _monitor_strikes_task: asyncio.Task[None]
      _connection_pool: ConnectionPool
-     _schedule_task_script: _schedule_task | None
-     _cancel_task_script: _cancel_task | None

      def __init__(
          self,
@@ -170,8 +156,6 @@ class Docket:
          self.url = url
          self.heartbeat_interval = heartbeat_interval
          self.missed_heartbeats = missed_heartbeats
-         self._schedule_task_script = None
-         self._cancel_task_script = None

      @property
      def worker_group_name(self) -> str:
@@ -316,7 +300,9 @@ class Docket:
          execution = Execution(function, args, kwargs, when, key, attempt=1)

          async with self.redis() as redis:
-             await self._schedule(redis, execution, replace=False)
+             async with redis.pipeline() as pipeline:
+                 await self._schedule(redis, pipeline, execution, replace=False)
+                 await pipeline.execute()

          TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})
          TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
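
Note: every updated call site in this file follows redis-py's asyncio pipeline pattern, where commands are buffered locally on the pipeline object and flushed to Redis in one round trip when execute() is awaited. A minimal, self-contained sketch of that pattern (not docket's code; the URL and key names below are placeholders):

    import asyncio

    from redis.asyncio import Redis


    async def main() -> None:
        redis = Redis.from_url("redis://localhost:6379/0")  # placeholder URL
        try:
            async with redis.pipeline(transaction=True) as pipeline:
                # Each call queues a command locally and returns immediately...
                pipeline.set("docket:known:my-task", "1700000000.0")
                pipeline.zadd("docket:queue", {"my-task": 1700000000.0})
                # ...and execute() sends them all in one MULTI/EXEC round trip.
                results = await pipeline.execute()
            print(results)  # e.g. [True, 1]
        finally:
            await redis.aclose()  # redis-py 5+ spelling of close()


    asyncio.run(main())

This is why the diff calls pipeline.set(...) without await but does await pipeline.execute(): on an async Pipeline, command methods only buffer.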
@@ -375,7 +361,9 @@ class Docket:
          execution = Execution(function, args, kwargs, when, key, attempt=1)

          async with self.redis() as redis:
-             await self._schedule(redis, execution, replace=True)
+             async with redis.pipeline() as pipeline:
+                 await self._schedule(redis, pipeline, execution, replace=True)
+                 await pipeline.execute()

          TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})
          TASKS_CANCELLED.add(1, {**self.labels(), **execution.general_labels()})
@@ -395,7 +383,9 @@ class Docket:
              },
          ):
              async with self.redis() as redis:
-                 await self._schedule(redis, execution, replace=False)
+                 async with redis.pipeline() as pipeline:
+                     await self._schedule(redis, pipeline, execution, replace=False)
+                     await pipeline.execute()

              TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})

@@ -410,7 +400,9 @@ class Docket:
              attributes={**self.labels(), "docket.key": key},
          ):
              async with self.redis() as redis:
-                 await self._cancel(redis, key)
+                 async with redis.pipeline() as pipeline:
+                     await self._cancel(pipeline, key)
+                     await pipeline.execute()

              TASKS_CANCELLED.add(1, self.labels())

@@ -431,17 +423,10 @@ class Docket:
      async def _schedule(
          self,
          redis: Redis,
+         pipeline: Pipeline,
          execution: Execution,
          replace: bool = False,
      ) -> None:
-         """Schedule a task atomically.
-
-         Handles:
-         - Checking for task existence
-         - Cancelling existing tasks when replacing
-         - Adding tasks to stream (immediate) or queue (future)
-         - Tracking stream message IDs for later cancellation
-         """
          if self.strike_list.is_stricken(execution):
              logger.warning(
                  "%r is stricken, skipping schedule of %r",
@@ -464,133 +449,32 @@ class Docket:
          key = execution.key
          when = execution.when
          known_task_key = self.known_task_key(key)
-         is_immediate = when <= datetime.now(timezone.utc)

-         # Lock per task key to prevent race conditions between concurrent operations
          async with redis.lock(f"{known_task_key}:lock", timeout=10):
-             if self._schedule_task_script is None:
-                 self._schedule_task_script = cast(
-                     _schedule_task,
-                     redis.register_script(
-                         # KEYS: stream_key, known_key, parked_key, queue_key
-                         # ARGV: task_key, when_timestamp, is_immediate, replace, ...message_fields
-                         """
-                         local stream_key = KEYS[1]
-                         local known_key = KEYS[2]
-                         local parked_key = KEYS[3]
-                         local queue_key = KEYS[4]
-
-                         local task_key = ARGV[1]
-                         local when_timestamp = ARGV[2]
-                         local is_immediate = ARGV[3] == '1'
-                         local replace = ARGV[4] == '1'
-
-                         -- Extract message fields from ARGV[5] onwards
-                         local message = {}
-                         for i = 5, #ARGV, 2 do
-                             message[#message + 1] = ARGV[i]      -- field name
-                             message[#message + 1] = ARGV[i + 1]  -- field value
-                         end
-
-                         -- Handle replacement: cancel existing task if needed
-                         if replace then
-                             local existing_message_id = redis.call('HGET', known_key, 'stream_message_id')
-                             if existing_message_id then
-                                 redis.call('XDEL', stream_key, existing_message_id)
-                             end
-                             redis.call('DEL', known_key, parked_key)
-                             redis.call('ZREM', queue_key, task_key)
-                         else
-                             -- Check if task already exists
-                             if redis.call('EXISTS', known_key) == 1 then
-                                 return 'EXISTS'
-                             end
-                         end
-
-                         if is_immediate then
-                             -- Add to stream and store message ID for later cancellation
-                             local message_id = redis.call('XADD', stream_key, '*', unpack(message))
-                             redis.call('HSET', known_key, 'when', when_timestamp, 'stream_message_id', message_id)
-                             return message_id
-                         else
-                             -- Add to queue with task data in parked hash
-                             redis.call('HSET', known_key, 'when', when_timestamp)
-                             redis.call('HSET', parked_key, unpack(message))
-                             redis.call('ZADD', queue_key, when_timestamp, task_key)
-                             return 'QUEUED'
-                         end
-                         """
-                     ),
-                 )
-             schedule_task = self._schedule_task_script
+             if replace:
+                 await self._cancel(pipeline, key)
+             else:
+                 # if the task is already in the queue or stream, retain it
+                 if await redis.exists(known_task_key):
+                     logger.debug(
+                         "Task %r is already in the queue or stream, not scheduling",
+                         key,
+                         extra=self.labels(),
+                     )
+                     return

-             await schedule_task(
-                 keys=[
-                     self.stream_key,
-                     known_task_key,
-                     self.parked_task_key(key),
-                     self.queue_key,
-                 ],
-                 args=[
-                     key,
-                     str(when.timestamp()),
-                     "1" if is_immediate else "0",
-                     "1" if replace else "0",
-                     *[
-                         item
-                         for field, value in message.items()
-                         for item in (field, value)
-                     ],
-                 ],
-             )
+             pipeline.set(known_task_key, when.timestamp())

-     async def _cancel(self, redis: Redis, key: str) -> None:
-         """Cancel a task atomically.
+             if when <= datetime.now(timezone.utc):
+                 pipeline.xadd(self.stream_key, message)  # type: ignore[arg-type]
+             else:
+                 pipeline.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
+                 pipeline.zadd(self.queue_key, {key: when.timestamp()})

-         Handles cancellation regardless of task location:
-         - From the stream (using stored message ID)
-         - From the queue (scheduled tasks)
-         - Cleans up all associated metadata keys
-         """
-         if self._cancel_task_script is None:
-             self._cancel_task_script = cast(
-                 _cancel_task,
-                 redis.register_script(
-                     # KEYS: stream_key, known_key, parked_key, queue_key
-                     # ARGV: task_key
-                     """
-                     local stream_key = KEYS[1]
-                     local known_key = KEYS[2]
-                     local parked_key = KEYS[3]
-                     local queue_key = KEYS[4]
-                     local task_key = ARGV[1]
-
-                     -- Delete from stream if message ID exists
-                     local message_id = redis.call('HGET', known_key, 'stream_message_id')
-                     if message_id then
-                         redis.call('XDEL', stream_key, message_id)
-                     end
-
-                     -- Clean up all task-related keys
-                     redis.call('DEL', known_key, parked_key)
-                     redis.call('ZREM', queue_key, task_key)
-
-                     return 'OK'
-                     """
-                 ),
-             )
-         cancel_task = self._cancel_task_script
-
-         # Execute the cancellation script
-         await cancel_task(
-             keys=[
-                 self.stream_key,
-                 self.known_task_key(key),
-                 self.parked_task_key(key),
-                 self.queue_key,
-             ],
-             args=[key],
-         )
+     async def _cancel(self, pipeline: Pipeline, key: str) -> None:
+         pipeline.delete(self.known_task_key(key))
+         pipeline.delete(self.parked_task_key(key))
+         pipeline.zrem(self.queue_key, key)

      @property
      def strike_key(self) -> str:
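
Note: the net effect of this hunk is that 0.9.1's registered Lua scripts, which performed the existence check, the replacement, and the stream-versus-queue placement server-side in one atomic step, are replaced by a plain read on the live connection plus buffered writes on the caller's pipeline, serialized by the per-key redis.lock(). A hedged sketch of the resulting write pattern, with placeholder key names standing in for docket's key helpers:

    from datetime import datetime, timezone

    from redis.asyncio import Redis
    from redis.asyncio.client import Pipeline


    async def schedule_sketch(
        redis: Redis,
        pipeline: Pipeline,
        key: str,
        when: datetime,
        message: dict[str, str],
    ) -> None:
        """Sketch of the 0.10.0 write pattern; key names are placeholders."""
        known_key = f"docket:known:{key}"

        # Reads still happen on the live connection...
        if await redis.exists(known_key):
            return  # an already-scheduled task is retained

        # ...while writes are only buffered; the caller awaits pipeline.execute().
        pipeline.set(known_key, when.timestamp())
        if when <= datetime.now(timezone.utc):
            pipeline.xadd("docket:stream", message)  # due now: straight to the stream
        else:
            pipeline.hset(f"docket:parked:{key}", mapping=message)  # park the payload
            pipeline.zadd("docket:queue", {key: when.timestamp()})  # index by due time

One observable trade-off, judging only from this diff: the old scripts stored a stream_message_id and could XDEL it, so replace() and cancel() reached tasks that had already been moved to the stream; the new _cancel only deletes the known and parked keys and the queue entry.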
@@ -104,9 +104,13 @@ async def test_adding_is_idempotent(
      assert soon <= now() < later


+ @pytest.mark.skip(
+     "Temporarily skipping due to test flake for task rescheduling. "
+     "See https://github.com/chrisguidry/docket/issues/149"
+ )
  async def test_rescheduling_later(
      docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
- ):
+ ):  # pragma: no cover
      """docket should allow for rescheduling a task for later"""

      key = f"my-cool-task:{uuid4()}"
@@ -250,10 +254,10 @@ async def test_cancelling_future_task(
      the_task.assert_not_called()


- async def test_cancelling_immediate_task(
+ async def test_cancelling_current_task_not_supported(
      docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
  ):
-     """docket can cancel a task that is scheduled immediately"""
+     """docket does not allow cancelling a task that is schedule now"""

      execution = await docket.add(the_task, now())("a", "b", c="c")

@@ -261,28 +265,7 @@ async def test_cancelling_immediate_task(

      await worker.run_until_finished()

-     the_task.assert_not_called()
-
-
- async def test_cancellation_is_idempotent(
-     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
- ):
-     """Test that canceling the same task twice doesn't error."""
-     key = f"test-task:{uuid4()}"
-
-     # Schedule a task
-     later = now() + timedelta(seconds=1)
-     await docket.add(the_task, later, key=key)("test")
-
-     # Cancel it twice - both should succeed without error
-     await docket.cancel(key)
-     await docket.cancel(key)  # Should be idempotent
-
-     # Run worker to ensure the task was actually cancelled
-     await worker.run_until_finished()
-
-     # Task should not have been executed since it was cancelled
-     the_task.assert_not_called()
+     the_task.assert_awaited_once_with("a", "b", c="c")


  async def test_errors_are_logged(
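
Note: the two test changes above pin down the behavioral consequence of the refactor: cancellation now only takes effect while a task is still parked in the future queue; once it has been written to the stream it runs anyway. A sketch of both cases against docket's public API, assuming the same docket/worker/the_task/now fixtures these tests use (the key value is a placeholder):

    # Still cancellable: a future task stays parked in the queue until due
    # (per test_cancelling_future_task above).
    later = now() + timedelta(seconds=60)
    execution = await docket.add(the_task, later, key="future-task")("a", "b", c="c")
    await docket.cancel(execution.key)
    await worker.run_until_finished()
    the_task.assert_not_called()

    # No longer cancellable: an immediate task is already in the stream.
    execution = await docket.add(the_task, now())("a", "b", c="c")
    await docket.cancel(execution.key)
    await worker.run_until_finished()
    the_task.assert_awaited_once_with("a", "b", c="c")  # it ran anyway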
@@ -1,14 +1,10 @@
  import asyncio
  import logging
- import time
  from contextlib import asynccontextmanager
- from contextvars import ContextVar
  from datetime import datetime, timedelta, timezone
- from typing import AsyncGenerator, Callable, Iterable
+ from typing import AsyncGenerator
  from unittest.mock import AsyncMock, patch
- from uuid import uuid4

- import cloudpickle  # type: ignore[import]
  import pytest
  from redis.asyncio import Redis
  from redis.exceptions import ConnectionError
@@ -21,8 +17,6 @@ from docket import (
      Perpetual,
      Worker,
  )
- from docket.dependencies import Timeout
- from docket.execution import Execution
  from docket.tasks import standard_tasks
  from docket.worker import ms

@@ -180,6 +174,7 @@ async def test_redeliveries_respect_concurrency_limits(docket: Docket):
          nonlocal failure_count

          # Record when this task runs
+         import time

          task_executions.append((customer_id, time.time()))

@@ -560,6 +555,7 @@ async def test_worker_can_be_told_to_skip_automatic_tasks(docket: Docket):

  async def test_worker_concurrency_limits_task_queuing_behavior(docket: Docket):
      """Test that concurrency limits control task execution properly"""
+     from contextvars import ContextVar

      # Use contextvar for reliable tracking across async execution
      execution_log: ContextVar[list[tuple[str, int]]] = ContextVar("execution_log")
@@ -1175,6 +1171,7 @@ async def test_worker_concurrency_edge_cases(docket: Docket):

  async def test_worker_timeout_exceeds_redelivery_timeout(docket: Docket):
      """Test worker handles user timeout longer than redelivery timeout."""
+     from docket.dependencies import Timeout

      task_executed = False

@@ -1253,6 +1250,8 @@ async def test_worker_concurrency_missing_argument_early_return(docket: Docket):

  async def test_worker_no_concurrency_dependency_in_function(docket: Docket):
      """Test _can_start_task with function that has no concurrency dependency."""
+     from docket.execution import Execution
+     from datetime import datetime, timezone

      async def task_without_concurrency_dependency():
          await asyncio.sleep(0.001)
@@ -1278,6 +1277,8 @@ async def test_worker_no_concurrency_dependency_in_function(docket: Docket):

  async def test_worker_no_concurrency_dependency_in_release(docket: Docket):
      """Test _release_concurrency_slot with function that has no concurrency dependency."""
+     from docket.execution import Execution
+     from datetime import datetime, timezone

      async def task_without_concurrency_dependency():
          await asyncio.sleep(0.001)
@@ -1302,6 +1303,8 @@ async def test_worker_no_concurrency_dependency_in_release(docket: Docket):

  async def test_worker_missing_concurrency_argument_in_release(docket: Docket):
      """Test _release_concurrency_slot when concurrency argument is missing."""
+     from docket.execution import Execution
+     from datetime import datetime, timezone

      async def task_with_missing_arg(
          concurrency: ConcurrencyLimit = ConcurrencyLimit(
@@ -1330,6 +1333,8 @@ async def test_worker_missing_concurrency_argument_in_release(docket: Docket):

  async def test_worker_concurrency_missing_argument_in_can_start(docket: Docket):
      """Test _can_start_task with missing concurrency argument during execution."""
+     from docket.execution import Execution
+     from datetime import datetime, timezone

      async def task_with_missing_concurrency_arg(
          concurrency: ConcurrencyLimit = ConcurrencyLimit(
@@ -1378,6 +1383,7 @@ async def test_worker_exception_before_dependencies(docket: Docket):
      task_failed = False

      # Mock resolved_dependencies to fail before setting dependencies
+     from unittest.mock import patch, AsyncMock

      await docket.add(task_that_will_fail)()

@@ -1421,313 +1427,3 @@ async def test_finally_block_releases_concurrency_on_success(docket: Docket):

      # If both tasks completed, the finally block successfully released slots
      assert task_completed
-
-
- async def test_replacement_race_condition_stream_tasks(
-     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
- ):
-     """Test that replace() properly cancels tasks already in the stream.
-
-     This reproduces the race condition where:
-     1. Task is scheduled for immediate execution
-     2. Scheduler moves it to stream
-     3. replace() tries to cancel but only checks queue/hash, not stream
-     4. Both original and replacement tasks execute
-     """
-     key = f"my-cool-task:{uuid4()}"
-
-     # Schedule a task immediately (will be moved to stream quickly)
-     await docket.add(the_task, now(), key=key)("a", "b", c="c")
-
-     # Let the scheduler move the task to the stream
-     # The scheduler runs every 250ms by default
-     await asyncio.sleep(0.3)
-
-     # Now replace the task - this should cancel the one in the stream
-     later = now() + timedelta(milliseconds=100)
-     await docket.replace(the_task, later, key=key)("b", "c", c="d")
-
-     # Run the worker to completion
-     await worker.run_until_finished()
-
-     # Should only execute the replacement task, not both
-     the_task.assert_awaited_once_with("b", "c", c="d")
-     assert the_task.await_count == 1, (
-         f"Task was called {the_task.await_count} times, expected 1"
-     )
-
-
- async def test_replace_task_in_queue_before_stream(
-     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
- ):
-     """Test that replace() works correctly when task is still in queue."""
-     key = f"my-cool-task:{uuid4()}"
-
-     # Schedule a task slightly in the future (stays in queue)
-     soon = now() + timedelta(seconds=1)
-     await docket.add(the_task, soon, key=key)("a", "b", c="c")
-
-     # Replace immediately (before scheduler can move it)
-     later = now() + timedelta(milliseconds=100)
-     await docket.replace(the_task, later, key=key)("b", "c", c="d")
-
-     await worker.run_until_finished()
-
-     # Should only execute the replacement
-     the_task.assert_awaited_once_with("b", "c", c="d")
-     assert the_task.await_count == 1
-
-
- async def test_rapid_replace_operations(
-     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
- ):
-     """Test multiple rapid replace operations."""
-     key = f"my-cool-task:{uuid4()}"
-
-     # Schedule initial task
-     await docket.add(the_task, now(), key=key)("a", "b", c="c")
-
-     # Rapid replacements
-     for i in range(5):
-         when = now() + timedelta(milliseconds=50 + i * 10)
-         await docket.replace(the_task, when, key=key)(f"arg{i}", b=f"b{i}")
-
-     await worker.run_until_finished()
-
-     # Should only execute the last replacement
-     the_task.assert_awaited_once_with("arg4", b="b4")
-     assert the_task.await_count == 1
-
-
- async def test_wrongtype_error_with_legacy_known_task_key(
-     docket: Docket,
-     worker: Worker,
-     the_task: AsyncMock,
-     now: Callable[[], datetime],
-     caplog: pytest.LogCaptureFixture,
- ) -> None:
-     """Test graceful handling when known task keys exist as strings from legacy implementations.
-
-     Regression test for issue where worker scheduler would get WRONGTYPE errors when trying to
-     HSET on known task keys that existed as string values from older docket versions.
-
-     The original error occurred when:
-     1. A legacy docket created known task keys as simple string values (timestamps)
-     2. The new scheduler tried to HSET stream_message_id on these keys
-     3. Redis threw WRONGTYPE error because you can't HSET on a string key
-     4. This caused scheduler loop failures in production
-
-     This test reproduces that scenario by manually setting up the legacy state,
-     then verifies the new code handles it gracefully without errors.
-     """
-     key = f"legacy-task:{uuid4()}"
-
-     # Simulate legacy behavior: create the known task key as a string
-     # This is what older versions of docket would have done
-     async with docket.redis() as redis:
-         known_task_key = docket.known_task_key(key)
-         when = now() + timedelta(seconds=1)
-
-         # Set up legacy state: known key as string, task in queue with parked data
-         await redis.set(known_task_key, str(when.timestamp()))
-         await redis.zadd(docket.queue_key, {key: when.timestamp()})
-
-         await redis.hset(  # type: ignore
-             docket.parked_task_key(key),
-             mapping={
-                 "key": key,
-                 "when": when.isoformat(),
-                 "function": "trace",
-                 "args": cloudpickle.dumps(["legacy task test"]),  # type: ignore[arg-type]
-                 "kwargs": cloudpickle.dumps({}),  # type: ignore[arg-type]
-                 "attempt": "1",
-             },
-         )
-
-     # Capture logs to ensure no errors occur and see task execution
-     with caplog.at_level(logging.INFO):
-         await worker.run_until_finished()
-
-     # Should not have any ERROR logs now that the issue is fixed
-     error_logs = [record for record in caplog.records if record.levelname == "ERROR"]
-     assert len(error_logs) == 0, (
-         f"Expected no error logs, but got: {[r.message for r in error_logs]}"
-     )
-
-     # The task should execute successfully
-     # Since we used trace, we should see an INFO log with the message
-     info_logs = [record for record in caplog.records if record.levelname == "INFO"]
-     trace_logs = [
-         record for record in info_logs if "legacy task test" in record.message
-     ]
-     assert len(trace_logs) > 0, (
-         f"Expected to see trace log with 'legacy task test', got: {[r.message for r in info_logs]}"
-     )
-
-
- async def count_redis_keys_by_type(redis: Redis, prefix: str) -> dict[str, int]:
-     """Count Redis keys by type for a given prefix."""
-     pattern = f"{prefix}*"
-     keys: Iterable[str] = await redis.keys(pattern)  # type: ignore
-     counts: dict[str, int] = {}
-
-     for key in keys:
-         key_type = await redis.type(key)
-         key_type_str = (
-             key_type.decode() if isinstance(key_type, bytes) else str(key_type)
-         )
-         counts[key_type_str] = counts.get(key_type_str, 0) + 1
-
-     return counts
-
-
- class KeyCountChecker:
-     """Helper to verify Redis key counts remain consistent across operations."""
-
-     def __init__(self, docket: Docket, redis: Redis) -> None:
-         self.docket = docket
-         self.redis = redis
-         self.baseline_counts: dict[str, int] = {}
-
-     async def capture_baseline(self) -> None:
-         """Capture baseline key counts after worker priming."""
-         self.baseline_counts = await count_redis_keys_by_type(
-             self.redis, self.docket.name
-         )
-         print(f"Baseline key counts: {self.baseline_counts}")
-
-     async def verify_keys_increased(self, operation: str) -> None:
-         """Verify that key counts increased after scheduling operation."""
-         current_counts = await count_redis_keys_by_type(self.redis, self.docket.name)
-         print(f"After {operation} key counts: {current_counts}")
-
-         total_current = sum(current_counts.values())
-         total_baseline = sum(self.baseline_counts.values())
-         assert total_current > total_baseline, (
-             f"Expected more keys after {operation}, but got {total_current} vs {total_baseline}"
-         )
-
-     async def verify_keys_returned_to_baseline(self, operation: str) -> None:
-         """Verify that key counts returned to baseline after operation completion."""
-         final_counts = await count_redis_keys_by_type(self.redis, self.docket.name)
-         print(f"Final key counts: {final_counts}")
-
-         # Check each key type matches baseline
-         all_key_types = set(self.baseline_counts.keys()) | set(final_counts.keys())
-         for key_type in all_key_types:
-             baseline_count = self.baseline_counts.get(key_type, 0)
-             final_count = final_counts.get(key_type, 0)
-             assert final_count == baseline_count, (
-                 f"Memory leak detected after {operation}: {key_type} keys not cleaned up properly. "
-                 f"Baseline: {baseline_count}, Final: {final_count}"
-             )
-
-
- async def test_redis_key_cleanup_successful_task(
-     docket: Docket, worker: Worker
- ) -> None:
-     """Test that Redis keys are properly cleaned up after successful task execution.
-
-     This test systematically counts Redis keys before and after task operations to detect
-     memory leaks where keys are not properly cleaned up.
-     """
-     # Prime the worker (run once with no tasks to establish baseline)
-     await worker.run_until_finished()
-
-     # Create and register a simple task
-     task_executed = False
-
-     async def successful_task():
-         nonlocal task_executed
-         task_executed = True
-         await asyncio.sleep(0.01)  # Small delay to ensure proper execution flow
-
-     docket.register(successful_task)
-
-     async with docket.redis() as redis:
-         checker = KeyCountChecker(docket, redis)
-         await checker.capture_baseline()
-
-         # Schedule the task
-         await docket.add(successful_task)()
-         await checker.verify_keys_increased("scheduling")
-
-         # Execute the task
-         await worker.run_until_finished()
-
-         # Verify task executed successfully
-         assert task_executed, "Task should have executed successfully"
-
-         # Verify cleanup
-         await checker.verify_keys_returned_to_baseline("successful task execution")
-
-
- async def test_redis_key_cleanup_failed_task(docket: Docket, worker: Worker) -> None:
-     """Test that Redis keys are properly cleaned up after failed task execution."""
-     # Prime the worker
-     await worker.run_until_finished()
-
-     # Create a task that will fail
-     task_attempted = False
-
-     async def failing_task():
-         nonlocal task_attempted
-         task_attempted = True
-         raise ValueError("Intentional test failure")
-
-     docket.register(failing_task)
-
-     async with docket.redis() as redis:
-         checker = KeyCountChecker(docket, redis)
-         await checker.capture_baseline()
-
-         # Schedule the task
-         await docket.add(failing_task)()
-         await checker.verify_keys_increased("scheduling")
-
-         # Execute the task (should fail)
-         await worker.run_until_finished()
-
-         # Verify task was attempted
-         assert task_attempted, "Task should have been attempted"
-
-         # Verify cleanup despite failure
-         await checker.verify_keys_returned_to_baseline("failed task execution")
-
-
- async def test_redis_key_cleanup_cancelled_task(docket: Docket, worker: Worker) -> None:
-     """Test that Redis keys are properly cleaned up after task cancellation."""
-     # Prime the worker
-     await worker.run_until_finished()
-
-     # Create a task that won't be executed
-     task_executed = False
-
-     async def task_to_cancel():
-         nonlocal task_executed
-         task_executed = True  # pragma: no cover
-
-     docket.register(task_to_cancel)
-
-     async with docket.redis() as redis:
-         checker = KeyCountChecker(docket, redis)
-         await checker.capture_baseline()
-
-         # Schedule the task for future execution
-         future_time = datetime.now(timezone.utc) + timedelta(seconds=10)
-         execution = await docket.add(task_to_cancel, future_time)()
-         await checker.verify_keys_increased("scheduling")
-
-         # Cancel the task
-         await docket.cancel(execution.key)
-
-         # Run worker to process any cleanup
-         await worker.run_until_finished()
-
-         # Verify task was not executed
-         assert not task_executed, (
-             "Task should not have been executed after cancellation"
-         )
-
-         # Verify cleanup after cancellation
-         await checker.verify_keys_returned_to_baseline("task cancellation")