pydocket 0.9.0__tar.gz → 0.9.2__tar.gz

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.

Potentially problematic release.

This version of pydocket might be problematic.

Files changed (71)
  1. {pydocket-0.9.0 → pydocket-0.9.2}/PKG-INFO +1 -1
  2. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/docket.py +17 -8
  3. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/worker.py +3 -5
  4. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_worker.py +265 -13
  5. {pydocket-0.9.0 → pydocket-0.9.2}/.cursor/rules/general.mdc +0 -0
  6. {pydocket-0.9.0 → pydocket-0.9.2}/.cursor/rules/python-style.mdc +0 -0
  7. {pydocket-0.9.0 → pydocket-0.9.2}/.github/codecov.yml +0 -0
  8. {pydocket-0.9.0 → pydocket-0.9.2}/.github/workflows/chaos.yml +0 -0
  9. {pydocket-0.9.0 → pydocket-0.9.2}/.github/workflows/ci.yml +0 -0
  10. {pydocket-0.9.0 → pydocket-0.9.2}/.github/workflows/docs.yml +0 -0
  11. {pydocket-0.9.0 → pydocket-0.9.2}/.github/workflows/publish.yml +0 -0
  12. {pydocket-0.9.0 → pydocket-0.9.2}/.gitignore +0 -0
  13. {pydocket-0.9.0 → pydocket-0.9.2}/.pre-commit-config.yaml +0 -0
  14. {pydocket-0.9.0 → pydocket-0.9.2}/CLAUDE.md +0 -0
  15. {pydocket-0.9.0 → pydocket-0.9.2}/LICENSE +0 -0
  16. {pydocket-0.9.0 → pydocket-0.9.2}/README.md +0 -0
  17. {pydocket-0.9.0 → pydocket-0.9.2}/chaos/README.md +0 -0
  18. {pydocket-0.9.0 → pydocket-0.9.2}/chaos/__init__.py +0 -0
  19. {pydocket-0.9.0 → pydocket-0.9.2}/chaos/driver.py +0 -0
  20. {pydocket-0.9.0 → pydocket-0.9.2}/chaos/producer.py +0 -0
  21. {pydocket-0.9.0 → pydocket-0.9.2}/chaos/run +0 -0
  22. {pydocket-0.9.0 → pydocket-0.9.2}/chaos/tasks.py +0 -0
  23. {pydocket-0.9.0 → pydocket-0.9.2}/docs/advanced-patterns.md +0 -0
  24. {pydocket-0.9.0 → pydocket-0.9.2}/docs/api-reference.md +0 -0
  25. {pydocket-0.9.0 → pydocket-0.9.2}/docs/dependencies.md +0 -0
  26. {pydocket-0.9.0 → pydocket-0.9.2}/docs/getting-started.md +0 -0
  27. {pydocket-0.9.0 → pydocket-0.9.2}/docs/index.md +0 -0
  28. {pydocket-0.9.0 → pydocket-0.9.2}/docs/production.md +0 -0
  29. {pydocket-0.9.0 → pydocket-0.9.2}/docs/testing.md +0 -0
  30. {pydocket-0.9.0 → pydocket-0.9.2}/examples/__init__.py +0 -0
  31. {pydocket-0.9.0 → pydocket-0.9.2}/examples/common.py +0 -0
  32. {pydocket-0.9.0 → pydocket-0.9.2}/examples/concurrency_control.py +0 -0
  33. {pydocket-0.9.0 → pydocket-0.9.2}/examples/find_and_flood.py +0 -0
  34. {pydocket-0.9.0 → pydocket-0.9.2}/examples/self_perpetuating.py +0 -0
  35. {pydocket-0.9.0 → pydocket-0.9.2}/mkdocs.yml +0 -0
  36. {pydocket-0.9.0 → pydocket-0.9.2}/pyproject.toml +0 -0
  37. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/__init__.py +0 -0
  38. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/__main__.py +0 -0
  39. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/annotations.py +0 -0
  40. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/cli.py +0 -0
  41. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/dependencies.py +0 -0
  42. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/execution.py +0 -0
  43. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/instrumentation.py +0 -0
  44. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/py.typed +0 -0
  45. {pydocket-0.9.0 → pydocket-0.9.2}/src/docket/tasks.py +0 -0
  46. {pydocket-0.9.0 → pydocket-0.9.2}/telemetry/.gitignore +0 -0
  47. {pydocket-0.9.0 → pydocket-0.9.2}/telemetry/start +0 -0
  48. {pydocket-0.9.0 → pydocket-0.9.2}/telemetry/stop +0 -0
  49. {pydocket-0.9.0 → pydocket-0.9.2}/tests/__init__.py +0 -0
  50. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/__init__.py +0 -0
  51. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/conftest.py +0 -0
  52. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_clear.py +0 -0
  53. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_module.py +0 -0
  54. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_parsing.py +0 -0
  55. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_snapshot.py +0 -0
  56. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_striking.py +0 -0
  57. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_tasks.py +0 -0
  58. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_version.py +0 -0
  59. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_worker.py +0 -0
  60. {pydocket-0.9.0 → pydocket-0.9.2}/tests/cli/test_workers.py +0 -0
  61. {pydocket-0.9.0 → pydocket-0.9.2}/tests/conftest.py +0 -0
  62. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_concurrency_basic.py +0 -0
  63. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_concurrency_control.py +0 -0
  64. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_concurrency_refresh.py +0 -0
  65. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_dependencies.py +0 -0
  66. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_docket.py +0 -0
  67. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_execution.py +0 -0
  68. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_fundamentals.py +0 -0
  69. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_instrumentation.py +0 -0
  70. {pydocket-0.9.0 → pydocket-0.9.2}/tests/test_striking.py +0 -0
  71. {pydocket-0.9.0 → pydocket-0.9.2}/uv.lock +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydocket
- Version: 0.9.0
+ Version: 0.9.2
  Summary: A distributed background task system for Python functions
  Project-URL: Homepage, https://github.com/chrisguidry/docket
  Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
@@ -428,6 +428,9 @@ class Docket:
  def parked_task_key(self, key: str) -> str:
  return f"{self.name}:{key}"

+ def stream_id_key(self, key: str) -> str:
+ return f"{self.name}:stream-id:{key}"
+
  async def _schedule(
  self,
  redis: Redis,
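
The new stream_id_key gives each task a dedicated string key for its stream message ID, alongside the existing per-task keys. A rough sketch of the naming implied by these helpers (illustrative only: the docket name and task key below are invented, and the known-task key format is not shown in this hunk):

    # Hypothetical illustration of the per-task key layout in 0.9.2
    name = "orders"           # docket name
    key = "send-receipt:123"  # task key

    parked_key = f"{name}:{key}"               # parked task data (per parked_task_key above)
    stream_id_key = f"{name}:stream-id:{key}"  # plain string holding the XADD message ID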
@@ -472,13 +475,14 @@ class Docket:
  self._schedule_task_script = cast(
  _schedule_task,
  redis.register_script(
- # KEYS: stream_key, known_key, parked_key, queue_key
+ # KEYS: stream_key, known_key, parked_key, queue_key, stream_id_key
  # ARGV: task_key, when_timestamp, is_immediate, replace, ...message_fields
  """
  local stream_key = KEYS[1]
  local known_key = KEYS[2]
  local parked_key = KEYS[3]
  local queue_key = KEYS[4]
+ local stream_id_key = KEYS[5]

  local task_key = ARGV[1]
  local when_timestamp = ARGV[2]
@@ -494,11 +498,11 @@

  -- Handle replacement: cancel existing task if needed
  if replace then
- local existing_message_id = redis.call('HGET', known_key, 'stream_message_id')
+ local existing_message_id = redis.call('GET', stream_id_key)
  if existing_message_id then
  redis.call('XDEL', stream_key, existing_message_id)
  end
- redis.call('DEL', known_key, parked_key)
+ redis.call('DEL', known_key, parked_key, stream_id_key)
  redis.call('ZREM', queue_key, task_key)
  else
  -- Check if task already exists
@@ -510,11 +514,12 @@
  if is_immediate then
  -- Add to stream and store message ID for later cancellation
  local message_id = redis.call('XADD', stream_key, '*', unpack(message))
- redis.call('HSET', known_key, 'when', when_timestamp, 'stream_message_id', message_id)
+ redis.call('SET', known_key, when_timestamp)
+ redis.call('SET', stream_id_key, message_id)
  return message_id
  else
  -- Add to queue with task data in parked hash
- redis.call('HSET', known_key, 'when', when_timestamp)
+ redis.call('SET', known_key, when_timestamp)
  redis.call('HSET', parked_key, unpack(message))
  redis.call('ZADD', queue_key, when_timestamp, task_key)
  return 'QUEUED'
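
The scheduling script now records the known-task marker with a plain SET and stores the stream message ID under the separate stream_id_key, rather than packing both into one hash with HSET. The motivation shows up in the regression tests added further down: older docket versions wrote the known key as a plain string, and calling HSET on a key that already holds a string makes Redis raise a WRONGTYPE error. A minimal redis-py sketch of that failure mode, using invented key names and assuming a local Redis (an illustration, not docket code):

    import asyncio

    from redis.asyncio import Redis
    from redis.exceptions import ResponseError

    async def demo() -> None:
        redis = Redis()
        # Legacy-style known key: a plain string timestamp
        await redis.set("example:known:task-1", "1717171717.0")
        try:
            # What the removed HSET path would do when it met a legacy string key
            await redis.hset("example:known:task-1", "stream_message_id", "1-0")
        except ResponseError as error:
            print("WRONGTYPE:", error)  # cannot HSET a key that holds a string
        # The 0.9.2 layout keeps both values as plain strings under separate keys
        await redis.set("example:known:task-1", "1717171717.0")
        await redis.set("example:stream-id:task-1", "1-0")
        await redis.aclose()  # redis-py 5+; use close() on older versions

    asyncio.run(demo())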
@@ -530,6 +535,7 @@ class Docket:
  known_task_key,
  self.parked_task_key(key),
  self.queue_key,
+ self.stream_id_key(key),
  ],
  args=[
  key,
@@ -556,23 +562,24 @@ class Docket:
  self._cancel_task_script = cast(
  _cancel_task,
  redis.register_script(
- # KEYS: stream_key, known_key, parked_key, queue_key
+ # KEYS: stream_key, known_key, parked_key, queue_key, stream_id_key
  # ARGV: task_key
  """
  local stream_key = KEYS[1]
  local known_key = KEYS[2]
  local parked_key = KEYS[3]
  local queue_key = KEYS[4]
+ local stream_id_key = KEYS[5]
  local task_key = ARGV[1]

  -- Delete from stream if message ID exists
- local message_id = redis.call('HGET', known_key, 'stream_message_id')
+ local message_id = redis.call('GET', stream_id_key)
  if message_id then
  redis.call('XDEL', stream_key, message_id)
  end

  -- Clean up all task-related keys
- redis.call('DEL', known_key, parked_key)
+ redis.call('DEL', known_key, parked_key, stream_id_key)
  redis.call('ZREM', queue_key, task_key)
  return 'OK'
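
Read as plain Python, the cancellation script above does roughly the following (a non-atomic sketch for readability; the real code runs as a single registered Lua script and receives these key names as KEYS[1] through KEYS[5]):

    from redis.asyncio import Redis

    async def cancel_task_sketch(
        redis: Redis,
        stream_key: str,
        known_key: str,
        parked_key: str,
        queue_key: str,
        stream_id_key: str,
        task_key: str,
    ) -> None:
        # Remove the already-queued stream entry, if one was recorded at scheduling time
        message_id = await redis.get(stream_id_key)
        if message_id:
            await redis.xdel(stream_key, message_id)
        # Drop every per-task key and the queue entry
        await redis.delete(known_key, parked_key, stream_id_key)
        await redis.zrem(queue_key, task_key)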
@@ -588,6 +595,7 @@ class Docket:
  self.known_task_key(key),
  self.parked_task_key(key),
  self.queue_key,
+ self.stream_id_key(key),
  ],
  args=[key],
  )
@@ -897,6 +905,7 @@ class Docket:
  key = key_bytes.decode()
  pipeline.delete(self.parked_task_key(key))
  pipeline.delete(self.known_task_key(key))
+ pipeline.delete(self.stream_id_key(key))

  await pipeline.execute()
@@ -406,7 +406,7 @@ class Worker:
  task[task_data[j]] = task_data[j+1]
  end

- local message_id = redis.call('XADD', KEYS[2], '*',
+ redis.call('XADD', KEYS[2], '*',
  'key', task['key'],
  'when', task['when'],
  'function', task['function'],
@@ -414,9 +414,6 @@
  'kwargs', task['kwargs'],
  'attempt', task['attempt']
  )
- -- Store the message ID in the known task key
- local known_key = ARGV[2] .. ":known:" .. key
- redis.call('HSET', known_key, 'stream_message_id', message_id)
  redis.call('DEL', hash_key)
  due_work = due_work + 1
  end
@@ -498,7 +495,8 @@ class Worker:

  logger.debug("Deleting known task", extra=self._log_context())
  known_task_key = self.docket.known_task_key(key)
- await redis.delete(known_task_key)
+ stream_id_key = self.docket.stream_id_key(key)
+ await redis.delete(known_task_key, stream_id_key)

  async def _execute(self, execution: Execution) -> None:
  log_context = {**self._log_context(), **execution.specific_labels()}
@@ -1,11 +1,14 @@
  import asyncio
  import logging
+ import time
  from contextlib import asynccontextmanager
+ from contextvars import ContextVar
  from datetime import datetime, timedelta, timezone
- from typing import AsyncGenerator, Callable
+ from typing import AsyncGenerator, Callable, Iterable
  from unittest.mock import AsyncMock, patch
  from uuid import uuid4

+ import cloudpickle # type: ignore[import]
  import pytest
  from redis.asyncio import Redis
  from redis.exceptions import ConnectionError
@@ -18,6 +21,8 @@ from docket import (
  Perpetual,
  Worker,
  )
+ from docket.dependencies import Timeout
+ from docket.execution import Execution
  from docket.tasks import standard_tasks
  from docket.worker import ms
@@ -175,7 +180,6 @@ async def test_redeliveries_respect_concurrency_limits(docket: Docket):
  nonlocal failure_count

  # Record when this task runs
- import time

  task_executions.append((customer_id, time.time()))
@@ -556,7 +560,6 @@ async def test_worker_can_be_told_to_skip_automatic_tasks(docket: Docket):

  async def test_worker_concurrency_limits_task_queuing_behavior(docket: Docket):
  """Test that concurrency limits control task execution properly"""
- from contextvars import ContextVar

  # Use contextvar for reliable tracking across async execution
  execution_log: ContextVar[list[tuple[str, int]]] = ContextVar("execution_log")
@@ -1172,7 +1175,6 @@ async def test_worker_concurrency_edge_cases(docket: Docket):

  async def test_worker_timeout_exceeds_redelivery_timeout(docket: Docket):
  """Test worker handles user timeout longer than redelivery timeout."""
- from docket.dependencies import Timeout

  task_executed = False
@@ -1251,8 +1253,6 @@ async def test_worker_concurrency_missing_argument_early_return(docket: Docket):

  async def test_worker_no_concurrency_dependency_in_function(docket: Docket):
  """Test _can_start_task with function that has no concurrency dependency."""
- from docket.execution import Execution
- from datetime import datetime, timezone

  async def task_without_concurrency_dependency():
  await asyncio.sleep(0.001)
@@ -1278,8 +1278,6 @@ async def test_worker_no_concurrency_dependency_in_function(docket: Docket):

  async def test_worker_no_concurrency_dependency_in_release(docket: Docket):
  """Test _release_concurrency_slot with function that has no concurrency dependency."""
- from docket.execution import Execution
- from datetime import datetime, timezone

  async def task_without_concurrency_dependency():
  await asyncio.sleep(0.001)
@@ -1304,8 +1302,6 @@ async def test_worker_no_concurrency_dependency_in_release(docket: Docket):

  async def test_worker_missing_concurrency_argument_in_release(docket: Docket):
  """Test _release_concurrency_slot when concurrency argument is missing."""
- from docket.execution import Execution
- from datetime import datetime, timezone

  async def task_with_missing_arg(
  concurrency: ConcurrencyLimit = ConcurrencyLimit(
@@ -1334,8 +1330,6 @@ async def test_worker_missing_concurrency_argument_in_release(docket: Docket):

  async def test_worker_concurrency_missing_argument_in_can_start(docket: Docket):
  """Test _can_start_task with missing concurrency argument during execution."""
- from docket.execution import Execution
- from datetime import datetime, timezone

  async def task_with_missing_concurrency_arg(
  concurrency: ConcurrencyLimit = ConcurrencyLimit(
@@ -1384,7 +1378,6 @@ async def test_worker_exception_before_dependencies(docket: Docket):
  task_failed = False

  # Mock resolved_dependencies to fail before setting dependencies
- from unittest.mock import patch, AsyncMock

  await docket.add(task_that_will_fail)()
@@ -1504,3 +1497,262 @@ async def test_rapid_replace_operations(
  # Should only execute the last replacement
  the_task.assert_awaited_once_with("arg4", b="b4")
  assert the_task.await_count == 1
+
+
+ async def test_wrongtype_error_with_legacy_known_task_key(
+ docket: Docket,
+ worker: Worker,
+ the_task: AsyncMock,
+ now: Callable[[], datetime],
+ caplog: pytest.LogCaptureFixture,
+ ) -> None:
+ """Test graceful handling when known task keys exist as strings from legacy implementations.
+
+ Regression test for issue where worker scheduler would get WRONGTYPE errors when trying to
+ HSET on known task keys that existed as string values from older docket versions.
+
+ The original error occurred when:
+ 1. A legacy docket created known task keys as simple string values (timestamps)
+ 2. The new scheduler tried to HSET stream_message_id on these keys
+ 3. Redis threw WRONGTYPE error because you can't HSET on a string key
+ 4. This caused scheduler loop failures in production
+
+ This test reproduces that scenario by manually setting up the legacy state,
+ then verifies the new code handles it gracefully without errors.
+ """
+ key = f"legacy-task:{uuid4()}"
+
+ # Simulate legacy behavior: create the known task key as a string
+ # This is what older versions of docket would have done
+ async with docket.redis() as redis:
+ known_task_key = docket.known_task_key(key)
+ when = now() + timedelta(seconds=1)
+
+ # Set up legacy state: known key as string, task in queue with parked data
+ await redis.set(known_task_key, str(when.timestamp()))
+ await redis.zadd(docket.queue_key, {key: when.timestamp()})
+
+ await redis.hset( # type: ignore
+ docket.parked_task_key(key),
+ mapping={
+ "key": key,
+ "when": when.isoformat(),
+ "function": "trace",
+ "args": cloudpickle.dumps(["legacy task test"]), # type: ignore[arg-type]
+ "kwargs": cloudpickle.dumps({}), # type: ignore[arg-type]
+ "attempt": "1",
+ },
+ )
+
+ # Capture logs to ensure no errors occur and see task execution
+ with caplog.at_level(logging.INFO):
+ await worker.run_until_finished()
+
+ # Should not have any ERROR logs now that the issue is fixed
+ error_logs = [record for record in caplog.records if record.levelname == "ERROR"]
+ assert len(error_logs) == 0, (
+ f"Expected no error logs, but got: {[r.message for r in error_logs]}"
+ )
+
+ # The task should execute successfully
+ # Since we used trace, we should see an INFO log with the message
+ info_logs = [record for record in caplog.records if record.levelname == "INFO"]
+ trace_logs = [
+ record for record in info_logs if "legacy task test" in record.message
+ ]
+ assert len(trace_logs) > 0, (
+ f"Expected to see trace log with 'legacy task test', got: {[r.message for r in info_logs]}"
+ )
+
+
+ async def count_redis_keys_by_type(redis: Redis, prefix: str) -> dict[str, int]:
+ """Count Redis keys by type for a given prefix."""
+ pattern = f"{prefix}*"
+ keys: Iterable[str] = await redis.keys(pattern) # type: ignore
+ counts: dict[str, int] = {}
+
+ for key in keys:
+ key_type = await redis.type(key)
+ key_type_str = (
+ key_type.decode() if isinstance(key_type, bytes) else str(key_type)
+ )
+ counts[key_type_str] = counts.get(key_type_str, 0) + 1
+
+ return counts
+
+
+ class KeyCountChecker:
+ """Helper to verify Redis key counts remain consistent across operations."""
+
+ def __init__(self, docket: Docket, redis: Redis) -> None:
+ self.docket = docket
+ self.redis = redis
+ self.baseline_counts: dict[str, int] = {}
+
+ async def capture_baseline(self) -> None:
+ """Capture baseline key counts after worker priming."""
+ self.baseline_counts = await count_redis_keys_by_type(
+ self.redis, self.docket.name
+ )
+ print(f"Baseline key counts: {self.baseline_counts}")
+
+ async def verify_keys_increased(self, operation: str) -> None:
+ """Verify that key counts increased after scheduling operation."""
+ current_counts = await count_redis_keys_by_type(self.redis, self.docket.name)
+ print(f"After {operation} key counts: {current_counts}")
+
+ total_current = sum(current_counts.values())
+ total_baseline = sum(self.baseline_counts.values())
+ assert total_current > total_baseline, (
+ f"Expected more keys after {operation}, but got {total_current} vs {total_baseline}"
+ )
+
+ async def verify_keys_returned_to_baseline(self, operation: str) -> None:
+ """Verify that key counts returned to baseline after operation completion."""
+ final_counts = await count_redis_keys_by_type(self.redis, self.docket.name)
+ print(f"Final key counts: {final_counts}")
+
+ # Check each key type matches baseline
+ all_key_types = set(self.baseline_counts.keys()) | set(final_counts.keys())
+ for key_type in all_key_types:
+ baseline_count = self.baseline_counts.get(key_type, 0)
+ final_count = final_counts.get(key_type, 0)
+ assert final_count == baseline_count, (
+ f"Memory leak detected after {operation}: {key_type} keys not cleaned up properly. "
+ f"Baseline: {baseline_count}, Final: {final_count}"
+ )
+
+
+ async def test_redis_key_cleanup_successful_task(
+ docket: Docket, worker: Worker
+ ) -> None:
+ """Test that Redis keys are properly cleaned up after successful task execution.
+
+ This test systematically counts Redis keys before and after task operations to detect
+ memory leaks where keys are not properly cleaned up.
+ """
+ # Prime the worker (run once with no tasks to establish baseline)
+ await worker.run_until_finished()
+
+ # Create and register a simple task
+ task_executed = False
+
+ async def successful_task():
+ nonlocal task_executed
+ task_executed = True
+ await asyncio.sleep(0.01) # Small delay to ensure proper execution flow
+
+ docket.register(successful_task)
+
+ async with docket.redis() as redis:
+ checker = KeyCountChecker(docket, redis)
+ await checker.capture_baseline()
+
+ # Schedule the task
+ await docket.add(successful_task)()
+ await checker.verify_keys_increased("scheduling")
+
+ # Execute the task
+ await worker.run_until_finished()
+
+ # Verify task executed successfully
+ assert task_executed, "Task should have executed successfully"
+
+ # Verify cleanup
+ await checker.verify_keys_returned_to_baseline("successful task execution")
+
+
+ async def test_redis_key_cleanup_failed_task(docket: Docket, worker: Worker) -> None:
+ """Test that Redis keys are properly cleaned up after failed task execution."""
+ # Prime the worker
+ await worker.run_until_finished()
+
+ # Create a task that will fail
+ task_attempted = False
+
+ async def failing_task():
+ nonlocal task_attempted
+ task_attempted = True
+ raise ValueError("Intentional test failure")
+
+ docket.register(failing_task)
+
+ async with docket.redis() as redis:
+ checker = KeyCountChecker(docket, redis)
+ await checker.capture_baseline()
+
+ # Schedule the task
+ await docket.add(failing_task)()
+ await checker.verify_keys_increased("scheduling")
+
+ # Execute the task (should fail)
+ await worker.run_until_finished()
+
+ # Verify task was attempted
+ assert task_attempted, "Task should have been attempted"
+
+ # Verify cleanup despite failure
+ await checker.verify_keys_returned_to_baseline("failed task execution")
+
+
+ async def test_redis_key_cleanup_cancelled_task(docket: Docket, worker: Worker) -> None:
+ """Test that Redis keys are properly cleaned up after task cancellation."""
+ # Prime the worker
+ await worker.run_until_finished()
+
+ # Create a task that won't be executed
+ task_executed = False
+
+ async def task_to_cancel():
+ nonlocal task_executed
+ task_executed = True # pragma: no cover
+
+ docket.register(task_to_cancel)
+
+ async with docket.redis() as redis:
+ checker = KeyCountChecker(docket, redis)
+ await checker.capture_baseline()
+
+ # Schedule the task for future execution
+ future_time = datetime.now(timezone.utc) + timedelta(seconds=10)
+ execution = await docket.add(task_to_cancel, future_time)()
+ await checker.verify_keys_increased("scheduling")
+
+ # Cancel the task
+ await docket.cancel(execution.key)
+
+ # Run worker to process any cleanup
+ await worker.run_until_finished()
+
+ # Verify task was not executed
+ assert not task_executed, (
+ "Task should not have been executed after cancellation"
+ )
+
+ # Verify cleanup after cancellation
+ await checker.verify_keys_returned_to_baseline("task cancellation")
+
+
+ async def test_replace_task_with_legacy_known_key(
+ docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+ ):
+ """Test that replace() works with legacy string known_keys.
+
+ This reproduces the exact production scenario where replace() would get
+ WRONGTYPE errors when trying to HGET on legacy string known_keys.
+ The main goal is to verify no WRONGTYPE error occurs.
+ """
+ key = f"legacy-replace-task:{uuid4()}"
+
+ # Simulate legacy state: create known_key as string (old format)
+ async with docket.redis() as redis:
+ known_task_key = docket.known_task_key(key)
+ when = now()
+
+ # Create legacy known_key as STRING (what old code did)
+ await redis.set(known_task_key, str(when.timestamp()))
+
+ # Now try to replace - this should work without WRONGTYPE error
+ # The key point is that this call succeeds without throwing WRONGTYPE
+ replacement_time = now() + timedelta(seconds=1)
+ await docket.replace("trace", replacement_time, key=key)("replacement message")
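
A side note on the helper above, not part of the diff: count_redis_keys_by_type uses KEYS, which is fine for the small, isolated databases these tests run against. An equivalent helper for a larger keyspace could iterate with SCAN instead, sketched here under the same redis-py asyncio API the tests use:

    from redis.asyncio import Redis

    async def count_keys_by_type_scan(redis: Redis, prefix: str) -> dict[str, int]:
        """Count keys by Redis type under a prefix, using SCAN instead of KEYS."""
        counts: dict[str, int] = {}
        async for key in redis.scan_iter(match=f"{prefix}*"):
            key_type = await redis.type(key)
            name = key_type.decode() if isinstance(key_type, bytes) else str(key_type)
            counts[name] = counts.get(name, 0) + 1
        return counts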