pydocket 0.10.0__tar.gz → 0.11.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydocket might be problematic.
- pydocket-0.11.0/.github/workflows/claude-code-review.yml +40 -0
- pydocket-0.11.0/.github/workflows/claude.yml +42 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/PKG-INFO +1 -1
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/docket.py +160 -35
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/execution.py +3 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/instrumentation.py +6 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/worker.py +16 -4
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_fundamentals.py +25 -8
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_instrumentation.py +50 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_worker.py +342 -13
- {pydocket-0.10.0 → pydocket-0.11.0}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.github/codecov.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.github/workflows/ci.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.github/workflows/docs.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.github/workflows/publish.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.gitignore +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/.pre-commit-config.yaml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/CLAUDE.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/LICENSE +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/README.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/chaos/README.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/chaos/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/chaos/driver.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/chaos/producer.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/chaos/run +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/chaos/tasks.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/advanced-patterns.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/api-reference.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/dependencies.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/getting-started.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/index.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/production.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/docs/testing.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/examples/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/examples/common.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/examples/concurrency_control.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/examples/find_and_flood.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/examples/self_perpetuating.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/mkdocs.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/pyproject.toml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/__main__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/annotations.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/cli.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/dependencies.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/py.typed +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/src/docket/tasks.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/telemetry/.gitignore +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/telemetry/start +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/telemetry/stop +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/conftest.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_clear.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_module.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_striking.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_version.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_worker.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/cli/test_workers.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/conftest.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_concurrency_basic.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_concurrency_control.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_concurrency_refresh.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_dependencies.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_docket.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_execution.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/tests/test_striking.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.0}/uv.lock +0 -0
pydocket-0.11.0/.github/workflows/claude-code-review.yml

```diff
@@ -0,0 +1,40 @@
+name: Claude Code Review
+
+on:
+  pull_request:
+    types: [opened, synchronize]
+
+jobs:
+  claude-review:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: read
+      issues: read
+      id-token: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+
+      - name: Run Claude Code Review
+        id: claude-review
+        uses: anthropics/claude-code-action@beta
+        with:
+          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+          model: "claude-opus-4-1-20250805"
+
+          # Direct prompt for automated review (no @claude mention needed)
+          direct_prompt: |
+            Please review this pull request and provide feedback on:
+            - Code quality and best practices
+            - Potential bugs or issues
+            - Performance considerations
+            - Security concerns
+            - Test coverage, which must be maintained at 100% for this project
+
+            Be constructive and helpful in your feedback.
+
+          use_sticky_comment: true
```
pydocket-0.11.0/.github/workflows/claude.yml

```diff
@@ -0,0 +1,42 @@
+name: Claude Code
+
+on:
+  issue_comment:
+    types: [created]
+  pull_request_review_comment:
+    types: [created]
+  issues:
+    types: [opened, assigned]
+  pull_request_review:
+    types: [submitted]
+
+jobs:
+  claude:
+    if: |
+      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
+      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
+      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
+      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: read
+      issues: read
+      id-token: write
+      actions: read # Required for Claude to read CI results on PRs
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+
+      - name: Run Claude Code
+        id: claude
+        uses: anthropics/claude-code-action@beta
+        with:
+          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+
+          additional_permissions: |
+            actions: read
+
+          model: "claude-opus-4-1-20250805"
```
{pydocket-0.10.0 → pydocket-0.11.0}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.10.0
+Version: 0.11.0
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
```
{pydocket-0.10.0 → pydocket-0.11.0}/src/docket/docket.py

```diff
@@ -16,6 +16,7 @@ from typing import (
     Mapping,
     NoReturn,
     ParamSpec,
+    Protocol,
     Self,
     Sequence,
     TypedDict,
@@ -27,7 +28,6 @@ from typing import (
 import redis.exceptions
 from opentelemetry import propagate, trace
 from redis.asyncio import ConnectionPool, Redis
-from redis.asyncio.client import Pipeline
 from uuid_extensions import uuid7
 
 from .execution import (
@@ -55,6 +55,18 @@ logger: logging.Logger = logging.getLogger(__name__)
 tracer: trace.Tracer = trace.get_tracer(__name__)
 
 
+class _schedule_task(Protocol):
+    async def __call__(
+        self, keys: list[str], args: list[str | float | bytes]
+    ) -> str: ...  # pragma: no cover
+
+
+class _cancel_task(Protocol):
+    async def __call__(
+        self, keys: list[str], args: list[str]
+    ) -> str: ...  # pragma: no cover
+
+
 P = ParamSpec("P")
 R = TypeVar("R")
 
```
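The two `Protocol` classes above exist because redis-py's `register_script` returns a generic `Script` object with no useful type information; casting it to a protocol gives the calling code a precise async signature. A minimal sketch of the same pattern, assuming a local Redis and redis-py 5.x (the `_sum_script` protocol and `demo` coroutine are illustrative names, not part of docket):

```python
import asyncio
from typing import Protocol, cast

from redis.asyncio import Redis


class _sum_script(Protocol):
    # The precise call signature we want the registered script to have.
    async def __call__(self, keys: list[str], args: list[str]) -> int: ...


async def demo() -> None:
    redis = Redis()
    # register_script returns a loosely typed Script; cast() narrows it.
    script = cast(
        _sum_script,
        redis.register_script("return tonumber(ARGV[1]) + tonumber(ARGV[2])"),
    )
    assert await script(keys=[], args=["1", "2"]) == 3
    await redis.aclose()  # redis-py 5.x


asyncio.run(demo())
```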
```diff
@@ -131,6 +143,8 @@ class Docket:
 
     _monitor_strikes_task: asyncio.Task[None]
     _connection_pool: ConnectionPool
+    _schedule_task_script: _schedule_task | None
+    _cancel_task_script: _cancel_task | None
 
     def __init__(
         self,
@@ -156,6 +170,8 @@ class Docket:
         self.url = url
         self.heartbeat_interval = heartbeat_interval
         self.missed_heartbeats = missed_heartbeats
+        self._schedule_task_script = None
+        self._cancel_task_script = None
 
     @property
     def worker_group_name(self) -> str:
@@ -300,9 +316,7 @@
         execution = Execution(function, args, kwargs, when, key, attempt=1)
 
         async with self.redis() as redis:
-
-            await self._schedule(redis, pipeline, execution, replace=False)
-            await pipeline.execute()
+            await self._schedule(redis, execution, replace=False)
 
         TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})
         TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
@@ -361,9 +375,7 @@
         execution = Execution(function, args, kwargs, when, key, attempt=1)
 
         async with self.redis() as redis:
-
-            await self._schedule(redis, pipeline, execution, replace=True)
-            await pipeline.execute()
+            await self._schedule(redis, execution, replace=True)
 
         TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})
         TASKS_CANCELLED.add(1, {**self.labels(), **execution.general_labels()})
@@ -383,9 +395,7 @@
             },
         ):
             async with self.redis() as redis:
-
-                await self._schedule(redis, pipeline, execution, replace=False)
-                await pipeline.execute()
+                await self._schedule(redis, execution, replace=False)
 
             TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
@@ -400,9 +410,7 @@
             attributes={**self.labels(), "docket.key": key},
         ):
             async with self.redis() as redis:
-
-                await self._cancel(pipeline, key)
-                await pipeline.execute()
+                await self._cancel(redis, key)
 
             TASKS_CANCELLED.add(1, self.labels())
 
@@ -420,13 +428,23 @@
     def parked_task_key(self, key: str) -> str:
         return f"{self.name}:{key}"
 
+    def stream_id_key(self, key: str) -> str:
+        return f"{self.name}:stream-id:{key}"
+
     async def _schedule(
         self,
         redis: Redis,
-        pipeline: Pipeline,
         execution: Execution,
         replace: bool = False,
     ) -> None:
+        """Schedule a task atomically.
+
+        Handles:
+        - Checking for task existence
+        - Cancelling existing tasks when replacing
+        - Adding tasks to stream (immediate) or queue (future)
+        - Tracking stream message IDs for later cancellation
+        """
         if self.strike_list.is_stricken(execution):
             logger.warning(
                 "%r is stricken, skipping schedule of %r",
@@ -449,32 +467,138 @@
         key = execution.key
         when = execution.when
         known_task_key = self.known_task_key(key)
+        is_immediate = when <= datetime.now(timezone.utc)
 
+        # Lock per task key to prevent race conditions between concurrent operations
         async with redis.lock(f"{known_task_key}:lock", timeout=10):
-            if
-
-
-
-
-
-            "
-
-
-
+            if self._schedule_task_script is None:
+                self._schedule_task_script = cast(
+                    _schedule_task,
+                    redis.register_script(
+                        # KEYS: stream_key, known_key, parked_key, queue_key, stream_id_key
+                        # ARGV: task_key, when_timestamp, is_immediate, replace, ...message_fields
+                        """
+                        local stream_key = KEYS[1]
+                        local known_key = KEYS[2]
+                        local parked_key = KEYS[3]
+                        local queue_key = KEYS[4]
+                        local stream_id_key = KEYS[5]
+
+                        local task_key = ARGV[1]
+                        local when_timestamp = ARGV[2]
+                        local is_immediate = ARGV[3] == '1'
+                        local replace = ARGV[4] == '1'
+
+                        -- Extract message fields from ARGV[5] onwards
+                        local message = {}
+                        for i = 5, #ARGV, 2 do
+                            message[#message + 1] = ARGV[i]      -- field name
+                            message[#message + 1] = ARGV[i + 1]  -- field value
+                        end
+
+                        -- Handle replacement: cancel existing task if needed
+                        if replace then
+                            local existing_message_id = redis.call('GET', stream_id_key)
+                            if existing_message_id then
+                                redis.call('XDEL', stream_key, existing_message_id)
+                            end
+                            redis.call('DEL', known_key, parked_key, stream_id_key)
+                            redis.call('ZREM', queue_key, task_key)
+                        else
+                            -- Check if task already exists
+                            if redis.call('EXISTS', known_key) == 1 then
+                                return 'EXISTS'
+                            end
+                        end
+
+                        if is_immediate then
+                            -- Add to stream and store message ID for later cancellation
+                            local message_id = redis.call('XADD', stream_key, '*', unpack(message))
+                            redis.call('SET', known_key, when_timestamp)
+                            redis.call('SET', stream_id_key, message_id)
+                            return message_id
+                        else
+                            -- Add to queue with task data in parked hash
+                            redis.call('SET', known_key, when_timestamp)
+                            redis.call('HSET', parked_key, unpack(message))
+                            redis.call('ZADD', queue_key, when_timestamp, task_key)
+                            return 'QUEUED'
+                        end
+                        """
+                    ),
+                )
+            schedule_task = self._schedule_task_script
 
-
+            await schedule_task(
+                keys=[
+                    self.stream_key,
+                    known_task_key,
+                    self.parked_task_key(key),
+                    self.queue_key,
+                    self.stream_id_key(key),
+                ],
+                args=[
+                    key,
+                    str(when.timestamp()),
+                    "1" if is_immediate else "0",
+                    "1" if replace else "0",
+                    *[
+                        item
+                        for field, value in message.items()
+                        for item in (field, value)
+                    ],
+                ],
+            )
 
-
-
-            else:
-                pipeline.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
-                pipeline.zadd(self.queue_key, {key: when.timestamp()})
+    async def _cancel(self, redis: Redis, key: str) -> None:
+        """Cancel a task atomically.
 
-
-
-
-
+        Handles cancellation regardless of task location:
+        - From the stream (using stored message ID)
+        - From the queue (scheduled tasks)
+        - Cleans up all associated metadata keys
+        """
+        if self._cancel_task_script is None:
+            self._cancel_task_script = cast(
+                _cancel_task,
+                redis.register_script(
+                    # KEYS: stream_key, known_key, parked_key, queue_key, stream_id_key
+                    # ARGV: task_key
+                    """
+                    local stream_key = KEYS[1]
+                    local known_key = KEYS[2]
+                    local parked_key = KEYS[3]
+                    local queue_key = KEYS[4]
+                    local stream_id_key = KEYS[5]
+                    local task_key = ARGV[1]
+
+                    -- Delete from stream if message ID exists
+                    local message_id = redis.call('GET', stream_id_key)
+                    if message_id then
+                        redis.call('XDEL', stream_key, message_id)
+                    end
+
+                    -- Clean up all task-related keys
+                    redis.call('DEL', known_key, parked_key, stream_id_key)
+                    redis.call('ZREM', queue_key, task_key)
+
+                    return 'OK'
+                    """
+                ),
+            )
+        cancel_task = self._cancel_task_script
+
+        # Execute the cancellation script
+        await cancel_task(
+            keys=[
+                self.stream_key,
+                self.known_task_key(key),
+                self.parked_task_key(key),
+                self.queue_key,
+                self.stream_id_key(key),
+            ],
+            args=[key],
+        )
 
     @property
     def strike_key(self) -> str:
```
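The switch from a `Pipeline` to registered Lua scripts is what makes `_schedule` and `_cancel` atomic: a MULTI/EXEC pipeline queues commands up front and cannot branch on the result of a read, while an `EVAL`'d script can check state and act on it with no other client interleaving. A stripped-down sketch of that property, assuming a local Redis (the `demo:*` keys and `SCHEDULE_ONCE` script are illustrative, not docket's actual schema):

```python
import asyncio

from redis.asyncio import Redis

# Refuse to schedule a key twice: the existence check and the writes run as
# one atomic unit on the server, which a pipeline alone cannot express.
SCHEDULE_ONCE = """
if redis.call('EXISTS', KEYS[1]) == 1 then
    return 'EXISTS'
end
redis.call('SET', KEYS[1], ARGV[1])
return redis.call('XADD', KEYS[2], '*', 'key', ARGV[1])
"""


async def main() -> None:
    redis = Redis()
    schedule_once = redis.register_script(SCHEDULE_ONCE)
    first = await schedule_once(keys=["demo:known:t1", "demo:stream"], args=["t1"])
    second = await schedule_once(keys=["demo:known:t1", "demo:stream"], args=["t1"])
    print(first, second)  # a stream message ID, then b'EXISTS'
    await redis.aclose()


asyncio.run(main())
```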
```diff
@@ -781,6 +905,7 @@ class Docket:
                 key = key_bytes.decode()
                 pipeline.delete(self.parked_task_key(key))
                 pipeline.delete(self.known_task_key(key))
+                pipeline.delete(self.stream_id_key(key))
 
             await pipeline.execute()
 
```
{pydocket-0.10.0 → pydocket-0.11.0}/src/docket/execution.py

```diff
@@ -51,6 +51,7 @@ class Execution:
         key: str,
         attempt: int,
         trace_context: opentelemetry.context.Context | None = None,
+        redelivered: bool = False,
     ) -> None:
         self.function = function
         self.args = args
@@ -59,6 +60,7 @@
         self.key = key
         self.attempt = attempt
         self.trace_context = trace_context
+        self.redelivered = redelivered
 
     def as_message(self) -> Message:
         return {
@@ -80,6 +82,7 @@
             key=message[b"key"].decode(),
             attempt=int(message[b"attempt"].decode()),
             trace_context=propagate.extract(message, getter=message_getter),
+            redelivered=False,  # Default to False, will be set to True in worker if it's a redelivery
         )
 
     def general_labels(self) -> Mapping[str, str]:
```
{pydocket-0.10.0 → pydocket-0.11.0}/src/docket/instrumentation.py

```diff
@@ -40,6 +40,12 @@ TASKS_STARTED = meter.create_counter(
     unit="1",
 )
 
+TASKS_REDELIVERED = meter.create_counter(
+    "docket_tasks_redelivered",
+    description="How many tasks started that were redelivered from another worker",
+    unit="1",
+)
+
 TASKS_STRICKEN = meter.create_counter(
     "docket_tasks_stricken",
     description="How many tasks have been stricken from executing",
```
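The new counter follows the same OpenTelemetry metrics pattern as docket's existing counters: create it once on a meter, then record increments with a mapping of attribute labels. A minimal sketch (the meter name and labels here are illustrative):

```python
from opentelemetry import metrics

meter = metrics.get_meter("example")

redelivered = meter.create_counter(
    "example_tasks_redelivered",
    description="How many tasks were redelivered from another worker",
    unit="1",
)

# Counters take an increment and a mapping of attribute labels:
redelivered.add(1, {"worker": "worker-1", "docket": "default"})
```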
{pydocket-0.10.0 → pydocket-0.11.0}/src/docket/worker.py

```diff
@@ -47,6 +47,7 @@ from .instrumentation import (
     TASKS_COMPLETED,
     TASKS_FAILED,
     TASKS_PERPETUATED,
+    TASKS_REDELIVERED,
     TASKS_RETRIED,
     TASKS_RUNNING,
     TASKS_STARTED,
@@ -286,7 +287,11 @@
                 count=available_slots,
             )
 
-            def start_task(message_id: RedisMessageID, message: RedisMessage) -> bool:
+            def start_task(
+                message_id: RedisMessageID,
+                message: RedisMessage,
+                is_redelivery: bool = False,
+            ) -> bool:
                 function_name = message[b"function"].decode()
                 if not (function := self.docket.tasks.get(function_name)):
                     logger.warning(
@@ -297,6 +302,7 @@
                     return False
 
                 execution = Execution.from_message(function, message)
+                execution.redelivered = is_redelivery
 
                 task = asyncio.create_task(self._execute(execution), name=execution.key)
                 active_tasks[task] = message_id
@@ -342,12 +348,15 @@
                     continue
 
                 for source in [get_redeliveries, get_new_deliveries]:
-                    for
+                    for stream_key, messages in await source(redis):
+                        is_redelivery = stream_key == b"__redelivery__"
                         for message_id, message in messages:
                             if not message:  # pragma: no cover
                                 continue
 
-                            task_started = start_task(message_id, message)
+                            task_started = start_task(
+                                message_id, message, is_redelivery
+                            )
                             if not task_started:
                                 # Other errors - delete and ack
                                 await self._delete_known_task(redis, message)
```
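The `b"__redelivery__"` sentinel implies that both sources yield `(stream_key, messages)` pairs in the shape `XREADGROUP` returns, with reclaimed messages re-labelled so the loop can tell the two apart. A hedged sketch of that shape, assuming `XAUTOCLAIM` for redeliveries (docket's real `get_redeliveries`/`get_new_deliveries` are not shown in this diff, and the stream, group, and idle-time values here are invented):

```python
from redis.asyncio import Redis

STREAM, GROUP, CONSUMER = "docket:stream", "workers", "worker-1"


async def get_new_deliveries(redis: Redis):
    # XREADGROUP already yields [(stream_key, [(message_id, fields), ...])]
    return await redis.xreadgroup(GROUP, CONSUMER, {STREAM: ">"}, count=10)


async def get_redeliveries(redis: Redis):
    # XAUTOCLAIM takes over messages another consumer left pending too long;
    # the second element of the reply is the claimed (id, fields) batch.
    reply = await redis.xautoclaim(STREAM, GROUP, CONSUMER, min_idle_time=30_000)
    messages = reply[1]
    # Re-label under the sentinel so the caller counts these as redeliveries.
    return [(b"__redelivery__", messages)]
```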
```diff
@@ -495,7 +504,8 @@
 
         logger.debug("Deleting known task", extra=self._log_context())
         known_task_key = self.docket.known_task_key(key)
-        await redis.delete(known_task_key)
+        stream_id_key = self.docket.stream_id_key(key)
+        await redis.delete(known_task_key, stream_id_key)
 
     async def _execute(self, execution: Execution) -> None:
         log_context = {**self._log_context(), **execution.specific_labels()}
@@ -520,6 +530,8 @@
         duration = 0.0
 
         TASKS_STARTED.add(1, counter_labels)
+        if execution.redelivered:
+            TASKS_REDELIVERED.add(1, counter_labels)
         TASKS_RUNNING.add(1, counter_labels)
         TASK_PUNCTUALITY.record(punctuality, counter_labels)
 
```
{pydocket-0.10.0 → pydocket-0.11.0}/tests/test_fundamentals.py

```diff
@@ -104,13 +104,9 @@ async def test_adding_is_idempotent(
     assert soon <= now() < later
 
 
-@pytest.mark.skip(
-    "Temporarily skipping due to test flake for task rescheduling. "
-    "See https://github.com/chrisguidry/docket/issues/149"
-)
 async def test_rescheduling_later(
     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
-):
+):
     """docket should allow for rescheduling a task for later"""
 
     key = f"my-cool-task:{uuid4()}"
@@ -254,10 +250,10 @@ async def test_cancelling_future_task(
     the_task.assert_not_called()
 
 
-async def test_cancelling_current_task_not_supported(
+async def test_cancelling_immediate_task(
     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
 ):
-    """docket
+    """docket can cancel a task that is scheduled immediately"""
 
     execution = await docket.add(the_task, now())("a", "b", c="c")
 
@@ -265,7 +261,28 @@ async def test_cancelling_current_task_not_supported(
 
     await worker.run_until_finished()
 
-    the_task.
+    the_task.assert_not_called()
+
+
+async def test_cancellation_is_idempotent(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """Test that canceling the same task twice doesn't error."""
+    key = f"test-task:{uuid4()}"
+
+    # Schedule a task
+    later = now() + timedelta(seconds=1)
+    await docket.add(the_task, later, key=key)("test")
+
+    # Cancel it twice - both should succeed without error
+    await docket.cancel(key)
+    await docket.cancel(key)  # Should be idempotent
+
+    # Run worker to ensure the task was actually cancelled
+    await worker.run_until_finished()
+
+    # Task should not have been executed since it was cancelled
+    the_task.assert_not_called()
 
 
 async def test_errors_are_logged(
```
{pydocket-0.10.0 → pydocket-0.11.0}/tests/test_instrumentation.py

```diff
@@ -376,6 +376,14 @@ def TASKS_RETRIED(monkeypatch: pytest.MonkeyPatch) -> Mock:
     return mock
 
 
+@pytest.fixture
+def TASKS_REDELIVERED(monkeypatch: pytest.MonkeyPatch) -> Mock:
+    """Mock for the TASKS_REDELIVERED counter."""
+    mock = Mock(spec=Counter.add)
+    monkeypatch.setattr("docket.instrumentation.TASKS_REDELIVERED.add", mock)
+    return mock
+
+
 async def test_worker_execution_increments_task_counters(
     docket: Docket,
     worker: Worker,
@@ -386,6 +394,7 @@ async def test_worker_execution_increments_task_counters(
     TASKS_SUCCEEDED: Mock,
     TASKS_FAILED: Mock,
     TASKS_RETRIED: Mock,
+    TASKS_REDELIVERED: Mock,
 ):
     """Should increment the appropriate task counters when a worker executes a task."""
     await docket.add(the_task)()
@@ -397,6 +406,7 @@ async def test_worker_execution_increments_task_counters(
     TASKS_SUCCEEDED.assert_called_once_with(1, worker_labels)
     TASKS_FAILED.assert_not_called()
     TASKS_RETRIED.assert_not_called()
+    TASKS_REDELIVERED.assert_not_called()
 
 
 async def test_failed_task_increments_failure_counter(
@@ -409,6 +419,7 @@ async def test_failed_task_increments_failure_counter(
     TASKS_SUCCEEDED: Mock,
     TASKS_FAILED: Mock,
     TASKS_RETRIED: Mock,
+    TASKS_REDELIVERED: Mock,
 ):
     """Should increment the TASKS_FAILED counter when a task fails."""
     the_task.side_effect = ValueError("Womp")
@@ -422,6 +433,7 @@ async def test_failed_task_increments_failure_counter(
     TASKS_FAILED.assert_called_once_with(1, worker_labels)
     TASKS_SUCCEEDED.assert_not_called()
     TASKS_RETRIED.assert_not_called()
+    TASKS_REDELIVERED.assert_not_called()
 
 
 async def test_retried_task_increments_retry_counter(
@@ -433,6 +445,7 @@ async def test_retried_task_increments_retry_counter(
     TASKS_SUCCEEDED: Mock,
     TASKS_FAILED: Mock,
     TASKS_RETRIED: Mock,
+    TASKS_REDELIVERED: Mock,
 ):
     """Should increment the TASKS_RETRIED counter when a task is retried."""
 
@@ -448,6 +461,7 @@ async def test_retried_task_increments_retry_counter(
     assert TASKS_FAILED.call_count == 2
     assert TASKS_RETRIED.call_count == 1
     TASKS_SUCCEEDED.assert_not_called()
+    TASKS_REDELIVERED.assert_not_called()
 
 
 async def test_exhausted_retried_task_increments_retry_counter(
@@ -459,6 +473,7 @@ async def test_exhausted_retried_task_increments_retry_counter(
     TASKS_SUCCEEDED: Mock,
     TASKS_FAILED: Mock,
     TASKS_RETRIED: Mock,
+    TASKS_REDELIVERED: Mock,
 ):
     """Should increment the appropriate counters when retries are exhausted."""
 
@@ -474,6 +489,41 @@ async def test_exhausted_retried_task_increments_retry_counter(
     TASKS_FAILED.assert_called_once_with(1, worker_labels)
     TASKS_RETRIED.assert_not_called()
     TASKS_SUCCEEDED.assert_not_called()
+    TASKS_REDELIVERED.assert_not_called()
+
+
+async def test_redelivered_tasks_increment_redelivered_counter(
+    docket: Docket,
+    worker_labels: dict[str, str],
+    TASKS_STARTED: Mock,
+    TASKS_COMPLETED: Mock,
+    TASKS_SUCCEEDED: Mock,
+    TASKS_FAILED: Mock,
+    TASKS_RETRIED: Mock,
+    TASKS_REDELIVERED: Mock,
+):
+    """Should increment the TASKS_REDELIVERED counter for redelivered tasks."""
+
+    async def test_task():
+        await asyncio.sleep(0.01)
+
+    await docket.add(test_task)()
+
+    worker = Worker(docket, redelivery_timeout=timedelta(milliseconds=50))
+
+    async with worker:
+        worker._execute = AsyncMock(side_effect=Exception("Simulated worker failure"))  # type: ignore[assignment]
+
+        with pytest.raises(Exception, match="Simulated worker failure"):
+            await worker.run_until_finished()
+
+    await asyncio.sleep(0.075)
+
+    worker2 = Worker(docket, redelivery_timeout=timedelta(milliseconds=100))
+    async with worker2:
+        await worker2.run_until_finished()
+
+    assert TASKS_REDELIVERED.call_count >= 1
 
 
 @pytest.fixture
```