pydocket 0.10.0__py3-none-any.whl → 0.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docket/__init__.py +2 -0
- docket/agenda.py +201 -0
- docket/docket.py +161 -36
- docket/execution.py +3 -0
- docket/instrumentation.py +6 -0
- docket/worker.py +18 -6
- {pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/METADATA +1 -1
- pydocket-0.11.1.dist-info/RECORD +17 -0
- pydocket-0.10.0.dist-info/RECORD +0 -16
- {pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/WHEEL +0 -0
- {pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/entry_points.txt +0 -0
- {pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/licenses/LICENSE +0 -0
docket/__init__.py
CHANGED
```diff
@@ -8,6 +8,7 @@ from importlib.metadata import version
 
 __version__ = version("pydocket")
 
+from .agenda import Agenda
 from .annotations import Logged
 from .dependencies import (
     ConcurrencyLimit,
@@ -29,6 +30,7 @@ from .worker import Worker
 
 __all__ = [
     "__version__",
+    "Agenda",
     "ConcurrencyLimit",
     "CurrentDocket",
     "CurrentExecution",
```
docket/agenda.py
ADDED
The entire file is new (`@@ -0,0 +1,201 @@`):

```python
"""
Agenda - A collection of tasks that can be scheduled together.

The Agenda class provides a way to collect multiple tasks and then scatter them
evenly over a time period to avoid overwhelming the system with immediate work.
"""

import random
from datetime import datetime, timedelta, timezone
from typing import Any, Awaitable, Callable, Iterator, ParamSpec, TypeVar, overload

from uuid_extensions import uuid7

from .docket import Docket
from .execution import Execution, TaskFunction

P = ParamSpec("P")
R = TypeVar("R")


class Agenda:
    """A collection of tasks to be scheduled together on a Docket.

    The Agenda allows you to build up a collection of tasks with their arguments,
    then schedule them all at once using various timing strategies like scattering.

    Example:
        >>> agenda = Agenda()
        >>> agenda.add(process_item)(item1)
        >>> agenda.add(process_item)(item2)
        >>> agenda.add(send_email)(email)
        >>> await agenda.scatter(docket, over=timedelta(minutes=50))
    """

    def __init__(self) -> None:
        """Initialize an empty Agenda."""
        self._tasks: list[
            tuple[TaskFunction | str, tuple[Any, ...], dict[str, Any]]
        ] = []

    def __len__(self) -> int:
        """Return the number of tasks in the agenda."""
        return len(self._tasks)

    def __iter__(
        self,
    ) -> Iterator[tuple[TaskFunction | str, tuple[Any, ...], dict[str, Any]]]:
        """Iterate over tasks in the agenda."""
        return iter(self._tasks)

    @overload
    def add(
        self,
        function: Callable[P, Awaitable[R]],
    ) -> Callable[P, None]:
        """Add a task function to the agenda.

        Args:
            function: The task function to add.

        Returns:
            A callable that accepts the task arguments.
        """

    @overload
    def add(
        self,
        function: str,
    ) -> Callable[..., None]:
        """Add a task by name to the agenda.

        Args:
            function: The name of a registered task.

        Returns:
            A callable that accepts the task arguments.
        """

    def add(
        self,
        function: Callable[P, Awaitable[R]] | str,
    ) -> Callable[..., None]:
        """Add a task to the agenda.

        Args:
            function: The task function or name to add.

        Returns:
            A callable that accepts the task arguments and adds them to the agenda.
        """

        def scheduler(*args: Any, **kwargs: Any) -> None:
            self._tasks.append((function, args, kwargs))

        return scheduler

    def clear(self) -> None:
        """Clear all tasks from the agenda."""
        self._tasks.clear()

    async def scatter(
        self,
        docket: Docket,
        over: timedelta,
        start: datetime | None = None,
        jitter: timedelta | None = None,
    ) -> list[Execution]:
        """Scatter the tasks in this agenda over a time period.

        Tasks are distributed evenly across the specified time window,
        optionally with random jitter to prevent thundering herd effects.

        If an error occurs during scheduling, some tasks may have already been
        scheduled successfully before the failure occurred.

        Args:
            docket: The Docket to schedule tasks on.
            over: Time period to scatter tasks over (required).
            start: When to start scattering from. Defaults to now.
            jitter: Maximum random offset to add/subtract from each scheduled time.

        Returns:
            List of Execution objects for the scheduled tasks.

        Raises:
            KeyError: If any task name is not registered with the docket.
            ValueError: If any task is stricken or 'over' is not positive.
        """
        if over.total_seconds() <= 0:
            raise ValueError("'over' parameter must be a positive duration")

        if not self._tasks:
            return []

        if start is None:
            start = datetime.now(timezone.utc)

        # Calculate even distribution over the time period
        task_count = len(self._tasks)

        if task_count == 1:
            # Single task goes in the middle of the window
            schedule_times = [start + over / 2]
        else:
            # Distribute tasks evenly across the window
            # For n tasks, we want n points from start to start+over inclusive
            interval = over / (task_count - 1)
            schedule_times = [start + interval * i for i in range(task_count)]

        # Apply jitter if specified
        if jitter:
            jittered_times: list[datetime] = []
            for schedule_time in schedule_times:
                # Random offset between -jitter and +jitter
                offset = timedelta(
                    seconds=random.uniform(
                        -jitter.total_seconds(), jitter.total_seconds()
                    )
                )
                # Ensure the jittered time doesn't go before start
                jittered_time = max(schedule_time + offset, start)
                jittered_times.append(jittered_time)
            schedule_times = jittered_times

        # Build all Execution objects first, validating as we go
        executions: list[Execution] = []
        for (task_func, args, kwargs), schedule_time in zip(
            self._tasks, schedule_times
        ):
            # Resolve task function if given by name
            if isinstance(task_func, str):
                if task_func not in docket.tasks:
                    raise KeyError(f"Task '{task_func}' is not registered")
                resolved_func = docket.tasks[task_func]
            else:
                # Ensure task is registered
                if task_func not in docket.tasks.values():
                    docket.register(task_func)
                resolved_func = task_func

            # Create execution with unique key
            key = str(uuid7())
            execution = Execution(
                function=resolved_func,
                args=args,
                kwargs=kwargs,
                when=schedule_time,
                key=key,
                attempt=1,
            )
            executions.append(execution)

        # Schedule all tasks - if any fail, some tasks may have been scheduled
        for execution in executions:
            scheduler = docket.add(
                execution.function, when=execution.when, key=execution.key
            )
            # Actually schedule the task - if this fails, earlier tasks remain scheduled
            await scheduler(*execution.args, **execution.kwargs)

        return executions
```
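For orientation, here is a minimal usage sketch of the new `Agenda` API. The task function, docket name, and Redis URL are illustrative, and the `async with Docket(...)` usage is assumed from docket's public API rather than shown in this diff:

```python
# Hypothetical usage sketch of Agenda.scatter; names here are illustrative.
import asyncio
from datetime import timedelta

from docket import Agenda, Docket


async def greet(name: str) -> None:
    print(f"Hello, {name}!")


async def main() -> None:
    agenda = Agenda()
    for name in ["a", "b", "c", "d", "e"]:
        agenda.add(greet)(name)

    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        # Five tasks over ten minutes: with n > 1 tasks, scatter() places them
        # at interval = over / (n - 1), i.e. at 0:00, 2:30, 5:00, 7:30, 10:00.
        executions = await agenda.scatter(docket, over=timedelta(minutes=10))
        for execution in executions:
            print(execution.key, execution.when)


if __name__ == "__main__":
    asyncio.run(main())
```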
docket/docket.py
CHANGED
```diff
@@ -16,6 +16,7 @@ from typing import (
     Mapping,
     NoReturn,
     ParamSpec,
+    Protocol,
     Self,
     Sequence,
     TypedDict,
@@ -27,7 +28,6 @@
 import redis.exceptions
 from opentelemetry import propagate, trace
 from redis.asyncio import ConnectionPool, Redis
-from redis.asyncio.client import Pipeline
 from uuid_extensions import uuid7
 
 from .execution import (
@@ -55,6 +55,18 @@ logger: logging.Logger = logging.getLogger(__name__)
 tracer: trace.Tracer = trace.get_tracer(__name__)
 
 
+class _schedule_task(Protocol):
+    async def __call__(
+        self, keys: list[str], args: list[str | float | bytes]
+    ) -> str: ...  # pragma: no cover
+
+
+class _cancel_task(Protocol):
+    async def __call__(
+        self, keys: list[str], args: list[str]
+    ) -> str: ...  # pragma: no cover
+
+
 P = ParamSpec("P")
 R = TypeVar("R")
 
@@ -131,6 +143,8 @@ class Docket:
 
     _monitor_strikes_task: asyncio.Task[None]
     _connection_pool: ConnectionPool
+    _schedule_task_script: _schedule_task | None
+    _cancel_task_script: _cancel_task | None
 
     def __init__(
         self,
@@ -156,6 +170,8 @@ class Docket:
         self.url = url
         self.heartbeat_interval = heartbeat_interval
         self.missed_heartbeats = missed_heartbeats
+        self._schedule_task_script = None
+        self._cancel_task_script = None
 
     @property
     def worker_group_name(self) -> str:
@@ -300,9 +316,7 @@ class Docket:
         execution = Execution(function, args, kwargs, when, key, attempt=1)
 
         async with self.redis() as redis:
-
-            await self._schedule(redis, pipeline, execution, replace=False)
-            await pipeline.execute()
+            await self._schedule(redis, execution, replace=False)
 
         TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})
         TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
@@ -361,9 +375,7 @@ class Docket:
         execution = Execution(function, args, kwargs, when, key, attempt=1)
 
         async with self.redis() as redis:
-
-            await self._schedule(redis, pipeline, execution, replace=True)
-            await pipeline.execute()
+            await self._schedule(redis, execution, replace=True)
 
         TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})
         TASKS_CANCELLED.add(1, {**self.labels(), **execution.general_labels()})
@@ -383,9 +395,7 @@ class Docket:
             },
         ):
             async with self.redis() as redis:
-
-                await self._schedule(redis, pipeline, execution, replace=False)
-                await pipeline.execute()
+                await self._schedule(redis, execution, replace=False)
 
             TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
@@ -400,9 +410,7 @@ class Docket:
             attributes={**self.labels(), "docket.key": key},
         ):
             async with self.redis() as redis:
-
-                await self._cancel(pipeline, key)
-                await pipeline.execute()
+                await self._cancel(redis, key)
 
             TASKS_CANCELLED.add(1, self.labels())
 
@@ -420,13 +428,23 @@ class Docket:
     def parked_task_key(self, key: str) -> str:
         return f"{self.name}:{key}"
 
+    def stream_id_key(self, key: str) -> str:
+        return f"{self.name}:stream-id:{key}"
+
     async def _schedule(
         self,
         redis: Redis,
-        pipeline: Pipeline,
         execution: Execution,
         replace: bool = False,
     ) -> None:
+        """Schedule a task atomically.
+
+        Handles:
+        - Checking for task existence
+        - Cancelling existing tasks when replacing
+        - Adding tasks to stream (immediate) or queue (future)
+        - Tracking stream message IDs for later cancellation
+        """
         if self.strike_list.is_stricken(execution):
             logger.warning(
                 "%r is stricken, skipping schedule of %r",
@@ -437,7 +455,7 @@ class Docket:
                 1,
                 {
                     **self.labels(),
-                    **execution.
+                    **execution.general_labels(),
                     "docket.where": "docket",
                 },
             )
@@ -449,32 +467,138 @@ class Docket:
         key = execution.key
         when = execution.when
         known_task_key = self.known_task_key(key)
+        is_immediate = when <= datetime.now(timezone.utc)
 
+        # Lock per task key to prevent race conditions between concurrent operations
         async with redis.lock(f"{known_task_key}:lock", timeout=10):
-            if
-
-
-
-
-
-            "
-
-
-
+            if self._schedule_task_script is None:
+                self._schedule_task_script = cast(
+                    _schedule_task,
+                    redis.register_script(
+                        # KEYS: stream_key, known_key, parked_key, queue_key, stream_id_key
+                        # ARGV: task_key, when_timestamp, is_immediate, replace, ...message_fields
+                        """
+                        local stream_key = KEYS[1]
+                        local known_key = KEYS[2]
+                        local parked_key = KEYS[3]
+                        local queue_key = KEYS[4]
+                        local stream_id_key = KEYS[5]
+
+                        local task_key = ARGV[1]
+                        local when_timestamp = ARGV[2]
+                        local is_immediate = ARGV[3] == '1'
+                        local replace = ARGV[4] == '1'
+
+                        -- Extract message fields from ARGV[5] onwards
+                        local message = {}
+                        for i = 5, #ARGV, 2 do
+                            message[#message + 1] = ARGV[i]      -- field name
+                            message[#message + 1] = ARGV[i + 1]  -- field value
+                        end
+
+                        -- Handle replacement: cancel existing task if needed
+                        if replace then
+                            local existing_message_id = redis.call('GET', stream_id_key)
+                            if existing_message_id then
+                                redis.call('XDEL', stream_key, existing_message_id)
+                            end
+                            redis.call('DEL', known_key, parked_key, stream_id_key)
+                            redis.call('ZREM', queue_key, task_key)
+                        else
+                            -- Check if task already exists
+                            if redis.call('EXISTS', known_key) == 1 then
+                                return 'EXISTS'
+                            end
+                        end
+
+                        if is_immediate then
+                            -- Add to stream and store message ID for later cancellation
+                            local message_id = redis.call('XADD', stream_key, '*', unpack(message))
+                            redis.call('SET', known_key, when_timestamp)
+                            redis.call('SET', stream_id_key, message_id)
+                            return message_id
+                        else
+                            -- Add to queue with task data in parked hash
+                            redis.call('SET', known_key, when_timestamp)
+                            redis.call('HSET', parked_key, unpack(message))
+                            redis.call('ZADD', queue_key, when_timestamp, task_key)
+                            return 'QUEUED'
+                        end
+                        """
+                    ),
+                )
+            schedule_task = self._schedule_task_script
 
-
+            await schedule_task(
+                keys=[
+                    self.stream_key,
+                    known_task_key,
+                    self.parked_task_key(key),
+                    self.queue_key,
+                    self.stream_id_key(key),
+                ],
+                args=[
+                    key,
+                    str(when.timestamp()),
+                    "1" if is_immediate else "0",
+                    "1" if replace else "0",
+                    *[
+                        item
+                        for field, value in message.items()
+                        for item in (field, value)
+                    ],
+                ],
+            )
 
-
-
-            else:
-                pipeline.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
-                pipeline.zadd(self.queue_key, {key: when.timestamp()})
+    async def _cancel(self, redis: Redis, key: str) -> None:
+        """Cancel a task atomically.
 
-
-
-
-
+        Handles cancellation regardless of task location:
+        - From the stream (using stored message ID)
+        - From the queue (scheduled tasks)
+        - Cleans up all associated metadata keys
+        """
+        if self._cancel_task_script is None:
+            self._cancel_task_script = cast(
+                _cancel_task,
+                redis.register_script(
+                    # KEYS: stream_key, known_key, parked_key, queue_key, stream_id_key
+                    # ARGV: task_key
+                    """
+                    local stream_key = KEYS[1]
+                    local known_key = KEYS[2]
+                    local parked_key = KEYS[3]
+                    local queue_key = KEYS[4]
+                    local stream_id_key = KEYS[5]
+                    local task_key = ARGV[1]
+
+                    -- Delete from stream if message ID exists
+                    local message_id = redis.call('GET', stream_id_key)
+                    if message_id then
+                        redis.call('XDEL', stream_key, message_id)
+                    end
+
+                    -- Clean up all task-related keys
+                    redis.call('DEL', known_key, parked_key, stream_id_key)
+                    redis.call('ZREM', queue_key, task_key)
+
+                    return 'OK'
+                    """
+                ),
+            )
+        cancel_task = self._cancel_task_script
+
+        # Execute the cancellation script
+        await cancel_task(
+            keys=[
+                self.stream_key,
+                self.known_task_key(key),
+                self.parked_task_key(key),
+                self.queue_key,
+                self.stream_id_key(key),
+            ],
+            args=[key],
+        )
 
     @property
     def strike_key(self) -> str:
@@ -781,6 +905,7 @@ class Docket:
                 key = key_bytes.decode()
                 pipeline.delete(self.parked_task_key(key))
                 pipeline.delete(self.known_task_key(key))
+                pipeline.delete(self.stream_id_key(key))
 
             await pipeline.execute()
 
```

(A few removed lines in this file, such as the old scheduling body and the old `**execution.` label expression, are truncated in the source diff view and are shown here exactly as truncated.)
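The pipelined writes are replaced by server-side Lua scripts, registered once per client and cached on the `Docket` instance, so each schedule or cancel is a single atomic round trip. A minimal sketch of the redis-py `register_script` pattern these methods rely on; the key name and toy script are illustrative only:

```python
# Minimal sketch of redis-py's register_script: the returned script object is
# callable with keys/args and runs atomically on the server (EVALSHA, with an
# automatic re-load on NOSCRIPT). Assumes redis-py 5.x.
import asyncio

from redis.asyncio import Redis


async def main() -> None:
    redis = Redis.from_url("redis://localhost:6379/0")

    # SET the key only if it does not already exist, returning a status
    # string; both steps happen in one atomic server-side evaluation.
    set_if_absent = redis.register_script(
        """
        if redis.call('EXISTS', KEYS[1]) == 1 then
            return 'EXISTS'
        end
        redis.call('SET', KEYS[1], ARGV[1])
        return 'OK'
        """
    )

    print(await set_if_absent(keys=["demo:task"], args=["payload"]))  # b'OK'
    print(await set_if_absent(keys=["demo:task"], args=["payload"]))  # b'EXISTS'
    await redis.aclose()


asyncio.run(main())
```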
docket/execution.py
CHANGED
```diff
@@ -51,6 +51,7 @@ class Execution:
         key: str,
         attempt: int,
         trace_context: opentelemetry.context.Context | None = None,
+        redelivered: bool = False,
     ) -> None:
         self.function = function
         self.args = args
@@ -59,6 +60,7 @@ class Execution:
         self.key = key
         self.attempt = attempt
         self.trace_context = trace_context
+        self.redelivered = redelivered
 
     def as_message(self) -> Message:
         return {
@@ -80,6 +82,7 @@ class Execution:
             key=message[b"key"].decode(),
             attempt=int(message[b"attempt"].decode()),
             trace_context=propagate.extract(message, getter=message_getter),
+            redelivered=False,  # Default to False, will be set to True in worker if it's a redelivery
         )
 
     def general_labels(self) -> Mapping[str, str]:
```
docket/instrumentation.py
CHANGED
```diff
@@ -40,6 +40,12 @@ TASKS_STARTED = meter.create_counter(
     unit="1",
 )
 
+TASKS_REDELIVERED = meter.create_counter(
+    "docket_tasks_redelivered",
+    description="How many tasks started that were redelivered from another worker",
+    unit="1",
+)
+
 TASKS_STRICKEN = meter.create_counter(
     "docket_tasks_stricken",
     description="How many tasks have been stricken from executing",
```
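The new counter follows the same OpenTelemetry pattern as the existing ones. As a hedged sketch of how such a counter is wired up and observed, the SDK and exporter setup below is generic OpenTelemetry, not docket's own configuration:

```python
# Sketch of recording a counter like TASKS_REDELIVERED with a console
# exporter, so its increments are visible; provider wiring is illustrative.
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
    ConsoleMetricExporter,
    PeriodicExportingMetricReader,
)

reader = PeriodicExportingMetricReader(
    ConsoleMetricExporter(), export_interval_millis=1000
)
metrics.set_meter_provider(MeterProvider(metric_readers=[reader]))

meter = metrics.get_meter("docket")
tasks_redelivered = meter.create_counter(
    "docket_tasks_redelivered",
    description="How many tasks started that were redelivered from another worker",
    unit="1",
)

# Counters take a monotonic increment plus attribute labels.
tasks_redelivered.add(1, {"docket.name": "example", "docket.task": "greet"})
```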
docket/worker.py
CHANGED
```diff
@@ -47,6 +47,7 @@ from .instrumentation import (
     TASKS_COMPLETED,
     TASKS_FAILED,
     TASKS_PERPETUATED,
+    TASKS_REDELIVERED,
     TASKS_RETRIED,
     TASKS_RUNNING,
     TASKS_STARTED,
@@ -286,7 +287,11 @@ class Worker:
                 count=available_slots,
             )
 
-        def start_task(
+        def start_task(
+            message_id: RedisMessageID,
+            message: RedisMessage,
+            is_redelivery: bool = False,
+        ) -> bool:
             function_name = message[b"function"].decode()
             if not (function := self.docket.tasks.get(function_name)):
                 logger.warning(
@@ -297,6 +302,7 @@ class Worker:
                 return False
 
             execution = Execution.from_message(function, message)
+            execution.redelivered = is_redelivery
 
             task = asyncio.create_task(self._execute(execution), name=execution.key)
             active_tasks[task] = message_id
@@ -342,12 +348,15 @@ class Worker:
                     continue
 
                 for source in [get_redeliveries, get_new_deliveries]:
-                    for
+                    for stream_key, messages in await source(redis):
+                        is_redelivery = stream_key == b"__redelivery__"
                         for message_id, message in messages:
                             if not message:  # pragma: no cover
                                 continue
 
-                            task_started = start_task(
+                            task_started = start_task(
+                                message_id, message, is_redelivery
+                            )
                             if not task_started:
                                 # Other errors - delete and ack
                                 await self._delete_known_task(redis, message)
@@ -495,7 +504,8 @@ class Worker:
 
         logger.debug("Deleting known task", extra=self._log_context())
         known_task_key = self.docket.known_task_key(key)
-
+        stream_id_key = self.docket.stream_id_key(key)
+        await redis.delete(known_task_key, stream_id_key)
 
     async def _execute(self, execution: Execution) -> None:
         log_context = {**self._log_context(), **execution.specific_labels()}
@@ -520,6 +530,8 @@ class Worker:
         duration = 0.0
 
         TASKS_STARTED.add(1, counter_labels)
+        if execution.redelivered:
+            TASKS_REDELIVERED.add(1, counter_labels)
         TASKS_RUNNING.add(1, counter_labels)
         TASK_PUNCTUALITY.record(punctuality, counter_labels)
 
@@ -720,7 +732,7 @@ class Worker:
         execution.attempt += 1
         await self.docket.schedule(execution)
 
-        TASKS_RETRIED.add(1, {**self.labels(), **execution.
+        TASKS_RETRIED.add(1, {**self.labels(), **execution.general_labels()})
         return True
 
     async def _perpetuate_if_requested(
@@ -746,7 +758,7 @@ class Worker:
         )
 
         if duration is not None:
-            TASKS_PERPETUATED.add(1, {**self.labels(), **execution.
+            TASKS_PERPETUATED.add(1, {**self.labels(), **execution.general_labels()})
 
         return True
```

(As in docket.py, a few removed lines here are truncated in the source diff view and are reproduced as truncated.)
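The loop distinguishes redeliveries from fresh deliveries purely by a sentinel stream key. Below is a hypothetical sketch of what the two message sources could look like with redis-py's consumer-group APIs; docket's actual `get_redeliveries`/`get_new_deliveries` implementations are not shown in this diff, so the stream, group, and consumer names are assumptions:

```python
# Hypothetical sketch of the two message sources iterated by the worker loop.
# The b"__redelivery__" sentinel comes from the diff above; everything else
# (stream/group/consumer names, helper signatures) is illustrative.
from redis.asyncio import Redis


async def get_redeliveries(redis: Redis):
    # Claim messages another consumer left pending too long, and wrap them
    # under a sentinel "stream key" so callers can tell them apart.
    response = await redis.xautoclaim(
        "docket:stream", "docket-workers", "worker-1", min_idle_time=30_000
    )
    claimed = response[1]  # [(message_id, {field: value}), ...]
    return [(b"__redelivery__", claimed)]


async def get_new_deliveries(redis: Redis):
    # Fresh, never-delivered messages for this consumer group; xreadgroup
    # already returns [(stream_key, [(message_id, fields), ...]), ...].
    return await redis.xreadgroup(
        "docket-workers", "worker-1", {"docket:stream": ">"}, count=10, block=100
    )
```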
{pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.10.0
+Version: 0.11.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
```
pydocket-0.11.1.dist-info/RECORD
ADDED

```diff
@@ -0,0 +1,17 @@
+docket/__init__.py,sha256=ChJS2JRyruj22Vi504eXrmQNPQ97L_Sj52OJCuhjoeM,956
+docket/__main__.py,sha256=wcCrL4PjG51r5wVKqJhcoJPTLfHW0wNbD31DrUN0MWI,28
+docket/agenda.py,sha256=RqrVkCuWAvwn_q6graCU-lLRQltbJ0QQheJ34T-Gjck,6667
+docket/annotations.py,sha256=wttix9UOeMFMAWXAIJUfUw5GjESJZsACb4YXJCozP7Q,2348
+docket/cli.py,sha256=rTfri2--u4Q5PlXyh7Ub_F5uh3-TtZOWLUp9WY_TvAE,25750
+docket/dependencies.py,sha256=BC0bnt10cr9_S1p5JAP_bnC9RwZkTr9ulPBrxC7eZnA,20247
+docket/docket.py,sha256=NWyulaZYfcNeaqZSJMG54bHqTC5gVggzYFHjpTTY90A,31240
+docket/execution.py,sha256=Lqzgj5EO3v5OD0w__5qBut7WnlEcHZfAYj-BYRdiJf8,15138
+docket/instrumentation.py,sha256=zLYgtuXbNOcotcSlD9pgLVdNp2rPddyxj9JwM3K19Go,5667
+docket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+docket/tasks.py,sha256=RIlSM2omh-YDwVnCz6M5MtmK8T_m_s1w2OlRRxDUs6A,1437
+docket/worker.py,sha256=P4j9uHXt5KcU5e9S4SmQ9v6OCRFMLjYwbMR9PeRvVXc,35390
+pydocket-0.11.1.dist-info/METADATA,sha256=zl27q0Qf9js2bjyDHEjVWighIGWd4me36FLqY3yt5MI,5419
+pydocket-0.11.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydocket-0.11.1.dist-info/entry_points.txt,sha256=4WOk1nUlBsUT5O3RyMci2ImuC5XFswuopElYcLHtD5k,47
+pydocket-0.11.1.dist-info/licenses/LICENSE,sha256=YuVWU_ZXO0K_k2FG8xWKe5RGxV24AhJKTvQmKfqXuyk,1087
+pydocket-0.11.1.dist-info/RECORD,,
```
pydocket-0.10.0.dist-info/RECORD
DELETED
```diff
@@ -1,16 +0,0 @@
-docket/__init__.py,sha256=onwZzh73tESWoFBukbcW-7gjxoXb-yI7dutRD7tPN6g,915
-docket/__main__.py,sha256=wcCrL4PjG51r5wVKqJhcoJPTLfHW0wNbD31DrUN0MWI,28
-docket/annotations.py,sha256=wttix9UOeMFMAWXAIJUfUw5GjESJZsACb4YXJCozP7Q,2348
-docket/cli.py,sha256=rTfri2--u4Q5PlXyh7Ub_F5uh3-TtZOWLUp9WY_TvAE,25750
-docket/dependencies.py,sha256=BC0bnt10cr9_S1p5JAP_bnC9RwZkTr9ulPBrxC7eZnA,20247
-docket/docket.py,sha256=Cw7QB1d0eDwSgwn0Rj26WjFsXSe7MJtfsUBBHGalL7A,26262
-docket/execution.py,sha256=r_2RGC1qhtAcBUg7E6wewLEgftrf3hIxNbH0HnYPbek,14961
-docket/instrumentation.py,sha256=ogvzrfKbWsdPGfdg4hByH3_r5d3b5AwwQkSrmXw0hRg,5492
-docket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docket/tasks.py,sha256=RIlSM2omh-YDwVnCz6M5MtmK8T_m_s1w2OlRRxDUs6A,1437
-docket/worker.py,sha256=pOBRoEbakUwAGVKAuCNPSMyHRBSalUxtMc93QZewX7M,34928
-pydocket-0.10.0.dist-info/METADATA,sha256=Tsm_S5NTj5yOPmt-q4KAKjdEDjH6ZRzz_ITVapnFk64,5419
-pydocket-0.10.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydocket-0.10.0.dist-info/entry_points.txt,sha256=4WOk1nUlBsUT5O3RyMci2ImuC5XFswuopElYcLHtD5k,47
-pydocket-0.10.0.dist-info/licenses/LICENSE,sha256=YuVWU_ZXO0K_k2FG8xWKe5RGxV24AhJKTvQmKfqXuyk,1087
-pydocket-0.10.0.dist-info/RECORD,,
```
{pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/WHEEL
File without changes

{pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/entry_points.txt
File without changes

{pydocket-0.10.0.dist-info → pydocket-0.11.1.dist-info}/licenses/LICENSE
File without changes