pydocket 0.5.2__py3-none-any.whl → 0.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docket/__init__.py +4 -0
- docket/annotations.py +23 -0
- docket/dependencies.py +182 -38
- docket/docket.py +5 -4
- docket/execution.py +35 -12
- docket/worker.py +264 -226
- {pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/METADATA +1 -1
- pydocket-0.6.1.dist-info/RECORD +16 -0
- pydocket-0.5.2.dist-info/RECORD +0 -16
- {pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/WHEEL +0 -0
- {pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/entry_points.txt +0 -0
- {pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/licenses/LICENSE +0 -0
docket/__init__.py
CHANGED

@@ -13,11 +13,13 @@ from .dependencies import (
     CurrentDocket,
     CurrentExecution,
     CurrentWorker,
+    Depends,
     ExponentialRetry,
     Perpetual,
     Retry,
     TaskKey,
     TaskLogger,
+    Timeout,
 )
 from .docket import Docket
 from .execution import Execution
@@ -36,5 +38,7 @@ __all__ = [
     "ExponentialRetry",
     "Logged",
     "Perpetual",
+    "Timeout",
+    "Depends",
     "__version__",
 ]

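At the package root, the visible change is two new public exports, both defined in docket/dependencies.py below. A quick sanity check, assuming this wheel is installed:

    # New in this release, per the __init__.py diff above:
    from docket import Depends, Timeout
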
docket/annotations.py
CHANGED

@@ -4,8 +4,14 @@ from typing import Any, Iterable, Mapping, Self


 class Annotation(abc.ABC):
+    _cache: dict[tuple[type[Self], inspect.Signature], Mapping[str, Self]] = {}
+
     @classmethod
     def annotated_parameters(cls, signature: inspect.Signature) -> Mapping[str, Self]:
+        key = (cls, signature)
+        if key in cls._cache:
+            return cls._cache[key]
+
         annotated: dict[str, Self] = {}

         for param_name, param in signature.parameters.items():
@@ -23,8 +29,25 @@ class Annotation(abc.ABC):
             elif isinstance(arg_type, type) and issubclass(arg_type, cls):
                 annotated[param_name] = arg_type()

+        cls._cache[key] = annotated
         return annotated


 class Logged(Annotation):
     """Instructs docket to include arguments to this parameter in the log."""
+
+    length_only: bool = False
+
+    def __init__(self, length_only: bool = False) -> None:
+        self.length_only = length_only
+
+    def format(self, argument: Any) -> str:
+        if self.length_only:
+            if isinstance(argument, (dict, set)):
+                return f"{{len {len(argument)}}}"
+            elif isinstance(argument, tuple):
+                return f"(len {len(argument)})"
+            elif hasattr(argument, "__len__"):
+                return f"[len {len(argument)}]"
+
+        return repr(argument)

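For context, `Logged` is discovered on task parameters by `annotated_parameters` (presumably applied via `typing.Annotated`, which that method scans); the new `length_only` flag redacts the value itself and logs only its size. A minimal sketch of the formatting behavior, using only what this diff shows:

    from docket.annotations import Logged

    logged = Logged(length_only=True)
    logged.format({"a": 1, "b": 2})  # '{len 2}'  (dicts and sets use braces)
    logged.format((1, 2, 3))         # '(len 3)'  (tuples use parentheses)
    logged.format([1, 2, 3])         # '[len 3]'  (anything else with __len__, str included)
    Logged().format("secret")        # "'secret'" (default: the full repr)
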
docket/dependencies.py
CHANGED

@@ -1,35 +1,60 @@
 import abc
-import inspect
 import logging
+import time
+from contextlib import AsyncExitStack, asynccontextmanager
+from contextvars import ContextVar
 from datetime import timedelta
-from …
+from types import TracebackType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncContextManager,
+    AsyncGenerator,
+    Awaitable,
+    Callable,
+    Counter,
+    Generic,
+    TypeVar,
+    cast,
+)

 from .docket import Docket
-from .execution import Execution
-
+from .execution import Execution, TaskFunction, get_signature
+
+if TYPE_CHECKING:  # pragma: no cover
+    from .worker import Worker


 class Dependency(abc.ABC):
     single: bool = False

+    docket: ContextVar[Docket] = ContextVar("docket")
+    worker: ContextVar["Worker"] = ContextVar("worker")
+    execution: ContextVar[Execution] = ContextVar("execution")
+
     @abc.abstractmethod
-    def …
+    async def __aenter__(self) -> Any: ...  # pragma: no cover
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> bool: ...  # pragma: no cover


 class _CurrentWorker(Dependency):
-    def …
-        return worker
+    async def __aenter__(self) -> "Worker":
+        return self.worker.get()


-def CurrentWorker() -> Worker:
-    return cast(Worker, _CurrentWorker())
+def CurrentWorker() -> "Worker":
+    return cast("Worker", _CurrentWorker())


 class _CurrentDocket(Dependency):
-    def …
-        return docket
+    async def __aenter__(self) -> Docket:
+        return self.docket.get()


 def CurrentDocket() -> Docket:
@@ -37,10 +62,8 @@ def CurrentDocket() -> Docket:


 class _CurrentExecution(Dependency):
-    def …
-        self…
-    ) -> Execution:
-        return execution
+    async def __aenter__(self) -> Execution:
+        return self.execution.get()


 def CurrentExecution() -> Execution:
@@ -48,8 +71,8 @@ def CurrentExecution() -> Execution:


 class _TaskKey(Dependency):
-    def …
-        return execution.key
+    async def __aenter__(self) -> str:
+        return self.execution.get().key


 def TaskKey() -> str:
@@ -57,15 +80,14 @@ def TaskKey() -> str:


 class _TaskLogger(Dependency):
-    def …
-        …
-    ) -> logging.LoggerAdapter[logging.Logger]:
+    async def __aenter__(self) -> logging.LoggerAdapter[logging.Logger]:
+        execution = self.execution.get()
         logger = logging.getLogger(f"docket.task.{execution.function.__name__}")
         return logging.LoggerAdapter(
             logger,
             {
-                **docket.labels(),
-                **worker.labels(),
+                **self.docket.get().labels(),
+                **self.worker.get().labels(),
                 **execution.specific_labels(),
             },
         )
@@ -85,7 +107,8 @@ class Retry(Dependency):
         self.delay = delay
         self.attempt = 1

-    def …
+    async def __aenter__(self) -> "Retry":
+        execution = self.execution.get()
         retry = Retry(attempts=self.attempts, delay=self.delay)
         retry.attempt = execution.attempt
         return retry
@@ -104,9 +127,9 @@ class ExponentialRetry(Retry):
         self.minimum_delay = minimum_delay
         self.maximum_delay = maximum_delay

-    def …
+    async def __aenter__(self) -> "ExponentialRetry":
+        execution = self.execution.get()
+
         retry = ExponentialRetry(
             attempts=self.attempts,
             minimum_delay=self.minimum_delay,
@@ -155,9 +178,8 @@ class Perpetual(Dependency):
         self.automatic = automatic
         self.cancelled = False

-    def …
-    ) -> "Perpetual":
+    async def __aenter__(self) -> "Perpetual":
+        execution = self.execution.get()
         perpetual = Perpetual(every=self.every)
         perpetual.args = execution.args
         perpetual.kwargs = execution.kwargs
@@ -171,27 +193,121 @@ class Perpetual(Dependency):
         self.kwargs = kwargs


+class Timeout(Dependency):
+    single = True
+
+    base: timedelta
+
+    _deadline: float
+
+    def __init__(self, base: timedelta) -> None:
+        self.base = base
+
+    async def __aenter__(self) -> "Timeout":
+        timeout = Timeout(base=self.base)
+        timeout.start()
+        return timeout
+
+    def start(self) -> None:
+        self._deadline = time.monotonic() + self.base.total_seconds()
+
+    def expired(self) -> bool:
+        return time.monotonic() >= self._deadline
+
+    def remaining(self) -> timedelta:
+        return timedelta(seconds=self._deadline - time.monotonic())
+
+    def extend(self, by: timedelta | None = None) -> None:
+        if by is None:
+            by = self.base
+        self._deadline += by.total_seconds()
+
+
+R = TypeVar("R")
+
+DependencyFunction = Callable[..., Awaitable[R] | AsyncContextManager[R]]
+
+
+_parameter_cache: dict[
+    TaskFunction | DependencyFunction[Any],
+    dict[str, Dependency],
+] = {}
+
+
 def get_dependency_parameters(
-    function: …
+    function: TaskFunction | DependencyFunction[Any],
 ) -> dict[str, Dependency]:
-    …
+    if function in _parameter_cache:
+        return _parameter_cache[function]
+
+    dependencies: dict[str, Dependency] = {}

-    signature = …
+    signature = get_signature(function)

-    for …
+    for parameter, param in signature.parameters.items():
         if not isinstance(param.default, Dependency):
             continue

-        dependencies[…
+        dependencies[parameter] = param.default

+    _parameter_cache[function] = dependencies
     return dependencies


+class _Depends(Dependency, Generic[R]):
+    dependency: DependencyFunction[R]
+
+    cache: ContextVar[dict[DependencyFunction[Any], Any]] = ContextVar("cache")
+    stack: ContextVar[AsyncExitStack] = ContextVar("stack")
+
+    def __init__(
+        self, dependency: Callable[[], Awaitable[R] | AsyncContextManager[R]]
+    ) -> None:
+        self.dependency = dependency
+
+    async def _resolve_parameters(
+        self,
+        function: TaskFunction | DependencyFunction[Any],
+    ) -> dict[str, Any]:
+        stack = self.stack.get()
+
+        arguments: dict[str, Any] = {}
+        parameters = get_dependency_parameters(function)
+
+        for parameter, dependency in parameters.items():
+            arguments[parameter] = await stack.enter_async_context(dependency)
+
+        return arguments
+
+    async def __aenter__(self) -> R:
+        cache = self.cache.get()
+
+        if self.dependency in cache:
+            return cache[self.dependency]
+
+        stack = self.stack.get()
+        arguments = await self._resolve_parameters(self.dependency)
+
+        value = self.dependency(**arguments)
+
+        if isinstance(value, AsyncContextManager):
+            value = await stack.enter_async_context(value)
+        else:
+            value = await value
+
+        cache[self.dependency] = value
+        return value
+
+
+def Depends(dependency: DependencyFunction[R]) -> R:
+    return cast(R, _Depends(dependency))
+
+
 D = TypeVar("D", bound=Dependency)


 def get_single_dependency_parameter_of_type(
-    function: …
+    function: TaskFunction, dependency_type: type[D]
 ) -> D | None:
     assert dependency_type.single, "Dependency must be single"
     for _, dependency in get_dependency_parameters(function).items():
@@ -210,7 +326,7 @@ def get_single_dependency_of_type(
     return None


-def validate_dependencies(function: Callable[..., Awaitable[Any]]) -> None:
+def validate_dependencies(function: TaskFunction) -> None:
     parameters = get_dependency_parameters(function)

     counts = Counter(type(dependency) for dependency in parameters.values())
@@ -220,3 +336,31 @@ def validate_dependencies(function: Callable[..., Awaitable[Any]]) -> None:
         raise ValueError(
             f"Only one {dependency_type.__name__} dependency is allowed per task"
         )
+
+
+@asynccontextmanager
+async def resolved_dependencies(
+    worker: "Worker", execution: Execution
+) -> AsyncGenerator[dict[str, Any], None]:
+    # Set context variables once at the beginning
+    Dependency.docket.set(worker.docket)
+    Dependency.worker.set(worker)
+    Dependency.execution.set(execution)
+
+    _Depends.cache.set({})
+
+    async with AsyncExitStack() as stack:
+        _Depends.stack.set(stack)
+
+        arguments: dict[str, Any] = {}
+
+        parameters = get_dependency_parameters(execution.function)
+        for parameter, dependency in parameters.items():
+            kwargs = execution.kwargs
+            if parameter in kwargs:
+                arguments[parameter] = kwargs[parameter]
+                continue
+
+            arguments[parameter] = await stack.enter_async_context(dependency)
+
+        yield arguments

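Taken together, this file adds FastAPI-style dependency injection (`Depends`: resolved once per execution, cached, with sub-dependencies entered on a shared `AsyncExitStack`) and a cooperative deadline (`Timeout`). Since `get_dependency_parameters` looks for `Dependency` instances in parameter defaults, a task would presumably declare them like the sketch below; the names are illustrative, inferred from this diff rather than from docket's documentation:

    from datetime import timedelta
    from docket import Depends, Timeout

    async def get_connection() -> object:
        # any coroutine function works as a dependency (async context
        # managers too); its value is cached for the rest of the execution
        return object()  # placeholder for a real connection

    async def backfill(
        connection: object = Depends(get_connection),
        timeout: Timeout = Timeout(timedelta(minutes=5)),
    ) -> None:
        # a long-running step can push its own deadline out as it progresses
        timeout.extend(timedelta(minutes=1))

Note also that `resolved_dependencies` lets an explicitly passed keyword argument override a dependency of the same name.
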
docket/docket.py
CHANGED

@@ -38,6 +38,7 @@ from .execution import (
     Strike,
     StrikeInstruction,
     StrikeList,
+    TaskFunction,
 )
 from .instrumentation import (
     REDIS_DISRUPTIONS,
@@ -57,7 +58,7 @@ tracer: trace.Tracer = trace.get_tracer(__name__)
 P = ParamSpec("P")
 R = TypeVar("R")

-TaskCollection = Iterable[…
+TaskCollection = Iterable[TaskFunction]

 RedisStreamID = bytes
 RedisMessageID = bytes
@@ -91,7 +92,7 @@ class RunningExecution(Execution):
         worker: str,
         started: datetime,
     ) -> None:
-        self.function: …
+        self.function: TaskFunction = execution.function
         self.args: tuple[Any, ...] = execution.args
         self.kwargs: dict[str, Any] = execution.kwargs
         self.when: datetime = execution.when
@@ -111,7 +112,7 @@ class DocketSnapshot:


 class Docket:
-    tasks: dict[str, …
+    tasks: dict[str, TaskFunction]
     strike_list: StrikeList

     _monitor_strikes_task: asyncio.Task[None]
@@ -197,7 +198,7 @@ class Docket:
         finally:
             await asyncio.shield(r.__aexit__(None, None, None))

-    def register(self, function: …
+    def register(self, function: TaskFunction) -> None:
        from .dependencies import validate_dependencies

        validate_dependencies(function)

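These docket.py changes are mechanical: signatures that previously spelled out their own callable types now share the `TaskFunction` alias defined in docket/execution.py (next section):

    from typing import Any, Awaitable, Callable

    # the shared alias the signatures above now use
    TaskFunction = Callable[..., Awaitable[Any]]
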
docket/execution.py
CHANGED

@@ -7,23 +7,40 @@ from typing import Any, Awaitable, Callable, Hashable, Literal, Mapping, Self, c

 import cloudpickle  # type: ignore[import]

+from opentelemetry import trace, propagate
+import opentelemetry.context

 from .annotations import Logged
+from docket.instrumentation import message_getter

 logger: logging.Logger = logging.getLogger(__name__)

+TaskFunction = Callable[..., Awaitable[Any]]
 Message = dict[bytes, bytes]


+_signature_cache: dict[Callable[..., Any], inspect.Signature] = {}
+
+
+def get_signature(function: Callable[..., Any]) -> inspect.Signature:
+    if function in _signature_cache:
+        return _signature_cache[function]
+
+    signature = inspect.signature(function)
+    _signature_cache[function] = signature
+    return signature
+
+
 class Execution:
     def __init__(
         self,
-        function: …
+        function: TaskFunction,
         args: tuple[Any, ...],
         kwargs: dict[str, Any],
         when: datetime,
         key: str,
         attempt: int,
+        trace_context: opentelemetry.context.Context | None = None,
     ) -> None:
         self.function = function
         self.args = args
@@ -31,6 +48,7 @@ class Execution:
         self.when = when
         self.key = key
         self.attempt = attempt
+        self.trace_context = trace_context

     def as_message(self) -> Message:
         return {
@@ -43,9 +61,7 @@ class Execution:
         }

     @classmethod
-    def from_message(
-        cls, function: Callable[..., Awaitable[Any]], message: Message
-    ) -> Self:
+    def from_message(cls, function: TaskFunction, message: Message) -> Self:
         return cls(
             function=function,
             args=cloudpickle.loads(message[b"args"]),
@@ -53,6 +69,7 @@ class Execution:
             when=datetime.fromisoformat(message[b"when"].decode()),
             key=message[b"key"].decode(),
             attempt=int(message[b"attempt"].decode()),
+            trace_context=propagate.extract(message, getter=message_getter),
         )

     def general_labels(self) -> Mapping[str, str]:
@@ -68,28 +85,32 @@ class Execution:

     def call_repr(self) -> str:
         arguments: list[str] = []
-        signature = inspect.signature(self.function)
         function_name = self.function.__name__

+        signature = get_signature(self.function)
         logged_parameters = Logged.annotated_parameters(signature)
-
         parameter_names = list(signature.parameters.keys())

         for i, argument in enumerate(self.args[: len(parameter_names)]):
             parameter_name = parameter_names[i]
-            if …
-                arguments.append(…
+            if logged := logged_parameters.get(parameter_name):
+                arguments.append(logged.format(argument))
             else:
                 arguments.append("...")

         for parameter_name, argument in self.kwargs.items():
-            if …
-                arguments.append(f"{parameter_name}={…
+            if logged := logged_parameters.get(parameter_name):
+                arguments.append(f"{parameter_name}={logged.format(argument)}")
             else:
                 arguments.append(f"{parameter_name}=...")

         return f"{function_name}({', '.join(arguments)}){{{self.key}}}"

+    def incoming_span_links(self) -> list[trace.Link]:
+        initiating_span = trace.get_current_span(self.trace_context)
+        initiating_context = initiating_span.get_span_context()
+        return [trace.Link(initiating_context)] if initiating_context.is_valid else []
+

 class Operator(enum.StrEnum):
     EQUAL = "=="
@@ -217,10 +238,10 @@ class StrikeList:
         if function_name in self.task_strikes and not task_strikes:
             return True

-        …
+        signature = get_signature(execution.function)

         try:
-            bound_args = …
+            bound_args = signature.bind(*execution.args, **execution.kwargs)
             bound_args.apply_defaults()
         except TypeError:
             # If we can't make sense of the arguments, just assume the task is fine
@@ -265,6 +286,8 @@ class StrikeList:
                 case "between":  # pragma: no branch
                     lower, upper = strike_value
                     return lower <= value <= upper
+                case _:  # pragma: no cover
+                    raise ValueError(f"Unknown operator: {operator}")
         except (ValueError, TypeError):
             # If we can't make the comparison due to incompatible types, just log the
             # error and assume the task is not stricken

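The tracing change moves context handling out of the worker: `from_message` now extracts the producer's OpenTelemetry context from the message itself, and `incoming_span_links` turns it into span links for the consumer span. The general pattern, sketched with only standard `opentelemetry-api` calls (docket's `message_getter` carrier logic is specific to its Redis messages):

    from opentelemetry import propagate, trace

    tracer = trace.get_tracer(__name__)

    def start_consumer_span(headers: dict[str, str]) -> None:
        ctx = propagate.extract(headers)  # recover the producer's context
        parent = trace.get_current_span(ctx).get_span_context()
        links = [trace.Link(parent)] if parent.is_valid else []
        # link (rather than parent) the consumer span to the producing span,
        # mirroring Execution.incoming_span_links() above
        with tracer.start_as_current_span(
            "task", kind=trace.SpanKind.CONSUMER, links=links
        ):
            ...
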
docket/worker.py
CHANGED

@@ -1,11 +1,11 @@
 import asyncio
-import inspect
 import logging
 import sys
+import time
 from datetime import datetime, timedelta, timezone
 from types import TracebackType
 from typing import (
-    …
+    Coroutine,
     Mapping,
     Protocol,
     Self,
@@ -13,18 +13,27 @@ from typing import (
 )
 from uuid import uuid4

-import …
-from opentelemetry import propagate, trace
+from opentelemetry import trace
 from opentelemetry.trace import Tracer
 from redis.asyncio import Redis
-from redis.exceptions import LockError
-
+from redis.exceptions import ConnectionError, LockError
+
+from docket.execution import get_signature
+
+from .dependencies import (
+    Dependency,
+    Perpetual,
+    Retry,
+    Timeout,
+    get_single_dependency_of_type,
+    get_single_dependency_parameter_of_type,
+    resolved_dependencies,
+)
 from .docket import (
     Docket,
     Execution,
     RedisMessage,
     RedisMessageID,
-    RedisMessages,
     RedisReadGroupResponse,
 )
 from .instrumentation import (
@@ -41,7 +50,6 @@ from .instrumentation import (
     TASKS_STARTED,
     TASKS_STRICKEN,
     TASKS_SUCCEEDED,
-    message_getter,
     metrics_server,
 )

@@ -49,10 +57,6 @@ logger: logging.Logger = logging.getLogger(__name__)
 tracer: Tracer = trace.get_tracer(__name__)


-if TYPE_CHECKING:  # pragma: no cover
-    from .dependencies import Dependency
-
-
 class _stream_due_tasks(Protocol):
     async def __call__(
         self, keys: list[str], args: list[str | float]
@@ -75,7 +79,7 @@ class Worker:
         concurrency: int = 10,
         redelivery_timeout: timedelta = timedelta(minutes=5),
         reconnection_delay: timedelta = timedelta(seconds=5),
-        minimum_check_interval: timedelta = timedelta(milliseconds=…),
+        minimum_check_interval: timedelta = timedelta(milliseconds=250),
         scheduling_resolution: timedelta = timedelta(milliseconds=250),
     ) -> None:
         self.docket = docket
@@ -197,13 +201,14 @@ class Worker:
     async def _run(self, forever: bool = False) -> None:
         logger.info("Starting worker %r with the following tasks:", self.name)
         for task_name, task in self.docket.tasks.items():
-            signature = …
+            signature = get_signature(task)
             logger.info("* %s%s", task_name, signature)

         while True:
             try:
-                …
+                async with self.docket.redis() as redis:
+                    return await self._worker_loop(redis, forever=forever)
+            except ConnectionError:
                 REDIS_DISRUPTIONS.add(1, self.labels())
                 logger.warning(
                     "Error connecting to redis, retrying in %s...",
@@ -212,123 +217,135 @@ class Worker:
                 )
                 await asyncio.sleep(self.reconnection_delay.total_seconds())

-    async def _worker_loop(self, forever: bool = False):
+    async def _worker_loop(self, redis: Redis, forever: bool = False):
         worker_stopping = asyncio.Event()

         await self._schedule_all_automatic_perpetual_tasks()

-        …
+        scheduler_task = asyncio.create_task(
+            self._scheduler_loop(redis, worker_stopping)
+        )
+
+        active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
+        available_slots = self.concurrency
+
+        log_context = self._log_context()
+
+        async def check_for_work() -> bool:
+            logger.debug("Checking for work", extra=log_context)
+            async with redis.pipeline() as pipeline:
+                pipeline.xlen(self.docket.stream_key)
+                pipeline.zcard(self.docket.queue_key)
+                results: list[int] = await pipeline.execute()
+                stream_len = results[0]
+                queue_len = results[1]
+                return stream_len > 0 or queue_len > 0
+
+        async def get_redeliveries(redis: Redis) -> RedisReadGroupResponse:
+            logger.debug("Getting redeliveries", extra=log_context)
+            _, redeliveries, *_ = await redis.xautoclaim(
+                name=self.docket.stream_key,
+                groupname=self.docket.worker_group_name,
+                consumername=self.name,
+                min_idle_time=int(self.redelivery_timeout.total_seconds() * 1000),
+                start_id="0-0",
+                count=available_slots,
+            )
+            return [(b"__redelivery__", redeliveries)]
+
+        async def get_new_deliveries(redis: Redis) -> RedisReadGroupResponse:
+            logger.debug("Getting new deliveries", extra=log_context)
+            return await redis.xreadgroup(
+                groupname=self.docket.worker_group_name,
+                consumername=self.name,
+                streams={self.docket.stream_key: ">"},
+                block=int(self.minimum_check_interval.total_seconds() * 1000),
+                count=available_slots,
             )
-        active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
-
-        async def check_for_work() -> bool:
-            async with redis.pipeline() as pipeline:
-                pipeline.xlen(self.docket.stream_key)
-                pipeline.zcard(self.docket.queue_key)
-                results: list[int] = await pipeline.execute()
-                stream_len = results[0]
-                queue_len = results[1]
-                return stream_len > 0 or queue_len > 0
-
-        async def process_completed_tasks() -> None:
-            completed_tasks = {task for task in active_tasks if task.done()}
-            for task in completed_tasks:
-                message_id = active_tasks.pop(task)
-
-                await task
-
-                async with redis.pipeline() as pipeline:
-                    pipeline.xack(
-                        self.docket.stream_key,
-                        self.docket.worker_group_name,
-                        message_id,
-                    )
-                    pipeline.xdel(
-                        self.docket.stream_key,
-                        message_id,
-                    )
-                    await pipeline.execute()

+        def start_task(message_id: RedisMessageID, message: RedisMessage) -> bool:
+            function_name = message[b"function"].decode()
+            if not (function := self.docket.tasks.get(function_name)):
+                logger.warning(
+                    "Task function %r not found",
+                    function_name,
+                    extra=log_context,
+                )
+                return False

-            has_work = await check_for_work()
+            execution = Execution.from_message(function, message)

-            await process_completed_tasks()
+            task = asyncio.create_task(self._execute(execution), name=execution.key)
+            active_tasks[task] = message_id

+            nonlocal available_slots
+            available_slots -= 1

-        …
-            message_id: RedisMessageID, message: RedisMessage
-        ) -> None:
-            if not message:  # pragma: no cover
-                return
+            return True

+        async def process_completed_tasks() -> None:
+            completed_tasks = {task for task in active_tasks if task.done()}
+            for task in completed_tasks:
+                message_id = active_tasks.pop(task)
+                await task
+                await ack_message(redis, message_id)
+
+        async def ack_message(redis: Redis, message_id: RedisMessageID) -> None:
+            logger.debug("Acknowledging message", extra=log_context)
+            async with redis.pipeline() as pipeline:
+                pipeline.xack(
+                    self.docket.stream_key,
+                    self.docket.worker_group_name,
+                    message_id,
+                )
+                pipeline.xdel(
+                    self.docket.stream_key,
+                    message_id,
+                )
+                await pipeline.execute()

-            available_slots -= 1
+        has_work: bool = True

-            redeliveries: RedisMessages
-            _, redeliveries, *_ = await redis.xautoclaim(
-                name=self.docket.stream_key,
-                groupname=self.docket.worker_group_name,
-                consumername=self.name,
-                min_idle_time=int(
-                    self.redelivery_timeout.total_seconds() * 1000
-                ),
-                start_id="0-0",
-                count=available_slots,
-            )
+        try:
+            while forever or has_work or active_tasks:
+                await process_completed_tasks()

-            start_task(message_id, message)
+                available_slots = self.concurrency - len(active_tasks)

-            new_deliveries: RedisReadGroupResponse = await redis.xreadgroup(
-                groupname=self.docket.worker_group_name,
-                consumername=self.name,
-                streams={self.docket.stream_key: ">"},
-                block=(
-                    int(self.minimum_check_interval.total_seconds() * 1000)
-                    if forever or active_tasks
-                    else None
-                ),
-                count=available_slots,
-            )
+                if available_slots <= 0:
+                    await asyncio.sleep(self.minimum_check_interval.total_seconds())
+                    continue

+                for source in [get_redeliveries, get_new_deliveries]:
+                    for _, messages in await source(redis):
                         for message_id, message in messages:
+                            if not message:  # pragma: no cover
+                                continue

+                            if not start_task(message_id, message):
+                                await self._delete_known_task(redis, message)
+                            await ack_message(redis, message_id)

-            logger.info(
-                "Shutdown requested, finishing %d active tasks...",
-                len(active_tasks),
-                extra=self._log_context(),
-            )
-        finally:
-            if active_tasks:
-                await asyncio.gather(*active_tasks, return_exceptions=True)
-                await process_completed_tasks()
+                        if available_slots <= 0:
+                            break

+                if not forever and not active_tasks:
+                    has_work = await check_for_work()
+
+        except asyncio.CancelledError:
+            if active_tasks:  # pragma: no cover
+                logger.info(
+                    "Shutdown requested, finishing %d active tasks...",
+                    len(active_tasks),
+                    extra=log_context,
+                )
+        finally:
+            if active_tasks:
+                await asyncio.gather(*active_tasks, return_exceptions=True)
+                await process_completed_tasks()
+
+            worker_stopping.set()
+            await scheduler_task

     async def _scheduler_loop(
         self,
@@ -387,8 +404,11 @@ class Worker:

         total_work: int = sys.maxsize

+        log_context = self._log_context()
+
         while not worker_stopping.is_set() or total_work:
             try:
+                logger.debug("Scheduling due tasks", extra=log_context)
                 total_work, due_work = await stream_due_tasks(
                     keys=[self.docket.queue_key, self.docket.stream_key],
                     args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
@@ -401,22 +421,20 @@ class Worker:
                     total_work,
                     self.docket.queue_key,
                     self.docket.stream_key,
-                    extra=…
+                    extra=log_context,
                 )
             except Exception:  # pragma: no cover
                 logger.exception(
                     "Error in scheduler loop",
                     exc_info=True,
-                    extra=…
+                    extra=log_context,
                 )
             finally:
                 await asyncio.sleep(self.scheduling_resolution.total_seconds())

-        logger.debug("Scheduler loop finished", extra=…
+        logger.debug("Scheduler loop finished", extra=log_context)

     async def _schedule_all_automatic_perpetual_tasks(self) -> None:
-        from .dependencies import Perpetual, get_single_dependency_parameter_of_type
-
         async with self.docket.redis() as redis:
             try:
                 async with redis.lock(
@@ -438,157 +456,170 @@ class Worker:
             except LockError:  # pragma: no cover
                 return

-    async def …
-        …
-        async with self.docket.redis() as redis:
-            await redis.delete(self.docket.known_task_key(key))
-        logger.warning(
-            "Task function %r not found", function_name, extra=log_context
-        )
+    async def _delete_known_task(
+        self, redis: Redis, execution_or_message: Execution | RedisMessage
+    ) -> None:
+        if isinstance(execution_or_message, Execution):
+            key = execution_or_message.key
+        elif bytes_key := execution_or_message.get(b"key"):
+            key = bytes_key.decode()
+        else:  # pragma: no cover
             return

-        …
+        logger.debug("Deleting known task", extra=self._log_context())
+        known_task_key = self.docket.known_task_key(key)
+        await redis.delete(known_task_key)

-    async def …
+    async def _execute(self, execution: Execution) -> None:
+        log_context = {**self._log_context(), **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}

-        arrow = "↬" if execution.attempt > 1 else "↪"
         call = execution.call_repr()

         if self.docket.strike_list.is_stricken(execution):
             async with self.docket.redis() as redis:
-                await …
+                await self._delete_known_task(redis, execution)

-            logger.warning("%s %s", arrow, call, extra=log_context)
+            logger.warning("🗙 %s", call, extra=log_context)
             TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})
             return

         if execution.key in self._execution_counts:
             self._execution_counts[execution.key] += 1

-        …
-        async with self.docket.redis() as redis:
-            await redis.delete(self.docket.known_task_key(key))
-
-        context = propagate.extract(message, getter=message_getter)
-        initiating_context = trace.get_current_span(context).get_span_context()
-        links = [trace.Link(initiating_context)] if initiating_context.is_valid else []
-
-        start = datetime.now(timezone.utc)
-        punctuality = start - execution.when
-        log_context = {**log_context, "punctuality": punctuality.total_seconds()}
-        duration = timedelta(0)
+        start = time.time()
+        punctuality = start - execution.when.timestamp()
+        log_context = {**log_context, "punctuality": punctuality}
+        duration = 0.0

         TASKS_STARTED.add(1, counter_labels)
         TASKS_RUNNING.add(1, counter_labels)
-        TASK_PUNCTUALITY.record(punctuality…
-
-        logger.info("%s [%s] %s", arrow, punctuality, call, extra=log_context)
-
-        try:
-            with tracer.start_as_current_span(
-                execution.function.__name__,
-                kind=trace.SpanKind.CONSUMER,
-                attributes={
-                    **self.labels(),
-                    **execution.specific_labels(),
-                    "code.function.name": execution.function.__name__,
-                },
-                links=links,
-            ):
-                await execution.function(
-                    *execution.args,
-                    **{
-                        **execution.kwargs,
-                        **dependencies,
-                    },
-                )
+        TASK_PUNCTUALITY.record(punctuality, counter_labels)

+        arrow = "↬" if execution.attempt > 1 else "↪"
+        logger.info("%s [%s] %s", arrow, ms(punctuality), call, extra=log_context)
+
+        with tracer.start_as_current_span(
+            execution.function.__name__,
+            kind=trace.SpanKind.CONSUMER,
+            attributes={
+                **self.labels(),
+                **execution.specific_labels(),
+                "code.function.name": execution.function.__name__,
+            },
+            links=execution.incoming_span_links(),
+        ):
+            async with resolved_dependencies(self, execution) as dependencies:
+                # Preemptively reschedule the perpetual task for the future, or clear
+                # the known task key for this task
+                rescheduled = await self._perpetuate_if_requested(
+                    execution, dependencies
                 )
-
-    async def …
-        execution: Execution,
-    ) -> dict[str, "Dependency"]:
-        from .dependencies import get_dependency_parameters
+                if not rescheduled:
+                    async with self.docket.redis() as redis:
+                        await self._delete_known_task(redis, execution)
+
+                try:
+                    if timeout := get_single_dependency_of_type(dependencies, Timeout):
+                        await self._run_function_with_timeout(
+                            execution, dependencies, timeout
+                        )
+                    else:
+                        await execution.function(
+                            *execution.args,
+                            **{
+                                **execution.kwargs,
+                                **dependencies,
+                            },
+                        )

+                    duration = log_context["duration"] = time.time() - start
+                    TASKS_SUCCEEDED.add(1, counter_labels)
+
+                    rescheduled = await self._perpetuate_if_requested(
+                        execution, dependencies, timedelta(seconds=duration)
+                    )
+
+                    arrow = "↫" if rescheduled else "↩"
+                    logger.info(
+                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
+                    )
+                except Exception:
+                    duration = log_context["duration"] = time.time() - start
+                    TASKS_FAILED.add(1, counter_labels)
+
+                    retried = await self._retry_if_requested(execution, dependencies)
+                    if not retried:
+                        retried = await self._perpetuate_if_requested(
+                            execution, dependencies, timedelta(seconds=duration)
+                        )
+
+                    arrow = "↫" if retried else "↩"
+                    logger.exception(
+                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
+                    )
+                finally:
+                    TASKS_RUNNING.add(-1, counter_labels)
+                    TASKS_COMPLETED.add(1, counter_labels)
+                    TASK_DURATION.record(duration, counter_labels)

+    async def _run_function_with_timeout(
+        self,
+        execution: Execution,
+        dependencies: dict[str, Dependency],
+        timeout: Timeout,
+    ) -> None:
+        task_coro = cast(
+            Coroutine[None, None, None],
+            execution.function(*execution.args, **execution.kwargs, **dependencies),
+        )
+        task = asyncio.create_task(task_coro)
+        try:
+            while not task.done():  # pragma: no branch
+                remaining = timeout.remaining().total_seconds()
+                if timeout.expired():
+                    task.cancel()
+                    break
+
+                try:
+                    await asyncio.wait_for(asyncio.shield(task), timeout=remaining)
+                    return
+                except asyncio.TimeoutError:
+                    continue
+        finally:
+            if not task.done():
+                task.cancel()

+        try:
+            await task
+        except asyncio.CancelledError:
+            raise asyncio.TimeoutError

     async def _retry_if_requested(
         self,
         execution: Execution,
-        dependencies: dict[str, …
+        dependencies: dict[str, Dependency],
     ) -> bool:
-        from .dependencies import Retry, get_single_dependency_of_type
-
         retry = get_single_dependency_of_type(dependencies, Retry)
         if not retry:
             return False

-        if retry.attempts is None…
-            …
-            execution.attempt += 1
-            await self.docket.schedule(execution)
+        if retry.attempts is not None and execution.attempt >= retry.attempts:
+            return False

+        execution.when = datetime.now(timezone.utc) + retry.delay
+        execution.attempt += 1
+        await self.docket.schedule(execution)

+        TASKS_RETRIED.add(1, {**self.labels(), **execution.specific_labels()})
+        return True

     async def _perpetuate_if_requested(
         self,
         execution: Execution,
-        dependencies: dict[str, …
+        dependencies: dict[str, Dependency],
         duration: timedelta | None = None,
     ) -> bool:
-        from .dependencies import Perpetual, get_single_dependency_of_type
-
         perpetual = get_single_dependency_of_type(dependencies, Perpetual)
         if not perpetual:
             return False
@@ -667,7 +698,7 @@ class Worker:

         except asyncio.CancelledError:  # pragma: no cover
             return
-        except …
+        except ConnectionError:
             REDIS_DISRUPTIONS.add(1, self.labels())
             logger.exception(
                 "Error sending worker heartbeat",
@@ -680,3 +711,10 @@ class Worker:
             exc_info=True,
             extra=self._log_context(),
         )
+
+
+def ms(seconds: float) -> str:
+    if seconds < 100:
+        return f"{seconds * 1000:6.0f}ms"
+    else:
+        return f"{seconds:6.0f}s "

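One detail of `_run_function_with_timeout` worth calling out: the task is wrapped in `asyncio.shield` before `asyncio.wait_for`, so an expiring timer does not cancel the task directly; the loop re-reads the deadline first, which is what makes `Timeout.extend()` effective mid-flight. A standalone illustration of that asyncio behavior (not docket code):

    import asyncio

    async def slow() -> str:
        await asyncio.sleep(0.2)
        return "done"

    async def main() -> None:
        task = asyncio.create_task(slow())
        try:
            # shield() absorbs the cancellation raised by wait_for, so the
            # underlying task keeps running after the timer fires...
            await asyncio.wait_for(asyncio.shield(task), timeout=0.05)
        except asyncio.TimeoutError:
            # ...letting the caller re-check an (extended) deadline and keep
            # waiting, instead of losing the work already in progress
            print(await task)  # prints "done"

    asyncio.run(main())
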
{pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.5.2
+Version: 0.6.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues

pydocket-0.6.1.dist-info/RECORD
ADDED

@@ -0,0 +1,16 @@
+docket/__init__.py,sha256=124XWbyQQHO1lhCoLQ-oheZnu4vNDHIaq4Whb7z3ogI,831
+docket/__main__.py,sha256=Vkuh7aJ-Bl7QVpVbbkUksAd_hn05FiLmWbc-8kbhZQ4,34
+docket/annotations.py,sha256=6sCgQxsgOjBN6ithFdXulXq4CPNSdyFocwyJ1gK9v2Q,1688
+docket/cli.py,sha256=OWql6QFthSbvRCGkIg-ufo26F48z0eCmzRXJYOdyAEc,20309
+docket/dependencies.py,sha256=pkjseBZjdSpgW9g2H4cZ_RXIRZ2ZfdngBCXJGUcbmao,10052
+docket/docket.py,sha256=KJxgiyOskEHsRQOmfgLpJCYDNNleHI-vEKK3uBPL_K8,21420
+docket/execution.py,sha256=f3LLt9bC7ExEZhgde5OBo1faKLYv-8ryfNLXSswo318,13579
+docket/instrumentation.py,sha256=bZlGA02JoJcY0J1WGm5_qXDfY0AXKr0ZLAYu67wkeKY,4611
+docket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+docket/tasks.py,sha256=RIlSM2omh-YDwVnCz6M5MtmK8T_m_s1w2OlRRxDUs6A,1437
+docket/worker.py,sha256=NrzmfpjHjQaGS8CoTOiKM5Bn88tPh_q2hz9f4hFegSk,26280
+pydocket-0.6.1.dist-info/METADATA,sha256=mxI1OHWe9W9bAyi8QiH69eMSsSk1Dm2oDvh301BJFgo,13092
+pydocket-0.6.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydocket-0.6.1.dist-info/entry_points.txt,sha256=4WOk1nUlBsUT5O3RyMci2ImuC5XFswuopElYcLHtD5k,47
+pydocket-0.6.1.dist-info/licenses/LICENSE,sha256=YuVWU_ZXO0K_k2FG8xWKe5RGxV24AhJKTvQmKfqXuyk,1087
+pydocket-0.6.1.dist-info/RECORD,,

pydocket-0.5.2.dist-info/RECORD
DELETED

@@ -1,16 +0,0 @@
-docket/__init__.py,sha256=7oruGALDoU6W_ntF-mMxxv3FFtO970DVzj3lUgoVIiM,775
-docket/__main__.py,sha256=Vkuh7aJ-Bl7QVpVbbkUksAd_hn05FiLmWbc-8kbhZQ4,34
-docket/annotations.py,sha256=GZwOPtPXyeIhnsLh3TQMBnXrjtTtSmF4Ratv4vjPx8U,950
-docket/cli.py,sha256=OWql6QFthSbvRCGkIg-ufo26F48z0eCmzRXJYOdyAEc,20309
-docket/dependencies.py,sha256=0P8GJTMWrzm9uZkQejCiRfT6IBisY7Hp1-4HAGTWv6w,6326
-docket/docket.py,sha256=p2G7QNn4H0sUhDlAI5BO5C6cRTy1ZWUZmFEuohX3RM8,21470
-docket/execution.py,sha256=PDrlAr8VzmB6JvqKO71YhXUcTcGQW7eyXrSKiTcAexE,12508
-docket/instrumentation.py,sha256=bZlGA02JoJcY0J1WGm5_qXDfY0AXKr0ZLAYu67wkeKY,4611
-docket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docket/tasks.py,sha256=RIlSM2omh-YDwVnCz6M5MtmK8T_m_s1w2OlRRxDUs6A,1437
-docket/worker.py,sha256=gqY_N7H9Jxh_0YIYQk0mucj_UrZNKItkT1xkuhwYmlY,25301
-pydocket-0.5.2.dist-info/METADATA,sha256=VbNbGmDdseQkzH64LFmsPNtw6kwbIc8cL73jlhS0vck,13092
-pydocket-0.5.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydocket-0.5.2.dist-info/entry_points.txt,sha256=4WOk1nUlBsUT5O3RyMci2ImuC5XFswuopElYcLHtD5k,47
-pydocket-0.5.2.dist-info/licenses/LICENSE,sha256=YuVWU_ZXO0K_k2FG8xWKe5RGxV24AhJKTvQmKfqXuyk,1087
-pydocket-0.5.2.dist-info/RECORD,,

{pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/WHEEL
File without changes

{pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/entry_points.txt
File without changes

{pydocket-0.5.2.dist-info → pydocket-0.6.1.dist-info}/licenses/LICENSE
File without changes