pydocket 0.6.0__py3-none-any.whl → 0.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docket/__init__.py +11 -9
- docket/annotations.py +7 -0
- docket/cli.py +8 -0
- docket/dependencies.py +41 -1
- docket/execution.py +12 -3
- docket/worker.py +87 -55
- {pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/METADATA +1 -1
- pydocket-0.6.2.dist-info/RECORD +16 -0
- pydocket-0.6.0.dist-info/RECORD +0 -16
- {pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/WHEEL +0 -0
- {pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/entry_points.txt +0 -0
- {pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/licenses/LICENSE +0 -0
docket/__init__.py
CHANGED
@@ -17,6 +17,7 @@ from .dependencies import (
     ExponentialRetry,
     Perpetual,
     Retry,
+    TaskArgument,
     TaskKey,
     TaskLogger,
     Timeout,
@@ -26,19 +27,20 @@ from .execution import Execution
 from .worker import Worker
 
 __all__ = [
-    "
-    "Worker",
-    "Execution",
+    "__version__",
     "CurrentDocket",
-    "CurrentWorker",
     "CurrentExecution",
-    "
-    "
-    "
+    "CurrentWorker",
+    "Depends",
+    "Docket",
+    "Execution",
     "ExponentialRetry",
     "Logged",
     "Perpetual",
+    "Retry",
+    "TaskArgument",
+    "TaskKey",
+    "TaskLogger",
     "Timeout",
-    "
-    "__version__",
+    "Worker",
 ]
docket/annotations.py
CHANGED
@@ -4,8 +4,14 @@ from typing import Any, Iterable, Mapping, Self
 
 
 class Annotation(abc.ABC):
+    _cache: dict[tuple[type[Self], inspect.Signature], Mapping[str, Self]] = {}
+
     @classmethod
     def annotated_parameters(cls, signature: inspect.Signature) -> Mapping[str, Self]:
+        key = (cls, signature)
+        if key in cls._cache:
+            return cls._cache[key]
+
         annotated: dict[str, Self] = {}
 
         for param_name, param in signature.parameters.items():
@@ -23,6 +29,7 @@ class Annotation(abc.ABC):
             elif isinstance(arg_type, type) and issubclass(arg_type, cls):
                 annotated[param_name] = arg_type()
 
+        cls._cache[key] = annotated
        return annotated
 
 
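The new `_cache` turns `annotated_parameters` into a memoized classmethod keyed on `(cls, signature)`. A minimal standalone sketch of that pattern (with a simplified value type; docket's real keys and values are as in the diff above):

```python
import inspect
from typing import Any

_cache: dict[tuple[type, inspect.Signature], dict[str, Any]] = {}

def annotated_parameters(cls: type, signature: inspect.Signature) -> dict[str, Any]:
    # inspect.Signature is hashable (as long as its annotations and defaults
    # are), so it can serve directly as part of a dictionary key.
    key = (cls, signature)
    if key in _cache:
        return _cache[key]
    result = {name: param.annotation for name, param in signature.parameters.items()}
    _cache[key] = result
    return result
```

One trade-off of this approach: the cache grows for the life of the process, which is effectively a one-time cost for a bounded set of task signatures.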
docket/cli.py
CHANGED
@@ -245,6 +245,13 @@ def worker(
             envvar="DOCKET_WORKER_SCHEDULING_RESOLUTION",
         ),
     ] = timedelta(milliseconds=250),
+    schedule_automatic_tasks: Annotated[
+        bool,
+        typer.Option(
+            "--schedule-automatic-tasks",
+            help="Schedule automatic tasks",
+        ),
+    ] = True,
     until_finished: Annotated[
         bool,
         typer.Option(
@@ -270,6 +277,7 @@ def worker(
         reconnection_delay=reconnection_delay,
         minimum_check_interval=minimum_check_interval,
         scheduling_resolution=scheduling_resolution,
+        schedule_automatic_tasks=schedule_automatic_tasks,
         until_finished=until_finished,
         metrics_port=metrics_port,
         tasks=tasks,
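For readers unfamiliar with the `Annotated`/`typer.Option` pattern used here, a minimal self-contained sketch (the `greet` command and `--loud` flag are hypothetical stand-ins, not part of docket):

```python
from typing import Annotated

import typer

app = typer.Typer()

@app.command()
def greet(
    loud: Annotated[
        bool,
        typer.Option("--loud", help="Shout the greeting"),
    ] = False,
) -> None:
    typer.echo("HELLO" if loud else "hello")

if __name__ == "__main__":
    app()
```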
docket/dependencies.py
CHANGED
@@ -79,6 +79,22 @@ def TaskKey() -> str:
     return cast(str, _TaskKey())
 
 
+class _TaskArgument(Dependency):
+    parameter: str | None
+
+    def __init__(self, parameter: str | None = None) -> None:
+        self.parameter = parameter
+
+    async def __aenter__(self) -> Any:
+        assert self.parameter is not None
+        execution = self.execution.get()
+        return execution.get_argument(self.parameter)
+
+
+def TaskArgument(parameter: str | None = None) -> Any:
+    return cast(Any, _TaskArgument(parameter))
+
+
 class _TaskLogger(Dependency):
     async def __aenter__(self) -> logging.LoggerAdapter[logging.Logger]:
         execution = self.execution.get()
@@ -275,6 +291,11 @@ class _Depends(Dependency, Generic[R]):
         parameters = get_dependency_parameters(function)
 
         for parameter, dependency in parameters.items():
+            # Special case for TaskArguments, they are "magical" and infer the parameter
+            # they refer to from the parameter name (unless otherwise specified)
+            if isinstance(dependency, _TaskArgument) and not dependency.parameter:
+                dependency.parameter = parameter
+
             arguments[parameter] = await stack.enter_async_context(dependency)
 
         return arguments
@@ -338,6 +359,12 @@ def validate_dependencies(function: TaskFunction) -> None:
         )
 
 
+class FailedDependency:
+    def __init__(self, parameter: str, error: Exception) -> None:
+        self.parameter = parameter
+        self.error = error
+
+
 @asynccontextmanager
 async def resolved_dependencies(
     worker: "Worker", execution: Execution
@@ -361,6 +388,19 @@
             arguments[parameter] = kwargs[parameter]
             continue
 
-        arguments[parameter] = await stack.enter_async_context(dependency)
+        # Special case for TaskArguments, they are "magical" and infer the parameter
+        # they refer to from the parameter name (unless otherwise specified). At
+        # the top-level task function call, it doesn't make sense to specify one
+        # _without_ a parameter name, so we'll call that a failed dependency.
+        if isinstance(dependency, _TaskArgument) and not dependency.parameter:
+            arguments[parameter] = FailedDependency(
+                parameter, ValueError("No parameter name specified")
+            )
+            continue
+
+        try:
+            arguments[parameter] = await stack.enter_async_context(dependency)
+        except Exception as error:
+            arguments[parameter] = FailedDependency(parameter, error)
 
     yield arguments
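Putting the pieces together: `TaskArgument()` lets a dependency read an argument of the task invocation itself, inferring which one from the dependency's own parameter name. A hedged usage sketch based only on this diff (the `receipt_subject`/`send_receipt` functions and the `order_id` parameter are hypothetical):

```python
from docket import Depends, TaskArgument

async def receipt_subject(order_id: str = TaskArgument()) -> str:
    # `order_id` is inferred from this parameter's name and resolved from the
    # arguments of the top-level task call via Execution.get_argument()
    return f"Receipt for order {order_id}"

async def send_receipt(
    order_id: str,
    subject: str = Depends(receipt_subject),
) -> None:
    print(subject)
```

At the top level of a task, a bare `TaskArgument()` has no name to infer from (per the comment in `resolved_dependencies`), so it is recorded as a `FailedDependency` and surfaced as an error later rather than resolving silently.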
docket/execution.py
CHANGED
@@ -7,7 +7,7 @@ from typing import Any, Awaitable, Callable, Hashable, Literal, Mapping, Self, c
 
 import cloudpickle  # type: ignore[import]
 
-from opentelemetry import propagate
+from opentelemetry import trace, propagate
 import opentelemetry.context
 
 from .annotations import Logged
@@ -83,13 +83,17 @@ class Execution:
             "docket.attempt": self.attempt,
         }
 
+    def get_argument(self, parameter: str) -> Any:
+        signature = get_signature(self.function)
+        bound_args = signature.bind(*self.args, **self.kwargs)
+        return bound_args.arguments[parameter]
+
     def call_repr(self) -> str:
         arguments: list[str] = []
-        signature = get_signature(self.function)
         function_name = self.function.__name__
 
+        signature = get_signature(self.function)
         logged_parameters = Logged.annotated_parameters(signature)
-
         parameter_names = list(signature.parameters.keys())
 
         for i, argument in enumerate(self.args[: len(parameter_names)]):
@@ -107,6 +111,11 @@
 
         return f"{function_name}({', '.join(arguments)}){{{self.key}}}"
 
+    def incoming_span_links(self) -> list[trace.Link]:
+        initiating_span = trace.get_current_span(self.trace_context)
+        initiating_context = initiating_span.get_span_context()
+        return [trace.Link(initiating_context)] if initiating_context.is_valid else []
+
 
 class Operator(enum.StrEnum):
     EQUAL = "=="
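`Execution.get_argument` leans on `inspect.Signature.bind`, which maps positional and keyword arguments onto parameter names exactly as a real call would. A standalone stdlib illustration:

```python
import inspect

def greet(name: str, punctuation: str = "!") -> str:
    return f"Hello, {name}{punctuation}"

# bind() pairs ("world",) with the `name` parameter, just like calling greet("world")
bound = inspect.signature(greet).bind("world")
print(bound.arguments["name"])  # -> "world"
```

Note that `bound.arguments` only contains explicitly supplied values unless `apply_defaults()` is called, so looking up an argument left at its default would raise `KeyError`.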
docket/worker.py
CHANGED
@@ -1,6 +1,7 @@
 import asyncio
 import logging
 import sys
+import time
 from datetime import datetime, timedelta, timezone
 from types import TracebackType
 from typing import (
@@ -21,6 +22,7 @@ from docket.execution import get_signature
 
 from .dependencies import (
     Dependency,
+    FailedDependency,
     Perpetual,
     Retry,
     Timeout,
@@ -70,6 +72,7 @@ class Worker:
     reconnection_delay: timedelta
     minimum_check_interval: timedelta
     scheduling_resolution: timedelta
+    schedule_automatic_tasks: bool
 
     def __init__(
         self,
@@ -80,6 +83,7 @@
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=250),
         scheduling_resolution: timedelta = timedelta(milliseconds=250),
+        schedule_automatic_tasks: bool = True,
     ) -> None:
         self.docket = docket
         self.name = name or f"worker:{uuid4()}"
@@ -88,6 +92,7 @@
         self.reconnection_delay = reconnection_delay
         self.minimum_check_interval = minimum_check_interval
         self.scheduling_resolution = scheduling_resolution
+        self.schedule_automatic_tasks = schedule_automatic_tasks
 
     async def __aenter__(self) -> Self:
         self._heartbeat_task = asyncio.create_task(self._heartbeat())
@@ -133,6 +138,7 @@
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=100),
         scheduling_resolution: timedelta = timedelta(milliseconds=250),
+        schedule_automatic_tasks: bool = True,
         until_finished: bool = False,
         metrics_port: int | None = None,
         tasks: list[str] = ["docket.tasks:standard_tasks"],
@@ -150,6 +156,7 @@
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
             scheduling_resolution=scheduling_resolution,
+            schedule_automatic_tasks=schedule_automatic_tasks,
         ) as worker:
             if until_finished:
                 await worker.run_until_finished()
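A hedged sketch of how the new `schedule_automatic_tasks` knob might be used when embedding a worker, based only on the signatures visible in this diff (how the `Docket` itself is constructed is not shown here and is assumed):

```python
from datetime import timedelta

from docket import Docket, Worker

async def run_worker(docket: Docket) -> None:
    async with Worker(
        docket,
        scheduling_resolution=timedelta(milliseconds=250),
        # Skip the startup call to _schedule_all_automatic_perpetual_tasks(),
        # e.g. for a worker that should only drain explicitly scheduled work:
        schedule_automatic_tasks=False,
    ) as worker:
        await worker.run_until_finished()
```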
@@ -219,7 +226,8 @@
     async def _worker_loop(self, redis: Redis, forever: bool = False):
         worker_stopping = asyncio.Event()
 
-        await self._schedule_all_automatic_perpetual_tasks()
+        if self.schedule_automatic_tasks:
+            await self._schedule_all_automatic_perpetual_tasks()
 
         scheduler_task = asyncio.create_task(
             self._scheduler_loop(redis, worker_stopping)
@@ -228,8 +236,10 @@
         active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
         available_slots = self.concurrency
 
+        log_context = self._log_context()
+
         async def check_for_work() -> bool:
-            logger.debug("Checking for work", extra=
+            logger.debug("Checking for work", extra=log_context)
             async with redis.pipeline() as pipeline:
                 pipeline.xlen(self.docket.stream_key)
                 pipeline.zcard(self.docket.queue_key)
@@ -239,7 +249,7 @@
             return stream_len > 0 or queue_len > 0
 
         async def get_redeliveries(redis: Redis) -> RedisReadGroupResponse:
-            logger.debug("Getting redeliveries", extra=
+            logger.debug("Getting redeliveries", extra=log_context)
             _, redeliveries, *_ = await redis.xautoclaim(
                 name=self.docket.stream_key,
                 groupname=self.docket.worker_group_name,
@@ -251,7 +261,7 @@
             return [(b"__redelivery__", redeliveries)]
 
         async def get_new_deliveries(redis: Redis) -> RedisReadGroupResponse:
-            logger.debug("Getting new deliveries", extra=
+            logger.debug("Getting new deliveries", extra=log_context)
             return await redis.xreadgroup(
                 groupname=self.docket.worker_group_name,
                 consumername=self.name,
@@ -261,21 +271,18 @@
             )
 
         def start_task(message_id: RedisMessageID, message: RedisMessage) -> bool:
-            if not message:  # pragma: no cover
-                return False
-
             function_name = message[b"function"].decode()
             if not (function := self.docket.tasks.get(function_name)):
                 logger.warning(
                     "Task function %r not found",
                     function_name,
-                    extra=
+                    extra=log_context,
                 )
                 return False
 
             execution = Execution.from_message(function, message)
 
-            task = asyncio.create_task(self._execute(execution))
+            task = asyncio.create_task(self._execute(execution), name=execution.key)
             active_tasks[task] = message_id
 
             nonlocal available_slots
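Two small quality-of-life changes above: the empty-message guard moves out of `start_task` (into the read loop, later in this diff), and tasks are now created with `name=execution.key`. Naming asyncio tasks makes them identifiable in debug output, for example:

```python
import asyncio

async def job() -> None:
    await asyncio.sleep(0)

async def main() -> None:
    # The name appears in the task's repr and via Task.get_name(), which is
    # handy when inspecting asyncio.all_tasks() in a busy worker.
    task = asyncio.create_task(job(), name="example-task-key")  # hypothetical key
    print(task.get_name())  # -> "example-task-key"
    await task

asyncio.run(main())
```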
@@ -283,8 +290,15 @@
 
             return True
 
+        async def process_completed_tasks() -> None:
+            completed_tasks = {task for task in active_tasks if task.done()}
+            for task in completed_tasks:
+                message_id = active_tasks.pop(task)
+                await task
+                await ack_message(redis, message_id)
+
         async def ack_message(redis: Redis, message_id: RedisMessageID) -> None:
-            logger.debug("Acknowledging message", extra=
+            logger.debug("Acknowledging message", extra=log_context)
             async with redis.pipeline() as pipeline:
                 pipeline.xack(
                     self.docket.stream_key,
@@ -297,13 +311,6 @@
                 )
                 await pipeline.execute()
 
-        async def process_completed_tasks() -> None:
-            completed_tasks = {task for task in active_tasks if task.done()}
-            for task in completed_tasks:
-                message_id = active_tasks.pop(task)
-                await task
-                await ack_message(redis, message_id)
-
         has_work: bool = True
 
         try:
@@ -319,6 +326,9 @@
                 for source in [get_redeliveries, get_new_deliveries]:
                     for _, messages in await source(redis):
                         for message_id, message in messages:
+                            if not message:  # pragma: no cover
+                                continue
+
                             if not start_task(message_id, message):
                                 await self._delete_known_task(redis, message)
                                 await ack_message(redis, message_id)
@@ -334,7 +344,7 @@
                 logger.info(
                     "Shutdown requested, finishing %d active tasks...",
                     len(active_tasks),
-                    extra=
+                    extra=log_context,
                 )
         finally:
             if active_tasks:
@@ -401,9 +411,11 @@
 
         total_work: int = sys.maxsize
 
+        log_context = self._log_context()
+
         while not worker_stopping.is_set() or total_work:
             try:
-                logger.debug("Scheduling due tasks", extra=
+                logger.debug("Scheduling due tasks", extra=log_context)
                 total_work, due_work = await stream_due_tasks(
                     keys=[self.docket.queue_key, self.docket.stream_key],
                     args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
@@ -416,18 +428,18 @@
                     total_work,
                     self.docket.queue_key,
                     self.docket.stream_key,
-                    extra=
+                    extra=log_context,
                 )
             except Exception:  # pragma: no cover
                 logger.exception(
                     "Error in scheduler loop",
                     exc_info=True,
-                    extra=
+                    extra=log_context,
                 )
             finally:
                 await asyncio.sleep(self.scheduling_resolution.total_seconds())
 
-        logger.debug("Scheduler loop finished", extra=
+        logger.debug("Scheduler loop finished", extra=log_context)
 
     async def _schedule_all_automatic_perpetual_tasks(self) -> None:
         async with self.docket.redis() as redis:
|
@@ -469,38 +481,30 @@ class Worker:
|
|
|
469
481
|
log_context = {**self._log_context(), **execution.specific_labels()}
|
|
470
482
|
counter_labels = {**self.labels(), **execution.general_labels()}
|
|
471
483
|
|
|
472
|
-
arrow = "↬" if execution.attempt > 1 else "↪"
|
|
473
484
|
call = execution.call_repr()
|
|
474
485
|
|
|
475
486
|
if self.docket.strike_list.is_stricken(execution):
|
|
476
487
|
async with self.docket.redis() as redis:
|
|
477
488
|
await self._delete_known_task(redis, execution)
|
|
478
489
|
|
|
479
|
-
|
|
480
|
-
logger.warning("%s %s", arrow, call, extra=log_context)
|
|
490
|
+
logger.warning("🗙 %s", call, extra=log_context)
|
|
481
491
|
TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})
|
|
482
492
|
return
|
|
483
493
|
|
|
484
494
|
if execution.key in self._execution_counts:
|
|
485
495
|
self._execution_counts[execution.key] += 1
|
|
486
496
|
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
start = datetime.now(timezone.utc)
|
|
492
|
-
punctuality = start - execution.when
|
|
493
|
-
log_context = {
|
|
494
|
-
**log_context,
|
|
495
|
-
"punctuality": punctuality.total_seconds(),
|
|
496
|
-
}
|
|
497
|
-
duration = timedelta(0)
|
|
497
|
+
start = time.time()
|
|
498
|
+
punctuality = start - execution.when.timestamp()
|
|
499
|
+
log_context = {**log_context, "punctuality": punctuality}
|
|
500
|
+
duration = 0.0
|
|
498
501
|
|
|
499
502
|
TASKS_STARTED.add(1, counter_labels)
|
|
500
503
|
TASKS_RUNNING.add(1, counter_labels)
|
|
501
|
-
TASK_PUNCTUALITY.record(punctuality
|
|
504
|
+
TASK_PUNCTUALITY.record(punctuality, counter_labels)
|
|
502
505
|
|
|
503
|
-
|
|
506
|
+
arrow = "↬" if execution.attempt > 1 else "↪"
|
|
507
|
+
logger.info("%s [%s] %s", arrow, ms(punctuality), call, extra=log_context)
|
|
504
508
|
|
|
505
509
|
with tracer.start_as_current_span(
|
|
506
510
|
execution.function.__name__,
|
|
@@ -510,7 +514,7 @@ class Worker:
|
|
|
510
514
|
**execution.specific_labels(),
|
|
511
515
|
"code.function.name": execution.function.__name__,
|
|
512
516
|
},
|
|
513
|
-
links=
|
|
517
|
+
links=execution.incoming_span_links(),
|
|
514
518
|
):
|
|
515
519
|
async with resolved_dependencies(self, execution) as dependencies:
|
|
516
520
|
# Preemptively reschedule the perpetual task for the future, or clear
|
|
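`incoming_span_links()` (added in execution.py above) makes each task run start a span that *links* back to the span that scheduled it, rather than parenting under it. A self-contained sketch of that producer/consumer pattern using only public OpenTelemetry APIs (span names are illustrative; docket's `Execution` carries a `trace_context` for this purpose, and with no SDK configured the no-op implementations simply yield an empty link list):

```python
from opentelemetry import propagate, trace

tracer = trace.get_tracer("sketch")

# Producer side: capture the current span into a carrier that travels
# alongside the task message.
carrier: dict[str, str] = {}
with tracer.start_as_current_span("schedule-task"):
    propagate.inject(carrier)

# Consumer side: extract the context, then link to it instead of parenting,
# so the task run starts a fresh trace that still points at its initiator.
ctx = propagate.extract(carrier)
initiating = trace.get_current_span(ctx).get_span_context()
links = [trace.Link(initiating)] if initiating.is_valid else []
with tracer.start_as_current_span("run-task", links=links):
    pass
```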
@@ -523,6 +527,23 @@
                     await self._delete_known_task(redis, execution)
 
                 try:
+                    dependency_failures = {
+                        k: v
+                        for k, v in dependencies.items()
+                        if isinstance(v, FailedDependency)
+                    }
+                    if dependency_failures:
+                        raise ExceptionGroup(
+                            (
+                                "Failed to resolve dependencies for parameter(s): "
+                                + ", ".join(dependency_failures.keys())
+                            ),
+                            [
+                                dependency.error
+                                for dependency in dependency_failures.values()
+                            ],
+                        )
+
                     if timeout := get_single_dependency_of_type(dependencies, Timeout):
                         await self._run_function_with_timeout(
                             execution, dependencies, timeout
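Dependency failures are collected and raised together as an `ExceptionGroup` (Python 3.11+), so one bad dependency does not mask another. A small illustration of how such a group can be matched downstream with `except*` (the `customer_id` parameter name is hypothetical):

```python
def run() -> None:
    try:
        raise ExceptionGroup(
            "Failed to resolve dependencies for parameter(s): customer_id",
            [ValueError("No parameter name specified")],
        )
    except* ValueError as group:
        # except* binds a sub-group holding only the matching exceptions
        for error in group.exceptions:
            print(f"dependency error: {error}")

run()
```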
@@ -536,31 +557,35 @@
                         },
                     )
 
+                    duration = log_context["duration"] = time.time() - start
                     TASKS_SUCCEEDED.add(1, counter_labels)
-
-                    log_context["duration"] = duration.total_seconds()
+
                     rescheduled = await self._perpetuate_if_requested(
-                        execution, dependencies, duration
+                        execution, dependencies, timedelta(seconds=duration)
                     )
+
                     arrow = "↫" if rescheduled else "↩"
-                    logger.info(
+                    logger.info(
+                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
+                    )
                 except Exception:
+                    duration = log_context["duration"] = time.time() - start
                     TASKS_FAILED.add(1, counter_labels)
-
-                    log_context["duration"] = duration.total_seconds()
+
                     retried = await self._retry_if_requested(execution, dependencies)
                     if not retried:
                         retried = await self._perpetuate_if_requested(
-                            execution, dependencies, duration
+                            execution, dependencies, timedelta(seconds=duration)
                         )
+
                     arrow = "↫" if retried else "↩"
                     logger.exception(
-                        "%s [%s] %s", arrow, duration, call, extra=log_context
+                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
                     )
                 finally:
                     TASKS_RUNNING.add(-1, counter_labels)
                     TASKS_COMPLETED.add(1, counter_labels)
-                    TASK_DURATION.record(duration
+                    TASK_DURATION.record(duration, counter_labels)
 
     async def _run_function_with_timeout(
         self,
@@ -603,15 +628,15 @@
         if not retry:
             return False
 
-        if retry.attempts is None
-
-        execution.attempt += 1
-        await self.docket.schedule(execution)
+        if retry.attempts is not None and execution.attempt >= retry.attempts:
+            return False
 
-
-
+        execution.when = datetime.now(timezone.utc) + retry.delay
+        execution.attempt += 1
+        await self.docket.schedule(execution)
 
-
+        TASKS_RETRIED.add(1, {**self.labels(), **execution.specific_labels()})
+        return True
 
     async def _perpetuate_if_requested(
         self,
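The rewritten `_retry_if_requested` now has a single early exit: a finite attempt budget that has been exhausted. A condensed sketch of just that decision (the `Retry` stand-in below is hypothetical; docket's real `Retry` dependency lives in docket.dependencies):

```python
from dataclasses import dataclass
from datetime import timedelta

@dataclass
class Retry:  # simplified stand-in for docket's Retry dependency
    attempts: int | None  # None means retry indefinitely
    delay: timedelta

def should_retry(retry: Retry, attempt: int) -> bool:
    # Mirrors: if retry.attempts is not None and execution.attempt >= retry.attempts: return False
    return retry.attempts is None or attempt < retry.attempts

assert should_retry(Retry(3, timedelta(seconds=1)), 2)      # attempts remain
assert not should_retry(Retry(3, timedelta(seconds=1)), 3)  # budget exhausted
assert should_retry(Retry(None, timedelta(seconds=1)), 10)  # unlimited retries
```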
@@ -710,3 +735,10 @@
             exc_info=True,
             extra=self._log_context(),
         )
+
+
+def ms(seconds: float) -> str:
+    if seconds < 100:
+        return f"{seconds * 1000:6.0f}ms"
+    else:
+        return f"{seconds:6.0f}s "
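The new module-level `ms()` helper renders durations in a fixed-width column, switching units at 100 seconds so that anything shorter stays in milliseconds. A few example values, computed from its definition:

```python
from docket.worker import ms

assert ms(0.042) == "    42ms"
assert ms(1.5) == "  1500ms"
assert ms(150.0) == "   150s "
```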
{pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.6.0
+Version: 0.6.2
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
pydocket-0.6.2.dist-info/RECORD
ADDED

@@ -0,0 +1,16 @@
+docket/__init__.py,sha256=sY1T_NVsXQNOmOhOnfYmZ95dcE_52Ov6DSIVIMZp-1w,869
+docket/__main__.py,sha256=Vkuh7aJ-Bl7QVpVbbkUksAd_hn05FiLmWbc-8kbhZQ4,34
+docket/annotations.py,sha256=6sCgQxsgOjBN6ithFdXulXq4CPNSdyFocwyJ1gK9v2Q,1688
+docket/cli.py,sha256=znHN7eqaD_PFpSFn7iXa_uZlKzVWDrKkrmOd1CNuZRk,20561
+docket/dependencies.py,sha256=ykuJpL_MZMHUPX6ORys1YMHLCjS2Rd8vrQhYu8od-Ro,11682
+docket/docket.py,sha256=KJxgiyOskEHsRQOmfgLpJCYDNNleHI-vEKK3uBPL_K8,21420
+docket/execution.py,sha256=MXrLYjvhPzwqjjQx8CoDCbLqSyT_GI7kqGJtfKiemkY,13790
+docket/instrumentation.py,sha256=bZlGA02JoJcY0J1WGm5_qXDfY0AXKr0ZLAYu67wkeKY,4611
+docket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+docket/tasks.py,sha256=RIlSM2omh-YDwVnCz6M5MtmK8T_m_s1w2OlRRxDUs6A,1437
+docket/worker.py,sha256=DzqwMWdMuieVNt6J4_99zER7dGoVjVBPS4NlmQJXNdc,27347
+pydocket-0.6.2.dist-info/METADATA,sha256=eFFi2KLLfn9-4fv1tWRSlT4o83R0B0n3NUleZttA86s,13092
+pydocket-0.6.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydocket-0.6.2.dist-info/entry_points.txt,sha256=4WOk1nUlBsUT5O3RyMci2ImuC5XFswuopElYcLHtD5k,47
+pydocket-0.6.2.dist-info/licenses/LICENSE,sha256=YuVWU_ZXO0K_k2FG8xWKe5RGxV24AhJKTvQmKfqXuyk,1087
+pydocket-0.6.2.dist-info/RECORD,,
pydocket-0.6.0.dist-info/RECORD
DELETED
@@ -1,16 +0,0 @@
-docket/__init__.py,sha256=124XWbyQQHO1lhCoLQ-oheZnu4vNDHIaq4Whb7z3ogI,831
-docket/__main__.py,sha256=Vkuh7aJ-Bl7QVpVbbkUksAd_hn05FiLmWbc-8kbhZQ4,34
-docket/annotations.py,sha256=I00zB32BYWOQSNEjjCkc5n5DwTnT277I_BRYUJPS7w4,1474
-docket/cli.py,sha256=OWql6QFthSbvRCGkIg-ufo26F48z0eCmzRXJYOdyAEc,20309
-docket/dependencies.py,sha256=pkjseBZjdSpgW9g2H4cZ_RXIRZ2ZfdngBCXJGUcbmao,10052
-docket/docket.py,sha256=KJxgiyOskEHsRQOmfgLpJCYDNNleHI-vEKK3uBPL_K8,21420
-docket/execution.py,sha256=da1uYxSNAfz5FuNyCzX4I_PglHiMaf1oEv--K5TkjXc,13297
-docket/instrumentation.py,sha256=bZlGA02JoJcY0J1WGm5_qXDfY0AXKr0ZLAYu67wkeKY,4611
-docket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docket/tasks.py,sha256=RIlSM2omh-YDwVnCz6M5MtmK8T_m_s1w2OlRRxDUs6A,1437
-docket/worker.py,sha256=3sMcwGfSJ0Q4y5AuaqdgiGniDhJ21nM2PQmroJi_Q-A,26430
-pydocket-0.6.0.dist-info/METADATA,sha256=ktk1hqLmP_VSqYmdRtHFDPbEeRQD1J66ZAHEqaDXejk,13092
-pydocket-0.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydocket-0.6.0.dist-info/entry_points.txt,sha256=4WOk1nUlBsUT5O3RyMci2ImuC5XFswuopElYcLHtD5k,47
-pydocket-0.6.0.dist-info/licenses/LICENSE,sha256=YuVWU_ZXO0K_k2FG8xWKe5RGxV24AhJKTvQmKfqXuyk,1087
-pydocket-0.6.0.dist-info/RECORD,,
{pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/WHEEL
File without changes

{pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/entry_points.txt
File without changes

{pydocket-0.6.0.dist-info → pydocket-0.6.2.dist-info}/licenses/LICENSE
File without changes