pydocket 0.6.0__tar.gz → 0.6.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pydocket might be problematic.
Files changed (54)
  1. {pydocket-0.6.0 → pydocket-0.6.2}/PKG-INFO +1 -1
  2. {pydocket-0.6.0 → pydocket-0.6.2}/examples/find_and_flood.py +1 -1
  3. {pydocket-0.6.0 → pydocket-0.6.2}/pyproject.toml +3 -0
  4. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/__init__.py +11 -9
  5. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/annotations.py +7 -0
  6. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/cli.py +8 -0
  7. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/dependencies.py +41 -1
  8. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/execution.py +12 -3
  9. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/worker.py +87 -55
  10. {pydocket-0.6.0 → pydocket-0.6.2}/tests/test_dependencies.py +48 -1
  11. {pydocket-0.6.0 → pydocket-0.6.2}/tests/test_fundamentals.py +64 -0
  12. {pydocket-0.6.0 → pydocket-0.6.2}/tests/test_worker.py +38 -0
  13. {pydocket-0.6.0 → pydocket-0.6.2}/.cursor/rules/general.mdc +0 -0
  14. {pydocket-0.6.0 → pydocket-0.6.2}/.cursor/rules/python-style.mdc +0 -0
  15. {pydocket-0.6.0 → pydocket-0.6.2}/.github/codecov.yml +0 -0
  16. {pydocket-0.6.0 → pydocket-0.6.2}/.github/workflows/chaos.yml +0 -0
  17. {pydocket-0.6.0 → pydocket-0.6.2}/.github/workflows/ci.yml +0 -0
  18. {pydocket-0.6.0 → pydocket-0.6.2}/.github/workflows/publish.yml +0 -0
  19. {pydocket-0.6.0 → pydocket-0.6.2}/.gitignore +0 -0
  20. {pydocket-0.6.0 → pydocket-0.6.2}/.pre-commit-config.yaml +0 -0
  21. {pydocket-0.6.0 → pydocket-0.6.2}/LICENSE +0 -0
  22. {pydocket-0.6.0 → pydocket-0.6.2}/README.md +0 -0
  23. {pydocket-0.6.0 → pydocket-0.6.2}/chaos/README.md +0 -0
  24. {pydocket-0.6.0 → pydocket-0.6.2}/chaos/__init__.py +0 -0
  25. {pydocket-0.6.0 → pydocket-0.6.2}/chaos/driver.py +0 -0
  26. {pydocket-0.6.0 → pydocket-0.6.2}/chaos/producer.py +0 -0
  27. {pydocket-0.6.0 → pydocket-0.6.2}/chaos/run +0 -0
  28. {pydocket-0.6.0 → pydocket-0.6.2}/chaos/tasks.py +0 -0
  29. {pydocket-0.6.0 → pydocket-0.6.2}/examples/__init__.py +0 -0
  30. {pydocket-0.6.0 → pydocket-0.6.2}/examples/common.py +0 -0
  31. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/__main__.py +0 -0
  32. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/docket.py +0 -0
  33. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/instrumentation.py +0 -0
  34. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/py.typed +0 -0
  35. {pydocket-0.6.0 → pydocket-0.6.2}/src/docket/tasks.py +0 -0
  36. {pydocket-0.6.0 → pydocket-0.6.2}/telemetry/.gitignore +0 -0
  37. {pydocket-0.6.0 → pydocket-0.6.2}/telemetry/start +0 -0
  38. {pydocket-0.6.0 → pydocket-0.6.2}/telemetry/stop +0 -0
  39. {pydocket-0.6.0 → pydocket-0.6.2}/tests/__init__.py +0 -0
  40. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/__init__.py +0 -0
  41. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/conftest.py +0 -0
  42. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_module.py +0 -0
  43. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_parsing.py +0 -0
  44. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_snapshot.py +0 -0
  45. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_striking.py +0 -0
  46. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_tasks.py +0 -0
  47. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_version.py +0 -0
  48. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_worker.py +0 -0
  49. {pydocket-0.6.0 → pydocket-0.6.2}/tests/cli/test_workers.py +0 -0
  50. {pydocket-0.6.0 → pydocket-0.6.2}/tests/conftest.py +0 -0
  51. {pydocket-0.6.0 → pydocket-0.6.2}/tests/test_docket.py +0 -0
  52. {pydocket-0.6.0 → pydocket-0.6.2}/tests/test_instrumentation.py +0 -0
  53. {pydocket-0.6.0 → pydocket-0.6.2}/tests/test_striking.py +0 -0
  54. {pydocket-0.6.0 → pydocket-0.6.2}/uv.lock +0 -0
{pydocket-0.6.0 → pydocket-0.6.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.6.0
+Version: 0.6.2
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues

{pydocket-0.6.0 → pydocket-0.6.2}/examples/find_and_flood.py

@@ -16,7 +16,7 @@ async def find(
     perpetual: Perpetual = Perpetual(every=timedelta(seconds=3), automatic=True),
 ) -> None:
     for i in range(1, 10 + 1):
-        await docket.add(flood, key=str(i))(i)
+        await docket.add(flood, key=f"item-{i}")(i)
 
 
 async def flood(

{pydocket-0.6.0 → pydocket-0.6.2}/pyproject.toml

@@ -64,6 +64,9 @@ source = "vcs"
 [tool.hatch.build.targets.wheel]
 packages = ["src/docket"]
 
+[tool.ruff]
+target-version = "py312"
+
 [tool.pytest.ini_options]
 addopts = [
     "--numprocesses=logical",

{pydocket-0.6.0 → pydocket-0.6.2}/src/docket/__init__.py

@@ -17,6 +17,7 @@ from .dependencies import (
     ExponentialRetry,
     Perpetual,
     Retry,
+    TaskArgument,
     TaskKey,
     TaskLogger,
     Timeout,
@@ -26,19 +27,20 @@ from .execution import Execution
 from .worker import Worker
 
 __all__ = [
-    "Docket",
-    "Worker",
-    "Execution",
+    "__version__",
     "CurrentDocket",
-    "CurrentWorker",
     "CurrentExecution",
-    "TaskKey",
-    "TaskLogger",
-    "Retry",
+    "CurrentWorker",
+    "Depends",
+    "Docket",
+    "Execution",
     "ExponentialRetry",
     "Logged",
     "Perpetual",
+    "Retry",
+    "TaskArgument",
+    "TaskKey",
+    "TaskLogger",
     "Timeout",
-    "Depends",
-    "__version__",
+    "Worker",
 ]

{pydocket-0.6.0 → pydocket-0.6.2}/src/docket/annotations.py

@@ -4,8 +4,14 @@ from typing import Any, Iterable, Mapping, Self
 
 
 class Annotation(abc.ABC):
+    _cache: dict[tuple[type[Self], inspect.Signature], Mapping[str, Self]] = {}
+
     @classmethod
     def annotated_parameters(cls, signature: inspect.Signature) -> Mapping[str, Self]:
+        key = (cls, signature)
+        if key in cls._cache:
+            return cls._cache[key]
+
         annotated: dict[str, Self] = {}
 
         for param_name, param in signature.parameters.items():
@@ -23,6 +29,7 @@ class Annotation(abc.ABC):
             elif isinstance(arg_type, type) and issubclass(arg_type, cls):
                 annotated[param_name] = arg_type()
 
+        cls._cache[key] = annotated
         return annotated
 
 
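The new cache keys on the (class, signature) pair, which works because inspect.Signature objects are hashable. A minimal standalone sketch of the same memoization pattern (all names here are illustrative, not docket's):

    import inspect

    # Signature objects hash by their parameters, so they can key a plain dict.
    _cache: dict[inspect.Signature, list[str]] = {}

    def parameter_names(signature: inspect.Signature) -> list[str]:
        if signature in _cache:  # cache hit: skip re-walking the parameters
            return _cache[signature]
        names = list(signature.parameters)  # stand-in for the real annotation scan
        _cache[signature] = names
        return names

    def greet(name: str, punctuation: str = "!") -> str:
        return f"Hello, {name}{punctuation}"

    signature = inspect.signature(greet)
    assert parameter_names(signature) is parameter_names(signature)  # served from cache
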
{pydocket-0.6.0 → pydocket-0.6.2}/src/docket/cli.py

@@ -245,6 +245,13 @@ def worker(
             envvar="DOCKET_WORKER_SCHEDULING_RESOLUTION",
         ),
     ] = timedelta(milliseconds=250),
+    schedule_automatic_tasks: Annotated[
+        bool,
+        typer.Option(
+            "--schedule-automatic-tasks",
+            help="Schedule automatic tasks",
+        ),
+    ] = True,
     until_finished: Annotated[
         bool,
         typer.Option(
@@ -270,6 +277,7 @@ def worker(
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
             scheduling_resolution=scheduling_resolution,
+            schedule_automatic_tasks=schedule_automatic_tasks,
             until_finished=until_finished,
             metrics_port=metrics_port,
             tasks=tasks,
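
The new CLI option is threaded straight through to the Worker; the same switch is available in code, as the new test at the bottom of this diff exercises (here docket stands for an existing Docket instance):

    # Skip the up-front scheduling of automatic perpetual tasks for one worker
    async with Worker(docket, schedule_automatic_tasks=False) as worker:
        await worker.run_until_finished()
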
{pydocket-0.6.0 → pydocket-0.6.2}/src/docket/dependencies.py

@@ -79,6 +79,22 @@ def TaskKey() -> str:
     return cast(str, _TaskKey())
 
 
+class _TaskArgument(Dependency):
+    parameter: str | None
+
+    def __init__(self, parameter: str | None = None) -> None:
+        self.parameter = parameter
+
+    async def __aenter__(self) -> Any:
+        assert self.parameter is not None
+        execution = self.execution.get()
+        return execution.get_argument(self.parameter)
+
+
+def TaskArgument(parameter: str | None = None) -> Any:
+    return cast(Any, _TaskArgument(parameter))
+
+
 class _TaskLogger(Dependency):
     async def __aenter__(self) -> logging.LoggerAdapter[logging.Logger]:
         execution = self.execution.get()
@@ -275,6 +291,11 @@ class _Depends(Dependency, Generic[R]):
             parameters = get_dependency_parameters(function)
 
             for parameter, dependency in parameters.items():
+                # Special case for TaskArguments, they are "magical" and infer the parameter
+                # they refer to from the parameter name (unless otherwise specified)
+                if isinstance(dependency, _TaskArgument) and not dependency.parameter:
+                    dependency.parameter = parameter
+
                 arguments[parameter] = await stack.enter_async_context(dependency)
 
             return arguments
@@ -338,6 +359,12 @@ def validate_dependencies(function: TaskFunction) -> None:
         )
 
 
+class FailedDependency:
+    def __init__(self, parameter: str, error: Exception) -> None:
+        self.parameter = parameter
+        self.error = error
+
+
 @asynccontextmanager
 async def resolved_dependencies(
     worker: "Worker", execution: Execution
@@ -361,6 +388,19 @@ async def resolved_dependencies(
                 arguments[parameter] = kwargs[parameter]
                 continue
 
-            arguments[parameter] = await stack.enter_async_context(dependency)
+            # Special case for TaskArguments, they are "magical" and infer the parameter
+            # they refer to from the parameter name (unless otherwise specified). At
+            # the top-level task function call, it doesn't make sense to specify one
+            # _without_ a parameter name, so we'll call that a failed dependency.
+            if isinstance(dependency, _TaskArgument) and not dependency.parameter:
+                arguments[parameter] = FailedDependency(
+                    parameter, ValueError("No parameter name specified")
+                )
+                continue
+
+            try:
+                arguments[parameter] = await stack.enter_async_context(dependency)
+            except Exception as error:
+                arguments[parameter] = FailedDependency(parameter, error)
 
     yield arguments
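
Taken together, these hunks introduce TaskArgument: a dependency that hands a function one of the task's own arguments, inferring which one from the parameter name unless it is named explicitly. A hedged usage sketch (the function and parameter names here are illustrative, not docket's; the pattern mirrors the new tests further down):

    from docket import Depends, TaskArgument

    # A dependency can receive the task's own "order_id" argument by naming
    # its parameter the same way...
    async def lookup(order_id: str = TaskArgument()) -> str:
        return order_id

    async def process_order(
        order_id: str,
        same_id: str = TaskArgument("order_id"),  # ...or by naming it explicitly
        looked_up: str = Depends(lookup),
    ) -> None:
        assert order_id == same_id == looked_up
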
{pydocket-0.6.0 → pydocket-0.6.2}/src/docket/execution.py

@@ -7,7 +7,7 @@ from typing import Any, Awaitable, Callable, Hashable, Literal, Mapping, Self, c
 
 import cloudpickle  # type: ignore[import]
 
-from opentelemetry import propagate
+from opentelemetry import trace, propagate
 import opentelemetry.context
 
 from .annotations import Logged
@@ -83,13 +83,17 @@ class Execution:
             "docket.attempt": self.attempt,
         }
 
+    def get_argument(self, parameter: str) -> Any:
+        signature = get_signature(self.function)
+        bound_args = signature.bind(*self.args, **self.kwargs)
+        return bound_args.arguments[parameter]
+
     def call_repr(self) -> str:
         arguments: list[str] = []
-        signature = get_signature(self.function)
         function_name = self.function.__name__
 
+        signature = get_signature(self.function)
         logged_parameters = Logged.annotated_parameters(signature)
-
         parameter_names = list(signature.parameters.keys())
 
         for i, argument in enumerate(self.args[: len(parameter_names)]):
@@ -107,6 +111,11 @@
 
         return f"{function_name}({', '.join(arguments)}){{{self.key}}}"
 
+    def incoming_span_links(self) -> list[trace.Link]:
+        initiating_span = trace.get_current_span(self.trace_context)
+        initiating_context = initiating_span.get_span_context()
+        return [trace.Link(initiating_context)] if initiating_context.is_valid else []
+
 
 class Operator(enum.StrEnum):
     EQUAL = "=="
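
The new get_argument helper leans entirely on the standard library: Signature.bind maps the stored args and kwargs back onto parameter names, however the caller mixed positional and keyword forms. A quick stdlib-only illustration (example, a, and b are our names):

    import inspect

    def example(a: int, b: int = 2) -> None: ...

    # bind() normalizes positional and keyword arguments onto parameter names,
    # which is exactly what get_argument needs to look one up by name
    bound = inspect.signature(example).bind(1, b=3)
    assert bound.arguments["a"] == 1
    assert bound.arguments["b"] == 3
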
{pydocket-0.6.0 → pydocket-0.6.2}/src/docket/worker.py

@@ -1,6 +1,7 @@
 import asyncio
 import logging
 import sys
+import time
 from datetime import datetime, timedelta, timezone
 from types import TracebackType
 from typing import (
@@ -21,6 +22,7 @@ from docket.execution import get_signature
 
 from .dependencies import (
     Dependency,
+    FailedDependency,
     Perpetual,
     Retry,
     Timeout,
@@ -70,6 +72,7 @@ class Worker:
     reconnection_delay: timedelta
     minimum_check_interval: timedelta
     scheduling_resolution: timedelta
+    schedule_automatic_tasks: bool
 
     def __init__(
         self,
@@ -80,6 +83,7 @@
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=250),
         scheduling_resolution: timedelta = timedelta(milliseconds=250),
+        schedule_automatic_tasks: bool = True,
     ) -> None:
         self.docket = docket
         self.name = name or f"worker:{uuid4()}"
@@ -88,6 +92,7 @@
         self.reconnection_delay = reconnection_delay
         self.minimum_check_interval = minimum_check_interval
         self.scheduling_resolution = scheduling_resolution
+        self.schedule_automatic_tasks = schedule_automatic_tasks
 
     async def __aenter__(self) -> Self:
         self._heartbeat_task = asyncio.create_task(self._heartbeat())
@@ -133,6 +138,7 @@
         reconnection_delay: timedelta = timedelta(seconds=5),
         minimum_check_interval: timedelta = timedelta(milliseconds=100),
         scheduling_resolution: timedelta = timedelta(milliseconds=250),
+        schedule_automatic_tasks: bool = True,
         until_finished: bool = False,
         metrics_port: int | None = None,
         tasks: list[str] = ["docket.tasks:standard_tasks"],
@@ -150,6 +156,7 @@
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
             scheduling_resolution=scheduling_resolution,
+            schedule_automatic_tasks=schedule_automatic_tasks,
         ) as worker:
             if until_finished:
                 await worker.run_until_finished()
@@ -219,7 +226,8 @@
     async def _worker_loop(self, redis: Redis, forever: bool = False):
         worker_stopping = asyncio.Event()
 
-        await self._schedule_all_automatic_perpetual_tasks()
+        if self.schedule_automatic_tasks:
+            await self._schedule_all_automatic_perpetual_tasks()
 
         scheduler_task = asyncio.create_task(
             self._scheduler_loop(redis, worker_stopping)
@@ -228,8 +236,10 @@
         active_tasks: dict[asyncio.Task[None], RedisMessageID] = {}
         available_slots = self.concurrency
 
+        log_context = self._log_context()
+
         async def check_for_work() -> bool:
-            logger.debug("Checking for work", extra=self._log_context())
+            logger.debug("Checking for work", extra=log_context)
             async with redis.pipeline() as pipeline:
                 pipeline.xlen(self.docket.stream_key)
                 pipeline.zcard(self.docket.queue_key)
@@ -239,7 +249,7 @@
             return stream_len > 0 or queue_len > 0
 
         async def get_redeliveries(redis: Redis) -> RedisReadGroupResponse:
-            logger.debug("Getting redeliveries", extra=self._log_context())
+            logger.debug("Getting redeliveries", extra=log_context)
             _, redeliveries, *_ = await redis.xautoclaim(
                 name=self.docket.stream_key,
                 groupname=self.docket.worker_group_name,
@@ -251,7 +261,7 @@
             return [(b"__redelivery__", redeliveries)]
 
         async def get_new_deliveries(redis: Redis) -> RedisReadGroupResponse:
-            logger.debug("Getting new deliveries", extra=self._log_context())
+            logger.debug("Getting new deliveries", extra=log_context)
             return await redis.xreadgroup(
                 groupname=self.docket.worker_group_name,
                 consumername=self.name,
@@ -261,21 +271,18 @@
             )
 
         def start_task(message_id: RedisMessageID, message: RedisMessage) -> bool:
-            if not message:  # pragma: no cover
-                return False
-
             function_name = message[b"function"].decode()
             if not (function := self.docket.tasks.get(function_name)):
                 logger.warning(
                     "Task function %r not found",
                     function_name,
-                    extra=self._log_context(),
+                    extra=log_context,
                 )
                 return False
 
             execution = Execution.from_message(function, message)
 
-            task = asyncio.create_task(self._execute(execution))
+            task = asyncio.create_task(self._execute(execution), name=execution.key)
             active_tasks[task] = message_id
 
             nonlocal available_slots
@@ -283,8 +290,15 @@
 
             return True
 
+        async def process_completed_tasks() -> None:
+            completed_tasks = {task for task in active_tasks if task.done()}
+            for task in completed_tasks:
+                message_id = active_tasks.pop(task)
+                await task
+                await ack_message(redis, message_id)
+
         async def ack_message(redis: Redis, message_id: RedisMessageID) -> None:
-            logger.debug("Acknowledging message", extra=self._log_context())
+            logger.debug("Acknowledging message", extra=log_context)
             async with redis.pipeline() as pipeline:
                 pipeline.xack(
                     self.docket.stream_key,
@@ -297,13 +311,6 @@
                 )
                 await pipeline.execute()
 
-        async def process_completed_tasks() -> None:
-            completed_tasks = {task for task in active_tasks if task.done()}
-            for task in completed_tasks:
-                message_id = active_tasks.pop(task)
-                await task
-                await ack_message(redis, message_id)
-
         has_work: bool = True
 
         try:
@@ -319,6 +326,9 @@
                 for source in [get_redeliveries, get_new_deliveries]:
                     for _, messages in await source(redis):
                         for message_id, message in messages:
+                            if not message:  # pragma: no cover
+                                continue
+
                             if not start_task(message_id, message):
                                 await self._delete_known_task(redis, message)
                                 await ack_message(redis, message_id)
@@ -334,7 +344,7 @@
             logger.info(
                 "Shutdown requested, finishing %d active tasks...",
                 len(active_tasks),
-                extra=self._log_context(),
+                extra=log_context,
             )
         finally:
             if active_tasks:
@@ -401,9 +411,11 @@
 
         total_work: int = sys.maxsize
 
+        log_context = self._log_context()
+
         while not worker_stopping.is_set() or total_work:
             try:
-                logger.debug("Scheduling due tasks", extra=self._log_context())
+                logger.debug("Scheduling due tasks", extra=log_context)
                 total_work, due_work = await stream_due_tasks(
                     keys=[self.docket.queue_key, self.docket.stream_key],
                     args=[datetime.now(timezone.utc).timestamp(), self.docket.name],
@@ -416,18 +428,18 @@
                     total_work,
                     self.docket.queue_key,
                     self.docket.stream_key,
-                    extra=self._log_context(),
+                    extra=log_context,
                 )
             except Exception:  # pragma: no cover
                 logger.exception(
                     "Error in scheduler loop",
                     exc_info=True,
-                    extra=self._log_context(),
+                    extra=log_context,
                 )
             finally:
                 await asyncio.sleep(self.scheduling_resolution.total_seconds())
 
-        logger.debug("Scheduler loop finished", extra=self._log_context())
+        logger.debug("Scheduler loop finished", extra=log_context)
 
     async def _schedule_all_automatic_perpetual_tasks(self) -> None:
         async with self.docket.redis() as redis:
@@ -469,38 +481,30 @@
         log_context = {**self._log_context(), **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}
 
-        arrow = "↬" if execution.attempt > 1 else "↪"
         call = execution.call_repr()
 
         if self.docket.strike_list.is_stricken(execution):
             async with self.docket.redis() as redis:
                 await self._delete_known_task(redis, execution)
 
-            arrow = "🗙"
-            logger.warning("%s %s", arrow, call, extra=log_context)
+            logger.warning("🗙 %s", call, extra=log_context)
             TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})
             return
 
         if execution.key in self._execution_counts:
             self._execution_counts[execution.key] += 1
 
-        initiating_span = trace.get_current_span(execution.trace_context)
-        initiating_context = initiating_span.get_span_context()
-        links = [trace.Link(initiating_context)] if initiating_context.is_valid else []
-
-        start = datetime.now(timezone.utc)
-        punctuality = start - execution.when
-        log_context = {
-            **log_context,
-            "punctuality": punctuality.total_seconds(),
-        }
-        duration = timedelta(0)
+        start = time.time()
+        punctuality = start - execution.when.timestamp()
+        log_context = {**log_context, "punctuality": punctuality}
+        duration = 0.0
 
         TASKS_STARTED.add(1, counter_labels)
         TASKS_RUNNING.add(1, counter_labels)
-        TASK_PUNCTUALITY.record(punctuality.total_seconds(), counter_labels)
+        TASK_PUNCTUALITY.record(punctuality, counter_labels)
 
-        logger.info("%s [%s] %s", arrow, punctuality, call, extra=log_context)
+        arrow = "↬" if execution.attempt > 1 else "↪"
+        logger.info("%s [%s] %s", arrow, ms(punctuality), call, extra=log_context)
 
         with tracer.start_as_current_span(
             execution.function.__name__,
@@ -510,7 +514,7 @@
                 **execution.specific_labels(),
                 "code.function.name": execution.function.__name__,
             },
-            links=links,
+            links=execution.incoming_span_links(),
         ):
             async with resolved_dependencies(self, execution) as dependencies:
                 # Preemptively reschedule the perpetual task for the future, or clear
@@ -523,6 +527,23 @@
                     await self._delete_known_task(redis, execution)
 
                 try:
+                    dependency_failures = {
+                        k: v
+                        for k, v in dependencies.items()
+                        if isinstance(v, FailedDependency)
+                    }
+                    if dependency_failures:
+                        raise ExceptionGroup(
+                            (
+                                "Failed to resolve dependencies for parameter(s): "
+                                + ", ".join(dependency_failures.keys())
+                            ),
+                            [
+                                dependency.error
+                                for dependency in dependency_failures.values()
+                            ],
+                        )
+
                     if timeout := get_single_dependency_of_type(dependencies, Timeout):
                         await self._run_function_with_timeout(
                             execution, dependencies, timeout
@@ -536,31 +557,35 @@
                         },
                     )
 
+                    duration = log_context["duration"] = time.time() - start
                     TASKS_SUCCEEDED.add(1, counter_labels)
-                    duration = datetime.now(timezone.utc) - start
-                    log_context["duration"] = duration.total_seconds()
+
                     rescheduled = await self._perpetuate_if_requested(
-                        execution, dependencies, duration
+                        execution, dependencies, timedelta(seconds=duration)
                     )
+
                     arrow = "↫" if rescheduled else "↩"
-                    logger.info("%s [%s] %s", arrow, duration, call, extra=log_context)
+                    logger.info(
+                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
+                    )
                 except Exception:
+                    duration = log_context["duration"] = time.time() - start
                     TASKS_FAILED.add(1, counter_labels)
-                    duration = datetime.now(timezone.utc) - start
-                    log_context["duration"] = duration.total_seconds()
+
                     retried = await self._retry_if_requested(execution, dependencies)
                     if not retried:
                         retried = await self._perpetuate_if_requested(
-                            execution, dependencies, duration
+                            execution, dependencies, timedelta(seconds=duration)
                         )
+
                     arrow = "↫" if retried else "↩"
                     logger.exception(
-                        "%s [%s] %s", arrow, duration, call, extra=log_context
+                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
                    )
                 finally:
                     TASKS_RUNNING.add(-1, counter_labels)
                     TASKS_COMPLETED.add(1, counter_labels)
-                    TASK_DURATION.record(duration.total_seconds(), counter_labels)
+                    TASK_DURATION.record(duration, counter_labels)
 
     async def _run_function_with_timeout(
         self,
@@ -603,15 +628,15 @@
         if not retry:
             return False
 
-        if retry.attempts is None or execution.attempt < retry.attempts:
-            execution.when = datetime.now(timezone.utc) + retry.delay
-            execution.attempt += 1
-            await self.docket.schedule(execution)
+        if retry.attempts is not None and execution.attempt >= retry.attempts:
+            return False
 
-            TASKS_RETRIED.add(1, {**self.labels(), **execution.specific_labels()})
-            return True
+        execution.when = datetime.now(timezone.utc) + retry.delay
+        execution.attempt += 1
+        await self.docket.schedule(execution)
 
-        return False
+        TASKS_RETRIED.add(1, {**self.labels(), **execution.specific_labels()})
+        return True
 
     async def _perpetuate_if_requested(
         self,
@@ -710,3 +735,10 @@
                 exc_info=True,
                 extra=self._log_context(),
             )
+
+
+def ms(seconds: float) -> str:
+    if seconds < 100:
+        return f"{seconds * 1000:6.0f}ms"
+    else:
+        return f"{seconds:6.0f}s "
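
The new ms helper keeps the arrow log lines aligned by rendering punctuality and duration in a fixed-width column: values under 100 seconds as milliseconds, everything else as whole seconds. For example (the inputs are ours; the expected strings follow the 6.0f format above):

    from docket.worker import ms

    assert ms(0.25) == "   250ms"    # under 100s: right-aligned milliseconds
    assert ms(3600.0) == "  3600s "  # 100s and up: whole seconds
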
{pydocket-0.6.0 → pydocket-0.6.2}/tests/test_dependencies.py

@@ -1,7 +1,9 @@
+import logging
+
 import pytest
 
 from docket import CurrentDocket, CurrentWorker, Docket, Worker
-from docket.dependencies import Retry
+from docket.dependencies import Depends, Retry, TaskArgument
 
 
 async def test_dependencies_may_be_duplicated(docket: Docket, worker: Worker):
@@ -91,3 +93,48 @@ async def test_user_provide_retries_are_used(docket: Docket, worker: Worker):
     await worker.run_until_finished()
 
     assert calls == 2
+
+
+async def test_dependencies_error_for_missing_task_argument(
+    docket: Docket, worker: Worker, caplog: pytest.LogCaptureFixture
+):
+    """A task will fail when asking for a missing task argument"""
+
+    async def dependency_one(nope: list[str] = TaskArgument()) -> list[str]:
+        raise NotImplementedError("This should not be called")  # pragma: no cover
+
+    async def dependent_task(
+        a: list[str],
+        b: list[str] = TaskArgument("a"),
+        c: list[str] = Depends(dependency_one),
+    ) -> None:
+        raise NotImplementedError("This should not be called")  # pragma: no cover
+
+    await docket.add(dependent_task)(a=["hello", "world"])
+
+    await worker.run_until_finished()
+
+    with caplog.at_level(logging.ERROR):
+        await worker.run_until_finished()
+
+    assert "Failed to resolve dependencies for parameter(s): c" in caplog.text
+    assert "ExceptionGroup" in caplog.text
+    assert "KeyError: 'nope'" in caplog.text
+
+
+async def test_a_task_argument_cannot_ask_for_itself(
+    docket: Docket, worker: Worker, caplog: pytest.LogCaptureFixture
+):
+    """A task argument cannot ask for itself"""
+
+    # This task would be nonsense, because it's asking for itself.
+    async def dependent_task(a: list[str] = TaskArgument()) -> None:
+        raise NotImplementedError("This should not be called")  # pragma: no cover
+
+    await docket.add(dependent_task)()
+
+    with caplog.at_level(logging.ERROR):
+        await worker.run_until_finished()
+
+    assert "Failed to resolve dependencies for parameter(s): a" in caplog.text
+    assert "ValueError: No parameter name specified" in caplog.text

{pydocket-0.6.0 → pydocket-0.6.2}/tests/test_fundamentals.py

@@ -27,6 +27,7 @@ from docket import (
     Logged,
     Perpetual,
     Retry,
+    TaskArgument,
     TaskKey,
     TaskLogger,
     Timeout,
@@ -1383,3 +1384,66 @@ async def test_dependencies_can_ask_for_docket_dependencies(
     await docket.add(dependent_task)()
 
     await worker.run_until_finished()
+
+
+async def test_dependency_failures_are_task_failures(
+    docket: Docket, worker: Worker, caplog: pytest.LogCaptureFixture
+):
+    """A task dependency failure will cause the task to fail"""
+
+    called: bool = False
+
+    async def dependency_one() -> str:
+        raise ValueError("this one is bad")
+
+    async def dependency_two() -> str:
+        raise ValueError("and so is this one")
+
+    async def dependent_task(
+        a: str = Depends(dependency_one),
+        b: str = Depends(dependency_two),
+    ) -> None:
+        nonlocal called
+        called = True  # pragma: no cover
+
+    await docket.add(dependent_task)()
+
+    with caplog.at_level(logging.ERROR):
+        await worker.run_until_finished()
+
+    assert not called
+
+    assert "Failed to resolve dependencies for parameter(s): a, b" in caplog.text
+    assert "ValueError: this one is bad" in caplog.text
+    assert "ValueError: and so is this one" in caplog.text
+
+
+async def test_dependencies_can_ask_for_task_arguments(docket: Docket, worker: Worker):
+    """A task dependency can ask for a task argument"""
+
+    called = 0
+
+    async def dependency_one(a: list[str] = TaskArgument()) -> list[str]:
+        return a
+
+    async def dependency_two(another_name: list[str] = TaskArgument("a")) -> list[str]:
+        return another_name
+
+    async def dependent_task(
+        a: list[str],
+        b: list[str] = TaskArgument("a"),
+        c: list[str] = Depends(dependency_one),
+        d: list[str] = Depends(dependency_two),
+    ) -> None:
+        assert a is b
+        assert a is c
+        assert a is d
+
+        nonlocal called
+        called += 1
+
+    await docket.add(dependent_task)(a=["hello", "world"])
+
+    await worker.run_until_finished()
+
+    assert called == 1

{pydocket-0.6.0 → pydocket-0.6.2}/tests/test_worker.py

@@ -13,6 +13,7 @@ from docket import CurrentWorker, Docket, Worker
 from docket.dependencies import CurrentDocket, Perpetual
 from docket.execution import Execution
 from docket.tasks import standard_tasks
+from docket.worker import ms
 
 
 async def test_worker_acknowledges_messages(
@@ -475,3 +476,40 @@ async def test_worker_can_exit_from_long_horizon_perpetual_tasks(
     await worker.run_at_most({"my-key": 1})
 
     assert calls == 1
+
+
+def test_formatting_durations():
+    assert ms(0.000001) == "     0ms"
+    assert ms(0.000010) == "     0ms"
+    assert ms(0.000100) == "     0ms"
+    assert ms(0.001000) == "     1ms"
+    assert ms(0.010000) == "    10ms"
+    assert ms(0.100000) == "   100ms"
+    assert ms(1.000000) == "  1000ms"
+    assert ms(10.00000) == " 10000ms"
+    assert ms(100.0000) == "   100s "
+    assert ms(1000.000) == "  1000s "
+    assert ms(10000.00) == " 10000s "
+    assert ms(100000.0) == "100000s "
+
+
+async def test_worker_can_be_told_to_skip_automatic_tasks(docket: Docket):
+    """A worker can be told to skip automatic tasks"""
+
+    called = False
+
+    async def perpetual_task(
+        perpetual: Perpetual = Perpetual(
+            every=timedelta(milliseconds=50), automatic=True
+        ),
+    ):
+        nonlocal called
+        called = True  # pragma: no cover
+
+    docket.register(perpetual_task)
+
+    # Without the flag, this would hang because the task would always be scheduled
+    async with Worker(docket, schedule_automatic_tasks=False) as worker:
+        await worker.run_until_finished()
+
+    assert not called