pydocket 0.1.4__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pydocket might be problematic; see the registry listing for more details.
Files changed (51)
  1. {pydocket-0.1.4 → pydocket-0.2.0}/PKG-INFO +1 -1
  2. {pydocket-0.1.4 → pydocket-0.2.0}/chaos/driver.py +18 -18
  3. {pydocket-0.1.4 → pydocket-0.2.0}/pyproject.toml +1 -3
  4. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/cli.py +8 -0
  5. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/dependencies.py +8 -9
  6. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/docket.py +37 -40
  7. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/execution.py +19 -8
  8. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/instrumentation.py +53 -0
  9. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/tasks.py +11 -9
  10. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/worker.py +71 -54
  11. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_snapshot.py +1 -1
  12. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_tasks.py +24 -10
  13. {pydocket-0.1.4 → pydocket-0.2.0}/tests/conftest.py +9 -17
  14. {pydocket-0.1.4 → pydocket-0.2.0}/tests/test_instrumentation.py +134 -28
  15. {pydocket-0.1.4 → pydocket-0.2.0}/uv.lock +6 -7
  16. {pydocket-0.1.4 → pydocket-0.2.0}/.cursor/rules/general.mdc +0 -0
  17. {pydocket-0.1.4 → pydocket-0.2.0}/.cursor/rules/python-style.mdc +0 -0
  18. {pydocket-0.1.4 → pydocket-0.2.0}/.github/codecov.yml +0 -0
  19. {pydocket-0.1.4 → pydocket-0.2.0}/.github/workflows/chaos.yml +0 -0
  20. {pydocket-0.1.4 → pydocket-0.2.0}/.github/workflows/ci.yml +0 -0
  21. {pydocket-0.1.4 → pydocket-0.2.0}/.github/workflows/publish.yml +0 -0
  22. {pydocket-0.1.4 → pydocket-0.2.0}/.gitignore +0 -0
  23. {pydocket-0.1.4 → pydocket-0.2.0}/.pre-commit-config.yaml +0 -0
  24. {pydocket-0.1.4 → pydocket-0.2.0}/LICENSE +0 -0
  25. {pydocket-0.1.4 → pydocket-0.2.0}/README.md +0 -0
  26. {pydocket-0.1.4 → pydocket-0.2.0}/chaos/README.md +0 -0
  27. {pydocket-0.1.4 → pydocket-0.2.0}/chaos/__init__.py +0 -0
  28. {pydocket-0.1.4 → pydocket-0.2.0}/chaos/producer.py +0 -0
  29. {pydocket-0.1.4 → pydocket-0.2.0}/chaos/run +0 -0
  30. {pydocket-0.1.4 → pydocket-0.2.0}/chaos/tasks.py +0 -0
  31. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/__init__.py +0 -0
  32. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/__main__.py +0 -0
  33. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/annotations.py +0 -0
  34. {pydocket-0.1.4 → pydocket-0.2.0}/src/docket/py.typed +0 -0
  35. {pydocket-0.1.4 → pydocket-0.2.0}/telemetry/.gitignore +0 -0
  36. {pydocket-0.1.4 → pydocket-0.2.0}/telemetry/start +0 -0
  37. {pydocket-0.1.4 → pydocket-0.2.0}/telemetry/stop +0 -0
  38. {pydocket-0.1.4 → pydocket-0.2.0}/tests/__init__.py +0 -0
  39. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/__init__.py +0 -0
  40. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/conftest.py +0 -0
  41. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_module.py +0 -0
  42. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_parsing.py +0 -0
  43. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_striking.py +0 -0
  44. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_version.py +0 -0
  45. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_worker.py +0 -0
  46. {pydocket-0.1.4 → pydocket-0.2.0}/tests/cli/test_workers.py +0 -0
  47. {pydocket-0.1.4 → pydocket-0.2.0}/tests/test_dependencies.py +0 -0
  48. {pydocket-0.1.4 → pydocket-0.2.0}/tests/test_docket.py +0 -0
  49. {pydocket-0.1.4 → pydocket-0.2.0}/tests/test_fundamentals.py +0 -0
  50. {pydocket-0.1.4 → pydocket-0.2.0}/tests/test_striking.py +0 -0
  51. {pydocket-0.1.4 → pydocket-0.2.0}/tests/test_worker.py +0 -0
{pydocket-0.1.4 → pydocket-0.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.1.4
+Version: 0.2.0
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues

{pydocket-0.1.4 → pydocket-0.2.0}/chaos/driver.py

@@ -6,9 +6,9 @@ import socket
 import sys
 from asyncio import subprocess
 from asyncio.subprocess import Process
-from contextlib import contextmanager
+from contextlib import asynccontextmanager
 from datetime import timedelta
-from typing import Any, Generator, Literal, Sequence
+from typing import Any, AsyncGenerator, Literal, Sequence
 from uuid import uuid4

 import redis.exceptions
@@ -46,8 +46,8 @@ def python_entrypoint() -> list[str]:
     return [sys.executable]


-@contextmanager
-def run_redis(version: str) -> Generator[tuple[str, Container], None, None]:
+@asynccontextmanager
+async def run_redis(version: str) -> AsyncGenerator[tuple[str, Container], None]:
     def get_free_port() -> int:
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
             s.bind(("", 0))
@@ -80,12 +80,14 @@ async def main(
     producers: int = 4,
     workers: int = 7,
 ):
-    with run_redis("7.4.2") as (redis_url, redis_container):
-        logger.info("Redis running at %s", redis_url)
-        docket = Docket(
+    async with (
+        run_redis("7.4.2") as (redis_url, redis_container),
+        Docket(
             name=f"test-docket-{uuid4()}",
             url=redis_url,
-        )
+        ) as docket,
+    ):
+        logger.info("Redis running at %s", redis_url)
         environment = {
             **os.environ,
             "DOCKET_NAME": docket.name,
@@ -93,14 +95,13 @@ async def main(
         }

         # Add in some random strikes to performance test
-        async with docket:
-            for _ in range(100):
-                parameter = f"param_{random.randint(1, 100)}"
-                operator: Operator = random.choice(
-                    ["==", "!=", ">", ">=", "<", "<=", "between"]
-                )
-                value = f"val_{random.randint(1, 1000)}"
-                await docket.strike("rando", parameter, operator, value)
+        for _ in range(100):
+            parameter = f"param_{random.randint(1, 100)}"
+            operator: Operator = random.choice(
+                ["==", "!=", ">", ">=", "<", "<=", "between"]
+            )
+            value = f"val_{random.randint(1, 1000)}"
+            await docket.strike("rando", parameter, operator, value)

         if tasks % producers != 0:
             raise ValueError("total_tasks must be divisible by total_producers")
@@ -199,8 +200,7 @@ async def main(
         elif chaos_chance < 0.15:
             logger.warning("CHAOS: Queuing a toxic task...")
             try:
-                async with docket:
-                    await docket.add(toxic)()
+                await docket.add(toxic)()
             except redis.exceptions.ConnectionError:
                 pass
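The driver's Redis fixture is now an async context manager, so it composes with the long-lived Docket in a single parenthesized async-with block instead of re-entering the docket per operation. A minimal standalone sketch of that pattern (the resource names here are illustrative, not the driver's real ones):

    import asyncio
    from contextlib import asynccontextmanager
    from typing import AsyncGenerator


    @asynccontextmanager
    async def run_resource(name: str) -> AsyncGenerator[str, None]:
        # setup: the real driver starts a Redis container and yields its URL
        print(f"starting {name}")
        try:
            yield f"{name}-handle"
        finally:
            # teardown runs even if the body raises
            print(f"stopping {name}")


    async def main() -> None:
        # Python 3.10+ allows parenthesized async-with to stack managers
        async with (
            run_resource("redis") as redis_handle,
            run_resource("docket") as docket_handle,
        ):
            print(redis_handle, docket_handle)


    asyncio.run(main())
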

{pydocket-0.1.4 → pydocket-0.2.0}/pyproject.toml

@@ -45,7 +45,7 @@ dev = [
     "pre-commit>=4.1.0",
     "pyright>=1.1.396",
     "pytest>=8.3.4",
-    "pytest-asyncio>=0.25.3",
+    "pytest-aio>=1.9.0",
     "pytest-cov>=6.0.0",
     "pytest-xdist>=3.6.1",
     "ruff>=0.9.7",
@@ -66,8 +66,6 @@ packages = ["src/docket"]

 [tool.pytest.ini_options]
 addopts = "--cov=src/docket --cov=tests --cov-report=term-missing --cov-branch"
-asyncio_mode = "auto"
-asyncio_default_fixture_loop_scope = "session"
 filterwarnings = ["error"]

 [tool.pyright]
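
The switch from pytest-asyncio to pytest-aio is why the two asyncio_* ini options disappear: pytest-aio collects and runs plain coroutine tests without any mode configuration. A hedged example of a test that should run unchanged under the new plugin:

    # tests/test_example.py -- an illustrative test, not one from this package;
    # with pytest-aio installed, `async def` tests run without asyncio_mode settings
    import asyncio


    async def test_async_sleep_completes():
        await asyncio.sleep(0)
        assert True
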

{pydocket-0.1.4 → pydocket-0.2.0}/src/docket/cli.py

@@ -243,6 +243,13 @@ def worker(
             help="Exit after the current docket is finished",
         ),
     ] = False,
+    metrics_port: Annotated[
+        int | None,
+        typer.Option(
+            "--metrics-port",
+            help="The port to serve Prometheus metrics on",
+        ),
+    ] = None,
 ) -> None:
     asyncio.run(
         Worker.run(
@@ -254,6 +261,7 @@ def worker(
             reconnection_delay=reconnection_delay,
             minimum_check_interval=minimum_check_interval,
             until_finished=until_finished,
+            metrics_port=metrics_port,
             tasks=tasks,
         )
     )
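
The new flag follows typer's Annotated-option pattern, with None meaning "don't serve metrics". A self-contained sketch of how such an optional port option behaves (the `serve` command here is hypothetical, not pydocket's actual CLI):

    from typing import Annotated, Optional

    import typer

    app = typer.Typer()


    @app.command()
    def serve(
        metrics_port: Annotated[
            Optional[int],
            typer.Option("--metrics-port", help="The port to serve Prometheus metrics on"),
        ] = None,
    ) -> None:
        # None means "don't serve metrics", mirroring the worker's default
        typer.echo(f"metrics port: {metrics_port}")


    if __name__ == "__main__":
        app()
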

{pydocket-0.1.4 → pydocket-0.2.0}/src/docket/dependencies.py

@@ -61,15 +61,14 @@ class _TaskLogger(Dependency):
         self, docket: Docket, worker: Worker, execution: Execution
     ) -> logging.LoggerAdapter[logging.Logger]:
         logger = logging.getLogger(f"docket.task.{execution.function.__name__}")
-
-        extra = {
-            "execution.key": execution.key,
-            "execution.attempt": execution.attempt,
-            "worker.name": worker.name,
-            "docket.name": docket.name,
-        }
-
-        return logging.LoggerAdapter(logger, extra)
+        return logging.LoggerAdapter(
+            logger,
+            {
+                **docket.labels(),
+                **worker.labels(),
+                **execution.specific_labels(),
+            },
+        )


 def TaskLogger() -> logging.LoggerAdapter[logging.Logger]:
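
The refactored dependency simply merges the docket, worker, and execution label mappings into the adapter's extra. A standalone sketch with invented label values:

    import logging

    logging.basicConfig(format="%(message)s", level=logging.INFO)
    base = logging.getLogger("docket.task.example")

    # merge several label mappings into one dict, as _TaskLogger now does;
    # the values below are illustrative, not produced by docket itself
    labels = {
        "docket.name": "docket",
        "worker.name": "worker-1",
        "docket.task": "example",
        "docket.key": "abc123",
        "docket.attempt": 1,
    }

    adapter = logging.LoggerAdapter(base, labels)
    adapter.info("task started")  # every record carries `labels` as its extra
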

{pydocket-0.1.4 → pydocket-0.2.0}/src/docket/docket.py

@@ -13,6 +13,7 @@ from typing import (
     Collection,
     Hashable,
     Iterable,
+    Mapping,
     NoReturn,
     ParamSpec,
     Self,
@@ -26,7 +27,7 @@ from uuid import uuid4

 import redis.exceptions
 from opentelemetry import propagate, trace
-from redis.asyncio import Redis
+from redis.asyncio import ConnectionPool, Redis

 from .execution import (
     Execution,
@@ -112,11 +113,14 @@ class Docket:
     tasks: dict[str, Callable[..., Awaitable[Any]]]
     strike_list: StrikeList

+    _monitor_strikes_task: asyncio.Task[None]
+    _connection_pool: ConnectionPool
+
     def __init__(
         self,
         name: str = "docket",
         url: str = "redis://localhost:6379/0",
-        heartbeat_interval: timedelta = timedelta(seconds=1),
+        heartbeat_interval: timedelta = timedelta(seconds=2),
         missed_heartbeats: int = 5,
     ) -> None:
         """
@@ -144,6 +148,7 @@ class Docket:
         self.tasks = {fn.__name__: fn for fn in standard_tasks}
         self.strike_list = StrikeList()

+        self._connection_pool = ConnectionPool.from_url(self.url)  # type: ignore
         self._monitor_strikes_task = asyncio.create_task(self._monitor_strikes())

         # Ensure that the stream and worker group exist
@@ -176,23 +181,17 @@ class Docket:
         except asyncio.CancelledError:
             pass

+        await asyncio.shield(self._connection_pool.disconnect())
+        del self._connection_pool
+
     @asynccontextmanager
     async def redis(self) -> AsyncGenerator[Redis, None]:
-        redis: Redis | None = None
+        r = Redis(connection_pool=self._connection_pool)
+        await r.__aenter__()
         try:
-            redis = await Redis.from_url(
-                self.url,
-                single_connection_client=True,
-            )
-            await redis.__aenter__()
-            try:
-                yield redis
-            finally:
-                await asyncio.shield(redis.__aexit__(None, None, None))
+            yield r
         finally:
-            # redis 4.6.0 doesn't automatically disconnect and leaves connections open
-            if redis:
-                await asyncio.shield(redis.connection_pool.disconnect())
+            await asyncio.shield(r.__aexit__(None, None, None))

     def register(self, function: Callable[..., Awaitable[Any]]) -> None:
         from .dependencies import validate_dependencies
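
Instead of building and tearing down a single-connection client on every call, redis() now checks a client out of one long-lived ConnectionPool created in __init__ and disconnected when the Docket exits. A minimal sketch of that pattern (assuming a local Redis at the default port):

    import asyncio

    from redis.asyncio import ConnectionPool, Redis


    async def main() -> None:
        # one pool for the lifetime of the application...
        pool = ConnectionPool.from_url("redis://localhost:6379/0")
        try:
            # ...and a lightweight client per unit of work, sharing the pool
            async with Redis(connection_pool=pool) as r:
                await r.ping()
        finally:
            # the pool owns the sockets, so disconnect it explicitly at shutdown
            await pool.disconnect()


    asyncio.run(main())
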
@@ -214,6 +213,11 @@ class Docket:
         for function in collection:
             self.register(function)

+    def labels(self) -> Mapping[str, str]:
+        return {
+            "docket.name": self.name,
+        }
+
     @overload
     def add(
         self,
@@ -251,7 +255,7 @@ class Docket:
         execution = Execution(function, args, kwargs, when, key, attempt=1)
         await self.schedule(execution)

-        TASKS_ADDED.add(1, {"docket": self.name, "task": function.__name__})
+        TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})

         return execution

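Metric label dicts are now assembled by unpacking the shared label mappings rather than hand-built per call site. A sketch of the same pattern with a standalone OpenTelemetry counter (the meter and counter names here are illustrative):

    from opentelemetry import metrics

    meter = metrics.get_meter("docket")
    tasks_added = meter.create_counter(
        "docket_tasks_added", description="Tasks added", unit="1"
    )

    # the label values below are invented for illustration
    docket_labels = {"docket.name": "docket"}
    task_labels = {"docket.task": "example"}

    # dict unpacking merges the label sources into one flat attributes mapping
    tasks_added.add(1, {**docket_labels, **task_labels})
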
@@ -287,7 +291,7 @@ class Docket:
         await self.cancel(key)
         await self.schedule(execution)

-        TASKS_REPLACED.add(1, {"docket": self.name, "task": function.__name__})
+        TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})

         return execution

@@ -314,9 +318,9 @@ class Docket:
             TASKS_STRICKEN.add(
                 1,
                 {
-                    "docket": self.name,
-                    "task": execution.function.__name__,
-                    "where": "docket",
+                    **self.labels(),
+                    **execution.specific_labels(),
+                    "docket.where": "docket",
                 },
             )
             return
@@ -327,10 +331,8 @@ class Docket:
         with tracer.start_as_current_span(
             "docket.schedule",
             attributes={
-                "docket.name": self.name,
-                "docket.execution.when": execution.when.isoformat(),
-                "docket.execution.key": execution.key,
-                "docket.execution.attempt": execution.attempt,
+                **self.labels(),
+                **execution.specific_labels(),
                 "code.function.name": execution.function.__name__,
             },
         ):
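Span attributes are built the same way, so traces and metrics share one labeling scheme. A minimal hedged sketch (no SDK configuration; the label values are invented):

    from opentelemetry import trace

    tracer = trace.get_tracer("docket")

    labels = {"docket.name": "docket", "docket.key": "abc123", "docket.attempt": 1}

    # without a configured tracer provider this is a no-op span, but the
    # attributes mapping is merged exactly as in Docket.schedule
    with tracer.start_as_current_span("docket.schedule", attributes=labels):
        pass
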
@@ -350,16 +352,14 @@ class Docket:
                 pipe.zadd(self.queue_key, {key: when.timestamp()})
                 await pipe.execute()

-                TASKS_SCHEDULED.add(
-                    1, {"docket": self.name, "task": execution.function.__name__}
-                )
+                TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})

     async def cancel(self, key: str) -> None:
         with tracer.start_as_current_span(
             "docket.cancel",
             attributes={
-                "docket.name": self.name,
-                "docket.execution.key": key,
+                **self.labels(),
+                "docket.key": key,
             },
         ):
             async with self.redis() as redis:
@@ -368,7 +368,7 @@ class Docket:
                 pipe.zrem(self.queue_key, key)
                 await pipe.execute()

-                TASKS_CANCELLED.add(1, {"docket": self.name})
+                TASKS_CANCELLED.add(1, self.labels())

     @property
     def strike_key(self) -> str:
@@ -408,8 +408,8 @@ class Docket:
         with tracer.start_as_current_span(
             f"docket.{instruction.direction}",
             attributes={
-                "docket.name": self.name,
-                **instruction.as_span_attributes(),
+                **self.labels(),
+                **instruction.labels(),
             },
         ):
             async with self.redis() as redis:
@@ -441,18 +441,15 @@ class Docket:
                         else "Restoring"
                     ),
                     instruction.call_repr(),
-                    extra={"docket": self.name},
+                    extra=self.labels(),
                 )

-                counter_labels = {"docket": self.name}
-                if instruction.function:
-                    counter_labels["task"] = instruction.function
-                if instruction.parameter:
-                    counter_labels["parameter"] = instruction.parameter
-
                 STRIKES_IN_EFFECT.add(
                     1 if instruction.direction == "strike" else -1,
-                    counter_labels,
+                    {
+                        **self.labels(),
+                        **instruction.labels(),
+                    },
                 )

             except redis.exceptions.ConnectionError:  # pragma: no cover

{pydocket-0.1.4 → pydocket-0.2.0}/src/docket/execution.py

@@ -3,7 +3,7 @@ import enum
 import inspect
 import logging
 from datetime import datetime
-from typing import Any, Awaitable, Callable, Hashable, Literal, Self, cast
+from typing import Any, Awaitable, Callable, Hashable, Literal, Mapping, Self, cast

 import cloudpickle  # type: ignore[import]

@@ -55,6 +55,17 @@ class Execution:
             attempt=int(message[b"attempt"].decode()),
         )

+    def general_labels(self) -> Mapping[str, str]:
+        return {"docket.task": self.function.__name__}
+
+    def specific_labels(self) -> Mapping[str, str | int]:
+        return {
+            "docket.task": self.function.__name__,
+            "docket.key": self.key,
+            "docket.when": self.when.isoformat(),
+            "docket.attempt": self.attempt,
+        }
+
     def call_repr(self) -> str:
         arguments: list[str] = []
         signature = inspect.signature(self.function)
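
The split matters for cardinality: general_labels() (task name only) is safe to attach to counters, while specific_labels() (key, timestamp, attempt) belongs on spans and log records where unique values are expected. A sketch of the distinction with invented values:

    from datetime import datetime, timezone

    # illustrative stand-ins for an Execution's fields
    task_name, key, attempt = "example", "abc123", 1
    when = datetime.now(timezone.utc)

    general_labels = {"docket.task": task_name}  # low cardinality: metrics
    specific_labels = {  # high cardinality: spans and logs
        "docket.task": task_name,
        "docket.key": key,
        "docket.when": when.isoformat(),
        "docket.attempt": attempt,
    }
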
@@ -131,17 +142,17 @@ class StrikeInstruction(abc.ABC):
         else:
             return Restore(function, parameter, operator, value)

-    def as_span_attributes(self) -> dict[str, str]:
-        span_attributes: dict[str, str] = {}
+    def labels(self) -> Mapping[str, str]:
+        labels: dict[str, str] = {}
         if self.function:
-            span_attributes["docket.function"] = self.function
+            labels["docket.task"] = self.function

         if self.parameter:
-            span_attributes["docket.parameter"] = self.parameter
-            span_attributes["docket.operator"] = self.operator
-            span_attributes["docket.value"] = repr(self.value)
+            labels["docket.parameter"] = self.parameter
+            labels["docket.operator"] = self.operator
+            labels["docket.value"] = repr(self.value)

-        return span_attributes
+        return labels

     def call_repr(self) -> str:
         return (

{pydocket-0.1.4 → pydocket-0.2.0}/src/docket/instrumentation.py

@@ -1,5 +1,12 @@
+import threading
+from contextlib import contextmanager
+from typing import Generator, cast
+
 from opentelemetry import metrics
+from opentelemetry.exporter.prometheus import PrometheusMetricReader
+from opentelemetry.metrics import set_meter_provider
 from opentelemetry.propagators.textmap import Getter, Setter
+from opentelemetry.sdk.metrics import MeterProvider

 meter: metrics.Meter = metrics.get_meter("docket")

@@ -93,6 +100,17 @@ STRIKES_IN_EFFECT = meter.create_up_down_counter(
     unit="1",
 )

+QUEUE_DEPTH = meter.create_gauge(
+    "docket_queue_depth",
+    description="How many tasks are due to be executed now",
+    unit="1",
+)
+SCHEDULE_DEPTH = meter.create_gauge(
+    "docket_schedule_depth",
+    description="How many tasks are scheduled to be executed in the future",
+    unit="1",
+)
+
 Message = dict[bytes, bytes]

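Unlike the counters above, these gauges record a point-in-time value rather than accumulating. A hedged sketch of how a worker loop might set one (the depth value is invented):

    from opentelemetry import metrics

    meter = metrics.get_meter("docket")
    queue_depth = meter.create_gauge(
        "docket_queue_depth",
        description="How many tasks are due to be executed now",
        unit="1",
    )

    # a gauge takes the latest observation; 42 here is illustrative
    queue_depth.set(42, {"docket.name": "docket"})
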
@@ -119,3 +137,38 @@ class MessageSetter(Setter[Message]):

 message_getter: MessageGetter = MessageGetter()
 message_setter: MessageSetter = MessageSetter()
+
+
+@contextmanager
+def metrics_server(
+    host: str = "0.0.0.0", port: int | None = None
+) -> Generator[None, None, None]:
+    if port is None:
+        yield
+        return
+
+    from wsgiref.types import WSGIApplication
+
+    from prometheus_client import REGISTRY
+    from prometheus_client.exposition import (
+        ThreadingWSGIServer,
+        _SilentHandler,  # type: ignore[member-access]
+        make_server,  # type: ignore[import]
+        make_wsgi_app,  # type: ignore[import]
+    )
+
+    set_meter_provider(MeterProvider(metric_readers=[PrometheusMetricReader()]))
+
+    server = make_server(
+        host,
+        port,
+        cast(WSGIApplication, make_wsgi_app(registry=REGISTRY)),
+        ThreadingWSGIServer,
+        handler_class=_SilentHandler,
+    )
+    with server:
+        t = threading.Thread(target=server.serve_forever)
+        t.daemon = True
+        t.start()
+
+        yield
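
Because metrics_server no-ops when port is None, callers can wrap the worker's lifetime unconditionally and let the CLI flag decide. A hypothetical usage sketch (the sleep stands in for the worker's run loop):

    import time

    from docket.instrumentation import metrics_server

    # with port=None this would be a no-op, matching the CLI default
    with metrics_server(port=9090):
        time.sleep(60)  # Prometheus can scrape http://localhost:9090/metrics
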
{pydocket-0.1.4 → pydocket-0.2.0}/src/docket/tasks.py

@@ -2,16 +2,21 @@ import asyncio
 import logging
 from datetime import datetime, timezone

-from .dependencies import CurrentDocket, CurrentExecution, CurrentWorker, Retry
+from .dependencies import (
+    CurrentDocket,
+    CurrentExecution,
+    CurrentWorker,
+    Retry,
+    TaskLogger,
+)
 from .docket import Docket, TaskCollection
 from .execution import Execution
 from .worker import Worker

-logger: logging.Logger = logging.getLogger(__name__)
-

 async def trace(
     message: str,
+    logger: logging.LoggerAdapter[logging.Logger] = TaskLogger(),
     docket: Docket = CurrentDocket(),
     worker: Worker = CurrentWorker(),
     execution: Execution = CurrentExecution(),
@@ -23,11 +28,6 @@ async def trace(
         docket.name,
         (datetime.now(timezone.utc) - execution.when),
         worker.name,
-        extra={
-            "docket.name": docket.name,
-            "worker.name": worker.name,
-            "execution.key": execution.key,
-        },
     )


@@ -45,7 +45,9 @@ async def fail(
     )


-async def sleep(seconds: float) -> None:
+async def sleep(
+    seconds: float, logger: logging.LoggerAdapter[logging.Logger] = TaskLogger()
+) -> None:
     logger.info("Sleeping for %s seconds", seconds)
     await asyncio.sleep(seconds)
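
The module-level logger gives way to the TaskLogger() default-argument dependency, so every task logs with the merged docket/worker/execution labels. A hedged sketch of a user task written the same way (the task body is invented):

    import logging

    from docket.dependencies import TaskLogger


    async def greet(
        name: str,
        logger: logging.LoggerAdapter[logging.Logger] = TaskLogger(),
    ) -> None:
        # the worker resolves TaskLogger() into an adapter that already
        # carries docket/worker/execution labels before invoking the task
        logger.info("Hello, %s", name)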