pydocket 0.15.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,225 @@
1
+ from contextlib import contextmanager
2
+ from threading import Thread
3
+ from typing import Generator, cast
4
+
5
+ from opentelemetry import metrics
6
+ from opentelemetry.exporter.prometheus import PrometheusMetricReader
7
+ from opentelemetry.metrics import set_meter_provider
8
+ from opentelemetry.propagators.textmap import Getter, Setter
9
+ from opentelemetry.sdk.metrics import MeterProvider
10
+
11
# Shared OpenTelemetry meter under which all docket metrics are registered.
meter: metrics.Meter = metrics.get_meter("docket")

# -- Counters for task scheduling ---------------------------------------------

TASKS_ADDED = meter.create_counter(
    "docket_tasks_added",
    description="How many tasks added to the docket",
    unit="1",
)

TASKS_REPLACED = meter.create_counter(
    "docket_tasks_replaced",
    description="How many tasks replaced on the docket",
    unit="1",
)

TASKS_SCHEDULED = meter.create_counter(
    "docket_tasks_scheduled",
    description="How many tasks added or replaced on the docket",
    unit="1",
)

TASKS_CANCELLED = meter.create_counter(
    "docket_tasks_cancelled",
    description="How many tasks cancelled from the docket",
    unit="1",
)

# -- Counters for task execution outcomes -------------------------------------

TASKS_STARTED = meter.create_counter(
    "docket_tasks_started",
    description="How many tasks started",
    unit="1",
)

TASKS_REDELIVERED = meter.create_counter(
    "docket_tasks_redelivered",
    description="How many tasks started that were redelivered from another worker",
    unit="1",
)

TASKS_STRICKEN = meter.create_counter(
    "docket_tasks_stricken",
    description="How many tasks have been stricken from executing",
    unit="1",
)

TASKS_COMPLETED = meter.create_counter(
    "docket_tasks_completed",
    description="How many tasks that have completed in any state",
    unit="1",
)

TASKS_FAILED = meter.create_counter(
    "docket_tasks_failed",
    description="How many tasks that have failed",
    unit="1",
)

TASKS_SUCCEEDED = meter.create_counter(
    "docket_tasks_succeeded",
    description="How many tasks that have succeeded",
    unit="1",
)

TASKS_RETRIED = meter.create_counter(
    "docket_tasks_retried",
    description="How many tasks that have been retried",
    unit="1",
)

TASKS_PERPETUATED = meter.create_counter(
    "docket_tasks_perpetuated",
    description="How many tasks that have been self-perpetuated",
    unit="1",
)

# -- Histograms for task timing (seconds) -------------------------------------

TASK_DURATION = meter.create_histogram(
    "docket_task_duration",
    description="How long tasks take to complete",
    unit="s",
)

TASK_PUNCTUALITY = meter.create_histogram(
    "docket_task_punctuality",
    description="How close a task was to its scheduled time",
    unit="s",
)

# -- Gauges and up/down counters for current state ----------------------------

TASKS_RUNNING = meter.create_up_down_counter(
    "docket_tasks_running",
    description="How many tasks that are currently running",
    unit="1",
)

REDIS_DISRUPTIONS = meter.create_counter(
    "docket_redis_disruptions",
    description="How many times the Redis connection has been disrupted",
    unit="1",
)

STRIKES_IN_EFFECT = meter.create_up_down_counter(
    "docket_strikes_in_effect",
    description="How many strikes are currently in effect",
    unit="1",
)

QUEUE_DEPTH = meter.create_gauge(
    "docket_queue_depth",
    description="How many tasks are due to be executed now",
    unit="1",
)

SCHEDULE_DEPTH = meter.create_gauge(
    "docket_schedule_depth",
    description="How many tasks are scheduled to be executed in the future",
    unit="1",
)

CACHE_SIZE = meter.create_gauge(
    "docket_cache_size",
    description="Size of internal docket caches",
    unit="1",
)

# A raw message as field/value pairs, both bytes — used below as the carrier
# type for OpenTelemetry trace-context propagation.
Message = dict[bytes, bytes]
133
+
134
+
135
class MessageGetter(Getter[Message]):
    """Read propagation headers out of a bytes-keyed message carrier.

    Keys are looked up (and returned) as str, while the carrier itself
    stores both keys and values as bytes.
    """

    def get(self, carrier: Message, key: str) -> list[str] | None:
        raw = carrier.get(key.encode(), None)
        return None if raw is None else [raw.decode()]

    def keys(self, carrier: Message) -> list[str]:
        return [k.decode() for k in carrier]
144
+
145
+
146
class MessageSetter(Setter[Message]):
    """Write propagation headers into a bytes-keyed message carrier.

    The str key/value pair is encoded to bytes before being stored.
    """

    def set(
        self,
        carrier: Message,
        key: str,
        value: str,
    ) -> None:
        carrier[key.encode()] = value.encode()
154
+
155
+
156
# Module-level singletons for trace-context propagation; both classes are
# stateless, so sharing a single instance is safe.
message_getter: MessageGetter = MessageGetter()
message_setter: MessageSetter = MessageSetter()
158
+
159
+
160
+ @contextmanager
161
+ def healthcheck_server(
162
+ host: str = "0.0.0.0", port: int | None = None
163
+ ) -> Generator[None, None, None]:
164
+ if port is None:
165
+ yield
166
+ return
167
+
168
+ from http.server import BaseHTTPRequestHandler, HTTPServer
169
+
170
+ class HealthcheckHandler(BaseHTTPRequestHandler):
171
+ def do_GET(self):
172
+ self.send_response(200)
173
+ self.send_header("Content-type", "text/plain")
174
+ self.end_headers()
175
+ self.wfile.write(b"OK")
176
+
177
+ def log_message(self, format: str, *args: object) -> None:
178
+ # Suppress access logs from the webserver
179
+ pass
180
+
181
+ server = HTTPServer((host, port), HealthcheckHandler)
182
+ with server:
183
+ Thread(target=server.serve_forever, daemon=True).start()
184
+
185
+ yield
186
+
187
+
188
@contextmanager
def metrics_server(
    host: str = "0.0.0.0", port: int | None = None
) -> Generator[None, None, None]:
    """Expose Prometheus metrics over HTTP in a background thread.

    Installs a global MeterProvider backed by a PrometheusMetricReader, then
    serves the Prometheus registry via a threaded WSGI server for the lifetime
    of the context, shutting it down cleanly on exit.

    Args:
        host: Interface to bind the server to.
        port: Port to bind; if None, no server is started and the context
            is a no-op.
    """
    if port is None:
        yield
        return

    import sys
    from typing import Any

    # wsgiref.types was added in Python 3.11
    if sys.version_info >= (3, 11):  # pragma: no cover
        from wsgiref.types import WSGIApplication
    else:  # pragma: no cover
        WSGIApplication = Any  # type: ignore[misc,assignment]

    # NOTE: _SilentHandler is a private prometheus_client API, used here to
    # suppress per-request access logs; it may change between releases.
    from prometheus_client import REGISTRY
    from prometheus_client.exposition import (
        ThreadingWSGIServer,
        _SilentHandler,  # type: ignore[member-access]
        make_server,  # type: ignore[import]
        make_wsgi_app,  # type: ignore[import]
    )

    set_meter_provider(MeterProvider(metric_readers=[PrometheusMetricReader()]))

    server = make_server(
        host,
        port,
        cast(WSGIApplication, make_wsgi_app(registry=REGISTRY)),
        ThreadingWSGIServer,
        handler_class=_SilentHandler,
    )
    with server:
        thread = Thread(target=server.serve_forever, daemon=True)
        thread.start()
        try:
            yield
        finally:
            # Stop the serve_forever loop *before* the `with` block closes the
            # listening socket so the background thread exits cleanly instead
            # of erroring on a closed socket.
            server.shutdown()
            thread.join(timeout=5)
docket/py.typed ADDED
File without changes
docket/tasks.py ADDED
@@ -0,0 +1,59 @@
1
+ import asyncio
2
+ import logging
3
+ from datetime import datetime, timezone
4
+
5
+ from .dependencies import (
6
+ CurrentDocket,
7
+ CurrentExecution,
8
+ CurrentWorker,
9
+ Retry,
10
+ TaskLogger,
11
+ )
12
+ from .docket import Docket, TaskCollection
13
+ from .execution import Execution
14
+ from .worker import Worker
15
+
16
+
17
async def trace(
    message: str,
    logger: "logging.LoggerAdapter[logging.Logger]" = TaskLogger(),
    docket: Docket = CurrentDocket(),
    worker: Worker = CurrentWorker(),
    execution: Execution = CurrentExecution(),
) -> None:
    """Log a diagnostic line describing this task's scheduling and execution.

    Useful as a smoke-test task: it reports the task key, the docket it was
    added to, how long ago it was scheduled, and which worker is running it.
    """
    age = datetime.now(timezone.utc) - execution.when
    logger.info(
        "%s: %r added to docket %r %s ago now running on worker %r",
        message,
        execution.key,
        docket.name,
        age,
        worker.name,
    )
32
+
33
+
34
async def fail(
    message: str,
    docket: Docket = CurrentDocket(),
    worker: Worker = CurrentWorker(),
    execution: Execution = CurrentExecution(),
    retry: Retry = Retry(attempts=2),
) -> None:
    """Raise an exception on purpose, for exercising failure handling.

    The ``retry`` dependency is declared (but not read) so the task is
    attempted twice before being considered failed.
    """
    detail = (
        f"{message}: {execution.key} added to docket "
        f"{docket.name} {datetime.now(timezone.utc) - execution.when} "
        f"ago now running on worker {worker.name}"
    )
    raise Exception(detail)
46
+
47
+
48
async def sleep(
    seconds: float, logger: "logging.LoggerAdapter[logging.Logger]" = TaskLogger()
) -> None:
    """Sleep asynchronously for *seconds*, logging the duration first."""
    logger.info("Sleeping for %s seconds", seconds)
    await asyncio.sleep(seconds)
53
+
54
+
55
# The built-in diagnostic tasks shipped with docket: a trace logger, a
# deliberate failure, and an async sleep.
standard_tasks: TaskCollection = [
    trace,
    fail,
    sleep,
]
docket/testing.py ADDED
@@ -0,0 +1,235 @@
1
+ """Testing utilities for making assertions about scheduled tasks.
2
+
3
+ Example usage:
4
+ from docket import Docket, testing
5
+
6
+ docket = Docket("redis://localhost:6379/0")
7
+
8
+ # Schedule a task
9
+ await docket.add(my_task)("arg1", kwarg1="value1")
10
+
11
+ # Assert it's scheduled
12
+ await testing.assert_task_scheduled(docket, my_task, args=("arg1",))
13
+
14
+ # After completion
15
+ await worker.run_until_finished()
16
+ await testing.assert_no_tasks(docket)
17
+ """
18
+
19
+ from collections.abc import Callable
20
+ from typing import Any
21
+
22
+ from docket.docket import Docket
23
+ from docket.execution import Execution
24
+
25
+
26
def _matches_criteria(
    execution: Execution,
    function: str | Callable[..., Any],
    args: tuple[Any, ...] | None,
    kwargs: dict[str, Any] | None,
    key: str | None,
) -> bool:
    """Return True when *execution* satisfies every specified criterion.

    The function is matched by name, ``key`` and ``args`` (when given) must
    match exactly, and ``kwargs`` (when given) is matched as a subset of the
    execution's keyword arguments.
    """
    expected = function if isinstance(function, str) else function.__name__
    if execution.function.__name__ != expected:
        return False

    if key is not None and execution.key != key:
        return False

    if args is not None and execution.args != args:
        return False

    if kwargs is None:
        return True

    # Subset match: every requested kwarg must be present with the same value.
    return all(
        name in execution.kwargs and execution.kwargs[name] == value
        for name, value in kwargs.items()
    )
54
+
55
+
56
def _format_criteria(
    function: str | Callable[..., Any],
    args: tuple[Any, ...] | None,
    kwargs: dict[str, Any] | None,
    key: str | None,
) -> str:
    """Render the search criteria as a comma-separated string for errors."""
    function_name = function if isinstance(function, str) else function.__name__
    parts: list[str] = [f"function={function_name}"]

    if key is not None:
        parts.append(f"key={key!r}")
    if args is not None:
        parts.append(f"args={args!r}")
    if kwargs is not None:
        parts.append(f"kwargs={kwargs!r}")

    return ", ".join(parts)
76
+
77
+
78
async def assert_task_scheduled(
    docket: Docket,
    function: str | Callable[..., Any],
    *,
    args: tuple[Any, ...] | None = None,
    kwargs: dict[str, Any] | None = None,
    key: str | None = None,
) -> None:
    """Assert that a task matching the criteria is scheduled.

    Args:
        docket: The Docket instance to check
        function: The task function or function name (string)
        args: Optional tuple of positional arguments to match
        kwargs: Optional dict of keyword arguments to match (subset matching)
        key: Optional task key to match

    Raises:
        AssertionError: If no matching task is found

    Example:
        await assert_task_scheduled(docket, my_task)
        await assert_task_scheduled(docket, my_task, args=("foo",))
        await assert_task_scheduled(docket, "my_task", key="task-123")
    """
    snapshot = await docket.snapshot()
    scheduled = list(snapshot.future)

    # Success path: any execution matching every criterion.
    if any(_matches_criteria(e, function, args, kwargs, key) for e in scheduled):
        return

    function_name = function if isinstance(function, str) else function.__name__

    if not scheduled:
        raise AssertionError(
            f"Task {function_name} not found: no tasks scheduled on docket"
        )

    # Otherwise, describe what was requested and what was actually found.
    criteria = _format_criteria(function, args, kwargs, key)
    found_str = "\n".join(
        f" - {e.function.__name__}(args={e.args!r}, kwargs={e.kwargs!r}, key={e.key!r})"
        for e in scheduled
    )

    raise AssertionError(
        f"Task {function_name} not found with {criteria}\n\n"
        f"Scheduled tasks:\n{found_str}"
    )
132
+
133
+
134
async def assert_task_not_scheduled(
    docket: Docket,
    function: str | Callable[..., Any],
    *,
    args: tuple[Any, ...] | None = None,
    kwargs: dict[str, Any] | None = None,
    key: str | None = None,
) -> None:
    """Assert that no task matching the criteria is scheduled.

    Args:
        docket: The Docket instance to check
        function: The task function or function name (string)
        args: Optional tuple of positional arguments to match
        kwargs: Optional dict of keyword arguments to match (subset matching)
        key: Optional task key to match

    Raises:
        AssertionError: If a matching task is found

    Example:
        await assert_task_not_scheduled(docket, my_task)
        await assert_task_not_scheduled(docket, my_task, args=("foo",))
    """
    snapshot = await docket.snapshot()

    for execution in snapshot.future:
        if not _matches_criteria(execution, function, args, kwargs, key):
            continue
        function_name = function if isinstance(function, str) else function.__name__
        raise AssertionError(
            f"Task {function_name} found but should not be scheduled\n"
            f"Found: {execution.function.__name__}(args={execution.args!r}, "
            f"kwargs={execution.kwargs!r}, key={execution.key!r})"
        )
171
+
172
+
173
async def assert_task_count(
    docket: Docket,
    function: str | Callable[..., Any] | None = None,
    *,
    count: int,
) -> None:
    """Assert the number of scheduled tasks matches the expected count.

    Args:
        docket: The Docket instance to check
        function: Optional task function or name to count (if None, counts all tasks)
        count: Expected number of tasks

    Raises:
        AssertionError: If the count doesn't match

    Example:
        await assert_task_count(docket, count=5)  # All tasks
        await assert_task_count(docket, my_task, count=2)  # Specific function
    """
    snapshot = await docket.snapshot()
    scheduled = list(snapshot.future)

    if function is None:
        function_desc = "all tasks"
        actual_count = len(scheduled)
    else:
        function_name = function if isinstance(function, str) else function.__name__
        function_desc = f"tasks for {function_name}"
        actual_count = sum(
            1 for e in scheduled if e.function.__name__ == function_name
        )

    if actual_count != count:
        raise AssertionError(f"Expected {count} {function_desc}, found {actual_count}")
208
+
209
+
210
async def assert_no_tasks(docket: Docket) -> None:
    """Assert that no tasks are scheduled on the docket.

    Args:
        docket: The Docket instance to check

    Raises:
        AssertionError: If any tasks are scheduled

    Example:
        await assert_no_tasks(docket)
    """
    snapshot = await docket.snapshot()
    scheduled = list(snapshot.future)

    if not scheduled:
        return

    # Include a listing of everything found to make the failure actionable.
    found_str = "\n".join(
        f" - {e.function.__name__}(args={e.args!r}, kwargs={e.kwargs!r}, key={e.key!r})"
        for e in scheduled
    )
    raise AssertionError(
        f"Expected no tasks, found {len(scheduled)} task(s) scheduled:\n{found_str}"
    )