pydocket-0.11.0.tar.gz → pydocket-0.11.1.tar.gz
- {pydocket-0.11.0 → pydocket-0.11.1}/PKG-INFO +1 -1
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/advanced-patterns.md +132 -0
- pydocket-0.11.1/examples/agenda_scatter.py +128 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/__init__.py +2 -0
- pydocket-0.11.1/src/docket/agenda.py +201 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/docket.py +1 -1
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/worker.py +2 -2
- pydocket-0.11.1/tests/test_agenda.py +404 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_instrumentation.py +61 -1
- {pydocket-0.11.0 → pydocket-0.11.1}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/codecov.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/workflows/ci.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/workflows/claude-code-review.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/workflows/claude.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/workflows/docs.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.github/workflows/publish.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.gitignore +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/.pre-commit-config.yaml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/CLAUDE.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/LICENSE +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/README.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/chaos/README.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/chaos/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/chaos/driver.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/chaos/producer.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/chaos/run +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/chaos/tasks.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/api-reference.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/dependencies.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/getting-started.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/index.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/production.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/docs/testing.md +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/examples/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/examples/common.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/examples/concurrency_control.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/examples/find_and_flood.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/examples/self_perpetuating.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/mkdocs.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/pyproject.toml +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/__main__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/annotations.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/cli.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/dependencies.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/execution.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/instrumentation.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/py.typed +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/src/docket/tasks.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/telemetry/.gitignore +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/telemetry/start +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/telemetry/stop +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/conftest.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_clear.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_module.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_striking.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_version.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_worker.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/cli/test_workers.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/conftest.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_concurrency_basic.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_concurrency_control.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_concurrency_refresh.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_dependencies.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_docket.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_execution.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_fundamentals.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_striking.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/tests/test_worker.py +0 -0
- {pydocket-0.11.0 → pydocket-0.11.1}/uv.lock +0 -0
{pydocket-0.11.0 → pydocket-0.11.1}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.11.0
+Version: 0.11.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
```
{pydocket-0.11.0 → pydocket-0.11.1}/docs/advanced-patterns.md

````diff
@@ -140,6 +140,138 @@ async def process_single_order(order_id: int) -> None:
 
 This pattern separates discovery (finding work) from execution (doing work), allowing for better load distribution and fault isolation. The perpetual task stays lightweight and fast, while the actual work is distributed across many workers.
 
+## Task Scattering with Agenda
+
+For "find-and-flood" workloads, you often want to distribute a batch of tasks over time rather than scheduling them all immediately. The `Agenda` class collects related tasks and scatters them evenly across a time window.
+
+### Basic Scattering
+
+```python
+from datetime import timedelta
+from docket import Agenda, Docket
+
+async def process_item(item_id: int) -> None:
+    await perform_expensive_operation(item_id)
+    await update_database(item_id)
+
+async with Docket() as docket:
+    # Build an agenda of tasks
+    agenda = Agenda()
+    for item_id in range(1, 101):  # 100 items to process
+        agenda.add(process_item)(item_id)
+
+    # Scatter them evenly over 50 minutes to avoid overwhelming the system
+    executions = await agenda.scatter(docket, over=timedelta(minutes=50))
+    print(f"Scheduled {len(executions)} tasks over 50 minutes")
+```
+
+Tasks are distributed evenly across the time window. For 100 tasks over 50 minutes, they'll be scheduled approximately 30 seconds apart.
+
+### Jitter for Thundering Herd Prevention
+
+Add random jitter to prevent multiple processes from scheduling identical work at exactly the same times:
+
+```python
+# Scatter with ±30 second jitter around each scheduled time
+await agenda.scatter(
+    docket,
+    over=timedelta(minutes=50),
+    jitter=timedelta(seconds=30)
+)
+```
+
+### Future Scatter Windows
+
+Schedule the entire batch to start at a specific time in the future:
+
+```python
+from datetime import datetime, timezone
+
+# Start scattering in 2 hours, spread over 30 minutes
+start_time = datetime.now(timezone.utc) + timedelta(hours=2)
+await agenda.scatter(
+    docket,
+    start=start_time,
+    over=timedelta(minutes=30)
+)
+```
+
+### Mixed Task Types
+
+Agendas can contain different types of tasks:
+
+```python
+async def send_email(user_id: str, template: str) -> None:
+    await email_service.send(user_id, template)
+
+async def update_analytics(event_data: dict[str, str]) -> None:
+    await analytics_service.track(event_data)
+
+# Create a mixed agenda
+agenda = Agenda()
+agenda.add(process_item)(item_id=1001)
+agenda.add(send_email)("user123", "welcome")
+agenda.add(update_analytics)({"event": "signup", "user": "user123"})
+agenda.add(process_item)(item_id=1002)
+
+# All tasks will be scattered in the order they were added
+await agenda.scatter(docket, over=timedelta(minutes=10))
+```
+
+### Single Task Positioning
+
+When scattering a single task, it's positioned at the midpoint of the time window:
+
+```python
+agenda = Agenda()
+agenda.add(process_item)(item_id=42)
+
+# This task will be scheduled 5 minutes from now (middle of 10-minute window)
+await agenda.scatter(docket, over=timedelta(minutes=10))
+```
+
+### Agenda Reusability
+
+Agendas can be reused for multiple scatter operations:
+
+```python
+# Create a reusable template
+daily_cleanup_agenda = Agenda()
+daily_cleanup_agenda.add(cleanup_temp_files)()
+daily_cleanup_agenda.add(compress_old_logs)()
+daily_cleanup_agenda.add(update_metrics)()
+
+# Use it multiple times with different timing
+await daily_cleanup_agenda.scatter(docket, over=timedelta(hours=1))
+
+# Later, scatter the same tasks over a different window
+tomorrow = datetime.now(timezone.utc) + timedelta(days=1)
+await daily_cleanup_agenda.scatter(
+    docket,
+    start=tomorrow,
+    over=timedelta(minutes=30)
+)
+```
+
+### Failure Behavior
+
+Keep in mind that if an error occurs while tasks are being scheduled, some tasks may already have been scheduled successfully:
+
+```python
+agenda = Agenda()
+agenda.add(valid_task)("arg1")
+agenda.add(valid_task)("arg2")
+agenda.add("nonexistent_task")("arg3")  # This will cause an error
+agenda.add(valid_task)("arg4")
+
+try:
+    await agenda.scatter(docket, over=timedelta(minutes=10))
+except KeyError:
+    # An unknown task name fails validation, so nothing here was scheduled;
+    # only a failure during scheduling itself leaves earlier tasks in place.
+    pass
+```
+
 ## Striking and Restoring Tasks
 
 Striking allows you to temporarily disable tasks without redeploying code. This is invaluable for incident response, gradual rollouts, or handling problematic customers.
````
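The "approximately 30 seconds apart" figure above follows from the endpoint-inclusive spacing that `scatter` uses (see `src/docket/agenda.py` below): n tasks are placed at n points from `start` to `start + over`, so there are n - 1 gaps. A quick sanity check of that arithmetic, plain Python only:

```python
from datetime import timedelta

# 100 tasks over 50 minutes -> 99 gaps between endpoint-inclusive points
over = timedelta(minutes=50)
task_count = 100

interval = over / (task_count - 1)
print(interval.total_seconds())  # ~30.3 seconds between consecutive tasks
```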
pydocket-0.11.1/examples/agenda_scatter.py (new file, +128 lines)

```python
#!/usr/bin/env python
"""
Example demonstrating the Agenda scatter functionality for rate-limited workloads.

This example shows a real-world scenario: sending bulk notifications while respecting
rate limits to avoid overwhelming your notification service or triggering spam filters.

Without scatter: All 26 notifications would try to send immediately, potentially:
- Overwhelming your notification service
- Triggering rate limits or spam detection
- Creating a poor user experience with delayed/failed sends

With scatter: Notifications are distributed evenly over time, respecting limits.
"""

import asyncio
import logging
from datetime import datetime, timedelta, timezone

from docket import Agenda, CurrentExecution, Docket, Execution, Worker

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


async def send_notification(
    user: str, message: str, execution: Execution = CurrentExecution()
) -> None:
    """Send a notification to a user."""
    delay = (execution.when - datetime.now(timezone.utc)).total_seconds()
    if delay > 0.1:
        logger.info(f"📅 Notification for {user} scheduled {delay:.1f}s from now")
    else:
        logger.info(f"📧 Sending to {user}: '{message}'")
        # Simulate API call to notification service
        await asyncio.sleep(0.2)
        logger.info(f"✓ Delivered to {user}")


async def main() -> None:
    """Demonstrate scatter for rate-limited notification sending."""

    async with Docket(name="notification-scatter") as docket:
        docket.register(send_notification)

        logger.info("=== Bulk Notification Campaign ===")
        logger.info("Scenario: Alert 26 users about a flash sale")
        logger.info("Constraint: Notification service allows max 30 messages/minute")
        logger.info("Strategy: Scatter over 60 seconds (~1 message every 2.3 seconds)")
        logger.info("")

        # Build the list of users to notify (e.g., from a database query)
        users = [
            "alice@example.com",
            "bob@example.com",
            "charlie@example.com",
            "diana@example.com",
            "eve@example.com",
            "frank@example.com",
            "grace@example.com",
            "henry@example.com",
            "iris@example.com",
            "jack@example.com",
            "kate@example.com",
            "liam@example.com",
            "maya@example.com",
            "noah@example.com",
            "olivia@example.com",
            "peter@example.com",
            "quinn@example.com",
            "ruby@example.com",
            "sam@example.com",
            "tara@example.com",
            "uma@example.com",
            "victor@example.com",
            "wendy@example.com",
            "xavier@example.com",
            "yara@example.com",
            "zoe@example.com",
        ]

        agenda = Agenda()

        # Queue all notifications
        logger.info(f"📋 Preparing notifications for {len(users)} users...")
        for user in users:
            agenda.add(send_notification)(user, "Flash Sale: 50% off for next hour!")

        # Scatter over 60 seconds to respect rate limit
        logger.info("🎯 Scattering notifications over 60 seconds...")
        logger.info("")

        executions = await agenda.scatter(
            docket,
            over=timedelta(seconds=60),
            jitter=timedelta(seconds=0.5),  # Small jitter for natural spacing
        )

        # Show the distribution preview
        first_three = executions[:3]
        last_three = executions[-3:]
        for i, exec in enumerate(first_three, 1):
            delay = (exec.when - datetime.now(timezone.utc)).total_seconds()
            logger.info(f"  Message #{i} scheduled for +{delay:.1f}s")
        logger.info(f"  ... {len(executions) - 6} more evenly distributed ...")
        for i, exec in enumerate(last_three, len(executions) - 2):
            delay = (exec.when - datetime.now(timezone.utc)).total_seconds()
            logger.info(f"  Message #{i} scheduled for +{delay:.1f}s")
        logger.info("")

        # Run worker to process the scattered notifications
        logger.info("🚀 Starting notification sender...")
        logger.info("   Watch how notifications flow steadily, not in a flood!")
        logger.info("")

        start_time = datetime.now(timezone.utc)
        async with Worker(docket, concurrency=2) as worker:
            await worker.run_until_finished()

        elapsed = (datetime.now(timezone.utc) - start_time).total_seconds()
        logger.info("")
        logger.info(f"✅ All {len(users)} notifications sent in {elapsed:.1f} seconds")


if __name__ == "__main__":
    asyncio.run(main())
```
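The budget stated in the example's log lines works out as claimed; a quick check of the arithmetic, independent of the example itself:

```python
messages = 26
window = 60  # seconds

print(messages / window * 60)       # 26.0 messages/minute, under the 30/minute limit
print(round(window / messages, 1))  # ~2.3 s average spacing, as logged
```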
{pydocket-0.11.0 → pydocket-0.11.1}/src/docket/__init__.py

```diff
@@ -8,6 +8,7 @@ from importlib.metadata import version
 
 __version__ = version("pydocket")
 
+from .agenda import Agenda
 from .annotations import Logged
 from .dependencies import (
     ConcurrencyLimit,
@@ -29,6 +30,7 @@ from .worker import Worker
 
 __all__ = [
     "__version__",
+    "Agenda",
     "ConcurrencyLimit",
     "CurrentDocket",
     "CurrentExecution",
```
pydocket-0.11.1/src/docket/agenda.py (new file, +201 lines)

```python
"""
Agenda - A collection of tasks that can be scheduled together.

The Agenda class provides a way to collect multiple tasks and then scatter them
evenly over a time period to avoid overwhelming the system with immediate work.
"""

import random
from datetime import datetime, timedelta, timezone
from typing import Any, Awaitable, Callable, Iterator, ParamSpec, TypeVar, overload

from uuid_extensions import uuid7

from .docket import Docket
from .execution import Execution, TaskFunction

P = ParamSpec("P")
R = TypeVar("R")


class Agenda:
    """A collection of tasks to be scheduled together on a Docket.

    The Agenda allows you to build up a collection of tasks with their arguments,
    then schedule them all at once using various timing strategies like scattering.

    Example:
        >>> agenda = Agenda()
        >>> agenda.add(process_item)(item1)
        >>> agenda.add(process_item)(item2)
        >>> agenda.add(send_email)(email)
        >>> await agenda.scatter(docket, over=timedelta(minutes=50))
    """

    def __init__(self) -> None:
        """Initialize an empty Agenda."""
        self._tasks: list[
            tuple[TaskFunction | str, tuple[Any, ...], dict[str, Any]]
        ] = []

    def __len__(self) -> int:
        """Return the number of tasks in the agenda."""
        return len(self._tasks)

    def __iter__(
        self,
    ) -> Iterator[tuple[TaskFunction | str, tuple[Any, ...], dict[str, Any]]]:
        """Iterate over tasks in the agenda."""
        return iter(self._tasks)

    @overload
    def add(
        self,
        function: Callable[P, Awaitable[R]],
    ) -> Callable[P, None]:
        """Add a task function to the agenda.

        Args:
            function: The task function to add.

        Returns:
            A callable that accepts the task arguments.
        """

    @overload
    def add(
        self,
        function: str,
    ) -> Callable[..., None]:
        """Add a task by name to the agenda.

        Args:
            function: The name of a registered task.

        Returns:
            A callable that accepts the task arguments.
        """

    def add(
        self,
        function: Callable[P, Awaitable[R]] | str,
    ) -> Callable[..., None]:
        """Add a task to the agenda.

        Args:
            function: The task function or name to add.

        Returns:
            A callable that accepts the task arguments and adds them to the agenda.
        """

        def scheduler(*args: Any, **kwargs: Any) -> None:
            self._tasks.append((function, args, kwargs))

        return scheduler

    def clear(self) -> None:
        """Clear all tasks from the agenda."""
        self._tasks.clear()

    async def scatter(
        self,
        docket: Docket,
        over: timedelta,
        start: datetime | None = None,
        jitter: timedelta | None = None,
    ) -> list[Execution]:
        """Scatter the tasks in this agenda over a time period.

        Tasks are distributed evenly across the specified time window,
        optionally with random jitter to prevent thundering herd effects.

        If an error occurs during scheduling, some tasks may have already been
        scheduled successfully before the failure occurred.

        Args:
            docket: The Docket to schedule tasks on.
            over: Time period to scatter tasks over (required).
            start: When to start scattering from. Defaults to now.
            jitter: Maximum random offset to add/subtract from each scheduled time.

        Returns:
            List of Execution objects for the scheduled tasks.

        Raises:
            KeyError: If any task name is not registered with the docket.
            ValueError: If any task is stricken or 'over' is not positive.
        """
        if over.total_seconds() <= 0:
            raise ValueError("'over' parameter must be a positive duration")

        if not self._tasks:
            return []

        if start is None:
            start = datetime.now(timezone.utc)

        # Calculate even distribution over the time period
        task_count = len(self._tasks)

        if task_count == 1:
            # Single task goes in the middle of the window
            schedule_times = [start + over / 2]
        else:
            # Distribute tasks evenly across the window
            # For n tasks, we want n points from start to start+over inclusive
            interval = over / (task_count - 1)
            schedule_times = [start + interval * i for i in range(task_count)]

        # Apply jitter if specified
        if jitter:
            jittered_times: list[datetime] = []
            for schedule_time in schedule_times:
                # Random offset between -jitter and +jitter
                offset = timedelta(
                    seconds=random.uniform(
                        -jitter.total_seconds(), jitter.total_seconds()
                    )
                )
                # Ensure the jittered time doesn't go before start
                jittered_time = max(schedule_time + offset, start)
                jittered_times.append(jittered_time)
            schedule_times = jittered_times

        # Build all Execution objects first, validating as we go
        executions: list[Execution] = []
        for (task_func, args, kwargs), schedule_time in zip(
            self._tasks, schedule_times
        ):
            # Resolve task function if given by name
            if isinstance(task_func, str):
                if task_func not in docket.tasks:
                    raise KeyError(f"Task '{task_func}' is not registered")
                resolved_func = docket.tasks[task_func]
            else:
                # Ensure task is registered
                if task_func not in docket.tasks.values():
                    docket.register(task_func)
                resolved_func = task_func

            # Create execution with unique key
            key = str(uuid7())
            execution = Execution(
                function=resolved_func,
                args=args,
                kwargs=kwargs,
                when=schedule_time,
                key=key,
                attempt=1,
            )
            executions.append(execution)

        # Schedule all tasks - if any fail, some tasks may have been scheduled
        for execution in executions:
            scheduler = docket.add(
                execution.function, when=execution.when, key=execution.key
            )
            # Actually schedule the task - if this fails, earlier tasks remain scheduled
            await scheduler(*execution.args, **execution.kwargs)

        return executions
```
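To see the timing behavior of `scatter` in isolation, here is a minimal, dependency-free sketch that mirrors the distribution and jitter-clamping logic above (it restates the code rather than calling it, so treat it as illustration, not API):

```python
import random
from datetime import datetime, timedelta, timezone


def scatter_times(
    n: int, start: datetime, over: timedelta, jitter: timedelta | None = None
) -> list[datetime]:
    """Mirror of Agenda.scatter's timing: one task lands mid-window; otherwise
    n endpoint-inclusive points, each optionally jittered but clamped to start."""
    if n == 1:
        times = [start + over / 2]
    else:
        interval = over / (n - 1)  # n points -> n - 1 gaps
        times = [start + interval * i for i in range(n)]
    if jitter:
        limit = jitter.total_seconds()
        times = [
            max(t + timedelta(seconds=random.uniform(-limit, limit)), start)
            for t in times
        ]
    return times


start = datetime.now(timezone.utc)
for t in scatter_times(3, start, timedelta(seconds=60)):
    print((t - start).total_seconds())  # 0.0, 30.0, 60.0
```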
{pydocket-0.11.0 → pydocket-0.11.1}/src/docket/worker.py

```diff
@@ -732,7 +732,7 @@ class Worker:
         execution.attempt += 1
         await self.docket.schedule(execution)
 
-        TASKS_RETRIED.add(1, {**self.labels(), **execution.specific_labels()})
+        TASKS_RETRIED.add(1, {**self.labels(), **execution.general_labels()})
         return True
 
     async def _perpetuate_if_requested(
@@ -758,7 +758,7 @@ class Worker:
         )
 
         if duration is not None:
-            TASKS_PERPETUATED.add(1, {**self.labels(), **execution.specific_labels()})
+            TASKS_PERPETUATED.add(1, {**self.labels(), **execution.general_labels()})
 
         return True
 
```
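These two one-line changes are about metric cardinality: a label like `docket.key` is unique per execution, so counters keyed on it would grow a new time series for every retried or perpetuated task. The new tests in `tests/test_instrumentation.py` below pin down which labels remain; the sketch here is illustrative, with the example label values invented:

```python
# Bounded-cardinality labels: a small, fixed set of values, so the
# TASKS_RETRIED / TASKS_PERPETUATED counters stay at a handful of series.
general_labels = {
    "docket.name": "orders",        # one per docket
    "docket.worker": "worker-1",    # one per worker
    "docket.task": "process_item",  # one per task function
}

# Per-execution labels (the shape the old code appears to have used):
# every value below is unique per run, so each retry would mint a new series.
specific_labels = {
    **general_labels,
    "docket.key": "018f3c2e-unique-per-execution",
    "docket.when": "2025-01-01T00:00:00+00:00",
    "docket.attempt": "2",
}
```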
pydocket-0.11.1/tests/test_agenda.py (new file, +404 lines)

```python
from datetime import datetime, timedelta, timezone
from typing import Any
from unittest.mock import AsyncMock, patch

import pytest

from docket import Docket
from docket.agenda import Agenda


@pytest.fixture
def agenda() -> Agenda:
    """Create a fresh agenda for testing."""
    return Agenda()


async def test_agenda_creation(agenda: Agenda):
    """Agenda should be created empty."""
    assert len(agenda) == 0
    assert list(agenda) == []


async def test_agenda_add_single_task(agenda: Agenda, the_task: AsyncMock):
    """Should add a single task to the agenda."""
    agenda.add(the_task)("arg1", kwarg1="value1")

    assert len(agenda) == 1
    tasks = list(agenda)
    assert tasks[0][0] == the_task
    assert tasks[0][1] == ("arg1",)
    assert tasks[0][2] == {"kwarg1": "value1"}


async def test_agenda_add_multiple_tasks(
    agenda: Agenda, the_task: AsyncMock, another_task: AsyncMock
):
    """Should add multiple tasks to the agenda."""
    agenda.add(the_task)("arg1")
    agenda.add(another_task)("arg2", key="value")
    agenda.add(the_task)("arg3")

    assert len(agenda) == 3
    tasks = list(agenda)
    assert tasks[0][0] == the_task
    assert tasks[1][0] == another_task
    assert tasks[2][0] == the_task


async def test_agenda_scatter_basic(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should scatter tasks evenly over the specified timeframe."""
    docket.register(the_task)

    # Add 3 tasks to scatter over 60 seconds
    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")
    agenda.add(the_task)("task3")

    start_time = datetime.now(timezone.utc)
    executions = await agenda.scatter(docket, over=timedelta(seconds=60))

    assert len(executions) == 3

    # Tasks should be scheduled at 0, 30, and 60 seconds
    expected_times = [
        start_time,
        start_time + timedelta(seconds=30),
        start_time + timedelta(seconds=60),
    ]

    for execution, expected_time in zip(executions, expected_times):
        # Allow 1 second tolerance for test execution time
        assert abs((execution.when - expected_time).total_seconds()) < 1


async def test_agenda_scatter_with_start_time(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should scatter tasks starting from a future time."""
    docket.register(the_task)

    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")

    start_time = datetime.now(timezone.utc) + timedelta(minutes=10)
    executions = await agenda.scatter(
        docket, start=start_time, over=timedelta(minutes=20)
    )

    assert len(executions) == 2

    # Tasks should be scheduled at start and start+20min
    assert abs((executions[0].when - start_time).total_seconds()) < 1
    assert (
        abs((executions[1].when - (start_time + timedelta(minutes=20))).total_seconds())
        < 1
    )


async def test_agenda_scatter_with_jitter(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should add random jitter to scheduled times."""
    docket.register(the_task)

    # Add many tasks to verify jitter is applied
    for i in range(5):
        agenda.add(the_task)(f"task{i}")

    start_time = datetime.now(timezone.utc)
    executions = await agenda.scatter(
        docket, over=timedelta(minutes=10), jitter=timedelta(seconds=30)
    )

    assert len(executions) == 5

    # Calculate expected base times (without jitter)
    base_times = [start_time + timedelta(minutes=i * 2.5) for i in range(5)]

    # Each task should be within ±30 seconds of its base time
    for execution, base_time in zip(executions, base_times):
        diff = abs((execution.when - base_time).total_seconds())
        assert diff <= 30


async def test_agenda_scatter_with_large_jitter(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should ensure jittered times never go before start even with large jitter."""
    docket.register(the_task)

    # Add tasks that will be scheduled close to start
    for i in range(3):
        agenda.add(the_task)(f"task{i}")

    start_time = datetime.now(timezone.utc)

    # Use a very large jitter (5 minutes) on a short window (1 minute)
    # This could potentially push times before start without our safety check
    executions = await agenda.scatter(
        docket, start=start_time, over=timedelta(minutes=1), jitter=timedelta(minutes=5)
    )

    assert len(executions) == 3

    # All scheduled times should be at or after start_time
    for execution in executions:
        assert execution.when >= start_time, (
            f"Task scheduled at {execution.when} is before start {start_time}"
        )


async def test_agenda_scatter_single_task(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should handle scattering a single task."""
    docket.register(the_task)

    agenda.add(the_task)("single")

    start_time = datetime.now(timezone.utc)
    executions = await agenda.scatter(docket, over=timedelta(minutes=10))

    assert len(executions) == 1
    # Single task should be scheduled in the middle of the window
    expected_time = start_time + timedelta(minutes=5)
    assert abs((executions[0].when - expected_time).total_seconds()) < 1


async def test_agenda_scatter_empty(docket: Docket, agenda: Agenda):
    """Should handle scattering an empty agenda."""
    executions = await agenda.scatter(docket, over=timedelta(minutes=10))
    assert executions == []


async def test_agenda_scatter_heterogeneous_tasks(
    docket: Docket, agenda: Agenda, the_task: AsyncMock, another_task: AsyncMock
):
    """Should scatter different types of tasks."""
    docket.register(the_task)
    docket.register(another_task)

    agenda.add(the_task)("task1", key="value1")
    agenda.add(another_task)(42, flag=True)
    agenda.add(the_task)("task2")
    agenda.add(another_task)(99)

    executions = await agenda.scatter(docket, over=timedelta(seconds=90))

    assert len(executions) == 4

    # Verify task types are preserved
    assert executions[0].function == the_task
    assert executions[1].function == another_task
    assert executions[2].function == the_task
    assert executions[3].function == another_task

    # Verify arguments are preserved
    assert executions[0].args == ("task1",)
    assert executions[0].kwargs == {"key": "value1"}
    assert executions[1].args == (42,)
    assert executions[1].kwargs == {"flag": True}


async def test_agenda_scatter_preserves_order(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should preserve task order when scattering."""
    docket.register(the_task)

    for i in range(10):
        agenda.add(the_task)(f"task{i}")

    executions = await agenda.scatter(docket, over=timedelta(minutes=10))

    assert len(executions) == 10

    # Tasks should be scheduled in the same order they were added
    for i, execution in enumerate(executions):
        assert execution.args == (f"task{i}",)

    # And times should be monotonically increasing
    for i in range(1, len(executions)):
        assert executions[i].when >= executions[i - 1].when


async def test_agenda_reusability(docket: Docket, agenda: Agenda, the_task: AsyncMock):
    """Agenda should be reusable for multiple scatter operations."""
    docket.register(the_task)

    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")

    # First scatter
    executions1 = await agenda.scatter(docket, over=timedelta(seconds=60))
    assert len(executions1) == 2

    # Second scatter with different timing
    start_time = datetime.now(timezone.utc) + timedelta(hours=1)
    executions2 = await agenda.scatter(
        docket, start=start_time, over=timedelta(minutes=30)
    )
    assert len(executions2) == 2

    # Executions should be different instances with different times
    assert executions1[0].when != executions2[0].when
    assert executions1[1].when != executions2[1].when


async def test_agenda_scatter_requires_over_parameter(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should raise error if 'over' parameter is not provided."""
    docket.register(the_task)

    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")

    with pytest.raises(
        TypeError, match="missing 1 required positional argument: 'over'"
    ):
        await agenda.scatter(docket)  # type: ignore[call-arg]


async def test_agenda_scatter_with_task_by_name(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should support adding tasks by name."""
    docket.register(the_task)

    # Add task by its registered name
    agenda.add("the_task")("arg1", key="value")

    executions = await agenda.scatter(docket, over=timedelta(seconds=60))

    assert len(executions) == 1
    assert executions[0].function == the_task
    assert executions[0].args == ("arg1",)
    assert executions[0].kwargs == {"key": "value"}


async def test_agenda_scatter_with_non_positive_over_parameter(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should raise ValueError if 'over' parameter is not positive."""
    docket.register(the_task)

    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")

    # Test with zero duration
    with pytest.raises(
        ValueError, match="'over' parameter must be a positive duration"
    ):
        await agenda.scatter(docket, over=timedelta(seconds=0))

    # Test with negative duration
    with pytest.raises(
        ValueError, match="'over' parameter must be a positive duration"
    ):
        await agenda.scatter(docket, over=timedelta(seconds=-60))


async def test_agenda_scatter_partial_scheduling_behavior(
    docket: Docket, agenda: Agenda, the_task: AsyncMock, another_task: AsyncMock
):
    """Documents the partial scheduling behavior when failures occur."""
    docket.register(the_task)
    # Don't register another_task initially

    # Test validation failure - unregistered task fails fast before any scheduling
    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")
    agenda.add("unregistered_task")("will_fail")  # This will fail validation
    agenda.add(the_task)("task3")

    # The scatter should fail during validation before scheduling anything
    with pytest.raises(KeyError, match="Task 'unregistered_task' is not registered"):
        await agenda.scatter(docket, over=timedelta(seconds=60))

    # Verify no tasks were scheduled (failed during validation)
    snapshot = await docket.snapshot()
    assert len(snapshot.future) == 0

    # Test successful case with all registered tasks
    agenda2 = Agenda()
    docket.register(another_task)

    agenda2.add(the_task)("task1")
    agenda2.add(the_task)("task2")
    agenda2.add(another_task)("task3")
    agenda2.add(the_task)("task4")

    # All tasks should be scheduled successfully
    executions = await agenda2.scatter(docket, over=timedelta(seconds=60))
    assert len(executions) == 4

    # Verify all tasks are in the docket
    snapshot = await docket.snapshot()
    assert len(snapshot.future) == 4

    # Clear for next test
    await docket.clear()

    # Test partial failure during scheduling - earlier tasks remain scheduled
    agenda3 = Agenda()
    agenda3.add(the_task)("task1")
    agenda3.add(the_task)("task2")
    agenda3.add(the_task)("task3")

    call_count = 0
    original_add = docket.add

    def failing_add(*args: Any, **kwargs: Any) -> Any:
        nonlocal call_count
        call_count += 1
        if call_count == 2:
            # Fail on the second task
            raise RuntimeError("Simulated scheduling failure")
        return original_add(*args, **kwargs)

    with patch.object(docket, "add", side_effect=failing_add):
        with pytest.raises(RuntimeError, match="Simulated scheduling failure"):
            await agenda3.scatter(docket, over=timedelta(seconds=60))

    # The first task should have been scheduled successfully before the failure
    snapshot = await docket.snapshot()
    assert len(snapshot.future) == 1  # First task was scheduled


async def test_agenda_scatter_auto_registers_unregistered_functions(
    docket: Docket, agenda: Agenda, the_task: AsyncMock
):
    """Should automatically register task functions that aren't already registered."""
    # the_task is NOT registered yet
    assert the_task not in docket.tasks.values()

    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")

    # scatter should auto-register the task
    executions = await agenda.scatter(docket, over=timedelta(seconds=30))
    assert len(executions) == 2

    # Now the task should be registered
    assert the_task in docket.tasks.values()

    # Verify tasks were scheduled
    snapshot = await docket.snapshot()
    assert len(snapshot.future) == 2


async def test_agenda_clear(agenda: Agenda, the_task: AsyncMock):
    """Should support clearing all tasks from agenda."""
    agenda.add(the_task)("task1")
    agenda.add(the_task)("task2")

    assert len(agenda) == 2

    agenda.clear()

    assert len(agenda) == 0
    assert list(agenda) == []
```
{pydocket-0.11.0 → pydocket-0.11.1}/tests/test_instrumentation.py

```diff
@@ -13,7 +13,7 @@ from opentelemetry.sdk.trace import Span, TracerProvider
 from opentelemetry.trace import StatusCode
 
 from docket import Docket, Worker
-from docket.dependencies import Retry
+from docket.dependencies import Perpetual, Retry
 from docket.instrumentation import (
     healthcheck_server,
     message_getter,
@@ -376,6 +376,14 @@ def TASKS_RETRIED(monkeypatch: pytest.MonkeyPatch) -> Mock:
     return mock
 
 
+@pytest.fixture
+def TASKS_PERPETUATED(monkeypatch: pytest.MonkeyPatch) -> Mock:
+    """Mock for the TASKS_PERPETUATED counter."""
+    mock = Mock(spec=Counter.add)
+    monkeypatch.setattr("docket.instrumentation.TASKS_PERPETUATED.add", mock)
+    return mock
+
+
 @pytest.fixture
 def TASKS_REDELIVERED(monkeypatch: pytest.MonkeyPatch) -> Mock:
     """Mock for the TASKS_REDELIVERED counter."""
@@ -492,6 +500,58 @@ async def test_exhausted_retried_task_increments_retry_counter(
     TASKS_REDELIVERED.assert_not_called()
 
 
+async def test_retried_task_metric_uses_bounded_labels(
+    docket: Docket,
+    worker: Worker,
+    worker_labels: dict[str, str],
+    TASKS_RETRIED: Mock,
+):
+    """TASKS_RETRIED should only use bounded-cardinality labels (not task keys)."""
+
+    async def the_task(retry: Retry = Retry(attempts=2)):
+        raise ValueError("Always fails")
+
+    await docket.add(the_task)()
+    await worker.run_until_finished()
+
+    assert TASKS_RETRIED.call_count == 1
+    call_labels = TASKS_RETRIED.call_args.args[1]
+
+    assert "docket.name" in call_labels
+    assert "docket.worker" in call_labels
+    assert "docket.task" in call_labels
+    assert "docket.key" not in call_labels
+    assert "docket.when" not in call_labels
+    assert "docket.attempt" not in call_labels
+
+
+async def test_perpetuated_task_metric_uses_bounded_labels(
+    docket: Docket,
+    worker: Worker,
+    worker_labels: dict[str, str],
+    TASKS_PERPETUATED: Mock,
+):
+    """TASKS_PERPETUATED should only use bounded-cardinality labels (not task keys)."""
+
+    async def the_task(
+        perpetual: Perpetual = Perpetual(every=timedelta(milliseconds=50)),
+    ):
+        pass
+
+    execution = await docket.add(the_task)()
+    await worker.run_at_most({execution.key: 2})
+
+    assert TASKS_PERPETUATED.call_count >= 1
+    call_labels = TASKS_PERPETUATED.call_args.args[1]
+
+    assert "docket.name" in call_labels
+    assert "docket.worker" in call_labels
+    assert "docket.task" in call_labels
+    assert "docket.key" not in call_labels
+    assert "docket.when" not in call_labels
+    assert "docket.attempt" not in call_labels
+
+
 async def test_redelivered_tasks_increment_redelivered_counter(
     docket: Docket,
     worker_labels: dict[str, str],
```