pydocket 0.10.0__tar.gz → 0.11.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydocket-0.11.1/.github/workflows/claude-code-review.yml +40 -0
- pydocket-0.11.1/.github/workflows/claude.yml +42 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/PKG-INFO +1 -1
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/advanced-patterns.md +132 -0
- pydocket-0.11.1/examples/agenda_scatter.py +128 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/__init__.py +2 -0
- pydocket-0.11.1/src/docket/agenda.py +201 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/docket.py +161 -36
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/execution.py +3 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/instrumentation.py +6 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/worker.py +18 -6
- pydocket-0.11.1/tests/test_agenda.py +404 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_fundamentals.py +25 -8
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_instrumentation.py +111 -1
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_worker.py +342 -13
- {pydocket-0.10.0 → pydocket-0.11.1}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.github/codecov.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.github/workflows/ci.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.github/workflows/docs.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.github/workflows/publish.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.gitignore +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/.pre-commit-config.yaml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/CLAUDE.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/LICENSE +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/README.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/chaos/README.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/chaos/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/chaos/driver.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/chaos/producer.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/chaos/run +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/chaos/tasks.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/api-reference.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/dependencies.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/getting-started.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/index.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/production.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/docs/testing.md +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/examples/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/examples/common.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/examples/concurrency_control.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/examples/find_and_flood.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/examples/self_perpetuating.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/mkdocs.yml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/pyproject.toml +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/__main__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/annotations.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/cli.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/dependencies.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/py.typed +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/src/docket/tasks.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/telemetry/.gitignore +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/telemetry/start +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/telemetry/stop +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/__init__.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/conftest.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_clear.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_module.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_striking.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_version.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_worker.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/cli/test_workers.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/conftest.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_concurrency_basic.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_concurrency_control.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_concurrency_refresh.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_dependencies.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_docket.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_execution.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/tests/test_striking.py +0 -0
- {pydocket-0.10.0 → pydocket-0.11.1}/uv.lock +0 -0
pydocket-0.11.1/.github/workflows/claude-code-review.yml
@@ -0,0 +1,40 @@
+name: Claude Code Review
+
+on:
+  pull_request:
+    types: [opened, synchronize]
+
+jobs:
+  claude-review:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: read
+      issues: read
+      id-token: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+
+      - name: Run Claude Code Review
+        id: claude-review
+        uses: anthropics/claude-code-action@beta
+        with:
+          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+          model: "claude-opus-4-1-20250805"
+
+          # Direct prompt for automated review (no @claude mention needed)
+          direct_prompt: |
+            Please review this pull request and provide feedback on:
+            - Code quality and best practices
+            - Potential bugs or issues
+            - Performance considerations
+            - Security concerns
+            - Test coverage, which must be maintained at 100% for this project
+
+            Be constructive and helpful in your feedback.
+
+          use_sticky_comment: true
pydocket-0.11.1/.github/workflows/claude.yml
@@ -0,0 +1,42 @@
+name: Claude Code
+
+on:
+  issue_comment:
+    types: [created]
+  pull_request_review_comment:
+    types: [created]
+  issues:
+    types: [opened, assigned]
+  pull_request_review:
+    types: [submitted]
+
+jobs:
+  claude:
+    if: |
+      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
+      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
+      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
+      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: read
+      issues: read
+      id-token: write
+      actions: read # Required for Claude to read CI results on PRs
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+
+      - name: Run Claude Code
+        id: claude
+        uses: anthropics/claude-code-action@beta
+        with:
+          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+
+          additional_permissions: |
+            actions: read
+
+          model: "claude-opus-4-1-20250805"
{pydocket-0.10.0 → pydocket-0.11.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.10.0
+Version: 0.11.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
{pydocket-0.10.0 → pydocket-0.11.1}/docs/advanced-patterns.md
@@ -140,6 +140,138 @@ async def process_single_order(order_id: int) -> None:
 
 This pattern separates discovery (finding work) from execution (doing work), allowing for better load distribution and fault isolation. The perpetual task stays lightweight and fast, while the actual work is distributed across many workers.
 
+## Task Scattering with Agenda
+
+For "find-and-flood" workloads, you often want to distribute a batch of tasks over time rather than scheduling them all immediately. The `Agenda` class collects related tasks and scatters them evenly across a time window.
+
+### Basic Scattering
+
+```python
+from datetime import timedelta
+from docket import Agenda, Docket
+
+async def process_item(item_id: int) -> None:
+    await perform_expensive_operation(item_id)
+    await update_database(item_id)
+
+async with Docket() as docket:
+    # Build an agenda of tasks
+    agenda = Agenda()
+    for item_id in range(1, 101):  # 100 items to process
+        agenda.add(process_item)(item_id)
+
+    # Scatter them evenly over 50 minutes to avoid overwhelming the system
+    executions = await agenda.scatter(docket, over=timedelta(minutes=50))
+    print(f"Scheduled {len(executions)} tasks over 50 minutes")
+```
+
+Tasks are distributed evenly across the time window. For 100 tasks over 50 minutes, they'll be scheduled approximately 30 seconds apart.
+
+### Jitter for Thundering Herd Prevention
+
+Add random jitter to prevent multiple processes from scheduling identical work at exactly the same times:
+
+```python
+# Scatter with ±30 second jitter around each scheduled time
+await agenda.scatter(
+    docket,
+    over=timedelta(minutes=50),
+    jitter=timedelta(seconds=30)
+)
+```
+
+### Future Scatter Windows
+
+Schedule the entire batch to start at a specific time in the future:
+
+```python
+from datetime import datetime, timezone
+
+# Start scattering in 2 hours, spread over 30 minutes
+start_time = datetime.now(timezone.utc) + timedelta(hours=2)
+await agenda.scatter(
+    docket,
+    start=start_time,
+    over=timedelta(minutes=30)
+)
+```
+
+### Mixed Task Types
+
+Agendas can contain different types of tasks:
+
+```python
+async def send_email(user_id: str, template: str) -> None:
+    await email_service.send(user_id, template)
+
+async def update_analytics(event_data: dict[str, str]) -> None:
+    await analytics_service.track(event_data)
+
+# Create a mixed agenda
+agenda = Agenda()
+agenda.add(process_item)(item_id=1001)
+agenda.add(send_email)("user123", "welcome")
+agenda.add(update_analytics)({"event": "signup", "user": "user123"})
+agenda.add(process_item)(item_id=1002)
+
+# All tasks will be scattered in the order they were added
+await agenda.scatter(docket, over=timedelta(minutes=10))
+```
+
+### Single Task Positioning
+
+When scattering a single task, it's positioned at the midpoint of the time window:
+
+```python
+agenda = Agenda()
+agenda.add(process_item)(item_id=42)
+
+# This task will be scheduled 5 minutes from now (middle of 10-minute window)
+await agenda.scatter(docket, over=timedelta(minutes=10))
+```
+
+### Agenda Reusability
+
+Agendas can be reused for multiple scatter operations:
+
+```python
+# Create a reusable template
+daily_cleanup_agenda = Agenda()
+daily_cleanup_agenda.add(cleanup_temp_files)()
+daily_cleanup_agenda.add(compress_old_logs)()
+daily_cleanup_agenda.add(update_metrics)()
+
+# Use it multiple times with different timing
+await daily_cleanup_agenda.scatter(docket, over=timedelta(hours=1))
+
+# Later, scatter the same tasks over a different window
+tomorrow = datetime.now(timezone.utc) + timedelta(days=1)
+await daily_cleanup_agenda.scatter(
+    docket,
+    start=tomorrow,
+    over=timedelta(minutes=30)
+)
+```
+
+### Failure Behavior
+
+Keep in mind that, if an error occurs during scheduling, some tasks may have already been scheduled successfully:
+
+```python
+agenda = Agenda()
+agenda.add(valid_task)("arg1")
+agenda.add(valid_task)("arg2")
+agenda.add("nonexistent_task")("arg3")  # This will cause an error
+agenda.add(valid_task)("arg4")
+
+try:
+    await agenda.scatter(docket, over=timedelta(minutes=10))
+except KeyError:
+    # The first two tasks were scheduled successfully
+    # The error prevented the fourth task from being scheduled
+    pass
+```
+
 ## Striking and Restoring Tasks
 
 Striking allows you to temporarily disable tasks without redeploying code. This is invaluable for incident response, gradual rollouts, or handling problematic customers.
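The "approximately 30 seconds apart" figure in the docs section above follows from the even-distribution rule that the new `src/docket/agenda.py` (later in this diff) applies: `interval = over / (task_count - 1)`. A minimal standalone check of that arithmetic, using only the standard library (illustrative, not part of the package):

```python
from datetime import timedelta

# Even spacing as implemented in the new Agenda.scatter:
# n tasks are placed at n points from start to start + over, inclusive.
over = timedelta(minutes=50)
task_count = 100
interval = over / (task_count - 1)
print(interval.total_seconds())  # ~30.3 seconds between consecutive tasks
```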
pydocket-0.11.1/examples/agenda_scatter.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+"""
+Example demonstrating the Agenda scatter functionality for rate-limited workloads.
+
+This example shows a real-world scenario: sending bulk notifications while respecting
+rate limits to avoid overwhelming your notification service or triggering spam filters.
+
+Without scatter: All 26 notifications would try to send immediately, potentially:
+- Overwhelming your notification service
+- Triggering rate limits or spam detection
+- Creating a poor user experience with delayed/failed sends
+
+With scatter: Notifications are distributed evenly over time, respecting limits.
+"""
+
+import asyncio
+import logging
+from datetime import datetime, timedelta, timezone
+
+from docket import Agenda, CurrentExecution, Docket, Execution, Worker
+
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+async def send_notification(
+    user: str, message: str, execution: Execution = CurrentExecution()
+) -> None:
+    """Send a notification to a user."""
+    delay = (execution.when - datetime.now(timezone.utc)).total_seconds()
+    if delay > 0.1:
+        logger.info(f"📅 Notification for {user} scheduled {delay:.1f}s from now")
+    else:
+        logger.info(f"📧 Sending to {user}: '{message}'")
+        # Simulate API call to notification service
+        await asyncio.sleep(0.2)
+        logger.info(f"✓ Delivered to {user}")
+
+
+async def main() -> None:
+    """Demonstrate scatter for rate-limited notification sending."""
+
+    async with Docket(name="notification-scatter") as docket:
+        docket.register(send_notification)
+
+        logger.info("=== Bulk Notification Campaign ===")
+        logger.info("Scenario: Alert 26 users about a flash sale")
+        logger.info("Constraint: Notification service allows max 30 messages/minute")
+        logger.info("Strategy: Scatter over 60 seconds (~1 message every 2.3 seconds)")
+        logger.info("")
+
+        # Build the list of users to notify (e.g., from a database query)
+        users = [
+            "alice@example.com",
+            "bob@example.com",
+            "charlie@example.com",
+            "diana@example.com",
+            "eve@example.com",
+            "frank@example.com",
+            "grace@example.com",
+            "henry@example.com",
+            "iris@example.com",
+            "jack@example.com",
+            "kate@example.com",
+            "liam@example.com",
+            "maya@example.com",
+            "noah@example.com",
+            "olivia@example.com",
+            "peter@example.com",
+            "quinn@example.com",
+            "ruby@example.com",
+            "sam@example.com",
+            "tara@example.com",
+            "uma@example.com",
+            "victor@example.com",
+            "wendy@example.com",
+            "xavier@example.com",
+            "yara@example.com",
+            "zoe@example.com",
+        ]
+
+        agenda = Agenda()
+
+        # Queue all notifications
+        logger.info(f"📋 Preparing notifications for {len(users)} users...")
+        for user in users:
+            agenda.add(send_notification)(user, "Flash Sale: 50% off for next hour!")
+
+        # Scatter over 60 seconds to respect rate limit
+        logger.info("🎯 Scattering notifications over 60 seconds...")
+        logger.info("")
+
+        executions = await agenda.scatter(
+            docket,
+            over=timedelta(seconds=60),
+            jitter=timedelta(seconds=0.5),  # Small jitter for natural spacing
+        )
+
+        # Show the distribution preview
+        first_three = executions[:3]
+        last_three = executions[-3:]
+        for i, exec in enumerate(first_three, 1):
+            delay = (exec.when - datetime.now(timezone.utc)).total_seconds()
+            logger.info(f"  Message #{i} scheduled for +{delay:.1f}s")
+        logger.info(f"  ... {len(executions) - 6} more evenly distributed ...")
+        for i, exec in enumerate(last_three, len(executions) - 2):
+            delay = (exec.when - datetime.now(timezone.utc)).total_seconds()
+            logger.info(f"  Message #{i} scheduled for +{delay:.1f}s")
+        logger.info("")
+
+        # Run worker to process the scattered notifications
+        logger.info("🚀 Starting notification sender...")
+        logger.info("   Watch how notifications flow steadily, not in a flood!")
+        logger.info("")
+
+        start_time = datetime.now(timezone.utc)
+        async with Worker(docket, concurrency=2) as worker:
+            await worker.run_until_finished()
+
+        elapsed = (datetime.now(timezone.utc) - start_time).total_seconds()
+        logger.info("")
+        logger.info(f"✅ All {len(users)} notifications sent in {elapsed:.1f} seconds")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
{pydocket-0.10.0 → pydocket-0.11.1}/src/docket/__init__.py
@@ -8,6 +8,7 @@ from importlib.metadata import version
 
 __version__ = version("pydocket")
 
+from .agenda import Agenda
 from .annotations import Logged
 from .dependencies import (
     ConcurrencyLimit,
@@ -29,6 +30,7 @@ from .worker import Worker
 
 __all__ = [
     "__version__",
+    "Agenda",
     "ConcurrencyLimit",
    "CurrentDocket",
    "CurrentExecution",
pydocket-0.11.1/src/docket/agenda.py
@@ -0,0 +1,201 @@
+"""
+Agenda - A collection of tasks that can be scheduled together.
+
+The Agenda class provides a way to collect multiple tasks and then scatter them
+evenly over a time period to avoid overwhelming the system with immediate work.
+"""
+
+import random
+from datetime import datetime, timedelta, timezone
+from typing import Any, Awaitable, Callable, Iterator, ParamSpec, TypeVar, overload
+
+from uuid_extensions import uuid7
+
+from .docket import Docket
+from .execution import Execution, TaskFunction
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+class Agenda:
+    """A collection of tasks to be scheduled together on a Docket.
+
+    The Agenda allows you to build up a collection of tasks with their arguments,
+    then schedule them all at once using various timing strategies like scattering.
+
+    Example:
+        >>> agenda = Agenda()
+        >>> agenda.add(process_item)(item1)
+        >>> agenda.add(process_item)(item2)
+        >>> agenda.add(send_email)(email)
+        >>> await agenda.scatter(docket, over=timedelta(minutes=50))
+    """
+
+    def __init__(self) -> None:
+        """Initialize an empty Agenda."""
+        self._tasks: list[
+            tuple[TaskFunction | str, tuple[Any, ...], dict[str, Any]]
+        ] = []
+
+    def __len__(self) -> int:
+        """Return the number of tasks in the agenda."""
+        return len(self._tasks)
+
+    def __iter__(
+        self,
+    ) -> Iterator[tuple[TaskFunction | str, tuple[Any, ...], dict[str, Any]]]:
+        """Iterate over tasks in the agenda."""
+        return iter(self._tasks)
+
+    @overload
+    def add(
+        self,
+        function: Callable[P, Awaitable[R]],
+    ) -> Callable[P, None]:
+        """Add a task function to the agenda.
+
+        Args:
+            function: The task function to add.
+
+        Returns:
+            A callable that accepts the task arguments.
+        """
+
+    @overload
+    def add(
+        self,
+        function: str,
+    ) -> Callable[..., None]:
+        """Add a task by name to the agenda.
+
+        Args:
+            function: The name of a registered task.
+
+        Returns:
+            A callable that accepts the task arguments.
+        """
+
+    def add(
+        self,
+        function: Callable[P, Awaitable[R]] | str,
+    ) -> Callable[..., None]:
+        """Add a task to the agenda.
+
+        Args:
+            function: The task function or name to add.
+
+        Returns:
+            A callable that accepts the task arguments and adds them to the agenda.
+        """
+
+        def scheduler(*args: Any, **kwargs: Any) -> None:
+            self._tasks.append((function, args, kwargs))
+
+        return scheduler
+
+    def clear(self) -> None:
+        """Clear all tasks from the agenda."""
+        self._tasks.clear()
+
+    async def scatter(
+        self,
+        docket: Docket,
+        over: timedelta,
+        start: datetime | None = None,
+        jitter: timedelta | None = None,
+    ) -> list[Execution]:
+        """Scatter the tasks in this agenda over a time period.
+
+        Tasks are distributed evenly across the specified time window,
+        optionally with random jitter to prevent thundering herd effects.
+
+        If an error occurs during scheduling, some tasks may have already been
+        scheduled successfully before the failure occurred.
+
+        Args:
+            docket: The Docket to schedule tasks on.
+            over: Time period to scatter tasks over (required).
+            start: When to start scattering from. Defaults to now.
+            jitter: Maximum random offset to add/subtract from each scheduled time.
+
+        Returns:
+            List of Execution objects for the scheduled tasks.
+
+        Raises:
+            KeyError: If any task name is not registered with the docket.
+            ValueError: If any task is stricken or 'over' is not positive.
+        """
+        if over.total_seconds() <= 0:
+            raise ValueError("'over' parameter must be a positive duration")
+
+        if not self._tasks:
+            return []
+
+        if start is None:
+            start = datetime.now(timezone.utc)
+
+        # Calculate even distribution over the time period
+        task_count = len(self._tasks)
+
+        if task_count == 1:
+            # Single task goes in the middle of the window
+            schedule_times = [start + over / 2]
+        else:
+            # Distribute tasks evenly across the window
+            # For n tasks, we want n points from start to start+over inclusive
+            interval = over / (task_count - 1)
+            schedule_times = [start + interval * i for i in range(task_count)]
+
+        # Apply jitter if specified
+        if jitter:
+            jittered_times: list[datetime] = []
+            for schedule_time in schedule_times:
+                # Random offset between -jitter and +jitter
+                offset = timedelta(
+                    seconds=random.uniform(
+                        -jitter.total_seconds(), jitter.total_seconds()
+                    )
+                )
+                # Ensure the jittered time doesn't go before start
+                jittered_time = max(schedule_time + offset, start)
+                jittered_times.append(jittered_time)
+            schedule_times = jittered_times
+
+        # Build all Execution objects first, validating as we go
+        executions: list[Execution] = []
+        for (task_func, args, kwargs), schedule_time in zip(
+            self._tasks, schedule_times
+        ):
+            # Resolve task function if given by name
+            if isinstance(task_func, str):
+                if task_func not in docket.tasks:
+                    raise KeyError(f"Task '{task_func}' is not registered")
+                resolved_func = docket.tasks[task_func]
+            else:
+                # Ensure task is registered
+                if task_func not in docket.tasks.values():
+                    docket.register(task_func)
+                resolved_func = task_func
+
+            # Create execution with unique key
+            key = str(uuid7())
+            execution = Execution(
+                function=resolved_func,
+                args=args,
+                kwargs=kwargs,
+                when=schedule_time,
+                key=key,
+                attempt=1,
+            )
+            executions.append(execution)
+
+        # Schedule all tasks - if any fail, some tasks may have been scheduled
+        for execution in executions:
+            scheduler = docket.add(
+                execution.function, when=execution.when, key=execution.key
+            )
+            # Actually schedule the task - if this fails, earlier tasks remain scheduled
+            await scheduler(*execution.args, **execution.kwargs)
+
+        return executions
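Putting the pieces of this release together, the new `Agenda` API composes with the existing `Docket` and `Worker` roughly as in the following minimal sketch, condensed from the docs and example shown above; the task body and docket name are illustrative placeholders rather than anything shipped in the package:

```python
import asyncio
from datetime import timedelta

from docket import Agenda, Docket, Worker


async def process_item(item_id: int) -> None:
    # Placeholder task body for illustration only.
    print(f"processing {item_id}")


async def main() -> None:
    async with Docket(name="agenda-demo") as docket:
        docket.register(process_item)

        # Collect a batch of tasks, then scatter them over a one-minute window
        # with a little jitter, as the new docs section describes.
        agenda = Agenda()
        for item_id in range(10):
            agenda.add(process_item)(item_id)

        executions = await agenda.scatter(
            docket, over=timedelta(minutes=1), jitter=timedelta(seconds=2)
        )
        print(f"scheduled {len(executions)} tasks")

        async with Worker(docket, concurrency=2) as worker:
            await worker.run_until_finished()


if __name__ == "__main__":
    asyncio.run(main())
```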