pydocket 0.11.0__tar.gz → 0.12.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydocket might be problematic.
- pydocket-0.12.0/.coveragerc-memory +10 -0
- pydocket-0.12.0/.github/workflows/ci.yml +107 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.github/workflows/claude-code-review.yml +1 -1
- {pydocket-0.11.0 → pydocket-0.12.0}/.github/workflows/claude.yml +1 -1
- {pydocket-0.11.0 → pydocket-0.12.0}/.gitignore +2 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/CLAUDE.md +1 -3
- {pydocket-0.11.0 → pydocket-0.12.0}/PKG-INFO +21 -3
- {pydocket-0.11.0 → pydocket-0.12.0}/README.md +14 -1
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/advanced-patterns.md +132 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/dependencies.md +135 -40
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/getting-started.md +1 -1
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/testing.md +70 -0
- pydocket-0.12.0/examples/agenda_scatter.py +128 -0
- pydocket-0.12.0/examples/fastapi_background_tasks.py +204 -0
- pydocket-0.12.0/examples/local_development.py +98 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/pyproject.toml +26 -4
- pydocket-0.12.0/sitecustomize.py +7 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/__init__.py +2 -0
- pydocket-0.12.0/src/docket/agenda.py +201 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/annotations.py +3 -1
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/cli.py +35 -8
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/dependencies.py +83 -22
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/docket.py +29 -4
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/execution.py +10 -11
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/instrumentation.py +8 -1
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/tasks.py +2 -2
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/worker.py +31 -21
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_clear.py +71 -122
- pydocket-0.12.0/tests/cli/test_module.py +10 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_parsing.py +7 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_snapshot.py +132 -118
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_striking.py +78 -108
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_tasks.py +32 -45
- pydocket-0.12.0/tests/cli/test_url_validation.py +59 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_version.py +5 -6
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_worker.py +45 -68
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/test_workers.py +27 -29
- pydocket-0.12.0/tests/cli/utils.py +53 -0
- pydocket-0.12.0/tests/conftest.py +209 -0
- pydocket-0.12.0/tests/test_agenda.py +404 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_dependencies.py +189 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_fundamentals.py +222 -4
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_instrumentation.py +61 -1
- pydocket-0.12.0/tests/test_memory_backend.py +113 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_worker.py +12 -0
- pydocket-0.12.0/uv.lock +2212 -0
- pydocket-0.11.0/.github/workflows/ci.yml +0 -65
- pydocket-0.11.0/tests/cli/conftest.py +0 -8
- pydocket-0.11.0/tests/cli/test_module.py +0 -22
- pydocket-0.11.0/tests/conftest.py +0 -180
- pydocket-0.11.0/uv.lock +0 -1444
- {pydocket-0.11.0 → pydocket-0.12.0}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.github/codecov.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.github/workflows/docs.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.github/workflows/publish.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/.pre-commit-config.yaml +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/LICENSE +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/chaos/README.md +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/chaos/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/chaos/driver.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/chaos/producer.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/chaos/run +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/chaos/tasks.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/api-reference.md +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/index.md +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/docs/production.md +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/examples/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/examples/common.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/examples/concurrency_control.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/examples/find_and_flood.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/examples/self_perpetuating.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/mkdocs.yml +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/__main__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/src/docket/py.typed +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/telemetry/.gitignore +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/telemetry/start +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/telemetry/stop +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/cli/__init__.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_concurrency_basic.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_concurrency_control.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_concurrency_refresh.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_docket.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_execution.py +0 -0
- {pydocket-0.11.0 → pydocket-0.12.0}/tests/test_striking.py +0 -0
pydocket-0.12.0/.github/workflows/ci.yml
@@ -0,0 +1,107 @@
+name: Docket CI
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+  workflow_call:
+
+jobs:
+  test:
+    name: Test Python ${{ matrix.python-version }}, ${{ matrix.backend.name }}
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
+        backend:
+          - name: "Redis 6.2, redis-py <5"
+            redis-version: "6.2"
+            redis-py-version: ">=4.6,<5"
+          - name: "Redis 7.4, redis-py >=5"
+            redis-version: "7.4"
+            redis-py-version: ">=5"
+          - name: "Valkey 8.0, redis-py >=5"
+            redis-version: "valkey-8.0"
+            redis-py-version: ">=5"
+          - name: "Memory (in-memory backend)"
+            redis-version: "memory"
+            redis-py-version: ">=5"
+        exclude:
+          # Python 3.10 + Redis 6.2 + redis-py <5 combination is skipped
+          - python-version: "3.10"
+            backend:
+              name: "Redis 6.2, redis-py <5"
+              redis-version: "6.2"
+              redis-py-version: ">=4.6,<5"
+        include:
+          - python-version: "3.10"
+            cov-threshold: 100
+            pytest-args: ""
+          # Python 3.11 coverage reporting is unstable, so use 98% threshold
+          - python-version: "3.11"
+            cov-threshold: 98
+            pytest-args: ""
+          - python-version: "3.12"
+            cov-threshold: 100
+            pytest-args: ""
+          - python-version: "3.13"
+            cov-threshold: 100
+            pytest-args: ""
+          - python-version: "3.14"
+            cov-threshold: 100
+            pytest-args: ""
+          # Memory backend: CLI tests are skipped via pytest skip markers because
+          # CLI rejects memory:// URLs. Use separate coverage config to exclude CLI.
+          - backend:
+              name: "Memory (in-memory backend)"
+              redis-version: "memory"
+              redis-py-version: ">=5"
+            cov-threshold: 98 # CLI tests are excluded from coverage and some lines are only covered by CLI tests
+            pytest-args: "--cov-config=.coveragerc-memory"
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install uv and set Python version
+        uses: astral-sh/setup-uv@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+
+      - name: Install dependencies
+        run: uv sync --dev --upgrade-package 'redis${{ matrix.backend.redis-py-version }}'
+
+      - name: Run tests
+        env:
+          REDIS_VERSION: ${{ matrix.backend.redis-version }}
+        run: uv run pytest --cov-branch --cov-fail-under=${{ matrix.cov-threshold }} --cov-report=xml --cov-report=term-missing:skip-covered ${{ matrix.pytest-args }}
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          flags: python-${{ matrix.python-version }}
+
+  pre-commit:
+    name: Pre-commit checks
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install uv and set Python version
+        uses: astral-sh/setup-uv@v5
+        with:
+          python-version: "3.10"
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+
+      - name: Install dependencies
+        run: |
+          uv sync --dev
+          uv pip install pip
+
+      - uses: pre-commit/action@v3.0.1
+        with:
+          extra_args: --all-files
{pydocket-0.11.0 → pydocket-0.12.0}/.github/workflows/claude-code-review.yml
@@ -23,7 +23,7 @@ jobs:
         id: claude-review
         uses: anthropics/claude-code-action@beta
         with:
-
+          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
           model: "claude-opus-4-1-20250805"

           # Direct prompt for automated review (no @claude mention needed)
{pydocket-0.11.0 → pydocket-0.12.0}/CLAUDE.md
@@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co

 **Docket** (`pydocket` on PyPI) is a distributed background task system for Python functions with Redis-backed persistence. It enables scheduling both immediate and future work with comprehensive dependency injection, retry mechanisms, and fault tolerance.

-**Key Requirements**: Python 3.
+**Key Requirements**: Python 3.10+, Redis 6.2+ or Valkey 8.0+

 ## Development Commands

@@ -58,7 +58,6 @@ pre-commit install
 ### Key Classes

 - **`Docket`** (`src/docket/docket.py`): Central task registry and scheduler
-
   - `add()`: Schedule tasks for execution
   - `replace()`: Replace existing scheduled tasks
   - `cancel()`: Cancel pending tasks
@@ -66,7 +65,6 @@ pre-commit install
   - `snapshot()`: Get current state for observability

 - **`Worker`** (`src/docket/worker.py`): Task execution engine
-
   - `run_forever()`/`run_until_finished()`: Main execution loops
   - Handles concurrency, retries, and dependency injection
   - Maintains heartbeat for liveness tracking
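To see how the `Docket` methods listed above fit together, here is a minimal scheduling sketch in the spirit of the package's README quickstart; the `greet` task and its arguments are illustrative and not part of this diff:

```python
from datetime import datetime, timedelta, timezone

from docket import Docket


async def greet(name: str, greeting: str = "Hello") -> None:
    print(f"{greeting}, {name}!")


async def schedule_greetings() -> None:
    async with Docket() as docket:
        # add(): schedule a task for immediate execution
        await docket.add(greet)("Jane")

        # add(when=...): schedule the same task for a future moment
        soon = datetime.now(timezone.utc) + timedelta(seconds=3)
        await docket.add(greet, when=soon)("John", greeting="Howdy")
```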
{pydocket-0.11.0 → pydocket-0.12.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.11.0
+Version: 0.12.0
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
@@ -19,11 +19,15 @@ Classifier: Development Status :: 4 - Beta
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Typing :: Typed
-Requires-Python: >=3.
+Requires-Python: >=3.10
 Requires-Dist: cloudpickle>=3.1.1
+Requires-Dist: exceptiongroup>=1.2.0; python_version < '3.11'
 Requires-Dist: opentelemetry-api>=1.30.0
 Requires-Dist: opentelemetry-exporter-prometheus>=0.51b0
 Requires-Dist: prometheus-client>=0.21.1
@@ -31,6 +35,7 @@ Requires-Dist: python-json-logger>=3.2.1
 Requires-Dist: redis>=4.6
 Requires-Dist: rich>=13.9.4
 Requires-Dist: typer>=0.15.1
+Requires-Dist: typing-extensions>=4.12.0
 Requires-Dist: uuid7>=0.1.0
 Description-Content-Type: text/markdown

@@ -69,6 +74,7 @@ from docket import Docket, Worker

 async with Docket() as docket:
     async with Worker(docket) as worker:
+        worker.register(greet)
         await worker.run_until_finished()
 ```

@@ -98,7 +104,7 @@ reference](https://chrisguidry.github.io/docket/api-reference/).
 ## Installing `docket`

 Docket is [available on PyPI](https://pypi.org/project/pydocket/) under the package name
-`pydocket`. It targets Python 3.
+`pydocket`. It targets Python 3.10 or above.

 With [`uv`](https://docs.astral.sh/uv/):

@@ -119,6 +125,18 @@ pip install pydocket
 Docket requires a [Redis](http://redis.io/) server with Streams support (which was
 introduced in Redis 5.0.0). Docket is tested with Redis 6 and 7.

+For testing without Redis, Docket includes [fakeredis](https://github.com/cunla/fakeredis-py) for in-memory operation:
+
+```python
+from docket import Docket
+
+async with Docket(name="my-docket", url="memory://my-docket") as docket:
+    # Use docket normally - all operations are in-memory
+    ...
+```
+
+See [Testing with Docket](https://chrisguidry.github.io/docket/testing/#using-in-memory-backend-no-redis-required) for more details.
+
 # Hacking on `docket`

 We use [`uv`](https://docs.astral.sh/uv/) for project management, so getting set up
{pydocket-0.11.0 → pydocket-0.12.0}/README.md
@@ -33,6 +33,7 @@ from docket import Docket, Worker

 async with Docket() as docket:
     async with Worker(docket) as worker:
+        worker.register(greet)
         await worker.run_until_finished()
 ```

@@ -62,7 +63,7 @@ reference](https://chrisguidry.github.io/docket/api-reference/).
 ## Installing `docket`

 Docket is [available on PyPI](https://pypi.org/project/pydocket/) under the package name
-`pydocket`. It targets Python 3.
+`pydocket`. It targets Python 3.10 or above.

 With [`uv`](https://docs.astral.sh/uv/):

@@ -83,6 +84,18 @@ pip install pydocket
 Docket requires a [Redis](http://redis.io/) server with Streams support (which was
 introduced in Redis 5.0.0). Docket is tested with Redis 6 and 7.

+For testing without Redis, Docket includes [fakeredis](https://github.com/cunla/fakeredis-py) for in-memory operation:
+
+```python
+from docket import Docket
+
+async with Docket(name="my-docket", url="memory://my-docket") as docket:
+    # Use docket normally - all operations are in-memory
+    ...
+```
+
+See [Testing with Docket](https://chrisguidry.github.io/docket/testing/#using-in-memory-backend-no-redis-required) for more details.
+
 # Hacking on `docket`

 We use [`uv`](https://docs.astral.sh/uv/) for project management, so getting set up
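The `memory://` backend shown above pairs naturally with a test fixture. A minimal sketch, assuming `pytest` with `pytest-asyncio` in auto mode; the fixture and task names are illustrative:

```python
import pytest

from docket import Docket, Worker


async def greet(name: str) -> None:
    print(f"Hello, {name}!")


@pytest.fixture
async def docket():
    # Each test gets its own isolated in-memory docket - no Redis server needed
    async with Docket(name="test-docket", url="memory://test-docket") as docket:
        yield docket


async def test_greet_runs(docket: Docket) -> None:
    async with Worker(docket) as worker:
        worker.register(greet)
        await docket.add(greet)("Jane")
        await worker.run_until_finished()
```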
{pydocket-0.11.0 → pydocket-0.12.0}/docs/advanced-patterns.md
@@ -140,6 +140,138 @@ async def process_single_order(order_id: int) -> None:

 This pattern separates discovery (finding work) from execution (doing work), allowing for better load distribution and fault isolation. The perpetual task stays lightweight and fast, while the actual work is distributed across many workers.

+## Task Scattering with Agenda
+
+For "find-and-flood" workloads, you often want to distribute a batch of tasks over time rather than scheduling them all immediately. The `Agenda` class collects related tasks and scatters them evenly across a time window.
+
+### Basic Scattering
+
+```python
+from datetime import timedelta
+from docket import Agenda, Docket
+
+async def process_item(item_id: int) -> None:
+    await perform_expensive_operation(item_id)
+    await update_database(item_id)
+
+async with Docket() as docket:
+    # Build an agenda of tasks
+    agenda = Agenda()
+    for item_id in range(1, 101):  # 100 items to process
+        agenda.add(process_item)(item_id)
+
+    # Scatter them evenly over 50 minutes to avoid overwhelming the system
+    executions = await agenda.scatter(docket, over=timedelta(minutes=50))
+    print(f"Scheduled {len(executions)} tasks over 50 minutes")
+```
+
+Tasks are distributed evenly across the time window. For 100 tasks over 50 minutes, they'll be scheduled approximately 30 seconds apart.
+
+### Jitter for Thundering Herd Prevention
+
+Add random jitter to prevent multiple processes from scheduling identical work at exactly the same times:
+
+```python
+# Scatter with ±30 second jitter around each scheduled time
+await agenda.scatter(
+    docket,
+    over=timedelta(minutes=50),
+    jitter=timedelta(seconds=30)
+)
+```
+
+### Future Scatter Windows
+
+Schedule the entire batch to start at a specific time in the future:
+
+```python
+from datetime import datetime, timezone
+
+# Start scattering in 2 hours, spread over 30 minutes
+start_time = datetime.now(timezone.utc) + timedelta(hours=2)
+await agenda.scatter(
+    docket,
+    start=start_time,
+    over=timedelta(minutes=30)
+)
+```
+
+### Mixed Task Types
+
+Agendas can contain different types of tasks:
+
+```python
+async def send_email(user_id: str, template: str) -> None:
+    await email_service.send(user_id, template)
+
+async def update_analytics(event_data: dict[str, str]) -> None:
+    await analytics_service.track(event_data)
+
+# Create a mixed agenda
+agenda = Agenda()
+agenda.add(process_item)(item_id=1001)
+agenda.add(send_email)("user123", "welcome")
+agenda.add(update_analytics)({"event": "signup", "user": "user123"})
+agenda.add(process_item)(item_id=1002)
+
+# All tasks will be scattered in the order they were added
+await agenda.scatter(docket, over=timedelta(minutes=10))
+```
+
+### Single Task Positioning
+
+When scattering a single task, it's positioned at the midpoint of the time window:
+
+```python
+agenda = Agenda()
+agenda.add(process_item)(item_id=42)
+
+# This task will be scheduled 5 minutes from now (middle of 10-minute window)
+await agenda.scatter(docket, over=timedelta(minutes=10))
+```
+
+### Agenda Reusability
+
+Agendas can be reused for multiple scatter operations:
+
+```python
+# Create a reusable template
+daily_cleanup_agenda = Agenda()
+daily_cleanup_agenda.add(cleanup_temp_files)()
+daily_cleanup_agenda.add(compress_old_logs)()
+daily_cleanup_agenda.add(update_metrics)()
+
+# Use it multiple times with different timing
+await daily_cleanup_agenda.scatter(docket, over=timedelta(hours=1))
+
+# Later, scatter the same tasks over a different window
+tomorrow = datetime.now(timezone.utc) + timedelta(days=1)
+await daily_cleanup_agenda.scatter(
+    docket,
+    start=tomorrow,
+    over=timedelta(minutes=30)
+)
+```
+
+### Failure Behavior
+
+Keep in mind that, if an error occurs during scheduling, some tasks may have already been scheduled successfully:
+
+```python
+agenda = Agenda()
+agenda.add(valid_task)("arg1")
+agenda.add(valid_task)("arg2")
+agenda.add("nonexistent_task")("arg3")  # This will cause an error
+agenda.add(valid_task)("arg4")
+
+try:
+    await agenda.scatter(docket, over=timedelta(minutes=10))
+except KeyError:
+    # The first two tasks were scheduled successfully
+    # The error prevented the fourth task from being scheduled
+    pass
+```
+
 ## Striking and Restoring Tasks

 Striking allows you to temporarily disable tasks without redeploying code. This is invaluable for incident response, gradual rollouts, or handling problematic customers.
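As a sanity check on the spacing claims in the documentation above ("approximately 30 seconds apart" for 100 tasks over 50 minutes, and the midpoint rule for a single task), here is a back-of-the-envelope illustration; the even-spacing formula is an assumption about the documented behavior, not code from the package:

```python
from datetime import datetime, timedelta, timezone


def scatter_times(count: int, start: datetime, over: timedelta) -> list[datetime]:
    """Evenly space `count` tasks across a window (assumed formula)."""
    if count == 1:
        # Documented behavior: a single task lands at the window's midpoint
        return [start + over / 2]
    step = over / (count - 1)
    return [start + step * i for i in range(count)]


now = datetime.now(timezone.utc)
times = scatter_times(100, now, timedelta(minutes=50))

# 100 tasks over 50 minutes -> roughly one task every 30 seconds
assert timedelta(seconds=30) <= times[1] - times[0] < timedelta(seconds=31)

# A single task lands 5 minutes out, at the middle of a 10-minute window
assert scatter_times(1, now, timedelta(minutes=10))[0] == now + timedelta(minutes=5)
```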
{pydocket-0.11.0 → pydocket-0.12.0}/docs/dependencies.md
@@ -160,7 +160,43 @@ Timeouts work alongside retries. If a task times out, it can be retried accordin

 ## Custom Dependencies

-Create your own dependencies using `Depends()` for reusable resources and patterns
+Create your own dependencies using `Depends()` for reusable resources and patterns. Dependencies can be either synchronous or asynchronous.
+
+### Synchronous Dependencies
+
+Use sync dependencies for pure computations and in-memory operations:
+
+```python
+from docket import Depends
+
+# In-memory config lookup - no I/O
+def get_config() -> dict:
+    """Access configuration from memory."""
+    return {"api_url": "https://api.example.com", "timeout": 30}
+
+# Pure computation - no I/O
+def build_request_headers(config: dict = Depends(get_config)) -> dict:
+    """Construct headers from config."""
+    return {
+        "User-Agent": "MyApp/1.0",
+        "Timeout": str(config["timeout"])
+    }
+
+async def call_api(
+    headers: dict = Depends(build_request_headers)
+) -> None:
+    # Headers are computed without blocking
+    # Network I/O happens here (async)
+    response = await http_client.get(url, headers=headers)
+```
+
+**Important**: Synchronous dependencies should **NOT** include blocking I/O operations (file access, network calls, database queries, etc.) as it will block the event loop and prevent tasks from being executed. Use async dependencies for any I/O. Sync dependencies are best for:
+- Pure computations
+- In-memory data structure access
+- Configuration lookups from memory
+- Non-blocking transformations
+
+### Asynchronous Dependencies

 ```python
 from contextlib import asynccontextmanager
@@ -168,30 +204,78 @@ from docket import Depends

 @asynccontextmanager
 async def get_database_connection():
-    """
+    """Async dependency that returns a database connection."""
     conn = await database.connect()
     try:
         yield conn
     finally:
         await conn.close()

-@asynccontextmanager
-async def get_redis_client():
-    """Another dependency for Redis operations."""
-    client = redis.Redis(host='localhost', port=6379)
-    try:
-        yield client
-    finally:
-        client.close()
-
 async def process_user_data(
     user_id: int,
-    db=Depends(get_database_connection)
-    cache=Depends(get_redis_client)
+    db=Depends(get_database_connection)
 ) -> None:
-    #
+    # Database connection is automatically provided and cleaned up
     user = await db.fetch_user(user_id)
-    await
+    await db.update_user(user_id, {"last_seen": datetime.now()})
+```
+
+### Synchronous Context Managers
+
+Use sync context managers only for managing in-memory resources or quick non-blocking operations:
+
+```python
+from contextlib import contextmanager
+from docket import Depends
+
+# In-memory resource tracking - no I/O
+@contextmanager
+def track_operation(operation_name: str):
+    """Track operation execution without blocking."""
+    operations_in_progress.add(operation_name)  # In-memory set
+    try:
+        yield operation_name
+    finally:
+        operations_in_progress.remove(operation_name)
+
+async def process_data(
+    tracker=Depends(lambda: track_operation("data_processing"))
+) -> None:
+    # Operation tracked in memory, no blocking
+    await perform_async_work()
+```
+
+### Mixed Sync and Async Dependencies
+
+You can freely mix synchronous and asynchronous dependencies in the same task. Use sync for computations, async for I/O:
+
+```python
+# Sync - in-memory config lookup
+def get_local_config() -> dict:
+    """Access local config from memory - no I/O."""
+    return {"retry_count": 3, "batch_size": 100}
+
+# Async - network I/O
+async def get_remote_config() -> dict:
+    """Fetch remote config via network - requires I/O."""
+    response = await http_client.get("/api/config")
+    return await response.json()
+
+# Sync - pure computation
+def merge_configs(
+    local: dict = Depends(get_local_config),
+    remote: dict = Depends(get_remote_config)
+) -> dict:
+    """Merge configs without blocking - pure computation."""
+    return {**local, **remote}
+
+async def process_batch(
+    config: dict = Depends(merge_configs)
+) -> None:
+    # Config is computed/fetched appropriately
+    # Now do the actual I/O work
+    for i in range(config["batch_size"]):
+        await process_item(i, retries=config["retry_count"])
 ```

 ### Nested Dependencies
@@ -220,31 +304,7 @@ async def update_user_profile(
     await user_service.update_profile(user_id, profile_data)
 ```

-Dependencies are resolved once per task execution and cached, so if multiple parameters depend on the same resource, only one instance is created.
-
-### Context Manager Dependencies
-
-Dependencies can be async context managers for automatic resource cleanup:
-
-```python
-from contextlib import asynccontextmanager
-
-@asynccontextmanager
-async def get_file_lock(filename: str):
-    """A dependency that provides file locking."""
-    lock = await acquire_file_lock(filename)
-    try:
-        yield lock
-    finally:
-        await release_file_lock(filename)
-
-async def process_shared_file(
-    filename: str,
-    file_lock=Depends(lambda: get_file_lock("shared.txt"))
-) -> None:
-    # File is locked before task starts, unlocked after task completes
-    await process_file_safely(filename)
-```
+Dependencies are resolved once per task execution and cached, so if multiple parameters depend on the same resource, only one instance is created. This caching works across both sync and async dependencies.

 ### Dependencies with Built-in Context

@@ -337,6 +397,41 @@ If `unreliable_dependency` fails, the task won't execute and the error will be l

 ## Dependency Guidelines

+### Choose Sync vs Async Appropriately
+
+**Use synchronous dependencies for:**
+- Pure computations (math, string manipulation, data transformations)
+- In-memory data structure access (dicts, lists, sets)
+- Configuration lookups from memory
+- Non-blocking operations that complete instantly
+
+**Use asynchronous dependencies for:**
+- Network I/O (HTTP requests, API calls)
+- File I/O (reading/writing files)
+- Database queries
+- Any operation that involves `await`
+- Resource management requiring async cleanup
+
+```python
+# ✅ Good: Sync for pure computation
+def calculate_batch_size(item_count: int) -> int:
+    return min(item_count, 1000)
+
+# ✅ Good: Async for I/O
+async def fetch_user_data(user_id: int) -> dict:
+    return await api_client.get(f"/users/{user_id}")
+
+# ❌ Bad: Sync with blocking I/O
+def load_config_from_file() -> dict:
+    with open("config.json") as f:  # Blocks the event loop!
+        return json.load(f)
+
+# ✅ Good: Use async for file I/O instead
+async def load_config_from_file() -> dict:
+    async with aiofiles.open("config.json") as f:
+        return json.loads(await f.read())
+```
+
 ### Design for Reusability

 Create dependencies that can be used across multiple tasks: