pydocket 0.0.2__tar.gz → 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydocket might be problematic. Click here for more details.
- pydocket-0.1.0/.github/codecov.yml +24 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/.github/workflows/ci.yml +2 -1
- {pydocket-0.0.2 → pydocket-0.1.0}/.pre-commit-config.yaml +7 -1
- pydocket-0.1.0/PKG-INFO +388 -0
- pydocket-0.1.0/README.md +353 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/chaos/driver.py +55 -24
- {pydocket-0.0.2 → pydocket-0.1.0}/pyproject.toml +5 -3
- pydocket-0.1.0/src/docket/cli.py +689 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/dependencies.py +1 -1
- pydocket-0.1.0/src/docket/docket.py +595 -0
- pydocket-0.1.0/src/docket/execution.py +343 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/instrumentation.py +18 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/tasks.py +7 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/worker.py +193 -86
- pydocket-0.1.0/tests/cli/test_snapshot.py +205 -0
- pydocket-0.1.0/tests/cli/test_striking.py +201 -0
- pydocket-0.0.2/tests/cli/test_trace.py → pydocket-0.1.0/tests/cli/test_tasks.py +29 -7
- pydocket-0.1.0/tests/cli/test_workers.py +79 -0
- pydocket-0.1.0/tests/conftest.py +93 -0
- pydocket-0.1.0/tests/test_docket.py +14 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/test_fundamentals.py +216 -3
- pydocket-0.1.0/tests/test_striking.py +209 -0
- pydocket-0.1.0/tests/test_worker.py +360 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/uv.lock +198 -54
- pydocket-0.0.2/PKG-INFO +0 -36
- pydocket-0.0.2/README.md +0 -2
- pydocket-0.0.2/src/docket/cli.py +0 -254
- pydocket-0.0.2/src/docket/docket.py +0 -238
- pydocket-0.0.2/src/docket/execution.py +0 -74
- pydocket-0.0.2/tests/conftest.py +0 -51
- pydocket-0.0.2/tests/test_worker.py +0 -91
- {pydocket-0.0.2 → pydocket-0.1.0}/.cursorrules +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/.github/workflows/publish.yml +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/.gitignore +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/LICENSE +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/chaos/README.md +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/chaos/__init__.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/chaos/producer.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/chaos/run +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/chaos/tasks.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/__init__.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/__main__.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/annotations.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/src/docket/py.typed +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/telemetry/.gitignore +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/telemetry/start +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/telemetry/stop +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/__init__.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/cli/__init__.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/cli/conftest.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/cli/test_module.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/cli/test_version.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/cli/test_worker.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/test_dependencies.py +0 -0
- {pydocket-0.0.2 → pydocket-0.1.0}/tests/test_instrumentation.py +0 -0
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
coverage:
|
|
2
|
+
status:
|
|
3
|
+
project:
|
|
4
|
+
default:
|
|
5
|
+
target: 100%
|
|
6
|
+
threshold: 0%
|
|
7
|
+
informational: false
|
|
8
|
+
only_pulls: false
|
|
9
|
+
patch:
|
|
10
|
+
default:
|
|
11
|
+
target: 100%
|
|
12
|
+
threshold: 0%
|
|
13
|
+
informational: false
|
|
14
|
+
only_pulls: false
|
|
15
|
+
precision: 2
|
|
16
|
+
round: down
|
|
17
|
+
range: "100...100"
|
|
18
|
+
|
|
19
|
+
comment:
|
|
20
|
+
layout: "reach, diff, flags, files"
|
|
21
|
+
behavior: default
|
|
22
|
+
require_changes: false
|
|
23
|
+
require_base: no
|
|
24
|
+
require_head: no
|
|
@@ -30,12 +30,13 @@ jobs:
|
|
|
30
30
|
run: uv sync --dev
|
|
31
31
|
|
|
32
32
|
- name: Run tests
|
|
33
|
-
run: uv run pytest --cov-branch --cov-report=xml
|
|
33
|
+
run: uv run pytest --cov-branch --cov-report=xml --cov-report=term-missing:skip-covered
|
|
34
34
|
|
|
35
35
|
- name: Upload coverage reports to Codecov
|
|
36
36
|
uses: codecov/codecov-action@v5
|
|
37
37
|
with:
|
|
38
38
|
token: ${{ secrets.CODECOV_TOKEN }}
|
|
39
|
+
flags: python-${{ matrix.python-version }}
|
|
39
40
|
|
|
40
41
|
pre-commit:
|
|
41
42
|
name: Pre-commit checks
|
|
@@ -23,8 +23,14 @@ repos:
|
|
|
23
23
|
- repo: local
|
|
24
24
|
hooks:
|
|
25
25
|
- id: pyright
|
|
26
|
-
name: pyright
|
|
26
|
+
name: pyright (docket package)
|
|
27
27
|
entry: uv run pyright --verifytypes docket --ignoreexternal
|
|
28
28
|
language: system
|
|
29
29
|
types: [python]
|
|
30
30
|
pass_filenames: false
|
|
31
|
+
- id: pyright
|
|
32
|
+
name: pyright (source and tests)
|
|
33
|
+
entry: uv run pyright tests
|
|
34
|
+
language: system
|
|
35
|
+
types: [python]
|
|
36
|
+
pass_filenames: false
|
pydocket-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pydocket
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A distributed background task system for Python functions
|
|
5
|
+
Project-URL: Homepage, https://github.com/chrisguidry/docket
|
|
6
|
+
Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
|
|
7
|
+
Author-email: Chris Guidry <guid@omg.lol>
|
|
8
|
+
License: # Released under MIT License
|
|
9
|
+
|
|
10
|
+
Copyright (c) 2025 Chris Guidry.
|
|
11
|
+
|
|
12
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
|
13
|
+
|
|
14
|
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
|
15
|
+
|
|
16
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
17
|
+
License-File: LICENSE
|
|
18
|
+
Classifier: Development Status :: 4 - Beta
|
|
19
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
20
|
+
Classifier: Operating System :: OS Independent
|
|
21
|
+
Classifier: Programming Language :: Python :: 3
|
|
22
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
23
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
24
|
+
Classifier: Typing :: Typed
|
|
25
|
+
Requires-Python: >=3.12
|
|
26
|
+
Requires-Dist: cloudpickle>=3.1.1
|
|
27
|
+
Requires-Dist: opentelemetry-api>=1.30.0
|
|
28
|
+
Requires-Dist: opentelemetry-exporter-prometheus>=0.51b0
|
|
29
|
+
Requires-Dist: prometheus-client>=0.21.1
|
|
30
|
+
Requires-Dist: python-json-logger>=3.2.1
|
|
31
|
+
Requires-Dist: redis>=5.2.1
|
|
32
|
+
Requires-Dist: rich>=13.9.4
|
|
33
|
+
Requires-Dist: typer>=0.15.1
|
|
34
|
+
Description-Content-Type: text/markdown
|
|
35
|
+
|
|
36
|
+
Docket is a distributed background task system for Python functions with a focus
|
|
37
|
+
on the scheduling of future work as seamlessly and efficiently as immediate work.
|
|
38
|
+
|
|
39
|
+
[](https://pypi.org/project/pydocket/)
|
|
40
|
+
[](https://pypi.org/project/pydocket/)
|
|
41
|
+
[](https://github.com/chrisguidry/docket/actions/workflows/ci.yml)
|
|
42
|
+
[](https://app.codecov.io/gh/chrisguidry/docket)
|
|
43
|
+
[](https://github.com/chrisguidry/docket/blob/main/LICENSE)
|
|
44
|
+
|
|
45
|
+
## At a glance
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
from datetime import datetime, timedelta, timezone
|
|
49
|
+
|
|
50
|
+
from docket import Docket
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
async def greet(name: str, greeting="Hello") -> None:
|
|
54
|
+
print(f"{greeting}, {name} at {datetime.now()}!")
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
async with Docket() as docket:
|
|
58
|
+
await docket.add(greet)("Jane")
|
|
59
|
+
|
|
60
|
+
now = datetime.now(timezone.utc)
|
|
61
|
+
soon = now + timedelta(seconds=3)
|
|
62
|
+
await docket.add(greet, when=soon)("John", greeting="Howdy")
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
```python
|
|
66
|
+
from docket import Docket, Worker
|
|
67
|
+
|
|
68
|
+
async with Docket() as docket:
|
|
69
|
+
async with Worker(docket) as worker:
|
|
70
|
+
await worker.run_until_finished()
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
```
|
|
74
|
+
Hello, Jane at 2025-03-05 13:58:21.552644!
|
|
75
|
+
Howdy, John at 2025-03-05 13:58:24.550773!
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
## Why `docket`?
|
|
79
|
+
|
|
80
|
+
⚡️ Snappy one-way background task processing without any bloat
|
|
81
|
+
|
|
82
|
+
📅 Schedule immediate or future work seamlessly with the same interface
|
|
83
|
+
|
|
84
|
+
⏭️ Skip problematic tasks or parameters without redeploying
|
|
85
|
+
|
|
86
|
+
🌊 Purpose-built for Redis streams
|
|
87
|
+
|
|
88
|
+
🧩 Fully type-complete and type-aware for your background task functions
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
## Installing `docket`
|
|
92
|
+
|
|
93
|
+
Docket is [available on PyPI](https://pypi.org/project/pydocket/) under the package name
|
|
94
|
+
`pydocket`. It targets Python 3.12 or above.
|
|
95
|
+
|
|
96
|
+
With [`uv`](https://docs.astral.sh/uv/):
|
|
97
|
+
|
|
98
|
+
```bash
|
|
99
|
+
uv pip install pydocket
|
|
100
|
+
|
|
101
|
+
or
|
|
102
|
+
|
|
103
|
+
uv add pydocket
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
With `pip`:
|
|
107
|
+
|
|
108
|
+
```bash
|
|
109
|
+
pip install pydocket
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
Docket requires a [Redis](http://redis.io/) server with Streams support (which was
|
|
113
|
+
introduced in Redis 5.0.0). Docket is tested with Redis 7.
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
## Creating a `Docket`
|
|
117
|
+
|
|
118
|
+
Each `Docket` should have a name that will be shared across your system, like the name
|
|
119
|
+
of a topic or queue. By default this is `"docket"`. You can support many separate
|
|
120
|
+
dockets on a single Redis server as long as they have different names.
|
|
121
|
+
|
|
122
|
+
Docket accepts a URL to connect to the Redis server (defaulting to the local
|
|
123
|
+
server), and you can pass any additional connection configuration you need on that
|
|
124
|
+
connection URL.
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
async with Docket(name="orders", url="redis://my-redis:6379/0") as docket:
|
|
128
|
+
...
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
The `name` and `url` together represent a single shared docket of work across all your
|
|
132
|
+
systems.
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
## Scheduling work
|
|
136
|
+
|
|
137
|
+
A `Docket` is the entrypoint to scheduling immediate and future work. You define work
|
|
138
|
+
in the form of `async` functions that return `None`. These task functions can accept
|
|
139
|
+
any parameter types, so long as they can be serialized with
|
|
140
|
+
[`cloudpickle`](https://github.com/cloudpipe/cloudpickle).
|
|
141
|
+
|
|
142
|
+
```python
|
|
143
|
+
def now() -> datetime:
|
|
144
|
+
return datetime.now(timezone.utc)
|
|
145
|
+
|
|
146
|
+
async def send_welcome_email(customer_id: int, name: str) -> None:
|
|
147
|
+
...
|
|
148
|
+
|
|
149
|
+
async def send_followup_email(customer_id: int, name: str) -> None:
|
|
150
|
+
...
|
|
151
|
+
|
|
152
|
+
async with Docket() as docket:
|
|
153
|
+
await docket.add(send_welcome_email)(12345, "Jane Smith")
|
|
154
|
+
|
|
155
|
+
tomorrow = now() + timedelta(days=1)
|
|
156
|
+
await docket.add(send_followup_email, when=tomorrow)(12345, "Jane Smith")
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
`docket.add` schedules both immediate work (the default) or future work (with the
|
|
160
|
+
`when: datetime` parameter).
|
|
161
|
+
|
|
162
|
+
All task executions are identified with a `key` that captures the unique essence of that
|
|
163
|
+
piece of work. By default they are randomly assigned UUIDs, but assigning your own keys
|
|
164
|
+
unlocks many powerful capabilities.
|
|
165
|
+
|
|
166
|
+
```python
|
|
167
|
+
async with Docket() as docket:
|
|
168
|
+
await docket.add(send_welcome_email)(12345, "Jane Smith")
|
|
169
|
+
|
|
170
|
+
tomorrow = now() + timedelta(days=1)
|
|
171
|
+
key = "welcome-email-for-12345"
|
|
172
|
+
await docket.add(send_followup_email, when=tomorrow, key=key)(12345, "Jane Smith")
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
If you've given your future work a `key`, then only one unique instance of that
|
|
176
|
+
execution will exist in the future:
|
|
177
|
+
|
|
178
|
+
```python
|
|
179
|
+
key = "welcome-email-for-12345"
|
|
180
|
+
await docket.add(send_followup_email, when=tomorrow, key=key)(12345, "Jane Smith")
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
Calling `.add` a second time with the same key won't do anything, so luckily your
|
|
184
|
+
customer won't get two emails!
|
|
185
|
+
|
|
186
|
+
However, at any time later you can replace that task execution to alter _when_ it will
|
|
187
|
+
happen:
|
|
188
|
+
|
|
189
|
+
```python
|
|
190
|
+
key = "welcome-email-for-12345"
|
|
191
|
+
next_week = now() + timedelta(days=7)
|
|
192
|
+
await docket.replace(send_followup_email, when=next_week, key=key)(12345, "Jane Smith")
|
|
193
|
+
```
|
|
194
|
+
|
|
195
|
+
_what arguments_ will be passed:
|
|
196
|
+
|
|
197
|
+
```python
|
|
198
|
+
key = "welcome-email-for-12345"
|
|
199
|
+
await docket.replace(send_followup_email, when=tomorrow, key=key)(12345, "Jane Q. Smith")
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
Or just cancel it outright:
|
|
203
|
+
|
|
204
|
+
```python
|
|
205
|
+
await docket.cancel("welcome-email-for-12345")
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
Tasks may also be called by name, in cases where you can't or don't want to import the
|
|
209
|
+
module that has your tasks. This may be common in a distributed environment where the
|
|
210
|
+
code of your task system just isn't available, or it requires heavyweight libraries that
|
|
211
|
+
you wouldn't want to import into your web server. In this case, you will lose the
|
|
212
|
+
type-checking for `.add` and `.replace` calls, but otherwise everything will work as
|
|
213
|
+
it does with the actual function:
|
|
214
|
+
|
|
215
|
+
```python
|
|
216
|
+
await docket.add("send_followup_email", when=tomorrow)(12345, "Jane Smith")
|
|
217
|
+
```
|
|
218
|
+
|
|
219
|
+
These primitives of `.add`, `.replace`, and `.cancel` are sufficient to build a
|
|
220
|
+
large-scale and robust system of background tasks for your application.
|
|
221
|
+
|
|
222
|
+
## Writing tasks
|
|
223
|
+
|
|
224
|
+
Tasks are any `async` function that takes `cloudpickle`-able parameters, and returns
|
|
225
|
+
`None`. Returning `None` is a strong signal that these are _fire-and-forget_ tasks
|
|
226
|
+
whose results aren't used or waited-on by your application. These are the only kinds of
|
|
227
|
+
tasks that Docket supports.
|
|
228
|
+
|
|
229
|
+
Docket uses a parameter-based dependency and configuration pattern, which has become
|
|
230
|
+
common in frameworks like [FastAPI](https://fastapi.tiangolo.com/),
|
|
231
|
+
[Typer](https://typer.tiangolo.com/), or [FastMCP](https://github.com/jlowin/fastmcp).
|
|
232
|
+
As such, there is no decorator for tasks.
|
|
233
|
+
|
|
234
|
+
A very common requirement for tasks is that they have access to schedule further work
|
|
235
|
+
on their own docket, especially for chains of self-perpetuating tasks to implement
|
|
236
|
+
distributed polling and other periodic systems. One of the first dependencies you may
|
|
237
|
+
look for is the `CurrentDocket`:
|
|
238
|
+
|
|
239
|
+
```python
|
|
240
|
+
from docket import Docket, CurrentDocket
|
|
241
|
+
|
|
242
|
+
POLLING_INTERVAL = timedelta(seconds=10)
|
|
243
|
+
|
|
244
|
+
async def poll_for_changes(file: Path, docket: Docket = CurrentDocket()) -> None:
|
|
245
|
+
if file.exists():
|
|
246
|
+
...do something interesting...
|
|
247
|
+
return
|
|
248
|
+
else:
|
|
249
|
+
await docket.add(poll_for_changes, when=now() + POLLING_INTERVAL)(file)
|
|
250
|
+
```
|
|
251
|
+
|
|
252
|
+
Here the argument to `docket` is an instance of `Docket` with the same name and URL as
|
|
253
|
+
the worker it's running on. You can ask for the `CurrentWorker` and `CurrentExecution`
|
|
254
|
+
as well. Many times it could be useful to have your own task `key` available in order
|
|
255
|
+
to idempotently schedule future work:
|
|
256
|
+
|
|
257
|
+
```python
|
|
258
|
+
from docket import Docket, CurrentDocket, TaskKey
|
|
259
|
+
|
|
260
|
+
async def poll_for_changes(
|
|
261
|
+
file: Path,
|
|
262
|
+
key: str = TaskKey(),
|
|
263
|
+
docket: Docket = CurrentDocket()
|
|
264
|
+
) -> None:
|
|
265
|
+
if file.exists():
|
|
266
|
+
...do something interesting...
|
|
267
|
+
return
|
|
268
|
+
else:
|
|
269
|
+
await docket.add(poll_for_changes, when=now() + POLLING_INTERVAL, key=key)(file)
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
This helps to ensure that there is one continuous "chain" of these future tasks, as they
|
|
273
|
+
all use the same key.
|
|
274
|
+
|
|
275
|
+
Configuring the retry behavior for a task is also done with a dependency:
|
|
276
|
+
|
|
277
|
+
```python
|
|
278
|
+
from datetime import timedelta
|
|
279
|
+
from docket import Retry
|
|
280
|
+
|
|
281
|
+
async def faily(retry: Retry = Retry(attempts=5, delay=timedelta(seconds=3))):
|
|
282
|
+
if retry.attempt == 4:
|
|
283
|
+
print("whew!")
|
|
284
|
+
return
|
|
285
|
+
|
|
286
|
+
raise ValueError("whoops!")
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
In this case, the task `faily` will run 4 times with a delay of 3 seconds between each
|
|
290
|
+
attempt. If it were to get to 5 attempts, no more would be attempted. This is a
|
|
291
|
+
linear retry, and an `ExponentialRetry` is also available:
|
|
292
|
+
|
|
293
|
+
```python
|
|
294
|
+
from datetime import timedelta
|
|
295
|
+
from docket import Retry, ExponentialRetry
|
|
296
|
+
|
|
297
|
+
|
|
298
|
+
async def faily(
|
|
299
|
+
retry: Retry = Retry(
|
|
300
|
+
attempts=5,
|
|
301
|
+
minimum_delay=timedelta(seconds=2),
|
|
302
|
+
maximum_delay=timedelta(seconds=32),
|
|
303
|
+
),
|
|
304
|
+
):
|
|
305
|
+
if retry.attempt == 4:
|
|
306
|
+
print("whew!")
|
|
307
|
+
return
|
|
308
|
+
|
|
309
|
+
raise ValueError("whoops!")
|
|
310
|
+
```
|
|
311
|
+
|
|
312
|
+
This would retry in 2, 4, 8, then 16 seconds before that fourth attempt succeeded.
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
## Running workers
|
|
316
|
+
|
|
317
|
+
You can run as many workers as you like to process the tasks on your docket. You can
|
|
318
|
+
either run a worker programmatically in Python, or via the CLI. Clients using docket
|
|
319
|
+
have the advantage that they are usually passing the task functions, but workers don't
|
|
320
|
+
necessarily know which tasks they are supposed to run. Docket solves this by allowing
|
|
321
|
+
you to explicitly register tasks.
|
|
322
|
+
|
|
323
|
+
In `my_tasks.py`:
|
|
324
|
+
|
|
325
|
+
```python
|
|
326
|
+
async def my_first_task():
|
|
327
|
+
...
|
|
328
|
+
|
|
329
|
+
async def my_second_task():
|
|
330
|
+
...
|
|
331
|
+
|
|
332
|
+
my_task_collection = [
|
|
333
|
+
my_first_task,
|
|
334
|
+
my_second_task,
|
|
335
|
+
]
|
|
336
|
+
```
|
|
337
|
+
|
|
338
|
+
From Python:
|
|
339
|
+
|
|
340
|
+
```python
|
|
341
|
+
from my_tasks import my_task_collection
|
|
342
|
+
|
|
343
|
+
async with Docket() as docket:
|
|
344
|
+
for task in my_task_collection:
|
|
345
|
+
docket.register(task)
|
|
346
|
+
|
|
347
|
+
async with Worker(docket) as worker:
|
|
348
|
+
await worker.run_forever()
|
|
349
|
+
```
|
|
350
|
+
|
|
351
|
+
From the CLI:
|
|
352
|
+
|
|
353
|
+
```bash
|
|
354
|
+
docket worker --tasks my_tasks:my_task_collection
|
|
355
|
+
```
|
|
356
|
+
|
|
357
|
+
By default, workers will process up to 10 tasks concurrently, but you can adjust this
|
|
358
|
+
to your needs with the `concurrency=` keyword argument or the `--concurrency` CLI
|
|
359
|
+
option.
|
|
360
|
+
|
|
361
|
+
When a worker crashes ungracefully, any tasks it was currently executing will be held
|
|
362
|
+
for a period of time before being redelivered to other workers. You can control this
|
|
363
|
+
time period with `redelivery_timeout=` or `--redelivery-timeout`. You'd want to set
|
|
364
|
+
this to a value higher than the longest task you expect to run. For queues of very fast
|
|
365
|
+
tasks, a few seconds may be ideal; for long data-processing steps involving large
|
|
366
|
+
amounts of data, you may need minutes.
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
# Hacking on `docket`
|
|
370
|
+
|
|
371
|
+
We use [`uv`](https://docs.astral.sh/uv/) for project management, so getting set up
|
|
372
|
+
should be as simple as cloning the repo and running:
|
|
373
|
+
|
|
374
|
+
```bash
|
|
375
|
+
uv sync
|
|
376
|
+
```
|
|
377
|
+
|
|
378
|
+
Then to run the test suite:
|
|
379
|
+
|
|
380
|
+
```bash
|
|
381
|
+
pytest
|
|
382
|
+
```
|
|
383
|
+
|
|
384
|
+
We aim to maintain 100% test coverage, which is required for all PRs to `docket`. We
|
|
385
|
+
believe that `docket` should stay small, simple, understandable, and reliable, and that
|
|
386
|
+
begins with testing all the dusty branches and corners. This will give us the
|
|
387
|
+
confidence to upgrade dependencies quickly and to adapt to new versions of Redis over
|
|
388
|
+
time.
|