pydocket 0.6.4.tar.gz → 0.7.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydocket-0.6.4 → pydocket-0.7.1}/.github/workflows/ci.yml +2 -2
- {pydocket-0.6.4 → pydocket-0.7.1}/.github/workflows/publish.yml +9 -4
- {pydocket-0.6.4 → pydocket-0.7.1}/.gitignore +7 -9
- pydocket-0.7.1/CLAUDE.md +127 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/PKG-INFO +1 -1
- {pydocket-0.6.4 → pydocket-0.7.1}/chaos/driver.py +24 -8
- {pydocket-0.6.4 → pydocket-0.7.1}/chaos/tasks.py +8 -1
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/annotations.py +4 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/cli.py +26 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/dependencies.py +22 -4
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/docket.py +43 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/execution.py +3 -1
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/instrumentation.py +6 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/worker.py +8 -3
- pydocket-0.7.1/tests/cli/test_clear.py +253 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/conftest.py +5 -1
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/test_dependencies.py +123 -1
- pydocket-0.7.1/tests/test_docket.py +168 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/test_instrumentation.py +92 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/uv.lock +417 -416
- pydocket-0.6.4/tests/test_docket.py +0 -14
- {pydocket-0.6.4 → pydocket-0.7.1}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/.github/codecov.yml +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/.github/workflows/docs.yml +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/.pre-commit-config.yaml +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/LICENSE +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/README.md +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/chaos/README.md +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/chaos/__init__.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/chaos/producer.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/chaos/run +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/docs/api-reference.md +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/docs/getting-started.md +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/docs/index.md +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/examples/__init__.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/examples/common.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/examples/find_and_flood.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/examples/self_perpetuating.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/mkdocs.yml +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/pyproject.toml +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/__init__.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/__main__.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/py.typed +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/src/docket/tasks.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/telemetry/.gitignore +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/telemetry/start +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/telemetry/stop +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/__init__.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/__init__.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/conftest.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_module.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_striking.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_version.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_worker.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/cli/test_workers.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/test_execution.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/test_fundamentals.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/test_striking.py +0 -0
- {pydocket-0.6.4 → pydocket-0.7.1}/tests/test_worker.py +0 -0
.github/workflows/ci.yml

@@ -15,7 +15,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.12", "3.13"]
-        redis-version: ["6.2", "7.4"]
+        redis-version: ["6.2", "7.4", "valkey-8.0"]
         redis-py-version: [">=4.6,<5", ">=5"]

     steps:

@@ -34,7 +34,7 @@ jobs:
       - name: Run tests
        env:
          REDIS_VERSION: ${{ matrix.redis-version }}
-       run: uv run pytest --cov-branch --cov-report=xml --cov-report=term-missing:skip-covered
+       run: uv run pytest --cov-branch --cov-fail-under=100 --cov-report=xml --cov-report=term-missing:skip-covered

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
.github/workflows/publish.yml

@@ -13,8 +13,11 @@ jobs:
     name: Build and publish to PyPI
     runs-on: ubuntu-latest
     needs: ci
+    environment:
+      name: pypi
+      url: https://pypi.org/p/pydocket
     permissions:
-      id-token: write
+      id-token: write # Required for trusted publishing and PEP 740 attestations
       contents: read

     steps:

@@ -31,10 +34,12 @@ jobs:
           cache-dependency-glob: "pyproject.toml"

       - name: Install build dependencies
-        run: uv pip install
+        run: uv pip install hatchling hatch-vcs

       - name: Build package
         run: uv build

-      - name: Publish to PyPI
-
+      - name: Publish to PyPI with PEP 740 attestations
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          packages-dir: dist/
.gitignore

@@ -1,13 +1,11 @@
-# Python-generated files
-__pycache__/
-*.py[oc]
-build/
-dist/
-wheels/
 *.egg-info
-
-# Virtual environments
-.venv
+*.py[oc]
 .coverage
 .envrc
 .python-version
+.venv
+.worktrees/
+__pycache__/
+build/
+dist/
+wheels/
pydocket-0.7.1/CLAUDE.md ADDED

@@ -0,0 +1,127 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+**Docket** (`pydocket` on PyPI) is a distributed background task system for Python functions with Redis-backed persistence. It enables scheduling both immediate and future work with comprehensive dependency injection, retry mechanisms, and fault tolerance.
+
+**Key Requirements**: Python 3.12+, Redis 6.2+ or Valkey 8.0+
+
+## Development Commands
+
+### Testing
+
+```bash
+# Run full test suite with coverage and parallel execution
+pytest
+
+# Run specific test
+pytest tests/test_docket.py::test_specific_function
+
+```
+
+The project REQUIRES 100% test coverage
+
+### Code Quality
+
+```bash
+# Lint and format code
+ruff check
+ruff format
+
+# Type checking
+pyright
+pyright tests
+
+# Run all pre-commit hooks
+pre-commit run --all-files
+```
+
+### Development Setup
+
+```bash
+# Install development dependencies
+uv sync --group dev
+
+# Install pre-commit hooks
+pre-commit install
+```
+
+### Git Workflow
+
+- This project uses Github for issue tracking
+- This project can use git worktrees under .worktrees/
+
+## Core Architecture
+
+### Key Classes
+
+- **`Docket`** (`src/docket/docket.py`): Central task registry and scheduler
+
+  - `add()`: Schedule tasks for execution
+  - `replace()`: Replace existing scheduled tasks
+  - `cancel()`: Cancel pending tasks
+  - `strike()`/`restore()`: Conditionally block/unblock tasks
+  - `snapshot()`: Get current state for observability
+
+- **`Worker`** (`src/docket/worker.py`): Task execution engine
+
+  - `run_forever()`/`run_until_finished()`: Main execution loops
+  - Handles concurrency, retries, and dependency injection
+  - Maintains heartbeat for liveness tracking
+
+- **`Execution`** (`src/docket/execution.py`): Task execution context with metadata
+
+### Dependencies System (`src/docket/dependencies.py`)
+
+Rich dependency injection supporting:
+
+- Context access: `CurrentDocket`, `CurrentWorker`, `CurrentExecution`
+- Retry strategies: `Retry`, `ExponentialRetry`
+- Special behaviors: `Perpetual` (self-rescheduling), `Timeout`
+- Custom injection: `Depends()`
+- Contextual logging: `TaskLogger`
+
+### Redis Data Model
+
+- **Streams**: `{docket}:stream` (ready tasks), `{docket}:strikes` (commands)
+- **Sorted Sets**: `{docket}:queue` (scheduled tasks), `{docket}:workers` (heartbeats)
+- **Hashes**: `{docket}:{key}` (parked task data)
+- **Sets**: `{docket}:worker-tasks:{worker}` (worker capabilities)
+
+### Task Lifecycle
+
+1. Registration with `Docket.register()` or `@docket.task`
+2. Scheduling: immediate → Redis stream, future → Redis sorted set
+3. Worker processing: scheduler moves due tasks, workers consume via consumer groups
+4. Execution: dependency injection, retry logic, acknowledgment
+
+## Project Structure
+
+### Source Code
+
+- `src/docket/` - Main package
+  - `__init__.py` - Public API exports
+  - `docket.py` - Core Docket class
+  - `worker.py` - Worker implementation
+  - `execution.py` - Task execution context
+  - `dependencies.py` - Dependency injection system
+  - `tasks.py` - Built-in utility tasks
+  - `cli.py` - Command-line interface
+
+### Testing and Examples
+
+- `tests/` - Comprehensive test suite
+- `examples/` - Usage examples
+- `chaos/` - Chaos testing framework
+
+## CLI Usage
+
+```bash
+# Run a worker
+docket worker --url redis://localhost:6379/0 --tasks your.module --concurrency 4
+
+# See all commands
+docket --help
+```
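Taken together, the lifecycle CLAUDE.md outlines maps onto a small end-to-end program. The sketch below is illustrative only: the `greet` task is invented, and the exact `add`/`Worker` call signatures are inferred from the class and method names listed above rather than taken from this diff.

```python
import asyncio

from docket import Docket, Worker


async def greet(name: str) -> None:
    print(f"Hello, {name}!")


async def main() -> None:
    async with Docket(name="docket", url="redis://localhost:6379/0") as docket:
        docket.register(greet)                 # 1. registration
        await docket.add(greet)(name="world")  # 2. immediate work goes to the Redis stream

        async with Worker(docket) as worker:
            await worker.run_until_finished()  # 3./4. consume, inject dependencies, execute


asyncio.run(main())
```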
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.6.4
+Version: 0.7.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
chaos/driver.py

@@ -153,13 +153,20 @@ async def main(
                 sent_tasks = await r.zcard("hello:sent")
                 received_tasks = await r.zcard("hello:received")

+                stream_length = await r.xlen(docket.stream_key)
+                pending = await r.xpending(
+                    docket.stream_key, docket.worker_group_name
+                )
+
                 logger.info(
-                    "sent: %d, received: %d, clients: %d",
+                    "sent: %d, received: %d, stream: %d, pending: %d, clients: %d",
                     sent_tasks,
                     received_tasks,
+                    stream_length,
+                    pending["pending"],
                     connected_clients,
                 )
-                if sent_tasks >= tasks:
+                if sent_tasks >= tasks and received_tasks >= sent_tasks:
                     break
             except redis.exceptions.ConnectionError as e:
                 logger.error(
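For reference on the new progress line: in redis-py, `xpending(stream, group)` with no further arguments returns a summary dict, and its `"pending"` entry is the count logged above. A minimal sketch (the stream and group names here are placeholders; the driver gets the real ones from `docket.stream_key` and `docket.worker_group_name`):

```python
import asyncio

from redis.asyncio import Redis


async def show_pending() -> None:
    async with Redis.from_url("redis://localhost:6379/0") as r:
        # Summary form: {"pending": <count>, "min": <id>, "max": <id>, "consumers": [...]}
        summary = await r.xpending("docket:stream", "docket-workers")
        print(summary["pending"])


asyncio.run(show_pending())
```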
@@ -177,16 +184,13 @@ async def main(

            elif chaos_chance < 0.10:
                worker_index = random.randrange(len(worker_processes))
-               worker_to_kill = worker_processes
+               worker_to_kill = worker_processes[worker_index]

                logger.warning("CHAOS: Killing worker %d...", worker_index)
                try:
-                   worker_to_kill.
+                   worker_to_kill.kill()
                except ProcessLookupError:
                    logger.warning(" What is dead may never die!")
-
-               logger.warning("CHAOS: Replacing worker %d...", worker_index)
-               worker_processes.append(await spawn_worker())
            elif chaos_chance < 0.15:
                logger.warning("CHAOS: Queuing a toxic task...")
                try:

@@ -194,6 +198,17 @@ async def main(
                except redis.exceptions.ConnectionError:
                    pass

+           # Check if any worker processes have died and replace them
+           for i in range(len(worker_processes)):
+               process = worker_processes[i]
+               if process.returncode is not None:
+                   logger.warning(
+                       "Worker %d has died with code %d, replacing it...",
+                       i,
+                       process.returncode,
+                   )
+                   worker_processes[i] = await spawn_worker()
+
            await asyncio.sleep(0.25)

        async with docket.redis() as r:

@@ -225,5 +240,6 @@ async def main(

 if __name__ == "__main__":
     mode = sys.argv[1] if len(sys.argv) > 1 else "chaos"
+    tasks = int(sys.argv[2]) if len(sys.argv) > 2 else 20000
     assert mode in ("performance", "chaos")
-    asyncio.run(main(mode=mode))
+    asyncio.run(main(mode=mode, tasks=tasks))
chaos/tasks.py

@@ -1,4 +1,6 @@
+import asyncio
 import logging
+import random
 import sys
 import time


@@ -29,7 +31,12 @@ async def hello(


 async def toxic():
-
+    if random.random() < 0.25:
+        sys.exit(42)
+    elif random.random() < 0.5:
+        raise Exception("Boom")
+    else:
+        await asyncio.sleep(random.uniform(0.01, 0.05))


 chaos_tasks = [hello, toxic]
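Note that `toxic()` draws `random.random()` twice, so the branch mix is roughly 25% exit, 37.5% raise (0.75 × 0.5), and 37.5% sleep, not an even 25/25/50 split. A quick simulation of the same branching confirms the proportions:

```python
import random

random.seed(0)
counts = {"exit": 0, "raise": 0, "sleep": 0}
for _ in range(100_000):
    if random.random() < 0.25:
        counts["exit"] += 1
    elif random.random() < 0.5:  # second draw, taken only 75% of the time
        counts["raise"] += 1
    else:
        counts["sleep"] += 1

print(counts)  # roughly {'exit': 25000, 'raise': 37500, 'sleep': 37500}
```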
src/docket/annotations.py

@@ -2,6 +2,8 @@ import abc
 import inspect
 from typing import Any, Iterable, Mapping, Self

+from .instrumentation import CACHE_SIZE
+

 class Annotation(abc.ABC):
     _cache: dict[tuple[type[Self], inspect.Signature], Mapping[str, Self]] = {}

@@ -10,6 +12,7 @@ class Annotation(abc.ABC):
     def annotated_parameters(cls, signature: inspect.Signature) -> Mapping[str, Self]:
         key = (cls, signature)
         if key in cls._cache:
+            CACHE_SIZE.set(len(cls._cache), {"cache": "annotation"})
             return cls._cache[key]

         annotated: dict[str, Self] = {}

@@ -30,6 +33,7 @@ class Annotation(abc.ABC):
                 annotated[param_name] = arg_type()

         cls._cache[key] = annotated
+        CACHE_SIZE.set(len(cls._cache), {"cache": "annotation"})
         return annotated

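The `CACHE_SIZE` metric these call sites use comes from `src/docket/instrumentation.py` (+6 lines), whose new code is not shown in this diff. Judging by the call shape (`.set(value, {"cache": ...})`), it is presumably an OpenTelemetry gauge along the following lines. This is a guess consistent with the call sites, not the actual definition, and the metric name here is invented:

```python
from opentelemetry import metrics

meter = metrics.get_meter("docket")

# A gauge records the last observed value per attribute set, so each
# CACHE_SIZE.set(len(cache), {"cache": "annotation"}) call overwrites
# the previous reading for that cache.
CACHE_SIZE = meter.create_gauge(
    "docket_cache_size",
    description="Entries held in docket's internal caches",
)
```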
src/docket/cli.py

@@ -358,6 +358,32 @@ def strike(
     asyncio.run(run())


+@app.command(help="Clear all pending and scheduled tasks from the docket")
+def clear(
+    docket_: Annotated[
+        str,
+        typer.Option(
+            "--docket",
+            help="The name of the docket",
+            envvar="DOCKET_NAME",
+        ),
+    ] = "docket",
+    url: Annotated[
+        str,
+        typer.Option(
+            help="The URL of the Redis server",
+            envvar="DOCKET_URL",
+        ),
+    ] = "redis://localhost:6379/0",
+) -> None:
+    async def run() -> None:
+        async with Docket(name=docket_, url=url) as docket:
+            cleared_count = await docket.clear()
+            print(f"Cleared {cleared_count} tasks from docket '{docket_}'")
+
+    asyncio.run(run())
+
+
 @app.command(help="Restores a task or parameters to the Docket")
 def restore(
     function: Annotated[
src/docket/dependencies.py

@@ -3,7 +3,7 @@ import logging
 import time
 from contextlib import AsyncExitStack, asynccontextmanager
 from contextvars import ContextVar
-from datetime import timedelta
+from datetime import datetime, timedelta, timezone
 from types import TracebackType
 from typing import (
     TYPE_CHECKING,

@@ -14,12 +14,14 @@ from typing import (
     Callable,
     Counter,
     Generic,
+    NoReturn,
     TypeVar,
     cast,
 )

 from .docket import Docket
 from .execution import Execution, TaskFunction, get_signature
+from .instrumentation import CACHE_SIZE

 if TYPE_CHECKING: # pragma: no cover
     from .worker import Worker

@@ -188,6 +190,10 @@ def TaskLogger() -> logging.LoggerAdapter[logging.Logger]:
     return cast(logging.LoggerAdapter[logging.Logger], _TaskLogger())


+class ForcedRetry(Exception):
+    """Raised when a task requests a retry via `in_` or `at`"""
+
+
 class Retry(Dependency):
     """Configures linear retries for a task. You can specify the total number of
     attempts (or `None` to retry indefinitely), and the delay between attempts.
@@ -222,6 +228,17 @@ class Retry(Dependency):
         retry.attempt = execution.attempt
         return retry

+    def at(self, when: datetime) -> NoReturn:
+        now = datetime.now(timezone.utc)
+        diff = when - now
+        diff = diff if diff.total_seconds() >= 0 else timedelta(0)
+
+        self.in_(diff)
+
+    def in_(self, when: timedelta) -> NoReturn:
+        self.delay: timedelta = when
+        raise ForcedRetry()
+

 class ExponentialRetry(Retry):
     """Configures exponential retries for a task. You can specify the total number
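With `in_` and `at`, a task can force its own retry mid-execution: setting `self.delay` and raising `ForcedRetry` hands control to the worker's retry path, which reschedules with the new delay. A hedged usage sketch follows; the task body is invented, and the default-parameter injection style is inferred from the dependency pattern described in CLAUDE.md:

```python
from datetime import timedelta

from docket import Retry


async def poll_upstream(
    retry: Retry = Retry(attempts=5, delay=timedelta(seconds=1)),
) -> None:
    result_ready = False  # stand-in for a real readiness check
    if not result_ready:
        # Raises ForcedRetry internally; the worker catches it and
        # reschedules this task to run again in 30 seconds.
        retry.in_(timedelta(seconds=30))
```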
@@ -251,7 +268,6 @@ class ExponentialRetry(Retry):
         maximum_delay: The maximum delay between attempts.
         """
         super().__init__(attempts=attempts, delay=minimum_delay)
-        self.minimum_delay = minimum_delay
         self.maximum_delay = maximum_delay

     async def __aenter__(self) -> "ExponentialRetry":

@@ -259,14 +275,14 @@ class ExponentialRetry(Retry):

         retry = ExponentialRetry(
             attempts=self.attempts,
-            minimum_delay=self.minimum_delay,
+            minimum_delay=self.delay,
             maximum_delay=self.maximum_delay,
         )
         retry.attempt = execution.attempt

         if execution.attempt > 1:
             backoff_factor = 2 ** (execution.attempt - 1)
-            calculated_delay = self.minimum_delay * backoff_factor
+            calculated_delay = self.delay * backoff_factor

             if calculated_delay > self.maximum_delay:
                 retry.delay = self.maximum_delay
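To make the renamed backoff arithmetic concrete: delays double per attempt from the minimum and clamp at the maximum. With a 1-second minimum and an 8-second maximum (hypothetical values, not from the diff):

```python
from datetime import timedelta

minimum_delay = timedelta(seconds=1)
maximum_delay = timedelta(seconds=8)

for attempt in range(2, 7):
    backoff_factor = 2 ** (attempt - 1)
    delay = min(minimum_delay * backoff_factor, maximum_delay)
    print(attempt, delay)
# attempt 2 → 0:00:02, 3 → 0:00:04, 4 → 0:00:08, 5 and 6 → capped at 0:00:08
```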
@@ -400,6 +416,7 @@ def get_dependency_parameters(
     function: TaskFunction | DependencyFunction[Any],
 ) -> dict[str, Dependency]:
     if function in _parameter_cache:
+        CACHE_SIZE.set(len(_parameter_cache), {"cache": "parameter"})
         return _parameter_cache[function]

     dependencies: dict[str, Dependency] = {}

@@ -413,6 +430,7 @@ def get_dependency_parameters(
         dependencies[parameter] = param.default

     _parameter_cache[function] = dependencies
+    CACHE_SIZE.set(len(_parameter_cache), {"cache": "parameter"})
     return dependencies

src/docket/docket.py

@@ -743,3 +743,46 @@
             workers.append(WorkerInfo(worker_name, last_seen, task_names))

         return workers
+
+    async def clear(self) -> int:
+        """Clear all pending and scheduled tasks from the docket.
+
+        This removes all tasks from the stream (immediate tasks) and queue
+        (scheduled tasks), along with their associated parked data. Running
+        tasks are not affected.
+
+        Returns:
+            The total number of tasks that were cleared.
+        """
+        with tracer.start_as_current_span(
+            "docket.clear",
+            attributes=self.labels(),
+        ):
+            async with self.redis() as redis:
+                async with redis.pipeline() as pipeline:
+                    # Get counts before clearing
+                    pipeline.xlen(self.stream_key)
+                    pipeline.zcard(self.queue_key)
+                    pipeline.zrange(self.queue_key, 0, -1)
+
+                    stream_count: int
+                    queue_count: int
+                    scheduled_keys: list[bytes]
+                    stream_count, queue_count, scheduled_keys = await pipeline.execute()
+
+                    # Clear all data
+                    # Trim stream to 0 messages instead of deleting it to preserve consumer group
+                    if stream_count > 0:
+                        pipeline.xtrim(self.stream_key, maxlen=0, approximate=False)
+                    pipeline.delete(self.queue_key)
+
+                    # Clear parked task data and known task keys
+                    for key_bytes in scheduled_keys:
+                        key = key_bytes.decode()
+                        pipeline.delete(self.parked_task_key(key))
+                        pipeline.delete(self.known_task_key(key))
+
+                    await pipeline.execute()
+
+        total_cleared = stream_count + queue_count
+        return total_cleared
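`Docket.clear()` backs the new `docket clear` CLI command shown in the `cli.py` hunk above, and programmatic use follows the same pattern the CLI wraps. A sketch, with the docket name and Redis URL assumed:

```python
import asyncio

from docket import Docket


async def main() -> None:
    async with Docket(name="docket", url="redis://localhost:6379/0") as docket:
        # Removes pending (stream) and scheduled (queue) tasks plus their
        # parked data; running tasks are unaffected.
        cleared = await docket.clear()
        print(f"Cleared {cleared} tasks")


asyncio.run(main())
```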
src/docket/execution.py

@@ -19,7 +19,7 @@ import opentelemetry.context
 from opentelemetry import propagate, trace

 from .annotations import Logged
-from .instrumentation import message_getter
+from .instrumentation import CACHE_SIZE, message_getter

 logger: logging.Logger = logging.getLogger(__name__)


@@ -32,10 +32,12 @@ _signature_cache: dict[Callable[..., Any], inspect.Signature] = {}

 def get_signature(function: Callable[..., Any]) -> inspect.Signature:
     if function in _signature_cache:
+        CACHE_SIZE.set(len(_signature_cache), {"cache": "signature"})
         return _signature_cache[function]

     signature = inspect.signature(function)
     _signature_cache[function] = signature
+    CACHE_SIZE.set(len(_signature_cache), {"cache": "signature"})
     return signature

src/docket/worker.py

@@ -15,7 +15,7 @@ from typing import (
 )

 from opentelemetry import trace
-from opentelemetry.trace import Tracer
+from opentelemetry.trace import Status, StatusCode, Tracer
 from redis.asyncio import Redis
 from redis.exceptions import ConnectionError, LockError


@@ -531,7 +531,7 @@ class Worker:
                 "code.function.name": execution.function.__name__,
             },
             links=execution.incoming_span_links(),
-        ):
+        ) as span:
            try:
                async with resolved_dependencies(self, execution) as dependencies:
                    # Preemptively reschedule the perpetual task for the future, or clear

@@ -576,6 +576,8 @@ class Worker:
                    duration = log_context["duration"] = time.time() - start
                    TASKS_SUCCEEDED.add(1, counter_labels)

+                   span.set_status(Status(StatusCode.OK))
+
                    rescheduled = await self._perpetuate_if_requested(
                        execution, dependencies, timedelta(seconds=duration)
                    )

@@ -584,10 +586,13 @@ class Worker:
                    logger.info(
                        "%s [%s] %s", arrow, ms(duration), call, extra=log_context
                    )
-           except Exception:
+           except Exception as e:
                duration = log_context["duration"] = time.time() - start
                TASKS_FAILED.add(1, counter_labels)

+               span.record_exception(e)
+               span.set_status(Status(StatusCode.ERROR, str(e)))
+
                retried = await self._retry_if_requested(execution, dependencies)
                if not retried:
                    retried = await self._perpetuate_if_requested(